
前面介绍了单独的匹配。如果把这个匹配过程接入到LLM,就是完整的RAG,即检索增强生成。我们先看看上一个例子中还没介绍的最后几行代码:
// Example: run a full RAG query. The RetrievalQA chain retrieves the
// top-3 most similar documents (score threshold 0.8) from the Redis
// vector store, stuffs them into a prompt, and asks the LLM to answer.
// NOTE(review): err is not checked in this example snippet.
result, err := chains.Run(
ctx,
chains.NewRetrievalQAFromLLM(
llm,
// Retriever over the Redis store: at most 3 documents, score >= 0.8.
vectorstores.ToRetriever(redisStore, 3, vectorstores.WithScoreThreshold(0.8)),
),
"有哪些猫?",
)
fmt.Println(result)

我们从最内层函数开始介绍:
// ToRetriever wraps a VectorStore as a Retriever. numDocuments caps how
// many documents a search returns; options (e.g. WithScoreThreshold) are
// stored and presumably forwarded to the underlying similarity search —
// TODO confirm against the Retriever's query method.
func ToRetriever(vectorStore VectorStore, numDocuments int, options ...Option) Retriever {
return Retriever{
v: vectorStore, // backing vector store
numDocs: numDocuments, // max number of documents to retrieve
options: options, // search options, e.g. score threshold
}
}type Retriever struct {
CallbacksHandler callbacks.Handler // optional; not set by ToRetriever
v VectorStore
numDocs int
options []Option
}这里定义了向量数据库,检索结果个数,和相似度的阈值。
// NewRetrievalQAFromLLM loads a question answering combine documents chain
// from the llm and creates a new retrievalQA chain.
func NewRetrievalQAFromLLM(llm llms.Model, retriever schema.Retriever) RetrievalQA {
return NewRetrievalQA(
LoadStuffQA(llm), // "stuff" strategy: concatenate all retrieved docs into one prompt
retriever,
)
}func NewRetrievalQA(combineDocumentsChain Chain, retriever schema.Retriever) RetrievalQA {
// NewRetrievalQA builds a RetrievalQA chain from an arbitrary
// combine-documents chain and a retriever, using the default input key.
return RetrievalQA{
Retriever: retriever,
CombineDocumentsChain: combineDocumentsChain,
InputKey: _retrievalQADefaultInputKey,
ReturnSourceDocuments: false, // only the answer is returned, not the retrieved documents
}
}// LoadStuffQA loads a StuffDocuments chain with default prompts for the llm chain.
func LoadStuffQA(llm llms.Model) StuffDocuments {
// The default QA prompt template takes "context" (the stuffed
// documents) and "question" (the user input) as template variables.
defaultQAPromptTemplate := prompts.NewPromptTemplate(
_defaultStuffQATemplate,
[]string{"context", "question"},
)
// No Conditionals are registered here, so GetPrompt always falls
// through to DefaultPrompt for any model.
qaPromptSelector := ConditionalPromptSelector{
DefaultPrompt: defaultQAPromptTemplate,
}
prompt := qaPromptSelector.GetPrompt(llm)
llmChain := NewLLMChain(llm, prompt)
return NewStuffDocuments(llmChain)
}可以看到里面定义了qaPromptSelector,最后选择了一个提示词,并封装了一个langchain的节点。
// ConditionalPromptSelector chooses a prompt per model: the first entry in
// Conditionals whose Condition accepts the model wins; otherwise
// DefaultPrompt is used (see GetPrompt).
type ConditionalPromptSelector struct {
DefaultPrompt prompts.PromptTemplate
Conditionals []struct {
Condition func(llms.Model) bool // predicate deciding whether Prompt applies to a model
Prompt prompts.PromptTemplate
}
}var _ PromptSelector = ConditionalPromptSelector{}
// GetPrompt returns the prompt of the first conditional whose Condition
// accepts llm; if none matches, the DefaultPrompt is returned.
func (s ConditionalPromptSelector) GetPrompt(llm llms.Model) prompts.PromptTemplate {
for _, conditional := range s.Conditionals {
if conditional.Condition(llm) {
return conditional.Prompt
}
}
return s.DefaultPrompt
}func NewStuffDocuments(llmChain *LLMChain) StuffDocuments {
// NewStuffDocuments wraps an LLM chain with the package defaults for
// the input key, the document variable name, and the separator
// (presumably joined between stuffed documents — TODO confirm).
return StuffDocuments{
LLMChain: llmChain,
InputKey: _combineDocumentsDefaultInputKey,
DocumentVariableName: _combineDocumentsDefaultDocumentVariableName,
Separator: _stuffDocumentsDefaultSeparator,
}
}最后我们看看最外层的Run函数,它最终调用了前面介绍的Call函数本质上就是一个langchain的调用
// Run executes chain c with a single caller-supplied input and expects a
// single output. It determines which input key the caller must provide by
// removing every key the chain's memory already supplies; exactly one key
// must remain, and the chain must declare exactly one output key,
// otherwise an error is returned. (Definition continues past this excerpt.)
func Run(ctx context.Context, c Chain, input any, options ...ChainCallOption) (string, error) {
inputKeys := c.GetInputKeys()
memoryKeys := c.GetMemory().MemoryVariables(ctx)
neededKeys := make([]string, 0, len(inputKeys))
// Remove keys gotten from the memory.
for _, inputKey := range inputKeys {
isInMemory := false
for _, memoryKey := range memoryKeys {
if inputKey == memoryKey {
isInMemory = true
// NOTE(review): `continue` could be `break` here — the match is
// already found; behavior is the same either way.
continue
}
}
if isInMemory {
continue
}
neededKeys = append(neededKeys, inputKey)
}
// Run only supports chains with exactly one non-memory input key.
if len(neededKeys) != 1 {
return "", ErrMultipleInputsInRun
}
outputKeys := c.GetOutputKeys()
if len(outputKeys) != 1 {
return "", ErrMultipleOutputsInRun
}
// Bind the single input value to the one needed key and delegate to Call.
inputValues := map[string]any{neededKeys[0]: input}
outputValues, err := Call(ctx, c, inputValues, options...)

本文分享自 golang算法架构leetcode技术php 微信公众号,前往查看
如有侵权,请联系 cloudcommunity@tencent.com 删除。
本文参与 腾讯云自媒体同步曝光计划 ,欢迎热爱写作的你一起参与!