from langchain.chains import LLMChain
from langchain_community.llms.yuan2 import Yuan2
# NOTE(review): the lines "复制" ("Copy") and "向 AI 提问" ("Ask AI") were
# web-page button labels captured during extraction, not part of the code.
# Default infer_api for a locally deployed Yuan2.0 inference server.
infer_api = "http://127.0.0.1:8000/yuan"

# To reach the endpoint directly in a proxied environment, bypass the proxy:
# import os
# os.environ["no_proxy"]="localhost,127.0.0.1,::1"

yuan_llm = Yuan2(
    infer_api=infer_api,
    max_tokens=2048,
    temp=1.0,
    top_p=0.9,
    use_history=False,
)

# Turn on use_history only when you want Yuan2.0 to keep track of the
# conversation history and send the accumulated context to the backend
# model API, which makes it stateful. By default it is stateless.
# llm.use_history = True