-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathlanghain_llm_stream.py
41 lines (36 loc) · 1.1 KB
/
langhain_llm_stream.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
import os
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_openai import ChatOpenAI
from typing import Literal
from dotenv import load_dotenv
# Populate os.environ from a local .env file so the API-key lookups below
# (GOOGLE_API_KEY / OPENAI_API_KEY) can succeed without exporting them manually.
load_dotenv()
def get_chat_llm(
    provider: Literal["openai", "google"] = "google",
    temperature: float = 0.1,
    max_token: int = 1024,
):
    """Build a streaming-capable chat LLM for the requested provider.

    Args:
        provider: Backend to use, either ``"openai"`` or ``"google"``.
        temperature: Sampling temperature (0.0 = most deterministic).
        max_token: Maximum number of tokens in the completion.

    Returns:
        A configured ``ChatGoogleGenerativeAI`` or ``ChatOpenAI`` instance.

    Raises:
        ValueError: If ``provider`` is not one of the supported values.
        KeyError: If the provider's API-key environment variable is unset.
    """
    if provider == "google":
        return ChatGoogleGenerativeAI(
            model="gemini-1.5-flash",
            temperature=temperature,
            max_tokens=max_token,
            api_key=os.environ["GOOGLE_API_KEY"],
        )
    if provider == "openai":
        return ChatOpenAI(
            model="gpt-4o-mini",
            temperature=temperature,
            max_tokens=max_token,
            api_key=os.environ["OPENAI_API_KEY"],
        )
    raise ValueError("Provider should be 'openai' or 'google'")
def main() -> None:
    """Demo: stream one chat completion from the OpenAI backend to stdout."""
    llm = get_chat_llm(provider="openai")
    prompt = "Who are you?"
    messages = [
        {"role": "system", "content": "You are a helpful AI chat Bot!"},
        {"role": "user", "content": prompt},
    ]
    # stream() yields partial chunks as they arrive; flush each one so the
    # response appears incrementally instead of after the full completion.
    for chunk in llm.stream(messages):
        print(chunk.content, end="", flush=True)


# Guard the demo so importing this module does not trigger a live API call.
if __name__ == "__main__":
    main()