One of the best places to prototype an app based on OpenAI's ChatGPT API is Chainlit.
Let us make sure we have a Python (>3.8) environment and the latest dependencies installed. I recommend using conda.
conda create -n conda-py-3.11-env2 python=3.11
pip install chainlit openai
Then here's a quick way to test the OpenAI API. First, create a .env
file with the following content: OPENAI_API_KEY=sk-XXX
# Minimal smoke test for the OpenAI chat-completions endpoint.
# Expects OPENAI_API_KEY in a local .env file (loaded via python-dotenv).
from openai import OpenAI
from dotenv import load_dotenv

load_dotenv()  # pull OPENAI_API_KEY into the process environment

client = OpenAI()  # the SDK reads OPENAI_API_KEY from the environment
completion = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "You are a helpful assistant"},
        {"role": "user", "content": "What is the capital of France?"},
    ],
)
print(completion.choices[0].message)
Here's another quick snippet to test OpenAI's vision (multimodal) model.
# Smoke test for the OpenAI vision chat endpoint: send one text prompt
# plus an image URL and print the first returned choice.
from openai import OpenAI
from dotenv import load_dotenv

load_dotenv()  # pull OPENAI_API_KEY into the process environment

client = OpenAI()
response = client.chat.completions.create(
    model="gpt-4-vision-preview",
    messages=[
        {
            "role": "user",
            # Vision requests mix content parts: text plus image_url entries.
            "content": [
                {"type": "text", "text": "What’s in this image?"},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
                    },
                },
            ],
        }
    ],
    max_tokens=300,  # cap the length of the model's reply
)
print(response.choices[0])
Finally, here's a minimal Chainlit app to get started, which can be run as follows: chainlit run app.py -w
import chainlit as cl
from openai import AsyncOpenAI
import os

# Async client so streaming tokens does not block chainlit's event loop.
client = AsyncOpenAI(api_key=os.environ["OPENAI_API_KEY"])

# Shared completion settings; temperature 0.0 for deterministic answers.
settings = {
    "model": "gpt-3.5-turbo",
    "temperature": 0.0,
}


@cl.on_chat_start
def start_chat():
    """Seed the per-session conversation history with the system prompt."""
    cl.user_session.set(
        "message_history",
        [{"role": "system", "content": "You are a helpful assistant"}],
    )


@cl.on_message
async def main(message: cl.Message):
    """Append the user's turn, then stream the model's reply token by token."""
    # NOTE: removed a leftover `pdb.set_trace()` and debug print — the
    # breakpoint would freeze the app on every incoming message.
    message_history = cl.user_session.get("message_history")
    message_history.append({"role": "user", "content": message.content})

    # Send an empty message first so tokens can be streamed into it.
    msg = cl.Message(content="")
    await msg.send()

    stream = await client.chat.completions.create(
        messages=message_history, stream=True, **settings
    )
    async for part in stream:
        # delta.content is None on role/terminal chunks — skip those.
        if token := part.choices[0].delta.content or "":
            await msg.stream_token(token)

    # Record the assistant turn so the next request has full context.
    message_history.append({"role": "assistant", "content": msg.content})
    await msg.update()
Here is how to display an image back to the user.
import chainlit as cl


@cl.on_message
async def on_message(msg: cl.Message):
    """Echo back any image attachments; report when none are present."""
    if not msg.elements:
        await cl.Message(content="No file attached").send()
        return

    # Keep only attachments whose MIME type marks them as images.
    images = [file for file in msg.elements if "image" in file.mime]
    if images:
        for image in images:
            # Was f"Received image" — an f-string with no placeholder.
            await cl.Message(content="Received image", elements=[image]).send()
    else:
        await cl.Message(content="No image found in the message").send()
Top comments (0)