Gregor Schafroth

Python & OpenAI beginner journey 5 | A Flask Chatbot on Heroku with Celery and Redis

Today I got back to working on my OpenAI chatbot after a break of about two weeks, so I (re)made 7 small apps to get up to speed again. Here I'll share a list of my programs and some of the code below 🙂

Also, I am now on my own, and I think I would be much faster if I could learn from experienced programmers. If anyone reading this would consider coaching a junior programmer, send me a message. I'm of course happy to pay something 😄

So here is my work today:

  1. OpenAI Assistant API interaction in Python
  2. OpenAI Assistant API continuous conversation in Python
  3. A chat interface with Flask (always answers with '🍌 banana' after 2 seconds)
  4. A chat with the OpenAI API in this Flask interface
  5. An easy exercise to understand how Celery and Redis work
  6. The OpenAI API chat with Flask again, locally, but this time with Celery & Redis
  7. The same as 6, but deployed online on Heroku

Side note: the reason I am using Celery & Redis is that I previously ran into Heroku timeout issues when the OpenAI API took more than 30 seconds to respond, which happens fairly often with complicated prompts. I'm actually not sure this is a good approach, but ChatGPT recommended it and it looks like it works, so I'll go with it for now.

And here is some of my code from today:

1. OpenAI Assistant API interaction in Python

"""
In this program I practice to recreate a simple interaction with the OpenAI Assistant's API
"""

import logging
import os
import time
from openai import OpenAI

# Set up basic configuration for logging
logging.basicConfig(level=logging.ERROR, format='%(asctime)s - %(levelname)s: %(message)s')

# Create a module-level logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)

api_key = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=api_key)

def main():
    logger.debug('create assistant')
    assistant = client.beta.assistants.create(
        name="Helpful AI",
        instructions="You are a helpful AI. Just answer any questions a user asks you to the best of your ability.",
        tools=[{"type": "code_interpreter"}],
        model="gpt-3.5-turbo-1106"
    )

    logger.debug('create thread')
    thread = client.beta.threads.create()

    logger.debug('create message')
    client.beta.threads.messages.create(
        thread_id=thread.id,
        role="user",
        content=input('You: ')
    )

    logger.debug('create run')
    run = client.beta.threads.runs.create(
        thread_id=thread.id,
        assistant_id=assistant.id,
    )

    while run.status in ('queued', 'in_progress'):
        logger.debug('run retrieve')
        run = client.beta.threads.runs.retrieve(
            thread_id=thread.id,
            run_id=run.id
        )
        logger.info(f'run.status: {run.status}')
        time.sleep(1)

    logger.debug('get messages')
    messages = client.beta.threads.messages.list(
        thread_id=thread.id
    )

    logger.debug('get response')
    message = client.beta.threads.messages.retrieve(
        thread_id=thread.id,
        message_id=messages.data[0].id
    )

    print(f'AI: {message.content[0].text.value}')

if __name__ == '__main__':
    main()
```
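One thing worth knowing: every run of this script creates a brand-new assistant that persists in your OpenAI account. If you don't want test assistants piling up, you can delete them at the end (a small addition of mine; the delete call is part of the same `openai` client):

```python
# Optional cleanup: assistants persist server-side until deleted
client.beta.assistants.delete(assistant.id)
```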

2. OpenAI Assistant API continuous conversation in Python

"""
In this seoncond iteration of the program I practice creating a continuous conversation with the OpenAI Assistant API Chatbot
"""

import logging
import os
import time
from openai import OpenAI

# Set up basic configuration for logging
logging.basicConfig(level=logging.ERROR, format='%(asctime)s - %(levelname)s: %(message)s')

# Create a module-level logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG) 

api_key = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=api_key)

def create_assistant():
    logger.debug('Creating assistant...')
    assistant = client.beta.assistants.create(
        name="Helpful AI",
        instructions="You are a helpful AI. Just answer any questions a user asks you to the best of your ability.",
        tools=[{"type": "code_interpreter"}],
        model="gpt-3.5-turbo-1106"
    )
    return assistant

def create_thread():
    logger.debug('Creating thread...')
    thread = client.beta.threads.create()
    return thread

def create_message(thread_id, user_input):
    logger.debug('Creating message...')
    client.beta.threads.messages.create(
        thread_id=thread_id,
        role="user",
        content=user_input
    )

def create_run(thread_id, assistant_id):
    logger.debug('Creating run...')
    run = client.beta.threads.runs.create(
        thread_id=thread_id,
        assistant_id=assistant_id,
    )
    return run

def wait_for_response(thread_id, run):
    logger.info(f'run.status: {run.status}')
    while run.status in ('queued', 'in_progress'):
        logger.debug('Retrieving run...')
        run = client.beta.threads.runs.retrieve(
            thread_id=thread_id,
            run_id=run.id
        )
        time.sleep(1)
        logger.info(f'run.status: {run.status}')

def retrieve_response(thread_id):
    logger.debug('Retrieving messages...')
    messages = client.beta.threads.messages.list(
        thread_id=thread_id
    )
    return messages

def main():
    assistant = create_assistant()
    thread = create_thread()
    print("Chatbot is ready! Type your questions and press Enter. Press Ctrl+C to exit.")
    try:
        while True: # This loop enables a continuous conversation
            user_input = input('You: ')
            if not user_input.strip():
                print("Please enter a question.")
                continue
            create_message(thread.id, user_input)
            run = create_run(thread.id, assistant.id)
            wait_for_response(thread.id, run)
            messages = retrieve_response(thread.id)
            if messages.data:
                print(f'AI: {messages.data[0].content[0].text.value}')
            else:
                print("No response from AI.")         
    except KeyboardInterrupt:
        print("\nExiting chatbot. Goodbye!")

if __name__ == '__main__':
    main()
```
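One caveat with `wait_for_response`: the loop also exits when a run ends as `failed`, `cancelled`, or `expired`, and the caller never learns the final status. A stricter variant could look like this (my sketch, not part of the original program):

```python
def wait_for_response_strict(thread_id, run):
    """Like wait_for_response, but returns the final run and raises on failure."""
    while run.status in ('queued', 'in_progress'):
        time.sleep(1)
        run = client.beta.threads.runs.retrieve(
            thread_id=thread_id,
            run_id=run.id
        )
    if run.status != 'completed':
        raise RuntimeError(f'Run ended with status: {run.status}')
    return run
```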

3. A chat interface with Flask (always answers with '🍌 banana' after 2 seconds)

"""
In this app I practice creating a simple chat interface with Flask
I can enter any message and after 2 seconds receive "๐ŸŒ Banana" as a response
"""

from flask import Flask, render_template, request, jsonify
import time

app = Flask(__name__)

@app.route('/')
def home():
    return render_template('chat.html')

@app.route('/send_message', methods=['POST'])
def send_message():
    message = request.form['message']  # Get the received message
    response = f"{message}? 🍌 Banana"  # Add a question mark and append "Banana"
    time.sleep(2)  # Simulate delay
    return jsonify({'message': response})

if __name__ == '__main__':
    app.run(debug=True)
```
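The `chat.html` template isn't shown here, but you can exercise the endpoint without any frontend via Flask's built-in test client (a quick sketch, assuming the file above is saved as `app.py`):

```python
from app import app  # assumes the Flask app above lives in app.py

with app.test_client() as client:
    resp = client.post('/send_message', data={'message': 'hello'})
    print(resp.get_json())  # {'message': 'hello? 🍌 Banana'} after ~2 seconds
```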

4. A chat with the OpenAI API in this Flask interface

"""
In this app I expand the previous simple chatinterface to interact with the OpenAI API.
I outsourced the required functions to openai_functions.py to keep this file here short
"""

from flask import Flask, render_template, request, jsonify
from openai import OpenAI
from openai_functions import create_assistant, create_thread, create_message, create_run, wait_for_response, retrieve_response
import logging
import os

# Configuring logging
logging.basicConfig(level=logging.ERROR, format='%(asctime)s - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG) 

# Setting up Flask
app = Flask(__name__)

# Initialize the OpenAI assistant and thread as global variables
assistant = None
thread = None

@app.route('/')
def home():
    return render_template('chat.html')

@app.route('/send_message', methods=['POST'])
def send_message():
    global assistant, thread  # Access the global variables

    # Initialize assistant and thread if they are None (first message)
    if assistant is None or thread is None:
        assistant = create_assistant()
        thread = create_thread()

    message = request.form['message']  # Get the received message

    if not message.strip():
        return jsonify({'message': 'Please enter a question.'})

    # Each request handles exactly one message; the conversation history
    # lives in the OpenAI thread, so no loop is needed here
    create_message(thread.id, message)
    run = create_run(thread.id, assistant.id)
    wait_for_response(thread.id, run)
    messages = retrieve_response(thread.id)
    if messages.data:
        ai_response = messages.data[0].content[0].text.value
    else:
        ai_response = 'No response from AI.'
    return jsonify({'message': ai_response})

if __name__ == '__main__':
    app.run(debug=True)
```
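Note that this version still blocks the HTTP request while polling OpenAI, so a slow completion will run into the Heroku 30-second timeout I mentioned above. That is exactly what apps 6 and 7 fix by moving the wait into a Celery worker.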

5. An easy exercise to understand how Celery and Redis work

```python
# main.py
from tasks import add_numbers

def main():
    result = add_numbers.delay(5, 3)  # Send the task to Celery
    print("Task sent, waiting for result...")
    print(result.get())  # Get the result when it's ready

if __name__ == '__main__':
    main()
```

```python
# tasks.py
from celery import Celery
import time

app = Celery(
    'myapp',
    broker='redis://localhost:6379/0',  # Redis URL for message broker
    backend='redis://localhost:6379/1'  # Redis URL for result backend
)

@app.task
def add_numbers(a, b):
    time.sleep(3)
    return a + b
```
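To see this work, three processes are involved: start Redis with `redis-server`, start a worker with `celery -A tasks worker --loglevel=info`, then run `python main.py` in a third terminal. The `.delay()` call returns immediately after queuing the task; `result.get()` then blocks for the ~3 seconds the worker needs.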

6. The OpenAI API chat with Flask again, locally, but this time with Celery & Redis

"""
app.py further builds on a4 by outsourcing long-running tasks to Celery and Redis to avoid a time-out issue with Heroku
"""

from celery_app import process_openai_response
from flask import Flask, render_template, request, jsonify
from openai_functions import create_assistant, create_thread, create_message, create_run, wait_for_response, retrieve_response
import logging

# Configuring logging
logging.basicConfig(level=logging.ERROR, format='%(asctime)s - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG) 

# Setting up Flask
app = Flask(__name__)

# Initialize the OpenAI assistant and thread as global variables
assistant = None
thread = None

@app.route('/')
def home():
    return render_template('chat.html')

@app.route('/send_message', methods=['POST'])
def send_message():
    global assistant, thread  # Access the global variables

    # Initialize assistant and thread if they are None (first message)
    if assistant is None or thread is None:
        assistant = create_assistant()
        thread = create_thread()

    message = request.form['message']  # Get the received message
    if not message.strip():
        return jsonify({'message': 'Please enter a question.'})

    create_message(thread.id, message)
    task = process_openai_response.delay(thread.id, assistant.id)  # Dispatch Celery task
    return jsonify({'task_id': task.id})  # Return task ID to client

@app.route('/get_response/<task_id>')
def get_response(task_id):
    task = process_openai_response.AsyncResult(task_id)
    if task.state == 'PENDING':
        return jsonify({'status': 'waiting'})
    elif task.state == 'SUCCESS':
        return jsonify({'status': 'complete', 'message': task.result})
    return jsonify({'status': 'error'})   

if __name__ == '__main__':
    app.run(debug=True)
```

```python
"""
celery_app.py supports the main Flask app by taking care of potentially long-running tasks.
"""

from celery import Celery
from openai_functions import create_assistant, create_thread, create_message, create_run, wait_for_response, retrieve_response
import logging
import os

# Configuring logging
logging.basicConfig(level=logging.ERROR, format='%(asctime)s - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG) 

# Initialize Celery with the name of your application
celery = Celery(__name__)

# Configure Celery using environment variables
celery.conf.broker_url = os.getenv('CELERY_BROKER_URL', 'redis://localhost:6379/0')
celery.conf.result_backend = os.getenv('CELERY_RESULT_BACKEND', 'redis://localhost:6379/0')

# Define tasks to be executed by Celery workers
@celery.task
def process_openai_response(thread_id, assistant_id):
    logger.info("Celery: Starting OpenAI response processing")
    try:
        run = create_run(thread_id, assistant_id)
        wait_for_response(thread_id, run)
        messages = retrieve_response(thread_id)
        if messages.data:
            ai_response = messages.data[0].content[0].text.value
        else:
            ai_response = 'No response from AI.'
        return ai_response
    except Exception as e:
        logger.error(f"Error processing OpenAI response: {e}")
        raise
```

```python
"""
openai_functions.py stores the OpenAI-related functions
"""

from openai import OpenAI
import logging
import os
import time

logging.basicConfig(level=logging.ERROR, format='%(asctime)s - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG) 

api_key = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=api_key)

def create_assistant():
    logger.debug('Creating assistant...')
    assistant = client.beta.assistants.create(
        name="Helpful AI",
        instructions="You are a helpful AI. Just answer any questions a user asks you to the best of your ability.",
        tools=[{"type": "code_interpreter"}],
        model="gpt-3.5-turbo-1106"
    )
    return assistant

def create_thread():
    logger.debug('Creating thread...')
    thread = client.beta.threads.create()
    return thread

def create_message(thread_id, user_input):
    logger.debug('Creating message...')
    client.beta.threads.messages.create(
        thread_id=thread_id,
        role="user",
        content=user_input
    )

def create_run(thread_id, assistant_id):
    logger.debug('Creating run...')
    run = client.beta.threads.runs.create(
        thread_id=thread_id,
        assistant_id=assistant_id,
    )
    return run

def wait_for_response(thread_id, run):
    logger.info(f'run.status: {run.status}')
    while run.status in ('queued', 'in_progress'):
        logger.debug('Retrieving run...')
        run = client.beta.threads.runs.retrieve(
            thread_id=thread_id,
            run_id=run.id
        )
        time.sleep(1)
        logger.info(f'run.status: {run.status}')

def retrieve_response(thread_id):
    logger.debug('Retrieving messages...')
    messages = client.beta.threads.messages.list(
        thread_id=thread_id
    )
    return messages
```
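Since `/send_message` now only returns a task id, the client is expected to poll `/get_response/<task_id>` until the worker is done. Here's a minimal sketch of that flow with `requests` (the localhost URL and the one-second poll interval are my assumptions):

```python
import time
import requests

BASE = 'http://localhost:5000'  # assumed local dev server

def chat(message):
    # Dispatch the message; the server replies with a Celery task id
    task_id = requests.post(f'{BASE}/send_message', data={'message': message}).json()['task_id']
    # Poll until the worker has finished the OpenAI run
    while True:
        status = requests.get(f'{BASE}/get_response/{task_id}').json()
        if status['status'] == 'complete':
            return status['message']
        if status['status'] == 'error':
            raise RuntimeError('Task failed')
        time.sleep(1)

print(chat('Hello!'))
```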

7. The same as 6, but deployed online on Heroku

"""
app.py is similar to a6, but with modifications to work on Heroku. This code already works on Heroku.
"""

from celery_app import process_openai_response
from flask import Flask, render_template, request, jsonify
from openai_functions import create_assistant, create_thread, create_message, create_run, wait_for_response, retrieve_response
import logging

# Configuring logging
logging.basicConfig(level=logging.ERROR, format='%(asctime)s - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG) 

# Setting up Flask
app = Flask(__name__)

# Initialize the OpenAI assistant and thread as global variables
assistant = None
thread = None

@app.route('/')
def home():
    return render_template('chat.html')

@app.route('/send_message', methods=['POST'])
def send_message():
    global assistant, thread  # Access the global variables

    # Initialize assistant and thread if they are None (first message)
    if assistant is None or thread is None:
        assistant = create_assistant()
        thread = create_thread()

    message = request.form['message']  # Get the received message
    if not message.strip():
        return jsonify({'message': 'Please enter a question.'})

    create_message(thread.id, message)
    task = process_openai_response.delay(thread.id, assistant.id)  # Dispatch Celery task
    return jsonify({'task_id': task.id})  # Return task ID to client

@app.route('/get_response/<task_id>')
def get_response(task_id):
    task = process_openai_response.AsyncResult(task_id)
    if task.state == 'PENDING':
        return jsonify({'status': 'waiting'})
    elif task.state == 'SUCCESS':
        return jsonify({'status': 'complete', 'message': task.result})
    return jsonify({'status': 'error'})   

if __name__ == '__main__':
    app.run(debug=True)
```

```python
"""
celery_app.py supports the main Flask app by taking care of potentially long-running tasks.
"""

from celery import Celery
from openai_functions import create_assistant, create_thread, create_message, create_run, wait_for_response, retrieve_response
import logging
import os

# Configuring logging
logging.basicConfig(level=logging.ERROR, format='%(asctime)s - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG) 

# Initialize Celery with the name of your application
celery = Celery(__name__)

# Configure Celery using environment variables
celery.conf.broker_url = os.getenv('REDIS_URL', 'redis://localhost:6379/0')
celery.conf.result_backend = os.getenv('REDIS_URL', 'redis://localhost:6379/0')

# Define tasks to be executed by Celery workers
@celery.task
def process_openai_response(thread_id, assistant_id):
    logger.info("Celery: Starting OpenAI response processing")
    try:
        run = create_run(thread_id, assistant_id)
        wait_for_response(thread_id, run)
        messages = retrieve_response(thread_id)
        if messages.data:
            ai_response = messages.data[0].content[0].text.value
        else:
            ai_response = 'No response from AI.'
        return ai_response
    except Exception as e:
        logger.error(f"Error processing OpenAI response: {e}")
        raise
```

```python
"""
openai_functions.py stores the OpenAI-related functions
"""

from openai import OpenAI
import logging
import os
import time

logging.basicConfig(level=logging.ERROR, format='%(asctime)s - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG) 

api_key = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=api_key)

def create_assistant():
    logger.debug('Creating assistant...')
    assistant = client.beta.assistants.create(
        name="Helpful AI",
        instructions="You are a helpful AI. Just answer any questions a user asks you to the best of your ability.",
        tools=[{"type": "code_interpreter"}],
        model="gpt-3.5-turbo-1106"
    )
    return assistant

def create_thread():
    logger.debug('Creating thread...')
    thread = client.beta.threads.create()
    return thread

def create_message(thread_id, user_input):
    logger.debug('Creating message...')
    client.beta.threads.messages.create(
        thread_id=thread_id,
        role="user",
        content=user_input
    )

def create_run(thread_id, assistant_id):
    logger.debug('Creating run...')
    run = client.beta.threads.runs.create(
        thread_id=thread_id,
        assistant_id=assistant_id,
    )
    return run

def wait_for_response(thread_id, run):
    logger.info(f'run.status: {run.status}')
    while run.status in ('queued', 'in_progress'):
        logger.debug('Retrieving run...')
        run = client.beta.threads.runs.retrieve(
            thread_id=thread_id,
            run_id=run.id
        )
        time.sleep(1)
        logger.info(f'run.status: {run.status}')

def retrieve_response(thread_id):
    logger.debug('Retrieving messages...')
    messages = client.beta.threads.messages.list(
        thread_id=thread_id
    )
    return messages
```
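For the deployment itself, the web app and the Celery worker each need their own Heroku dyno, declared in a `Procfile`. Mine looks roughly like this (the `gunicorn` server and the exact module paths are assumptions based on the file names above):

```
web: gunicorn app:app
worker: celery -A celery_app.celery worker --loglevel=info
```

One gotcha to watch for: newer Heroku Redis add-ons hand out a `rediss://` (TLS) URL with a self-signed certificate, in which case Celery additionally needs `broker_use_ssl` and `redis_backend_use_ssl` configured with relaxed `ssl_cert_reqs`.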
