Pseudo Code Generator | JavaScript

Simplified Pseudo Code for Python Script Functionality

This document presents a clear pseudo code representation of a Python script, covering initialization, event handling, file processing, chat management, and audio handling, so the logic can be followed without Python syntax details.


Prompt

import os
import plotly
from io import BytesIO
from pathlib import Path
from typing import List

from openai import AsyncAssistantEventHandler, AsyncOpenAI, OpenAI

from literalai.helper import utc_now

import chainlit as cl
from chainlit.config import config
from chainlit.element import Element
from openai.types.beta.threads.runs import RunStep


async_openai_client = AsyncOpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
sync_openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

assistant = sync_openai_client.beta.assistants.retrieve(
    os.environ.get("OPENAI_ASSISTANT_ID")
)

config.ui.name = assistant.name

class EventHandler(AsyncAssistantEventHandler):

    def __init__(self, assistant_name: str) -> None:
        super().__init__()
        self.current_message: cl.Message = None
        self.current_step: cl.Step = None
        self.current_tool_call = None
        self.assistant_name = assistant_name

    async def on_run_step_created(self, run_step: RunStep) -> None:
        cl.user_session.set("run_step", run_step)

    async def on_text_created(self, text) -> None:
        self.current_message = await cl.Message(author=self.assistant_name, content="").send()

    async def on_text_delta(self, delta, snapshot):
        if delta.value:
            await self.current_message.stream_token(delta.value)

    async def on_text_done(self, text):
        await self.current_message.update()
        if text.annotations:
            print(text.annotations)
            for annotation in text.annotations:
                if annotation.type == "file_path":
                    response = await async_openai_client.files.with_raw_response.content(annotation.file_path.file_id)
                    file_name = annotation.text.split("/")[-1]
                    try:
                        fig = plotly.io.from_json(response.content)
                        element = cl.Plotly(name=file_name, figure=fig)
                        await cl.Message(
                            content="",
                            elements=[element]).send()
                    except Exception as e:
                        element = cl.File(content=response.content, name=file_name)
                        await cl.Message(
                            content="",
                            elements=[element]).send()
                    # Hack to fix links
                    if annotation.text in self.current_message.content and element.chainlit_key:
                        self.current_message.content = self.current_message.content.replace(annotation.text, f"/project/file/{element.chainlit_key}?session_id={cl.context.session.id}")
                        await self.current_message.update()

    async def on_tool_call_created(self, tool_call):
        self.current_tool_call = tool_call.id
        self.current_step = cl.Step(name=tool_call.type, type="tool")
        self.current_step.show_input = "python"
        self.current_step.start = utc_now()
        await self.current_step.send()

    async def on_tool_call_delta(self, delta, snapshot): 
        if snapshot.id != self.current_tool_call:
            self.current_tool_call = snapshot.id
            self.current_step = cl.Step(name=delta.type, type="tool")
            self.current_step.start = utc_now()
            if snapshot.type == "code_interpreter":
                 self.current_step.show_input = "python"
            if snapshot.type == "function":
                self.current_step.name = snapshot.function.name
                self.current_step.language = "json"
            await self.current_step.send()
        
        if delta.type == "function":
            pass
        
        if delta.type == "code_interpreter":
            if delta.code_interpreter.outputs:
                for output in delta.code_interpreter.outputs:
                    if output.type == "logs":
                        self.current_step.output += output.logs
                        self.current_step.language = "markdown"
                        self.current_step.end = utc_now()
                        await self.current_step.update()
                    elif output.type == "image":
                        self.current_step.language = "json"
                        self.current_step.output = output.image.model_dump_json()
            else:
                if delta.code_interpreter.input:
                    await self.current_step.stream_token(delta.code_interpreter.input, is_input=True)  

    async def on_event(self, event) -> None:
        if event.event == "error":
            # Await the send so the error message actually reaches the UI
            return await cl.ErrorMessage(content=str(event.data.message)).send()

    async def on_exception(self, exception: Exception) -> None:
        return await cl.ErrorMessage(content=str(exception)).send()

    async def on_tool_call_done(self, tool_call):       
        self.current_step.end = utc_now()
        await self.current_step.update()

    async def on_image_file_done(self, image_file, message):
        image_id = image_file.file_id
        response = await async_openai_client.files.with_raw_response.content(image_id)
        image_element = cl.Image(
            name=image_id,
            content=response.content,
            display="inline",
            size="large"
        )
        if not self.current_message.elements:
            self.current_message.elements = []
        self.current_message.elements.append(image_element)
        await self.current_message.update()


@cl.step(type="tool")
async def speech_to_text(audio_file):
    response = await async_openai_client.audio.transcriptions.create(
        model="whisper-1", file=audio_file
    )

    return response.text


async def upload_files(files: List[Element]):
    file_ids = []
    for file in files:
        uploaded_file = await async_openai_client.files.create(
            file=Path(file.path), purpose="assistants"
        )
        file_ids.append(uploaded_file.id)
    return file_ids


async def process_files(files: List[Element]):
    # Upload files if any and get file_ids
    file_ids = []
    if len(files) > 0:
        file_ids = await upload_files(files)

    return [
        {
            "file_id": file_id,
            "tools": [{"type": "code_interpreter"}, {"type": "file_search"}] if file.mime in ["application/vnd.openxmlformats-officedocument.wordprocessingml.document", "text/markdown", "application/pdf", "text/plain"] else [{"type": "code_interpreter"}],
        }
        for file_id, file in zip(file_ids, files)
    ]


@cl.set_starters
async def set_starters():
    return [
        cl.Starter(
            label="Run Tesla stock analysis",
            message="Make a data analysis on the tesla-stock-price.csv file I previously uploaded.",
            icon="/public/write.svg",
            ),
        cl.Starter(
            label="Run a data analysis on my CSV",
            message="Make a data analysis on the next CSV file I will upload.",
            icon="/public/write.svg",
            )
        ]

@cl.on_chat_start
async def start_chat():
    # Create a Thread
    thread = await async_openai_client.beta.threads.create()
    # Store thread ID in user session for later use
    cl.user_session.set("thread_id", thread.id)
    
    
@cl.on_stop
async def stop_chat():
    current_run_step: RunStep = cl.user_session.get("run_step")
    if current_run_step:
        await async_openai_client.beta.threads.runs.cancel(thread_id=current_run_step.thread_id, run_id=current_run_step.run_id)


@cl.on_message
async def main(message: cl.Message):
    thread_id = cl.user_session.get("thread_id")

    attachments = await process_files(message.elements)

    # Add a Message to the Thread
    oai_message = await async_openai_client.beta.threads.messages.create(
        thread_id=thread_id,
        role="user",
        content=message.content,
        attachments=attachments,
    )

    # Create and Stream a Run
    async with async_openai_client.beta.threads.runs.stream(
        thread_id=thread_id,
        assistant_id=assistant.id,
        event_handler=EventHandler(assistant_name=assistant.name),
    ) as stream:
        await stream.until_done()


@cl.on_audio_chunk
async def on_audio_chunk(chunk: cl.AudioChunk):
    if chunk.isStart:
        buffer = BytesIO()
        # This is required for whisper to recognize the file type
        buffer.name = f"input_audio.{chunk.mimeType.split('/')[1]}"
        # Initialize the session for a new audio stream
        cl.user_session.set("audio_buffer", buffer)
        cl.user_session.set("audio_mime_type", chunk.mimeType)

    # Write the chunks to a buffer and transcribe the whole audio at the end
    cl.user_session.get("audio_buffer").write(chunk.data)


@cl.on_audio_end
async def on_audio_end(elements: list[Element]):
    # Get the audio buffer from the session
    audio_buffer: BytesIO = cl.user_session.get("audio_buffer")
    audio_buffer.seek(0)  # Move the file pointer to the beginning
    audio_file = audio_buffer.read()
    audio_mime_type: str = cl.user_session.get("audio_mime_type")

    input_audio_el = cl.Audio(
        mime=audio_mime_type, content=audio_file, name=audio_buffer.name
    )
    await cl.Message(
        type="user_message",
        content="",
        elements=[input_audio_el, *elements],
    ).send()

    whisper_input = (audio_buffer.name, audio_file, audio_mime_type)
    transcription = await speech_to_text(whisper_input)

    msg = cl.Message(author="You", content=transcription, elements=elements)

    await main(message=msg)

Answer

Pseudo Code Representation

This document provides a simplified pseudo code version of the original Python script. The pseudo code captures the essential logic without delving into the specifics of Python syntax, thereby enhancing clarity and understanding.

Main Components

  1. Initialization
  2. Event Handler Class
  3. File Upload and Processing
  4. Chat Management
  5. Audio Handling
  6. Message Processing

Pseudo Code

1. Initialization

SET async_openai_client WITH OpenAI API_KEY
SET sync_openai_client WITH OpenAI API_KEY
RETRIEVE assistant USING OpenAI ASSISTANT_ID
SET config UI name to assistant name
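
In the script above, this corresponds to the client setup at the top of the file. A minimal sketch of that block, assuming only that OPENAI_API_KEY and OPENAI_ASSISTANT_ID are present in the environment:

import os
from openai import AsyncOpenAI, OpenAI
from chainlit.config import config

# Async client drives the streaming runs; the sync client handles one-off setup calls
async_openai_client = AsyncOpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
sync_openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

# Retrieve the pre-configured Assistant and mirror its name in the Chainlit UI
assistant = sync_openai_client.beta.assistants.retrieve(
    os.environ.get("OPENAI_ASSISTANT_ID")
)
config.ui.name = assistant.name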

2. Event Handler Class

CLASS EventHandler EXTENDS AsyncAssistantEventHandler:
    INITIALIZE with assistant_name:
        SET current_message to NULL
        SET current_step to NULL
        SET current_tool_call to NULL
        SET assistant_name to the given name

    ASYNC FUNCTION on_run_step_created(run_step):
        STORE run_step in user session

    ASYNC FUNCTION on_text_created(text):
        CREATE current_message with assistant name, empty content

    ASYNC FUNCTION on_text_delta(delta, snapshot):
        IF delta has value:
            STREAM delta value to current_message

    ASYNC FUNCTION on_text_done(text):
        UPDATE current_message
        IF annotations exist:
            FOR EACH annotation in text.annotations:
                IF annotation type is file_path:
                    RETRIEVE file content from OpenAI
                    RENDER content as a Plotly chart element, OR as a plain file element if parsing fails
                    SEND the element in a new message (see the sketch after this class)
                    IF annotation text appears in current_message content:
                        REPLACE it with a link to the sent element
                        UPDATE current_message

    ASYNC FUNCTION on_tool_call_created(tool_call):
        SET current_tool_call to tool_call ID
        INIT current_step with name and type
        SHOW input in python
        RECORD start time
        SEND current_step

    ASYNC FUNCTION on_tool_call_delta(delta, snapshot):
        IF snapshot ID differs from current_tool_call:
            START a new current_step for the tool call (python input for code_interpreter, json for function calls)
            SEND current_step
        IF delta carries code_interpreter outputs:
            APPEND log output or image info to current_step and UPDATE it
        ELSE IF delta carries code_interpreter input:
            STREAM the input tokens to current_step
    
    ASYNC FUNCTION on_event(event):
        IF event is "error":
            SEND error message with event data

    ASYNC FUNCTION on_exception(exception):
        SEND error message with exception content

    ASYNC FUNCTION on_tool_call_done(tool_call):
        RECORD end time for current_step
        UPDATE current_step

    ASYNC FUNCTION on_image_file_done(image_file, message):
        RETRIEVE image and create Image element
        APPEND image element to current_message
        UPDATE current_message
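
The "RENDER content as a Plotly chart element" step in on_text_done hides one decision worth spelling out: the downloaded file is first parsed as Plotly JSON and only falls back to a plain file element if parsing fails. A condensed sketch of that branch, taken from the script above (the send call is folded into one place for brevity):

response = await async_openai_client.files.with_raw_response.content(
    annotation.file_path.file_id
)
file_name = annotation.text.split("/")[-1]
try:
    # Interactive Plotly chart if the payload is valid Plotly JSON
    fig = plotly.io.from_json(response.content)
    element = cl.Plotly(name=file_name, figure=fig)
except Exception:
    # Otherwise expose the raw bytes as a downloadable file
    element = cl.File(content=response.content, name=file_name)
await cl.Message(content="", elements=[element]).send()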

3. File Upload and Processing

ASYNC FUNCTION upload_files(files):
    INITIALIZE file_ids LIST
    FOR EACH file IN files:
        UPLOAD file to OpenAI
        APPEND uploaded file ID to file_ids
    RETURN file_ids

ASYNC FUNCTION process_files(files):
    INITIALIZE file_ids LIST
    IF files are not empty:
        file_ids = upload_files(files)

    RETURN LIST of dictionaries, one per uploaded file, containing:
        file_id,
        tools selected from the file MIME type (see the sketch below)
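
The "tools selected from the file MIME type" step means: documents that file_search can index (Word, Markdown, PDF, plain text) are attached with both tools, everything else with code_interpreter only. A small sketch of that mapping; SEARCHABLE_MIME_TYPES and tools_for are hypothetical names pulled out of the script's inline list comprehension:

# MIME types the file_search tool can index in addition to code_interpreter
SEARCHABLE_MIME_TYPES = {
    "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
    "text/markdown",
    "application/pdf",
    "text/plain",
}

def tools_for(mime_type: str) -> list:
    tools = [{"type": "code_interpreter"}]
    if mime_type in SEARCHABLE_MIME_TYPES:
        tools.append({"type": "file_search"})
    return tools

process_files would then pair each uploaded file_id with tools_for(file.mime).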

4. Chat Management

ASYNC FUNCTION set_starters():
    RETURN LIST of starters with labels and messages

ASYNC FUNCTION start_chat():
    CREATE a new thread using OpenAI
    STORE thread ID in user session

ASYNC FUNCTION stop_chat():
    RETRIEVE current_run_step from user session
    IF it exists:
        CANCEL the current thread run

ASYNC FUNCTION main(message):
    RETRIEVE thread ID from user session
    PROCESS message elements to get attachments

    SEND message to OpenAI thread
    STREAM run with event handler
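
The last two steps of main map directly onto the Assistants streaming API: the user message and its attachments are appended to the stored thread, then a run is streamed while EventHandler translates run events into Chainlit messages and steps. The corresponding calls from the script, lightly condensed:

# Append the user's message (plus any uploaded-file attachments) to the thread
await async_openai_client.beta.threads.messages.create(
    thread_id=thread_id,
    role="user",
    content=message.content,
    attachments=attachments,
)

# Stream the run; the event handler renders tokens, tool steps, and images live
async with async_openai_client.beta.threads.runs.stream(
    thread_id=thread_id,
    assistant_id=assistant.id,
    event_handler=EventHandler(assistant_name=assistant.name),
) as stream:
    await stream.until_done()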

5. Audio Handling

ASYNC FUNCTION on_audio_chunk(chunk):
    IF chunk is the start:
        CREATE buffer for audio
        STORE buffer and MIME type in user session
    WRITE chunk data to buffer

ASYNC FUNCTION on_audio_end(elements):
    RETRIEVE audio buffer and MIME type
    CREATE audio element
    SEND user message with audio elements
    TRANSCRIBE audio file using speech_to_text
    CREATE message with transcription and SEND to main function
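
One detail the pseudo code glosses over: Whisper infers the audio format from the file name, so the in-memory buffer is named after the MIME subtype when the stream starts, and the transcription call later receives a (name, bytes, mime_type) tuple. Condensed from the script above:

# First chunk: name the in-memory buffer so Whisper can detect the format
buffer = BytesIO()
buffer.name = f"input_audio.{chunk.mimeType.split('/')[1]}"  # e.g. input_audio.webm

# End of stream: rewind, read the audio back, and transcribe it
audio_buffer.seek(0)
whisper_input = (audio_buffer.name, audio_buffer.read(), audio_mime_type)
response = await async_openai_client.audio.transcriptions.create(
    model="whisper-1", file=whisper_input
)
transcription = response.text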

Conclusion

The pseudo code above gives a simplified view of the original script's behavior, making the workflow and logic easy to follow without detailed knowledge of Python syntax. Each section breaks down the responsibilities and actions taken during execution, which supports documentation and discussion of the implementation.
