auto_chaos.gpt_utils

🧠 GPT utils 🧠

 1"""
 2\U0001F9E0
 3GPT utils
 4\U0001F9E0
 5"""
 6import os
 7import openai
 8from typing import List, Generator
 9
10
11def generate_text(
12    messages: List[str], temperature: float, model: str = os.getenv("MODEL_NAME")
13) -> Generator:
14    """
15    Generate text from a list of messages relying on OpenAI compatible API.
16
17    Args:
18        messages (List[str]): List of messages (system, user, assistant)
19        model (str): LLM model to be used
20        temperature (float): Temperature
21
22    Returns:
23        Generator: GPT response object
24    """
25    response = openai.chat.completions.create(
26        model=model,
27        messages=messages,
28        temperature=temperature,
29        max_tokens=6742,
30        top_p=1,
31        frequency_penalty=0,
32        presence_penalty=0,
33    )
34
35    return response
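
The default model is resolved from the MODEL_NAME environment variable when the module is imported, and the module-level openai client picks up its credentials and endpoint from the environment as well. A minimal configuration sketch, assuming an OpenAI-compatible server; the key, URL, and model name below are placeholder values:

import os

# Placeholder values; point these at your own OpenAI-compatible endpoint.
os.environ["OPENAI_API_KEY"] = "sk-..."                       # read by the openai client
os.environ["OPENAI_BASE_URL"] = "http://localhost:8000/v1"    # optional, for non-default endpoints
os.environ["MODEL_NAME"] = "gpt-4o-mini"                      # default model used by generate_text

# MODEL_NAME is evaluated at import time, so set it before importing the module.
from auto_chaos.gpt_utils import generate_text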
def generate_text(messages: List[dict], temperature: float, model: str = os.getenv("MODEL_NAME")) -> Generator:

Generate text from a list of messages using an OpenAI-compatible API.

Arguments:
  • messages (List[dict]): List of chat messages (system, user, assistant)
  • model (str): LLM model to be used
  • temperature (float): Sampling temperature

Returns:
  • Generator: GPT response object
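
A minimal usage sketch, assuming MODEL_NAME is set in the environment and that messages follow the OpenAI chat format (role/content dicts); the prompt text is illustrative only:

from auto_chaos.gpt_utils import generate_text

# Chat messages in the OpenAI format: a list of role/content dicts.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarise chaos engineering in one sentence."},
]

response = generate_text(messages=messages, temperature=0.2)

# Without stream=True the call returns a single completion object;
# the generated text sits on the first choice.
print(response.choices[0].message.content)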