# openai.py
  1. import openai
  2. import requests
  3. import json
  4. from .logging import Logger
  5. from typing import Dict, List, Tuple, Generator, Optional
  6. class OpenAI:
  7. api_key: str
  8. chat_model: str = "gpt-3.5-turbo"
  9. logger: Logger
  10. api_code: str = "openai"
  11. @property
  12. def chat_api(self) -> str:
  13. return self.chat_model
  14. classification_api = chat_api
  15. image_api: str = "dalle"
  16. operator: str = "OpenAI ([https://openai.com](https://openai.com))"
  17. def __init__(self, api_key, chat_model=None, logger=None):
  18. self.api_key = api_key
  19. self.chat_model = chat_model or self.chat_model
  20. self.logger = logger or Logger()
  21. def generate_chat_response(self, messages: List[Dict[str, str]], user: Optional[str] = None) -> Tuple[str, int]:
  22. """Generate a response to a chat message.
  23. Args:
  24. messages (List[Dict[str, str]]): A list of messages to use as context.
  25. Returns:
  26. Tuple[str, int]: The response text and the number of tokens used.
  27. """
  28. self.logger.log(f"Generating response to {len(messages)} messages using {self.chat_model}...")
  29. response = openai.ChatCompletion.create(
  30. model=self.chat_model,
  31. messages=messages,
  32. api_key=self.api_key,
  33. user = user
  34. )
  35. result_text = response.choices[0].message['content']
  36. tokens_used = response.usage["total_tokens"]
  37. self.logger.log(f"Generated response with {tokens_used} tokens.")
  38. return result_text, tokens_used
  39. def classify_message(self, query: str, user: Optional[str] = None) -> Tuple[Dict[str, str], int]:
  40. system_message = """You are a classifier for different types of messages. You decide whether an incoming message is meant to be a prompt for an AI chat model, an image generation AI, or a calculation for WolframAlpha. You respond with a JSON object like this:
  41. { "type": event_type, "prompt": prompt }
  42. - If the message you received is meant for the AI chat model, the event_type is "chat", and the prompt is the literal content of the message you received. This is also the default if none of the other options apply.
  43. - If it is a prompt for a calculation that can be answered better by WolframAlpha than an AI chat bot, the event_type is "calculate". Optimize the message you received for input to WolframAlpha, and return it as the prompt attribute.
  44. - If it is a prompt for an AI image generation, the event_type is "imagine". Optimize the message you received for use with DALL-E, and return it as the prompt attribute.
  45. - If for any reason you are unable to classify the message (for example, if it infringes on your terms of service), the event_type is "error", and the prompt is a message explaining why you are unable to process the message.
  46. Only the event_types mentioned above are allowed, you must not respond in any other way."""
  47. messages = [
  48. {
  49. "role": "system",
  50. "content": system_message
  51. },
  52. {
  53. "role": "user",
  54. "content": query
  55. }
  56. ]
  57. self.logger.log(f"Classifying message '{query}'...")
  58. response = openai.ChatCompletion.create(
  59. model=self.chat_model,
  60. messages=messages,
  61. api_key=self.api_key,
  62. user = user
  63. )
  64. try:
  65. result = json.loads(response.choices[0].message['content'])
  66. except:
  67. result = {"type": "chat", "prompt": query}
  68. tokens_used = response.usage["total_tokens"]
  69. self.logger.log(f"Classified message as {result['type']} with {tokens_used} tokens.")
  70. return result, tokens_used
  71. def generate_image(self, prompt: str, user: Optional[str] = None) -> Generator[bytes, None, None]:
  72. """Generate an image from a prompt.
  73. Args:
  74. prompt (str): The prompt to use.
  75. Yields:
  76. bytes: The image data.
  77. """
  78. self.logger.log(f"Generating image from prompt '{prompt}'...")
  79. response = openai.Image.create(
  80. prompt=prompt,
  81. n=1,
  82. api_key=self.api_key,
  83. size="1024x1024",
  84. user = user
  85. )
  86. images = []
  87. for image in response.data:
  88. image = requests.get(image.url).content
  89. images.append(image)
  90. return images, len(images)