openai.py

import openai
import requests
import asyncio
import json
from functools import partial

from .logging import Logger

from typing import Dict, List, Tuple, Generator, AsyncGenerator, Optional, Any


class OpenAI:
    api_key: str
    chat_model: str = "gpt-3.5-turbo"
    logger: Logger

    api_code: str = "openai"

    @property
    def chat_api(self) -> str:
        return self.chat_model

    classification_api = chat_api

    image_api: str = "dalle"

    operator: str = "OpenAI (https://openai.com)"

    def __init__(self, api_key, chat_model=None, logger=None):
        self.api_key = api_key
        self.chat_model = chat_model or self.chat_model
        self.logger = logger or Logger()

    async def _request_with_retries(self, request: partial, attempts: int = 5, retry_interval: int = 2) -> Any:
        """Retry a request a set number of times if it fails.

        Args:
            request (partial): The request to make with retries.
            attempts (int, optional): The number of attempts to make. Defaults to 5.
            retry_interval (int, optional): The interval in seconds between attempts. Defaults to 2 seconds.

        Returns:
            Any: The OpenAI response for the request.
        """
        # Call the request function and return the response if it succeeds, else retry
        current_attempt = 1
        while current_attempt <= attempts:
            try:
                response = await request()
                return response
            except Exception as e:
                self.logger.log(f"Request failed: {e}", "error")
                self.logger.log(f"Retrying in {retry_interval} seconds...")
                await asyncio.sleep(retry_interval)
                current_attempt += 1

        # If all attempts failed, raise an exception
        raise Exception("Request failed after all attempts.")

    async def generate_chat_response(self, messages: List[Dict[str, str]], user: Optional[str] = None) -> Tuple[str, int]:
        """Generate a response to a chat message.

        Args:
            messages (List[Dict[str, str]]): A list of messages to use as context.
            user (Optional[str], optional): A user identifier to pass through to the API. Defaults to None.

        Returns:
            Tuple[str, int]: The response text and the number of tokens used.
        """
        self.logger.log(f"Generating response to {len(messages)} messages using {self.chat_model}...")

        chat_partial = partial(
            openai.ChatCompletion.acreate,
            model=self.chat_model,
            messages=messages,
            api_key=self.api_key,
            user=user
        )
        response = await self._request_with_retries(chat_partial)

        result_text = response.choices[0].message['content']
        tokens_used = response.usage["total_tokens"]
        self.logger.log(f"Generated response with {tokens_used} tokens.")
        return result_text, tokens_used

    async def classify_message(self, query: str, user: Optional[str] = None) -> Tuple[Dict[str, str], int]:
        """Classify a message and produce a prompt optimized for the target API.

        Args:
            query (str): The message to classify.
            user (Optional[str], optional): A user identifier to pass through to the API. Defaults to None.

        Returns:
            Tuple[Dict[str, str], int]: The classification result and the number of tokens used.
        """
        system_message = """You are a classifier for different types of messages. You decide whether an incoming message is meant to be a prompt for an AI chat model, or meant for a different API. You respond with a JSON object like this:
{ "type": event_type, "prompt": prompt }
- If the message you received is meant for the AI chat model, the event_type is "chat", and the prompt is the literal content of the message you received. This is also the default if none of the other options apply.
- If it is a prompt for a calculation that can be answered better by WolframAlpha than an AI chat bot, the event_type is "calculate". Optimize the message you received for input to WolframAlpha, and return it as the prompt attribute.
- If it is a prompt for an AI image generation, the event_type is "imagine". Optimize the message you received for use with DALL-E, and return it as the prompt attribute.
- If the user is asking you to create a new room, the event_type is "newroom", and the prompt is the name of the room, if one is given, else an empty string.
- If the user is asking you to throw a coin, the event_type is "coin". The prompt is an empty string.
- If the user is asking you to roll a die, the event_type is "dice". The prompt is a string containing an optional number of sides, if one is given, else an empty string.
- If for any reason you are unable to classify the message (for example, if it infringes on your terms of service), the event_type is "error", and the prompt is a message explaining why you are unable to process the message.
Only the event_types mentioned above are allowed, you must not respond in any other way."""

        messages = [
            {
                "role": "system",
                "content": system_message
            },
            {
                "role": "user",
                "content": query
            }
        ]

        self.logger.log(f"Classifying message '{query}'...")

        chat_partial = partial(
            openai.ChatCompletion.acreate,
            model=self.chat_model,
            messages=messages,
            api_key=self.api_key,
            user=user
        )
        response = await self._request_with_retries(chat_partial)

        try:
            result = json.loads(response.choices[0].message['content'])
        except Exception:
            # Fall back to treating the message as a plain chat prompt if the classifier
            # output cannot be parsed as JSON
            result = {"type": "chat", "prompt": query}

        tokens_used = response.usage["total_tokens"]
        self.logger.log(f"Classified message as {result['type']} with {tokens_used} tokens.")
        return result, tokens_used

    async def generate_image(self, prompt: str, user: Optional[str] = None) -> Tuple[List[bytes], int]:
        """Generate an image from a prompt.

        Args:
            prompt (str): The prompt to use.
            user (Optional[str], optional): A user identifier to pass through to the API. Defaults to None.

        Returns:
            Tuple[List[bytes], int]: The downloaded image data and the number of images.
        """
        self.logger.log(f"Generating image from prompt '{prompt}'...")

        image_partial = partial(
            openai.Image.acreate,
            prompt=prompt,
            n=1,
            api_key=self.api_key,
            size="1024x1024",
            user=user
        )
        response = await self._request_with_retries(image_partial)

        # Download each generated image and collect its raw bytes
        images = []
        for image in response.data:
            image = requests.get(image.url).content
            images.append(image)

        return images, len(images)
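

# Illustrative usage sketch (not part of the original module): a minimal example of how
# the class above is meant to be driven, assuming a valid OpenAI API key and that the
# module is imported as part of its package so the relative `.logging` import resolves.
# The API key and message text below are placeholders.
if __name__ == "__main__":
    async def _demo():
        api = OpenAI(api_key="YOUR_OPENAI_API_KEY")

        # Route an incoming message to the right backend based on the classifier's output
        classification, tokens = await api.classify_message("Paint a watercolor of a lighthouse")

        if classification["type"] == "imagine":
            images, count = await api.generate_image(classification["prompt"])
        else:
            text, tokens = await api.generate_chat_response(
                [{"role": "user", "content": classification["prompt"]}]
            )

    asyncio.run(_demo())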