gptbot.py

import sqlite3
import inspect
import openai
import asyncio
import markdown2
import tiktoken
from nio import AsyncClient, RoomMessageText, MatrixRoom, InviteEvent
from nio.api import MessageDirection
from nio.responses import RoomMessagesError, SyncResponse
from configparser import ConfigParser
from datetime import datetime

config = ConfigParser()
config.read("config.ini")
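
# Expected config.ini layout - a sketch inferred from the lookups below;
# section and key names come from this file, the values are placeholders:
#
#   [OpenAI]
#   APIKey = sk-...
#   Model = gpt-3.5-turbo
#
#   [Matrix]
#   Homeserver = https://matrix.example.org
#   AccessToken = syt_...
#   UserID = @gptbot:example.org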

# Set up GPT API
openai.api_key = config["OpenAI"]["APIKey"]
MODEL = config["OpenAI"].get("Model", "gpt-3.5-turbo")

# Set up Matrix client
MATRIX_HOMESERVER = config["Matrix"]["Homeserver"]
MATRIX_ACCESS_TOKEN = config["Matrix"]["AccessToken"]
BOT_USER_ID = config["Matrix"]["UserID"]

client = AsyncClient(MATRIX_HOMESERVER, BOT_USER_ID)
SYNC_TOKEN = None

# Set up SQLite3 database
conn = sqlite3.connect("token_usage.db")
cursor = conn.cursor()
cursor.execute(
    """
    CREATE TABLE IF NOT EXISTS token_usage (
        room_id TEXT NOT NULL,
        tokens INTEGER NOT NULL,
        timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
    )
    """
)
conn.commit()
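
# The table above makes per-room accounting straightforward later on.
# For example, a rough usage summary (run manually; the bot itself only
# inserts rows):
#
#   SELECT room_id, SUM(tokens) AS total_tokens
#   FROM token_usage
#   GROUP BY room_id;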

# Define the system message and max token limit
SYSTEM_MESSAGE = "You are a helpful assistant."
MAX_TOKENS = 3000


def logging(message, log_level="info"):
    # Minimal stdout logger that tags each line with the calling function.
    # Note: the name shadows the stdlib `logging` module, which is harmless
    # here only because that module is never imported.
    caller = inspect.currentframe().f_back.f_code.co_name
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S:%f")
    print(f"[{timestamp} - {caller}] [{log_level.upper()}] {message}")


async def gpt_query(messages):
    logging(f"Querying GPT with {len(messages)} messages")
    try:
        # Use the async variant so the HTTP call doesn't block the event loop.
        response = await openai.ChatCompletion.acreate(
            model=MODEL,
            messages=messages,
        )
        result_text = response.choices[0].message["content"]
        tokens_used = response.usage["total_tokens"]
        logging(f"Used {tokens_used} tokens")
        return result_text, tokens_used
    except Exception as e:
        logging(f"Error during GPT API call: {e}", "error")
        return None, 0


async def fetch_last_n_messages(room_id, n=20):
    # Fetch the last n text messages from the room, paginating backwards
    # from the most recent sync token.
    await client.join(room_id)
    messages = []
    logging(f"Fetching last {n} messages from room {room_id} (starting at {SYNC_TOKEN})...")
    response = await client.room_messages(
        room_id=room_id,
        start=SYNC_TOKEN,
        limit=n,
        direction=MessageDirection.back,
    )
    if isinstance(response, RoomMessagesError):
        logging(
            f"Error fetching messages: {response.message} (status code {response.status_code})",
            "error",
        )
        return []
    for event in response.chunk:
        if isinstance(event, RoomMessageText):
            messages.append(event)
    logging(f"Found {len(messages)} messages")
    # Reverse the list so that messages are in chronological order
    return messages[::-1]


def truncate_messages_to_fit_tokens(messages, max_tokens=MAX_TOKENS):
    encoding = tiktoken.encoding_for_model(MODEL)
    # The first entry is the system message; reserve its tokens once, up
    # front. The +1 per message is a rough allowance for role/formatting
    # overhead.
    system_message = messages[0]
    total_tokens = len(encoding.encode(system_message["content"])) + 1
    if total_tokens > max_tokens:
        logging(
            f"System message is too long to fit within token limit ({total_tokens} tokens) - cannot proceed",
            "error",
        )
        return []
    truncated_messages = []
    # Walk the history newest-first so the most recent messages survive
    # truncation, then restore chronological order below.
    for message in reversed(messages[1:]):
        tokens = len(encoding.encode(message["content"])) + 1
        if total_tokens + tokens > max_tokens:
            break
        total_tokens += tokens
        truncated_messages.append(message)
    return [system_message] + list(reversed(truncated_messages))
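
# Note: OpenAI's chat format adds a few tokens of per-message framing
# overhead (on the order of 3-4 tokens per message for gpt-3.5-turbo,
# per the tiktoken cookbook examples), so the +1 allowance above
# undercounts slightly. Setting MAX_TOKENS well below the model's
# context window leaves headroom to absorb that slack.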


async def message_callback(room: MatrixRoom, event: RoomMessageText):
    logging(f"Received message from {event.sender} in room {room.room_id}")
    if event.sender == BOT_USER_ID:
        logging("Message is from bot - ignoring")
        return
    await client.room_typing(room.room_id, True)
    await client.room_read_markers(room.room_id, event.event_id)
    last_messages = await fetch_last_n_messages(room.room_id, 20)
    if not last_messages or all(message.sender == BOT_USER_ID for message in last_messages):
        logging("No messages to respond to")
        await client.room_typing(room.room_id, False)
        return
    chat_messages = [{"role": "system", "content": SYSTEM_MESSAGE}]
    for message in last_messages:
        role = "assistant" if message.sender == BOT_USER_ID else "user"
        # Skip the triggering event here; it is appended last so it never
        # appears twice in the history.
        if message.event_id != event.event_id:
            chat_messages.append({"role": role, "content": message.body})
    chat_messages.append({"role": "user", "content": event.body})
    # Truncate messages to fit within the token limit
    truncated_messages = truncate_messages_to_fit_tokens(
        chat_messages, MAX_TOKENS - 1)
    response, tokens_used = await gpt_query(truncated_messages)
    if response:
        # Send the response to the room, with a Markdown-rendered HTML body
        logging(f"Sending response to room {room.room_id}...")
        markdowner = markdown2.Markdown(extras=["fenced-code-blocks"])
        formatted_body = markdowner.convert(response)
        await client.room_send(
            room.room_id,
            "m.room.message",
            {
                "msgtype": "m.text",
                "body": response,
                "format": "org.matrix.custom.html",
                "formatted_body": formatted_body,
            },
        )
        logging("Logging tokens used...")
        cursor.execute(
            "INSERT INTO token_usage (room_id, tokens) VALUES (?, ?)",
            (room.room_id, tokens_used),
        )
        conn.commit()
    else:
        # Send a notice to the room if there was an error
        logging("Error during GPT API call - sending notice to room")
        await client.room_send(
            room.room_id,
            "m.room.message",
            {
                "msgtype": "m.notice",
                "body": "Sorry, I'm having trouble connecting to the GPT API right now. Please try again later.",
            },
        )
    await client.room_typing(room.room_id, False)


async def join_and_greet(room_id):
    # Shared helper: join a room and post the greeting message.
    await client.join(room_id)
    await client.room_send(
        room_id,
        "m.room.message",
        {"msgtype": "m.text",
         "body": "Hello! I'm a helpful assistant. How can I help you today?"},
    )


async def room_invite_callback(room: MatrixRoom, event):
    logging(f"Received invite to room {room.room_id} - joining...")
    await join_and_greet(room.room_id)


async def accept_pending_invites():
    logging("Accepting pending invites...")
    for room_id in list(client.invited_rooms.keys()):
        logging(f"Joining room {room_id}...")
        await join_and_greet(room_id)


async def sync_cb(response):
    # Track the latest sync token so fetch_last_n_messages can paginate
    # backwards from "now".
    global SYNC_TOKEN
    logging(f"Sync response received (next batch: {response.next_batch})")
    SYNC_TOKEN = response.next_batch


async def main():
    logging("Starting bot...")
    client.access_token = MATRIX_ACCESS_TOKEN  # Set the access token directly
    client.user_id = BOT_USER_ID  # Set the user_id directly
    client.add_response_callback(sync_cb, SyncResponse)
    logging("Syncing...")
    # Initial sync establishes SYNC_TOKEN before any event callbacks fire.
    await client.sync(timeout=30000)
    client.add_event_callback(message_callback, RoomMessageText)
    client.add_event_callback(room_invite_callback, InviteEvent)
    await accept_pending_invites()  # Accept invites that arrived while offline
    logging("Bot started")
    try:
        await client.sync_forever(timeout=30000)  # Continue syncing events
    finally:
        await client.close()  # Properly close the aiohttp client session
        logging("Bot stopped")


if __name__ == "__main__":
    try:
        asyncio.run(main())
    finally:
        conn.close()