Browse Source

Prepare for option to disable replying to everything
Automatically accept room invites on sync
Leave rooms if everyone else leaves
README update
!gptbot dice command
Minor fixes

Kumi 1 year ago
parent
commit
5b500d34b5

+ 5 - 2
README.md

@@ -15,11 +15,14 @@ probably add more in the future, so the name is a bit misleading.
   - Currently supports OpenAI (DALL-E)
 - Mathematical calculations via the `!gptbot calculate` command
   - Currently supports WolframAlpha
-- DuckDB database to store spent tokens
+- Really useful commands like `!gptbot help` and `!gptbot coin`
+- DuckDB database to store room context
 
 ## Planned features
 
 - End-to-end encryption support (partly implemented, but not yet working)
+- Automatic classification of messages (for `imagine`, `calculate`, etc.)
+  - Beta feature, enable for a room using `!gptbot roomsettings classification true`
 
 ## Installation
 
@@ -98,4 +101,4 @@ please check the logs and open an issue if you can't figure out what's going on.
 
 ## License
 
-This project is licensed under the terms of the MIT license.
+This project is licensed under the terms of the MIT license. See the [LICENSE](LICENSE) file for details.

+ 3 - 0
callbacks/__init__.py

@@ -8,6 +8,7 @@ from nio import (
     InviteEvent,
     OlmEvent,
     MegolmEvent,
+    RoomMemberEvent,
 )
 
 from .test import test_callback
@@ -15,6 +16,7 @@ from .sync import sync_callback
 from .invite import room_invite_callback
 from .join import join_callback
 from .message import message_callback
+from .roommember import roommember_callback
 
 RESPONSE_CALLBACKS = {
     SyncResponse: sync_callback,
@@ -26,4 +28,5 @@ EVENT_CALLBACKS = {
     InviteEvent: room_invite_callback,
     RoomMessageText: message_callback,
     MegolmEvent: message_callback,
+    RoomMemberEvent: roommember_callback,
 }

+ 10 - 0
callbacks/roommember.py

@@ -0,0 +1,10 @@
+from nio import RoomMemberEvent, MatrixRoom
+
async def roommember_callback(room: MatrixRoom, event: RoomMemberEvent, bot):
    """Handle m.room.member state events.

    When a user leaves a room and the bot is the only member left behind,
    the bot leaves the (now empty) room as well.

    Args:
        room (MatrixRoom): The room the membership event occurred in.
        event (RoomMemberEvent): The membership event.
        bot: The bot instance (provides logger and matrix_client).
    """
    # Only departures are interesting here; ignore joins/invites/bans.
    if event.membership != "leave":
        return

    bot.logger.log(f"User {event.state_key} left room {room.room_id} - am I alone now?")

    # One remaining member can only be the bot itself - abandon the room.
    if len(room.users) == 1:
        bot.logger.log("Yes, I was abandoned - leaving...")
        await bot.matrix_client.leave(room.room_id)

+ 1 - 1
callbacks/sync.py

@@ -5,4 +5,4 @@ async def sync_callback(response, bot):
 
     bot.sync_token = SYNC_TOKEN
 
-    bot.accept_pending_invites()
+    await bot.accept_pending_invites()

+ 72 - 17
classes/bot.py

@@ -235,11 +235,11 @@ class GPTBot:
 
         await COMMANDS.get(command, COMMANDS[None])(room, event, self)
 
-    def room_uses_classification(self, room: MatrixRoom | int) -> bool:
+    def room_uses_classification(self, room: MatrixRoom | str) -> bool:
         """Check if a room uses classification.
 
         Args:
-            room (MatrixRoom): The room to check.
+            room (MatrixRoom | str): The room to check.
 
         Returns:
             bool: Whether the room uses classification.
@@ -276,9 +276,18 @@ class GPTBot:
         invites = self.matrix_client.invited_rooms
 
         for invite in invites.keys():
+            self.logger.log(f"Accepting invite to room {invite}")
             await self.matrix_client.join(invite)
 
     async def send_image(self, room: MatrixRoom, image: bytes, message: Optional[str] = None):
+        """Send an image to a room.
+
+        Args:
+            room (MatrixRoom): The room to send the image to.
+            image (bytes): The image to send.
+            message (str, optional): The message to send with the image. Defaults to None.
+        """
+
         self.logger.log(
             f"Sending image of size {len(image)} bytes to room {room.room_id}")
 
@@ -325,6 +334,14 @@ class GPTBot:
         self.logger.log("Sent image")
 
     async def send_message(self, room: MatrixRoom, message: str, notice: bool = False):
+        """Send a message to a room.
+
+        Args:
+            room (MatrixRoom): The room to send the message to.
+            message (str): The message to send.
+            notice (bool): Whether to send the message as a notice. Defaults to False.
+        """
+
         markdowner = markdown2.Markdown(extras=["fenced-code-blocks"])
         formatted_body = markdowner.convert(message)
 
@@ -371,12 +388,12 @@ class GPTBot:
 
         return await self.matrix_client._send(RoomSendResponse, method, path, data, (room.room_id,))
 
-    def log_api_usage(self, message: Event | str, room: MatrixRoom | int, api: str, tokens: int):
+    def log_api_usage(self, message: Event | str, room: MatrixRoom | str, api: str, tokens: int):
         """Log API usage to the database.
 
         Args:
             message (Event): The event that triggered the API usage.
-            room (MatrixRoom | int): The room the event was sent in.
+            room (MatrixRoom | str): The room the event was sent in.
             api (str): The API that was used.
             tokens (int): The number of tokens used.
         """
@@ -447,7 +464,7 @@ class GPTBot:
 
             self.matrix_client.encrypted_rooms = self.matrix_client.store.load_encrypted_rooms()
 
-        # Run initial sync
+        # Run initial sync (now includes joining rooms)
         sync = await self.matrix_client.sync(timeout=30000)
         if isinstance(sync, SyncResponse):
             await self.response_callback(sync)
@@ -461,11 +478,6 @@ class GPTBot:
         self.matrix_client.add_response_callback(
             self.response_callback, Response)
 
-        # Accept pending invites
-
-        self.logger.log("Accepting pending invites...")
-        await self.accept_pending_invites()
-
         # Start syncing events
         self.logger.log("Starting sync loop...")
         try:
@@ -474,15 +486,48 @@ class GPTBot:
             self.logger.log("Syncing one last time...")
             await self.matrix_client.sync(timeout=30000)
 
-    async def process_query(self, room: MatrixRoom, event: RoomMessageText, allow_classify: bool = True):
+    def respond_to_room_messages(self, room: MatrixRoom | str) -> bool:
+        """Check whether the bot should respond to messages sent in a room.
+
+        Args:
+            room (MatrixRoom | str): The room to check.
+
+        Returns:
+            bool: Whether the bot should respond to messages sent in the room.
+        """
+
+        if isinstance(room, MatrixRoom):
+            room = room.room_id
+
+        with self.database.cursor() as cursor:
+            cursor.execute(
+                "SELECT value FROM room_settings WHERE room_id = ? AND setting = ?", (room, "respond_to_messages"))
+            result = cursor.fetchone()
+
+        return True if not result else bool(int(result[0]))
+
+    async def process_query(self, room: MatrixRoom, event: RoomMessageText, from_chat_command: bool = False):
+        """Process a query message. Generates a response and sends it to the room.
+
+        Args:
+            room (MatrixRoom): The room the message was sent in.
+            event (RoomMessageText): The event that triggered the query.
+            from_chat_command (bool, optional): Whether the query was sent via the `!gptbot chat` command. Defaults to False.
+        """
+
+        if not (from_chat_command or self.respond_to_room_messages(room) or self.matrix_client.user_id in event.body):
+            return
+
         await self.matrix_client.room_typing(room.room_id, True)
 
         await self.matrix_client.room_read_markers(room.room_id, event.event_id)
 
-        if allow_classify and self.room_uses_classification(room):
-            classification, tokens = self.classification_api.classify_message(event.body, room.room_id)
+        if (not from_chat_command) and self.room_uses_classification(room):
+            classification, tokens = self.classification_api.classify_message(
+                event.body, room.room_id)
 
-            self.log_api_usage(event, room, f"{self.classification_api.api_code}-{self.classification_api.classification_api}", tokens)
+            self.log_api_usage(
+                event, room, f"{self.classification_api.api_code}-{self.classification_api.classification_api}", tokens)
 
             if not classification["type"] == "chat":
                 event.body = f"!gptbot {classification['type']} {classification['prompt']}"
@@ -522,7 +567,8 @@ class GPTBot:
             return
 
         if response:
-            self.log_api_usage(event, room, f"{self.chat_api.api_code}-{self.chat_api.chat_api}", tokens_used)
+            self.log_api_usage(
+                event, room, f"{self.chat_api.api_code}-{self.chat_api.chat_api}", tokens_used)
 
             self.logger.log(f"Sending response to room {room.room_id}...")
 
@@ -538,10 +584,19 @@ class GPTBot:
 
         await self.matrix_client.room_typing(room.room_id, False)
 
-    def get_system_message(self, room: MatrixRoom | int) -> str:
+    def get_system_message(self, room: MatrixRoom | str) -> str:
+        """Get the system message for a room.
+
+        Args:
+            room (MatrixRoom | str): The room to get the system message for.
+
+        Returns:
+            str: The system message.
+        """
+
         default = self.default_system_message
 
-        if isinstance(room, int):
+        if isinstance(room, str):
             room_id = room
         else:
             room_id = room.room_id

+ 4 - 1
classes/openai.py

@@ -53,13 +53,16 @@ class OpenAI:
         return result_text, tokens_used
 
     def classify_message(self, query: str, user: Optional[str] = None) -> Tuple[Dict[str, str], int]:
-        system_message = """You are a classifier for different types of messages. You decide whether an incoming message is meant to be a prompt for an AI chat model, an image generation AI, or a calculation for WolframAlpha. You respond with a JSON object like this:
+        system_message = """You are a classifier for different types of messages. You decide whether an incoming message is meant to be a prompt for an AI chat model, or meant for a different API. You respond with a JSON object like this:
 
 { "type": event_type, "prompt": prompt }
 
 - If the message you received is meant for the AI chat model, the event_type is "chat", and the prompt is the literal content of the message you received. This is also the default if none of the other options apply.
 - If it is a prompt for a calculation that can be answered better by WolframAlpha than an AI chat bot, the event_type is "calculate". Optimize the message you received for input to WolframAlpha, and return it as the prompt attribute.
 - If it is a prompt for an AI image generation, the event_type is "imagine". Optimize the message you received for use with DALL-E, and return it as the prompt attribute.
+- If the user is asking you to create a new room, the event_type is "newroom", and the prompt is the name of the room, if one is given, else an empty string.
+- If the user is asking you to throw a coin, the event_type is "coin". The prompt is an empty string.
+- If the user is asking you to roll a dice, the event_type is "dice". The prompt is a string containing an optional number of sides, if one is given, else an empty string.
 - If for any reason you are unable to classify the message (for example, if it infringes on your terms of service), the event_type is "error", and the prompt is a message explaining why you are unable to process the message.
 
 Only the event_types mentioned above are allowed, you must not respond in any other way."""

+ 1 - 0
commands/__init__.py

@@ -19,6 +19,7 @@ for command in [
     "custom",
     "privacy",
     "roomsettings",
+    "dice",
 ]:
     function = getattr(import_module(
         "commands." + command), "command_" + command)

+ 1 - 1
commands/calculate.py

@@ -29,7 +29,7 @@ async def command_calculate(room: MatrixRoom, event: RoomMessageText, bot):
             else:
                 await bot.send_message(room, subpod, True)
 
-        bot.log_api_usage(event, room, f"{self.calculation_api.api_code}-{self.calculation_api.calculation_api}", tokens_used)
+        bot.log_api_usage(event, room, f"{bot.calculation_api.api_code}-{bot.calculation_api.calculation_api}", tokens_used)
 
         return
 

+ 1 - 1
commands/chat.py

@@ -8,7 +8,7 @@ async def command_chat(room: MatrixRoom, event: RoomMessageText, bot):
     if prompt:
         bot.logger.log("Sending chat message...")
         event.body = prompt
-        await bot.process_query(room, event, allow_classify=False)
+        await bot.process_query(room, event, from_chat_command=True)
 
         return
 

+ 1 - 1
commands/classify.py

@@ -17,7 +17,7 @@ async def command_classify(room: MatrixRoom, event: RoomMessageText, bot):
 
         await bot.send_message(room, message, True)
 
-        bot.log_api_usage(event, room, f"{self.classification_api.api_code}-{self.classification_api.classification_api}", tokens_used)
+        bot.log_api_usage(event, room, f"{bot.classification_api.api_code}-{bot.classification_api.classification_api}", tokens_used)
 
         return
 

+ 22 - 0
commands/dice.py

@@ -0,0 +1,22 @@
+from nio.events.room_events import RoomMessageText
+from nio.rooms import MatrixRoom
+
+from random import SystemRandom
+
+
async def command_dice(room: MatrixRoom, event: RoomMessageText, bot):
    """Roll a dice and send the result to the room.

    Usage: `!gptbot dice [sides]` - defaults to 6 sides when no (or an
    invalid) number is given.

    Args:
        room (MatrixRoom): The room the command was sent in.
        event (RoomMessageText): The event containing the command.
        bot: The bot instance (provides logger and send_message).
    """
    bot.logger.log("Rolling a dice...")

    # Token 2 of "!gptbot dice [sides]" is the optional side count.
    # Catch IndexError too: the command is valid with no argument at all,
    # in which case split()[2] does not exist.
    try:
        sides = int(event.body.split()[2])
    except (ValueError, IndexError):
        sides = 6

    if sides < 2:
        # Build the refusal message instead of sending it here, so exactly
        # one send_message call happens below (previously this branch sent
        # the refusal and then crashed on the unbound `body` in the final
        # send).
        body = f"A dice with {sides} sides? How would that work?"
    else:
        result = SystemRandom().randint(1, sides)
        body = f"Rolling a {sides}-sided dice... It's a {result}!"

    await bot.send_message(room, body, True)

+ 1 - 0
commands/help.py

@@ -12,6 +12,7 @@ async def command_help(room: MatrixRoom, event: RoomMessageText, bot):
 - !gptbot stats - Show usage statistics for this room
 - !gptbot systemmessage \<message\> - Get or set the system message for this room
 - !gptbot coin - Flip a coin (heads or tails)
+- !gptbot dice [number] - Roll a dice with the specified number of sides (default: 6)
 - !gptbot imagine \<prompt\> - Generate an image from a prompt
 - !gptbot calculate [--text] [--details] \<query\> - Calculate a result to a calculation, optionally forcing text output instead of an image, and optionally showing additional details like the input interpretation
 - !gptbot chat \<message\> - Send a message to the chat API