diff --git a/chatmastermind/ai_factory.py b/chatmastermind/ai_factory.py index 36a987b..42b27c1 100644 --- a/chatmastermind/ai_factory.py +++ b/chatmastermind/ai_factory.py @@ -3,18 +3,20 @@ Creates different AI instances, based on the given configuration. """ import argparse -from typing import cast +from typing import cast, Optional from .configuration import Config, AIConfig, OpenAIConfig from .ai import AI, AIError from .ais.openai import OpenAI -def create_ai(args: argparse.Namespace, config: Config) -> AI: # noqa: 11 +def create_ai(args: argparse.Namespace, config: Config, # noqa: 11 + def_ai: Optional[str] = None, + def_model: Optional[str] = None) -> AI: """ - Creates an AI subclass instance from the given arguments - and configuration file. If AI has not been set in the - arguments, it searches for the ID 'default'. If that - is not found, it uses the first AI in the list. + Creates an AI subclass instance from the given arguments and configuration file. + If AI has not been set in the arguments, it searches for the ID 'default'. If + that is not found, it uses the first AI in the list. It's also possible to + specify a default AI and model using 'def_ai' and 'def_model'. 
""" ai_conf: AIConfig if hasattr(args, 'AI') and args.AI: @@ -22,6 +24,8 @@ def create_ai(args: argparse.Namespace, config: Config) -> AI: # noqa: 11 ai_conf = config.ais[args.AI] except KeyError: raise AIError(f"AI ID '{args.AI}' does not exist in this configuration") + elif def_ai: + ai_conf = config.ais[def_ai] elif 'default' in config.ais: ai_conf = config.ais['default'] else: @@ -34,6 +38,8 @@ def create_ai(args: argparse.Namespace, config: Config) -> AI: # noqa: 11 ai = OpenAI(cast(OpenAIConfig, ai_conf)) if hasattr(args, 'model') and args.model: ai.config.model = args.model + elif def_model: + ai.config.model = def_model if hasattr(args, 'max_tokens') and args.max_tokens: ai.config.max_tokens = args.max_tokens if hasattr(args, 'temperature') and args.temperature: diff --git a/chatmastermind/ais/openai.py b/chatmastermind/ais/openai.py index 0e7ad41..d7bb12f 100644 --- a/chatmastermind/ais/openai.py +++ b/chatmastermind/ais/openai.py @@ -44,7 +44,7 @@ class OpenAI(AI): frequency_penalty=self.config.frequency_penalty, presence_penalty=self.config.presence_penalty) question.answer = Answer(response['choices'][0]['message']['content']) - question.tags = otags + question.tags = set(otags) if otags is not None else None question.ai = self.ID question.model = self.config.model answers: list[Message] = [question] diff --git a/chatmastermind/commands/question.py b/chatmastermind/commands/question.py index 37b62c4..da77e1a 100644 --- a/chatmastermind/commands/question.py +++ b/chatmastermind/commands/question.py @@ -2,6 +2,7 @@ import sys import argparse from pathlib import Path from itertools import zip_longest +from copy import deepcopy from ..configuration import Config from ..chat import ChatDB from ..message import Message, MessageFilter, MessageError, Question, source_code @@ -105,13 +106,75 @@ def make_request(ai: AI, chat: ChatDB, message: Message, args: argparse.Namespac print(response.tokens) +def create_msg_args(msg: Message, args: argparse.Namespace) -> 
argparse.Namespace: + """ + Takes an existing message and CLI arguments, and returns modified args based + on the members of the given message. Used e.g. when repeating messages, where + it's necessary to determine the correct AI, model and output tags to use + (either from the existing message or the given args). + """ + msg_args = args + # if AI, model or output tags have not been specified, + # use those from the original message + if (args.AI is None + or args.model is None # noqa: W503 + or args.output_tags is None): # noqa: W503 + msg_args = deepcopy(args) + if args.AI is None and msg.ai is not None: + msg_args.AI = msg.ai + if args.model is None and msg.model is not None: + msg_args.model = msg.model + if args.output_tags is None and msg.tags is not None: + msg_args.output_tags = msg.tags + return msg_args + + +def repeat_messages(messages: list[Message], chat: ChatDB, args: argparse.Namespace, config: Config) -> None: + """ + Repeat the given messages using the given arguments. + """ + ai: AI + for msg in messages: + msg_args = create_msg_args(msg, args) + ai = create_ai(msg_args, config) + print(f"--------- Repeating message '{msg.msg_id()}': ---------") + # overwrite the latest message if requested or empty + # -> but not if it's in the DB! 
+ if ((msg.answer is None or msg_args.overwrite is True) + and (not chat.msg_in_db(msg))): # noqa: W503 + msg.clear_answer() + make_request(ai, chat, msg, msg_args) + # otherwise create a new one + else: + msg_args.ask = [msg.question] + message = create_message(chat, msg_args) + make_request(ai, chat, message, msg_args) + + +def invert_input_tag_args(args: argparse.Namespace) -> None: + """ + Changes the semantics of the INPUT tags for this command: + * not tags specified on the CLI -> no tags are selected + * empty tags specified on the CLI -> all tags are selected + """ + if args.or_tags is None: + args.or_tags = set() + elif len(args.or_tags) == 0: + args.or_tags = None + if args.and_tags is None: + args.and_tags = set() + elif len(args.and_tags) == 0: + args.and_tags = None + + def question_cmd(args: argparse.Namespace, config: Config) -> None: """ Handler for the 'question' command. """ - mfilter = MessageFilter(tags_or=args.or_tags if args.or_tags is not None else set(), - tags_and=args.and_tags if args.and_tags is not None else set(), - tags_not=args.exclude_tags if args.exclude_tags is not None else set()) + invert_input_tag_args(args) + mfilter = MessageFilter(tags_or=args.or_tags, + tags_and=args.and_tags, + tags_not=args.exclude_tags) chat = ChatDB.from_dir(cache_path=Path(config.cache), db_path=Path(config.db), mfilter=mfilter) @@ -121,30 +184,24 @@ def question_cmd(args: argparse.Namespace, config: Config) -> None: if args.create: return - # create the correct AI instance - ai: AI = create_ai(args, config) - # === ASK === if args.ask: + ai: AI = create_ai(args, config) make_request(ai, chat, message, args) # === REPEAT === elif args.repeat is not None: - lmessage = chat.msg_latest(loc='cache') - if lmessage is None: - print("No message found to repeat!") - sys.exit(1) + repeat_msgs: list[Message] = [] + # repeat latest message + if len(args.repeat) == 0: + lmessage = chat.msg_latest(loc='cache') + if lmessage is None: + print("No message found to 
repeat!") + sys.exit(1) + repeat_msgs.append(lmessage) + # repeat given message(s) else: - print(f"Repeating message '{lmessage.msg_id()}':") - # overwrite the latest message if requested or empty - if lmessage.answer is None or args.overwrite is True: - lmessage.clear_answer() - make_request(ai, chat, lmessage, args) - # otherwise create a new one - else: - args.ask = [lmessage.question] - message = create_message(chat, args) - make_request(ai, chat, message, args) - + repeat_msgs = chat.msg_find(args.repeat, loc='disk') + repeat_messages(repeat_msgs, chat, args, config) # === PROCESS === elif args.process is not None: # TODO: process either all questions without an diff --git a/chatmastermind/main.py b/chatmastermind/main.py index 62c4539..ac4f7cc 100755 --- a/chatmastermind/main.py +++ b/chatmastermind/main.py @@ -34,13 +34,13 @@ def create_parser() -> argparse.ArgumentParser: # a parent parser for all commands that support tag selection tag_parser = argparse.ArgumentParser(add_help=False) - tag_arg = tag_parser.add_argument('-t', '--or-tags', nargs='+', + tag_arg = tag_parser.add_argument('-t', '--or-tags', nargs='*', help='List of tags (one must match)', metavar='OTAGS') tag_arg.completer = tags_completer # type: ignore - atag_arg = tag_parser.add_argument('-k', '--and-tags', nargs='+', + atag_arg = tag_parser.add_argument('-k', '--and-tags', nargs='*', help='List of tags (all must match)', metavar='ATAGS') atag_arg.completer = tags_completer # type: ignore - etag_arg = tag_parser.add_argument('-x', '--exclude-tags', nargs='+', + etag_arg = tag_parser.add_argument('-x', '--exclude-tags', nargs='*', help='List of tags to exclude', metavar='XTAGS') etag_arg.completer = tags_completer # type: ignore otag_arg = tag_parser.add_argument('-o', '--output-tags', nargs='+', diff --git a/tests/test_common.py b/tests/test_common.py new file mode 100644 index 0000000..eff7c00 --- /dev/null +++ b/tests/test_common.py @@ -0,0 +1,100 @@ +import unittest +import argparse +from 
typing import Union, Optional +from chatmastermind.configuration import Config, AIConfig +from chatmastermind.tags import Tag +from chatmastermind.message import Message, Answer +from chatmastermind.chat import Chat +from chatmastermind.ai import AI, AIResponse, Tokens, AIError + + +class FakeAI(AI): + """ + A mocked version of the 'AI' class. + """ + ID: str + name: str + config: AIConfig + + def models(self) -> list[str]: + raise NotImplementedError + + def tokens(self, data: Union[Message, Chat]) -> int: + return 123 + + def print(self) -> None: + pass + + def print_models(self) -> None: + pass + + def __init__(self, ID: str, model: str, error: bool = False): + self.ID = ID + self.model = model + self.error = error + + def request(self, + question: Message, + chat: Chat, + num_answers: int = 1, + otags: Optional[set[Tag]] = None) -> AIResponse: + """ + Mock the 'ai.request()' function by either returning fake + answers or raising an exception. + """ + if self.error: + raise AIError + question.answer = Answer("Answer 0") + question.tags = set(otags) if otags is not None else None + question.ai = self.ID + question.model = self.model + answers: list[Message] = [question] + for n in range(1, num_answers): + answers.append(Message(question=question.question, + answer=Answer(f"Answer {n}"), + tags=otags, + ai=self.ID, + model=self.model)) + return AIResponse(answers, Tokens(10, 10, 20)) + + +class TestWithFakeAI(unittest.TestCase): + """ + Base class for all tests that need to use the FakeAI. + """ + def assert_msgs_equal_except_file_path(self, msg1: list[Message], msg2: list[Message]) -> None: + """ + Compare messages using Question, Answer and all metadata except for the file_path. 
+ """ + self.assertEqual(len(msg1), len(msg2)) + for m1, m2 in zip(msg1, msg2): + # exclude the file_path, compare only Q, A and metadata + self.assertTrue(m1.equals(m2, file_path=False, verbose=True)) + + def assert_msgs_all_equal(self, msg1: list[Message], msg2: list[Message]) -> None: + """ + Compare messages using Question, Answer and ALL metadata. + """ + self.assertEqual(len(msg1), len(msg2)) + for m1, m2 in zip(msg1, msg2): + self.assertTrue(m1.equals(m2, verbose=True)) + + def assert_msgs_content_equal(self, msg1: list[Message], msg2: list[Message]) -> None: + """ + Compare messages using only Question and Answer. + """ + self.assertEqual(len(msg1), len(msg2)) + for m1, m2 in zip(msg1, msg2): + self.assertEqual(m1, m2) + + def mock_create_ai(self, args: argparse.Namespace, config: Config) -> AI: + """ + Mocked 'create_ai' that returns a 'FakeAI' instance. + """ + return FakeAI(args.AI, args.model) + + def mock_create_ai_with_error(self, args: argparse.Namespace, config: Config) -> AI: + """ + Mocked 'create_ai' that returns a 'FakeAI' instance. 
+ """ + return FakeAI(args.AI, args.model, error=True) diff --git a/tests/test_question_cmd.py b/tests/test_question_cmd.py index 89c72c7..77d679c 100644 --- a/tests/test_question_cmd.py +++ b/tests/test_question_cmd.py @@ -1,31 +1,20 @@ import os -import unittest import argparse import tempfile +from copy import copy from pathlib import Path from unittest import mock -from unittest.mock import MagicMock, call, ANY -from typing import Optional +from unittest.mock import MagicMock, call from chatmastermind.configuration import Config from chatmastermind.commands.question import create_message, question_cmd from chatmastermind.tags import Tag from chatmastermind.message import Message, Question, Answer from chatmastermind.chat import Chat, ChatDB -from chatmastermind.ai import AI, AIResponse, Tokens, AIError +from chatmastermind.ai import AIError +from .test_common import TestWithFakeAI -class TestQuestionCmdBase(unittest.TestCase): - def assert_messages_equal(self, msg1: list[Message], msg2: list[Message]) -> None: - """ - Compare messages using more than just Question and Answer. - """ - self.assertEqual(len(msg1), len(msg2)) - for m1, m2 in zip(msg1, msg2): - # exclude the file_path, compare only Q, A and metadata - self.assertTrue(m1.equals(m2, file_path=False, verbose=True)) - - -class TestMessageCreate(TestQuestionCmdBase): +class TestMessageCreate(TestWithFakeAI): """ Test if messages created by the 'question' command have the correct format. 
@@ -212,7 +201,7 @@ It is embedded code """)) -class TestQuestionCmd(TestQuestionCmdBase): +class TestQuestionCmd(TestWithFakeAI): def setUp(self) -> None: # create DB and cache @@ -227,8 +216,8 @@ class TestQuestionCmd(TestQuestionCmdBase): ask=['What is the meaning of life?'], num_answers=1, output_tags=['science'], - AI='openai', - model='gpt-3.5-turbo', + AI='FakeAI', + model='FakeModel', or_tags=None, and_tags=None, exclude_tags=None, @@ -239,75 +228,40 @@ class TestQuestionCmd(TestQuestionCmdBase): process=None, overwrite=None ) - # create a mock AI instance - self.ai = MagicMock(spec=AI) - self.ai.request.side_effect = self.mock_request - - def input_message(self, args: argparse.Namespace) -> Message: - """ - Create the expected input message for a question using the - given arguments. - """ - # NOTE: we only use the first question from the "ask" list - # -> message creation using "question.create_message()" is - # tested above - # the answer is always empty for the input message - return Message(Question(args.ask[0]), - tags=args.output_tags, - ai=args.AI, - model=args.model) - - def mock_request(self, - question: Message, - chat: Chat, - num_answers: int = 1, - otags: Optional[set[Tag]] = None) -> AIResponse: - """ - Mock the 'ai.request()' function - """ - question.answer = Answer("Answer 0") - question.tags = set(otags) if otags else None - question.ai = 'FakeAI' - question.model = 'FakeModel' - answers: list[Message] = [question] - for n in range(1, num_answers): - answers.append(Message(question=question.question, - answer=Answer(f"Answer {n}"), - tags=otags, - ai='FakeAI', - model='FakeModel')) - return AIResponse(answers, Tokens(10, 10, 20)) def message_list(self, tmp_dir: tempfile.TemporaryDirectory) -> list[Path]: # exclude '.next' return sorted([f for f in Path(tmp_dir.name).glob('*.[ty]*')]) + +class TestQuestionCmdAsk(TestQuestionCmd): + @mock.patch('chatmastermind.commands.question.create_ai') def test_ask_single_answer(self, mock_create_ai: 
MagicMock) -> None: """ Test single answer with no errors. """ - mock_create_ai.return_value = self.ai - expected_question = self.input_message(self.args) - expected_responses = self.mock_request(expected_question, - Chat([]), - self.args.num_answers, - self.args.output_tags).messages + mock_create_ai.side_effect = self.mock_create_ai + expected_question = Message(Question(self.args.ask[0]), + tags=set(self.args.output_tags), + ai=self.args.AI, + model=self.args.model, + file_path=Path('')) + fake_ai = self.mock_create_ai(self.args, self.config) + expected_responses = fake_ai.request(expected_question, + Chat([]), + self.args.num_answers, + self.args.output_tags).messages # execute the command question_cmd(self.args, self.config) - # check for correct request call - self.ai.request.assert_called_once_with(expected_question, - ANY, - self.args.num_answers, - self.args.output_tags) # check for the expected message files chat = ChatDB.from_dir(Path(self.cache_dir.name), Path(self.db_dir.name)) cached_msg = chat.msg_gather(loc='cache') self.assertEqual(len(self.message_list(self.cache_dir)), 1) - self.assert_messages_equal(cached_msg, expected_responses) + self.assert_msgs_equal_except_file_path(cached_msg, expected_responses) @mock.patch('chatmastermind.commands.question.ChatDB.from_dir') @mock.patch('chatmastermind.commands.question.create_ai') @@ -318,22 +272,21 @@ class TestQuestionCmd(TestQuestionCmdBase): chat = MagicMock(spec=ChatDB) mock_from_dir.return_value = chat - mock_create_ai.return_value = self.ai - expected_question = self.input_message(self.args) - expected_responses = self.mock_request(expected_question, - Chat([]), - self.args.num_answers, - self.args.output_tags).messages + mock_create_ai.side_effect = self.mock_create_ai + expected_question = Message(Question(self.args.ask[0]), + tags=set(self.args.output_tags), + ai=self.args.AI, + model=self.args.model, + file_path=Path('')) + fake_ai = self.mock_create_ai(self.args, self.config) + 
expected_responses = fake_ai.request(expected_question, + Chat([]), + self.args.num_answers, + self.args.output_tags).messages # execute the command question_cmd(self.args, self.config) - # check for correct request call - self.ai.request.assert_called_once_with(expected_question, - chat, - self.args.num_answers, - self.args.output_tags) - # check for the correct ChatDB calls: # - initial question has been written (prior to the actual request) # - responses have been written (after the request) @@ -350,86 +303,98 @@ class TestQuestionCmd(TestQuestionCmdBase): Provoke an error during the AI request and verify that the question has been correctly stored in the cache. """ - mock_create_ai.return_value = self.ai - expected_question = self.input_message(self.args) - self.ai.request.side_effect = AIError + mock_create_ai.side_effect = self.mock_create_ai_with_error + expected_question = Message(Question(self.args.ask[0]), + tags=set(self.args.output_tags), + ai=self.args.AI, + model=self.args.model, + file_path=Path('')) # execute the command with self.assertRaises(AIError): question_cmd(self.args, self.config) - # check for correct request call - self.ai.request.assert_called_once_with(expected_question, - ANY, - self.args.num_answers, - self.args.output_tags) # check for the expected message files chat = ChatDB.from_dir(Path(self.cache_dir.name), Path(self.db_dir.name)) cached_msg = chat.msg_gather(loc='cache') self.assertEqual(len(self.message_list(self.cache_dir)), 1) - self.assert_messages_equal(cached_msg, [expected_question]) + self.assert_msgs_equal_except_file_path(cached_msg, [expected_question]) + + +class TestQuestionCmdRepeat(TestQuestionCmd): @mock.patch('chatmastermind.commands.question.create_ai') def test_repeat_single_question(self, mock_create_ai: MagicMock) -> None: """ Repeat a single question. """ - # 1. 
ask a question - mock_create_ai.return_value = self.ai - expected_question = self.input_message(self.args) - expected_responses = self.mock_request(expected_question, - Chat([]), - self.args.num_answers, - self.args.output_tags).messages - question_cmd(self.args, self.config) - chat = ChatDB.from_dir(Path(self.cache_dir.name), - Path(self.db_dir.name)) - cached_msg = chat.msg_gather(loc='cache') - self.assertEqual(len(self.message_list(self.cache_dir)), 1) - self.assert_messages_equal(cached_msg, expected_responses) + mock_create_ai.side_effect = self.mock_create_ai + # create a message + message = Message(Question(self.args.ask[0]), + Answer('Old Answer'), + tags=set(self.args.output_tags), + ai=self.args.AI, + model=self.args.model, + file_path=Path(self.cache_dir.name) / '0001.txt') + message.to_file() - # 2. repeat the last question (without overwriting) + # repeat the last question (without overwriting) # -> expect two identical messages (except for the file_path) self.args.ask = None self.args.repeat = [] self.args.overwrite = False - expected_responses += expected_responses + expected_response = Message(Question(message.question), + Answer('Answer 0'), + ai=message.ai, + model=message.model, + tags=message.tags, + file_path=Path('')) + # we expect the original message + the one with the new response + expected_responses = [message] + [expected_response] question_cmd(self.args, self.config) + chat = ChatDB.from_dir(Path(self.cache_dir.name), + Path(self.db_dir.name)) cached_msg = chat.msg_gather(loc='cache') + print(self.message_list(self.cache_dir)) self.assertEqual(len(self.message_list(self.cache_dir)), 2) - self.assert_messages_equal(cached_msg, expected_responses) + self.assert_msgs_equal_except_file_path(cached_msg, expected_responses) @mock.patch('chatmastermind.commands.question.create_ai') def test_repeat_single_question_overwrite(self, mock_create_ai: MagicMock) -> None: """ Repeat a single question and overwrite the old one. """ - # 1. 
ask a question - mock_create_ai.return_value = self.ai - expected_question = self.input_message(self.args) - expected_responses = self.mock_request(expected_question, - Chat([]), - self.args.num_answers, - self.args.output_tags).messages - question_cmd(self.args, self.config) + mock_create_ai.side_effect = self.mock_create_ai + # create a message + message = Message(Question(self.args.ask[0]), + Answer('Old Answer'), + tags=set(self.args.output_tags), + ai=self.args.AI, + model=self.args.model, + file_path=Path(self.cache_dir.name) / '0001.txt') + message.to_file() chat = ChatDB.from_dir(Path(self.cache_dir.name), Path(self.db_dir.name)) cached_msg = chat.msg_gather(loc='cache') assert cached_msg[0].file_path cached_msg_file_id = cached_msg[0].file_path.stem - self.assertEqual(len(self.message_list(self.cache_dir)), 1) - self.assert_messages_equal(cached_msg, expected_responses) - # 2. repeat the last question (WITH overwriting) - # -> expect a single message afterwards + # repeat the last question (WITH overwriting) + # -> expect a single message afterwards (with a new answer) self.args.ask = None self.args.repeat = [] self.args.overwrite = True + expected_response = Message(Question(message.question), + Answer('Answer 0'), + ai=message.ai, + model=message.model, + tags=message.tags, + file_path=Path('')) question_cmd(self.args, self.config) cached_msg = chat.msg_gather(loc='cache') self.assertEqual(len(self.message_list(self.cache_dir)), 1) - self.assert_messages_equal(cached_msg, expected_responses) + self.assert_msgs_equal_except_file_path(cached_msg, [expected_response]) # also check that the file ID has not been changed assert cached_msg[0].file_path self.assertEqual(cached_msg_file_id, cached_msg[0].file_path.stem) @@ -439,35 +404,37 @@ class TestQuestionCmd(TestQuestionCmdBase): """ Repeat a single question after an error. """ - # 1. 
ask a question and provoke an error - mock_create_ai.return_value = self.ai - expected_question = self.input_message(self.args) - self.ai.request.side_effect = AIError - with self.assertRaises(AIError): - question_cmd(self.args, self.config) + mock_create_ai.side_effect = self.mock_create_ai + # create a question WITHOUT an answer + # -> just like after an error, which is tested above + message = Message(Question(self.args.ask[0]), + tags=set(self.args.output_tags), + ai=self.args.AI, + model=self.args.model, + file_path=Path(self.cache_dir.name) / '0001.txt') + message.to_file() chat = ChatDB.from_dir(Path(self.cache_dir.name), Path(self.db_dir.name)) cached_msg = chat.msg_gather(loc='cache') assert cached_msg[0].file_path cached_msg_file_id = cached_msg[0].file_path.stem - self.assertEqual(len(self.message_list(self.cache_dir)), 1) - self.assert_messages_equal(cached_msg, [expected_question]) - # 2. repeat the last question (without overwriting) + # repeat the last question (without overwriting) # -> expect a single message because if the original has # no answer, it should be overwritten by default self.args.ask = None self.args.repeat = [] self.args.overwrite = False - self.ai.request.side_effect = self.mock_request - expected_responses = self.mock_request(expected_question, - Chat([]), - self.args.num_answers, - self.args.output_tags).messages + expected_response = Message(Question(message.question), + Answer('Answer 0'), + ai=message.ai, + model=message.model, + tags=message.tags, + file_path=Path('')) question_cmd(self.args, self.config) cached_msg = chat.msg_gather(loc='cache') self.assertEqual(len(self.message_list(self.cache_dir)), 1) - self.assert_messages_equal(cached_msg, expected_responses) + self.assert_msgs_equal_except_file_path(cached_msg, [expected_response]) # also check that the file ID has not been changed assert cached_msg[0].file_path self.assertEqual(cached_msg_file_id, cached_msg[0].file_path.stem) @@ -477,37 +444,132 @@ class 
TestQuestionCmd(TestQuestionCmdBase): """ Repeat a single question with new arguments. """ - # 1. ask a question - mock_create_ai.return_value = self.ai - expected_question = self.input_message(self.args) - expected_responses = self.mock_request(expected_question, - Chat([]), - self.args.num_answers, - self.args.output_tags).messages - question_cmd(self.args, self.config) + mock_create_ai.side_effect = self.mock_create_ai + # create a message + message = Message(Question(self.args.ask[0]), + Answer('Old Answer'), + tags=set(self.args.output_tags), + ai=self.args.AI, + model=self.args.model, + file_path=Path(self.cache_dir.name) / '0001.txt') + message.to_file() chat = ChatDB.from_dir(Path(self.cache_dir.name), Path(self.db_dir.name)) cached_msg = chat.msg_gather(loc='cache') - self.assertEqual(len(self.message_list(self.cache_dir)), 1) - self.assert_messages_equal(cached_msg, expected_responses) + assert cached_msg[0].file_path - # 2. repeat the last question with new arguments (without overwriting) - # -> expect two messages with identical question and answer, but different metadata + # repeat the last question with new arguments (without overwriting) + # -> expect two messages with identical question but different metadata and new answer self.args.ask = None self.args.repeat = [] self.args.overwrite = False self.args.output_tags = ['newtag'] self.args.AI = 'newai' self.args.model = 'newmodel' - new_expected_question = Message(question=Question(expected_question.question), - tags=set(self.args.output_tags), - ai=self.args.AI, - model=self.args.model) - expected_responses += self.mock_request(new_expected_question, - Chat([]), - self.args.num_answers, - set(self.args.output_tags)).messages + new_expected_response = Message(Question(message.question), + Answer('Answer 0'), + ai='newai', + model='newmodel', + tags={Tag('newtag')}, + file_path=Path('')) question_cmd(self.args, self.config) cached_msg = chat.msg_gather(loc='cache') 
self.assertEqual(len(self.message_list(self.cache_dir)), 2) - self.assert_messages_equal(cached_msg, expected_responses) + self.assert_msgs_equal_except_file_path(cached_msg, [message] + [new_expected_response]) + + @mock.patch('chatmastermind.commands.question.create_ai') + def test_repeat_single_question_new_args_overwrite(self, mock_create_ai: MagicMock) -> None: + """ + Repeat a single question with new arguments, overwriting the old one. + """ + mock_create_ai.side_effect = self.mock_create_ai + # create a message + message = Message(Question(self.args.ask[0]), + Answer('Old Answer'), + tags=set(self.args.output_tags), + ai=self.args.AI, + model=self.args.model, + file_path=Path(self.cache_dir.name) / '0001.txt') + message.to_file() + chat = ChatDB.from_dir(Path(self.cache_dir.name), + Path(self.db_dir.name)) + cached_msg = chat.msg_gather(loc='cache') + assert cached_msg[0].file_path + + # repeat the last question with new arguments + self.args.ask = None + self.args.repeat = [] + self.args.overwrite = True + self.args.output_tags = ['newtag'] + self.args.AI = 'newai' + self.args.model = 'newmodel' + new_expected_response = Message(Question(message.question), + Answer('Answer 0'), + ai='newai', + model='newmodel', + tags={Tag('newtag')}, + file_path=Path('')) + question_cmd(self.args, self.config) + cached_msg = chat.msg_gather(loc='cache') + self.assertEqual(len(self.message_list(self.cache_dir)), 1) + self.assert_msgs_equal_except_file_path(cached_msg, [new_expected_response]) + + @mock.patch('chatmastermind.commands.question.create_ai') + def test_repeat_multiple_questions(self, mock_create_ai: MagicMock) -> None: + """ + Repeat multiple questions. + """ + mock_create_ai.side_effect = self.mock_create_ai + # 1. 
=== create three questions === + # cached message without an answer + message1 = Message(Question(self.args.ask[0]), + tags=self.args.output_tags, + ai=self.args.AI, + model=self.args.model, + file_path=Path(self.cache_dir.name) / '0001.txt') + # cached message with an answer + message2 = Message(Question(self.args.ask[0]), + Answer('Old Answer'), + tags=self.args.output_tags, + ai=self.args.AI, + model=self.args.model, + file_path=Path(self.cache_dir.name) / '0002.txt') + # DB message without an answer + message3 = Message(Question(self.args.ask[0]), + tags=self.args.output_tags, + ai=self.args.AI, + model=self.args.model, + file_path=Path(self.db_dir.name) / '0003.txt') + message1.to_file() + message2.to_file() + message3.to_file() + questions = [message1, message2, message3] + expected_responses: list[Message] = [] + fake_ai = self.mock_create_ai(self.args, self.config) + for question in questions: + # since the message's answer is modified, we use a copy + # -> the original is used for comparison below + expected_responses += fake_ai.request(copy(question), + Chat([]), + self.args.num_answers, + set(self.args.output_tags)).messages + + # 2. 
=== repeat all three questions (without overwriting) === + self.args.ask = None + self.args.repeat = ['0001', '0002', '0003'] + self.args.overwrite = False + question_cmd(self.args, self.config) + # two new files should be in the cache directory + # * the repeated cached message with answer + # * the repeated DB message + # -> the cached message without answer should be overwritten + self.assertEqual(len(self.message_list(self.cache_dir)), 4) + self.assertEqual(len(self.message_list(self.db_dir)), 1) + expected_cache_messages = [expected_responses[0], message2, expected_responses[1], expected_responses[2]] + chat = ChatDB.from_dir(Path(self.cache_dir.name), + Path(self.db_dir.name)) + cached_msg = chat.msg_gather(loc='cache') + self.assert_msgs_equal_except_file_path(cached_msg, expected_cache_messages) + # check that the DB message has not been modified at all + db_msg = chat.msg_gather(loc='db') + self.assert_msgs_all_equal(db_msg, [message3])