diff --git a/gpt_computer_assistant/agent/proccess.py b/gpt_computer_assistant/agent/proccess.py
index fd448b20..6f7ebe18 100644
--- a/gpt_computer_assistant/agent/proccess.py
+++ b/gpt_computer_assistant/agent/proccess.py
@@ -80,7 +80,7 @@ def process_audio(take_screenshot=True, take_system_audio=False, dont_save_image
     def play_text():
         from ..gpt_computer_assistant import the_input_box
         global last_ai_response
-        if the_input_box.text() == "" or the_input_box.text() == last_ai_response:
+        if the_input_box.text() == "" or the_input_box.text() == "Thinking..." or the_input_box.text() == last_ai_response:
             the_input_box.setText(llm_output)
         last_ai_response = llm_output
 
@@ -137,7 +137,7 @@ def process_screenshot():
     def play_text():
         from ..gpt_computer_assistant import the_input_box
         global last_ai_response
-        if the_input_box.text() == "" or the_input_box.text() == last_ai_response:
+        if the_input_box.text() == "" or the_input_box.text() == "Thinking..." or the_input_box.text() == last_ai_response:
             the_input_box.setText(llm_output)
         last_ai_response = llm_output
 
@@ -205,7 +205,7 @@ def process_text(text, screenshot_path=None):
     def play_text():
         from ..gpt_computer_assistant import the_input_box
         global last_ai_response
-        if the_input_box.text() == "" or the_input_box.text() == last_ai_response:
+        if the_input_box.text() == "" or the_input_box.text() == "Thinking..." or the_input_box.text() == last_ai_response:
             the_input_box.setText(llm_output)
         last_ai_response = llm_output
 
diff --git a/gpt_computer_assistant/audio/record.py b/gpt_computer_assistant/audio/record.py
index 4da826bd..d78149c4 100644
--- a/gpt_computer_assistant/audio/record.py
+++ b/gpt_computer_assistant/audio/record.py
@@ -24,6 +24,8 @@
 def start_recording(take_system_audio=False):
+    from ..gpt_computer_assistant import the_input_box
+    the_input_box.setText("Click again when recording is done")
     global recording, audio_data
     recording = True
     audio_data = np.array([], dtype='float32')
diff --git a/gpt_computer_assistant/gpt_computer_assistant.py b/gpt_computer_assistant/gpt_computer_assistant.py
index 668edcff..6435c89a 100644
--- a/gpt_computer_assistant/gpt_computer_assistant.py
+++ b/gpt_computer_assistant/gpt_computer_assistant.py
@@ -75,7 +75,7 @@ def __init__(self):
         self.old_position = self.pos()  # For moving window
 
     def initUI(self):
-        self.setWindowTitle('GPT-4o')
+        self.setWindowTitle('GPT')
         self.setGeometry(100, 100, 200, 200)  # Adjust the size as needed
 
@@ -220,6 +220,7 @@ def paintEvent(self, event):
             radius = 70 + radius_variation
             painter.drawEllipse(int(center_x - radius / 2), int(center_y - radius / 2), int(radius), int(radius))
         elif self.state == 'thinking':
+            the_input_box.setText("Thinking...")
             # more slow pulsating circle with smooth easing animation
             radius_variation = 5 * (1 + math.sin(self.pulse_frame * math.pi / 100))
             radius = 70 + radius_variation
@@ -293,17 +294,17 @@ def pulse_circle(self):
         self.update()
 
     def mousePressEvent(self, event: QMouseEvent):
-        if self.circle_rect.contains(event.pos()):
-            self.button_handler.toggle_recording(dont_save_image=True)
-        elif self.small_circle_rect.contains(event.pos()):
-            self.button_handler.toggle_recording(no_screenshot=True)
-        elif self.small_circle_left.contains(event.pos()):
-            self.button_handler.toggle_recording(take_system_audio=True)
-        elif self.small_circle_left_top.contains(event.pos()):
-            self.button_handler.just_screenshot()
-
-        elif self.small_rect_right_top.contains(event.pos()):
-            self.button_handler.settings_popup()
+        if self.state == 'idle' or self.state == 'talking':
+            if self.circle_rect.contains(event.pos()):
+                self.button_handler.toggle_recording(dont_save_image=True)
+            elif self.small_circle_rect.contains(event.pos()):
+                self.button_handler.toggle_recording(no_screenshot=True)
+            elif self.small_circle_left.contains(event.pos()):
+                self.button_handler.toggle_recording(take_system_audio=True)
+            elif self.small_circle_left_top.contains(event.pos()):
+                self.button_handler.just_screenshot()
+
+