This repository has been archived by the owner on Oct 19, 2023. It is now read-only.

Commit

Merge pull request #37 from jina-ai/fix-babyagi-pg
fix(app): babyagi-as-a-service playground
deepankarm authored Apr 24, 2023
2 parents 98db387 + 4518297
commit 745e297
Showing 7 changed files with 58 additions and 22 deletions.
README.md: 15 additions & 0 deletions
@@ -564,6 +564,7 @@ curl -sX POST 'https://langchain.wolf.jina.ai/api/run' \
 
 - [My client that connects to the App gets timed-out, what should I do?](#my-client-that-connects-to-the-app-gets-timed-out-what-should-I-do)
 - [JCloud deployment failed at pushing image to Jina Hubble, what should I do?](#jcloud-deployment-failed-at-pushing-image-to-jina-hubble-what-should-i-di)
+- [Debug babyagi playground request/response for external integration](#debug-babyagi-playground-requestresponse-for-external-integration)
 
 ### My client that connects to the App gets timed-out, what should I do?
 
@@ -572,3 +573,17 @@ If you make long HTTP requests, you may experience timeouts due to limitations i
 ### JCloud deployment failed at pushing image to Jina Hubble, what should I do?
 
 Please use `--verbose` and retry to get more information. If you are operating on computer with `arm64` arch, please retry with `--platform linux/amd64` so the image can be built correctly.
+
+### Debug babyagi playground request/response for external integration
+
+1. Start textual console in a terminal (exclude following groups to reduce the noise in logging)
+
+```bash
+textual console -x EVENT -x SYSTEM -x DEBUG
+```
+
+2. Start the playground with `--verbose` flag. Start interacting and see the logs in the console.
+
+```bash
+lc-serve playground babyagi --verbose
+```
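
Note: the new FAQ section targets external integrations that talk to the same websocket endpoint the playground uses. As a companion, here is a minimal client sketch, assuming a deployed app exposing the `baby_agi` endpoint; the host URL and the payload values are placeholders, only fields visible in this diff are used, and the `objective` field name is an assumption (check the actual `UserInput` model in `lcserve/playground/babyagi/user_input.py`).

```python
# Hypothetical external client for the babyagi websocket endpoint.
# Mirrors the shape of talk_to_agent() in lcserve/playground/babyagi/playground.py.
import asyncio

import aiohttp

HOST = 'wss://your-app.wolf.jina.ai'  # placeholder deployment URL
ENDPOINT = 'baby_agi'                 # default endpoint from user_input.py


async def main():
    payload = {
        # Field names follow the UserInput model; values are illustrative only.
        'objective': 'Write a weather report for SF today',  # assumed field name
        'first_task': 'Develop a task list',
        'predefined_tools': {},  # exact shape depends on PredefinedTools
        'custom_tools': [],
        'interactive': False,
        'envs': {'OPENAI_API_KEY': '<your-key>'},
    }
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect(f'{HOST}/{ENDPOINT}') as ws:
            await ws.send_json(payload)
            async for msg in ws:
                if msg.type == aiohttp.WSMsgType.TEXT:
                    if msg.data == 'close cmd':
                        await ws.close()
                        break
                    # Raw JSON frames: CoTResponse, TaskDetailsResponse, etc.
                    print(msg.data)
                elif msg.type == aiohttp.WSMsgType.ERROR:
                    print('ws connection closed with exception', ws.exception())
                    break


if __name__ == '__main__':
    asyncio.run(main())
```
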
lcserve/__main__.py: 9 additions & 3 deletions
@@ -61,7 +61,7 @@ async def serve_on_jcloud(
             timeout=timeout,
             app_id=app_id,
             gateway_id=gateway_id_wo_tag + ':' + tag,
-            websocket=is_websocket,
+            is_websocket=is_websocket,
         ),
         app_id=app_id,
         verbose=verbose,
@@ -362,11 +362,17 @@ def playground():
 
 
 @playground.command(help='Play with babyagi on JCloud.')
-def babyagi():
+@click.option(
+    '--verbose',
+    is_flag=True,
+    help='Verbose mode.',
+    show_default=True,
+)
+def babyagi(verbose):
     sys.path.append(os.path.join(os.path.dirname(__file__), 'playground', 'babyagi'))
     from .playground.babyagi.playground import play
 
-    play()
+    play(verbose=verbose)
 
 
 if __name__ == "__main__":
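
Note: stripped of the surrounding CLI, the new flag wiring is plain click: an `is_flag` option on the `babyagi` subcommand forwarded to `play()`. A self-contained sketch of the same pattern, with `play` stubbed out, for reference:

```python
import click


def play(verbose: bool = False):
    # Stand-in for lcserve.playground.babyagi.playground.play
    click.echo(f'starting playground, verbose={verbose}')


@click.group()
def playground():
    pass


@playground.command(help='Play with babyagi on JCloud.')
@click.option('--verbose', is_flag=True, help='Verbose mode.', show_default=True)
def babyagi(verbose):
    play(verbose=verbose)


if __name__ == '__main__':
    playground()
```
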
lcserve/apps/babyagi/requirements.txt: 3 additions & 0 deletions
@@ -1,2 +1,5 @@
 openai
 wikipedia
+tiktoken
+faiss-cpu
+google-search-results
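
Note: of the new pins, `tiktoken` is OpenAI's tokenizer, `faiss-cpu` provides the FAISS vector store, and `google-search-results` is the SerpAPI client; presumably they back token counting, the task-result vector store, and the search tool that the babyagi app relies on.
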
lcserve/flow.py: 13 additions & 9 deletions
@@ -388,15 +388,15 @@ def get_with_args_for_jcloud() -> Dict:
 
 def get_gateway_jcloud_args(
     instance: str = Defaults.instance,
-    websocket: bool = False,
+    is_websocket: bool = False,
     timeout: int = DEFAULT_TIMEOUT,
 ) -> Dict:
 
     _autoscale = AutoscaleConfig(stable_window=timeout)
 
     # TODO: remove this when websocket + autoscale is supported in JCloud
-    _timeout = 600 if websocket else timeout
-    _autoscale_args = {} if websocket else _autoscale.to_dict()
+    _timeout = 600 if is_websocket else timeout
+    _autoscale_args = {} if is_websocket else _autoscale.to_dict()
 
     return {
         'jcloud': {
@@ -405,7 +405,7 @@ def get_gateway_jcloud_args(
                 'instance': instance,
                 'capacity': 'spot',
             },
-            'healthcheck': False if websocket else True,
+            'healthcheck': False if is_websocket else True,
             'timeout': _timeout,
             **_autoscale_args,
         }
@@ -420,7 +420,7 @@ def get_flow_dict(
     timeout: int = DEFAULT_TIMEOUT,
     app_id: str = None,
     gateway_id: str = None,
-    websocket: bool = False,
+    is_websocket: bool = False,
 ) -> Dict:
     if isinstance(module, str):
         module = [module]
@@ -435,10 +435,10 @@ def get_flow_dict(
                 'modules': module,
             },
             'port': [port],
-            'protocol': ['websocket'] if websocket else ['http'],
+            'protocol': ['websocket'] if is_websocket else ['http'],
             **get_uvicorn_args(),
             **(
-                get_gateway_jcloud_args(timeout=timeout, websocket=websocket)
+                get_gateway_jcloud_args(timeout=timeout, is_websocket=is_websocket)
                 if jcloud
                 else {}
            ),
@@ -452,11 +452,15 @@ def get_flow_yaml(
     jcloud: bool = False,
     port: int = 8080,
     name: str = APP_NAME,
-    websocket: bool = False,
+    is_websocket: bool = False,
 ) -> str:
     return yaml.safe_dump(
         get_flow_dict(
-            module=module, jcloud=jcloud, port=port, name=name, websocket=websocket
+            module=module,
+            jcloud=jcloud,
+            port=port,
+            name=name,
+            is_websocket=is_websocket,
         ),
         sort_keys=False,
     )
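
Note: in short, `is_websocket` switches the gateway protocol to `['websocket']`, pins the JCloud timeout to 600s, drops the autoscale args, and disables the healthcheck. Callers now pass the keyword under its new name; a hedged usage sketch where module path, name, and port are placeholders:

```python
# Hypothetical caller of the renamed keyword; values are placeholders.
from lcserve.flow import get_flow_yaml

flow_yaml = get_flow_yaml(
    module='app',        # your app module
    jcloud=True,         # include the JCloud gateway args shown above
    port=8080,
    name='langchain',    # the APP_NAME default is not visible in this diff
    is_websocket=True,   # websocket protocol instead of http
)
print(flow_yaml)
```
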
lcserve/playground/babyagi/playground.py: 16 additions & 8 deletions
@@ -1,8 +1,10 @@
+import os
 import asyncio
 
 import aiohttp
 import nest_asyncio
 from pydantic import BaseModel, ValidationError
+from textual import log
 from textual.app import App, ComposeResult
 from textual.containers import Horizontal
 from textual.widgets import DataTable, Header, TextLog, Button, Static
@@ -67,17 +69,18 @@ class HumanPrompt(BaseModel):
 async def talk_to_agent(user_input: UserInput):
     async with aiohttp.ClientSession() as session:
         async with session.ws_connect(f'{user_input.host}/{user_input.endpoint}') as ws:
-            print(f'Connected to {user_input.host}/{user_input.endpoint}')
+            log.info(f'Connected to {user_input.host}/{user_input.endpoint}')
+            log.info(f'📤 {user_input.json(exclude={"host", "endpoint"})}')
             await ws.send_json(user_input.dict(exclude={'host', 'endpoint'}))
             async for msg in ws:
                 if msg.type == aiohttp.WSMsgType.TEXT:
                     if msg.data == 'close cmd':
                         await ws.close()
                         break
                     else:
-                        print(f'Got message: {msg.data}')
                         try:
                             response = CoTResponse.parse_raw(msg.data)
+                            log.info(f'📥 {response.json()}')
                             text = None
                             if response.result:
                                 text = response.result
@@ -93,27 +96,30 @@ async def talk_to_agent(user_input: UserInput):
                         except ValidationError:
                             try:
                                 task_details = TaskDetailsResponse.parse_raw(msg.data)
+                                log.info(f'📥 {task_details.json()}')
                                 await task_details_queue.put(task_details)
                                 continue
                             except ValidationError:
                                 try:
                                     task_result = TaskResultResponse.parse_raw(msg.data)
+                                    log.info(f'📥 {task_result.json()}')
                                     await task_result_queue.put(task_result)
                                     continue
                                 except ValidationError as e:
                                     try:
                                         prompt = HumanPrompt.parse_raw(msg.data)
+                                        log.info(f'📥 {prompt.json()}')
                                         await human_prompt_question_queue.put(prompt)
                                         answer = await human_prompt_answer_queue.get()
                                         await ws.send_str(answer)
                                         continue
                                     except ValidationError:
-                                        print(f'Unknown message: {msg.data}')
+                                        log.info(f'Unknown message: {msg.data}')
 
                 elif msg.type == aiohttp.WSMsgType.ERROR:
-                    print('ws connection closed with exception %s' % ws.exception())
+                    log.info('ws connection closed with exception %s' % ws.exception())
                 else:
-                    print(msg)
+                    log.info(msg)
 
 
 class ChainOfThoughts(Horizontal):
@@ -193,6 +199,7 @@ async def _read_prompt(self):
         self._no.variant = 'primary'
 
     async def _send_answer(self, answer: str):
+        log.info(f'📤 {answer}')
         await self._human_prompt_answer_queue.put(answer)
         self._yes.disabled = True
         self._no.disabled = True
@@ -271,9 +278,10 @@ def on_button_pressed(self, event: Button.Pressed) -> None:
         self.exit()
 
 
-def play():
+def play(verbose: bool = False):
     user_input = prompt_user()
-
+    if verbose:
+        os.environ['TEXTUAL'] = 'devtools'
     task = loop.create_task(talk_to_agent(user_input))
     try:
         BabyAGIPlayground().run()
@@ -282,4 +290,4 @@ def play():
 
 
 if __name__ == "__main__":
-    play()
+    play(verbose=False)
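
Note: the print-to-log migration above is what the README's new debugging section relies on. With `TEXTUAL=devtools` set, which `play(verbose=True)` now does before the app starts, `textual.log` calls are streamed to a running `textual console`. A minimal, standalone sketch of that pattern:

```python
# Run `textual console -x EVENT -x SYSTEM -x DEBUG` in one terminal, then
# `TEXTUAL=devtools python this_file.py` in another to see the log lines.
from textual import log
from textual.app import App, ComposeResult
from textual.widgets import Static


class LogDemo(App):
    def compose(self) -> ComposeResult:
        yield Static('Logs go to the textual console, not this screen.')

    def on_mount(self) -> None:
        # Appears in the devtools console, like the 📤/📥 lines in the playground.
        log.info('📤 hello from LogDemo')


if __name__ == '__main__':
    LogDemo().run()
```
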
lcserve/playground/babyagi/user_input.py: 2 additions & 1 deletion
@@ -22,8 +22,9 @@ class UserInput(BaseModel):
     first_task: str
     predefined_tools: PredefinedTools
     custom_tools: List[CustomTool]
-    endpoint: str = '/baby_agi'
+    endpoint: str = 'baby_agi'
     interactive: bool = True
+    envs: Dict[str, str] = {}
 
 
 def prompt_user() -> UserInput:
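
Note: the `endpoint` default loses its leading slash, presumably because `talk_to_agent()` builds the URL as `f'{host}/{endpoint}'`, where `'/baby_agi'` would produce a double slash; the new `envs` field apparently gives the request payload a place to carry environment variables (e.g. API keys) to the app.
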
lcserve/playground/pdf_qna/playground.py: 0 additions & 1 deletion
@@ -1,7 +1,6 @@
 import os
 import sys
 import requests
-import json
 
 import streamlit as st
 from pydantic import BaseModel
