Commit b4370ad

Merge pull request #73 from liuooo/add-more-examples
Move the e2e tests into the examples directory to lower the barrier to trying them out, and add more examples.
2 parents d685401 + 1b77f2e commit b4370ad

19 files changed: +392 −403 lines

README.md

Lines changed: 5 additions & 3 deletions
@@ -92,15 +92,17 @@ Interface documentation address: http://127.0.0.1:8086/docs
 
 ### Complete Usage Example
 
-In this example, an AI assistant is created and run using the official OpenAI client library, including two built-in
-tools, web_search and retrieval, and a custom function.
+In this example, an AI assistant is created and run using the official OpenAI client library. If you need to explore other usage methods,
+such as streaming output, tools (web_search, retrieval, function), etc., you can find the corresponding code under the examples directory.
 Before running, you need to run `pip install openai` to install the Python `openai` library.
 
 ```sh
 # !pip install openai
-python tests/e2e/index.py
+export PYTHONPATH=$(pwd)
+python examples/run_assistant.py
 ```
 
+
 ### Permissions
 Simple user isolation is provided based on tokens to meet SaaS deployment requirements. It can be enabled by configuring `APP_AUTH_ENABLE`.

README_CN.md

Lines changed: 4 additions & 2 deletions
@@ -89,12 +89,14 @@ Api Base URL: http://127.0.0.1:8086/api/v1
 
 ### Complete Usage Example
 
-This example uses the official OpenAI client library to create and run an AI assistant, including the two built-in tools web_search and retrieval plus one custom function.
+This example uses the official OpenAI client library to create and run an AI assistant. For other usage patterns, such as streaming output and the use of tools (web_search, retrieval, function),
+see the corresponding examples under the examples directory.
 Before running, run `pip install openai` to install the Python `openai` library.
 
 ```sh
 # !pip install openai
-python tests/e2e/index.py
+export PYTHONPATH=$(pwd)
+python examples/run_assistant.py
 ```
 
 ### Permissions

examples/__init__.py

Whitespace-only changes.

examples/action/__init__.py

Whitespace-only changes.

tests/run/run_with_auth_action_test.py renamed to examples/action/run_with_auth_action.py

Lines changed: 48 additions & 42 deletions
@@ -1,26 +1,27 @@
 import time
+import logging
+import requests
+import json
 
-import openai
-import pytest
+from app.exceptions.exception import BadRequestError
+from examples.prerun import client
+from examples.prerun import base_url
+from examples.prerun import api_key
 
-from app.providers.database import session
-from app.schemas.tool.action import ActionBulkCreateRequest
-from app.schemas.tool.authentication import Authentication, AuthenticationType
-from app.services.tool.action import ActionService
 
+# To test against localhost, you can listen on a port with the shell command 'echo -e "HTTP/1.1 200 OK\r\n\r\n Success" | nc -l 9999'.
+# Make sure to change the URL to match your API server.
+auth_server_url = "http://localhost:9999/api/v1"
 
-@pytest.fixture
-def api_url():
-    return "http://127.0.0.1:8086/api/v1/actions"
-
-
-@pytest.fixture
-def create_workspace_with_authentication():
-    return {
+def create_workspace_action():
+    """
+    Create an action with the actions API.
+    """
+    openapi_schema = {
         "openapi_schema": {
             "openapi": "3.0.0",
             "info": {"title": "Create New Workspace", "version": "1.0.0"},
-            "servers": [{"url": "https://tx.c.csvfx.com/api"}],
+            "servers": [{"url": f"{auth_server_url}"}],
             "paths": {
                 "/tx/v1/workspaces": {
                     "post": {
@@ -74,68 +75,73 @@ def create_workspace_with_authentication():
             },
         }
     }
+    openapi_schema["authentication"] = {"type": "none"}
+    actions_url = f"{base_url}/actions"
+    headers = {
+        'Content-Type': 'application/json',
+        'Authorization': f'Bearer {api_key}'
+    }
+    response = requests.request("POST", actions_url, headers=headers, data=json.dumps(openapi_schema), timeout=1000)
+    if response.status_code != 200:
+        raise BadRequestError(f"Failed to create action: {response.text}")
+    return response.json()
 
 
-# 测试带有action的助手,run 的时候传递自己的auth信息
-def test_run_with_action_auth(create_workspace_with_authentication):
-    body = ActionBulkCreateRequest(**create_workspace_with_authentication)
-    body.authentication = Authentication(type=AuthenticationType.none)
-    actions = ActionService.create_actions_sync(session=session, body=body)
-    [create_workspace_with_authentication] = actions
-
-    client = openai.OpenAI(base_url="http://localhost:8086/api/v1", api_key="xxx")
+if __name__ == "__main__":
+    [create_workspace_with_authentication] = create_workspace_action()
+    logging.info("=====> action: %s\n", create_workspace_with_authentication)
 
-    # 创建带有 action 的 assistant
+    # create an assistant with an action
     assistant = client.beta.assistants.create(
         name="Assistant Demo",
-        instructions="你是一个有用的助手",
-        tools=[{"type": "action", "id": create_workspace_with_authentication.id}],
+        instructions="you are a personal assistant",
+        tools=[{"type": "action", "id": create_workspace_with_authentication["id"]}],
         model="gpt-3.5-turbo-1106",
     )
-    print(assistant, end="\n\n")
+    logging.info("=====> : %s\n", assistant)
 
     thread = client.beta.threads.create()
-    print(thread, end="\n\n")
+    logging.info("=====> : %s\n", thread)
 
     message = client.beta.threads.messages.create(
         thread_id=thread.id,
        role="user",
-        content="在组织63db49f7dcc8bf7b0990903c下,创建一个随机名字的工作空间",
+        content="在组织 63db49f7dcc8bf7b0990903c 下, 创建一个随机名字的工作空间",
     )
-    print(message, end="\n\n")
+    logging.info("=====> : %s\n", message)
 
+    # create a run with auth info
     run = client.beta.threads.runs.create(
-        # model="gpt-3.5-turbo-1106",
        thread_id=thread.id,
        assistant_id=assistant.id,
        instructions="",
        extra_body={
            "extra_body": {
                "action_authentications": {
-                    create_workspace_with_authentication.id: {
+                    create_workspace_with_authentication["id"]: {
+                        # auth info, change as needed
                        "type": "bearer",
-                        "secret": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJqdGkiOiI2M2RiNDlhY2RjYzhiZjdiMDk5MDhmZDYiLCJhdWQiOiI2M2RiNDlmN2RjYzhiZjdiMDk5MDkwM2MiLCJ1aWQiOiI2M2RiNDlhY2RjYzhiZjdiMDk5MDhmZDYiLCJpYXQiOjE3MTAxNDkxODcsImV4cCI6MTcxMDIzNTU4N30.h96cKhB8rPGKM2PEq6bg4k2j09gR82HCJHUws232Oe4",
+                        "secret": "xxx",
                    }
                }
            }
        },
    )
-    print(run, end="\n\n")
+    logging.info("=====> : %s\n", run)
 
     while True:
-        # run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)
        run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)
        if run.status == "completed":
-            print("done!", end="\n\n")
            messages = client.beta.threads.messages.list(thread_id=thread.id)
 
-            print("messages: ")
+            logging.info("=====> messages:")
            for message in messages:
                assert message.content[0].type == "text"
-                print(messages)
-                print({"role": message.role, "message": message.content[0].text.value})
-
+                logging.info("%s", {"role": message.role, "message": message.content[0].text.value})
+            break
+        elif run.status == "failed":
+            logging.error("run failed %s\n", run.last_error)
            break
        else:
-            print("\nin progress...")
-            time.sleep(1)
+            logging.info("in progress...\n")
+            time.sleep(5)
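
The comment at the top of this example answers the action's HTTP call with an `nc` one-liner. If you want a local listener that survives more than one request, a minimal sketch using Python's standard `http.server` could look like the following; the port (9999) matches the example above, while the module name, handler, and JSON body returned are illustrative assumptions and not part of this commit.

```python
# mock_auth_server.py -- hypothetical helper, not part of this commit.
# Accepts any POST (e.g. to /api/v1/tx/v1/workspaces) and replies 200,
# so the action call in run_with_auth_action.py has something to hit.
import json
from http.server import BaseHTTPRequestHandler, HTTPServer


class MockHandler(BaseHTTPRequestHandler):
    def do_POST(self):
        length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(length)  # payload sent by the assistant's action call
        self.log_message("received POST %s: %s", self.path, body.decode("utf-8", "ignore"))

        response = json.dumps({"status": "Success"}).encode()  # assumed response shape
        self.send_response(200)
        self.send_header("Content-Type", "application/json")
        self.send_header("Content-Length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)


if __name__ == "__main__":
    HTTPServer(("localhost", 9999), MockHandler).serve_forever()
```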

examples/assistant/__init__.py

Whitespace-only changes.
Lines changed: 23 additions & 0 deletions
@@ -0,0 +1,23 @@
import logging

from examples.prerun import client

if __name__ == "__main__":
    assistant = client.beta.assistants.create(
        name="Assistant Demo",
        instructions="you are a personal assistant, reply 'hello' to user",
        model="gpt-3.5-turbo-1106",
        extra_body={
            "extra_body": {
                "model_params": {
                    "frequency_penalty": 0,
                    "logit_bias": None,
                    "max_tokens": 1024,
                    "temperature": 1,
                    "presence_penalty": 0,
                    "top_p": 1,
                }
            }
        },
    )
    logging.info("=====> : %s\n", assistant)
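
A note on the nested `extra_body` above: with the OpenAI Python SDK, the `extra_body` keyword argument merges additional fields into the outgoing request JSON, so this call sends a top-level request field literally named `extra_body` containing `model_params`. That the Open Assistant API reads model parameters from that field is inferred from this example rather than stated elsewhere in the diff.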
Lines changed: 15 additions & 0 deletions
@@ -0,0 +1,15 @@
import logging

from examples.prerun import client

if __name__ == "__main__":
    # three types of memory are supported: naive, zero, window
    assistant = client.beta.assistants.create(
        name="Assistant Demo",
        instructions="you are a personal assistant, reply 'hello' to user",
        model="gpt-3.5-turbo-1106",
        metadata={"memory": {"type": "naive"}}
        # metadata={"memory": {"type": "zero"}}
        # metadata={"memory": {"type": "window", "window_size": 5}}
    )
    logging.info("=====> : %s\n", assistant)
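
The memory types themselves are not documented in this diff; judging by the names and the `window_size` parameter, `zero` presumably keeps no conversation history, `window` keeps only the most recent messages, and `naive` keeps everything, but that reading is an assumption drawn from this example alone.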

examples/prerun.py

Lines changed: 10 additions & 0 deletions
@@ -0,0 +1,10 @@
import logging
import openai

logging.basicConfig(level=logging.INFO, format='%(levelname)s - %(message)s')

# Use this client to run examples.
# If you have enabled authentication management, please set the 'api_key'. Otherwise, you can ignore this parameter.
base_url = "http://localhost:8086/api/v1"
api_key = "ml-xxx"
client = openai.OpenAI(base_url=base_url, api_key=api_key)
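
All of the example scripts import this shared `client`, which is also why the README change above adds `export PYTHONPATH=$(pwd)` before running them: the examples are executed from the repository root so that `examples.prerun` is importable as a package.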

examples/run_assistant.py

Lines changed: 55 additions & 0 deletions
@@ -0,0 +1,55 @@
import time
import logging

from examples.prerun import client

if __name__ == "__main__":

    assistant = client.beta.assistants.create(
        name="Assistant Demo",
        instructions="you are a personal assistant, reply 'hello' to user",
        model="gpt-3.5-turbo-1106",
    )
    logging.info("=====> : %s\n", assistant)

    thread = client.beta.threads.create()
    logging.info("=====> : %s\n", thread)

    message = client.beta.threads.messages.create(
        thread_id=thread.id,
        role="user",
        content="hello",
    )
    logging.info("=====> : %s\n", message)

    run = client.beta.threads.runs.create(
        thread_id=thread.id,
        assistant_id=assistant.id,
        instructions="",
    )
    logging.info("=====> : %s\n", run)

    logging.info("checking assistant status. \n")
    while True:
        run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)
        run_steps = client.beta.threads.runs.steps.list(run_id=run.id, thread_id=thread.id).data
        for run_step in run_steps:
            logging.info("=====> : %s\n", run_step)

        if run.status == "completed":
            messages = client.beta.threads.messages.list(thread_id=thread.id)

            logging.info("=====> messages:")
            for message in messages:
                assert message.content[0].type == "text"
                logging.info("%s", {"role": message.role, "message": message.content[0].text.value})

            # delete asst
            client.beta.assistants.delete(assistant.id)
            break
        elif run.status == "failed":
            logging.error("run failed %s\n", run.last_error)
            break
        else:
            logging.info("in progress...\n")
            time.sleep(5)
