remove personal configs
parent
f6cc8995fb
commit
fdbb9fd9c3
|
@ -1,15 +0,0 @@
|
||||||
# Agent configuration. Each value below is a lookup key into a sibling
# config file (config_llm.toml, config_mem.toml, config_mcp.toml).

[humanus_cli]
llm = "qwen-max-latest"    # Key in config_llm.toml
memory = "long-context"    # Key in config_mem.toml
tools = ["filesystem", "playwright", "image_loader"]  # Builtin tools configuration
mcp_servers = ["python_execute"]  # Key in config_mcp.toml; all MCP tools provided by these servers will be added
max_steps = 30             # Maximum automatic steps without user's check
duplicate_threshold = 2    # Used to detect repeating condition (checked by LCS)

[humanus_plan]
llm = "deepseek-chat"      # Key in config_llm.toml
memory = "long-context"    # Key in config_mem.toml
tools = ["filesystem", "playwright", "image_loader"]
mcp_servers = ["python_execute"]
max_steps = 30
duplicate_threshold = 2
|
|
|
@ -1,17 +0,0 @@
|
||||||
# Embedding model endpoints. Keys are referenced by `embedding_model`
# in config_mem.toml. Quoted table names are required where the name
# contains dots (a bare dotted key would nest tables instead).

["nomic-embed-text-v1.5"]
provider = "oai"                    # Only OAI-compatible style is supported for now
base_url = "http://localhost:8080"  # Base url. Note: Don't add any endpoint behind
endpoint = "/v1/embeddings"         # Endpoint of embeddings
model = "nomic-embed-text-v1.5.f16.gguf"  # Model name
api_key = ""                        # Your API Key
embeddings_dim = 768                # Dimension of embeddings (refer to API docs)
max_retries = 3                     # Maximum retry count

[qwen-text-embedding-v3]
provider = "oai"
base_url = "https://dashscope.aliyuncs.com"
endpoint = "/compatible-mode/v1/embeddings"
model = "text-embedding-v3"
api_key = ""                        # Your API Key — redacted; never commit real credentials
embeddings_dim = 1024
max_retries = 3
|
|
|
@ -1,45 +0,0 @@
|
||||||
# Chat-completion model endpoints. Keys are referenced by `llm` in the
# agent and memory configs. All api_key values are redacted — the
# original file committed live credentials, which must be rotated.

[qwen-max]
model = "qwen-max"                                 # Model name
base_url = "https://dashscope.aliyuncs.com"        # Base url. Note: Don't add any endpoint behind
endpoint = "/compatible-mode/v1/chat/completions"  # Endpoint of chat completions
api_key = ""                                       # Your API Key

[qwen-max-latest]
model = "qwen-max-latest"
base_url = "https://dashscope.aliyuncs.com"
endpoint = "/compatible-mode/v1/chat/completions"
api_key = ""

[qwen-vl-max-latest]
model = "qwen-vl-max-latest"
base_url = "https://dashscope.aliyuncs.com"
endpoint = "/compatible-mode/v1/chat/completions"
api_key = ""
enable_vision = true  # Model accepts content items like {"image_url", {"url", "xxx"}}

["claude-3.5-sonnet"]
model = "anthropic/claude-3.5-sonnet"
base_url = "https://openrouter.ai"
endpoint = "/api/v1/chat/completions"
api_key = ""
enable_vision = true

["claude-3.7-sonnet"]
model = "anthropic/claude-3.7-sonnet"
base_url = "https://openrouter.ai"
endpoint = "/api/v1/chat/completions"
api_key = ""
enable_vision = true

[deepseek-chat]
model = "deepseek-chat"
base_url = "https://api.deepseek.com"
endpoint = "/v1/chat/completions"
api_key = ""

[deepseek-r1]
model = "deepseek-reasoner"
base_url = "https://api.deepseek.com"
endpoint = "/v1/chat/completions"
api_key = ""
enable_tool = false  # Tool calling disabled for this model
|
|
|
@ -1,23 +0,0 @@
|
||||||
# MCP server definitions. Keys are referenced by `mcp_servers` in the
# agent config. `type` selects the transport: "sse" (HTTP server-sent
# events) or "stdio" (spawn `command` with `args`).

[python_execute]
type = "sse"
host = "localhost"
port = 8895
sse_endpoint = "/sse"
# NOTE(review): key name looks like a typo for "message_endpoint" — kept
# as-is because the consumer may read this exact key; confirm before renaming.
message_enpoint = "/message"

[puppeteer]
type = "stdio"
command = "npx"
args = ["-y", "@modelcontextprotocol/server-puppeteer"]

[playwright]
type = "stdio"
command = "npx"
args = ["-y", "@executeautomation/playwright-mcp-server"]

[filesystem]
type = "stdio"
command = "npx"
# The final entry lists the allowed filesystem paths. It is
# machine-specific (a personal home directory) — adjust per deployment.
args = [
    "-y",
    "@modelcontextprotocol/server-filesystem",
    "/Users/hyde/Desktop",
]
|
|
|
@ -1,19 +0,0 @@
|
||||||
# Memory profiles. Keys are referenced by `memory` in the agent config.
# Token limits interact: max_tokens_message <= max_tokens_messages
# <= max_tokens_context (presumably — confirm against the consumer).

[default]
max_messages = 16             # Maximum number of messages in short-term memory
max_tokens_message = 32768    # Maximum number of tokens in a single message
max_tokens_messages = 65536   # Maximum number of tokens in short-term memory
max_tokens_context = 131072   # Maximum number of tokens in context (used by `get_messages`)
retrieval_limit = 32          # Maximum number of results to retrieve from long-term memory
embedding_model = "qwen-text-embedding-v3"  # Key in config_embd.toml
vector_store = "hnswlib"      # Key in config_vec.toml
llm = "qwen-max-latest"       # Key in config_llm.toml

[long-context]
max_messages = 32
max_tokens_message = 64000
max_tokens_messages = 128000
max_tokens_context = 128000
retrieval_limit = 32
embedding_model = "qwen-text-embedding-v3"
vector_store = "hnswlib"
llm = "qwen-max-latest"
|
|
|
@ -1,8 +0,0 @@
|
||||||
# Vector-store backends. Keys are referenced by `vector_store` in
# config_mem.toml.

[hnswlib]
provider = "hnswlib"
dim = 768           # Dimension of the elements (must match the embedding model's embeddings_dim)
max_elements = 100  # Maximum number of elements; should be known beforehand
# Tightly connected with internal dimensionality of the data;
# strongly affects the memory consumption.
M = 16
ef_construction = 200  # Controls index search speed/build speed tradeoff
metric = "L2"          # Distance metric to use, can be L2 or IP
|
|
Loading…
Reference in New Issue