check before compilation

main
hkr04 2025-03-16 22:56:03 +08:00
parent 256c2dcac8
commit d0bb48aced
30 changed files with 659 additions and 534 deletions

View File

@ -32,30 +32,43 @@ struct BaseAgent : std::enable_shared_from_this<BaseAgent> {
int duplicate_threshold; // Threshold for duplicate messages
BaseAgent(const std::string& name,
BaseAgent(
const std::string& name,
const std::string& description,
const std::string& system_prompt,
const std::string& next_step_prompt,
const std::shared_ptr<LLM>& llm = nullptr,
const std::shared_ptr<Memory>& memory = nullptr,
AgentState state = AgentState::IDLE,
int max_steps = 10,
int current_step = 0,
int duplicate_threshold = 2) :
name(name), description(description), system_prompt(system_prompt), next_step_prompt(next_step_prompt), max_steps(max_steps), current_step(current_step), duplicate_threshold(duplicate_threshold) {
state = AgentState::IDLE;
int duplicate_threshold = 2
) : name(name),
description(description),
system_prompt(system_prompt),
next_step_prompt(next_step_prompt),
llm(llm),
memory(memory),
state(state),
max_steps(max_steps),
current_step(current_step),
duplicate_threshold(duplicate_threshold) {
initialize_agent();
}
// Initialize agent with default settings if not provided.
BaseAgent* initialize_agent() {
void initialize_agent() {
if (!llm) {
llm = LLM::get_instance(name);
}
if (!memory) {
memory = std::make_shared<Memory>();
}
return this;
}
// Add a message to the agent's memory
void update_memory(const std::string& role, const std::string& content) {
template<typename... Args>
void update_memory(const std::string& role, const std::string& content, Args&&... args) {
if (role == "user") {
memory->add_message(Message::user_message(content));
} else if (role == "assistant") {
@ -63,12 +76,14 @@ struct BaseAgent : std::enable_shared_from_this<BaseAgent> {
} else if (role == "system") {
memory->add_message(Message::system_message(content));
} else if (role == "tool") {
memory->add_message(Message::tool_message(content));
memory->add_message(Message::tool_message(content, std::forward<Args>(args)...));
} else {
throw std::runtime_error("Unsupported message role: " + role);
}
}
// Execute the agent's main loop asynchronously
virtual std::string run(const std::string& request) {
virtual std::string run(const std::string& request = "") {
if (state != AgentState::IDLE) {
throw std::runtime_error("Cannot run agent from state" + agent_state_map[state]);
}
@ -77,7 +92,7 @@ struct BaseAgent : std::enable_shared_from_this<BaseAgent> {
update_memory("user", request);
}
state = AgentState::RUNNING;
state = AgentState::RUNNING; // IDLE -> RUNNING
std::vector<std::string> results;
while (current_step < max_steps && state == AgentState::RUNNING) {
current_step++;
@ -93,7 +108,7 @@ struct BaseAgent : std::enable_shared_from_this<BaseAgent> {
if (current_step >= max_steps) {
results.push_back("Terminated: Reached max steps (" + std::to_string(max_steps) + ")");
}
state = AgentState::IDLE;
state = AgentState::IDLE; // RUNNING -> IDLE
std::string result_str = "";
@ -136,18 +151,22 @@ struct BaseAgent : std::enable_shared_from_this<BaseAgent> {
}
// Count identical content occurrences
int count = 0;
int duplicate_count = 0;
for (auto r_it = messages.rbegin(); r_it != messages.rend(); ++r_it) {
const Message& message = *r_it;
if (message.role == "assistant" && message.content == last_message.content) {
count++;
if (count >= duplicate_threshold) {
duplicate_count++;
if (duplicate_count >= duplicate_threshold) {
break;
}
} else {
break;
// Stop counting if a non-duplicate message is encountered
// Slightly different from the OpenManus implementation
}
}
return count >= duplicate_threshold;
return duplicate_count >= duplicate_threshold;
}
void set_messages(const std::vector<Message>& messages) {

View File

@ -7,8 +7,8 @@
#include "../tool/tool_collection.h"
#include "../tool/python_execute.h"
#include "../tool/terminate.h"
#include "../tool/google_search.h"
#include "../tool/file_saver.h"
#include "../tool/puppeteer.h"
#include "../tool/filesystem.h"
namespace humanus {
@ -20,18 +20,11 @@ namespace humanus {
* to handle a wide range of user requests.
*/
struct Manus : ToolCallAgent {
std::string name = "manus";
std::string description = "A versatile agent that can solve various tasks using multiple tools";
std::string system_prompt = prompt::manus::SYSTEM_PROMPT;
std::string next_step_prompt = prompt::manus::NEXT_STEP_PROMPT;
Manus(
const ToolCollection& available_tools = ToolCollection(
const ToolCollection& available_tools = ToolCollection( // Add general-purpose tools to the tool collection
{
std::make_shared<PythonExecute>(),
std::make_shared<Puppeteer>(),
std::make_shared<Puppeteer>(), // for web browsing
std::make_shared<FileSystem>(),
std::make_shared<Terminate>()
}
@ -42,6 +35,9 @@ struct Manus : ToolCallAgent {
const std::string& description = "A versatile agent that can solve various tasks using multiple tools",
const std::string& system_prompt = prompt::manus::SYSTEM_PROMPT,
const std::string& next_step_prompt = prompt::manus::NEXT_STEP_PROMPT,
const std::shared_ptr<LLM>& llm = nullptr,
const std::shared_ptr<Memory>& memory = nullptr,
AgentState state = AgentState::IDLE,
int max_steps = 30,
int current_step = 0,
int duplicate_threshold = 2
@ -53,6 +49,9 @@ struct Manus : ToolCallAgent {
description,
system_prompt,
next_step_prompt,
llm,
memory,
state,
max_steps,
current_step,
duplicate_threshold

View File

@ -3,22 +3,19 @@
namespace humanus {
// Initialize the agent with a default plan ID and validate required tools.
PlanningAgent* PlanningAgent::initialize_plan_and_verify_tools() {
void PlanningAgent::initialize_plan_and_verify_tools() {
active_plan_id = "plan_" + std::chrono::system_clock::now().time_since_epoch().count();
if (available_tools.tools_map.find("planning") == available_tools.tools_map.end()) {
available_tools.add_tool(std::make_shared<PlanningTool>());
}
return this;
}
// Decide the next action based on plan status.
bool PlanningAgent::think() {
std::string prompt;
if (!active_plan_id.empty()) {
auto plan = get_plan();
prompt = "CURRENT PLAN STATUS:\n" + plan + "\n\n" + next_step_prompt;
prompt = "CURRENT PLAN STATUS:\n" + get_plan() + "\n\n" + next_step_prompt;
} else {
prompt = next_step_prompt;
}
@ -27,17 +24,19 @@ bool PlanningAgent::think() {
// Get the current step index before thinking
current_step_index = _get_current_step_index();
bool result = ToolCallAgent::think();
bool result = ToolCallAgent::think(); // Will set tool_calls
// After thinking, if we decided to execute a tool and it's not a planning tool or special tool,
// associate it with the current step for tracking
if (result && !tool_calls.empty()) {
auto latest_tool_call = tool_calls.back();
auto latest_tool_call = tool_calls.back(); // Get the most recent tool call
if (latest_tool_call.function.name != "planning"
&& !_is_special_tool(latest_tool_call.function.name)
&& current_step_index >= 0) {
step_execution_tracker[latest_tool_call.id] = {
{"step_index", current_step_index},
{"tool_name", latest_tool_call.function.name},
{"status", "pending"} // will be updated after execution
{"status", "pending"} // Will be updated after execution
};
}
}
@ -49,6 +48,7 @@ bool PlanningAgent::think() {
std::string PlanningAgent::act() {
std::string result = ToolCallAgent::act();
// After executing the tool, update the plan status
if (!tool_calls.empty()) {
auto latest_tool_call = tool_calls.back();
@ -58,8 +58,10 @@ std::string PlanningAgent::act() {
step_execution_tracker[latest_tool_call.id]["result"] = result;
// Update the plan status if this was a non-planning, non-special tool
if (latest_tool_call.function.name != "planning"
&& !_is_special_tool(latest_tool_call.function.name)) {
if (
latest_tool_call.function.name != "planning"
&& !_is_special_tool(latest_tool_call.function.name)
) {
update_plan_status(latest_tool_call.id);
}
}
@ -83,12 +85,12 @@ std::string PlanningAgent::get_plan() {
}
// Run the agent with an optional initial request.
std::string PlanningAgent::run(const std::string& request = "") {
std::string PlanningAgent::run(const std::string& request) {
if (!request.empty()) {
create_initial_plan(request);
}
return BaseAgent::run(request);
return BaseAgent::run();
}
// Update the current plan progress based on completed tool execution.
@ -115,16 +117,23 @@ void PlanningAgent::update_plan_status(const std::string& tool_call_id) {
// Mark the step as completed
available_tools.execute(
"planning",
{{"command", "mark_step"}, {"plan_id", active_plan_id}, {"step_index", step_index}, {"status", "completed"}}
{
{"command", "mark_step"},
{"plan_id", active_plan_id},
{"step_index", step_index},
{"step_status", "completed"}
}
);
logger->info(
"Marked step " + std::to_string(step_index) + " as completed in plan " + active_plan_id
);
logger->info("Marked step " + std::to_string(step_index) + " as completed in plan " + active_plan_id);
} catch (const std::exception& e) {
logger->warn("Failed to update plan status: " + std::string(e.what()));
}
}
// Parse the current plan to identify the first non-completed step's index.
// Returns None if no active step is found.
// Returns -1 if no active step is found.
int PlanningAgent::_get_current_step_index() {
if (active_plan_id.empty()) {
return -1;
@ -165,7 +174,12 @@ int PlanningAgent::_get_current_step_index() {
// Mark current step as in_progress
available_tools.execute(
"planning",
{{"command", "mark_step"}, {"plan_id", active_plan_id}, {"step_index", i}, {"status", "in_progress"}}
{
{"command", "mark_step"},
{"plan_id", active_plan_id},
{"step_index", i},
{"step_status", "in_progress"}
}
);
return i;
}
@ -223,7 +237,7 @@ void PlanningAgent::create_initial_plan(const std::string& request) {
if (!plan_created) {
logger->warn("No plan created from initial request");
Message tool_msg = Message::tool_message(
Message tool_msg = Message::assistant_message(
"Error: Parameter `plan_id` is required for command: create"
);
memory->add_message(tool_msg);

View File

@ -32,15 +32,33 @@ struct PlanningAgent : ToolCallAgent {
const std::string& description = "An agent that creates and manages plans to solve tasks",
const std::string& system_prompt = prompt::planning::PLANNING_SYSTEM_PROMPT,
const std::string& next_step_prompt = prompt::planning::NEXT_STEP_PROMPT,
const std::shared_ptr<LLM>& llm = nullptr,
const std::shared_ptr<Memory>& memory = nullptr,
AgentState state = AgentState::IDLE,
int max_steps = 20,
int current_step = 0,
int duplicate_threshold = 2
) : ToolCallAgent(available_tools, tool_choice, special_tool_names, name, description, system_prompt, next_step_prompt, max_steps, current_step, duplicate_threshold) {
current_step_index = -1; // no plan yet
) : ToolCallAgent(
available_tools,
tool_choice,
special_tool_names,
name,
description,
system_prompt,
next_step_prompt,
llm,
memory,
state,
max_steps,
current_step,
duplicate_threshold
) {
current_step_index = -1; // will be set in think()
initialize_plan_and_verify_tools();
}
// Initialize the agent with a default plan ID and validate required tools.
PlanningAgent* initialize_plan_and_verify_tools();
void initialize_plan_and_verify_tools();
// Decide the next action based on plan status.
bool think() override;

View File

@ -6,14 +6,29 @@
namespace humanus {
struct ReActAgent : BaseAgent {
ReActAgent(const std::string& name,
ReActAgent(
const std::string& name,
const std::string& description,
const std::string& system_prompt,
const std::string& next_step_prompt,
const std::shared_ptr<LLM>& llm = nullptr,
const std::shared_ptr<Memory>& memory = nullptr,
AgentState state = AgentState::IDLE,
int max_steps = 10,
int current_step = 0,
int duplicate_threshold = 2) :
BaseAgent(name, description, system_prompt, next_step_prompt, max_steps, current_step, duplicate_threshold) {}
int duplicate_threshold = 2
) : BaseAgent(
name,
description,
system_prompt,
next_step_prompt,
llm,
memory,
state,
max_steps,
current_step,
duplicate_threshold
) {}
// Process current state and decide next actions using tools
virtual bool think() = 0;

View File

@ -4,6 +4,8 @@
#include "toolcall.h"
#include "../tool/tool_collection.h"
#include "../tool/terminate.h"
#include "../tool/shell.h"
#include "../tool/filesystem.h"
#include "../prompt.h"
namespace humanus {
@ -11,11 +13,9 @@ namespace humanus {
// An agent that implements the SWEAgent paradigm for executing code and natural conversations.
struct SweAgent : ToolCallAgent {
std::string working_dir;
std::shared_ptr<Bash> bash;
SweAgent(
const std::string& working_dir = ".",
const std::shared_ptr<Bash>& bash = std::make_shared<Bash>(),
const ToolCollection& available_tools = ToolCollection(
{
std::make_shared<Shell>(),
@ -29,16 +29,32 @@ struct SweAgent : ToolCallAgent {
const std::string& description = "an autonomous AI programmer that interacts directly with the computer to solve tasks.",
const std::string& system_prompt = prompt::swe::SYSTEM_PROMPT,
const std::string& next_step_prompt = prompt::swe::NEXT_STEP_TEMPLATE,
const std::shared_ptr<LLM>& llm = nullptr,
const std::shared_ptr<Memory>& memory = nullptr,
AgentState state = AgentState::IDLE,
int max_steps = 100,
int current_step = 0,
int duplicate_threshold = 2
) : ToolCallAgent(available_tools, tool_choice, special_tool_names, name, description, system_prompt, next_step_prompt, max_steps, current_step, duplicate_threshold),
bash(bash),
) : ToolCallAgent(
available_tools,
tool_choice,
special_tool_names,
name,
description,
system_prompt,
next_step_prompt,
llm,
memory,
state,
max_steps,
current_step,
duplicate_threshold
),
working_dir(working_dir) {}
bool think() override {
// Update working directory
working_dir = bash->execute("pwd");
working_dir = std::filesystem::current_path().string(); // TODO: Maybe use predefined working directory?
next_step_prompt = prompt::swe::NEXT_STEP_TEMPLATE;
next_step_prompt = next_step_prompt.replace(
next_step_prompt.find("{working_dir}"), std::string("{working_dir}").length(), working_dir

View File

@ -12,7 +12,7 @@ bool ToolCallAgent::think() {
// Get response with tool options
auto response = llm->ask_tool(
memory->messages,
std::vector<Message>{Message::system_message(system_prompt)},
system_prompt.empty() ? std::vector<Message>{} : std::vector<Message>{Message::system_message(system_prompt)},
available_tools.to_params(),
tool_choice
);
@ -54,12 +54,12 @@ bool ToolCallAgent::think() {
memory->add_message(assistant_msg);
if (tool_choice == "required" && tool_calls.empty()) {
return true; // will be handled in act()
return true; // Will be handled in act()
}
// For 'auto' mode, continue with content if no commands but content exists
if (tool_choice == "auto" && !tool_calls.empty()) {
return !response["content"].is_null() && !response["content"].empty();
return !response["content"].empty();
}
return !tool_calls.empty();
@ -80,24 +80,22 @@ std::string ToolCallAgent::act() {
}
// Return last message content if no tool calls
return memory->messages.back().content.empty() ? "No content or commands to execute" : memory->messages.back().content.dump();
return memory->messages.empty() || memory->messages.back().content.empty() ? "No content or commands to execute" : memory->messages.back().content.dump();
}
std::vector<std::string> results;
for (const auto& tool_call : tool_calls) {
auto result = std::async(std::launch::async, [this, tool_call]() {
return execute_tool(tool_call);
});
auto result = execute_tool(tool_call);
logger->info(
"🎯 Tool '" + tool_call.function.name + "' completed its mission! Result: " + result.get()
"🎯 Tool '" + tool_call.function.name + "' completed its mission! Result: " + result
);
// Add tool response to memory
Message tool_msg = Message::tool_message(
result.get(), tool_call.id, tool_call.function.name
result, tool_call.id, tool_call.function.name
);
memory->add_message(tool_msg);
results.push_back(result.get());
results.push_back(result);
}
std::string result_str;
@ -108,6 +106,7 @@ std::string ToolCallAgent::act() {
return result_str;
}
// Execute a single tool call with robust error handling
std::string ToolCallAgent::execute_tool(ToolCall tool_call) {
if (tool_call.empty() || tool_call.function.empty() || tool_call.function.name.empty()) {
return "Error: Invalid command format";
@ -122,12 +121,16 @@ std::string ToolCallAgent::execute_tool(ToolCall tool_call) {
// Parse arguments
json args = tool_call.function.arguments;
if (args.is_string()) {
args = json::parse(args.get<std::string>());
}
// Execute the tool
logger->info("🔧 Activating tool: '" + name + "'...");
ToolResult result = available_tools.execute(name, args);
// Format result for display
auto observation = result.to_string().empty() ?
auto observation = result.empty() ?
"Cmd `" + name + "` completed with no output" :
"Observed output of cmd `" + name + "` executed:\n" + result.to_string();
@ -162,7 +165,7 @@ void ToolCallAgent::_handle_special_tool(const std::string& name, const ToolResu
// Determine if tool execution should finish the agent
bool ToolCallAgent::_should_finish_execution(const std::string& name, const ToolResult& result, const json& kwargs) {
return true; // currently, all special tools (terminate) finish the agent
return true; // Currently, all special tools (terminate) finish the agent
}
bool ToolCallAgent::_is_special_tool(const std::string& name) {

View File

@ -31,10 +31,24 @@ struct ToolCallAgent : ReActAgent {
const std::string& description = "an agent that can execute tool calls.",
const std::string& system_prompt = prompt::toolcall::SYSTEM_PROMPT,
const std::string& next_step_prompt = prompt::toolcall::NEXT_STEP_PROMPT,
const std::shared_ptr<LLM>& llm = nullptr,
const std::shared_ptr<Memory>& memory = nullptr,
AgentState state = AgentState::IDLE,
int max_steps = 30,
int current_step = 0,
int duplicate_threshold = 2
) : ReActAgent(name, description, system_prompt, next_step_prompt, max_steps, current_step, duplicate_threshold),
) : ReActAgent(
name,
description,
system_prompt,
next_step_prompt,
llm,
memory,
state,
max_steps,
current_step,
duplicate_threshold
),
available_tools(available_tools),
tool_choice(tool_choice),
special_tool_names(special_tool_names) {}
@ -45,6 +59,7 @@ struct ToolCallAgent : ReActAgent {
// Execute tool calls and handle their results
std::string act() override;
// Execute a single tool call with robust error handling
std::string execute_tool(ToolCall tool_call);
// Handle special tool execution and state changes

View File

@ -11,49 +11,61 @@ Config* Config::_instance = nullptr;
std::mutex Config::_mutex;
// 全局配置实例
Config& config = Config::getInstance();
Config& config = Config::get_instance();
void Config::_load_initial_config() {
try {
auto config_path = _get_config_path();
std::cout << "加载配置文件: " << config_path.string() << std::endl;
auto data = toml::parse_file(config_path.string());
const auto& data = toml::parse_file(config_path.string());
// 检查工具配置是否存在
if (!data.contains("llm") || !data["llm"].is_table()) {
throw std::runtime_error("MCP配置文件中找不到llm配置: ");
}
const auto& llm_table = *data["llm"].as_table();
// 解析LLM设置
if (data.contains("llm") && data["llm"].is_table()) {
auto& llm_table = data["llm"].as_table();
for (const auto& [name, settings] : llm_table) {
LLMSettings llm_settings;
if (settings.contains("model") && settings["model"].is_string()) {
llm_settings.model = settings["model"].as_string();
if (llm_table.contains("model") && llm_table["model"].is_string()) {
llm_settings.model = llm_table["model"].as_string()->get();
} else {
throw std::runtime_error("Invalid `model` configuration");
}
if (settings.contains("api_key") && settings["api_key"].is_string()) {
llm_settings.api_key = settings["api_key"].as_string();
if (llm_table.contains("api_key") && llm_table["api_key"].is_string()) {
llm_settings.api_key = llm_table["api_key"].as_string()->get();
} else {
throw std::runtime_error("Invalid `api_key` configuration");
}
if (settings.contains("base_url") && settings["base_url"].is_string()) {
llm_settings.base_url = settings["base_url"].as_string();
if (llm_table.contains("base_url") && llm_table["base_url"].is_string()) {
llm_settings.base_url = llm_table["base_url"].as_string()->get();
} else {
throw std::runtime_error("Invalid `base_url` configuration");
}
if (settings.contains("end_point") && settings["end_point"].is_string()) {
llm_settings.end_point = settings["end_point"].as_string();
if (llm_table.contains("end_point") && llm_table["end_point"].is_string()) {
llm_settings.end_point = llm_table["end_point"].as_string()->get();
} else {
throw std::runtime_error("Invalid `end_point` configuration");
}
if (settings.contains("max_tokens") && settings["max_tokens"].is_integer()) {
llm_settings.max_tokens = settings["max_tokens"].as_integer();
if (llm_table.contains("max_tokens") && llm_table["max_tokens"].is_integer()) {
llm_settings.max_tokens = llm_table["max_tokens"].as_integer()->get();
} else {
llm_settings.max_tokens = 4096;
}
if (settings.contains("temperature") && settings["temperature"].is_floating()) {
llm_settings.temperature = settings["temperature"].as_floating();
}
_config.llm[name] = llm_settings;
}
if (llm_table.contains("temperature") && llm_table["temperature"].is_floating_point()) {
llm_settings.temperature = llm_table["temperature"].as_floating_point()->get();
} else {
llm_settings.temperature = 1.0;
}
_config.llm["default"] = llm_settings;
} catch (const std::exception& e) {
std::cerr << "加载配置文件失败: " << e.what() << std::endl;
// 设置默认配置

View File

@ -33,10 +33,10 @@ struct LLMSettings {
double temperature;
LLMSettings(
std::string model = "",
std::string api_key = "",
std::string base_url = "",
std::string end_point = "/v1/chat/completions",
const std::string& model = "",
const std::string& api_key = "",
const std::string& base_url = "",
const std::string& end_point = "/v1/chat/completions",
int max_tokens = 4096,
double temperature = 1.0
) : model(model), api_key(api_key), base_url(base_url), end_point(end_point),
@ -54,27 +54,10 @@ struct LLMSettings {
}
};
/**
* @brief
*/
struct AppConfig {
std::map<std::string, LLMSettings> llm;
json to_json() const {
json j;
json llm_json;
for (const auto& [name, settings] : llm) {
llm_json[name] = settings.to_json();
}
j["llm"] = llm_json;
return j;
}
};
/**
* @class Config
* @brief TOML
*/
class Config {
private:
static Config* _instance;
@ -119,7 +102,7 @@ public:
* @brief
* @return
*/
static Config& getInstance() {
static Config& get_instance() {
if (_instance == nullptr) {
std::lock_guard<std::mutex> lock(_mutex);
if (_instance == nullptr) {
@ -133,7 +116,7 @@ public:
* @brief LLM
* @return LLM
*/
const std::map<std::string, LLMSettings>& getLLMSettings() const {
const std::map<std::string, LLMSettings>& llm() const {
return _config.llm;
}
@ -141,7 +124,7 @@ public:
* @brief
* @return
*/
const AppConfig& getConfig() const {
const AppConfig& get_config() const {
return _config;
}
};

View File

@ -1,16 +1,17 @@
#ifndef HUMANUS_FLOW_BASE_H
#define HUMANUS_FLOW_BASE_H
#include "../tool/base.h"
#include "../agent/base.h"
namespace humanus {
enum FlowType {
PLANING = 0
PLANNING = 0
};
const std::map<FlowType, std::string> FLOW_TYPE_MAP = {
{PLANING, "planning"}
{PLANNING, "planning"}
};
// Base class for execution flows supporting multiple agents
@ -19,10 +20,31 @@ struct BaseFlow {
std::vector<std::shared_ptr<BaseTool>> tools;
std::string primary_agent_key;
BaseFlow(const std::map<std::string, std::shared_ptr<BaseAgent>>& agents = {}, const std::vector<std::shared_ptr<BaseTool>>& tools = {}, const std::string& primary_agent_key = "") : agents(agents), tools(tools), primary_agent_key(primary_agent_key) {
BaseFlow(const std::map<std::string, std::shared_ptr<BaseAgent>>& agents = {}, const std::vector<std::shared_ptr<BaseTool>>& tools = {}, const std::string& primary_agent_key = "")
: agents(agents), tools(tools), primary_agent_key(primary_agent_key) {
// If primary agent not specified, use first agent
if (primary_agent_key.empty() && !agents.empty()) {
primary_agent_key = agents.begin()->first;
this->primary_agent_key = agents.begin()->first;
}
}
BaseFlow(const std::shared_ptr<BaseAgent>& agent, const std::vector<std::shared_ptr<BaseTool>>& tools = {}, const std::string& primary_agent_key = "")
: tools(tools), primary_agent_key(primary_agent_key) {
agents["default"] = agent;
// If primary agent not specified, use first agent
if (primary_agent_key.empty()) {
this->primary_agent_key = "default";
}
}
BaseFlow(const std::vector<std::shared_ptr<BaseAgent>>& agents_list, const std::vector<std::shared_ptr<BaseTool>>& tools = {}, const std::string& primary_agent_key = "")
: tools(tools), primary_agent_key(primary_agent_key) {
for (size_t i = 0; i < agents_list.size(); i++) {
agents["agent_" + std::to_string(i)] = agents_list[i];
}
// If primary agent not specified, use first agent
if (primary_agent_key.empty() && !agents.empty()) {
this->primary_agent_key = agents.begin()->first;
}
}
@ -42,7 +64,7 @@ struct BaseFlow {
}
// Execute the flow with the given input
virtual std::string execute(const std::string& input) = 0;
virtual std::string execute(const std::string& input_text) = 0;
};
}

View File

@ -3,16 +3,17 @@
#include "base.h"
#include "../agent/base.h"
#include "flow_planning.h"
#include "planning.h"
namespace humanus {
// Factory for creating different types of flows with support for multiple agents
struct FlowFactory {
static BaseFlow create_flow(FlowType flow_type, std::map<std::string, std::shared_ptr<BaseAgent>> agents, std::vector<std::shared_ptr<BaseTool>> tools, std::string primary_agent_key) {
template<typename... Args>
static std::unique_ptr<BaseFlow> create_flow(FlowType flow_type, Args&&... args) {
switch (flow_type) {
case FlowType::PLANNING:
return std::make_shared<PlanningFlow>(agents, tools, primary_agent_key);
return std::make_unique<PlanningFlow>(std::forward<Args>(args)...);
default:
throw std::invalid_argument("Unknown flow type: " + std::to_string(static_cast<int>(flow_type)));
}

View File

@ -1,4 +1,4 @@
#include "flow_planning.h"
#include "planning.h"
namespace humanus {
@ -7,13 +7,13 @@ namespace humanus {
std::shared_ptr<BaseAgent> PlanningFlow::get_executor(const std::string& step_type = "") const {
// If step type is provided and matches an agent key, use that agent
if (!step_type.empty() && agents.find(step_type) != agents.end()) {
return agents[step_type];
return agents.at(step_type);
}
// Otherwise use the first available executor or fall back to primary agent
for (const auto& key : executor_keys) {
if (agents.find(key) != agents.end()) {
return agents[key];
return agents.at(key);
}
}
@ -33,7 +33,7 @@ std::string PlanningFlow::execute(const std::string& input) {
_create_initial_plan(input);
// Verify plan was created successfully
if (planning_tool.plans.find(active_plan_id) == planning_tool.plans.end()) {
if (planning_tool->plans.find(active_plan_id) == planning_tool->plans.end()) {
logger->error("Plan creation failed. Plan ID " + active_plan_id + " not found in planning tool.");
return "Failed to create plan for: " + input;
}
@ -42,7 +42,7 @@ std::string PlanningFlow::execute(const std::string& input) {
std::string result = "";
while (true) {
// Get current step to execute
mcp::json step_info;
json step_info;
_get_current_step_info(current_step_index, step_info);
// Exit if no more steps or plan completed
@ -58,7 +58,7 @@ std::string PlanningFlow::execute(const std::string& input) {
result += step_result + "\n";
// Check if agent wants to terminate
if (executor->state == AgentState::TERMINATED) {
if (executor->state == AgentState::FINISHED) {
break;
}
}
@ -88,13 +88,13 @@ void PlanningFlow::_create_initial_plan(const std::string& request) {
auto response = llm->ask_tool(
{user_message},
{system_message},
{planning_tool.to_param()},
{planning_tool->to_param()},
"required"
);
// Process tool calls if present
if (response.contains("tool_calls") && !response["tool_calls"].empty()) {
tool_calls = ToolCall::from_json_list(response["tool_calls"]);
auto tool_calls = ToolCall::from_json_list(response["tool_calls"]);
for (const auto& tool_call : tool_calls) {
// Parse the arguments
@ -112,7 +112,7 @@ void PlanningFlow::_create_initial_plan(const std::string& request) {
args["plan_id"] = active_plan_id;
// Execute the tool via ToolCollection instead of directly
auto result = planning_tool.execute(tool_call.function.name, args);
auto result = planning_tool->execute(args);
logger->info("Plan creation result: " + result.to_string());
return;
@ -123,7 +123,7 @@ void PlanningFlow::_create_initial_plan(const std::string& request) {
logger->warn("Creating default plan");
// Create default plan using the ToolCollection
planning_tool.execute({
planning_tool->execute({
{"command", "create"},
{"plan_id", active_plan_id},
{"title", request.substr(0, std::min(50, static_cast<int>(request.size()))) + (request.size() > 50 ? "..." : "")},
@ -133,23 +133,23 @@ void PlanningFlow::_create_initial_plan(const std::string& request) {
// Parse the current plan to identify the first non-completed step's index and info.
// Returns (None, None) if no active step is found.
void PlanningFlow::_get_current_step_info(int& current_step_index, mcp::json& step_info) {
if (active_plan_id.empty() || planning_tool.plans.find(active_plan_id) == planning_tool.plans.end()) {
void PlanningFlow::_get_current_step_info(int& current_step_index, json& step_info) {
if (active_plan_id.empty() || planning_tool->plans.find(active_plan_id) == planning_tool->plans.end()) {
logger->error("Plan with ID " + active_plan_id + " not found");
current_step_index = -1;
step_info = mcp::json::object();
step_info = json::object();
return;
}
try {
// Direct access to plan data from planning tool storage
mcp::json& plan_data = planning_tool.plans[active_plan_id];
mcp::json steps = plan_data.value("steps", mcp::json::array());
mcp::json step_status = plan_data.value("status", mcp::json::array());
json& plan_data = planning_tool->plans[active_plan_id];
json steps = plan_data.value("steps", json::array());
json step_statuses = plan_data.value("step_statuses", json::array());
// Find first non-completed step
for (size_t i = 0; i < steps.size(); ++i) {
const auto& step = steps[i];
const auto& step = steps[i].get<std::string>();
std::string step_status;
if (i >= step_status.size()) {
@ -161,8 +161,8 @@ void PlanningFlow::_get_current_step_info(int& current_step_index, mcp::json& st
if (step_status == "not_started" || step_status == "in_progress") {
// Extract step type/category if available
step_info = {
{"type", step},
}
{"type", step}
};
}
// Try to extract step type from the text (e.g., [SEARCH] or [CODE])
@ -174,7 +174,7 @@ void PlanningFlow::_get_current_step_info(int& current_step_index, mcp::json& st
// Mark current step as in_progress
try {
planning_tool.execute({
planning_tool->execute({
{"command", "mark_step"},
{"plan_id", active_plan_id},
{"step_index", i},
@ -183,13 +183,13 @@ void PlanningFlow::_get_current_step_info(int& current_step_index, mcp::json& st
} catch (const std::exception& e) {
logger->error("Error marking step as in_progress: " + std::string(e.what()));
// Update step status directly if needed
if (i < step_status.size()) {
step_status[i] = "in_progress";
if (i < step_statuses.size()) {
step_statuses[i] = "in_progress";
} else {
while (i > step_status.size()) {
step_status.push_back("not_started");
while (i > step_statuses.size()) {
step_statuses.push_back("not_started");
}
step_status.push_back("in_progress");
step_statuses.push_back("in_progress");
}
plan_data["step_statuses"] = step_statuses;
@ -200,31 +200,31 @@ void PlanningFlow::_get_current_step_info(int& current_step_index, mcp::json& st
return;
}
current_step_index = -1;
step_info = mcp::json::object(); // No active step found
step_info = json::object(); // No active step found
} catch (const std::exception& e) {
logger->error("Error finding current step index: " + std::string(e.what()));
current_step_index = -1;
step_info = mcp::json::object();
step_info = json::object();
}
}
// Execute the current step with the specified agent using agent.run().
std::string PlanningFlow::_execute_step(const std::shared_ptr<BaseAgent>& executor, const mcp::json& step_info) {
std::string PlanningFlow::_execute_step(const std::shared_ptr<BaseAgent>& executor, const json& step_info) {
// Prepare context for the agent with current plan status
mcp::json plan_status = _get_plan_status();
json plan_status = _get_plan_text();
std::string step_text = step_info.value("text", "Step " + std::to_string(current_step_index));
// Create a prompt for the agent to execute the current step
std::string step_prompt;
step_prompt += "\n\nCURRENT PLAN STATUS:\n";
step_prompt += plan_status.dump(4);
step_prompt += "\nCURRENT PLAN STATUS:\n";
step_prompt += plan_status.dump(2);
step_prompt += "\n\nYOUR CURRENT TASK:\n";
step_prompt += "You are now working on step " + std::to_string(current_step_index) + ": \"" + step_text + "\"\n";
step_prompt += "Please execute this step using the appropriate tools. When you're done, provide a summary of what you accomplished.";
// Use agent.run() to execute the step
try {
std::string result = executor->run(step_prompt);
std::string step_result = executor->run(step_prompt);
// Mark the step as completed after successful execution
_mark_step_completed();
@ -244,7 +244,7 @@ void PlanningFlow::_mark_step_completed() {
try {
// Mark the step as completed
planning_tool.execute({
planning_tool->execute({
{"command", "mark_step"},
{"plan_id", active_plan_id},
{"step_index", current_step_index},
@ -254,11 +254,11 @@ void PlanningFlow::_mark_step_completed() {
"Marked step " + std::to_string(current_step_index) + " as completed in plan " + active_plan_id
);
} catch (const std::exception& e) {
LOG_WARN("Failed to update plan status: " + std::string(e.what()));
logger->warn("Failed to update plan status: " + std::string(e.what()));
// Update step status directly in planning tool storage
if (planning_tool.plans.find(active_plan_id) != planning_tool.plans.end()) {
mcp::json& plan_data = planning_tool.plans[active_plan_id];
mcp::json step_statuses = plan_data.value("step_statuses", mcp::json::array());
if (planning_tool->plans.find(active_plan_id) != planning_tool->plans.end()) {
json& plan_data = planning_tool->plans[active_plan_id];
json step_statuses = plan_data.value("step_statuses", json::array());
// Ensure the step_statuses list is long enough
while (current_step_index >= step_statuses.size()) {
@ -275,12 +275,12 @@ void PlanningFlow::_mark_step_completed() {
// Get the current plan as formatted text.
std::string PlanningFlow::_get_plan_text() {
try {
auto result = planning_tool.execute({
auto result = planning_tool->execute({
{"command", "get"},
{"plan_id", active_plan_id}
});
return result.to_string();
return !result.output.empty() ? result.output.dump() : result.to_string();
} catch (const std::exception& e) {
LOG_ERROR("Error getting plan: " + std::string(e.what()));
return _generate_plan_text_from_storage();
@ -290,15 +290,15 @@ std::string PlanningFlow::_get_plan_text() {
// Generate plan text directly from storage if the planning tool fails.
std::string PlanningFlow::_generate_plan_text_from_storage() {
try {
if (planning_tool.plans.find(active_plan_id) == planning_tool.plans.end()) {
if (planning_tool->plans.find(active_plan_id) == planning_tool->plans.end()) {
return "Error: Plan with ID " + active_plan_id + " not found";
}
mcp::json& plan_data = planning_tool.plans[active_plan_id];
auto title = plan_data.value("title", "");
auto steps = plan_data.value("steps", mcp::json::array());
auto step_statuses = plan_data.value("step_statuses", mcp::json::array());
auto step_notes = plan_data.value("step_notes", mcp::json::array());
json& plan_data = planning_tool->plans[active_plan_id];
auto title = plan_data.value("title", "Untitled Plan");
auto steps = plan_data.value("steps", json::array());
auto step_statuses = plan_data.value("step_statuses", json::array());
auto step_notes = plan_data.value("step_notes", json::array());
// Ensure step_statuses and step_notes match the number of steps
while (step_statuses.size() < steps.size()) {
@ -309,7 +309,7 @@ std::string PlanningFlow::_generate_plan_text_from_storage() {
}
// Count steps by status
mcp::json status_counts = {
std::map<std::string, int> status_counts = {
{"completed", 0},
{"in_progress", 0},
{"blocked", 0},
@ -317,8 +317,8 @@ std::string PlanningFlow::_generate_plan_text_from_storage() {
};
for (const auto& status : step_statuses) {
if (status_counts.contains(status)) {
status_counts[status]++;
if (status_counts.find(status) != status_counts.end()) {
status_counts[status] = status_counts[status] + 1;
}
}
@ -331,8 +331,8 @@ std::string PlanningFlow::_generate_plan_text_from_storage() {
plan_text_ss << std::string(plan_text_ss.str().size(), '=') << "\n\n";
plan_text_ss << "Total steps: " << completed << "/" << total << " steps completed (" << std::fixed << std::setprecision(1) << progress << "%)\n";
plan_text_ss << "Status: " << status_counts["completed"].get<int>() << " completed, " << status_counts["in_progress"].get<int>() << " in progress, "
<< status_counts["blocked"].get<int>() << " blocked, " << status_counts["not_started"].get<int>() << " not started\n\n";
plan_text_ss << "Status: " << status_counts["completed"] << " completed, " << status_counts["in_progress"] << " in progress, "
<< status_counts["blocked"] << " blocked, " << status_counts["not_started"] << " not started\n\n";
plan_text_ss << "Steps:\n";
for (size_t i = 0; i < steps.size(); ++i) {
@ -350,6 +350,8 @@ std::string PlanningFlow::_generate_plan_text_from_storage() {
status_mark = "[!]";
} else if (status == "not_started") {
status_mark = "[ ]";
} else { // unknown status
status_mark = "[?]";
}
plan_text_ss << i << ". " << status_mark << " " << step << "\n";
@ -385,7 +387,7 @@ std::string PlanningFlow::_finalize_plan() {
{system_message}
);
return response.to_string();
return response;
} catch (const std::exception& e) {
LOG_ERROR("Error finalizing plan with LLM: " + std::string(e.what()));
@ -394,7 +396,7 @@ std::string PlanningFlow::_finalize_plan() {
auto agent = primary_agent();
std::string summary_prompt = "\nThe plan has been completed. Here is the final plan status:\n\n";
summary_prompt += plan_text + "\n\n";
summary_prompt += "Please provide a summary of what was accomplished and any final thoughts\n";
summary_prompt += "Please provide a summary of what was accomplished and any final thoughts.\n";
std::string summary = agent->run(summary_prompt);
return "Plan completed:\n\n" + summary;
} catch (const std::exception& e2) {

View File

@ -12,28 +12,35 @@
namespace humanus {
// A flow that manages planning and execution of tasks using agents.
struct FlowPlanning : public BaseFlow {
struct PlanningFlow : public BaseFlow {
std::shared_ptr<LLM> llm;
PlanningTool planning_tool;
std::shared_ptr<PlanningTool> planning_tool;
std::vector<std::string> executor_keys;
std::string active_plan_id;
int current_step_index = -1;
FlowPlanning(const std::shared_ptr<LLM>& llm,
const PlanningTool& planning_tool = PlanningTool(),
PlanningFlow(
const std::shared_ptr<LLM>& llm = nullptr,
const std::shared_ptr<PlanningTool>& planning_tool = nullptr,
const std::vector<std::string>& executor_keys = {},
const std::string& active_plan_id = "",
const std::map<std::string, std::shared_ptr<BaseAgent>>& agents = {},
const std::vector<std::shared_ptr<BaseTool>>& tools = {},
const std::string& primary_agent_key = "")
: BaseFlow(agents, tools, primary_agent_key),
const std::string& primary_agent_key = ""
) : BaseFlow(agents, tools, primary_agent_key),
llm(llm),
planning_tool(planning_tool),
executor_keys(executor_keys),
active_plan_id(active_plan_id) {
if (!llm) {
this->llm = LLM::get_instance();
}
if (!planning_tool) {
this->planning_tool = std::make_shared<PlanningTool>();
}
if (executor_keys.empty()) {
for (const auto& [key, agent] : agents) {
executor_keys.push_back(key);
this->executor_keys.push_back(key);
}
}
}
@ -50,10 +57,10 @@ struct FlowPlanning : public BaseFlow {
// Parse the current plan to identify the first non-completed step's index and info.
// Returns (None, None) if no active step is found.
void _get_current_step_info(int& current_step_index, mcp::json& step_info);
void _get_current_step_info(int& current_step_index, json& step_info);
// Execute the current step with the specified agent using agent.run().
std::string _execute_step(const std::shared_ptr<BaseAgent>& executor, const mcp::json& step_info);
std::string _execute_step(const std::shared_ptr<BaseAgent>& executor, const json& step_info);
// Mark the current step as completed.
void _mark_step_completed();

48
llm.h
View File

@ -19,23 +19,16 @@ class LLM {
private:
static std::map<std::string, std::shared_ptr<LLM>> _instances;
std::string model;
std::string api_key;
int max_tokens;
double temperature;
std::unique_ptr<httplib::Client> client_ = nullptr;
int max_retries = 3;
std::unique_ptr<httplib::Client> client_;
LLMSettings llm_config_;
// 私有构造函数,防止直接创建实例
LLM(const std::string& config_name, const LLMSettings llm_config) : llm_config_(llm_config) {
model = llm_config.model;
api_key = llm_config.api_key;
max_tokens = llm_config.max_tokens;
temperature = llm_config.temperature;
client_ = std::make_unique<httplib::Client>(llm_config.base_url);
client_->set_default_headers({
{"Authorization", "Bearer " + llm_config_.api_key}
});
}
public:
@ -65,7 +58,7 @@ public:
if (message["role"] != "user" && message["role"] != "assistant" && message["role"] != "system" && message["role"] != "tool") {
throw std::invalid_argument("Invalid role: " + message["role"]);
}
if (!message.contains("content") && !message.contains("tool_calls")) {
if (message["content"].empty() && message["tool_calls"].empty()) {
throw std::invalid_argument("Message must contain either 'content' or 'tool_calls'");
}
}
@ -94,7 +87,7 @@ public:
if (message["role"] != "user" && message["role"] != "assistant" && message["role"] != "system" && message["role"] != "tool") {
throw std::invalid_argument("Invalid role: " + message["role"]);
}
if (!message.contains("content") && !message.contains("tool_calls")) {
if (message["content"].empty() && message["tool_calls"].empty()) {
throw std::invalid_argument("Message must contain either 'content' or 'tool_calls'");
}
}
@ -127,23 +120,19 @@ public:
formatted_messages.insert(formatted_messages.end(), _formatted_messages.begin(), _formatted_messages.end());
json body = {
{"model", model},
{"model", llm_config_.model},
{"messages", formatted_messages},
{"temperature", temperature},
{"max_tokens", max_tokens}
{"temperature", llm_config_.temperature},
{"max_tokens", llm_config_.max_tokens}
};
std::string body_str = body.dump();
httplib::Headers headers = {
{"Authorization", "Bearer " + api_key}
};
int retry = 0;
while (retry <= max_retries) {
// send request
auto res = client_->Post(llm_config_.end_point, headers, body_str, "application/json");
auto res = client_->Post(llm_config_.end_point, body_str, "application/json");
if (!res) {
logger->error("Failed to send request: " + httplib::to_string(res.error()));
@ -176,6 +165,7 @@ public:
* @param timeout
* @param tools
* @param tool_choice
* @param max_retries
* @return assistant message (content, tool_calls)
* @throws std::invalid_argument
* @throws std::runtime_error API
@ -185,7 +175,8 @@ public:
const std::vector<Message>& system_msgs = {},
const std::vector<json> tools = {},
const std::string& tool_choice = "auto",
int timeout = 60
int timeout = 60,
int max_retries = 3
) {
if (tool_choice != "none" && tool_choice != "auto" && tool_choice != "required") {
throw std::invalid_argument("Invalid tool_choice: " + tool_choice);
@ -210,10 +201,10 @@ public:
}
json body = {
{"model", model},
{"model", llm_config_.model},
{"messages", formatted_messages},
{"temperature", temperature},
{"max_tokens", max_tokens},
{"temperature", llm_config_.temperature},
{"max_tokens", llm_config_.max_tokens},
{"tools", tools},
{"tool_choice", tool_choice}
};
@ -222,15 +213,11 @@ public:
std::string body_str = body.dump();
httplib::Headers headers = {
{"Authorization", "Bearer " + api_key}
};
int retry = 0;
while (retry <= max_retries) {
// send request
auto res = client_->Post(llm_config_.end_point, headers, body_str, "application/json");
auto res = client_->Post(llm_config_.end_point, body_str, "application/json");
if (!res) {
logger->error("Failed to send request: " + httplib::to_string(res.error()));
@ -254,7 +241,6 @@ public:
}
throw std::runtime_error("Failed to get response from LLM");
}
};

View File

@ -14,8 +14,6 @@ namespace humanus {
static spdlog::level::level_enum _print_level = spdlog::level::info;
static std::shared_ptr<spdlog::logger> logger = spdlog::default_logger();
/**
* @brief
* @param print_level
@ -43,22 +41,23 @@ std::shared_ptr<spdlog::logger> define_log_level(spdlog::level::level_enum print
std::filesystem::create_directories((PROJECT_ROOT / "logs").string());
// 重置日志输出
// 清除现有的sinks
logger->sinks().clear();
std::shared_ptr<spdlog::logger> _logger = spdlog::default_logger();
// 添加标准错误输出sink相当于Python中的sys.stderr
auto stderr_sink = std::make_shared<spdlog::sinks::stderr_color_sink_mt>();
stderr_sink->set_level(print_level);
logger->sinks().push_back(stderr_sink);
_logger->sinks().push_back(stderr_sink);
// 添加文件sink相当于Python中的PROJECT_ROOT / f"logs/{log_name}.log"
auto file_sink = std::make_shared<spdlog::sinks::basic_file_sink_mt>(log_file_path, true);
file_sink->set_level(logfile_level);
logger->sinks().push_back(file_sink);
_logger->sinks().push_back(file_sink);
return logger;
return _logger;
}
static std::shared_ptr<spdlog::logger> logger = define_log_level();
} // namespace humanus
#endif

View File

@ -1,9 +1,46 @@
#include "agent/manus.h"
#include "logger.h"
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined (_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <signal.h>
#endif
using namespace humanus;
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
static void sigint_handler(int signo) {
if (signo == SIGINT) {
logger->warn("Goodbye!");
exit(0);
}
}
#endif
int main() {
// ctrl+C handling
{
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
struct sigaction sigint_action;
sigint_action.sa_handler = sigint_handler;
sigemptyset (&sigint_action.sa_mask);
sigint_action.sa_flags = 0;
sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
};
SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif
}
Manus agent = Manus();
while (true) {
std::string prompt;

2
mcp

@ -1 +1 @@
Subproject commit 84e2e092f0b90e44aede47469dc58c1b78e7db36
Subproject commit 5bc09d6ab03c3d793c3550954fc5cd959d017303

View File

@ -13,11 +13,9 @@ const char* NEXT_STEP_PROMPT = R"(You can interact with the computer using Pytho
PythonExecute: Execute Python code to interact with the computer system, data processing, automation tasks, etc.
FileSaver: Save files locally, such as txt, py, html, etc.
FileSystem: Read/write files locally, such as txt, py, html, etc. Create/list/delete directories, move files/directories, search for files and get file metadata.
BrowserUseTool: Open, browse, and use web browsers.If you open a local HTML file, you must provide the absolute path to the file.
GoogleSearch: Perform web information retrieval
Puppeteer: Open, browse, and get screenshots of web pages using Puppeteer, a headless Chrome browser.
Based on user needs, proactively select the most appropriate tool or combination of tools. For complex tasks, you can break down the problem and use different tools step by step to solve it. After using each tool, clearly explain the execution results and suggest the next steps.)";
} // namespace manus
@ -44,7 +42,7 @@ namespace swe {
const char* SYSTEM_PROMPT = R"(SETTING: You are an autonomous programmer, and you're working directly in the command line with a special interface.
The special interface consists of a file editor that shows you {WINDOW} lines of a file at a time.
In addition to typical bash commands, you can also use specific commands to help you navigate and edit files.
In addition to typical shell commands, you can also use specific commands to help you navigate and edit files.
To call a command, you need to invoke it with a function call/tool call.
Please note that THE EDIT COMMAND REQUIRES PROPER INDENTATION.
@ -54,7 +52,7 @@ RESPONSE FORMAT:
Your shell prompt is formatted as follows:
(Open file: <path>)
(Current directory: <cwd>)
bash-$
shell-$
First, you should _always_ include a general thought about what you're going to do next.
Then, for every response, you must include exactly _ONE_ tool call/function call.
@ -66,7 +64,7 @@ Note that the environment does NOT support interactive session commands (e.g. py
const char* NEXT_STEP_TEMPLATE = R"({observation}
(Open file: {open_file})
(Current directory: {working_dir})
bash-$)";
shell-$)";
} // namespace swe
namespace toolcall {

View File

@ -52,7 +52,7 @@ struct ToolCall {
json tool_call;
tool_call["id"] = id;
tool_call["type"] = type;
tool_call["function"] = function.to_json().dump();
tool_call["function"] = function.to_json();
return tool_call;
}
@ -117,7 +117,7 @@ struct Message {
json to_json() const {
json message;
message["role"] = role;
if (!content.is_null()) {
if (!content.empty()) {
message["content"] = content;
}
if (!tool_calls.empty()) {
@ -152,7 +152,7 @@ struct Message {
return Message("assistant", content);
}
static Message tool_message(const json& content, const std::string& name = "", const std::string& tool_call_id = "") {
static Message tool_message(const json& content, const std::string& tool_call_id = "", const std::string& name = "") {
return Message("tool", content, name, tool_call_id);
}
@ -164,17 +164,6 @@ struct Message {
* @return Message with tool calls
*/
static Message from_tool_calls(const std::vector<ToolCall>& tool_calls, const json& content = json::object()) {
std::vector<json> formatted_calls;
formatted_calls.reserve(tool_calls.size());
for (const auto& call : tool_calls) {
json formatted_call;
formatted_call["id"] = call.id;
formatted_call["type"] = "function";
formatted_call["function"] = call.function.to_json();
formatted_calls.push_back(formatted_call);
}
return Message("assistant", content, "", "", tool_calls);
}
};
@ -200,11 +189,6 @@ struct Memory {
}
}
// Set the messages
void set_messages(const std::vector<Message>& messages) {
this->messages = messages;
}
// Clear all messages
void clear() {
messages.clear();
@ -212,6 +196,7 @@ struct Memory {
// Get the last n messages
std::vector<Message> get_recent_messages(int n) const {
n = std::min(n, static_cast<int>(messages.size()));
return std::vector<Message>(messages.end() - n, messages.end());
}
@ -225,7 +210,6 @@ struct Memory {
}
};
} // namespace humanus
#endif // HUMANUS_SCHEMA_H

View File

@ -1,8 +1,12 @@
#ifndef HUMANUS_TOOL_BASE_H
#define HUMANUS_TOOL_BASE_H
#include "toml.hpp"
#include "../schema.h"
#include "../agent/base.h"
#include "../mcp/include/mcp_client.h"
#include "../mcp/include/mcp_stdio_client.h"
#include "../mcp/include/mcp_sse_client.h"
#include <string>
namespace humanus {
@ -13,12 +17,12 @@ struct BaseTool {
std::string description;
json parameters;
std::unique_ptr<mcp::client> client_;
std::unique_ptr<mcp::client> _client;
BaseTool(const std::string& name, const std::string& description, const json& parameters) :
name(name), description(description), parameters(parameters) {
// 从配置文件加载工具配置
_config = MCPToolConfig::load_from_toml(name);
auto _config = MCPToolConfig::load_from_toml(name);
if (_config.type == "stdio") {
std::string command = _config.command;
@ -38,15 +42,21 @@ struct BaseTool {
}
}
client_->initialize(name + "_client", "0.0.1");
_client->initialize(name + "_client", "0.0.1");
}
// Execute the tool with given parameters.
ToolResult operator()(const json& arguments) {
return execute(arguments);
}
// Execute the tool with given parameters.
virtual ToolResult execute(const json& arguments) {
try {
if (!_client) {
throw std::runtime_error("MCP 客户端未初始化");
}
json result = _client->tool_call(name, arguments);
json result = _client->call_tool(name, arguments);
bool is_error = result.value("isError", false);
// 根据是否有错误返回不同的ToolResult
if (is_error) {
@ -80,16 +90,16 @@ struct ToolResult {
ToolResult(const json& output, const json& error = {}, const json& system = {})
: output(output), error(error), system(system) {}
bool is_null() const {
return output.is_null() && error.is_null() && system.is_null();
bool empty() const {
return output.empty() && error.empty() && system.empty();
}
ToolResult operator+(const ToolResult& other) const {
auto combined_field = [](const json& field, const json& other_field) {
if (field.is_null()) {
if (field.empty()) {
return other_field;
}
if (other_field.is_null()) {
if (other_field.empty()) {
return field;
}
json result = json::array();
@ -114,15 +124,10 @@ struct ToolResult {
}
std::string to_string() const {
return !error.is_null() ? "Error: " + error.dump() : output.dump();
return !error.empty() ? "Error: " + error.dump() : output.dump();
}
};
// A ToolResult that can be rendered as a CLI output.
struct CLIResult : ToolResult {
};
// A ToolResult that represents a failure.
struct ToolError : ToolResult {
ToolError(const std::string& error) : ToolResult({}, error) {}
@ -134,6 +139,10 @@ struct AgentAware : ToolResult {
// 从config_mcp.toml中读取工具配置
struct MCPToolConfig {
std::string type;
std::string host;
std::string port;
std::string url;
std::string command;
std::vector<std::string> args;
json env_vars = json::object();
@ -149,28 +158,34 @@ struct MCPToolConfig {
}
// 解析TOML文件
auto data = toml::parse_file(config_path.string());
const auto& data = toml::parse_file(config_path.string());
// 检查工具配置是否存在
if (!data.contains(tool_name) || !data[tool_name].is_table()) {
throw std::runtime_error("MCP配置文件中找不到工具配置: " + tool_name);
}
auto& tool_table = data[tool_name].as_table();
const auto& tool_table = *data[tool_name].as_table();
// 读取命令
if (tool_table.contains("command") && tool_table["command"].is_string()) {
config.command = tool_table["command"].as_string();
} else {
throw std::runtime_error("工具配置缺少command字段: " + tool_name);
// 读取类型
if (!tool_table.contains("type") || !tool_table["type"].is_string()) {
throw std::runtime_error("工具配置缺少type字段: " + tool_name);
}
config.type = tool_table["type"].as_string()->get();
if (config.type == "stdio") {
// 读取命令
if (!tool_table.contains("command") || !tool_table["command"].is_string()) {
throw std::runtime_error("stdio类型工具配置缺少command字段: " + tool_name);
}
config.command = tool_table["command"].as_string()->get();
// 读取参数(如果有)
if (tool_table.contains("args") && tool_table["args"].is_array()) {
auto& args_array = tool_table["args"].as_array();
const auto& args_array = *tool_table["args"].as_array();
for (const auto& arg : args_array) {
if (arg.is_string()) {
config.args.push_back(arg.as_string());
config.args.push_back(arg.as_string()->get());
}
}
}
@ -178,21 +193,40 @@ struct MCPToolConfig {
// 读取环境变量
std::string env_section = tool_name + ".env";
if (data.contains(env_section) && data[env_section].is_table()) {
auto& env_table = data[env_section].as_table();
const auto& env_table = *data[env_section].as_table();
for (const auto& [key, value] : env_table) {
if (value.is_string()) {
config.env_vars[key] = value.as_string();
config.env_vars[key] = value.as_string()->get();
} else if (value.is_integer()) {
config.env_vars[key] = value.as_integer();
} else if (value.is_floating()) {
config.env_vars[key] = value.as_floating();
config.env_vars[key] = value.as_integer()->get();
} else if (value.is_floating_point()) {
config.env_vars[key] = value.as_floating_point()->get();
} else if (value.is_boolean()) {
config.env_vars[key] = value.as_boolean();
config.env_vars[key] = value.as_boolean()->get();
}
}
}
} else if (config.type == "sse") {
// 读取host和port或url
if (tool_table.contains("url") && tool_table["url"].is_string()) {
config.url = tool_table["url"].as_string()->get();
} else {
if (!tool_table.contains("host") || !tool_table["host"].is_string()) {
throw std::runtime_error("sse类型工具配置缺少host字段: " + tool_name);
}
config.host = tool_table["host"].as_string()->get();
if (!tool_table.contains("port") || !tool_table["port"].is_string()) {
throw std::runtime_error("sse类型工具配置缺少port字段: " + tool_name);
}
config.port = tool_table["port"].as_string()->get();
}
} else {
throw std::runtime_error("不支持的工具类型: " + config.type);
}
} catch (const std::exception& e) {
std::cerr << "加载MCP工具配置失败: " << e.what() << std::endl;
throw;
}
return config;

View File

@ -14,28 +14,11 @@ json CreateChatCompletion::_build_parameters() const {
}},
{"required", required}
};
} else if (response_type == "object") {
// 处理对象类型
return {
{"type", "object"},
{"properties", json::object()},
{"required", required}
};
} else if (response_type == "array") {
// 处理数组类型
return {
{"type", "object"},
{"properties", {
{"response", {
{"type", "array"},
{"items", json::object()}
}}
}},
{"required", required}
};
} else {
throw std::runtime_error("Invalid response type: " + response_type);
}
// TODO: handle other types
return _create_type_schema(response_type);
}
json CreateChatCompletion::_create_type_schema(const std::string& type_hint) const {
@ -83,7 +66,16 @@ json CreateChatCompletion::_create_type_schema(const std::string& type_hint) con
}
// 默认返回字符串类型
return _build_parameters();
return {
{"type", "object"},
{"properties", {
{"response", {
{"type", "string"},
{"description", "The response text that should be delivered to the user."}
}}
}},
{"required", required}
};
}
json CreateChatCompletion::_get_type_info(const std::string& type_hint) const {
@ -108,25 +100,29 @@ json CreateChatCompletion::_create_union_schema(const std::vector<std::string>&
};
}
// Execute the chat completion with type conversion.
ToolResult CreateChatCompletion::execute(const json& args) {
std::vector<std::string> req_fields = args.contains("req") ? args["req"].get<std::vector<std::string>>() : required;
std::vector<std::string> req_fields = args.contains("required") ? args["required"].get<std::vector<std::string>>() : required;
// Handle case when required is a list
if (!req_fields.empty()) {
if (req_fields.size() == 1) {
std::string required_field = req_fields[0];
return args.contains(required_field) ? args.at(required_field) : "";
return ToolResult(args.contains(required_field) ? args.at(required_field) : "");
} else {
// 返回多个字段作为对象
// Return multiple fields as an object
json result = json::object();
for (const auto& field : req_fields) {
result[field] = args.contains(field) ? args.at(field) : "";
}
return result;
return ToolResult(result);
}
} else {
std::string required_field = "response";
return args.contains(required_field) ? args.at(required_field) : "";
return ToolResult(args.contains(required_field) ? args.at(required_field) : "");
}
// TODO: handle other types (Only string and array are supported for now)
}
} // namespace humanuse

View File

@ -26,9 +26,9 @@ struct CreateChatCompletion : BaseTool {
};
std::string response_type;
std::vector<std::string> required = {"response"};
std::vector<std::string> required;
CreateChatCompletion(const std::string& response_type) : BaseTool(name_, description_, json::object()), response_type(response_type) {
CreateChatCompletion(const std::string& response_type = "string", const std::vector<std::string>& required = {"response"}) : BaseTool(name_, description_, json::object()), response_type(response_type), required(required) {
parameters = _build_parameters();
}

View File

@ -70,10 +70,19 @@ struct FileSystem : BaseTool {
return ToolError("Failed to initialize shell client");
}
json tool_args = args;
tool_args.erase("tool");
// 处理命令参数
std::string tool;
if (args.contains("tool")) {
if (args["tool"].is_string()) {
tool = args["tool"].get<std::string>();
} else {
return ToolError("Invalid tool format");
}
} else {
return ToolError("Tool is required");
}
json result = _client->call_tool(args["tool"].get<std::string>(), tool_args);
json result = _client->call_tool("puppeteer_" + tool, args);
bool is_error = result.value("isError", false);

View File

@ -3,25 +3,26 @@
namespace humanus {
/**
* Execute the planning tool with the given command and parameters.
*
* Parameters:
* - command: The operation to perform
* - plan_id: Unique identifier for the plan
* - title: Title for the plan (used with create command)
* - steps: List of steps for the plan (used with create command)
* - step_index: Index of the step to update (used with mark_step command)
* - step_status: Status to set for a step (used with mark_step command)
* - step_notes: Additional notes for a step (used with mark_step command)
*/
* Execute the planning tool with the given command and parameters.
*
* Parameters:
* - command: The operation to perform
* - plan_id: Unique identifier for the plan
* - title: Title for the plan (used with create command)
* - steps: List of steps for the plan (used with create command)
* - step_index: Index of the step to update (used with mark_step command)
* - step_status: Status to set for a step (used with mark_step command)
* - step_notes: Additional notes for a step (used with mark_step command)
*/
ToolResult PlanningTool::execute(const json& args) {
std::string command = args["command"];
std::string plan_id = args["plan_id"];
std::string title = args["title"];
std::vector<std::string> steps = args["steps"];
int step_index = args["step_index"];
std::string step_status = args["step_status"];
std::string step_notes = args["step_notes"];
try {
std::string command = args.value("command", "");
std::string plan_id = args.value("plan_id", "");
std::string title = args.value("title", "");
std::vector<std::string> steps = args.value("steps", std::vector<std::string>());
int step_index = args.value("step_index", -1);
std::string step_status = args.value("step_status", "");
std::string step_notes = args.value("step_notes", "");
if (command == "create") {
return _create_plan(plan_id, title, steps);
@ -40,24 +41,27 @@ ToolResult PlanningTool::execute(const json& args) {
} else {
throw std::runtime_error("Unrecognized command: " + command + ". Allowed commands are: create, update, list, get, set_active, mark_step, delete");
}
} catch (const std::exception& e) {
return ToolError(e.what());
}
}
// Create a new plan with the given ID, title, and steps.
ToolResult PlanningTool::_create_plan(const std::string& plan_id, const std::string& title, const std::vector<std::string>& steps) {
if (plan_id.empty()) {
throw std::runtime_error("Parameter `plan_id` is required for command: create");
return ToolError("Parameter `plan_id` is required for command: create");
}
if (plans.find(plan_id) != plans.end()) {
throw std::runtime_error("Plan with ID " + plan_id + " already exists. Use 'update' to modify existing plans.");
return ToolError("Plan with ID " + plan_id + " already exists. Use 'update' to modify existing plans.");
}
if (title.empty()) {
throw std::runtime_error("Parameter `title` is required for command: create");
return ToolError("Parameter `title` is required for command: create");
}
if (steps.empty()) {
throw std::runtime_error("Parameter `steps` must be a non-empty list of strings for command: create");
return ToolError("Parameter `steps` must be a non-empty list of strings for command: create");
}
// Create a new plan with initialized step statuses
@ -70,7 +74,7 @@ ToolResult PlanningTool::_create_plan(const std::string& plan_id, const std::str
};
plans[plan_id] = plan;
_current_plan_id = plan_id;
_current_plan_id = plan_id; // Set as active plan
return ToolResult(
"Plan created successfully with ID: " + plan_id + "\n\n" + _format_plan(plan)
@ -80,11 +84,11 @@ ToolResult PlanningTool::_create_plan(const std::string& plan_id, const std::str
// Update an existing plan with new title or steps.
ToolResult PlanningTool::_update_plan(const std::string& plan_id, const std::string& title, const std::vector<std::string>& steps) {
if (plan_id.empty()) {
throw std::runtime_error("Parameter `plan_id` is required for command: update");
return ToolError("Parameter `plan_id` is required for command: update");
}
if (plans.find(plan_id) == plans.end()) {
throw std::runtime_error("No plan found with ID: " + plan_id);
return ToolError("No plan found with ID: " + plan_id);
}
json plan = plans[plan_id];
@ -119,14 +123,13 @@ ToolResult PlanningTool::_update_plan(const std::string& plan_id, const std::str
plan["step_notes"] = new_step_notes;
}
plans[plan_id] = plan;
plans[plan_id] = plan; // Note: Remember to update the plan in the map
return ToolResult(
"Plan updated successfully with ID: " + plan_id + "\n\n" + _format_plan(plan)
);
}
// List all available plans.
ToolResult PlanningTool::_list_plans() {
if (plans.empty()) {
@ -143,7 +146,7 @@ ToolResult PlanningTool::_list_plans() {
});
int total = plan["steps"].size();
std::string progress = std::to_string(completed) + "/" + std::to_string(total) + " steps completed";
output += "" + plan_id + current_marker + ": " + plan["title"].get<std::string>() + " - " + progress + "\n";
output += "" + plan_id + current_marker + ": " + plan.value("title", "Unknown Plan") + " - " + progress + "\n";
}
return ToolResult(output);
@ -156,13 +159,13 @@ ToolResult PlanningTool::_get_plan(const std::string& plan_id) {
if (plan_id.empty()) {
// If no plan_id is provided, use the current active plan
if (_current_plan_id.empty()) {
throw std::runtime_error("No active plan. Please specify a plan_id or set an active plan.");
return ToolError("No active plan. Please specify a plan_id or set an active plan.");
}
_plan_id = _current_plan_id;
}
if (plans.find(_plan_id) == plans.end()) {
throw std::runtime_error("No plan found with ID: " + _plan_id);
return ToolError("No plan found with ID: " + _plan_id);
}
json plan = plans[_plan_id];
@ -172,11 +175,11 @@ ToolResult PlanningTool::_get_plan(const std::string& plan_id) {
// Set a plan as the active plan.
ToolResult PlanningTool::_set_active_plan(const std::string& plan_id) {
if (plan_id.empty()) {
throw std::runtime_error("Parameter `plan_id` is required for command: set_active");
return ToolError("Parameter `plan_id` is required for command: set_active");
}
if (plans.find(plan_id) == plans.end()) {
throw std::runtime_error("No plan found with ID: " + plan_id);
return ToolError("No plan found with ID: " + plan_id);
}
_current_plan_id = plan_id;
@ -192,24 +195,24 @@ ToolResult PlanningTool::_mark_step(const std::string& plan_id, int step_index,
if (plan_id.empty()) {
// If no plan_id is provided, use the current active plan
if (_current_plan_id.empty()) {
throw std::runtime_error("No active plan. Please specify a plan_id or set an active plan.");
return ToolError("No active plan. Please specify a plan_id or set an active plan.");
}
_plan_id = _current_plan_id;
}
if (plans.find(_plan_id) == plans.end()) {
throw std::runtime_error("No plan found with ID: " + _plan_id);
return ToolError("No plan found with ID: " + _plan_id);
}
json plan = plans[_plan_id];
if (step_index < 0 || step_index >= plan["steps"].size()) {
throw std::runtime_error("Invalid step index: " + std::to_string(step_index) + ". Valid indices range from 0 to " + std::to_string((int)plan["steps"].size() - 1));
return ToolError("Invalid step index: " + std::to_string(step_index) + ". Valid indices range from 0 to " + std::to_string((int)plan["steps"].size() - 1));
}
if (!step_status.empty()) {
if (step_status != "not_started" && step_status != "in_progress" && step_status != "completed" && step_status != "blocked") {
throw std::runtime_error("Invalid step status: " + step_status + ". Valid statuses are: not_started, in_progress, completed, blocked");
return ToolError("Invalid step status: " + step_status + ". Valid statuses are: not_started, in_progress, completed, blocked");
}
plan["step_statuses"][step_index] = step_status;
}
@ -228,18 +231,18 @@ ToolResult PlanningTool::_mark_step(const std::string& plan_id, int step_index,
// Delete a plan.
ToolResult PlanningTool::_delete_plan(const std::string& plan_id) {
if (plan_id.empty()) {
throw std::runtime_error("Parameter `plan_id` is required for command: delete");
return ToolError("Parameter `plan_id` is required for command: delete");
}
if (plans.find(plan_id) == plans.end()) {
throw std::runtime_error("No plan found with ID: " + plan_id);
return ToolError("No plan found with ID: " + plan_id);
}
plans.erase(plan_id);
// If the deleted plan was the active plan, clear the active plan
if (_current_plan_id == plan_id) {
_current_plan_id = "";
_current_plan_id.clear();
}
return ToolResult(
@ -249,14 +252,15 @@ ToolResult PlanningTool::_delete_plan(const std::string& plan_id) {
// Format a plan for display.
std::string PlanningTool::_format_plan(const json& plan) {
std::string output = "Plan ID: " + plan["plan_id"].get<std::string>() + "\n";
int current_length = output.length();
std::stringstream output_ss;
output_ss << "Plan ID: " << plan["plan_id"].get<std::string>() << "\n";
int current_length = output_ss.str().length();
for (int i = 0; i < current_length; i++) {
output += "=";
output_ss << "=";
}
output += "\n\n";
output_ss << "\n\n";
// Calculate progress statistics
int total_steps = plan["steps"].size();
@ -273,17 +277,16 @@ std::string PlanningTool::_format_plan(const json& plan) {
return status == "not_started";
});
// Add progress statistics to the output
output += "Progress: " + std::to_string(completed_steps) + "/" + std::to_string(total_steps) + " steps completed\n";
output_ss << "Progress: " << completed_steps << "/" << total_steps << " steps completed ";
if (total_steps > 0) {
double percentage = (double)completed_steps / total_steps * 100;
output += "(" + std::to_string(std::round(percentage * 10) / 10) + "%)\n";
output_ss << "(" << std::fixed << std::setprecision(1) << percentage << "%)\n";
} else {
output += "(0%)\n";
output_ss << "(0%)\n";
}
output += "Status: " + std::to_string(completed_steps) + " completed, " + std::to_string(in_progress_steps) + " in progress, " + std::to_string(blocked_steps) + " blocked, " + std::to_string(not_started_steps) + " not started\n";
output += "Steps:\n";
output_ss << "Status: " << completed_steps << " completed, " << in_progress_steps << " in progress, " << blocked_steps << " blocked, " << not_started_steps << " not started\n\n";
output_ss << "Steps:\n";
static std::map<std::string, std::string> status_symbols = {
{"not_started", "[ ]"},
@ -298,11 +301,13 @@ std::string PlanningTool::_format_plan(const json& plan) {
std::string step_status = plan["step_statuses"][i];
std::string step_notes = plan["step_notes"][i];
std::string status_symbol = status_symbols.find(step_status) != status_symbols.end() ? status_symbols[step_status] : "[ ]";
output += std::to_string(i) + ". " + status_symbols[step_status] + " " + step + "\n";
output_ss << i << ". " + status_symbols[step_status] << " " << step << "\n";
if (!step_notes.empty()) {
output += " Notes: " + step_notes + "\n";
output_ss << " Notes: " << step_notes << "\n";
}
}
return output_ss.str();
}
} // namespace humanus

View File

@ -51,10 +51,19 @@ struct Puppeteer : BaseTool {
return ToolError("Failed to initialize puppeteer client");
}
json tool_args = args;
tool_args.erase("tool");
// 处理命令参数
std::string tool;
if (args.contains("tool")) {
if (args["tool"].is_string()) {
tool = args["tool"].get<std::string>();
} else {
return ToolError("Invalid tool format");
}
} else {
return ToolError("Tool is required");
}
json result = _client->call_tool("puppeteer_" + args["tool"].get<std::string>(), tool_args);
json result = _client->call_tool("puppeteer_" + tool, args);
bool is_error = result.value("isError", false);

View File

@ -26,30 +26,6 @@ struct PythonExecute : BaseTool {
};
PythonExecute() : BaseTool(name_, description_, parameters_) {}
ToolResult execute(const json& arguments) override {
try {
// 创建MCP客户端
mcp::client client("localhost", 8088);
// 初始化客户端
client.initialize("OpenManusCppClient", "0.1.0");
client.set_timeout(arguments["timeout"].get<double>());
// 调用工具
json tool_params = {{"code", arguments["code"]}};
json result = client.call_tool("python_execute", tool_params);
if (result["isError"]) {
return ToolError(result["error"].get<std::string>());
}
return ToolResult(result["content"].get<std::string>());
} catch (const std::exception& e) {
return ToolError(e.what());
}
}
};
}

View File

@ -54,42 +54,7 @@ struct Shell : BaseTool {
"required": ["command"]
})json");
std::unique_ptr<mcp::stdio_client> _client;
bool _initialized = false;
MCPToolConfig _config;
std::string last_request_id_;
Shell() : BaseTool(name_, description_, parameters_) {
}
~Shell() {
// 确保客户端正确关闭
if (_client && _client->is_running()) {
try {
if (!last_request_id_.empty()) {
_client->send_notification("notifications/cancelled", {
{"requestId", last_request_id_},
{"reason", "Client shutdown"}
});
}
} catch (...) {
// 忽略关闭时的错误
}
}
}
// 初始化客户端连接
bool initialize() {
if (_initialized) return true;
bool success = _client->initialize("humanus", "1.0.0");
if (success) {
_initialized = true;
}
return success;
}
Shell() : BaseTool(name_, description_, parameters_) {}
ToolResult execute(const json& args) override {
try {

View File

@ -23,9 +23,10 @@ struct Terminate : BaseTool {
Terminate() : BaseTool(name_, description_, parameters_) {}
// Finish the current execution
ToolResult execute(const json& arguments) override {
return ToolResult{
"The interaction has been completed with status: " + arguments["status"].get<std::string>()
"The interaction has been completed with status: " + arguments.value("status", "unknown")
};
}
};

View File

@ -26,7 +26,7 @@ struct ToolCollection {
ToolResult execute(const std::string& name, const json& args) const {
auto tool_iter = tools_map.find(name);
if (tool_iter == tools_map.end()) {
return ToolError("Tool not found: " + name);
return ToolError("Tool " + name + " is invalid");
}
try {
return tool_iter->second->execute(args);
@ -35,19 +35,19 @@ struct ToolCollection {
}
}
// Execute all tools in the collection sequentially.
std::vector<ToolResult> execute_all(const json& args) const {
std::vector<ToolResult> results;
for (auto tool : tools) {
try {
auto result = tool->execute(args);
results.push_back(result);
} catch (const std::exception& e) {
results.push_back(ToolError(e.what()));
}
}
return results;
}
// // Execute all tools in the collection sequentially.
// std::vector<ToolResult> execute_all(const json& args) const { // No reference now
// std::vector<ToolResult> results;
// for (auto tool : tools) {
// try {
// auto result = tool->execute(args);
// results.push_back(result);
// } catch (const std::exception& e) {
// results.push_back(ToolError(e.what()));
// }
// }
// return results;
// }
void add_tool(const std::shared_ptr<BaseTool>& tool) {
tools.push_back(tool);