diff --git a/agent/base.h b/agent/base.h index 7453d1a..8149360 100644 --- a/agent/base.h +++ b/agent/base.h @@ -32,30 +32,43 @@ struct BaseAgent : std::enable_shared_from_this { int duplicate_threshold; // Threshold for duplicate messages - BaseAgent(const std::string& name, - const std::string& description, - const std::string& system_prompt, - const std::string& next_step_prompt, - int max_steps = 10, - int current_step = 0, - int duplicate_threshold = 2) : - name(name), description(description), system_prompt(system_prompt), next_step_prompt(next_step_prompt), max_steps(max_steps), current_step(current_step), duplicate_threshold(duplicate_threshold) { - state = AgentState::IDLE; - } + BaseAgent( + const std::string& name, + const std::string& description, + const std::string& system_prompt, + const std::string& next_step_prompt, + const std::shared_ptr& llm = nullptr, + const std::shared_ptr& memory = nullptr, + AgentState state = AgentState::IDLE, + int max_steps = 10, + int current_step = 0, + int duplicate_threshold = 2 + ) : name(name), + description(description), + system_prompt(system_prompt), + next_step_prompt(next_step_prompt), + llm(llm), + memory(memory), + state(state), + max_steps(max_steps), + current_step(current_step), + duplicate_threshold(duplicate_threshold) { + initialize_agent(); + } // Initialize agent with default settings if not provided. - BaseAgent* initialize_agent() { + void initialize_agent() { if (!llm) { llm = LLM::get_instance(name); } if (!memory) { memory = std::make_shared(); } - return this; } // Add a message to the agent's memory - void update_memory(const std::string& role, const std::string& content) { + template + void update_memory(const std::string& role, const std::string& content, Args&&... 
args) { if (role == "user") { memory->add_message(Message::user_message(content)); } else if (role == "assistant") { @@ -63,12 +76,14 @@ struct BaseAgent : std::enable_shared_from_this { } else if (role == "system") { memory->add_message(Message::system_message(content)); } else if (role == "tool") { - memory->add_message(Message::tool_message(content)); + memory->add_message(Message::tool_message(content, std::forward(args)...)); + } else { + throw std::runtime_error("Unsupported message role: " + role); } } // Execute the agent's main loop asynchronously - virtual std::string run(const std::string& request) { + virtual std::string run(const std::string& request = "") { if (state != AgentState::IDLE) { throw std::runtime_error("Cannot run agent from state" + agent_state_map[state]); } @@ -77,7 +92,7 @@ struct BaseAgent : std::enable_shared_from_this { update_memory("user", request); } - state = AgentState::RUNNING; + state = AgentState::RUNNING; // IDLE -> RUNNING std::vector results; while (current_step < max_steps && state == AgentState::RUNNING) { current_step++; @@ -93,7 +108,7 @@ struct BaseAgent : std::enable_shared_from_this { if (current_step >= max_steps) { results.push_back("Terminated: Reached max steps (" + std::to_string(max_steps) + ")"); } - state = AgentState::IDLE; + state = AgentState::IDLE; // RUNNING -> IDLE std::string result_str = ""; @@ -136,18 +151,22 @@ struct BaseAgent : std::enable_shared_from_this { } // Count identical content occurrences - int count = 0; + int duplicate_count = 0; for (auto r_it = messages.rbegin(); r_it != messages.rend(); ++r_it) { const Message& message = *r_it; if (message.role == "assistant" && message.content == last_message.content) { - count++; - if (count >= duplicate_threshold) { + duplicate_count++; + if (duplicate_count >= duplicate_threshold) { break; } + } else { + break; + // Stop counting if a non-duplicate message is encountered + // Slightly different from OpenManus implementation } } - return count 
>= duplicate_threshold; + return duplicate_count >= duplicate_threshold; } void set_messages(const std::vector& messages) { diff --git a/agent/manus.h b/agent/manus.h index 390d7fd..2221091 100644 --- a/agent/manus.h +++ b/agent/manus.h @@ -7,8 +7,8 @@ #include "../tool/tool_collection.h" #include "../tool/python_execute.h" #include "../tool/terminate.h" -#include "../tool/google_search.h" -#include "../tool/file_saver.h" +#include "../tool/puppeteer.h" +#include "../tool/filesystem.h" namespace humanus { @@ -20,18 +20,11 @@ namespace humanus { * to handle a wide range of user requests. */ struct Manus : ToolCallAgent { - std::string name = "manus"; - std::string description = "A versatile agent that can solve various tasks using multiple tools"; - - std::string system_prompt = prompt::manus::SYSTEM_PROMPT; - std::string next_step_prompt = prompt::manus::NEXT_STEP_PROMPT; - - Manus( - const ToolCollection& available_tools = ToolCollection( + const ToolCollection& available_tools = ToolCollection( // Add general-purpose tools to the tool collection { std::make_shared(), - std::make_shared(), + std::make_shared(), // for web browsing std::make_shared(), std::make_shared() } @@ -42,21 +35,27 @@ struct Manus : ToolCallAgent { const std::string& description = "A versatile agent that can solve various tasks using multiple tools", const std::string& system_prompt = prompt::manus::SYSTEM_PROMPT, const std::string& next_step_prompt = prompt::manus::NEXT_STEP_PROMPT, + const std::shared_ptr& llm = nullptr, + const std::shared_ptr& memory = nullptr, + AgentState state = AgentState::IDLE, int max_steps = 30, int current_step = 0, int duplicate_threshold = 2 ) : ToolCallAgent( - available_tools, - tool_choice, - special_tool_names, - name, - description, - system_prompt, - next_step_prompt, - max_steps, - current_step, - duplicate_threshold - ) {} + available_tools, + tool_choice, + special_tool_names, + name, + description, + system_prompt, + next_step_prompt, + llm, + memory, 
+ state, + max_steps, + current_step, + duplicate_threshold + ) {} }; } diff --git a/agent/planning.cpp b/agent/planning.cpp index af5ed24..fdfef85 100644 --- a/agent/planning.cpp +++ b/agent/planning.cpp @@ -3,22 +3,19 @@ namespace humanus { // Initialize the agent with a default plan ID and validate required tools. -PlanningAgent* PlanningAgent::initialize_plan_and_verify_tools() { +void PlanningAgent::initialize_plan_and_verify_tools() { active_plan_id = "plan_" + std::chrono::system_clock::now().time_since_epoch().count(); if (available_tools.tools_map.find("planning") == available_tools.tools_map.end()) { available_tools.add_tool(std::make_shared()); } - - return this; } // Decide the next action based on plan status. bool PlanningAgent::think() { std::string prompt; if (!active_plan_id.empty()) { - auto plan = get_plan(); - prompt = "CURRENT PLAN STATUS:\n" + plan + "\n\n" + next_step_prompt; + prompt = "CURRENT PLAN STATUS:\n" + get_plan() + "\n\n" + next_step_prompt; } else { prompt = next_step_prompt; } @@ -27,17 +24,19 @@ bool PlanningAgent::think() { // Get the current step index before thinking current_step_index = _get_current_step_index(); - bool result = ToolCallAgent::think(); + bool result = ToolCallAgent::think(); // Will set tool_calls + // After thinking, if we decided to execute a tool and it's not a planning tool or special tool, + // associate it with the current step for tracking if (result && !tool_calls.empty()) { - auto latest_tool_call = tool_calls.back(); + auto latest_tool_call = tool_calls.back(); // Get the most recent tool call if (latest_tool_call.function.name != "planning" && !_is_special_tool(latest_tool_call.function.name) && current_step_index >= 0) { step_execution_tracker[latest_tool_call.id] = { {"step_index", current_step_index}, {"tool_name", latest_tool_call.function.name}, - {"status", "pending"} // will be updated after execution + {"status", "pending"} // Will be updated after execution }; } } @@ -49,6 +48,7 @@ bool 
PlanningAgent::think() { std::string PlanningAgent::act() { std::string result = ToolCallAgent::act(); + // After executing the tool, update the plan status if (!tool_calls.empty()) { auto latest_tool_call = tool_calls.back(); @@ -58,8 +58,10 @@ std::string PlanningAgent::act() { step_execution_tracker[latest_tool_call.id]["result"] = result; // Update the plan status if this was a non-planning, non-special tool - if (latest_tool_call.function.name != "planning" - && !_is_special_tool(latest_tool_call.function.name)) { + if ( + latest_tool_call.function.name != "planning" + && !_is_special_tool(latest_tool_call.function.name) + ) { update_plan_status(latest_tool_call.id); } } @@ -83,12 +85,12 @@ std::string PlanningAgent::get_plan() { } // Run the agent with an optional initial request. -std::string PlanningAgent::run(const std::string& request = "") { +std::string PlanningAgent::run(const std::string& request) { if (!request.empty()) { create_initial_plan(request); } - return BaseAgent::run(request); + return BaseAgent::run(); } // Update the current plan progress based on completed tool execution. @@ -115,16 +117,23 @@ void PlanningAgent::update_plan_status(const std::string& tool_call_id) { // Mark the step as completed available_tools.execute( "planning", - {{"command", "mark_step"}, {"plan_id", active_plan_id}, {"step_index", step_index}, {"status", "completed"}} + { + {"command", "mark_step"}, + {"plan_id", active_plan_id}, + {"step_index", step_index}, + {"step_status", "completed"} + } + ); + logger->info( + "Marked step " + std::to_string(step_index) + " as completed in plan " + active_plan_id ); - logger->info("Marked step " + std::to_string(step_index) + " as completed in plan " + active_plan_id); } catch (const std::exception& e) { logger->warn("Failed to update plan status: " + std::string(e.what())); } } // Parse the current plan to identify the first non-completed step's index. -// Returns None if no active step is found. 
+// Returns -1 if no active step is found. int PlanningAgent::_get_current_step_index() { if (active_plan_id.empty()) { return -1; @@ -165,7 +174,12 @@ int PlanningAgent::_get_current_step_index() { // Mark current step as in_progress available_tools.execute( "planning", - {{"command", "mark_step"}, {"plan_id", active_plan_id}, {"step_index", i}, {"status", "in_progress"}} + { + {"command", "mark_step"}, + {"plan_id", active_plan_id}, + {"step_index", i}, + {"step_status", "in_progress"} + } ); return i; } @@ -223,7 +237,7 @@ void PlanningAgent::create_initial_plan(const std::string& request) { if (!plan_created) { logger->warn("No plan created from initial request"); - Message tool_msg = Message::tool_message( + Message tool_msg = Message::assistant_message( "Error: Parameter `plan_id` is required for command: create" ); memory->add_message(tool_msg); diff --git a/agent/planning.h b/agent/planning.h index 86d5506..83d732c 100644 --- a/agent/planning.h +++ b/agent/planning.h @@ -20,27 +20,45 @@ struct PlanningAgent : ToolCallAgent { int current_step_index; PlanningAgent( - const ToolCollection& available_tools = ToolCollection( - { - std::make_shared(), - std::make_shared() - } - ), - const std::string& tool_choice = "auto", - const std::set& special_tool_names = {"terminate"}, - const std::string& name = "planning", - const std::string& description = "An agent that creates and manages plans to solve tasks", - const std::string& system_prompt = prompt::planning::PLANNING_SYSTEM_PROMPT, - const std::string& next_step_prompt = prompt::planning::NEXT_STEP_PROMPT, - int max_steps = 20, - int current_step = 0, - int duplicate_threshold = 2 - ) : ToolCallAgent(available_tools, tool_choice, special_tool_names, name, description, system_prompt, next_step_prompt, max_steps, current_step, duplicate_threshold) { - current_step_index = -1; // no plan yet - } + const ToolCollection& available_tools = ToolCollection( + { + std::make_shared(), + std::make_shared() + } + ), + 
const std::string& tool_choice = "auto", + const std::set& special_tool_names = {"terminate"}, + const std::string& name = "planning", + const std::string& description = "An agent that creates and manages plans to solve tasks", + const std::string& system_prompt = prompt::planning::PLANNING_SYSTEM_PROMPT, + const std::string& next_step_prompt = prompt::planning::NEXT_STEP_PROMPT, + const std::shared_ptr& llm = nullptr, + const std::shared_ptr& memory = nullptr, + AgentState state = AgentState::IDLE, + int max_steps = 20, + int current_step = 0, + int duplicate_threshold = 2 + ) : ToolCallAgent( + available_tools, + tool_choice, + special_tool_names, + name, + description, + system_prompt, + next_step_prompt, + llm, + memory, + state, + max_steps, + current_step, + duplicate_threshold + ) { + current_step_index = -1; // will be set in think() + initialize_plan_and_verify_tools(); + } // Initialize the agent with a default plan ID and validate required tools. - PlanningAgent* initialize_plan_and_verify_tools(); + void initialize_plan_and_verify_tools(); // Decide the next action based on plan status. 
bool think() override; diff --git a/agent/react.h b/agent/react.h index fbbcd40..06960f7 100644 --- a/agent/react.h +++ b/agent/react.h @@ -6,14 +6,29 @@ namespace humanus { struct ReActAgent : BaseAgent { - ReActAgent(const std::string& name, - const std::string& description, - const std::string& system_prompt, - const std::string& next_step_prompt, - int max_steps = 10, - int current_step = 0, - int duplicate_threshold = 2) : - BaseAgent(name, description, system_prompt, next_step_prompt, max_steps, current_step, duplicate_threshold) {} + ReActAgent( + const std::string& name, + const std::string& description, + const std::string& system_prompt, + const std::string& next_step_prompt, + const std::shared_ptr& llm = nullptr, + const std::shared_ptr& memory = nullptr, + AgentState state = AgentState::IDLE, + int max_steps = 10, + int current_step = 0, + int duplicate_threshold = 2 + ) : BaseAgent( + name, + description, + system_prompt, + next_step_prompt, + llm, + memory, + state, + max_steps, + current_step, + duplicate_threshold + ) {} // Process current state and decide next actions using tools virtual bool think() = 0; diff --git a/agent/swe.h b/agent/swe.h index 0e7cfd2..31db206 100644 --- a/agent/swe.h +++ b/agent/swe.h @@ -4,6 +4,8 @@ #include "toolcall.h" #include "../tool/tool_collection.h" #include "../tool/terminate.h" +#include "../tool/shell.h" +#include "../tool/filesystem.h" #include "../prompt.h" namespace humanus { @@ -11,11 +13,9 @@ namespace humanus { // An agent that implements the SWEAgent paradigm for executing code and natural conversations. 
struct SweAgent : ToolCallAgent { std::string working_dir; - std::shared_ptr bash; SweAgent( const std::string& working_dir = ".", - const std::shared_ptr& bash = std::make_shared(), const ToolCollection& available_tools = ToolCollection( { std::make_shared(), @@ -29,16 +29,32 @@ struct SweAgent : ToolCallAgent { const std::string& description = "an autonomous AI programmer that interacts directly with the computer to solve tasks.", const std::string& system_prompt = prompt::swe::SYSTEM_PROMPT, const std::string& next_step_prompt = prompt::swe::NEXT_STEP_TEMPLATE, + const std::shared_ptr& llm = nullptr, + const std::shared_ptr& memory = nullptr, + AgentState state = AgentState::IDLE, int max_steps = 100, int current_step = 0, int duplicate_threshold = 2 - ) : ToolCallAgent(available_tools, tool_choice, special_tool_names, name, description, system_prompt, next_step_prompt, max_steps, current_step, duplicate_threshold), - bash(bash), + ) : ToolCallAgent( + available_tools, + tool_choice, + special_tool_names, + name, + description, + system_prompt, + next_step_prompt, + llm, + memory, + state, + max_steps, + current_step, + duplicate_threshold + ), working_dir(working_dir) {} bool think() override { // Update working directory - working_dir = bash->execute("pwd"); + working_dir = std::filesystem::current_path().string(); // TODO: Maybe use predefined working directory? next_step_prompt = prompt::swe::NEXT_STEP_TEMPLATE; next_step_prompt = next_step_prompt.replace( next_step_prompt.find("{working_dir}"), std::string("{working_dir}").length(), working_dir diff --git a/agent/toolcall.cpp b/agent/toolcall.cpp index 8afed84..13eab7c 100644 --- a/agent/toolcall.cpp +++ b/agent/toolcall.cpp @@ -12,7 +12,7 @@ bool ToolCallAgent::think() { // Get response with tool options auto response = llm->ask_tool( memory->messages, - std::vector{Message::system_message(system_prompt)}, + system_prompt.empty() ? 
std::vector{} : std::vector{Message::system_message(system_prompt)}, available_tools.to_params(), tool_choice ); @@ -54,12 +54,12 @@ bool ToolCallAgent::think() { memory->add_message(assistant_msg); if (tool_choice == "required" && tool_calls.empty()) { - return true; // will be handled in act() + return true; // Will be handled in act() } // For 'auto' mode, continue with content if no commands but content exists if (tool_choice == "auto" && !tool_calls.empty()) { - return !response["content"].is_null() && !response["content"].empty(); + return !response["content"].empty(); } return !tool_calls.empty(); @@ -80,24 +80,22 @@ std::string ToolCallAgent::act() { } // Return last message content if no tool calls - return memory->messages.back().content.empty() ? "No content or commands to execute" : memory->messages.back().content.dump(); + return memory->messages.empty() || memory->messages.back().content.empty() ? "No content or commands to execute" : memory->messages.back().content.dump(); } std::vector results; for (const auto& tool_call : tool_calls) { - auto result = std::async(std::launch::async, [this, tool_call]() { - return execute_tool(tool_call); - }); + auto result = execute_tool(tool_call); logger->info( - "🎯 Tool '" + tool_call.function.name + "' ompleted its mission! Result: " + result.get() + "🎯 Tool '" + tool_call.function.name + "' completed its mission! 
Result: " + result ); // Add tool response to memory Message tool_msg = Message::tool_message( - result.get(), tool_call.id, tool_call.function.name + result, tool_call.id, tool_call.function.name ); memory->add_message(tool_msg); - results.push_back(result.get()); + results.push_back(result); } std::string result_str; @@ -108,6 +106,7 @@ std::string ToolCallAgent::act() { return result_str; } +// Execute a single tool call with robust error handling std::string ToolCallAgent::execute_tool(ToolCall tool_call) { if (tool_call.empty() || tool_call.function.empty() || tool_call.function.name.empty()) { return "Error: Invalid command format"; @@ -122,12 +121,16 @@ std::string ToolCallAgent::execute_tool(ToolCall tool_call) { // Parse arguments json args = tool_call.function.arguments; + if (args.is_string()) { + args = json::parse(args.get()); + } + // Execute the tool logger->info("🔧 Activating tool: '" + name + "'..."); ToolResult result = available_tools.execute(name, args); // Format result for display - auto observation = result.to_string().empty() ? + auto observation = result.empty() ? 
"Cmd `" + name + "` completed with no output" : "Observed output of cmd `" + name + "` executed:\n" + result.to_string(); @@ -162,7 +165,7 @@ void ToolCallAgent::_handle_special_tool(const std::string& name, const ToolResu // Determine if tool execution should finish the agent bool ToolCallAgent::_should_finish_execution(const std::string& name, const ToolResult& result, const json& kwargs) { - return true; // currently, all special tools (terminate) finish the agent + return true; // Currently, all special tools (terminate) finish the agent } bool ToolCallAgent::_is_special_tool(const std::string& name) { diff --git a/agent/toolcall.h b/agent/toolcall.h index c4d4fc0..415fac2 100644 --- a/agent/toolcall.h +++ b/agent/toolcall.h @@ -31,10 +31,24 @@ struct ToolCallAgent : ReActAgent { const std::string& description = "an agent that can execute tool calls.", const std::string& system_prompt = prompt::toolcall::SYSTEM_PROMPT, const std::string& next_step_prompt = prompt::toolcall::NEXT_STEP_PROMPT, + const std::shared_ptr& llm = nullptr, + const std::shared_ptr& memory = nullptr, + AgentState state = AgentState::IDLE, int max_steps = 30, int current_step = 0, int duplicate_threshold = 2 - ) : ReActAgent(name, description, system_prompt, next_step_prompt, max_steps, current_step, duplicate_threshold), + ) : ReActAgent( + name, + description, + system_prompt, + next_step_prompt, + llm, + memory, + state, + max_steps, + current_step, + duplicate_threshold + ), available_tools(available_tools), tool_choice(tool_choice), special_tool_names(special_tool_names) {} @@ -44,7 +58,8 @@ struct ToolCallAgent : ReActAgent { // Execute tool calls and handle their results std::string act() override; - + + // Execute a single tool call with robust error handling std::string execute_tool(ToolCall tool_call); // Handle special tool execution and state changes diff --git a/config.cpp b/config.cpp index 57e6562..949c6ef 100644 --- a/config.cpp +++ b/config.cpp @@ -11,49 +11,61 @@ Config* 
Config::_instance = nullptr; std::mutex Config::_mutex; // 全局配置实例 -Config& config = Config::getInstance(); +Config& config = Config::get_instance(); void Config::_load_initial_config() { try { auto config_path = _get_config_path(); std::cout << "加载配置文件: " << config_path.string() << std::endl; - auto data = toml::parse_file(config_path.string()); - - // 解析LLM设置 - if (data.contains("llm") && data["llm"].is_table()) { - auto& llm_table = data["llm"].as_table(); - for (const auto& [name, settings] : llm_table) { - LLMSettings llm_settings; - - if (settings.contains("model") && settings["model"].is_string()) { - llm_settings.model = settings["model"].as_string(); - } - - if (settings.contains("api_key") && settings["api_key"].is_string()) { - llm_settings.api_key = settings["api_key"].as_string(); - } - - if (settings.contains("base_url") && settings["base_url"].is_string()) { - llm_settings.base_url = settings["base_url"].as_string(); - } + const auto& data = toml::parse_file(config_path.string()); - if (settings.contains("end_point") && settings["end_point"].is_string()) { - llm_settings.end_point = settings["end_point"].as_string(); - } - - if (settings.contains("max_tokens") && settings["max_tokens"].is_integer()) { - llm_settings.max_tokens = settings["max_tokens"].as_integer(); - } - - if (settings.contains("temperature") && settings["temperature"].is_floating()) { - llm_settings.temperature = settings["temperature"].as_floating(); - } - - _config.llm[name] = llm_settings; - } + // 检查工具配置是否存在 + if (!data.contains("llm") || !data["llm"].is_table()) { + throw std::runtime_error("MCP配置文件中找不到llm配置: "); } + const auto& llm_table = *data["llm"].as_table(); + + LLMSettings llm_settings; + + if (llm_table.contains("model") && llm_table["model"].is_string()) { + llm_settings.model = llm_table["model"].as_string()->get(); + } else { + throw std::runtime_error("Invalid `model` configuration"); + } + + if (llm_table.contains("api_key") && llm_table["api_key"].is_string()) { + 
llm_settings.api_key = llm_table["api_key"].as_string()->get(); + } else { + throw std::runtime_error("Invalid `api_key` configuration"); + } + + if (llm_table.contains("base_url") && llm_table["base_url"].is_string()) { + llm_settings.base_url = llm_table["base_url"].as_string()->get(); + } else { + throw std::runtime_error("Invalid `base_url` configuration"); + } + + if (llm_table.contains("end_point") && llm_table["end_point"].is_string()) { + llm_settings.end_point = llm_table["end_point"].as_string()->get(); + } else { + throw std::runtime_error("Invalid `end_point` configuration"); + } + + if (llm_table.contains("max_tokens") && llm_table["max_tokens"].is_integer()) { + llm_settings.max_tokens = llm_table["max_tokens"].as_integer()->get(); + } else { + llm_settings.max_tokens = 4096; + } + + if (llm_table.contains("temperature") && llm_table["temperature"].is_floating_point()) { + llm_settings.temperature = llm_table["temperature"].as_floating_point()->get(); + } else { + llm_settings.temperature = 1.0; + } + + _config.llm["default"] = llm_settings; } catch (const std::exception& e) { std::cerr << "加载配置文件失败: " << e.what() << std::endl; // 设置默认配置 diff --git a/config.h b/config.h index 3325fde..b1dd75e 100644 --- a/config.h +++ b/config.h @@ -33,10 +33,10 @@ struct LLMSettings { double temperature; LLMSettings( - std::string model = "", - std::string api_key = "", - std::string base_url = "", - std::string end_point = "/v1/chat/completions", + const std::string& model = "", + const std::string& api_key = "", + const std::string& base_url = "", + const std::string& end_point = "/v1/chat/completions", int max_tokens = 4096, double temperature = 1.0 ) : model(model), api_key(api_key), base_url(base_url), end_point(end_point), @@ -54,27 +54,10 @@ struct LLMSettings { } }; -/** - * @brief 应用配置结构体 - */ struct AppConfig { std::map llm; - - json to_json() const { - json j; - json llm_json; - for (const auto& [name, settings] : llm) { - llm_json[name] = 
settings.to_json(); - } - j["llm"] = llm_json; - return j; - } }; -/** - * @class Config - * @brief 配置单例类,用于读取TOML格式的配置文件 - */ class Config { private: static Config* _instance; @@ -119,7 +102,7 @@ public: * @brief 获取单例实例 * @return 配置实例 */ - static Config& getInstance() { + static Config& get_instance() { if (_instance == nullptr) { std::lock_guard lock(_mutex); if (_instance == nullptr) { @@ -133,7 +116,7 @@ public: * @brief 获取LLM设置 * @return LLM设置映射 */ - const std::map& getLLMSettings() const { + const std::map& llm() const { return _config.llm; } @@ -141,7 +124,7 @@ public: * @brief 获取应用配置 * @return 应用配置 */ - const AppConfig& getConfig() const { + const AppConfig& get_config() const { return _config; } }; diff --git a/flow/base.h b/flow/base.h index 1114a86..e0192d5 100644 --- a/flow/base.h +++ b/flow/base.h @@ -1,16 +1,17 @@ #ifndef HUMANUS_FLOW_BASE_H #define HUMANUS_FLOW_BASE_H +#include "../tool/base.h" #include "../agent/base.h" namespace humanus { enum FlowType { - PLANING = 0 + PLANNING = 0 }; const std::map FLOW_TYPE_MAP = { - {PLANING, "planning"} + {PLANNING, "planning"} }; // Base class for execution flows supporting multiple agents @@ -19,10 +20,31 @@ struct BaseFlow { std::vector> tools; std::string primary_agent_key; - BaseFlow(const std::map>& agents = {}, const std::vector>& tools = {}, const std::string& primary_agent_key = "") : agents(agents), tools(tools), primary_agent_key(primary_agent_key) { + BaseFlow(const std::map>& agents = {}, const std::vector>& tools = {}, const std::string& primary_agent_key = "") + : agents(agents), tools(tools), primary_agent_key(primary_agent_key) { // If primary agent not specified, use first agent if (primary_agent_key.empty() && !agents.empty()) { - primary_agent_key = agents.begin()->first; + this->primary_agent_key = agents.begin()->first; + } + } + + BaseFlow(const std::shared_ptr& agent, const std::vector>& tools = {}, const std::string& primary_agent_key = "") + : tools(tools), 
primary_agent_key(primary_agent_key) { + agents["default"] = agent; + // If primary agent not specified, use first agent + if (primary_agent_key.empty()) { + this->primary_agent_key = "default"; + } + } + + BaseFlow(const std::vector>& agents_list, const std::vector>& tools = {}, const std::string& primary_agent_key = "") + : tools(tools), primary_agent_key(primary_agent_key) { + for (size_t i = 0; i < agents_list.size(); i++) { + agents["agent_" + std::to_string(i)] = agents_list[i]; + } + // If primary agent not specified, use first agent + if (primary_agent_key.empty() && !agents.empty()) { + this->primary_agent_key = agents.begin()->first; } } @@ -42,7 +64,7 @@ struct BaseFlow { } // Execute the flow with the given input - virtual std::string execute(const std::string& input) = 0; + virtual std::string execute(const std::string& input_text) = 0; }; } diff --git a/flow/flow_factory.h b/flow/flow_factory.h index b562d4d..a965b97 100644 --- a/flow/flow_factory.h +++ b/flow/flow_factory.h @@ -3,16 +3,17 @@ #include "base.h" #include "../agent/base.h" -#include "flow_planning.h" +#include "planning.h" namespace humanus { // Factory for creating different types of flows with support for multiple agents struct FlowFactory { - static BaseFlow create_flow(FlowType flow_type, std::map> agents, std::vector> tools, std::string primary_agent_key) { + template + static std::unique_ptr create_flow(FlowType flow_type, Args&&... 
args) { switch (flow_type) { case FlowType::PLANNING: - return std::make_shared(agents, tools, primary_agent_key); + return std::make_unique(std::forward(args)...); default: throw std::invalid_argument("Unknown flow type: " + std::to_string(static_cast(flow_type))); } diff --git a/flow/flow_planning.cpp b/flow/planning.cpp similarity index 80% rename from flow/flow_planning.cpp rename to flow/planning.cpp index 415b3ca..0eacd77 100644 --- a/flow/flow_planning.cpp +++ b/flow/planning.cpp @@ -1,4 +1,4 @@ -#include "flow_planning.h" +#include "planning.h" namespace humanus { @@ -7,13 +7,13 @@ namespace humanus { std::shared_ptr PlanningFlow::get_executor(const std::string& step_type = "") const { // If step type is provided and matches an agent key, use that agent if (!step_type.empty() && agents.find(step_type) != agents.end()) { - return agents[step_type]; + return agents.at(step_type); } // Otherwise use the first available executor or fall back to primary agent for (const auto& key : executor_keys) { if (agents.find(key) != agents.end()) { - return agents[key]; + return agents.at(key); } } @@ -33,7 +33,7 @@ std::string PlanningFlow::execute(const std::string& input) { _create_initial_plan(input); // Verify plan was created successfully - if (planning_tool.plans.find(active_plan_id) == planning_tool.plans.end()) { + if (planning_tool->plans.find(active_plan_id) == planning_tool->plans.end()) { logger->error("Plan creation failed. 
Plan ID " + active_plan_id + " not found in planning tool."); return "Failed to create plan for: " + input; } @@ -42,7 +42,7 @@ std::string PlanningFlow::execute(const std::string& input) { std::string result = ""; while (true) { // Get current step to execute - mcp::json step_info; + json step_info; _get_current_step_info(current_step_index, step_info); // Exit if no more steps or plan completed @@ -58,7 +58,7 @@ std::string PlanningFlow::execute(const std::string& input) { result += step_result + "\n"; // Check if agent wants to terminate - if (executor->state == AgentState::TERMINATED) { + if (executor->state == AgentState::FINISHED) { break; } } @@ -88,13 +88,13 @@ void PlanningFlow::_create_initial_plan(const std::string& request) { auto response = llm->ask_tool( {user_message}, {system_message}, - {planning_tool.to_param()}, + {planning_tool->to_param()}, "required" ); // Process tool calls if present if (response.contains("tool_calls") && !response["tool_calls"].empty()) { - tool_calls = ToolCall::from_json_list(response["tool_calls"]); + auto tool_calls = ToolCall::from_json_list(response["tool_calls"]); for (const auto& tool_call : tool_calls) { // Parse the arguments @@ -112,7 +112,7 @@ void PlanningFlow::_create_initial_plan(const std::string& request) { args["plan_id"] = active_plan_id; // Execute the tool via ToolCollection instead of directly - auto result = planning_tool.execute(tool_call.function.name, args); + auto result = planning_tool->execute(args); logger->info("Plan creation result: " + result.to_string()); return; @@ -123,7 +123,7 @@ void PlanningFlow::_create_initial_plan(const std::string& request) { logger->warn("Creating default plan"); // Create default plan using the ToolCollection - planning_tool.execute({ + planning_tool->execute({ {"command", "create"}, {"plan_id", active_plan_id}, {"title", request.substr(0, std::min(50, static_cast(request.size()))) + (request.size() > 50 ? "..." 
: "")}, @@ -133,23 +133,23 @@ void PlanningFlow::_create_initial_plan(const std::string& request) { // Parse the current plan to identify the first non-completed step's index and info. // Returns (None, None) if no active step is found. -void PlanningFlow::_get_current_step_info(int& current_step_index, mcp::json& step_info) { - if (active_plan_id.empty() || planning_tool.plans.find(active_plan_id) == planning_tool.plans.end()) { +void PlanningFlow::_get_current_step_info(int& current_step_index, json& step_info) { + if (active_plan_id.empty() || planning_tool->plans.find(active_plan_id) == planning_tool->plans.end()) { logger->error("Plan with ID " + active_plan_id + " not found"); current_step_index = -1; - step_info = mcp::json::object(); + step_info = json::object(); return; } try { // Direct access to plan data from planning tool storage - mcp::json& plan_data = planning_tool.plans[active_plan_id]; - mcp::json steps = plan_data.value("steps", mcp::json::array()); - mcp::json step_status = plan_data.value("status", mcp::json::array()); + json& plan_data = planning_tool->plans[active_plan_id]; + json steps = plan_data.value("steps", json::array()); + json step_statuses = plan_data.value("step_statuses", json::array()); // Find first non-completed step for (size_t i = 0; i < steps.size(); ++i) { - const auto& step = steps[i]; + const auto& step = steps[i].get(); std::string step_status; if (i >= step_status.size()) { @@ -161,8 +161,8 @@ void PlanningFlow::_get_current_step_info(int& current_step_index, mcp::json& st if (step_status == "not_started" || step_status == "in_progress") { // Extract step type/category if available step_info = { - {"type", step}, - } + {"type", step} + }; } // Try to extract step type from the text (e.g., [SEARCH] or [CODE]) @@ -174,7 +174,7 @@ void PlanningFlow::_get_current_step_info(int& current_step_index, mcp::json& st // Mark current step as in_progress try { - planning_tool.execute({ + planning_tool->execute({ {"command", 
"mark_step"}, {"plan_id", active_plan_id}, {"step_index", i}, @@ -183,13 +183,13 @@ void PlanningFlow::_get_current_step_info(int& current_step_index, mcp::json& st } catch (const std::exception& e) { logger->error("Error marking step as in_progress: " + std::string(e.what())); // Update step status directly if needed - if (i < step_status.size()) { - step_status[i] = "in_progress"; + if (i < step_statuses.size()) { + step_statuses[i] = "in_progress"; } else { - while (i > step_status.size()) { - step_status.push_back("not_started"); + while (i > step_statuses.size()) { + step_statuses.push_back("not_started"); } - step_status.push_back("in_progress"); + step_statuses.push_back("in_progress"); } plan_data["step_statuses"] = step_statuses; @@ -200,31 +200,31 @@ void PlanningFlow::_get_current_step_info(int& current_step_index, mcp::json& st return; } current_step_index = -1; - step_info = mcp::json::object(); // No active step found + step_info = json::object(); // No active step found } catch (const std::exception& e) { logger->error("Error finding current step index: " + std::string(e.what())); current_step_index = -1; - step_info = mcp::json::object(); + step_info = json::object(); } } // Execute the current step with the specified agent using agent.run(). 
-std::string PlanningFlow::_execute_step(const std::shared_ptr& executor, const mcp::json& step_info) { +std::string PlanningFlow::_execute_step(const std::shared_ptr& executor, const json& step_info) { // Prepare context for the agent with current plan status - mcp::json plan_status = _get_plan_status(); + json plan_status = _get_plan_text(); std::string step_text = step_info.value("text", "Step " + std::to_string(current_step_index)); // Create a prompt for the agent to execute the current step std::string step_prompt; - step_prompt += "\n\nCURRENT PLAN STATUS:\n"; - step_prompt += plan_status.dump(4); + step_prompt += "\nCURRENT PLAN STATUS:\n"; + step_prompt += plan_status.dump(2); step_prompt += "\n\nYOUR CURRENT TASK:\n"; step_prompt += "You are now working on step " + std::to_string(current_step_index) + ": \"" + step_text + "\"\n"; step_prompt += "Please execute this step using the appropriate tools. When you're done, provide a summary of what you accomplished."; // Use agent.run() to execute the step try { - std::string result = executor->run(step_prompt); + std::string step_result = executor->run(step_prompt); // Mark the step as completed after successful execution _mark_step_completed(); @@ -244,7 +244,7 @@ void PlanningFlow::_mark_step_completed() { try { // Mark the step as completed - planning_tool.execute({ + planning_tool->execute({ {"command", "mark_step"}, {"plan_id", active_plan_id}, {"step_index", current_step_index}, @@ -254,11 +254,11 @@ void PlanningFlow::_mark_step_completed() { "Marked step " + std::to_string(current_step_index) + " as completed in plan " + active_plan_id ); } catch (const std::exception& e) { - LOG_WARN("Failed to update plan status: " + std::string(e.what())); + logger->warn("Failed to update plan status: " + std::string(e.what())); // Update step status directly in planning tool storage - if (planning_tool.plans.find(active_plan_id) != planning_tool.plans.end()) { - mcp::json& plan_data = 
planning_tool.plans[active_plan_id]; - mcp::json step_statuses = plan_data.value("step_statuses", mcp::json::array()); + if (planning_tool->plans.find(active_plan_id) != planning_tool->plans.end()) { + json& plan_data = planning_tool->plans[active_plan_id]; + json step_statuses = plan_data.value("step_statuses", json::array()); // Ensure the step_statuses list is long enough while (current_step_index >= step_statuses.size()) { @@ -275,12 +275,12 @@ void PlanningFlow::_mark_step_completed() { // Get the current plan as formatted text. std::string PlanningFlow::_get_plan_text() { try { - auto result = planning_tool.execute({ + auto result = planning_tool->execute({ {"command", "get"}, {"plan_id", active_plan_id} }); - return result.to_string(); + return !result.output.empty() ? result.output.dump() : result.to_string(); } catch (const std::exception& e) { LOG_ERROR("Error getting plan: " + std::string(e.what())); return _generate_plan_text_from_storage(); @@ -290,15 +290,15 @@ std::string PlanningFlow::_get_plan_text() { // Generate plan text directly from storage if the planning tool fails. 
std::string PlanningFlow::_generate_plan_text_from_storage() { try { - if (planning_tool.plans.find(active_plan_id) == planning_tool.plans.end()) { + if (planning_tool->plans.find(active_plan_id) == planning_tool->plans.end()) { return "Error: Plan with ID " + active_plan_id + " not found"; } - mcp::json& plan_data = planning_tool.plans[active_plan_id]; - auto title = plan_data.value("title", ""); - auto steps = plan_data.value("steps", mcp::json::array()); - auto step_statuses = plan_data.value("step_statuses", mcp::json::array()); - auto step_notes = plan_data.value("step_notes", mcp::json::array()); + json& plan_data = planning_tool->plans[active_plan_id]; + auto title = plan_data.value("title", "Untitled Plan"); + auto steps = plan_data.value("steps", json::array()); + auto step_statuses = plan_data.value("step_statuses", json::array()); + auto step_notes = plan_data.value("step_notes", json::array()); // Ensure step_statuses and step_notes match the number of steps while (step_statuses.size() < steps.size()) { @@ -309,7 +309,7 @@ std::string PlanningFlow::_generate_plan_text_from_storage() { } // Count steps by status - mcp::json status_counts = { + std::map status_counts = { {"completed", 0}, {"in_progress", 0}, {"blocked", 0}, @@ -317,8 +317,8 @@ std::string PlanningFlow::_generate_plan_text_from_storage() { }; for (const auto& status : step_statuses) { - if (status_counts.contains(status)) { - status_counts[status]++; + if (status_counts.find(status) != status_counts.end()) { + status_counts[status] = status_counts[status] + 1; } } @@ -331,8 +331,8 @@ std::string PlanningFlow::_generate_plan_text_from_storage() { plan_text_ss << std::string(plan_text_ss.str().size(), '=') << "\n\n"; plan_text_ss << "Total steps: " << completed << "/" << total << " steps completed (" << std::fixed << std::setprecision(1) << progress << "%)\n"; - plan_text_ss << "Status: " << status_counts["completed"].get() << " completed, " << status_counts["in_progress"].get() << " in 
progress, " - << status_counts["blocked"].get() << " blocked, " << status_counts["not_started"].get() << " not started\n\n"; + plan_text_ss << "Status: " << status_counts["completed"] << " completed, " << status_counts["in_progress"] << " in progress, " + << status_counts["blocked"] << " blocked, " << status_counts["not_started"] << " not started\n\n"; plan_text_ss << "Steps:\n"; for (size_t i = 0; i < steps.size(); ++i) { @@ -350,6 +350,8 @@ std::string PlanningFlow::_generate_plan_text_from_storage() { status_mark = "[!]"; } else if (status == "not_started") { status_mark = "[ ]"; + } else { // unknown status + status_mark = "[?]"; } plan_text_ss << i << ". " << status_mark << " " << step << "\n"; @@ -385,7 +387,7 @@ std::string PlanningFlow::_finalize_plan() { {system_message} ); - return response.to_string(); + return response; } catch (const std::exception& e) { LOG_ERROR("Error finalizing plan with LLM: " + std::string(e.what())); @@ -394,7 +396,7 @@ std::string PlanningFlow::_finalize_plan() { auto agent = primary_agent(); std::string summary_prompt = "\nThe plan has been completed. Here is the final plan status:\n\n"; summary_prompt += plan_text + "\n\n"; - summary_prompt += "Please provide a summary of what was accomplished and any final thoughts\n"; + summary_prompt += "Please provide a summary of what was accomplished and any final thoughts.\n"; std::string summary = agent->run(summary_prompt); return "Plan completed:\n\n" + summary; } catch (const std::exception& e2) { diff --git a/flow/flow_planning.h b/flow/planning.h similarity index 62% rename from flow/flow_planning.h rename to flow/planning.h index 694e810..2fbc358 100644 --- a/flow/flow_planning.h +++ b/flow/planning.h @@ -12,28 +12,35 @@ namespace humanus { // A flow that manages planning and execution of tasks using agents. 
-struct FlowPlanning : public BaseFlow { +struct PlanningFlow : public BaseFlow { std::shared_ptr llm; - PlanningTool planning_tool; + std::shared_ptr planning_tool; std::vector executor_keys; std::string active_plan_id; int current_step_index = -1; - FlowPlanning(const std::shared_ptr& llm, - const PlanningTool& planning_tool = PlanningTool(), - const std::vector& executor_keys = {}, - const std::string& active_plan_id = "", - const std::map>& agents = {}, - const std::vector>& tools = {}, - const std::string& primary_agent_key = "") - : BaseFlow(agents, tools, primary_agent_key), - llm(llm), - planning_tool(planning_tool), - executor_keys(executor_keys), - active_plan_id(active_plan_id) { + PlanningFlow( + const std::shared_ptr& llm = nullptr, + const std::shared_ptr& planning_tool = nullptr, + const std::vector& executor_keys = {}, + const std::string& active_plan_id = "", + const std::map>& agents = {}, + const std::vector>& tools = {}, + const std::string& primary_agent_key = "" + ) : BaseFlow(agents, tools, primary_agent_key), + llm(llm), + planning_tool(planning_tool), + executor_keys(executor_keys), + active_plan_id(active_plan_id) { + if (!llm) { + this->llm = LLM::get_instance(); + } + if (!planning_tool) { + this->planning_tool = std::make_shared(); + } if (executor_keys.empty()) { for (const auto& [key, agent] : agents) { - executor_keys.push_back(key); + this->executor_keys.push_back(key); } } } @@ -50,10 +57,10 @@ struct FlowPlanning : public BaseFlow { // Parse the current plan to identify the first non-completed step's index and info. // Returns (None, None) if no active step is found. - void _get_current_step_info(int& current_step_index, mcp::json& step_info); + void _get_current_step_info(int& current_step_index, json& step_info); // Execute the current step with the specified agent using agent.run(). 
- std::string _execute_step(const std::shared_ptr& executor, const mcp::json& step_info); + std::string _execute_step(const std::shared_ptr& executor, const json& step_info); // Mark the current step as completed. void _mark_step_completed(); diff --git a/llm.h b/llm.h index 484ff5f..a6bd4ae 100644 --- a/llm.h +++ b/llm.h @@ -18,24 +18,17 @@ namespace humanus { class LLM { private: static std::map> _instances; - - std::string model; - std::string api_key; - int max_tokens; - double temperature; - std::unique_ptr client_ = nullptr; - int max_retries = 3; + std::unique_ptr client_; LLMSettings llm_config_; // 私有构造函数,防止直接创建实例 LLM(const std::string& config_name, const LLMSettings llm_config) : llm_config_(llm_config) { - model = llm_config.model; - api_key = llm_config.api_key; - max_tokens = llm_config.max_tokens; - temperature = llm_config.temperature; client_ = std::make_unique(llm_config.base_url); + client_->set_default_headers({ + {"Authorization", "Bearer " + llm_config_.api_key} + }); } public: @@ -65,7 +58,7 @@ public: if (message["role"] != "user" && message["role"] != "assistant" && message["role"] != "system" && message["role"] != "tool") { throw std::invalid_argument("Invalid role: " + message["role"]); } - if (!message.contains("content") && !message.contains("tool_calls")) { + if (message["content"].empty() && message["tool_calls"].empty()) { throw std::invalid_argument("Message must contain either 'content' or 'tool_calls'"); } } @@ -94,7 +87,7 @@ public: if (message["role"] != "user" && message["role"] != "assistant" && message["role"] != "system" && message["role"] != "tool") { throw std::invalid_argument("Invalid role: " + message["role"]); } - if (!message.contains("content") && !message.contains("tool_calls")) { + if (message["content"].empty() && message["tool_calls"].empty()) { throw std::invalid_argument("Message must contain either 'content' or 'tool_calls'"); } } @@ -127,23 +120,19 @@ public: formatted_messages.insert(formatted_messages.end(), 
_formatted_messages.begin(), _formatted_messages.end()); json body = { - {"model", model}, + {"model", llm_config_.model}, {"messages", formatted_messages}, - {"temperature", temperature}, - {"max_tokens", max_tokens} + {"temperature", llm_config_.temperature}, + {"max_tokens", llm_config_.max_tokens} }; std::string body_str = body.dump(); - httplib::Headers headers = { - {"Authorization", "Bearer " + api_key} - }; - int retry = 0; while (retry <= max_retries) { // send request - auto res = client_->Post(llm_config_.end_point, headers, body_str, "application/json"); + auto res = client_->Post(llm_config_.end_point, body_str, "application/json"); if (!res) { logger->error("Failed to send request: " + httplib::to_string(res.error())); @@ -176,6 +165,7 @@ public: * @param timeout 请求超时时间(秒) * @param tools 工具列表 * @param tool_choice 工具选择策略 + * @param max_retries 最大重试次数 * @return 生成的assistant message (content, tool_calls) * @throws std::invalid_argument 如果工具、工具选择或消息无效 * @throws std::runtime_error 如果API调用失败 @@ -185,7 +175,8 @@ public: const std::vector& system_msgs = {}, const std::vector tools = {}, const std::string& tool_choice = "auto", - int timeout = 60 + int timeout = 60, + int max_retries = 3 ) { if (tool_choice != "none" && tool_choice != "auto" && tool_choice != "required") { throw std::invalid_argument("Invalid tool_choice: " + tool_choice); @@ -210,10 +201,10 @@ public: } json body = { - {"model", model}, + {"model", llm_config_.model}, {"messages", formatted_messages}, - {"temperature", temperature}, - {"max_tokens", max_tokens}, + {"temperature", llm_config_.temperature}, + {"max_tokens", llm_config_.max_tokens}, {"tools", tools}, {"tool_choice", tool_choice} }; @@ -222,15 +213,11 @@ public: std::string body_str = body.dump(); - httplib::Headers headers = { - {"Authorization", "Bearer " + api_key} - }; - int retry = 0; while (retry <= max_retries) { // send request - auto res = client_->Post(llm_config_.end_point, headers, body_str, "application/json"); + 
auto res = client_->Post(llm_config_.end_point, body_str, "application/json"); if (!res) { logger->error("Failed to send request: " + httplib::to_string(res.error())); @@ -254,7 +241,6 @@ public: } throw std::runtime_error("Failed to get response from LLM"); - } }; diff --git a/logger.h b/logger.h index b76d969..7816882 100644 --- a/logger.h +++ b/logger.h @@ -14,8 +14,6 @@ namespace humanus { static spdlog::level::level_enum _print_level = spdlog::level::info; -static std::shared_ptr logger = spdlog::default_logger(); - /** * @brief 调整日志级别 * @param print_level 控制台输出日志级别 @@ -43,22 +41,23 @@ std::shared_ptr define_log_level(spdlog::level::level_enum print std::filesystem::create_directories((PROJECT_ROOT / "logs").string()); // 重置日志输出 - // 清除现有的sinks - logger->sinks().clear(); + std::shared_ptr _logger = spdlog::default_logger(); // 添加标准错误输出sink,相当于Python中的sys.stderr auto stderr_sink = std::make_shared(); stderr_sink->set_level(print_level); - logger->sinks().push_back(stderr_sink); + _logger->sinks().push_back(stderr_sink); // 添加文件sink,相当于Python中的PROJECT_ROOT / f"logs/{log_name}.log" auto file_sink = std::make_shared(log_file_path, true); file_sink->set_level(logfile_level); - logger->sinks().push_back(file_sink); + _logger->sinks().push_back(file_sink); - return logger; + return _logger; } +static std::shared_ptr logger = define_log_level(); + } // namespace humanus #endif \ No newline at end of file diff --git a/main.cpp b/main.cpp index ad8755b..8dc05d7 100644 --- a/main.cpp +++ b/main.cpp @@ -1,9 +1,46 @@ #include "agent/manus.h" #include "logger.h" +#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) +#include +#include +#elif defined (_WIN32) +#define WIN32_LEAN_AND_MEAN +#ifndef NOMINMAX +#define NOMINMAX +#endif +#include +#include +#endif + using namespace humanus; +#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32) +static void sigint_handler(int signo) { + if (signo == SIGINT) { + 
logger->warn("Goodbye!"); + exit(0); + } +} +#endif + int main() { + + // ctrl+C handling + { +#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) + struct sigaction sigint_action; + sigint_action.sa_handler = sigint_handler; + sigemptyset (&sigint_action.sa_mask); + sigint_action.sa_flags = 0; + sigaction(SIGINT, &sigint_action, NULL); +#elif defined (_WIN32) + auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL { + return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false; + }; + SetConsoleCtrlHandler(reinterpret_cast(console_ctrl_handler), true); +#endif + } Manus agent = Manus(); while (true) { std::string prompt; diff --git a/mcp b/mcp index 84e2e09..5bc09d6 160000 --- a/mcp +++ b/mcp @@ -1 +1 @@ -Subproject commit 84e2e092f0b90e44aede47469dc58c1b78e7db36 +Subproject commit 5bc09d6ab03c3d793c3550954fc5cd959d017303 diff --git a/prompt.h b/prompt.h index 1ef528d..e48058a 100644 --- a/prompt.h +++ b/prompt.h @@ -13,11 +13,9 @@ const char* NEXT_STEP_PROMPT = R"(You can interact with the computer using Pytho PythonExecute: Execute Python code to interact with the computer system, data processing, automation tasks, etc. -FileSaver: Save files locally, such as txt, py, html, etc. +FileSystem: Read/write files locally, such as txt, py, html, etc. Create/list/delete directories, move files/directories, search for files and get file metadata. -BrowserUseTool: Open, browse, and use web browsers.If you open a local HTML file, you must provide the absolute path to the file. - -GoogleSearch: Perform web information retrieval +Puppeteer: Open, browse, and get screenshots of web pages using Puppeteer, a headless Chrome browser. Based on user needs, proactively select the most appropriate tool or combination of tools. For complex tasks, you can break down the problem and use different tools step by step to solve it. 
After using each tool, clearly explain the execution results and suggest the next steps.)"; } // namespace manus @@ -44,7 +42,7 @@ namespace swe { const char* SYSTEM_PROMPT = R"(SETTING: You are an autonomous programmer, and you're working directly in the command line with a special interface. The special interface consists of a file editor that shows you {WINDOW} lines of a file at a time. -In addition to typical bash commands, you can also use specific commands to help you navigate and edit files. +In addition to typical shell commands, you can also use specific commands to help you navigate and edit files. To call a command, you need to invoke it with a function call/tool call. Please note that THE EDIT COMMAND REQUIRES PROPER INDENTATION. @@ -54,7 +52,7 @@ RESPONSE FORMAT: Your shell prompt is formatted as follows: (Open file: ) (Current directory: ) -bash-$ +shell-$ First, you should _always_ include a general thought about what you're going to do next. Then, for every response, you must include exactly _ONE_ tool call/function call. @@ -66,7 +64,7 @@ Note that the environment does NOT support interactive session commands (e.g. 
py const char* NEXT_STEP_TEMPLATE = R"({observation} (Open file: {open_file}) (Current directory: {working_dir}) -bash-$)"; +shell-$)"; } // namespace swe namespace toolcall { diff --git a/schema.h b/schema.h index 7b9ce81..3e3d273 100644 --- a/schema.h +++ b/schema.h @@ -52,7 +52,7 @@ struct ToolCall { json tool_call; tool_call["id"] = id; tool_call["type"] = type; - tool_call["function"] = function.to_json().dump(); + tool_call["function"] = function.to_json(); return tool_call; } @@ -117,7 +117,7 @@ struct Message { json to_json() const { json message; message["role"] = role; - if (!content.is_null()) { + if (!content.empty()) { message["content"] = content; } if (!tool_calls.empty()) { @@ -152,7 +152,7 @@ struct Message { return Message("assistant", content); } - static Message tool_message(const json& content, const std::string& name = "", const std::string& tool_call_id = "") { + static Message tool_message(const json& content, const std::string& tool_call_id = "", const std::string& name = "") { return Message("tool", content, name, tool_call_id); } @@ -164,17 +164,6 @@ struct Message { * @return Message with tool calls */ static Message from_tool_calls(const std::vector& tool_calls, const json& content = json::object()) { - std::vector formatted_calls; - formatted_calls.reserve(tool_calls.size()); - - for (const auto& call : tool_calls) { - json formatted_call; - formatted_call["id"] = call.id; - formatted_call["type"] = "function"; - formatted_call["function"] = call.function.to_json(); - formatted_calls.push_back(formatted_call); - } - return Message("assistant", content, "", "", tool_calls); } }; @@ -199,11 +188,6 @@ struct Memory { add_message(message); } } - - // Set the messages - void set_messages(const std::vector& messages) { - this->messages = messages; - } // Clear all messages void clear() { @@ -212,6 +196,7 @@ struct Memory { // Get the last n messages std::vector get_recent_messages(int n) const { + n = std::min(n, 
static_cast(messages.size())); return std::vector(messages.end() - n, messages.end()); } @@ -225,7 +210,6 @@ struct Memory { } }; - } // namespace humanus #endif // HUMANUS_SCHEMA_H diff --git a/tool/base.h b/tool/base.h index 0c5dac5..60f19ed 100644 --- a/tool/base.h +++ b/tool/base.h @@ -1,8 +1,12 @@ #ifndef HUMANUS_TOOL_BASE_H #define HUMANUS_TOOL_BASE_H +#include "toml.hpp" #include "../schema.h" #include "../agent/base.h" +#include "../mcp/include/mcp_client.h" +#include "../mcp/include/mcp_stdio_client.h" +#include "../mcp/include/mcp_sse_client.h" #include namespace humanus { @@ -13,12 +17,12 @@ struct BaseTool { std::string description; json parameters; - std::unique_ptr client_; + std::unique_ptr _client; BaseTool(const std::string& name, const std::string& description, const json& parameters) : name(name), description(description), parameters(parameters) { // 从配置文件加载工具配置 - _config = MCPToolConfig::load_from_toml(name); + auto _config = MCPToolConfig::load_from_toml(name); if (_config.type == "stdio") { std::string command = _config.command; @@ -38,15 +42,21 @@ struct BaseTool { } } - client_->initialize(name + "_client", "0.0.1"); + _client->initialize(name + "_client", "0.0.1"); + } + + // Execute the tool with given parameters. + ToolResult operator()(const json& arguments) { + return execute(arguments); } + // Execute the tool with given parameters. 
virtual ToolResult execute(const json& arguments) { try { if (!_client) { throw std::runtime_error("MCP 客户端未初始化"); } - json result = _client->tool_call(name, arguments); + json result = _client->call_tool(name, arguments); bool is_error = result.value("isError", false); // 根据是否有错误返回不同的ToolResult if (is_error) { @@ -80,16 +90,16 @@ struct ToolResult { ToolResult(const json& output, const json& error = {}, const json& system = {}) : output(output), error(error), system(system) {} - bool is_null() const { - return output.is_null() && error.is_null() && system.is_null(); + bool empty() const { + return output.empty() && error.empty() && system.empty(); } ToolResult operator+(const ToolResult& other) const { auto combined_field = [](const json& field, const json& other_field) { - if (field.is_null()) { + if (field.empty()) { return other_field; } - if (other_field.is_null()) { + if (other_field.empty()) { return field; } json result = json::array(); @@ -114,15 +124,10 @@ struct ToolResult { } std::string to_string() const { - return !error.is_null() ? "Error: " + error.dump() : output.dump(); + return !error.empty() ? "Error: " + error.dump() : output.dump(); } }; -// A ToolResult that can be rendered as a CLI output. -struct CLIResult : ToolResult { - -}; - // A ToolResult that represents a failure. 
struct ToolError : ToolResult { ToolError(const std::string& error) : ToolResult({}, error) {} @@ -134,6 +139,10 @@ struct AgentAware : ToolResult { // 从config_mcp.toml中读取工具配置 struct MCPToolConfig { + std::string type; + std::string host; + std::string port; + std::string url; std::string command; std::vector args; json env_vars = json::object(); @@ -149,50 +158,75 @@ struct MCPToolConfig { } // 解析TOML文件 - auto data = toml::parse_file(config_path.string()); + const auto& data = toml::parse_file(config_path.string()); // 检查工具配置是否存在 if (!data.contains(tool_name) || !data[tool_name].is_table()) { throw std::runtime_error("MCP配置文件中找不到工具配置: " + tool_name); } - auto& tool_table = data[tool_name].as_table(); - - // 读取命令 - if (tool_table.contains("command") && tool_table["command"].is_string()) { - config.command = tool_table["command"].as_string(); + const auto& tool_table = *data[tool_name].as_table(); + + // 读取类型 + if (!tool_table.contains("type") || !tool_table["type"].is_string()) { + throw std::runtime_error("工具配置缺少type字段: " + tool_name); + } + config.type = tool_table["type"].as_string()->get(); + + if (config.type == "stdio") { + // 读取命令 + if (!tool_table.contains("command") || !tool_table["command"].is_string()) { + throw std::runtime_error("stdio类型工具配置缺少command字段: " + tool_name); + } + config.command = tool_table["command"].as_string()->get(); + + // 读取参数(如果有) + if (tool_table.contains("args") && tool_table["args"].is_array()) { + const auto& args_array = *tool_table["args"].as_array(); + for (const auto& arg : args_array) { + if (arg.is_string()) { + config.args.push_back(arg.as_string()->get()); + } + } + } + + // 读取环境变量 + std::string env_section = tool_name + ".env"; + if (data.contains(env_section) && data[env_section].is_table()) { + const auto& env_table = *data[env_section].as_table(); + for (const auto& [key, value] : env_table) { + if (value.is_string()) { + config.env_vars[key] = value.as_string()->get(); + } else if (value.is_integer()) { + 
config.env_vars[key] = value.as_integer()->get(); + } else if (value.is_floating_point()) { + config.env_vars[key] = value.as_floating_point()->get(); + } else if (value.is_boolean()) { + config.env_vars[key] = value.as_boolean()->get(); + } + } + } + } else if (config.type == "sse") { + // 读取host和port或url + if (tool_table.contains("url") && tool_table["url"].is_string()) { + config.url = tool_table["url"].as_string()->get(); + } else { + if (!tool_table.contains("host") || !tool_table["host"].is_string()) { + throw std::runtime_error("sse类型工具配置缺少host字段: " + tool_name); + } + config.host = tool_table["host"].as_string()->get(); + + if (!tool_table.contains("port") || !tool_table["port"].is_string()) { + throw std::runtime_error("sse类型工具配置缺少port字段: " + tool_name); + } + config.port = tool_table["port"].as_string()->get(); + } } else { - throw std::runtime_error("工具配置缺少command字段: " + tool_name); - } - - // 读取参数(如果有) - if (tool_table.contains("args") && tool_table["args"].is_array()) { - auto& args_array = tool_table["args"].as_array(); - for (const auto& arg : args_array) { - if (arg.is_string()) { - config.args.push_back(arg.as_string()); - } - } - } - - // 读取环境变量 - std::string env_section = tool_name + ".env"; - if (data.contains(env_section) && data[env_section].is_table()) { - auto& env_table = data[env_section].as_table(); - for (const auto& [key, value] : env_table) { - if (value.is_string()) { - config.env_vars[key] = value.as_string(); - } else if (value.is_integer()) { - config.env_vars[key] = value.as_integer(); - } else if (value.is_floating()) { - config.env_vars[key] = value.as_floating(); - } else if (value.is_boolean()) { - config.env_vars[key] = value.as_boolean(); - } - } + throw std::runtime_error("不支持的工具类型: " + config.type); } } catch (const std::exception& e) { std::cerr << "加载MCP工具配置失败: " << e.what() << std::endl; + throw; } return config; diff --git a/tool/create_chat_completion.cpp b/tool/create_chat_completion.cpp index 155cd56..0bd4518 100644 
--- a/tool/create_chat_completion.cpp +++ b/tool/create_chat_completion.cpp @@ -14,28 +14,11 @@ json CreateChatCompletion::_build_parameters() const { }}, {"required", required} }; - } else if (response_type == "object") { - // 处理对象类型 - return { - {"type", "object"}, - {"properties", json::object()}, - {"required", required} - }; - } else if (response_type == "array") { - // 处理数组类型 - return { - {"type", "object"}, - {"properties", { - {"response", { - {"type", "array"}, - {"items", json::object()} - }} - }}, - {"required", required} - }; - } else { - throw std::runtime_error("Invalid response type: " + response_type); } + + // TODO: handle other types + + return _create_type_schema(response_type); } json CreateChatCompletion::_create_type_schema(const std::string& type_hint) const { @@ -83,7 +66,16 @@ json CreateChatCompletion::_create_type_schema(const std::string& type_hint) con } // 默认返回字符串类型 - return _build_parameters(); + return { + {"type", "object"}, + {"properties", { + {"response", { + {"type", "string"}, + {"description", "The response text that should be delivered to the user."} + }} + }}, + {"required", required} + }; } json CreateChatCompletion::_get_type_info(const std::string& type_hint) const { @@ -108,25 +100,29 @@ json CreateChatCompletion::_create_union_schema(const std::vector& }; } +// Execute the chat completion with type conversion. ToolResult CreateChatCompletion::execute(const json& args) { - std::vector req_fields = args.contains("req") ? args["req"].get>() : required; + std::vector req_fields = args.contains("required") ? args["required"].get>() : required; + // Handle case when required is a list if (!req_fields.empty()) { if (req_fields.size() == 1) { std::string required_field = req_fields[0]; - return args.contains(required_field) ? args.at(required_field) : ""; + return ToolResult(args.contains(required_field) ? 
args.at(required_field) : ""); } else { - // 返回多个字段作为对象 + // Return multiple fields as an object json result = json::object(); for (const auto& field : req_fields) { result[field] = args.contains(field) ? args.at(field) : ""; } - return result; + return ToolResult(result); } } else { std::string required_field = "response"; - return args.contains(required_field) ? args.at(required_field) : ""; + return ToolResult(args.contains(required_field) ? args.at(required_field) : ""); } + + // TODO: handle other types (Only string and array are supported for now) } } // namespace humanuse \ No newline at end of file diff --git a/tool/create_chat_completion.h b/tool/create_chat_completion.h index b53aa64..28c3722 100644 --- a/tool/create_chat_completion.h +++ b/tool/create_chat_completion.h @@ -26,9 +26,9 @@ struct CreateChatCompletion : BaseTool { }; std::string response_type; - std::vector required = {"response"}; + std::vector required; - CreateChatCompletion(const std::string& response_type) : BaseTool(name_, description_, json::object()), response_type(response_type) { + CreateChatCompletion(const std::string& response_type = "string", const std::vector& required = {"response"}) : BaseTool(name_, description_, json::object()), response_type(response_type), required(required) { parameters = _build_parameters(); } diff --git a/tool/filesystem.h b/tool/filesystem.h index 50109ba..f424906 100644 --- a/tool/filesystem.h +++ b/tool/filesystem.h @@ -69,11 +69,20 @@ struct FileSystem : BaseTool { if (!_client) { return ToolError("Failed to initialize shell client"); } + + // 处理命令参数 + std::string tool; + if (args.contains("tool")) { + if (args["tool"].is_string()) { + tool = args["tool"].get(); + } else { + return ToolError("Invalid tool format"); + } + } else { + return ToolError("Tool is required"); + } - json tool_args = args; - tool_args.erase("tool"); - - json result = _client->call_tool(args["tool"].get(), tool_args); + json result = _client->call_tool("puppeteer_" + tool, 
args); bool is_error = result.value("isError", false); diff --git a/tool/planning.cpp b/tool/planning.cpp index a3c574e..20899aa 100644 --- a/tool/planning.cpp +++ b/tool/planning.cpp @@ -3,61 +3,65 @@ namespace humanus { /** - * Execute the planning tool with the given command and parameters. - * - * Parameters: - * - command: The operation to perform - * - plan_id: Unique identifier for the plan - * - title: Title for the plan (used with create command) - * - steps: List of steps for the plan (used with create command) - * - step_index: Index of the step to update (used with mark_step command) - * - step_status: Status to set for a step (used with mark_step command) - * - step_notes: Additional notes for a step (used with mark_step command) - */ +* Execute the planning tool with the given command and parameters. +* +* Parameters: +* - command: The operation to perform +* - plan_id: Unique identifier for the plan +* - title: Title for the plan (used with create command) +* - steps: List of steps for the plan (used with create command) +* - step_index: Index of the step to update (used with mark_step command) +* - step_status: Status to set for a step (used with mark_step command) +* - step_notes: Additional notes for a step (used with mark_step command) +*/ ToolResult PlanningTool::execute(const json& args) { - std::string command = args["command"]; - std::string plan_id = args["plan_id"]; - std::string title = args["title"]; - std::vector steps = args["steps"]; - int step_index = args["step_index"]; - std::string step_status = args["step_status"]; - std::string step_notes = args["step_notes"]; - - if (command == "create") { - return _create_plan(plan_id, title, steps); - } else if (command == "update") { - return _update_plan(plan_id, title, steps); - } else if (command == "list") { - return _list_plans(); - } else if (command == "get") { - return _get_plan(plan_id); - } else if (command == "set_active") { - return _set_active_plan(plan_id); - } else if (command 
== "mark_step") { - return _mark_step(plan_id, step_index, step_status, step_notes); - } else if (command == "delete") { - return _delete_plan(plan_id); - } else { - throw std::runtime_error("Unrecognized command: " + command + ". Allowed commands are: create, update, list, get, set_active, mark_step, delete"); + try { + std::string command = args.value("command", ""); + std::string plan_id = args.value("plan_id", ""); + std::string title = args.value("title", ""); + std::vector<std::string> steps = args.value("steps", std::vector<std::string>()); + int step_index = args.value("step_index", -1); + std::string step_status = args.value("step_status", ""); + std::string step_notes = args.value("step_notes", ""); + + if (command == "create") { + return _create_plan(plan_id, title, steps); + } else if (command == "update") { + return _update_plan(plan_id, title, steps); + } else if (command == "list") { + return _list_plans(); + } else if (command == "get") { + return _get_plan(plan_id); + } else if (command == "set_active") { + return _set_active_plan(plan_id); + } else if (command == "mark_step") { + return _mark_step(plan_id, step_index, step_status, step_notes); + } else if (command == "delete") { + return _delete_plan(plan_id); + } else { + throw std::runtime_error("Unrecognized command: " + command + ". Allowed commands are: create, update, list, get, set_active, mark_step, delete"); + } + } catch (const std::exception& e) { + return ToolError(e.what()); + } } // Create a new plan with the given ID, title, and steps. ToolResult PlanningTool::_create_plan(const std::string& plan_id, const std::string& title, const std::vector<std::string>& steps) { if (plan_id.empty()) { - throw std::runtime_error("Parameter `plan_id` is required for command: create"); + return ToolError("Parameter `plan_id` is required for command: create"); } if (plans.find(plan_id) != plans.end()) { - throw std::runtime_error("Plan with ID " + plan_id + " already exists.
Use 'update' to modify existing plans."); + return ToolError("Plan with ID " + plan_id + " already exists. Use 'update' to modify existing plans."); } if (title.empty()) { - throw std::runtime_error("Parameter `title` is required for command: create"); + return ToolError("Parameter `title` is required for command: create"); } if (steps.empty()) { - throw std::runtime_error("Parameter `steps` must be a non-empty list of strings for command: create"); + return ToolError("Parameter `steps` must be a non-empty list of strings for command: create"); } // Create a new plan with initialized step statuses @@ -70,7 +74,7 @@ ToolResult PlanningTool::_create_plan(const std::string& plan_id, const std::str }; plans[plan_id] = plan; - _current_plan_id = plan_id; + _current_plan_id = plan_id; // Set as active plan return ToolResult( "Plan created successfully with ID: " + plan_id + "\n\n" + _format_plan(plan) @@ -80,11 +84,11 @@ ToolResult PlanningTool::_create_plan(const std::string& plan_id, const std::str // Update an existing plan with new title or steps. ToolResult PlanningTool::_update_plan(const std::string& plan_id, const std::string& title, const std::vector<std::string>& steps) { if (plan_id.empty()) { - throw std::runtime_error("Parameter `plan_id` is required for command: update"); + return ToolError("Parameter `plan_id` is required for command: update"); } if (plans.find(plan_id) == plans.end()) { - throw std::runtime_error("No plan found with ID: " + plan_id); + return ToolError("No plan found with ID: " + plan_id); } json plan = plans[plan_id]; @@ -119,14 +123,13 @@ ToolResult PlanningTool::_update_plan(const std::string& plan_id, const std::str plan["step_notes"] = new_step_notes; } - plans[plan_id] = plan; + plans[plan_id] = plan; // Note: Remember to update the plan in the map return ToolResult( "Plan updated successfully with ID: " + plan_id + "\n\n" + _format_plan(plan) ); } - // List all available plans.
ToolResult PlanningTool::_list_plans() { if (plans.empty()) { @@ -143,7 +146,7 @@ ToolResult PlanningTool::_list_plans() { }); int total = plan["steps"].size(); std::string progress = std::to_string(completed) + "/" + std::to_string(total) + " steps completed"; - output += "• " + plan_id + current_marker + ": " + plan["title"].get<std::string>() + " - " + progress + "\n"; + output += "• " + plan_id + current_marker + ": " + plan.value("title", "Unknown Plan") + " - " + progress + "\n"; } return ToolResult(output); @@ -156,13 +159,13 @@ ToolResult PlanningTool::_get_plan(const std::string& plan_id) { if (plan_id.empty()) { // If no plan_id is provided, use the current active plan if (_current_plan_id.empty()) { - throw std::runtime_error("No active plan. Please specify a plan_id or set an active plan."); + return ToolError("No active plan. Please specify a plan_id or set an active plan."); } _plan_id = _current_plan_id; } if (plans.find(_plan_id) == plans.end()) { - throw std::runtime_error("No plan found with ID: " + _plan_id); + return ToolError("No plan found with ID: " + _plan_id); } json plan = plans[_plan_id]; @@ -172,11 +175,11 @@ ToolResult PlanningTool::_get_plan(const std::string& plan_id) { // Set a plan as the active plan. ToolResult PlanningTool::_set_active_plan(const std::string& plan_id) { if (plan_id.empty()) { - throw std::runtime_error("Parameter `plan_id` is required for command: set_active"); + return ToolError("Parameter `plan_id` is required for command: set_active"); } if (plans.find(plan_id) == plans.end()) { - throw std::runtime_error("No plan found with ID: " + plan_id); + return ToolError("No plan found with ID: " + plan_id); } _current_plan_id = plan_id; @@ -192,24 +195,24 @@ ToolResult PlanningTool::_mark_step(const std::string& plan_id, int step_index, if (plan_id.empty()) { // If no plan_id is provided, use the current active plan if (_current_plan_id.empty()) { - throw std::runtime_error("No active plan.
Please specify a plan_id or set an active plan."); + return ToolError("No active plan. Please specify a plan_id or set an active plan."); } _plan_id = _current_plan_id; } if (plans.find(_plan_id) == plans.end()) { - throw std::runtime_error("No plan found with ID: " + _plan_id); + return ToolError("No plan found with ID: " + _plan_id); } json plan = plans[_plan_id]; if (step_index < 0 || step_index >= plan["steps"].size()) { - throw std::runtime_error("Invalid step index: " + std::to_string(step_index) + ". Valid indices range from 0 to " + std::to_string((int)plan["steps"].size() - 1)); + return ToolError("Invalid step index: " + std::to_string(step_index) + ". Valid indices range from 0 to " + std::to_string((int)plan["steps"].size() - 1)); } if (!step_status.empty()) { if (step_status != "not_started" && step_status != "in_progress" && step_status != "completed" && step_status != "blocked") { - throw std::runtime_error("Invalid step status: " + step_status + ". Valid statuses are: not_started, in_progress, completed, blocked"); + return ToolError("Invalid step status: " + step_status + ". Valid statuses are: not_started, in_progress, completed, blocked"); } plan["step_statuses"][step_index] = step_status; } @@ -228,18 +231,18 @@ ToolResult PlanningTool::_mark_step(const std::string& plan_id, int step_index, // Delete a plan. 
ToolResult PlanningTool::_delete_plan(const std::string& plan_id) { if (plan_id.empty()) { - throw std::runtime_error("Parameter `plan_id` is required for command: delete"); + return ToolError("Parameter `plan_id` is required for command: delete"); } if (plans.find(plan_id) == plans.end()) { - throw std::runtime_error("No plan found with ID: " + plan_id); + return ToolError("No plan found with ID: " + plan_id); } plans.erase(plan_id); // If the deleted plan was the active plan, clear the active plan if (_current_plan_id == plan_id) { - _current_plan_id = ""; + _current_plan_id.clear(); } return ToolResult( @@ -249,14 +252,15 @@ ToolResult PlanningTool::_delete_plan(const std::string& plan_id) { // Format a plan for display. std::string PlanningTool::_format_plan(const json& plan) { - std::string output = "Plan ID: " + plan["plan_id"].get<std::string>() + "\n"; - int current_length = output.length(); + std::stringstream output_ss; + output_ss << "Plan ID: " << plan["plan_id"].get<std::string>() << "\n"; + int current_length = output_ss.str().length(); for (int i = 0; i < current_length; i++) { - output += "="; + output_ss << "="; } - output += "\n\n"; + output_ss << "\n\n"; // Calculate progress statistics int total_steps = plan["steps"].size(); @@ -273,17 +277,16 @@ std::string PlanningTool::_format_plan(const json& plan) { return status == "not_started"; }); - // Add progress statistics to the output - output += "Progress: " + std::to_string(completed_steps) + "/" + std::to_string(total_steps) + " steps completed\n"; + output_ss << "Progress: " << completed_steps << "/" << total_steps << " steps completed "; if (total_steps > 0) { double percentage = (double)completed_steps / total_steps * 100; - output += "(" + std::to_string(std::round(percentage * 10) / 10) + "%)\n"; + output_ss << "(" << std::fixed << std::setprecision(1) << percentage << "%)\n"; } else { - output += "(0%)\n"; + output_ss << "(0%)\n"; } - output += "Status: " + std::to_string(completed_steps) + " completed, " +
std::to_string(in_progress_steps) + " in progress, " + std::to_string(blocked_steps) + " blocked, " + std::to_string(not_started_steps) + " not started\n"; - output += "Steps:\n"; + output_ss << "Status: " << completed_steps << " completed, " << in_progress_steps << " in progress, " << blocked_steps << " blocked, " << not_started_steps << " not started\n\n"; + output_ss << "Steps:\n"; static std::map<std::string, std::string> status_symbols = { {"not_started", "[ ]"}, @@ -298,11 +301,13 @@ std::string PlanningTool::_format_plan(const json& plan) { std::string step_status = plan["step_statuses"][i]; std::string step_notes = plan["step_notes"][i]; std::string status_symbol = status_symbols.find(step_status) != status_symbols.end() ? status_symbols[step_status] : "[ ]"; - output += std::to_string(i) + ". " + status_symbols[step_status] + " " + step + "\n"; + output_ss << i << ". " + status_symbols[step_status] << " " << step << "\n"; if (!step_notes.empty()) { - output += " Notes: " + step_notes + "\n"; + output_ss << " Notes: " << step_notes << "\n"; } } + + return output_ss.str(); } } // namespace humanus \ No newline at end of file diff --git a/tool/puppeteer.h b/tool/puppeteer.h index c00dcd4..d230aef 100644 --- a/tool/puppeteer.h +++ b/tool/puppeteer.h @@ -51,10 +51,19 @@ struct Puppeteer : BaseTool { return ToolError("Failed to initialize puppeteer client"); } - json tool_args = args; - tool_args.erase("tool"); + // 处理命令参数 + std::string tool; + if (args.contains("tool")) { + if (args["tool"].is_string()) { + tool = args["tool"].get<std::string>(); + } else { + return ToolError("Invalid tool format"); + } + } else { + return ToolError("Tool is required"); + } - json result = _client->call_tool("puppeteer_" + args["tool"].get<std::string>(), tool_args); + json result = _client->call_tool("puppeteer_" + tool, args); bool is_error = result.value("isError", false); diff --git a/tool/python_execute.h b/tool/python_execute.h index 472aa7d..0d6abac 100644 --- a/tool/python_execute.h +++ b/tool/python_execute.h @@ -26,30
+26,6 @@ struct PythonExecute : BaseTool { }; PythonExecute() : BaseTool(name_, description_, parameters_) {} - - ToolResult execute(const json& arguments) override { - try { - // 创建MCP客户端 - mcp::client client("localhost", 8088); - - // 初始化客户端 - client.initialize("OpenManusCppClient", "0.1.0"); - - client.set_timeout(arguments["timeout"].get<int>()); - - // 调用工具 - json tool_params = {{"code", arguments["code"]}}; - json result = client.call_tool("python_execute", tool_params); - - if (result["isError"]) { - return ToolError(result["error"].get<std::string>()); - } - - return ToolResult(result["content"].get<std::string>()); - } catch (const std::exception& e) { - return ToolError(e.what()); - } - } }; } diff --git a/tool/shell.h b/tool/shell.h index dff670d..1107910 100644 --- a/tool/shell.h +++ b/tool/shell.h @@ -54,42 +54,7 @@ struct Shell : BaseTool { "required": ["command"] })json"); - std::unique_ptr<mcp::client> _client; - bool _initialized = false; - MCPToolConfig _config; - - std::string last_request_id_; - - Shell() : BaseTool(name_, description_, parameters_) { - - } - - ~Shell() { - // 确保客户端正确关闭 - if (_client && _client->is_running()) { - try { - if (!last_request_id_.empty()) { - _client->send_notification("notifications/cancelled", { - {"requestId", last_request_id_}, - {"reason", "Client shutdown"} - }); - } - } catch (...)
{ - // 忽略关闭时的错误 - } - } - } - - // 初始化客户端连接 - bool initialize() { - if (_initialized) return true; - - bool success = _client->initialize("humanus", "1.0.0"); - if (success) { - _initialized = true; - } - return success; - } + Shell() : BaseTool(name_, description_, parameters_) {} ToolResult execute(const json& args) override { try { diff --git a/tool/terminate.h b/tool/terminate.h index c8344ef..1d96fe5 100644 --- a/tool/terminate.h +++ b/tool/terminate.h @@ -23,9 +23,10 @@ struct Terminate : BaseTool { Terminate() : BaseTool(name_, description_, parameters_) {} + // Finish the current execution ToolResult execute(const json& arguments) override { return ToolResult{ - "The interaction has been completed with status: " + arguments["status"].get<std::string>() + "The interaction has been completed with status: " + arguments.value("status", "unknown") }; } }; diff --git a/tool/tool_collection.h b/tool/tool_collection.h index d0b27da..fc7b5e7 100644 --- a/tool/tool_collection.h +++ b/tool/tool_collection.h @@ -26,7 +26,7 @@ struct ToolCollection { ToolResult execute(const std::string& name, const json& args) const { auto tool_iter = tools_map.find(name); if (tool_iter == tools_map.end()) { - return ToolError("Tool not found: " + name); + return ToolError("Tool " + name + " is invalid"); } try { return tool_iter->second->execute(args); @@ -35,19 +35,19 @@ struct ToolCollection { } } - // Execute all tools in the collection sequentially. - std::vector<ToolResult> execute_all(const json& args) const { - std::vector<ToolResult> results; - for (auto tool : tools) { - try { - auto result = tool->execute(args); - results.push_back(result); - } catch (const std::exception& e) { - results.push_back(ToolError(e.what())); - } - } - return results; - } + // // Execute all tools in the collection sequentially.
+ // std::vector<ToolResult> execute_all(const json& args) const { // No reference now + // std::vector<ToolResult> results; + // for (auto tool : tools) { + // try { + // auto result = tool->execute(args); + // results.push_back(result); + // } catch (const std::exception& e) { + // results.push_back(ToolError(e.what())); + // } + // } + // return results; + // } void add_tool(const std::shared_ptr<BaseTool>& tool) { tools.push_back(tool);