From 16e34c4eee67f1a16fad13679cb7cf2ace35c00c Mon Sep 17 00:00:00 2001
From: hkr04
Date: Sun, 13 Apr 2025 14:25:53 +0800
Subject: [PATCH] update README and mcp

---
 README.md   | 16 +++++++++++-----
 mcp         |  2 +-
 src/llm.cpp | 22 ++++++++++++++++++++--
 3 files changed, 32 insertions(+), 8 deletions(-)

diff --git a/README.md b/README.md
index ab5f72d..3740162 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,15 @@
 
 # humanus.cpp
 
-Humanus (meaning "human" in Latin) is a lightweight framework inspired by [OpenManus](https://github.com/mannaandpoem/OpenManus) and [mem0](https://github.com/mem0ai/mem0), integrated with the Model Context Protocol (MCP). `humanus.cpp` enables more flexible tool choices, and provides a foundation for building powerful local LLM agents.
+Humanus (meaning "human" in Latin) is a **lightweight C++ framework** inspired by [OpenManus](https://github.com/mannaandpoem/OpenManus) and [mem0](https://github.com/mem0ai/mem0), integrated with the Model Context Protocol (MCP).
+
+**Key Features:**
+- **C++ Implementation**: Core functionality written in efficient C++ for optimal performance and resource utilization
+- **Lightweight Design**: Minimalist architecture with minimal dependencies, suitable for resource-constrained environments
+- **Cross-platform Compatibility**: Full support for Unix, MacOS, and Windows systems
+- **MCP Protocol Integration**: Seamless integration with Model Context Protocol for standardized tool interactions
+- **Vectorized Memory Storage**: Efficient similarity search based on HNSW algorithm for intelligent context retrieval
+- **Modular Architecture**: Easy to extend and customize, supporting various LLM models and tool integrations
 
 Let's embrace local LLM agents **w/** humanus.cpp!
 
@@ -27,7 +35,7 @@ To set up your custom configuration, follow these steps:
 
 1. Copy all files from `config/example` to `config`.
 2. Replace `base_url`, `api_key`, .etc in `config/config_llm.toml` and other configurations in `config/config*.toml` according to your need.
-   > Note: `llama-server` in [llama.cpp](https://github.com/ggml-org/llama.cpp) also supports embedding models.
+   > Note: `llama-server` in [llama.cpp](https://github.com/ggml-org/llama.cpp) also supports embedding models for vectorized memory.
 3. Fill in `args` after `"@modelcontextprotocol/server-filesystem"` for `filesystem` to control the access to files. For example:
 ```
 [filesystem]
@@ -38,7 +46,6 @@ args = ["-y", "/Users/{Username}/Desktop", "other/path/to/your/files]
 ```
 
-4. Ensure all requirements for the MCP servers are installed. For example, run `npx playwright install` first for `playwright`.
 
 ### `mcp_server`
 
@@ -50,7 +57,7 @@ Start a MCP server with tool `python_execute` on port 8895 (or pass the port as the first argument):
 ```
 
 ```shell
-.\build\bin\Release\mcp_server.exe # Windows
+.\build\bin\Release\mcp_server.exe  # Windows
 ```
 
 ### `humanus_cli`
@@ -112,7 +119,6 @@ Configure it in Cursor:
 
 > What if add `humanus` to `mcp_servers`? It might be interesting.
 
-
 ## Acknowledgement

diff --git a/mcp b/mcp
index 88237f2..3c0a2a7 160000
--- a/mcp
+++ b/mcp
@@ -1 +1 @@
-Subproject commit 88237f2eaae1dc89b32d2a693ac2bd15fb6ad269
+Subproject commit 3c0a2a730ad9da3da61cd7a35ab8bbe5ff078c11
diff --git a/src/llm.cpp b/src/llm.cpp
index e48aff7..64d5184 100644
--- a/src/llm.cpp
+++ b/src/llm.cpp
@@ -315,8 +315,8 @@ json LLM::ask_tool(
     }
 
     // If the logger has a file sink, log the request body
-    if (logger->sinks().size() > 1) {
-        auto file_sink = std::dynamic_pointer_cast(logger->sinks()[1]);
+    for (const auto& sink : logger->sinks()) {
+        auto file_sink = std::dynamic_pointer_cast(sink);
         if (file_sink) {
             file_sink->log(spdlog::details::log_msg(
                 spdlog::source_loc{},
@@ -325,6 +325,24 @@
                 "Failed to get response from LLM. Full request body: " + body_str
             ));
         }
+        auto stderr_sink = std::dynamic_pointer_cast(sink);
+        if (stderr_sink) {
+            stderr_sink->log(spdlog::details::log_msg(
+                spdlog::source_loc{},
+                logger->name(),
+                spdlog::level::debug,
+                "Failed to get response from LLM. See log file for full request body."
+            ));
+        }
+        auto session_sink = std::dynamic_pointer_cast(sink);
+        if (session_sink) {
+            session_sink->log(spdlog::details::log_msg(
+                spdlog::source_loc{},
+                logger->name(),
+                spdlog::level::debug,
+                "Failed to get response from LLM. See log file for full request body."
+            ));
+        }
     }
 
     throw std::runtime_error("Failed to get response from LLM");
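
For context, the sink dispatch that `ask_tool` now performs boils down to the pattern sketched below. The concrete sink types here are assumptions for illustration only: spdlog's stock `basic_file_sink_mt` and `stderr_color_sink_mt` stand in for whatever file and stderr sinks the logger actually registers, and the project-specific session sink is omitted. The point of the pattern is that the oversized request body is written only to the file sink, while the other sinks get a short notice pointing at the log file.

```cpp
// Minimal sketch of per-sink dispatch over a spdlog logger.
// Assumed sink types: basic_file_sink_mt (file) and stderr_color_sink_mt (console).
#include <memory>
#include <string>

#include <spdlog/spdlog.h>
#include <spdlog/sinks/basic_file_sink.h>
#include <spdlog/sinks/stderr_color_sinks.h>

// Write the full request body only to the file sink; other sinks get a short notice.
void log_request_failure(const std::shared_ptr<spdlog::logger>& logger,
                         const std::string& body_str) {
    for (const auto& sink : logger->sinks()) {
        if (auto file_sink = std::dynamic_pointer_cast<spdlog::sinks::basic_file_sink_mt>(sink)) {
            file_sink->log(spdlog::details::log_msg(
                spdlog::source_loc{},
                logger->name(),
                spdlog::level::debug,
                "Failed to get response from LLM. Full request body: " + body_str));
        } else if (auto stderr_sink = std::dynamic_pointer_cast<spdlog::sinks::stderr_color_sink_mt>(sink)) {
            stderr_sink->log(spdlog::details::log_msg(
                spdlog::source_loc{},
                logger->name(),
                spdlog::level::debug,
                "Failed to get response from LLM. See log file for full request body."));
        }
    }
}

int main() {
    // Build a logger with a console sink and a file sink, then exercise the dispatch.
    auto logger = std::make_shared<spdlog::logger>("llm");
    logger->sinks().push_back(std::make_shared<spdlog::sinks::stderr_color_sink_mt>());
    logger->sinks().push_back(std::make_shared<spdlog::sinks::basic_file_sink_mt>("llm.log", true));
    logger->set_level(spdlog::level::debug);
    log_request_failure(logger, "{\"model\":\"...\"}");
}
```

Probing every entry of `logger->sinks()` with `std::dynamic_pointer_cast` avoids hard-coding a sink index (the previous `sinks()[1]`), so the behavior no longer depends on the order in which sinks were attached.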