diff --git a/README.md b/README.md
index 89a6ea5..3db81f1 100644
--- a/README.md
+++ b/README.md
@@ -13,7 +13,7 @@ Let's embrace local LLM agents **w/** humanus.cpp!
 ## How to Build
 
 ```bash
-git submodule update --init --recursive
+git submodule update --init
 
 cmake -B build
 cmake --build build --config Release
@@ -23,8 +23,9 @@ cmake --build build --config Release
 
 ### Configuration
 
-Switch to your own configration first:
-1. Copy configuration files from `config/example` to `config`.
+To set up your custom configuration, follow these steps:
+
+1. Copy all files from `config/example` to `config`.
 2. Replace `base_url`, `api_key`, .etc in `config/config_llm.toml` and other configurations in `config/config*.toml` according to your need.
 > Note: `llama-server` in [llama.cpp](https://github.com/ggml-org/llama.cpp) also support embedding models.
 3. Fill in `args` after `"@modelcontextprotocol/server-filesystem"` for `filesystem` to control the access to files. For example:
@@ -37,6 +38,7 @@
 args = ["-y", "/Users/{Username}/Desktop", "other/path/to/your/files]
 ```
 
+4. Ensure all requirements for the MCP servers are installed. For example, run `npx playwright install` first for `playwright`.
 
 ### `mcp_server`
 
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index c974768..cc6377e 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -98,6 +98,12 @@ private:
 };
 
 int main(int argc, char** argv) {
+#if defined (_WIN32)
+    SetConsoleCP(CP_UTF8);
+    SetConsoleOutputCP(CP_UTF8);
+    _setmode(_fileno(stdin), _O_WTEXT); // wide character input mode
+#endif
+
     mcp::set_log_level(mcp::log_level::warning);
 
     int port = 8896;
diff --git a/include/schema.h b/include/schema.h
index 40d8372..a6edb04 100644
--- a/include/schema.h
+++ b/include/schema.h
@@ -87,7 +87,7 @@ struct Message {
     int num_tokens;
 
     // TODO: configure the tokenizer
-    inline static const std::shared_ptr tokenizer = std::make_shared(PROJECT_ROOT / "tokenizer" / "cl100k_base.tiktoken"); // use cl100k_base to roughly count tokens
+    inline static const std::shared_ptr tokenizer = std::make_shared((PROJECT_ROOT / "tokenizer" / "cl100k_base.tiktoken").string()); // use cl100k_base to roughly count tokens
 
     Message(const std::string& role, const json& content, const std::string& name = "", const std::string& tool_call_id = "", const std::vector tool_calls = {})
         : role(role), content(content), name(name), tool_call_id(tool_call_id), tool_calls(tool_calls) {
diff --git a/mcp b/mcp
index 0dbcd1e..88237f2 160000
--- a/mcp
+++ b/mcp
@@ -1 +1 @@
-Subproject commit 0dbcd1e6d5bef3443c9d4b3cff6c6e3adef72264
+Subproject commit 88237f2eaae1dc89b32d2a693ac2bd15fb6ad269
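
Two notes on the hunks above. First, the Win32 block added to `examples/server/server.cpp` calls console APIs whose includes sit outside the hunk's context. A minimal self-contained sketch, assuming the usual MSVC homes of these calls (`SetConsoleCP`/`SetConsoleOutputCP` from `<windows.h>`, `_setmode` from `<io.h>`, `_O_WTEXT` from `<fcntl.h>`, `_fileno`/`stdin` from `<cstdio>`):

```cpp
// Self-contained sketch of the UTF-8 console setup added in server.cpp.
// The includes are assumptions based on where MSVC documents these APIs;
// the real file may already pull them in elsewhere.
#if defined(_WIN32)
#include <windows.h> // SetConsoleCP, SetConsoleOutputCP, CP_UTF8
#include <cstdio>    // stdin, _fileno
#include <io.h>      // _setmode
#include <fcntl.h>   // _O_WTEXT
#endif

int main() {
#if defined(_WIN32)
    SetConsoleCP(CP_UTF8);              // interpret console input bytes as UTF-8
    SetConsoleOutputCP(CP_UTF8);        // emit console output as UTF-8
    _setmode(_fileno(stdin), _O_WTEXT); // switch stdin to wide-character mode
#endif
    return 0;
}
```

Setting both code pages keeps narrow-string I/O in UTF-8, while `_O_WTEXT` switches stdin to wide-character reads so non-ASCII console input survives on Windows.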
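
Second, the `include/schema.h` hunk wraps the `std::filesystem::path` expression in `.string()` before handing it to the tokenizer constructor. That is a portability fix: `path` converts implicitly only to its native string type, which is `std::wstring` on Windows, so a `std::string` parameter binds only after an explicit `.string()`. A sketch of the failure mode, with a hypothetical `Tokenizer` standing in for the real type (the `shared_ptr`/`make_shared` template arguments are not visible in the hunk):

```cpp
#include <filesystem>
#include <iostream>
#include <string>

// Hypothetical stand-in for the tokenizer type in schema.h; its template
// argument is not shown in the hunk above.
struct Tokenizer {
    explicit Tokenizer(const std::string& file) : file_(file) {}
    std::string file_;
};

int main() {
    const std::filesystem::path root{"/project"};
    // Tokenizer t{root / "tokenizer" / "cl100k_base.tiktoken"};
    // ^ compiles on POSIX, where path's native string type is std::string,
    //   but fails on Windows, where it is std::wstring.
    Tokenizer t{(root / "tokenizer" / "cl100k_base.tiktoken").string()}; // portable
    std::cout << t.file_ << '\n';
    return 0;
}
```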