Windows adapted

main
hkr04 2025-04-13 01:03:50 +08:00
parent fdbb9fd9c3
commit 001b59ba89
4 changed files with 13 additions and 5 deletions


@@ -13,7 +13,7 @@ Let's embrace local LLM agents **w/** humanus.cpp!
## How to Build
```bash
-git submodule update --init --recursive
+git submodule update --init
cmake -B build
cmake --build build --config Release
@@ -23,8 +23,9 @@ cmake --build build --config Release
### Configuration
-Switch to your own configration first:
-1. Copy configuration files from `config/example` to `config`.
+To set up your custom configuration, follow these steps:
+1. Copy all files from `config/example` to `config`.
2. Replace `base_url`, `api_key`, etc. in `config/config_llm.toml` and adjust the other settings in `config/config*.toml` as needed.
> Note: `llama-server` in [llama.cpp](https://github.com/ggml-org/llama.cpp) also supports embedding models.
3. Fill in `args` after `"@modelcontextprotocol/server-filesystem"` for `filesystem` to control access to files. For example:
@@ -37,6 +38,7 @@ args = ["-y",
"/Users/{Username}/Desktop",
"other/path/to/your/files]
```
+4. Ensure all requirements for the MCP servers are installed. For example, run `npx playwright install` first for `playwright`.
### `mcp_server`


@@ -98,6 +98,12 @@
};
int main(int argc, char** argv) {
+#if defined (_WIN32)
+    SetConsoleCP(CP_UTF8);
+    SetConsoleOutputCP(CP_UTF8);
+    _setmode(_fileno(stdin), _O_WTEXT); // wide character input mode
+#endif
mcp::set_log_level(mcp::log_level::warning);
int port = 8896;
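The added `_WIN32` block configures the Windows console for UTF-8 I/O before the agent prints anything. A minimal standalone sketch of the same setup (the surrounding program is hypothetical; the headers are the standard Windows CRT ones):

```cpp
#if defined(_WIN32)
#include <windows.h> // SetConsoleCP, SetConsoleOutputCP, CP_UTF8
#include <io.h>      // _setmode
#include <fcntl.h>   // _O_WTEXT
#endif

#include <cstdio>

int main() {
#if defined(_WIN32)
    // Interpret console input/output bytes as UTF-8 instead of the
    // legacy OEM code page.
    SetConsoleCP(CP_UTF8);
    SetConsoleOutputCP(CP_UTF8);
    // Read stdin in wide-character mode so non-ASCII input
    // (e.g. CJK text typed into the console) survives intact.
    _setmode(_fileno(stdin), _O_WTEXT);
#endif
    std::printf("UTF-8 console ready\n");
    return 0;
}
```

On other platforms the guarded block compiles away, so the same source builds unchanged on Linux and macOS.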


@@ -87,7 +87,7 @@ struct Message {
int num_tokens;
// TODO: configure the tokenizer
-    inline static const std::shared_ptr<BaseTokenizer> tokenizer = std::make_shared<BPETokenizer>(PROJECT_ROOT / "tokenizer" / "cl100k_base.tiktoken"); // use cl100k_base to roughly count tokens
+    inline static const std::shared_ptr<BaseTokenizer> tokenizer = std::make_shared<BPETokenizer>((PROJECT_ROOT / "tokenizer" / "cl100k_base.tiktoken").string()); // use cl100k_base to roughly count tokens
Message(const std::string& role, const json& content, const std::string& name = "", const std::string& tool_call_id = "", const std::vector<ToolCall> tool_calls = {})
: role(role), content(content), name(name), tool_call_id(tool_call_id), tool_calls(tool_calls) {
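The `.string()` call is the actual Windows fix in this hunk: `operator/` on `std::filesystem::path` yields a `path`, which converts implicitly only to its native `string_type` (`std::wstring` on Windows, `std::string` on POSIX), so passing the bare path to a constructor taking `std::string` compiles on Linux and macOS but fails under MSVC. A minimal sketch of the failure mode, with a hypothetical `Tokenizer` standing in for `BPETokenizer`:

```cpp
#include <filesystem>
#include <iostream>
#include <string>

// Hypothetical stand-in: like BPETokenizer, it takes a std::string path.
struct Tokenizer {
    explicit Tokenizer(const std::string& path) { std::cout << path << '\n'; }
};

int main() {
    std::filesystem::path root = std::filesystem::current_path();
    std::filesystem::path p = root / "tokenizer" / "cl100k_base.tiktoken";

    // Tokenizer t(p);       // OK on POSIX (string_type is std::string),
    //                       // compile error on Windows (std::wstring).
    Tokenizer t(p.string()); // .string() always yields std::string: portable.
    return 0;
}
```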

mcp

@@ -1 +1 @@
-Subproject commit 0dbcd1e6d5bef3443c9d4b3cff6c6e3adef72264
+Subproject commit 88237f2eaae1dc89b32d2a693ac2bd15fb6ad269