// config.cpp — humanus configuration loader (extraction metadata: 72 lines, 2.6 KiB, C++)
#include "config.h"
#include "logger.h"
#include "toml.hpp"

#include <iostream>
#include <filesystem>
namespace humanus {
// Initialize static singleton members: the lazily-created instance pointer
// and the mutex that presumably guards its creation in getInstance()
// (TODO confirm against getInstance(), not visible in this file).
Config* Config::_instance = nullptr;
std::mutex Config::_mutex;

// Global configuration instance, created eagerly during static
// initialization of this translation unit via the singleton accessor.
Config& config = Config::getInstance();
|
||
|
/**
 * @brief Load the initial configuration from the TOML config file.
 *
 * Resolves the config path via _get_config_path(), parses it, and copies
 * each sub-table under [llm] into an LLMSettings entry in _config.llm,
 * keyed by the sub-table's name. Every field is optional: an absent or
 * wrongly-typed field simply keeps the LLMSettings member's default.
 *
 * On any failure (missing file, parse error, ...) the exception is caught
 * and a single hard-coded "default" OpenAI-style entry is installed
 * instead, so the application can still start with a usable config.
 *
 * NOTE(review): logs via std::cout/std::cerr even though "logger.h" is
 * included — consider routing these messages through the project logger.
 * NOTE(review): the node-access style here (reference-returning
 * as_table()/as_string(), is_floating()) matches toml11-like APIs, while
 * toml::parse_file matches toml++ (where as_table() returns a pointer) —
 * confirm which API the bundled toml.hpp actually provides.
 */
void Config::_load_initial_config() {
    try {
        auto config_path = _get_config_path();
        // Runtime log text ("loading config file: <path>") left byte-identical.
        std::cout << "加载配置文件: " << config_path.string() << std::endl;

        auto data = toml::parse_file(config_path.string());

        // Parse LLM settings: one LLMSettings per named sub-table of [llm].
        if (data.contains("llm") && data["llm"].is_table()) {
            auto& llm_table = data["llm"].as_table();
            for (const auto& [name, settings] : llm_table) {
                LLMSettings llm_settings;

                if (settings.contains("model") && settings["model"].is_string()) {
                    llm_settings.model = settings["model"].as_string();
                }

                if (settings.contains("api_key") && settings["api_key"].is_string()) {
                    llm_settings.api_key = settings["api_key"].as_string();
                }

                if (settings.contains("base_url") && settings["base_url"].is_string()) {
                    llm_settings.base_url = settings["base_url"].as_string();
                }

                if (settings.contains("end_point") && settings["end_point"].is_string()) {
                    llm_settings.end_point = settings["end_point"].as_string();
                }

                if (settings.contains("max_tokens") && settings["max_tokens"].is_integer()) {
                    llm_settings.max_tokens = settings["max_tokens"].as_integer();
                }

                if (settings.contains("temperature") && settings["temperature"].is_floating()) {
                    llm_settings.temperature = settings["temperature"].as_floating();
                }

                // Last writer wins if the same name appears twice.
                _config.llm[name] = llm_settings;
            }
        }

    } catch (const std::exception& e) {
        // Runtime log text ("failed to load config file") left byte-identical.
        std::cerr << "加载配置文件失败: " << e.what() << std::endl;
        // Fall back to a hard-coded default configuration so startup can
        // proceed; the placeholder "sk-" api_key will need to be replaced.
        LLMSettings default_settings;
        default_settings.model = "gpt-3.5-turbo";
        default_settings.api_key = "sk-";
        default_settings.base_url = "https://api.openai.com";
        default_settings.end_point = "/v1/chat/completions";
        default_settings.max_tokens = 4096;
        default_settings.temperature = 1.0;

        _config.llm["default"] = default_settings;
    }
}
} // namespace humanus