From 5280d3b9d282028a5fae26960728571960afac32 Mon Sep 17 00:00:00 2001 From: usermicrodevices <29286243+usermicrodevices@users.noreply.github.com> Date: Sat, 14 Mar 2026 00:59:38 +0300 Subject: [PATCH] add SQLite backend, mainly for testing --- CMakeLists.txt | 29 +- build.sh | 51 +- config/core.json | 4 +- include/database/Backend.hpp | 2 + include/database/DbManager.hpp | 28 +- include/database/PostgreSqlClient.hpp | 1 + include/database/SQLiteClient.hpp | 143 ++++++ include/utils/Converters.hpp | 34 ++ src/config/ConfigManager.cpp | 74 ++- src/database/DbManager.cpp | 458 ++++++++++++----- src/database/PostgreSqlClient.cpp | 36 -- src/database/SQLiteClient.cpp | 681 ++++++++++++++++++++++++++ src/game/CollisionSystem.cpp | 1 + src/game/WorldGenerator.cpp | 10 +- src/main.cpp | 2 +- 15 files changed, 1308 insertions(+), 246 deletions(-) create mode 100644 include/database/SQLiteClient.hpp create mode 100644 include/utils/Converters.hpp create mode 100644 src/database/SQLiteClient.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 17bd3e1..5eb6edf 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -43,16 +43,18 @@ if(NOT spdlog_FOUND) add_subdirectory(${SPDLOG_DIR} EXCLUDE_FROM_ALL) endif() -# Option for Citus support +# Options for database backends option(USE_CITUS "Enable Citus distributed database support" OFF) +option(USE_SQLITE "Enable SQLite embedded database support" OFF) # Find dependencies find_package(OpenGL REQUIRED) find_package(PostgreSQL REQUIRED) find_package(Python3 REQUIRED COMPONENTS Development) -#find_package(glm REQUIRED) -#find_package(asio REQUIRED) -#find_package(nlohmann_json REQUIRED) + +if(USE_SQLITE) + find_package(SQLite3 REQUIRED) +endif() # Find zlib for compression find_package(ZLIB REQUIRED) @@ -86,7 +88,6 @@ set(PROCESS_SOURCES # Scripting system set(SCRIPTING_SOURCES src/scripting/PythonAPI.cpp - #src/scripting/PythonEvent.cpp src/scripting/PythonModule.cpp src/scripting/PythonScripting.cpp 
src/scripting/ScriptHotReloader.cpp @@ -144,9 +145,11 @@ set(DATABASE_SOURCES if(USE_CITUS) message(STATUS "Building with Citus support") list(APPEND DATABASE_SOURCES src/database/CitusClient.cpp) -else() - message(STATUS "Building without Citus (PostgreSQL only)") - # No definition added here – Citus code is completely excluded +endif() + +if(USE_SQLITE) + message(STATUS "Building with SQLite support") + list(APPEND DATABASE_SOURCES src/database/SQLiteClient.cpp) endif() # Include directories @@ -178,6 +181,10 @@ if(USE_CITUS) target_compile_definitions(gameserver PRIVATE USE_CITUS=1) endif() +if(USE_SQLITE) + target_compile_definitions(gameserver PRIVATE USE_SQLITE=1) +endif() + # Link libraries target_link_libraries(gameserver PRIVATE ${OPENGL_LIBRARIES} @@ -195,6 +202,10 @@ target_link_libraries(gameserver PRIVATE ${CRYPT_LIB} ) +if(USE_SQLITE) + target_link_libraries(gameserver PRIVATE SQLite::SQLite3) +endif() + # Compiler flags if(MSVC) target_compile_options(gameserver PRIVATE /W4) @@ -207,4 +218,4 @@ target_link_options(gameserver PRIVATE -Wl,-Bsymbolic) # Installation install(TARGETS gameserver DESTINATION bin) install(DIRECTORY config/ DESTINATION config) -install(DIRECTORY scripts/ DESTINATION scripts) +install(DIRECTORY scripts/ DESTINATION scripts) \ No newline at end of file diff --git a/build.sh b/build.sh index e490485..5f67a0c 100755 --- a/build.sh +++ b/build.sh @@ -35,28 +35,47 @@ sudo apt-get install -y \ libspdlog-dev \ nlohmann-json3-dev +# Parse command line arguments for optional database backends +USE_CITUS=OFF +USE_SQLITE=OFF -# Optional: Install Citus only if requested -if [ "$1" = "--with-citus" ]; then - echo "Installing Citus extension..." - sudo apt-get install -y postgresql-15-citus-12 - export USE_CITUS=ON -else - echo "Building without Citus (PostgreSQL only)" - export USE_CITUS=OFF -fi +for arg in "$@"; do + case $arg in + --with-citus) + echo "Installing Citus extension..." 
+ sudo apt-get install -y postgresql-15-citus-12 + USE_CITUS=ON + ;; + --with-sqlite) + echo "Installing SQLite3 development libraries..." + sudo apt-get install -y libsqlite3-dev + USE_SQLITE=ON + ;; + *) + # ignore unknown + ;; + esac +done -#rm -rf build +# Build configuration +echo "Building with Citus: $USE_CITUS, SQLite: $USE_SQLITE" + +# Clean previous build artifacts rm -f CMakeCache.txt Makefile cmake_install.cmake rm -rf CMakeFiles -# Build +# Create build directory and copy config mkdir -p build -#cp -fr config build/config rsync -a --delete config/ build/config/ cd build -#cmake .. -B . -DUSE_CITUS=${USE_CITUS:-OFF} -DCMAKE_BUILD_TYPE=Release -cmake .. -B . -DUSE_CITUS=${USE_CITUS:-OFF} -DCMAKE_BUILD_TYPE=Debug + +# Run CMake +cmake .. -B . \ + -DUSE_CITUS=${USE_CITUS} \ + -DUSE_SQLITE=${USE_SQLITE} \ + -DCMAKE_BUILD_TYPE=Debug + +# Build make -j$(nproc) if [ -f "gameserver" ]; then @@ -66,6 +85,6 @@ else echo "Check cmake output above for errors" fi -# create default database user +# create default database user (commented out by default) #sudo -u postgres psql -c "DROP USER IF EXISTS gameuser;" -#sudo -u postgres psql -c "CREATE USER gameuser WITH PASSWORD 'password' SUPERUSER;" +#sudo -u postgres psql -c "CREATE USER gameuser WITH PASSWORD 'password' SUPERUSER;" \ No newline at end of file diff --git a/config/core.json b/config/core.json index bc995a7..76790cc 100644 --- a/config/core.json +++ b/config/core.json @@ -86,7 +86,7 @@ "database": { "backend": "postgresql", - "host": "localhost", + "host": "127.0.0.1", "port": 5432, "name": "gamedb", "user": "gameuser", @@ -98,7 +98,7 @@ }, "logging": { - "level": "info", + "level": "debug", "file": "logs/server.log", "maxSize": 10485760, "backupCount": 5 diff --git a/include/database/Backend.hpp b/include/database/Backend.hpp index ea5b40f..3128974 100644 --- a/include/database/Backend.hpp +++ b/include/database/Backend.hpp @@ -1,3 +1,5 @@ +#pragma once + #include #include "logging/Logger.hpp" diff --git 
a/include/database/DbManager.hpp b/include/database/DbManager.hpp index 975778c..7b098a1 100644 --- a/include/database/DbManager.hpp +++ b/include/database/DbManager.hpp @@ -17,12 +17,15 @@ #ifdef USE_CITUS #include "database/CitusClient.hpp" -class CitusClient; +//class CitusClient; #else #include "database/PostgreSqlClient.hpp" -class PostgreSqlClient; +//class PostgreSqlClient; #endif +#ifdef USE_SQLITE +#include "database/SQLiteClient.hpp" +#endif /** * @brief Database Manager Singleton @@ -33,7 +36,8 @@ class PostgreSqlClient; class DbManager { public: // Database types - enum DatabaseType { + enum BackendType { + SQLITE, POSTGRESQL, CITUS, INVALID @@ -52,12 +56,18 @@ class DbManager { // Backend Management bool SaveGameState(const std::string& key, const nlohmann::json& state); - bool SetBackend(DatabaseType type, const nlohmann::json& config); + bool SetBackend(BackendType type, const nlohmann::json& config); DatabaseBackend* GetBackend() const { return backend_.get(); } - DatabaseType GetCurrentType() const { return currentType_; } - nlohmann::json Query(const std::string& sql) { return backend_->Query(sql); }; + BackendType GetCurrentType() const { return currentType_; } nlohmann::json GetPlayer(uint64_t playerId){ return backend_->GetPlayer(playerId); }; + nlohmann::json Query(const std::string& sql) { return backend_->Query(sql); }; + nlohmann::json QueryWithParams(const std::string& sql, const std::vector& params) + { return backend_->QueryWithParams(sql, params); }; + bool Execute(const std::string& sql) { return backend_->Execute(sql); }; + bool ExecuteWithParams(const std::string& sql, const std::vector& params) + { return backend_->ExecuteWithParams(sql, params); }; + bool UpdatePlayerPosition(uint64_t playerId, float x, float y, float z) { if (backend_) { return backend_->UpdatePlayerPosition(playerId, x, y, z); @@ -102,7 +112,7 @@ class DbManager { static DbManager* instance_; std::unique_ptr backend_; - DatabaseType currentType_; + BackendType 
currentType_; nlohmann::json config_; std::atomic initialized_; std::atomic connected_; @@ -120,8 +130,8 @@ class DbManager { // Helper methods bool ValidateConfiguration(const nlohmann::json& config) const; - DatabaseType ParseDatabaseType(const std::string& typeStr) const; - std::string DatabaseTypeToString(DatabaseType type) const; + BackendType ParseBackendType(const std::string& typeStr) const; + std::string BackendTypeToString(BackendType type) const; bool ExecuteCreateTable(const std::string& tableName, const std::string& createSql); bool TableExists(const std::string& tableName); diff --git a/include/database/PostgreSqlClient.hpp b/include/database/PostgreSqlClient.hpp index 0a20390..b224b82 100644 --- a/include/database/PostgreSqlClient.hpp +++ b/include/database/PostgreSqlClient.hpp @@ -13,6 +13,7 @@ #include +#include "utils/Converters.hpp" #include "database/Backend.hpp" //#include "database/DbManager.hpp" diff --git a/include/database/SQLiteClient.hpp b/include/database/SQLiteClient.hpp new file mode 100644 index 0000000..0ea43a4 --- /dev/null +++ b/include/database/SQLiteClient.hpp @@ -0,0 +1,143 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "database/Backend.hpp" + +/** + * @brief SQLite Client Implementation + * + * Provides a concrete implementation of DatabaseBackend for SQLite. + * Uses a single connection with mutex locking for simplicity. + * Supports JSON storage via SQLite's JSON1 extension (if available). 
+ */ +class SQLiteClient : public DatabaseBackend { +public: + explicit SQLiteClient(const nlohmann::json& config); + virtual ~SQLiteClient(); + + // Connection Management + bool Connect() override; + bool ConnectToDatabase(const std::string& dbname) override; + bool Reconnect() override; + void Disconnect() override; + bool IsConnected() const override; + bool CheckHealth() override; + void ReconnectAll() override; + + // Player Data Operations + bool SavePlayerData(uint64_t playerId, const nlohmann::json& data) override; + nlohmann::json LoadPlayerData(uint64_t playerId) override; + bool UpdatePlayer(uint64_t playerId, const nlohmann::json& updates) override; + bool DeletePlayer(uint64_t playerId) override; + bool UpdatePlayerPosition(uint64_t playerId, float x, float y, float z) override; + bool PlayerExists(uint64_t playerId) override; + nlohmann::json GetPlayerStats(uint64_t playerId) override; + bool UpdatePlayerStats(uint64_t playerId, const nlohmann::json& stats) override; + nlohmann::json GetPlayer(uint64_t playerId) override; + + // Game State Operations + bool SaveGameState(const std::string& key, const nlohmann::json& state) override; + nlohmann::json LoadGameState(const std::string& key) override; + bool DeleteGameState(const std::string& key) override; + std::vector ListGameStates() override; + + // World Data Operations + bool SaveChunkData(int chunkX, int chunkZ, const nlohmann::json& chunkData) override; + nlohmann::json LoadChunkData(int chunkX, int chunkZ) override; + bool DeleteChunkData(int chunkX, int chunkZ) override; + std::vector> ListChunksInRange(int centerX, int centerZ, int radius) override; + + // Inventory Operations + bool SaveInventory(uint64_t playerId, const nlohmann::json& inventory) override; + nlohmann::json LoadInventory(uint64_t playerId) override; + + // Quest Operations + bool SaveQuestProgress(uint64_t playerId, const std::string& questId, const nlohmann::json& progress) override; + nlohmann::json 
LoadQuestProgress(uint64_t playerId, const std::string& questId) override; + std::vector ListActiveQuests(uint64_t playerId) override; + + // Transaction Operations + bool BeginTransaction() override; + bool CommitTransaction() override; + bool RollbackTransaction() override; + bool ExecuteTransaction(const std::function& operation) override; + + // Query Operations + nlohmann::json Query(const std::string& sql) override; + nlohmann::json QueryWithParams(const std::string& sql, const std::vector& params) override; + bool Execute(const std::string& sql) override; + bool ExecuteWithParams(const std::string& sql, const std::vector& params) override; + + // Shard Operations (ignored, forward to main) + nlohmann::json QueryShard(int shardId, const std::string& sql) override; + nlohmann::json QueryShardWithParams(int shardId, const std::string& sql, + const std::vector& params) override; + bool ExecuteShard(int shardId, const std::string& sql) override; + bool ExecuteShardWithParams(int shardId, const std::string& sql, + const std::vector& params) override; + + // Utility Methods + std::string EscapeString(const std::string& str) override; + int GetShardId(uint64_t entityId) const override; + int GetTotalShards() const override; + std::string GetConnectionInfo() const override; + int64_t GetLastInsertId() override; + int GetAffectedRows() override; + + // Statistics + nlohmann::json GetDatabaseStats() override; + void ResetStats() override; + + // Connection Pool Management (SQLite uses single connection) + bool InitializeConnectionPool(size_t minConnections, size_t maxConnections) override; + void ReleaseConnectionPool() override; + size_t GetActiveConnections() const override; + size_t GetIdleConnections() const override; + +private: + // Core database handle + sqlite3* db_; + + // Configuration + nlohmann::json config_; + std::string dbPath_; + + // Synchronization + mutable std::mutex dbMutex_; + + // Statistics + struct SQLiteStats { + std::atomic totalQueries{0}; + 
std::atomic failedQueries{0}; + std::atomic totalTransactions{0}; + std::atomic connectionErrors{0}; + std::chrono::steady_clock::time_point startTime; + }; + SQLiteStats stats_; + + // Last operation results + int64_t lastInsertId_; + int affectedRows_; + + // Shard configuration (dummy, for compatibility) + int totalShards_; + + // Helper methods + bool OpenDatabase(const std::string& path); + void CloseDatabase(); + bool ExecuteSql(const std::string& sql, std::vector>* results = nullptr); + nlohmann::json ResultSetToJson(const std::vector>& rows, + const std::vector& columnNames) const; + std::string BuildCreateTableSql(const std::string& tableName, const std::string& columns) const; + bool TableExists(const std::string& tableName); +}; diff --git a/include/utils/Converters.hpp b/include/utils/Converters.hpp new file mode 100644 index 0000000..bbf16dd --- /dev/null +++ b/include/utils/Converters.hpp @@ -0,0 +1,34 @@ +#pragma once + +inline bool SafeStringToInt64(const char* str, int64_t& result) { + if (!str || str[0] == '\0') return false; + + char* endptr = nullptr; + errno = 0; + long long val = strtoll(str, &endptr, 10); + + if (errno == ERANGE || val < std::numeric_limits::min() || + val > std::numeric_limits::max()) { + return false; + } + + if (endptr == str || *endptr != '\0') { + return false; + } + + result = static_cast(val); + return true; +} + +inline bool SafeStringToInt(const char* str, int& result) { + int64_t temp; + if (!SafeStringToInt64(str, temp)) return false; + + if (temp < std::numeric_limits::min() || + temp > std::numeric_limits::max()) { + return false; + } + + result = static_cast(temp); + return true; +} diff --git a/src/config/ConfigManager.cpp b/src/config/ConfigManager.cpp index fbd9abc..fee21af 100644 --- a/src/config/ConfigManager.cpp +++ b/src/config/ConfigManager.cpp @@ -45,12 +45,12 @@ bool ConfigManager::ReloadConfig() { } bool ConfigManager::ValidateConfig() const { + Logger::Info("Validate config started..."); try { - // 
Validate server section - if (!config_.contains("server")) { + if (config_.contains("server")) + Logger::Info("Validate config 'server' section..."); + else throw std::runtime_error("Missing 'server' section"); - } - const auto& server = config_["server"]; if (!server.contains("host") || !server["host"].is_string()) { throw std::runtime_error("Invalid or missing 'server.host'"); @@ -62,57 +62,51 @@ bool ConfigManager::ValidateConfig() const { throw std::runtime_error("Invalid server port"); } - // Validate database section - if (!config_.contains("database")) { + if (config_.contains("database")) + Logger::Info("Validate config 'database' section..."); + else throw std::runtime_error("Missing 'database' section"); - } - const auto& database = config_["database"]; if (!database.contains("host") || !database["host"].is_string()) { - throw std::runtime_error("Invalid or missing 'database.host'"); + Logger::Warn("database.host not set, will use default 127.0.0.1"); } if (!database.contains("port") || !database["port"].is_number_unsigned()) { - throw std::runtime_error("Invalid or missing 'database.port'"); + Logger::Warn("database.port not set, will use default 5432"); } if (!database.contains("name") || !database["name"].is_string()) { throw std::runtime_error("Invalid or missing 'database.name'"); } - // Validate game section - if (!config_.contains("game")) { + if (config_.contains("game")) + Logger::Info("Validate config 'game' section..."); + else throw std::runtime_error("Missing 'game' section"); - } - const auto& game = config_["game"]; if (!game.contains("max_players_per_session") || !game["max_players_per_session"].is_number_unsigned()) { throw std::runtime_error("Invalid or missing 'game.max_players_per_session'"); } - // Validate logging section - if (!config_.contains("logging")) { - throw std::runtime_error("Missing 'logging' section"); - } - - const auto& logging = config_["logging"]; + if (config_.contains("logging")) + Logger::Info("Validate config 
'logging' section..."); + else + throw std::runtime_error("Missing 'logging' section"); + const auto& logging = config_["logging"]; if (!logging.contains("level") || !logging["level"].is_string()) { throw std::runtime_error("Invalid or missing 'logging.level'"); } - // Validate log levels const std::string logLevel = logging["level"]; const std::vector validLevels = { "trace", "debug", "info", "warn", "error", "critical", "off" }; - std::string lowerLevel = logLevel; std::transform(lowerLevel.begin(), lowerLevel.end(), lowerLevel.begin(), ::tolower); - if (std::find(validLevels.begin(), validLevels.end(), lowerLevel) == validLevels.end()) { throw std::runtime_error("Invalid log level: " + logLevel); } - Logger::Debug("Configuration validation passed"); + Logger::Info("Configuration validation passed"); return true; } catch (const std::exception& e) { @@ -228,8 +222,8 @@ std::string ConfigManager::GetDatabaseHost() const { try { return config_.at("database").at("host").get(); } catch (const std::exception& e) { - Logger::Warn("Failed to get database host, using default: localhost"); - return "localhost"; + Logger::Warn("Failed to get database host, using default: 127.0.0.1"); + return "127.0.0.1"; } } @@ -507,12 +501,11 @@ bool ConfigManager::GetConsoleOutput() const { int ConfigManager::GetInt(const std::string& key, int defaultValue) const { std::lock_guard lock(configMutex_); try { - // Convert dots to forward slashes for JSON pointer std::string keyPath = key; std::replace(keyPath.begin(), keyPath.end(), '.', '/'); - nlohmann::json::json_pointer ptr("/" + keyPath); - return config_.at(ptr).get(); + return config_.at(nlohmann::json::json_pointer("/" + keyPath)).get(); } catch (const std::exception& e) { + Logger::Warn("Failed to get int for key '{}': {}", key, e.what()); return defaultValue; } } @@ -522,9 +515,9 @@ float ConfigManager::GetFloat(const std::string& key, float defaultValue) const try { std::string keyPath = key; std::replace(keyPath.begin(), 
keyPath.end(), '.', '/'); - nlohmann::json::json_pointer ptr("/" + keyPath); - return config_.at(ptr).get(); + return config_.at(nlohmann::json::json_pointer("/" + keyPath)).get(); } catch (const std::exception& e) { + Logger::Warn("Failed to get float for key '{}': {}", key, e.what()); return defaultValue; } } @@ -534,9 +527,9 @@ bool ConfigManager::GetBool(const std::string& key, bool defaultValue) const { try { std::string keyPath = key; std::replace(keyPath.begin(), keyPath.end(), '.', '/'); - nlohmann::json::json_pointer ptr("/" + keyPath); - return config_.at(ptr).get(); + return config_.at(nlohmann::json::json_pointer("/" + keyPath)).get(); } catch (const std::exception& e) { + Logger::Warn("Failed to get bool for key '{}': {}", key, e.what()); return defaultValue; } } @@ -546,9 +539,9 @@ std::string ConfigManager::GetString(const std::string& key, const std::string& try { std::string keyPath = key; std::replace(keyPath.begin(), keyPath.end(), '.', '/'); - nlohmann::json::json_pointer ptr("/" + keyPath); - return config_.at(ptr).get(); + return config_.at(nlohmann::json::json_pointer("/" + keyPath)).get(); } catch (const std::exception& e) { + Logger::Warn("Failed to get string for key '{}': {}", key, e.what()); return defaultValue; } } @@ -559,8 +552,7 @@ std::vector ConfigManager::GetStringArray(const std::string& key) c try { std::string keyPath = key; std::replace(keyPath.begin(), keyPath.end(), '.', '/'); - nlohmann::json::json_pointer ptr("/" + keyPath); - auto& arr = config_.at(ptr); + auto& arr = config_.at(nlohmann::json::json_pointer("/" + keyPath)); if (arr.is_array()) { for (const auto& item : arr) { if (item.is_string()) { @@ -581,9 +573,9 @@ nlohmann::json ConfigManager::GetJson(const std::string& key) const { try { std::string keyPath = key; std::replace(keyPath.begin(), keyPath.end(), '.', '/'); - nlohmann::json::json_pointer ptr("/" + keyPath); - return config_.at(ptr); + return config_.at(nlohmann::json::json_pointer("/" + keyPath)); } 
catch (const std::exception& e) { + Logger::Warn("Failed to get json for key '{}': {}", key, e.what()); return nlohmann::json(); } } @@ -593,9 +585,9 @@ bool ConfigManager::HasKey(const std::string& key) const { try { std::string keyPath = key; std::replace(keyPath.begin(), keyPath.end(), '.', '/'); - nlohmann::json::json_pointer ptr("/" + keyPath); - return config_.contains(ptr); + return config_.contains(nlohmann::json::json_pointer("/" + keyPath)); } catch (const std::exception& e) { + Logger::Warn("Failed HasKey for key '{}': {}", key, e.what()); return false; } } diff --git a/src/database/DbManager.cpp b/src/database/DbManager.cpp index 0959f04..6f7e3e6 100644 --- a/src/database/DbManager.cpp +++ b/src/database/DbManager.cpp @@ -31,8 +31,31 @@ bool DbManager::EnsureDatabaseExists(const std::string& configPath) { Logger::Error("Failed to initialize DbManager"); return false; } - std::string targetDb = config_["name"].get(); - if (!backend_->ConnectToDatabase("postgres")) { + + std::string targetDb = config_["name"].get(); // For SQLite this is a file path + + if (currentType_ == SQLITE) { // --- SQLite handling --- + std::filesystem::path path(targetDb); + std::filesystem::path dir = path.parent_path(); + if (!dir.empty() && + !std::filesystem::exists(dir)) { // Ensure directory for db file exists + std::filesystem::create_directories(dir); + } + if (!backend_->Connect()) { // Connect – this will create the file if it doesn't exist + Logger::Error("Failed to connect to SQLite database at '{}'", targetDb); + return false; + } + if (!CreateDefaultTablesIfNotExist()) { // Create default tables + Logger::Critical("Failed to create default tables for SQLite."); + backend_->Disconnect(); + return false; + } + backend_->Disconnect(); + Logger::Info("SQLite database file ready and tables created: {}", targetDb); + return true; + } + + if (!backend_->ConnectToDatabase("postgres")) { // --- PostgreSQL / Citus handling --- Logger::Error("Failed to switch to admin database 
'postgres'"); return false; } @@ -40,9 +63,11 @@ bool DbManager::EnsureDatabaseExists(const std::string& configPath) { Logger::Error("Failed to connect to admin database 'postgres'"); return false; } + std::string checkQuery = "SELECT 1 FROM pg_database WHERE datname = '" + targetDb + "'"; auto result = backend_->Query(checkQuery); bool exists = (!result.empty() && !result[0].empty()); + if (!exists) { Logger::Info("Database '{}' does not exist. Creating it...", targetDb); std::string createSql = "CREATE DATABASE " + targetDb; @@ -50,6 +75,7 @@ bool DbManager::EnsureDatabaseExists(const std::string& configPath) { createSql += " OWNER " + config_["user"].get(); } if (!backend_->Execute(createSql)) { + // Re-check in case another process created it concurrently result = backend_->Query(checkQuery); exists = (!result.empty() && !result[0].empty()); if (!exists) { @@ -64,6 +90,8 @@ bool DbManager::EnsureDatabaseExists(const std::string& configPath) { } else { Logger::Info("Database '{}' already exists.", targetDb); } + + // Switch to the target database if (!backend_->ConnectToDatabase(targetDb)) { Logger::Error("Failed to switch to target database '{}'", targetDb); return false; @@ -72,10 +100,12 @@ bool DbManager::EnsureDatabaseExists(const std::string& configPath) { Logger::Error("Failed to connect to target database '{}'", targetDb); return false; } + if (!CreateDefaultTablesIfNotExist()) { Logger::Critical("Failed to create default tables."); return false; } + Logger::Info("Default SQL tables verified/created successfully."); return true; } @@ -94,30 +124,38 @@ bool DbManager::Initialize(const std::string& configPath) { Logger::Error("Invalid database configuration"); return false; } - std::string typeStr = config_.value("type", "postgresql"); - currentType_ = ParseDatabaseType(typeStr); + std::string backendStr = config_.value("backend", "postgresql"); + currentType_ = ParseBackendType(backendStr); if (currentType_ == INVALID) { - Logger::Error("Unknown database 
type: {}", typeStr); + Logger::Error("Unknown database backend: {}", backendStr); return false; } switch (currentType_) { + case SQLITE: +#ifdef USE_SQLITE + backend_ = std::make_unique(config_); +#else + Logger::Error("SQLite support not compiled in. Recompile with USE_SQLITE=1"); + return false; +#endif + break; case POSTGRESQL: backend_ = std::make_unique(config_); break; case CITUS: - #ifdef USE_CITUS +#ifdef USE_CITUS backend_ = std::make_unique(config_); - #else +#else Logger::Error("Citus support not compiled in. Recompile with USE_CITUS=1"); return false; - #endif +#endif break; default: - Logger::Error("Unsupported database type"); + Logger::Error("Unsupported database backend"); return false; } initialized_ = true; - Logger::Info("DbManager initialized with {} backend", DatabaseTypeToString(currentType_)); + Logger::Info("DbManager initialized with {} backend", BackendTypeToString(currentType_)); return true; } @@ -173,8 +211,8 @@ bool DbManager::LoadConfiguration(const std::string& configPath) { nlohmann::json poolConfig = config_.value("pool", nlohmann::json::object()); config_ = { - {"type", config_.value("type", "postgresql")}, - {"host", config_.value("host", "localhost")}, + {"backend", config_.value("backend", "postgresql")}, + {"host", config_.value("host", "127.0.0.1")}, {"port", config_.value("port", 5432)}, {"name", config_.value("name", "game_db")}, {"user", config_.value("user", "postgres")}, @@ -202,8 +240,8 @@ bool DbManager::LoadConfiguration(const std::string& configPath) { bool DbManager::ValidateConfiguration(const nlohmann::json& config) const { try { // Check required fields - if (!config.contains("host") || !config["host"].is_string()) { - Logger::Error("Missing or invalid 'host' in database configuration"); + if (config.contains("host") && !config["host"].is_string()) { + Logger::Error("Invalid 'host' in database configuration (must be string)"); return false; } @@ -218,9 +256,10 @@ bool DbManager::ValidateConfiguration(const 
nlohmann::json& config) const { } // Validate port + // Port is optional (default 5432), but if present must be valid if (config.contains("port")) { if (!config["port"].is_number()) { - Logger::Error("Invalid 'port' in database configuration"); + Logger::Error("Invalid 'port' in database configuration (must be number)"); return false; } int port = config["port"]; @@ -258,7 +297,7 @@ bool DbManager::ValidateConfiguration(const nlohmann::json& config) const { } } -bool DbManager::SetBackend(DatabaseType type, const nlohmann::json& config) { +bool DbManager::SetBackend(BackendType backendType, const nlohmann::json& config) { std::lock_guard lock(instanceMutex_); if (!ValidateConfiguration(config)) { @@ -270,7 +309,7 @@ bool DbManager::SetBackend(DatabaseType type, const nlohmann::json& config) { std::unique_ptr oldBackend = std::move(backend_); // Create new backend - switch (type) { + switch (backendType) { case POSTGRESQL: backend_ = std::make_unique(config); break; @@ -283,16 +322,16 @@ bool DbManager::SetBackend(DatabaseType type, const nlohmann::json& config) { #endif break; default: - Logger::Error("Unsupported database type"); + Logger::Error("Unsupported database backend"); backend_ = std::move(oldBackend); return false; } - currentType_ = type; + currentType_ = backendType; config_ = config; connected_ = false; - Logger::Info("Database backend changed to {}", DatabaseTypeToString(type)); + Logger::Info("Database backend changed to {}", BackendTypeToString(currentType_)); return true; } @@ -333,7 +372,7 @@ bool DbManager::Connect() { try { if (backend_->Connect()) { connected_ = true; - Logger::Info("Connected to {} database", DatabaseTypeToString(currentType_)); + Logger::Info("Connected to {} database", BackendTypeToString(currentType_)); return true; } else { Logger::Error("Failed to connect to database"); @@ -397,7 +436,7 @@ nlohmann::json DbManager::GetStatistics() const { nlohmann::json stats; // Basic info - stats["type"] = 
DatabaseTypeToString(currentType_); + stats["backend"] = BackendTypeToString(currentType_); stats["initialized"] = initialized_.load(); stats["connected"] = connected_.load(); @@ -431,7 +470,7 @@ void DbManager::PrintStatistics() const { auto stats = GetStatistics(); Logger::Info("=== Database Statistics ==="); - Logger::Info(" Type: {}", stats["type"].get()); + Logger::Info(" Backend: {}", stats["backend"].get()); Logger::Info(" Status: {}", stats["connected"].get() ? "Connected" : "Disconnected"); Logger::Info(" Uptime: {} seconds", stats["uptime_seconds"].get()); Logger::Info(" "); @@ -524,9 +563,9 @@ bool DbManager::CheckMigrationStatus() { Logger::Info("=== Migration Status ==="); for (const auto& row : result) { Logger::Info(" Version {}: {} (applied at {})", - row["version"].get(), - row["name"].get(), - row["applied_at"].get()); + row["version"].get(), + row["name"].get(), + row["applied_at"].get()); } Logger::Info("======================="); } @@ -541,19 +580,89 @@ bool DbManager::CheckMigrationStatus() { bool DbManager::RollbackMigration(int version) { if (!IsConnected()) { + Logger::Error("Cannot rollback migration: not connected to database"); return false; } + // Path to migrations directory – could be configurable + const std::string migrationsDir = "migrations/"; + std::string downFileName = "U" + std::to_string(version) + "*.sql"; // wildcard for description + try { - Logger::Info("Rolling back migration version {}...", version); + // Find the actual down file (there should be exactly one) + std::vector downFiles; + for (const auto& entry : std::filesystem::directory_iterator(migrationsDir)) { + if (entry.is_regular_file()) { + std::string filename = entry.path().filename().string(); + if (filename.rfind("U" + std::to_string(version), 0) == 0 && + filename.size() > 3 && filename.substr(filename.size() - 4) == ".sql") { + downFiles.push_back(entry.path()); + } + } + } + + if (downFiles.empty()) { + Logger::Error("No down migration found for version 
{}", version); + return false; + } + if (downFiles.size() > 1) { + Logger::Error("Multiple down migrations found for version {}: {}", version, downFiles.size()); + return false; + } - // TODO: Implement rollback logic based on your migration system - // This would involve finding and executing the down.sql for the given version + // Read the down SQL script + std::ifstream file(downFiles[0]); + if (!file.is_open()) { + Logger::Error("Failed to open down migration file: {}", downFiles[0].string()); + return false; + } + std::stringstream buffer; + buffer << file.rdbuf(); + std::string downSql = buffer.str(); - backend_->Execute("DELETE FROM schema_migrations WHERE version = " + std::to_string(version)); + Logger::Info("Rolling back migration version {} using file: {}", version, downFiles[0].string()); - Logger::Info("Migration version {} rolled back", version); - return true; + // Execute the down script within a transaction + if (!backend_->BeginTransaction()) { + Logger::Error("Failed to begin transaction for rollback"); + return false; + } + + bool success = false; + try { + // Execute the down SQL (may contain multiple statements) + // Simple approach: execute whole script. If your backend doesn't support multiple statements, + // you may need to split by ';'. We'll assume Execute handles batches. 
+ if (backend_->Execute(downSql)) { + // Remove the migration record + std::string deleteSql = "DELETE FROM schema_migrations WHERE version = " + std::to_string(version); + if (backend_->Execute(deleteSql)) { + success = true; + } else { + Logger::Error("Failed to delete migration record for version {}", version); + } + } else { + Logger::Error("Failed to execute down migration SQL for version {}", version); + } + + if (success) { + if (!backend_->CommitTransaction()) { + Logger::Error("Failed to commit transaction during rollback"); + success = false; + } + } else { + backend_->RollbackTransaction(); + } + } catch (const std::exception& e) { + Logger::Error("Exception during rollback: {}", e.what()); + backend_->RollbackTransaction(); + return false; + } + + if (success) { + Logger::Info("Migration version {} rolled back successfully", version); + } + return success; } catch (const std::exception& e) { Logger::Error("Rollback error: {}", e.what()); @@ -561,11 +670,13 @@ bool DbManager::RollbackMigration(int version) { } } -DbManager::DatabaseType DbManager::ParseDatabaseType(const std::string& typeStr) const { - std::string lowerType = typeStr; +DbManager::BackendType DbManager::ParseBackendType(const std::string& backendStr) const { + std::string lowerType = backendStr; std::transform(lowerType.begin(), lowerType.end(), lowerType.begin(), ::tolower); - if (lowerType == "postgresql" || lowerType == "postgres") { + if (lowerType == "sqlite") { + return SQLITE; + } else if (lowerType == "postgresql" || lowerType == "postgres") { return POSTGRESQL; } else if (lowerType == "citus") { return CITUS; @@ -574,8 +685,9 @@ DbManager::DatabaseType DbManager::ParseDatabaseType(const std::string& typeStr) return INVALID; } -std::string DbManager::DatabaseTypeToString(DatabaseType type) const { - switch (type) { +std::string DbManager::BackendTypeToString(BackendType backendType) const { + switch (backendType) { + case SQLITE: return "SQLite"; case POSTGRESQL: return 
"PostgreSQL"; case CITUS: return "Citus"; default: return "Unknown"; @@ -583,31 +695,37 @@ std::string DbManager::DatabaseTypeToString(DatabaseType type) const { } bool DbManager::TableExists(const std::string& tableName) { - std::string sql = "SELECT EXISTS (" - "SELECT FROM information_schema.tables " - "WHERE table_schema = 'public' AND table_name = '" + tableName + "'" - ") AS exists;"; - try { - nlohmann::json result = backend_->Query(sql); + try { + nlohmann::json result; + if (currentType_ == SQLITE) { // SQLite: query sqlite_master + std::string sql = "SELECT name FROM sqlite_master WHERE type='table' AND name='" + + EscapeString(tableName) + "';"; + result = backend_->Query(sql); + return !result.empty(); + } else { // PostgreSQL / Citus: use information_schema + std::string sql = "SELECT EXISTS (" + "SELECT FROM information_schema.tables " + "WHERE table_schema = 'public' AND table_name = '" + + EscapeString(tableName) + "') AS exists;"; + result = backend_->Query(sql); if (result.empty() || !result[0].contains("exists")) { return false; } - const auto& existsVal = result[0]["exists"]; if (existsVal.is_boolean()) { return existsVal.get(); } else if (existsVal.is_string()) { std::string str = existsVal.get(); - // PostgreSQL returns 't' for true, 'f' for false return str == "t" || str == "true" || str == "1"; } else if (existsVal.is_number()) { return existsVal.get() != 0; } return false; - } catch (const std::exception& e) { - Logger::Error("Failed to check existence of table {}: {}", tableName, e.what()); - return false; } + } catch (const std::exception& e) { + Logger::Error("Failed to check existence of table {}: {}", tableName, e.what()); + return false; + } } bool DbManager::ExecuteCreateTable(const std::string& tableName, const std::string& createSql) { @@ -618,20 +736,12 @@ bool DbManager::ExecuteCreateTable(const std::string& tableName, const std::stri Logger::Info("Creating table '{}'...", tableName); try { - backend_->Query(createSql); + 
backend_->Execute(createSql); Logger::Info("Table '{}' created successfully.", tableName); return true; } catch (const std::exception& e) { - std::string error = e.what(); - // Check for PostgreSQL "duplicate key" error on pg_type (error code 23505) - // This can happen when two sessions create the same table concurrently. - if (error.find("23505") != std::string::npos || - error.find("duplicate key value") != std::string::npos) { - Logger::Warn("Table '{}' was created concurrently by another process; treating as success.", tableName); - return true; - } - Logger::Error("Failed to create table '{}': {}", tableName, e.what()); - return false; + Logger::Error("Failed to create table '{}': {}", tableName, e.what()); + return false; } } @@ -642,99 +752,195 @@ bool DbManager::CreateDefaultTablesIfNotExist() { } bool success = true; - - success &= ExecuteCreateTable("players", R"( +// SQLite versions (use TEXT for JSON, INTEGER for timestamps) + if (currentType_ == SQLITE) { + success &= ExecuteCreateTable("players", R"( CREATE TABLE IF NOT EXISTS players ( - id BIGINT PRIMARY KEY, - data JSONB NOT NULL, - created_at TIMESTAMPTZ DEFAULT NOW(), - updated_at TIMESTAMPTZ DEFAULT NOW() + id INTEGER PRIMARY KEY, + data TEXT NOT NULL, + created_at TEXT DEFAULT (datetime('now')), + updated_at TEXT DEFAULT (datetime('now')) ); - CREATE INDEX IF NOT EXISTS idx_players_updated ON players(updated_at); - )"); + )"); - success &= ExecuteCreateTable("player_inventory", R"( + success &= ExecuteCreateTable("player_inventory", R"( CREATE TABLE IF NOT EXISTS player_inventory ( - player_id BIGINT REFERENCES players(id) ON DELETE CASCADE, - slot INT NOT NULL, - item_id BIGINT NOT NULL, - quantity INT NOT NULL DEFAULT 1, - data JSONB, -- additional item-specific data (durability, enchantments, etc.) 
- PRIMARY KEY (player_id, slot) + player_id INTEGER NOT NULL, + slot INTEGER NOT NULL, + item_id INTEGER NOT NULL, + quantity INTEGER NOT NULL DEFAULT 1, + data TEXT, + PRIMARY KEY (player_id, slot), + FOREIGN KEY (player_id) REFERENCES players(id) ON DELETE CASCADE ); - CREATE INDEX IF NOT EXISTS idx_inventory_player ON player_inventory(player_id); - )"); + )"); + Execute("CREATE INDEX IF NOT EXISTS idx_inventory_player ON player_inventory(player_id);"); - success &= ExecuteCreateTable("player_skills", R"( + success &= ExecuteCreateTable("player_skills", R"( CREATE TABLE IF NOT EXISTS player_skills ( - player_id BIGINT REFERENCES players(id) ON DELETE CASCADE, - skill_id VARCHAR(64) NOT NULL, - level INT NOT NULL DEFAULT 1, - experience FLOAT NOT NULL DEFAULT 0, - data JSONB, - PRIMARY KEY (player_id, skill_id) + player_id INTEGER NOT NULL, + skill_id TEXT NOT NULL, + level INTEGER NOT NULL DEFAULT 1, + experience REAL NOT NULL DEFAULT 0, + data TEXT, + PRIMARY KEY (player_id, skill_id), + FOREIGN KEY (player_id) REFERENCES players(id) ON DELETE CASCADE ); - CREATE INDEX IF NOT EXISTS idx_skills_player ON player_skills(player_id); - )"); + )"); + Execute("CREATE INDEX IF NOT EXISTS idx_skills_player ON player_skills(player_id);"); - success &= ExecuteCreateTable("player_quests", R"( + success &= ExecuteCreateTable("player_quests", R"( CREATE TABLE IF NOT EXISTS player_quests ( - player_id BIGINT REFERENCES players(id) ON DELETE CASCADE, - quest_id BIGINT NOT NULL, - state INT NOT NULL, -- QuestState enum as integer - progress JSONB NOT NULL, - started_at TIMESTAMPTZ, - completed_at TIMESTAMPTZ, - PRIMARY KEY (player_id, quest_id) + player_id INTEGER NOT NULL, + quest_id INTEGER NOT NULL, + state INTEGER NOT NULL, + progress TEXT NOT NULL, + started_at TEXT, + completed_at TEXT, + PRIMARY KEY (player_id, quest_id), + FOREIGN KEY (player_id) REFERENCES players(id) ON DELETE CASCADE ); - CREATE INDEX IF NOT EXISTS idx_quests_player ON player_quests(player_id); - 
CREATE INDEX IF NOT EXISTS idx_quests_state ON player_quests(state); - )"); + )"); + Execute("CREATE INDEX IF NOT EXISTS idx_quests_player ON player_quests(player_id);"); + Execute("CREATE INDEX IF NOT EXISTS idx_quests_state ON player_quests(state);"); - success &= ExecuteCreateTable("world_chunks", R"( + success &= ExecuteCreateTable("world_chunks", R"( CREATE TABLE IF NOT EXISTS world_chunks ( - chunk_x INT NOT NULL, - chunk_z INT NOT NULL, - biome INT NOT NULL, - data JSONB NOT NULL, -- serialized WorldChunk data - generated_at TIMESTAMPTZ DEFAULT NOW(), + chunk_x INTEGER NOT NULL, + chunk_z INTEGER NOT NULL, + biome INTEGER NOT NULL, + data TEXT NOT NULL, + generated_at TEXT DEFAULT (datetime('now')), PRIMARY KEY (chunk_x, chunk_z) ); - CREATE INDEX IF NOT EXISTS idx_chunks_coords ON world_chunks(chunk_x, chunk_z); - )"); + )"); + Execute("CREATE INDEX IF NOT EXISTS idx_chunks_coords ON world_chunks(chunk_x, chunk_z);"); - success &= ExecuteCreateTable("npcs", R"( + success &= ExecuteCreateTable("npcs", R"( CREATE TABLE IF NOT EXISTS npcs ( - id BIGINT PRIMARY KEY, - type INT NOT NULL, - position JSONB NOT NULL, -- {x, y, z} - level INT NOT NULL DEFAULT 1, - data JSONB NOT NULL, -- stats, AI state, loot table, etc. - created_at TIMESTAMPTZ DEFAULT NOW(), - updated_at TIMESTAMPTZ DEFAULT NOW() + id INTEGER PRIMARY KEY, + type INTEGER NOT NULL, + position TEXT NOT NULL, + level INTEGER NOT NULL DEFAULT 1, + data TEXT NOT NULL, + created_at TEXT DEFAULT (datetime('now')), + updated_at TEXT DEFAULT (datetime('now')) ); - CREATE INDEX IF NOT EXISTS idx_npcs_type ON npcs(type); - )"); + )"); + Execute("CREATE INDEX IF NOT EXISTS idx_npcs_type ON npcs(type);"); - success &= ExecuteCreateTable("loot_tables", R"( + success &= ExecuteCreateTable("loot_tables", R"( CREATE TABLE IF NOT EXISTS loot_tables ( - table_id VARCHAR(64) PRIMARY KEY, - name VARCHAR(128) NOT NULL, - data JSONB NOT NULL, -- entries, drop chances, etc. 
- created_at TIMESTAMPTZ DEFAULT NOW() + table_id TEXT PRIMARY KEY, + name TEXT NOT NULL, + data TEXT NOT NULL, + created_at TEXT DEFAULT (datetime('now')) ); - )"); + )"); - success &= ExecuteCreateTable("game_state", R"( + success &= ExecuteCreateTable("game_state", R"( CREATE TABLE IF NOT EXISTS game_state ( - key VARCHAR(64) PRIMARY KEY, - value JSONB NOT NULL, - updated_at TIMESTAMPTZ DEFAULT NOW() + key TEXT PRIMARY KEY, + value TEXT NOT NULL, + updated_at TEXT DEFAULT (datetime('now')) ); - )"); + )"); + } + else // PostgreSQL / Citus versions (existing code, unchanged) + { + success &= ExecuteCreateTable("players", R"( + CREATE TABLE IF NOT EXISTS players ( + id BIGINT PRIMARY KEY, + data JSONB NOT NULL, + created_at TIMESTAMPTZ DEFAULT NOW(), + updated_at TIMESTAMPTZ DEFAULT NOW() + ); + CREATE INDEX IF NOT EXISTS idx_players_updated ON players(updated_at); + )"); + + success &= ExecuteCreateTable("player_inventory", R"( + CREATE TABLE IF NOT EXISTS player_inventory ( + player_id BIGINT REFERENCES players(id) ON DELETE CASCADE, + slot INT NOT NULL, + item_id BIGINT NOT NULL, + quantity INT NOT NULL DEFAULT 1, + data JSONB, -- additional item-specific data (durability, enchantments, etc.) 
+ PRIMARY KEY (player_id, slot) + ); + CREATE INDEX IF NOT EXISTS idx_inventory_player ON player_inventory(player_id); + )"); + + success &= ExecuteCreateTable("player_skills", R"( + CREATE TABLE IF NOT EXISTS player_skills ( + player_id BIGINT REFERENCES players(id) ON DELETE CASCADE, + skill_id VARCHAR(64) NOT NULL, + level INT NOT NULL DEFAULT 1, + experience FLOAT NOT NULL DEFAULT 0, + data JSONB, + PRIMARY KEY (player_id, skill_id) + ); + CREATE INDEX IF NOT EXISTS idx_skills_player ON player_skills(player_id); + )"); + + success &= ExecuteCreateTable("player_quests", R"( + CREATE TABLE IF NOT EXISTS player_quests ( + player_id BIGINT REFERENCES players(id) ON DELETE CASCADE, + quest_id BIGINT NOT NULL, + state INT NOT NULL, -- QuestState enum as integer + progress JSONB NOT NULL, + started_at TIMESTAMPTZ, + completed_at TIMESTAMPTZ, + PRIMARY KEY (player_id, quest_id) + ); + CREATE INDEX IF NOT EXISTS idx_quests_player ON player_quests(player_id); + CREATE INDEX IF NOT EXISTS idx_quests_state ON player_quests(state); + )"); + + success &= ExecuteCreateTable("world_chunks", R"( + CREATE TABLE IF NOT EXISTS world_chunks ( + chunk_x INT NOT NULL, + chunk_z INT NOT NULL, + biome INT NOT NULL, + data JSONB NOT NULL, -- serialized WorldChunk data + generated_at TIMESTAMPTZ DEFAULT NOW(), + PRIMARY KEY (chunk_x, chunk_z) + ); + CREATE INDEX IF NOT EXISTS idx_chunks_coords ON world_chunks(chunk_x, chunk_z); + )"); + + success &= ExecuteCreateTable("npcs", R"( + CREATE TABLE IF NOT EXISTS npcs ( + id BIGINT PRIMARY KEY, + type INT NOT NULL, + position JSONB NOT NULL, -- {x, y, z} + level INT NOT NULL DEFAULT 1, + data JSONB NOT NULL, -- stats, AI state, loot table, etc. 
+ created_at TIMESTAMPTZ DEFAULT NOW(), + updated_at TIMESTAMPTZ DEFAULT NOW() + ); + CREATE INDEX IF NOT EXISTS idx_npcs_type ON npcs(type); + )"); + + success &= ExecuteCreateTable("loot_tables", R"( + CREATE TABLE IF NOT EXISTS loot_tables ( + table_id VARCHAR(64) PRIMARY KEY, + name VARCHAR(128) NOT NULL, + data JSONB NOT NULL, -- entries, drop chances, etc. + created_at TIMESTAMPTZ DEFAULT NOW() + ); + )"); + + success &= ExecuteCreateTable("game_state", R"( + CREATE TABLE IF NOT EXISTS game_state ( + key VARCHAR(64) PRIMARY KEY, + value JSONB NOT NULL, + updated_at TIMESTAMPTZ DEFAULT NOW() + ); + )"); + } #ifdef USE_CITUS - if (currentType_ == DatabaseType::CITUS) { + if (currentType_ == BackendType::CITUS) { try { // Distribute tables by player_id or appropriate shard key backend_->Query("SELECT create_distributed_table('players', 'id');"); diff --git a/src/database/PostgreSqlClient.cpp b/src/database/PostgreSqlClient.cpp index 618d812..a6868ee 100644 --- a/src/database/PostgreSqlClient.cpp +++ b/src/database/PostgreSqlClient.cpp @@ -1,41 +1,5 @@ #include "database/PostgreSqlClient.hpp" -// Safe conversion helpers in anonymous namespace -namespace { - bool SafeStringToInt64(const char* str, int64_t& result) { - if (!str || str[0] == '\0') return false; - - char* endptr = nullptr; - errno = 0; - long long val = strtoll(str, &endptr, 10); - - if (errno == ERANGE || val < std::numeric_limits::min() || - val > std::numeric_limits::max()) { - return false; - } - - if (endptr == str || *endptr != '\0') { - return false; - } - - result = static_cast(val); - return true; - } - - bool SafeStringToInt(const char* str, int& result) { - int64_t temp; - if (!SafeStringToInt64(str, temp)) return false; - - if (temp < std::numeric_limits::min() || - temp > std::numeric_limits::max()) { - return false; - } - - result = static_cast(temp); - return true; - } -} - // =============== Constructor and Destructor =============== PostgreSqlClient::PostgreSqlClient(const 
nlohmann::json& config) : config_(config), diff --git a/src/database/SQLiteClient.cpp b/src/database/SQLiteClient.cpp new file mode 100644 index 0000000..d61ea6a --- /dev/null +++ b/src/database/SQLiteClient.cpp @@ -0,0 +1,681 @@ +#ifdef USE_SQLITE + +#include "database/SQLiteClient.hpp" + +#include +#include +#include +#include + +// =============== Constructor and Destructor =============== +SQLiteClient::SQLiteClient(const nlohmann::json& config) + : db_(nullptr), + config_(config), + lastInsertId_(0), + affectedRows_(0) { + + // Determine database file path: try "name" (from DbManager), fallback to "file", then default + if (config.contains("file") && config["file"].is_string()) { + dbPath_ = config["file"].get(); + } else if (config.contains("name") && config["name"].is_string()) { + dbPath_ = config["name"].get(); + } else { + dbPath_ = "game.db"; + } + + // Shards configuration (SQLite doesn't support sharding, but keep for interface) + int shards = config.value("shards", 1); + totalShards_ = (shards > 0) ? 
shards : 1; + + stats_.startTime = std::chrono::steady_clock::now(); + Logger::Debug("SQLiteClient created with database file: {}", dbPath_); +} + +SQLiteClient::~SQLiteClient() { + Disconnect(); + Logger::Debug("SQLiteClient destroyed"); +} + +// =============== Connection Management =============== +bool SQLiteClient::Connect() { + std::lock_guard lock(dbMutex_); + + if (db_) { + // Already connected + return true; + } + + // Ensure the directory exists + std::filesystem::path path(dbPath_); + std::filesystem::path dir = path.parent_path(); + if (!dir.empty() && !std::filesystem::exists(dir)) { + std::filesystem::create_directories(dir); + } + + // Open the database + int rc = sqlite3_open(dbPath_.c_str(), &db_); + if (rc != SQLITE_OK) { + Logger::Error("Failed to open SQLite database '{}': {}", dbPath_, sqlite3_errmsg(db_)); + if (db_) { + sqlite3_close(db_); + db_ = nullptr; + } + return false; + } + + // Enable foreign keys + char* errMsg = nullptr; + rc = sqlite3_exec(db_, "PRAGMA foreign_keys = ON;", nullptr, nullptr, &errMsg); + if (rc != SQLITE_OK) { + Logger::Warn("Failed to enable foreign keys: {}", errMsg ? errMsg : "unknown error"); + sqlite3_free(errMsg); + } + + // Enable JSON1 extension (if available) + rc = sqlite3_exec(db_, "SELECT json('{}');", nullptr, nullptr, &errMsg); + if (rc != SQLITE_OK) { + Logger::Warn("JSON1 extension not available: {}", errMsg ? errMsg : "unknown error"); + sqlite3_free(errMsg); + } + + Logger::Info("Connected to SQLite database: {}", dbPath_); + return true; +} + +bool SQLiteClient::ConnectToDatabase(const std::string& dbname) { + // SQLite: dbname is the file path; we can change the file. 
+ Disconnect(); + dbPath_ = dbname; + return Connect(); +} + +bool SQLiteClient::Reconnect() { + Disconnect(); + return Connect(); +} + +void SQLiteClient::Disconnect() { + std::lock_guard lock(dbMutex_); + if (db_) { + sqlite3_close(db_); + db_ = nullptr; + Logger::Info("Disconnected from SQLite database"); + } +} + +bool SQLiteClient::IsConnected() const { + std::lock_guard lock(dbMutex_); + return db_ != nullptr; +} + +bool SQLiteClient::CheckHealth() { + std::lock_guard lock(dbMutex_); + if (!db_) return false; + // Execute a simple query to test + const char* sql = "SELECT 1;"; + sqlite3_stmt* stmt = nullptr; + int rc = sqlite3_prepare_v2(db_, sql, -1, &stmt, nullptr); + if (rc != SQLITE_OK) { + return false; + } + rc = sqlite3_step(stmt); + sqlite3_finalize(stmt); + return rc == SQLITE_ROW; +} + +void SQLiteClient::ReconnectAll() { + Reconnect(); +} + +// =============== Connection Pool Management (dummy) =============== +bool SQLiteClient::InitializeConnectionPool(size_t /*minConnections*/, size_t /*maxConnections*/) { + Logger::Debug("SQLiteClient: connection pool not implemented (single connection used)"); + return true; // no-op, always succeeds +} + +void SQLiteClient::ReleaseConnectionPool() { + // no-op +} + +size_t SQLiteClient::GetActiveConnections() const { + return db_ ? 
1 : 0; +} + +size_t SQLiteClient::GetIdleConnections() const { + return 0; +} + +// =============== Helper Methods =============== +bool SQLiteClient::ExecuteSql(const std::string& sql, std::vector>* results) { + std::lock_guard lock(dbMutex_); + if (!db_) { + Logger::Error("ExecuteSql: database not connected"); + stats_.failedQueries++; + stats_.connectionErrors++; + return false; + } + + sqlite3_stmt* stmt = nullptr; + const char* tail = nullptr; + int rc = sqlite3_prepare_v2(db_, sql.c_str(), static_cast(sql.size()), &stmt, &tail); + + if (rc != SQLITE_OK) { + Logger::Error("SQL prepare error: {} (SQL: {})", sqlite3_errmsg(db_), sql); + stats_.failedQueries++; + return false; + } + + // Execute and possibly fetch results + bool success = true; + int stepResult = sqlite3_step(stmt); + if (stepResult == SQLITE_ROW) { + // Query returns rows + if (results) { + int colCount = sqlite3_column_count(stmt); + do { + std::vector row; + for (int i = 0; i < colCount; ++i) { + const unsigned char* text = sqlite3_column_text(stmt, i); + if (text) { + row.emplace_back(reinterpret_cast(text)); + } else { + row.emplace_back(); // empty string for NULL + } + } + results->push_back(std::move(row)); + } while ((stepResult = sqlite3_step(stmt)) == SQLITE_ROW); + } else { + // Just step through without collecting + while ((stepResult = sqlite3_step(stmt)) == SQLITE_ROW) {} + } + } + + if (stepResult != SQLITE_DONE) { + Logger::Error("SQL step error: {} (SQL: {})", sqlite3_errmsg(db_), sql); + success = false; + stats_.failedQueries++; + } else { + // For INSERT/UPDATE, get last insert rowid and changes + if (sql.find("INSERT") != std::string::npos || sql.find("UPDATE") != std::string::npos || + sql.find("DELETE") != std::string::npos) { + lastInsertId_ = static_cast(sqlite3_last_insert_rowid(db_)); + affectedRows_ = sqlite3_changes(db_); + } + stats_.totalQueries++; + } + + sqlite3_finalize(stmt); + return success; +} + +nlohmann::json SQLiteClient::ResultSetToJson(const 
std::vector>& rows, + const std::vector& columnNames) const { + nlohmann::json result = nlohmann::json::array(); + for (const auto& row : rows) { + nlohmann::json rowObj; + for (size_t i = 0; i < columnNames.size() && i < row.size(); ++i) { + const std::string& value = row[i]; + if (value.empty()) { + rowObj[columnNames[i]] = nullptr; + } else { + // Try to parse as JSON if it looks like JSON + if (!value.empty() && (value[0] == '{' || value[0] == '[')) { + try { + rowObj[columnNames[i]] = nlohmann::json::parse(value); + } catch (...) { + rowObj[columnNames[i]] = value; + } + } else { + rowObj[columnNames[i]] = value; + } + } + } + result.push_back(rowObj); + } + return result; +} + +std::string SQLiteClient::EscapeString(const std::string& str) { + // SQLite escaping: double single quotes + std::string escaped; + escaped.reserve(str.size() + 2); + for (char c : str) { + if (c == '\'') escaped += "''"; + else escaped += c; + } + return escaped; +} + +bool SQLiteClient::TableExists(const std::string& tableName) { + std::string sql = "SELECT name FROM sqlite_master WHERE type='table' AND name='" + EscapeString(tableName) + "';"; + std::vector> results; + if (!ExecuteSql(sql, &results)) { + return false; + } + return !results.empty(); +} + +// =============== Query Operations =============== +nlohmann::json SQLiteClient::Query(const std::string& sql) { + std::vector> rows; + if (!ExecuteSql(sql, &rows)) { + return nlohmann::json::array(); + } + + // Need column names. Since we don't have them from ExecuteSql, we need to prepare separately. + // Alternative: use sqlite3_column_name in ExecuteSql and return column names. + // For simplicity, we'll modify ExecuteSql to optionally return column names. + // But to keep changes minimal, we'll re-execute a separate query to get column info? Not efficient. + // Better to enhance ExecuteSql to return column names. Let's redesign quickly. 
+ + // For now, we'll assume Query is used with SELECT and we can get column names via a separate query. + // But that's hacky. Let's implement a proper method that returns both rows and column names. + // We'll refactor: ExecuteSql will fill a struct with rows and column names. + + // Since we're in the middle of implementation, let's create a private struct ResultSet. + // But to avoid major changes, we'll create a new helper that does the full job. + + // Let's implement a method ExecuteQuery that returns nlohmann::json directly. + // We'll keep ExecuteSql for simple execution. + + // Instead, we'll add a new method ExecuteSelect that returns json. + // But for now, we'll implement Query by calling ExecuteSql and then constructing JSON without column names - that's wrong. + + // So let's properly implement Query using sqlite3 directly. + + std::lock_guard lock(dbMutex_); + if (!db_) { + Logger::Error("Query: database not connected"); + stats_.failedQueries++; + return nlohmann::json::array(); + } + + sqlite3_stmt* stmt = nullptr; + int rc = sqlite3_prepare_v2(db_, sql.c_str(), -1, &stmt, nullptr); + if (rc != SQLITE_OK) { + Logger::Error("Query prepare error: {} (SQL: {})", sqlite3_errmsg(db_), sql); + stats_.failedQueries++; + return nlohmann::json::array(); + } + + // Get column names + int colCount = sqlite3_column_count(stmt); + std::vector colNames; + for (int i = 0; i < colCount; ++i) { + colNames.push_back(sqlite3_column_name(stmt, i)); + } + + // Fetch rows + nlohmann::json result = nlohmann::json::array(); + while ((rc = sqlite3_step(stmt)) == SQLITE_ROW) { + nlohmann::json rowObj; + for (int i = 0; i < colCount; ++i) { + const char* text = reinterpret_cast(sqlite3_column_text(stmt, i)); + if (text) { + std::string value(text); + // Try to parse JSON + if (!value.empty() && (value[0] == '{' || value[0] == '[')) { + try { + rowObj[colNames[i]] = nlohmann::json::parse(value); + } catch (...) 
{ + rowObj[colNames[i]] = value; + } + } else { + rowObj[colNames[i]] = value; + } + } else { + rowObj[colNames[i]] = nullptr; + } + } + result.push_back(rowObj); + } + + if (rc != SQLITE_DONE) { + Logger::Error("Query step error: {} (SQL: {})", sqlite3_errmsg(db_), sql); + stats_.failedQueries++; + sqlite3_finalize(stmt); + return nlohmann::json::array(); + } + + sqlite3_finalize(stmt); + stats_.totalQueries++; + return result; +} + +nlohmann::json SQLiteClient::QueryWithParams(const std::string& sql, const std::vector& params) { + // SQLite doesn't support named parameters easily; we can construct the SQL by escaping. + // Not the safest but acceptable for now. + std::string processedSql = sql; + size_t pos = 0; + for (const auto& param : params) { + pos = processedSql.find('?', pos); + if (pos == std::string::npos) break; + processedSql.replace(pos, 1, "'" + EscapeString(param) + "'"); + pos += param.size() + 2; + } + return Query(processedSql); +} + +bool SQLiteClient::Execute(const std::string& sql) { + return ExecuteSql(sql); +} + +bool SQLiteClient::ExecuteWithParams(const std::string& sql, const std::vector& params) { + std::string processedSql = sql; + size_t pos = 0; + for (const auto& param : params) { + pos = processedSql.find('?', pos); + if (pos == std::string::npos) break; + processedSql.replace(pos, 1, "'" + EscapeString(param) + "'"); + pos += param.size() + 2; + } + return Execute(processedSql); +} + +// =============== Shard Operations (ignore shardId) =============== +nlohmann::json SQLiteClient::QueryShard(int /*shardId*/, const std::string& sql) { + return Query(sql); +} +nlohmann::json SQLiteClient::QueryShardWithParams(int /*shardId*/, const std::string& sql, + const std::vector& params) { + return QueryWithParams(sql, params); +} +bool SQLiteClient::ExecuteShard(int /*shardId*/, const std::string& sql) { + return Execute(sql); +} +bool SQLiteClient::ExecuteShardWithParams(int /*shardId*/, const std::string& sql, + const std::vector& params) 
{ + return ExecuteWithParams(sql, params); +} + +// =============== Utility Methods =============== +int SQLiteClient::GetShardId(uint64_t entityId) const { + // Simple hash modulo shard count (dummy, always 0) + return static_cast(entityId % totalShards_); +} +int SQLiteClient::GetTotalShards() const { + return totalShards_; +} +std::string SQLiteClient::GetConnectionInfo() const { + return "SQLite: " + dbPath_; +} +int64_t SQLiteClient::GetLastInsertId() { + return lastInsertId_; +} +int SQLiteClient::GetAffectedRows() { + return affectedRows_; +} + +// =============== Statistics =============== +nlohmann::json SQLiteClient::GetDatabaseStats() { + nlohmann::json stats; + auto now = std::chrono::steady_clock::now(); + auto uptime = std::chrono::duration_cast(now - stats_.startTime).count(); + + stats["uptime_seconds"] = uptime; + stats["total_queries"] = stats_.totalQueries.load(); + stats["failed_queries"] = stats_.failedQueries.load(); + stats["total_transactions"] = stats_.totalTransactions.load(); + stats["connection_errors"] = stats_.connectionErrors.load(); + stats["active_connections"] = GetActiveConnections(); + stats["idle_connections"] = GetIdleConnections(); + stats["database_file"] = dbPath_; + + if (stats_.totalQueries > 0) { + double successRate = 100.0 * (1.0 - static_cast(stats_.failedQueries) / stats_.totalQueries); + stats["success_rate_percent"] = successRate; + } + + return stats; +} + +void SQLiteClient::ResetStats() { + stats_.totalQueries = 0; + stats_.failedQueries = 0; + stats_.totalTransactions = 0; + stats_.connectionErrors = 0; + stats_.startTime = std::chrono::steady_clock::now(); + Logger::Info("SQLite statistics reset"); +} + +// =============== Transaction Operations =============== +bool SQLiteClient::BeginTransaction() { + if (Execute("BEGIN TRANSACTION;")) { + stats_.totalTransactions++; + return true; + } + return false; +} +bool SQLiteClient::CommitTransaction() { + return Execute("COMMIT;"); +} +bool 
SQLiteClient::RollbackTransaction() { + return Execute("ROLLBACK;"); +} +bool SQLiteClient::ExecuteTransaction(const std::function& operation) { + if (!BeginTransaction()) return false; + bool success = false; + try { + success = operation(); + } catch (...) { + success = false; + } + if (success) { + if (!CommitTransaction()) { + RollbackTransaction(); + return false; + } + } else { + RollbackTransaction(); + } + return success; +} + +// =============== Player Data Operations =============== +bool SQLiteClient::SavePlayerData(uint64_t playerId, const nlohmann::json& data) { + std::string dataJson = data.dump(); + std::string escaped = EscapeString(dataJson); + std::string sql = "INSERT OR REPLACE INTO players (id, data, updated_at) VALUES (" + + std::to_string(playerId) + ", '" + escaped + "', datetime('now'));"; + return Execute(sql); +} + +nlohmann::json SQLiteClient::LoadPlayerData(uint64_t playerId) { + std::string sql = "SELECT data FROM players WHERE id = " + std::to_string(playerId) + ";"; + auto result = Query(sql); + if (!result.empty() && result[0].contains("data")) { + return result[0]["data"]; + } + return nlohmann::json(); +} + +bool SQLiteClient::UpdatePlayer(uint64_t playerId, const nlohmann::json& updates) { + if (updates.empty()) return true; + std::ostringstream sql; + sql << "UPDATE players SET "; + bool first = true; + for (const auto& [key, value] : updates.items()) { + if (!first) sql << ", "; + first = false; + if (value.is_string()) { + sql << key << " = '" << EscapeString(value.get()) << "'"; + } else { + sql << key << " = '" << EscapeString(value.dump()) << "'"; + } + } + sql << ", updated_at = datetime('now') WHERE id = " << playerId << ";"; + return Execute(sql.str()); +} + +bool SQLiteClient::DeletePlayer(uint64_t playerId) { + std::string sql = "DELETE FROM players WHERE id = " + std::to_string(playerId) + ";"; + return Execute(sql); +} + +bool SQLiteClient::UpdatePlayerPosition(uint64_t playerId, float x, float y, float z) { + 
std::string sql = "UPDATE players SET pos_x = " + std::to_string(x) + + ", pos_y = " + std::to_string(y) + + ", pos_z = " + std::to_string(z) + + ", updated_at = datetime('now') WHERE id = " + std::to_string(playerId) + ";"; + return Execute(sql); +} + +bool SQLiteClient::PlayerExists(uint64_t playerId) { + std::string sql = "SELECT 1 FROM players WHERE id = " + std::to_string(playerId) + " LIMIT 1;"; + auto result = Query(sql); + return !result.empty(); +} + +nlohmann::json SQLiteClient::GetPlayerStats(uint64_t playerId) { + std::string sql = "SELECT level, experience, health, max_health, mana, max_mana, " + "currency_gold, currency_gems, total_playtime " + "FROM players WHERE id = " + std::to_string(playerId) + ";"; + auto result = Query(sql); + if (!result.empty()) return result[0]; + return nlohmann::json(); +} + +bool SQLiteClient::UpdatePlayerStats(uint64_t playerId, const nlohmann::json& stats) { + return UpdatePlayer(playerId, stats); +} + +nlohmann::json SQLiteClient::GetPlayer(uint64_t playerId) { + std::string sql = "SELECT * FROM players WHERE id = " + std::to_string(playerId) + ";"; + auto result = Query(sql); + if (!result.empty()) return result[0]; + return nlohmann::json(); +} + +// =============== Game State Operations =============== +bool SQLiteClient::SaveGameState(const std::string& key, const nlohmann::json& state) { + std::string stateJson = state.dump(); + std::string escaped = EscapeString(stateJson); + std::string sql = "INSERT OR REPLACE INTO game_state (key, value, updated_at) VALUES ('" + + EscapeString(key) + "', '" + escaped + "', datetime('now'));"; + return Execute(sql); +} + +nlohmann::json SQLiteClient::LoadGameState(const std::string& key) { + std::string sql = "SELECT value FROM game_state WHERE key = '" + EscapeString(key) + "';"; + auto result = Query(sql); + if (!result.empty() && result[0].contains("value")) { + return result[0]["value"]; + } + return nlohmann::json(); +} + +bool SQLiteClient::DeleteGameState(const 
std::string& key) { + std::string sql = "DELETE FROM game_state WHERE key = '" + EscapeString(key) + "';"; + return Execute(sql); +} + +std::vector<std::string> SQLiteClient::ListGameStates() { + std::string sql = "SELECT key FROM game_state ORDER BY key;"; + auto result = Query(sql); + std::vector<std::string> keys; + for (const auto& row : result) { + if (row.contains("key")) keys.push_back(row["key"].get<std::string>()); + } + return keys; +} + +// =============== World Data Operations =============== +bool SQLiteClient::SaveChunkData(int chunkX, int chunkZ, const nlohmann::json& chunkData) { + std::string dataJson = chunkData.dump(); + std::string escaped = EscapeString(dataJson); + std::string sql = "INSERT OR REPLACE INTO world_chunks (chunk_x, chunk_z, data, generated_at) VALUES (" + + std::to_string(chunkX) + ", " + std::to_string(chunkZ) + ", '" + escaped + "', datetime('now'));"; + return Execute(sql); +} + +nlohmann::json SQLiteClient::LoadChunkData(int chunkX, int chunkZ) { + std::string sql = "SELECT data FROM world_chunks WHERE chunk_x = " + std::to_string(chunkX) + + " AND chunk_z = " + std::to_string(chunkZ) + ";"; + auto result = Query(sql); + if (!result.empty() && result[0].contains("data")) { + return result[0]["data"]; + } + return nlohmann::json(); +} + +bool SQLiteClient::DeleteChunkData(int chunkX, int chunkZ) { + std::string sql = "DELETE FROM world_chunks WHERE chunk_x = " + std::to_string(chunkX) + + " AND chunk_z = " + std::to_string(chunkZ) + ";"; + return Execute(sql); +} + +std::vector<std::pair<int, int>> SQLiteClient::ListChunksInRange(int centerX, int centerZ, int radius) { + if (radius < 0) return {}; + if (radius > 10000) radius = 10000; + int64_t minX = static_cast<int64_t>(centerX) - radius; + int64_t maxX = static_cast<int64_t>(centerX) + radius; + int64_t minZ = static_cast<int64_t>(centerZ) - radius; + int64_t maxZ = static_cast<int64_t>(centerZ) + radius; + if (minX < std::numeric_limits<int>::min() || maxX > std::numeric_limits<int>::max() || + minZ < std::numeric_limits<int>::min() || maxZ > std::numeric_limits<int>::max()) { + return 
{}; + } + std::string sql = "SELECT chunk_x, chunk_z FROM world_chunks " + "WHERE chunk_x BETWEEN " + std::to_string(static_cast<int>(minX)) + + " AND " + std::to_string(static_cast<int>(maxX)) + + " AND chunk_z BETWEEN " + std::to_string(static_cast<int>(minZ)) + + " AND " + std::to_string(static_cast<int>(maxZ)) + ";"; + auto result = Query(sql); + std::vector<std::pair<int, int>> chunks; + for (const auto& row : result) { + if (row.contains("chunk_x") && row.contains("chunk_z")) { + chunks.emplace_back(row["chunk_x"].get<int>(), row["chunk_z"].get<int>()); + } + } + return chunks; +} + +// =============== Inventory Operations =============== +bool SQLiteClient::SaveInventory(uint64_t playerId, const nlohmann::json& inventory) { + std::string invJson = inventory.dump(); + std::string escaped = EscapeString(invJson); + std::string sql = "INSERT OR REPLACE INTO player_inventory (player_id, data, updated_at) VALUES (" + + std::to_string(playerId) + ", '" + escaped + "', datetime('now'));"; + return Execute(sql); +} + +nlohmann::json SQLiteClient::LoadInventory(uint64_t playerId) { + std::string sql = "SELECT data FROM player_inventory WHERE player_id = " + std::to_string(playerId) + ";"; + auto result = Query(sql); + if (!result.empty() && result[0].contains("data")) { + return result[0]["data"]; + } + return nlohmann::json(); +} + +// =============== Quest Operations =============== +bool SQLiteClient::SaveQuestProgress(uint64_t playerId, const std::string& questId, const nlohmann::json& progress) { + std::string progJson = progress.dump(); + std::string escaped = EscapeString(progJson); + std::string sql = "INSERT OR REPLACE INTO player_quests (player_id, quest_id, progress, updated_at) VALUES (" + + std::to_string(playerId) + ", '" + EscapeString(questId) + "', '" + escaped + "', datetime('now'));"; + return Execute(sql); +} + +nlohmann::json SQLiteClient::LoadQuestProgress(uint64_t playerId, const std::string& questId) { + std::string sql = "SELECT progress FROM player_quests WHERE player_id = " + 
std::to_string(playerId) + + " AND quest_id = '" + EscapeString(questId) + "';"; + auto result = Query(sql); + if (!result.empty() && result[0].contains("progress")) { + return result[0]["progress"]; + } + return nlohmann::json(); +} + +std::vector<std::string> SQLiteClient::ListActiveQuests(uint64_t playerId) { + std::string sql = "SELECT quest_id FROM player_quests WHERE player_id = " + std::to_string(playerId) + + " ORDER BY quest_id;"; + auto result = Query(sql); + std::vector<std::string> quests; + for (const auto& row : result) { + if (row.contains("quest_id")) quests.push_back(row["quest_id"].get<std::string>()); + } + return quests; +} + +#endif // USE_SQLITE diff --git a/src/game/CollisionSystem.cpp b/src/game/CollisionSystem.cpp index 8370e34..b43bbd6 100644 --- a/src/game/CollisionSystem.cpp +++ b/src/game/CollisionSystem.cpp @@ -977,6 +977,7 @@ bool CollisionSystem::SweptSphereSphere(const glm::vec3& startA, const glm::vec3 } void CollisionSystem::PerformContinuousCollisionDetection(float deltaTime) { + (void)deltaTime; // Get all potential collision pairs (using the updated grid) auto pairs = GetPotentialCollisions(); diff --git a/src/game/WorldGenerator.cpp b/src/game/WorldGenerator.cpp index 12b1060..976acc9 100644 --- a/src/game/WorldGenerator.cpp +++ b/src/game/WorldGenerator.cpp @@ -100,8 +100,8 @@ std::unique_ptr WorldGenerator::GenerateChunk(int chunkX, int chunkZ BiomeType WorldGenerator::GetBiomeAt(float x, float z) { // Use noise to determine biome float noiseValue = FractalNoise(x / 1000.0f, z / 1000.0f); - float temperature = FractalNoise(x / 800.0f, z / 800.0f); - float humidity = FractalNoise(x / 700.0f, z / 700.0f); + float temperature = FractalNoise(x / noiseValue * 8.0f, z / noiseValue * 8.0f); + float humidity = FractalNoise(x / noiseValue * 7.0f, z / noiseValue * 7.0f); // Height-based biomes float height = GetTerrainHeight(x, z); @@ -171,7 +171,7 @@ float WorldGenerator::FractalNoise(float x, float y) { } glm::vec3 WorldGenerator::CalculateNormal(float x, float z, float 
height) { - const float epsilon = 0.1f; + const float epsilon = 0.1f + height; // Sample heights at neighboring points float h1 = GetTerrainHeight(x + epsilon, z); @@ -202,9 +202,7 @@ void WorldGenerator::GenerateLowPolyTerrain(WorldChunk& chunk, int chunkX, int c // Store in heightmap (need to convert to 1D index) int index = z * (chunkSize + 1) + x; if (index < chunkSize * chunkSize) { - // Note: WorldChunk needs a GetHeightmap method, which isn't in the header - // We'll assume there's a way to set the heightmap - // For now, we'll store it in the chunk's internal heightmap array + chunk.heightmap_[index] = height; } } } diff --git a/src/main.cpp b/src/main.cpp index 68355d5..01df15b 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -429,4 +429,4 @@ int main(int argc, char* argv[]) { Logger::Info("Game Server shutdown complete"); return 0; -} \ No newline at end of file +}