diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..d4fb281 --- /dev/null +++ b/.gitignore @@ -0,0 +1,41 @@ +# Prerequisites +*.d + +# Compiled Object files +*.slo +*.lo +*.o +*.obj + +# Precompiled Headers +*.gch +*.pch + +# Linker files +*.ilk + +# Debugger Files +*.pdb + +# Compiled Dynamic libraries +*.so +*.dylib +*.dll + +# Fortran module files +*.mod +*.smod + +# Compiled Static libraries +*.lai +*.la +*.a +*.lib + +# Executables +*.exe +*.out +*.app + +# debug information files +*.dwo diff --git a/LICENSE b/LICENSE index 850ea9f..588435f 100644 --- a/LICENSE +++ b/LICENSE @@ -1,5 +1,28 @@ -Copyright (C) 2025 by interval interval1066@gmail.com +BSD 3-Clause License -Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted. +Copyright (c) 2025, interval1066 -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. 
Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/README.md b/README.md index 08dc2ec..fdaf7d8 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,5 @@ -# big_llm +# Generative Pre-trained Transformer AI Language Model +My own large language model +This is just the very start of an agentic AI LLM that I've been thinking about. Has a few external dependencies, the main one being the OpenNMT Tokenizer from MIT (https://github.com/OpenNMT/Tokenizer), for now. -My agentic model effort. \ No newline at end of file +Todo: build NMT as a shared lib and register it with the Linux system's dynamic linker (ldconfig). diff --git a/TODO b/TODO new file mode 100644 index 0000000..64fa05b --- /dev/null +++ b/TODO @@ -0,0 +1,244 @@ +1. 
Initialization Module (lm::runtime) +include/lm/runtime/init.hpp +cpp + +#pragma once +#include +#include // JSON library +#include + +namespace lm::runtime { + +class SystemState { +public: + // Singleton access + static SystemState& get_instance(); + + // Initialize from JSON config + void initialize(const std::filesystem::path& config_path); + + // Configuration accessors + const nlohmann::json& config() const noexcept; + std::string get_string(const std::string& key) const; + int get_int(const std::string& key, int default_val = 0) const; + + // Subsystem states + bool is_tokenizer_ready() const noexcept; + bool is_model_loaded() const noexcept; + +private: + SystemState() = default; // Private constructor + nlohmann::json config_; + bool tokenizer_ready_ = false; + bool model_loaded_ = false; +}; + +} // namespace lm::runtime + +2. Shutdown Module (lm::runtime) +include/lm/runtime/shutdown.hpp +cpp + +#pragma once +#include +#include + +namespace lm::runtime { + +class ShutdownHandler { +public: + // Serialize state to JSON + static void save_state( + const std::filesystem::path& output_path, + bool include_model_weights = false + ); + + // Cleanup hooks + static void register_cleanup(void (*func)()); + static void execute_cleanup(); +}; + +} // namespace lm::runtime + +3. 
Implementation Files +src/runtime/init.cpp +cpp + +#include "lm/runtime/init.hpp" +#include +#include + +using namespace lm::runtime; + +void SystemState::initialize(const std::filesystem::path& config_path) { + try { + // Load JSON config + std::ifstream f(config_path); + config_ = nlohmann::json::parse(f); + + // Validate required fields + if (!config_.contains("tokenizer") || !config_.contains("model")) { + throw std::runtime_error("Invalid config: missing required sections"); + } + + // Initialize subsystems + tokenizer_ready_ = initialize_tokenizer(config_["tokenizer"]); + model_loaded_ = initialize_model(config_["model"]); + + } catch (const std::exception& e) { + throw std::runtime_error("Initialization failed: " + std::string(e.what())); + } +} + +// ... (Other method implementations) + +src/runtime/shutdown.cpp +cpp + +#include "lm/runtime/shutdown.hpp" +#include +#include + +namespace { + std::vector cleanup_functions; + std::mutex cleanup_mutex; +} + +void ShutdownHandler::save_state( + const std::filesystem::path& output_path, + bool include_model_weights) +{ + nlohmann::json state; + + // Capture framework state + state["tokenizer"] = serialize_tokenizer_state(); + state["model"] = serialize_model_state(include_model_weights); + state["threading"] = serialize_thread_pool_stats(); + + // Write to file + std::ofstream(output_path) << state.dump(2); // Pretty print +} + +void ShutdownHandler::register_cleanup(void (*func)()) { + std::lock_guard lock(cleanup_mutex); + cleanup_functions.push_back(func); +} + +void ShutdownHandler::execute_cleanup() { + std::lock_guard lock(cleanup_mutex); + for (auto it = cleanup_functions.rbegin(); it != cleanup_functions.rend(); ++it) { + (*it)(); // Execute in reverse order + } +} + +4. 
Configuration JSON Schema +config_schema.json +json + +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "required": ["tokenizer", "model"], + "properties": { + "tokenizer": { + "type": "object", + "properties": { + "type": {"enum": ["bpe", "sentencepiece"]}, + "vocab_path": {"type": "string"}, + "dropout": {"type": "number", "minimum": 0, "maximum": 1} + } + }, + "model": { + "type": "object", + "properties": { + "path": {"type": "string"}, + "max_seq_len": {"type": "integer", "minimum": 1} + } + }, + "threading": { + "type": "object", + "properties": { + "worker_threads": {"type": "integer", "minimum": 1} + } + } + } +} + +5. Usage Example +Initialization +cpp + +#include "lm/runtime/init.hpp" + +int main() { + try { + auto& state = lm::runtime::SystemState::get_instance(); + state.initialize("config.json"); + + if (!state.is_model_loaded()) { + throw std::runtime_error("Model failed to load"); + } + } catch (const std::exception& e) { + std::cerr << "Fatal error: " << e.what() << std::endl; + return 1; + } +} + +Shutdown +cpp + +#include "lm/runtime/shutdown.hpp" + +void cleanup_db_connections() { + // Custom cleanup logic +} + +int main() { + lm::runtime::ShutdownHandler::register_cleanup(cleanup_db_connections); + + // ... run application ... + + lm::runtime::ShutdownHandler::save_state("shutdown_state.json"); + lm::runtime::ShutdownHandler::execute_cleanup(); +} + +6. 
CMake Integration +cmake + +# Add to your CMakeLists.txt +find_package(nlohmann_json 3.9 REQUIRED) + +add_library(lm_runtime + src/runtime/init.cpp + src/runtime/shutdown.cpp +) + +target_link_libraries(lm_runtime + PRIVATE nlohmann_json::nlohmann_json +) + +Key Features + + Thread-Safe Cleanup: Reverse-order execution of registered handlers + + Config Validation: JSON schema verification during initialization + + State Capture: Full system snapshot on shutdown + + Singleton Pattern: Controlled access to system state + +Error Handling +Error Case Recovery Action +Invalid JSON Throw std::runtime_error with path info +Missing Required Field Fail fast during initialization +Shutdown Serialization Failure Log warning, continue cleanup + +This implementation provides a robust foundation for managing the framework lifecycle. Would you like to add any of the following? + + Configuration hot-reloading + + Encrypted config files + + Distributed state serialization + + diff --git a/docs/spec.md b/docs/spec.md new file mode 100644 index 0000000..da45f5c Binary files /dev/null and b/docs/spec.md differ diff --git a/src/tokenizer/openmt_adapter.cpp b/src/tokenizer/openmt_adapter.cpp new file mode 100644 index 0000000..e69de29 diff --git a/src/tokenizer/tok_adapter.h b/src/tokenizer/tok_adapter.h new file mode 100644 index 0000000..3a9f5bc --- /dev/null +++ b/src/tokenizer/tok_adapter.h @@ -0,0 +1,39 @@ +#pragma once +#include "bpe_tokenizer.hpp" // Base BPE tokenizer +#include // OpenNMT's tokenizer +#include + +namespace lm::tokenizer { + +class OpenNMTAdapter : public BPETokenizer { +public: + // Supported tokenization modes + enum class Mode { BPE, SENTENCEPIECE, WORDPIECE }; + + // Initialize with OpenNMT config file + explicit OpenNMTAdapter(const std::filesystem::path& config_path); + + // Tokenization with mode selection + std::vector encode(std::string_view text, + Mode mode = Mode::BPE, + const SamplingOptions& opts = {}) const override; + + // Conversion utilities + 
static std::vector to_opennmt_tokens(const std::vector& our_tokens); + static std::vector from_opennmt_tokens(const std::vector& opennmt_tokens); + + // Configuration + void set_mode(Mode mode) { mode_ = mode; } + +private: + mutable std::mutex mutex_; // Thread safety for OpenNMT's tokenizer + Mode mode_ = Mode::BPE; + std::unique_ptr opennmt_tokenizer_; + + // Internal implementations + std::vector encode_bpe(std::string_view text, const SamplingOptions& opts) const; + std::vector encode_sp(std::string_view text) const; + std::vector encode_wp(std::string_view text) const; +}; + +} // namespace lm::tokenizer diff --git a/tests/tokenizer/test_openmtadapter.cpp b/tests/tokenizer/test_openmtadapter.cpp new file mode 100644 index 0000000..068a57a --- /dev/null +++ b/tests/tokenizer/test_openmtadapter.cpp @@ -0,0 +1,12 @@ +#include "tokenizer/opennmt_adapter.hpp" +#include + +TEST(OpenNMTAdapter, ModeSwitch) { + OpenNMTAdapter tokenizer("config.json"); + + auto bpe_tokens = tokenizer.encode("hello", OpenNMTAdapter::Mode::BPE); + auto sp_tokens = tokenizer.encode("hello", OpenNMTAdapter::Mode::SENTENCEPIECE); + + EXPECT_NE(bpe_tokens, sp_tokens); // Different tokenization schemes +} +