-
Notifications
You must be signed in to change notification settings - Fork 77
feat: add generative recommendation tokenizer. #317
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,109 @@ | ||
| /* Copyright 2025 The xLLM Authors. All Rights Reserved. | ||
| Copyright 2024 The ScaleLLM Authors. All Rights Reserved. | ||
|
|
||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||
| you may not use this file except in compliance with the License. | ||
| You may obtain a copy of the License at | ||
|
|
||
| https://github.com/jd-opensource/xllm/blob/main/LICENSE | ||
|
|
||
| Unless required by applicable law or agreed to in writing, software | ||
| distributed under the License is distributed on an "AS IS" BASIS, | ||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| See the License for the specific language governing permissions and | ||
| limitations under the License. | ||
| ==============================================================================*/ | ||
|
|
||
| #pragma once | ||
|
|
||
| #include <list> | ||
| #include <memory> | ||
| #include <mutex> | ||
| #include <shared_mutex> | ||
| #include <string> | ||
| #include <unordered_map> | ||
| #include <vector> | ||
|
|
||
| namespace xllm { | ||
| // a singleton mode by version | ||
| template <typename T> | ||
| class VersionSingleton { | ||
| public: | ||
| template <typename... Args> | ||
| static T* GetInstance(const std::string& version, | ||
| bool delete_old_versions = true, | ||
| int reserved_version_size = | ||
| 2, // default retention of the last two versions | ||
| Args&&... args) { | ||
| T* instance = nullptr; | ||
|
|
||
| { | ||
| std::shared_lock<std::shared_mutex> lock(instance_map_mutex_); | ||
| auto it = instance_map_.find(version); | ||
| if (it != instance_map_.end()) { | ||
| instance = it->second.get(); | ||
| } | ||
| } | ||
|
|
||
| if (instance == nullptr) { | ||
| std::unique_lock<std::shared_mutex> lock(instance_map_mutex_); | ||
|
|
||
| auto it = instance_map_.find(version); | ||
| if (it == instance_map_.end()) { | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. why we need to find twice from instance_map_? Line25 above has already do this.
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. double-checked locking, it is better to change std::mutex to std::shared_mutex |
||
| instance = new T(std::forward<Args>(args)...); | ||
| instance_map_[version] = std::unique_ptr<T>(instance); | ||
| instance_version_list_.push_front(version); | ||
| if (delete_old_versions) { | ||
| if (instance_version_list_.size() > reserved_version_size) { | ||
| auto it = instance_version_list_.begin(); | ||
| std::advance(it, reserved_version_size); | ||
| for (; it != instance_version_list_.end(); it++) { | ||
| instance_map_.erase(*it); | ||
| } | ||
| instance_version_list_.resize(reserved_version_size); | ||
| } | ||
| } | ||
| } else { | ||
| instance = it->second.get(); | ||
| } | ||
| } | ||
|
|
||
| return instance; | ||
| } | ||
|
|
||
| static std::vector<std::string> GetVersions() { | ||
| std::lock_guard<std::mutex> lock(instance_map_mutex_); | ||
| std::vector<std::string> versions; | ||
| for (const auto& pair : instance_map_) { | ||
| versions.push_back(pair.first); | ||
| } | ||
| return versions; | ||
| } | ||
|
|
||
| static void DestroyAllInstances() { | ||
| std::lock_guard<std::mutex> lock(instance_map_mutex_); | ||
| instance_map_.clear(); | ||
| instance_version_list_.clear(); | ||
| } | ||
|
|
||
| VersionSingleton(const VersionSingleton&) = delete; | ||
| VersionSingleton& operator=(const VersionSingleton&) = delete; | ||
|
|
||
| private: | ||
| VersionSingleton() = default; | ||
| ~VersionSingleton() = default; | ||
|
|
||
| static std::unordered_map<std::string, std::unique_ptr<T>> instance_map_; | ||
| static std::list<std::string> instance_version_list_; | ||
| static std::shared_mutex instance_map_mutex_; | ||
| }; | ||
|
|
||
| template <typename T> | ||
| std::unordered_map<std::string, std::unique_ptr<T>> | ||
| VersionSingleton<T>::instance_map_; | ||
| template <typename T> | ||
| std::list<std::string> VersionSingleton<T>::instance_version_list_; | ||
| template <typename T> | ||
| std::shared_mutex VersionSingleton<T>::instance_map_mutex_; | ||
|
|
||
| } // namespace xllm | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -25,7 +25,10 @@ limitations under the License. | |
| #include <filesystem> | ||
| #include <vector> | ||
|
|
||
| #include "core/common/version_singleton.h" | ||
| #include "core/framework/state_dict/rec_vocab_dict.h" | ||
| #include "core/framework/tokenizer/fast_tokenizer.h" | ||
| #include "core/framework/tokenizer/rec_tokenizer.h" | ||
| #include "core/framework/tokenizer/sentencepiece_tokenizer.h" | ||
| #include "core/framework/tokenizer/tiktoken_tokenizer.h" | ||
| #include "core/framework/tokenizer/tokenizer_factory.h" | ||
|
|
@@ -50,6 +53,12 @@ HFModelLoader::HFModelLoader(const std::string& model_weights_path) | |
| << "Failed to find model weights files in " << model_weights_path; | ||
| // sort the model weights files by name | ||
| std::sort(model_weights_files_.begin(), model_weights_files_.end()); | ||
|
|
||
| //@todo: 'false' will be replaced with generative recommendation judgment | ||
| if (false) { | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. If it doesn't work yet, we shouldn't add this logic for now.
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This review is only being sent out in advance; it will not be merged now and will be revised when the main generative-recommendation process code is merged. |
||
| CHECK(load_rec_vocab(model_weights_path)) | ||
| << "Failed to load rec content from " << model_weights_path; | ||
| } | ||
| } | ||
|
|
||
| std::unique_ptr<Tokenizer> HFModelLoader::tokenizer() const { | ||
|
|
@@ -70,6 +79,28 @@ std::vector<std::unique_ptr<StateDict>>& HFModelLoader::get_state_dicts() { | |
| return state_dicts_; | ||
| } | ||
|
|
||
| bool HFModelLoader::load_rec_vocab(const std::string& model_weights_path) { | ||
| if (!tokenizer_args_.vocab_file().empty()) { | ||
| std::filesystem::path path = model_weights_path; | ||
| std::string model_version = path.filename(); | ||
| std::string vocab_full_path = | ||
| path.append(tokenizer_args_.vocab_file()).string(); | ||
|
|
||
| LOG(INFO) << "model_version:" << model_version | ||
| << ", vocab_full_path:" << vocab_full_path; | ||
|
|
||
| CHECK(nullptr != VersionSingleton<RecVocabDict>::GetInstance(model_version)) | ||
| << "Failed to get vocab dict instance"; | ||
| CHECK(VersionSingleton<RecVocabDict>::GetInstance(model_version) | ||
| ->initialize(vocab_full_path)) | ||
| << "Failed to initialize vocab dict from " << vocab_full_path; | ||
| } else { | ||
| LOG(ERROR) << "vocab file is not set"; | ||
| } | ||
|
|
||
| return true; | ||
| } | ||
|
|
||
| bool HFModelLoader::load_args(const std::string& model_weights_path) { | ||
| if (!load_model_args(model_weights_path)) { | ||
| LOG(ERROR) << "Failed to load model args from " << model_weights_path; | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,138 @@ | ||
| #include "rec_vocab_dict.h" | ||
magicheng0816 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
|
|
||
| #include <algorithm> | ||
| #include <array> | ||
| #include <filesystem> | ||
| #include <fstream> | ||
|
|
||
| #include "common/global_flags.h" | ||
| #include "util/timer.h" | ||
|
|
||
| namespace xllm { | ||
|
|
||
| bool RecVocabDict::initialize(const std::string& vocab_file) { | ||
| if (initialized_) { | ||
| return true; | ||
| } | ||
|
|
||
| Timer timer; | ||
|
|
||
| if (vocab_file.empty()) { | ||
| LOG(ERROR) << "content data file is empty, file: " << vocab_file; | ||
| return false; | ||
| } | ||
| if (!std::filesystem::exists(vocab_file)) { | ||
| LOG(ERROR) << "fail to find content data file: " << vocab_file; | ||
| return false; | ||
| } | ||
| std::ifstream ifs(vocab_file.data(), std::ios::binary | std::ios::ate); | ||
| if (!ifs.is_open()) { | ||
| LOG(ERROR) << "fail to load content data file: " << vocab_file; | ||
| return false; | ||
| } | ||
|
|
||
| const size_t file_size = ifs.tellg(); | ||
| ifs.seekg(0, std::ios::beg); | ||
|
|
||
| // each line of content : 1 * int64_t(item id) + REC_TOKEN_SIZE * | ||
| // int32_t(token id); | ||
| const size_t itemid_size = sizeof(int64_t); | ||
| const size_t tokens_size = REC_TOKEN_SIZE * sizeof(int32_t); | ||
| const size_t line_size = tokens_size + itemid_size; | ||
| const size_t estimated_lines = (file_size + line_size - 1) / line_size; | ||
|
|
||
| // 2 and 4 are only empirical values | ||
| item_to_tokens_map_.reserve(estimated_lines); | ||
| tokens_to_items_map_.reserve(estimated_lines / 2); | ||
| prefix_tokens_to_next_tokens_map_.reserve(estimated_lines / 4); | ||
|
|
||
| int64_t item_id = 0; | ||
| RecTokenTriple tokens; | ||
|
|
||
| while (ifs.read(reinterpret_cast<char*>(&item_id), itemid_size) && | ||
| ifs.read(reinterpret_cast<char*>(tokens.data()), tokens_size)) { | ||
| if (FLAGS_enable_constrained_decoding) { | ||
| for (int i = 0; i < tokens.size(); i++) { | ||
| std::vector<int32_t> prefix_tokens; | ||
|
|
||
| for (int j = 0; j < i; j++) { | ||
| prefix_tokens.emplace_back(tokens[j]); | ||
| } | ||
|
|
||
| prefix_tokens_to_next_tokens_map_[prefix_tokens].insert(tokens[i]); | ||
| } | ||
| } | ||
|
|
||
| item_to_tokens_map_[item_id] = tokens; | ||
|
|
||
| tokens_to_items_map_[tokens].emplace_back(item_id); | ||
| } | ||
|
|
||
| if (ifs.gcount() != 0 && ifs.gcount() != line_size) { | ||
| LOG(ERROR) << "possibly containing incomplete lines : " << vocab_file; | ||
| item_to_tokens_map_.clear(); | ||
| tokens_to_items_map_.clear(); | ||
| prefix_tokens_to_next_tokens_map_.clear(); | ||
| return false; | ||
| } | ||
|
|
||
| initialized_ = true; | ||
| LOG(INFO) << "total line size:" << estimated_lines | ||
| << ",parse tokens to item id map size: " | ||
| << tokens_to_items_map_.size() | ||
| << ", parse item to tokens map size:" << item_to_tokens_map_.size() | ||
| << ", parse prefix tokens to next tokens map size:" | ||
| << prefix_tokens_to_next_tokens_map_.size() | ||
| << ", cost: " << timer.elapsed_seconds() << " seconds"; | ||
|
|
||
| return true; | ||
| } | ||
|
|
||
| bool RecVocabDict::get_items_by_tokens(const RecTokenTriple& rec_token_triple, | ||
| std::vector<int64_t>* item_ids) const { | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. nit: maybe use & here is better.
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. yes, the latter method is better, but the interface in front is the former method, so we used it |
||
| CHECK_EQ(initialized_, true); | ||
| CHECK_NE(item_ids, nullptr); | ||
|
|
||
| auto iter = tokens_to_items_map_.find(rec_token_triple); | ||
| if (iter == tokens_to_items_map_.end()) { | ||
| return false; | ||
| } | ||
|
|
||
| std::copy( | ||
| iter->second.begin(), iter->second.end(), std::back_inserter(*item_ids)); | ||
|
|
||
| return true; | ||
| } | ||
|
|
||
| bool RecVocabDict::get_tokens_by_item(int64_t item_id, | ||
| std::vector<int32_t>* token_ids) const { | ||
| CHECK_EQ(initialized_, true); | ||
| CHECK_NE(token_ids, nullptr); | ||
|
|
||
| auto iter = item_to_tokens_map_.find(item_id); | ||
| if (iter == item_to_tokens_map_.end()) { | ||
| return false; | ||
| } | ||
|
|
||
| std::copy( | ||
| iter->second.begin(), iter->second.end(), std::back_inserter(*token_ids)); | ||
|
|
||
| return true; | ||
| } | ||
|
|
||
| const std::set<int32_t>& RecVocabDict::get_next_tokens_by_prefix_tokens( | ||
| const Slice<int32_t>& prefix_token_ids) const { | ||
| CHECK_EQ(initialized_, true); | ||
| CHECK_LT(prefix_token_ids.size(), REC_TOKEN_SIZE); | ||
|
|
||
| std::vector<int32_t> prefix_tokens_ids_vec = prefix_token_ids; | ||
| auto iter = prefix_tokens_to_next_tokens_map_.find(prefix_tokens_ids_vec); | ||
| if (iter == prefix_tokens_to_next_tokens_map_.end()) { | ||
| static std::set<int32_t> empty_set; | ||
| return empty_set; | ||
| } | ||
|
|
||
| return iter->second; | ||
| } | ||
|
|
||
| } // namespace xllm | ||
Uh oh!
There was an error while loading. Please reload this page.