Performance optimization of Tokenizer
Reduces string allocations and removes std::vector from Tokenizer. Most processing now happens on-demand.
This commit is contained in:
@ -2448,11 +2448,11 @@ int AtomVec::process_fields(char *str, const char *default_str, Method *method)
|
||||
}
|
||||
|
||||
// tokenize words in both strings
|
||||
Tokenizer words(str, " ");
|
||||
Tokenizer def_words(default_str, " ");
|
||||
std::vector<std::string> words = Tokenizer(str, " ").as_vector();
|
||||
std::vector<std::string> def_words = Tokenizer(default_str, " ").as_vector();
|
||||
|
||||
int nfield = words.count();
|
||||
int ndef = def_words.count();
|
||||
int nfield = words.size();
|
||||
int ndef = def_words.size();
|
||||
|
||||
// process fields one by one, add to index vector
|
||||
|
||||
|
||||
Reference in New Issue
Block a user