Performance optimization of Tokenizer

Reduces string allocations and removes std::vector from Tokenizer.
Most processing now happens on demand.
This commit is contained in:
Richard Berger
2020-06-11 00:16:46 -04:00
parent 71d3827cce
commit 81d937ee97
9 changed files with 145 additions and 95 deletions

View File

@ -2448,11 +2448,11 @@ int AtomVec::process_fields(char *str, const char *default_str, Method *method)
}
// tokenize words in both strings
Tokenizer words(str, " ");
Tokenizer def_words(default_str, " ");
std::vector<std::string> words = Tokenizer(str, " ").as_vector();
std::vector<std::string> def_words = Tokenizer(default_str, " ").as_vector();
int nfield = words.count();
int ndef = def_words.count();
int nfield = words.size();
int ndef = def_words.size();
// process fields one by one, add to index vector