diff --git a/src/atom_vec.cpp b/src/atom_vec.cpp
index 8cd7db4fd4..353ab27c99 100644
--- a/src/atom_vec.cpp
+++ b/src/atom_vec.cpp
@@ -2448,11 +2448,11 @@ int AtomVec::process_fields(char *str, const char *default_str, Method *method)
 }
 
   // tokenize words in both strings
 
-  Tokenizer words(str, " ");
-  Tokenizer def_words(default_str, " ");
+  std::vector<std::string> words = Tokenizer(str, " ").as_vector();
+  std::vector<std::string> def_words = Tokenizer(default_str, " ").as_vector();
 
-  int nfield = words.count();
-  int ndef = def_words.count();
+  int nfield = words.size();
+  int ndef = def_words.size();
 
   // process fields one by one, add to index vector
diff --git a/src/atom_vec_hybrid.cpp b/src/atom_vec_hybrid.cpp
index 7e599863c0..9ba2b6f468 100644
--- a/src/atom_vec_hybrid.cpp
+++ b/src/atom_vec_hybrid.cpp
@@ -514,8 +514,8 @@ char *AtomVecHybrid::merge_fields(int inum, char *root,
 
   // identify unique words in concatenated string
 
-  Tokenizer words(concat, " ");
-  int nwords = words.count();
+  std::vector<std::string> words = Tokenizer(concat, " ").as_vector();
+  int nwords = words.size();
 
   int *unique = new int[nwords];
diff --git a/src/potential_file_reader.h b/src/potential_file_reader.h
index c512e7886d..a73f5fdbaa 100644
--- a/src/potential_file_reader.h
+++ b/src/potential_file_reader.h
@@ -43,7 +43,7 @@ namespace LAMMPS_NS
     void skip_line();
     char * next_line(int nparams = 0);
     void next_dvector(double * list, int n);
-    ValueTokenizer next_values(int nparams, const std::string & seperators = TOKENIZER_DEFAULT_SEPERATORS);
+    ValueTokenizer next_values(int nparams, const std::string & separators = TOKENIZER_DEFAULT_SEPARATORS);
 
     // convenience functions
     double next_double();
diff --git a/src/text_file_reader.h b/src/text_file_reader.h
index b162bfb23c..80a5d756ea 100644
--- a/src/text_file_reader.h
+++ b/src/text_file_reader.h
@@ -42,7 +42,7 @@ namespace LAMMPS_NS
     char * next_line(int nparams = 0);
     void next_dvector(double * list, int n);
 
-    ValueTokenizer next_values(int nparams, const std::string & seperators = TOKENIZER_DEFAULT_SEPERATORS);
+    ValueTokenizer next_values(int nparams, const std::string & separators = TOKENIZER_DEFAULT_SEPARATORS);
  };
 
  class FileReaderException : public std::exception {
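Note on the call-site changes above: code that previously indexed into an eager Tokenizer now materializes the tokens once through as_vector(). A minimal sketch of the migration, not part of the patch, with an illustrative function name and field string:

    #include <string>
    #include <vector>
    #include "tokenizer.h"

    using namespace LAMMPS_NS;

    void process_fields_example(const std::string &str) {
      // before: Tokenizer stored every token up front and supported t[i]
      //   Tokenizer words(str, " ");
      //   int nfield = words.count();

      // after: tokens are produced lazily, so call sites that need random
      // access or a stable size convert once to a vector
      std::vector<std::string> words = Tokenizer(str, " ").as_vector();
      int nfield = words.size();
      for (int i = 0; i < nfield; ++i) {
        const std::string &field = words[i];  // indexed access via the vector
        (void) field;                         // placeholder for real processing
      }
    }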
tokens", ""); + + size_t end = text.find_first_of(separators, start); if(end == std::string::npos) { - tokens.push_back(str.substr(start)); + start = end; } else { - tokens.push_back(str.substr(start, end-start)); - } - } while(end != std::string::npos); + start = text.find_first_not_of(separators, end+1); + } + } } -Tokenizer::Tokenizer(const Tokenizer & rhs) : tokens(rhs.tokens) { +bool Tokenizer::has_next() const { + return start != std::string::npos; } -Tokenizer::Tokenizer(Tokenizer && rhs) : tokens(std::move(rhs.tokens)) { +std::string Tokenizer::next() { + if(!has_next()) throw TokenizerException("No more tokens", ""); + + size_t end = text.find_first_of(separators, start); + + if(end == std::string::npos) { + std::string token = text.substr(start); + start = end; + return token; + } + + std::string token = text.substr(start, end-start); + start = text.find_first_not_of(separators, end+1); + return token; } -Tokenizer::iterator Tokenizer::begin() { - return tokens.begin(); +size_t Tokenizer::count() { + // lazy evaluation + if (ntokens == std::string::npos) { + ntokens = utils::count_words(text, separators); + } + return ntokens; } -Tokenizer::iterator Tokenizer::end() { - return tokens.end(); -} +std::vector Tokenizer::as_vector() { + // store current state + size_t current = start; -Tokenizer::const_iterator Tokenizer::cbegin() const { - return tokens.cbegin(); -} + reset(); -Tokenizer::const_iterator Tokenizer::cend() const { - return tokens.cend(); -} + // generate vector + std::vector tokens; -std::string & Tokenizer::operator[](size_t index) { - return tokens[index]; -} + while(has_next()) { + tokens.emplace_back(next()); + } -size_t Tokenizer::count() const { - return tokens.size(); + // restore state + start = current; + + return tokens; } -ValueTokenizer::ValueTokenizer(const std::string & str, const std::string & seperators) : tokens(str, seperators) { - current = tokens.begin(); +ValueTokenizer::ValueTokenizer(const std::string & str, const std::string & separators) : tokens(str, separators) { } ValueTokenizer::ValueTokenizer(const ValueTokenizer & rhs) : tokens(rhs.tokens) { - current = tokens.begin(); } ValueTokenizer::ValueTokenizer(ValueTokenizer && rhs) : tokens(std::move(rhs.tokens)) { - current = tokens.begin(); } bool ValueTokenizer::has_next() const { - return current != tokens.cend(); + return tokens.has_next(); } std::string ValueTokenizer::next_string() { if (has_next()) { - std::string value = *current; - ++current; + std::string value = tokens.next(); return value; } return ""; @@ -95,11 +136,11 @@ std::string ValueTokenizer::next_string() { int ValueTokenizer::next_int() { if (has_next()) { - if(!utils::is_integer(*current)) { - throw InvalidIntegerException(*current); + std::string current = tokens.next(); + if(!utils::is_integer(current)) { + throw InvalidIntegerException(current); } - int value = atoi(current->c_str()); - ++current; + int value = atoi(current.c_str()); return value; } return 0; @@ -107,45 +148,44 @@ int ValueTokenizer::next_int() { bigint ValueTokenizer::next_bigint() { if (has_next()) { - if(!utils::is_integer(*current)) { - throw InvalidIntegerException(*current); + std::string current = tokens.next(); + if(!utils::is_integer(current)) { + throw InvalidIntegerException(current); } - bigint value = ATOBIGINT(current->c_str()); - ++current; + bigint value = ATOBIGINT(current.c_str()); return value; } return 0; } tagint ValueTokenizer::next_tagint() { - if (current != tokens.end()) { - if(!utils::is_integer(*current)) { - throw 
diff --git a/src/tokenizer.h b/src/tokenizer.h
index 89cb57b301..8ad19ce960 100644
--- a/src/tokenizer.h
+++ b/src/tokenizer.h
@@ -25,34 +25,33 @@
 
 namespace LAMMPS_NS {
 
-#define TOKENIZER_DEFAULT_SEPERATORS " \t\r\n\f"
+#define TOKENIZER_DEFAULT_SEPARATORS " \t\r\n\f"
 
 class Tokenizer {
-    std::vector<std::string> tokens;
+    std::string text;
+    std::string separators;
+    size_t start;
+    size_t ntokens;
 public:
-    typedef std::vector<std::string>::iterator iterator;
-    typedef std::vector<std::string>::const_iterator const_iterator;
-
-    Tokenizer(const std::string & str, const std::string & seperators = TOKENIZER_DEFAULT_SEPERATORS);
+    Tokenizer(const std::string & str, const std::string & separators = TOKENIZER_DEFAULT_SEPARATORS);
     Tokenizer(Tokenizer &&);
     Tokenizer(const Tokenizer &);
     Tokenizer& operator=(const Tokenizer&) = default;
     Tokenizer& operator=(Tokenizer&&) = default;
 
-    iterator begin();
-    iterator end();
-    const_iterator cbegin() const;
-    const_iterator cend() const;
+    void reset();
+    void skip(int n);
+    bool has_next() const;
+    std::string next();
 
-    std::string & operator[](size_t index);
-    size_t count() const;
+    size_t count();
+    std::vector<std::string> as_vector();
 };
 
 class TokenizerException : public std::exception {
     std::string message;
 public:
-    TokenizerException(const std::string & msg, const std::string & token) : message(msg + ": '" + token + "'") {
-    }
+    TokenizerException(const std::string & msg, const std::string & token);
 
     ~TokenizerException() throw() {
     }
@@ -76,9 +75,8 @@ public:
 
 class ValueTokenizer {
     Tokenizer tokens;
-    Tokenizer::const_iterator current;
 public:
-    ValueTokenizer(const std::string & str, const std::string & seperators = TOKENIZER_DEFAULT_SEPERATORS);
+    ValueTokenizer(const std::string & str, const std::string & separators = TOKENIZER_DEFAULT_SEPARATORS);
     ValueTokenizer(const ValueTokenizer &);
     ValueTokenizer(ValueTokenizer &&);
     ValueTokenizer& operator=(const ValueTokenizer&) = default;
@@ -91,9 +89,9 @@ public:
     double next_double();
 
     bool has_next() const;
-    void skip(int ntokens);
+    void skip(int n);
 
-    size_t count() const;
+    size_t count();
 };
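ValueTokenizer keeps its typed interface but now forwards to the lazy Tokenizer instead of holding a const_iterator. A sketch of reading typed values, not part of the patch, assuming (as in the portions of the header not shown in this hunk) that InvalidIntegerException derives from TokenizerException and that TokenizerException overrides std::exception::what():

    #include <iostream>
    #include "tokenizer.h"

    using namespace LAMMPS_NS;

    int main() {
      ValueTokenizer values("42 2.5 oops");
      try {
        int i    = values.next_int();     // 42
        double d = values.next_double();  // 2.5
        std::cout << i << " " << d << "\n";
        values.next_int();                // "oops" fails utils::is_integer()
      } catch (const TokenizerException &e) {
        // message formatted by the new TokenizerException constructor,
        // ending with ": 'oops'"
        std::cerr << e.what() << "\n";
      }
      return 0;
    }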
diff --git a/src/utils.cpp b/src/utils.cpp
index a8dc4e308e..72193bb2c8 100644
--- a/src/utils.cpp
+++ b/src/utils.cpp
@@ -369,8 +369,20 @@ std::string utils::trim_comment(const std::string & line) {
 ------------------------------------------------------------------------- */
 
 size_t utils::count_words(const std::string & text, const std::string & seperators) {
-  ValueTokenizer words(text, seperators);
-  return words.count();
+  size_t count = 0;
+  size_t start = text.find_first_not_of(seperators);
+
+  while (start != std::string::npos) {
+    size_t end = text.find_first_of(seperators, start);
+    ++count;
+
+    if(end == std::string::npos) {
+      return count;
+    } else {
+      start = text.find_first_not_of(seperators, end + 1);
+    }
+  }
+  return count;
 }
 
 /* ----------------------------------------------------------------------
diff --git a/unittest/utils/test_tokenizer.cpp b/unittest/utils/test_tokenizer.cpp
index 08c71338be..09487aabff 100644
--- a/unittest/utils/test_tokenizer.cpp
+++ b/unittest/utils/test_tokenizer.cpp
@@ -50,25 +50,21 @@ TEST(Tokenizer, postfix_seperators) {
 
 TEST(Tokenizer, iterate_words) {
     Tokenizer t(" test word ", " ");
-    ASSERT_THAT(t[0], Eq("test"));
-    ASSERT_THAT(t[1], Eq("word"));
+    ASSERT_THAT(t.next(), Eq("test"));
+    ASSERT_THAT(t.next(), Eq("word"));
     ASSERT_EQ(t.count(), 2);
 }
 
 TEST(Tokenizer, default_seperators) {
     Tokenizer t(" \r\n test \t word \f");
-    ASSERT_THAT(t[0], Eq("test"));
-    ASSERT_THAT(t[1], Eq("word"));
+    ASSERT_THAT(t.next(), Eq("test"));
+    ASSERT_THAT(t.next(), Eq("word"));
     ASSERT_EQ(t.count(), 2);
 }
 
-TEST(Tokenizer, for_loop) {
+TEST(Tokenizer, as_vector) {
     Tokenizer t(" \r\n test \t word \f");
-    std::vector<std::string> list;
-
-    for(auto word : t) {
-        list.push_back(word);
-    }
+    std::vector<std::string> list = t.as_vector();
     ASSERT_THAT(list[0], Eq("test"));
     ASSERT_THAT(list[1], Eq("word"));
 }
diff --git a/unittest/utils/test_utils.cpp b/unittest/utils/test_utils.cpp
index e1c458a173..9830207c3e 100644
--- a/unittest/utils/test_utils.cpp
+++ b/unittest/utils/test_utils.cpp
@@ -32,6 +32,10 @@ TEST(Utils, trim_and_count_words) {
     ASSERT_EQ(utils::trim_and_count_words("some text # comment"), 2);
 }
 
+TEST(Utils, count_words_with_extra_spaces) {
+    ASSERT_EQ(utils::count_words(" some text # comment "), 4);
+}
+
 TEST(Utils, valid_integer1) {
     ASSERT_TRUE(utils::is_integer("10"));
 }