Performance optimization of Tokenizer
Reduces string allocations and removes std::vector from Tokenizer. Most processing now happens on-demand.
@@ -50,25 +50,21 @@ TEST(Tokenizer, postfix_seperators) {
 
 TEST(Tokenizer, iterate_words) {
   Tokenizer t(" test word ", " ");
-  ASSERT_THAT(t[0], Eq("test"));
-  ASSERT_THAT(t[1], Eq("word"));
+  ASSERT_THAT(t.next(), Eq("test"));
+  ASSERT_THAT(t.next(), Eq("word"));
   ASSERT_EQ(t.count(), 2);
 }
 
 TEST(Tokenizer, default_seperators) {
   Tokenizer t(" \r\n test \t word \f");
-  ASSERT_THAT(t[0], Eq("test"));
-  ASSERT_THAT(t[1], Eq("word"));
+  ASSERT_THAT(t.next(), Eq("test"));
+  ASSERT_THAT(t.next(), Eq("word"));
   ASSERT_EQ(t.count(), 2);
 }
 
-TEST(Tokenizer, for_loop) {
+TEST(Tokenizer, as_vector) {
   Tokenizer t(" \r\n test \t word \f");
-  std::vector<std::string> list;
-
-  for(auto word : t) {
-    list.push_back(word);
-  }
+  std::vector<std::string> list = t.as_vector();
   ASSERT_THAT(list[0], Eq("test"));
   ASSERT_THAT(list[1], Eq("word"));
 }
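For context, a minimal sketch of what an on-demand Tokenizer along these lines could look like. Only the class name and the tested methods next(), count(), and as_vector() come from the diff above; everything else, including the use of std::string_view and the default separator set, is an assumption, not the actual implementation in this commit:

// Sketch only: lazy tokenization over a std::string_view, so no per-token
// std::string is allocated and no token list is stored in the class.
#include <string>
#include <string_view>
#include <vector>

class Tokenizer {
public:
    // Assumed default separator set; the tests exercise " \r\n\t\f".
    // The sketch assumes the source text outlives the Tokenizer.
    explicit Tokenizer(std::string_view text,
                       std::string_view separators = " \t\r\n\f\v")
        : text_(text), separators_(separators) {}

    // Returns the next token, scanning forward from the current position.
    // Returns an empty view once the input is exhausted.
    std::string_view next() {
        // Skip leading separators.
        pos_ = text_.find_first_not_of(separators_, pos_);
        if (pos_ == std::string_view::npos) {
            pos_ = text_.size();
            return {};
        }
        std::size_t end = text_.find_first_of(separators_, pos_);
        if (end == std::string_view::npos) end = text_.size();
        std::string_view token = text_.substr(pos_, end - pos_);
        pos_ = end;
        return token;
    }

    // Counts tokens without allocating; does not disturb next().
    std::size_t count() const {
        std::size_t n = 0, pos = 0;
        while ((pos = text_.find_first_not_of(separators_, pos))
                   != std::string_view::npos) {
            ++n;
            pos = text_.find_first_of(separators_, pos);
            if (pos == std::string_view::npos) break;
        }
        return n;
    }

    // Materializes all tokens; the only place that allocates strings.
    std::vector<std::string> as_vector() const {
        std::vector<std::string> out;
        Tokenizer scan(text_, separators_);
        for (std::string_view tok = scan.next(); !tok.empty();
             tok = scan.next())
            out.emplace_back(tok);  // direct-init, ok with explicit ctor
        return out;
    }

private:
    std::string_view text_;
    std::string_view separators_;
    std::size_t pos_ = 0;
};

Returning std::string_view from next() is what would remove the per-token string allocations; as_vector() becomes the one call that allocates, so callers that only iterate never pay for a vector they do not use.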