Merge remote-tracking branch 'origin/master' into refactor-reading

Richard Berger committed 2020-07-17 23:08:26 -04:00
1223 changed files with 865702 additions and 2037147 deletions


@@ -11,104 +11,117 @@
    See the README file in the top-level LAMMPS directory.
 ------------------------------------------------------------------------- */
-#include "gtest/gtest.h"
-#include "gmock/gmock.h"
 #include "tokenizer.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
 using namespace LAMMPS_NS;
 using ::testing::Eq;
-TEST(Tokenizer, empty_string) {
+TEST(Tokenizer, empty_string)
+{
     Tokenizer t("", " ");
     ASSERT_EQ(t.count(), 0);
 }
-TEST(Tokenizer, whitespace_only) {
+TEST(Tokenizer, whitespace_only)
+{
     Tokenizer t(" ", " ");
     ASSERT_EQ(t.count(), 0);
 }
-TEST(Tokenizer, single_word) {
+TEST(Tokenizer, single_word)
+{
     Tokenizer t("test", " ");
     ASSERT_EQ(t.count(), 1);
 }
-TEST(Tokenizer, two_words) {
+TEST(Tokenizer, two_words)
+{
     Tokenizer t("test word", " ");
     ASSERT_EQ(t.count(), 2);
 }
-TEST(Tokenizer, prefix_seperators) {
+TEST(Tokenizer, prefix_separators)
+{
     Tokenizer t(" test word", " ");
     ASSERT_EQ(t.count(), 2);
 }
-TEST(Tokenizer, postfix_seperators) {
+TEST(Tokenizer, postfix_separators)
+{
     Tokenizer t("test word ", " ");
     ASSERT_EQ(t.count(), 2);
 }
-TEST(Tokenizer, iterate_words) {
+TEST(Tokenizer, iterate_words)
+{
     Tokenizer t(" test word ", " ");
-    ASSERT_THAT(t[0], Eq("test"));
-    ASSERT_THAT(t[1], Eq("word"));
+    ASSERT_THAT(t.next(), Eq("test"));
+    ASSERT_THAT(t.next(), Eq("word"));
     ASSERT_EQ(t.count(), 2);
 }
-TEST(Tokenizer, default_seperators) {
+TEST(Tokenizer, default_separators)
+{
     Tokenizer t(" \r\n test \t word \f");
-    ASSERT_THAT(t[0], Eq("test"));
-    ASSERT_THAT(t[1], Eq("word"));
+    ASSERT_THAT(t.next(), Eq("test"));
+    ASSERT_THAT(t.next(), Eq("word"));
     ASSERT_EQ(t.count(), 2);
 }
-TEST(Tokenizer, for_loop) {
+TEST(Tokenizer, as_vector)
+{
     Tokenizer t(" \r\n test \t word \f");
-    std::vector<std::string> list;
-    for(auto word : t) {
-        list.push_back(word);
-    }
+    std::vector<std::string> list = t.as_vector();
     ASSERT_THAT(list[0], Eq("test"));
     ASSERT_THAT(list[1], Eq("word"));
 }
-TEST(ValueTokenizer, empty_string) {
+TEST(ValueTokenizer, empty_string)
+{
     ValueTokenizer values("");
     ASSERT_FALSE(values.has_next());
 }
-TEST(ValueTokenizer, bad_integer) {
+TEST(ValueTokenizer, bad_integer)
+{
     ValueTokenizer values("f10");
     ASSERT_THROW(values.next_int(), InvalidIntegerException);
 }
-TEST(ValueTokenizer, bad_double) {
+TEST(ValueTokenizer, bad_double)
+{
     ValueTokenizer values("1a.0");
     ASSERT_THROW(values.next_double(), InvalidFloatException);
 }
-TEST(ValueTokenizer, valid_int) {
+TEST(ValueTokenizer, valid_int)
+{
     ValueTokenizer values("10");
     ASSERT_EQ(values.next_int(), 10);
 }
-TEST(ValueTokenizer, valid_tagint) {
+TEST(ValueTokenizer, valid_tagint)
+{
     ValueTokenizer values("42");
     ASSERT_EQ(values.next_tagint(), 42);
 }
-TEST(ValueTokenizer, valid_bigint) {
+TEST(ValueTokenizer, valid_bigint)
+{
     ValueTokenizer values("42");
     ASSERT_EQ(values.next_bigint(), 42);
 }
-TEST(ValueTokenizer, valid_double) {
+TEST(ValueTokenizer, valid_double)
+{
     ValueTokenizer values("3.14");
     ASSERT_DOUBLE_EQ(values.next_double(), 3.14);
 }
-TEST(ValueTokenizer, valid_double_with_exponential) {
+TEST(ValueTokenizer, valid_double_with_exponential)
+{
     ValueTokenizer values("3.14e22");
     ASSERT_DOUBLE_EQ(values.next_double(), 3.14e22);
 }
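
Beyond the brace-style, include-order, and spelling cleanup, the diff pins down an API change in the Tokenizer class: indexed access (t[0]) and range-for iteration are replaced by next() and as_vector(). The sketch below shows roughly how the updated Tokenizer and ValueTokenizer classes are used outside of GoogleTest. The method names and exception types are taken from the tests above; the main() wrapper, the printf reporting, and the combined "10 3.14" input string are illustrative assumptions (ValueTokenizer is assumed to split on whitespace by default, as the single-argument Tokenizer constructor does).

// Sketch only: exercises the Tokenizer/ValueTokenizer API covered by the
// tests above. main(), the input strings, and the printf output are
// hypothetical; building it requires linking against LAMMPS' tokenizer code.
#include "tokenizer.h"

#include <cstdio>
#include <string>
#include <vector>

using namespace LAMMPS_NS;

int main()
{
    // Separator characters are given explicitly; leading/trailing separators
    // do not create empty tokens, so two tokens are reported here.
    Tokenizer words(" test word ", " ");
    printf("tokens: %d\n", (int) words.count());

    // The single-argument constructor falls back to whitespace separators;
    // as_vector() returns all tokens at once, next() consumes them one by one.
    Tokenizer t(" \r\n test \t word \f");
    std::vector<std::string> list = t.as_vector();
    for (const auto &w : list) printf("token: %s\n", w.c_str());

    // ValueTokenizer converts tokens to numbers and throws typed exceptions
    // (InvalidIntegerException, InvalidFloatException) on malformed input.
    ValueTokenizer values("10 3.14");   // assumed whitespace-separated input
    int i = values.next_int();
    double d = values.next_double();
    printf("parsed: %d and %g\n", i, d);

    try {
        ValueTokenizer bad("f10");
        bad.next_int();
    } catch (InvalidIntegerException &) {
        printf("rejected malformed integer\n");
    }
    return 0;
}

The iterate_words test calls next() twice and then count() on the same object, so token consumption and counting can be mixed on one Tokenizer instance; as_vector() is the convenient replacement for the removed range-for usage.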