From 2bd1fadb7732bf17a605b2af3d228d6cbcb6766b Mon Sep 17 00:00:00 2001
From: Niels Möller
Date: Thu, 2 Sep 2021 16:46:57 +0200
Subject: [PATCH] Delete unused variants of rtc::tokenize

Bug: webrtc:6424
Change-Id: I16f3313e242e0e9ee2039a79d3a8b50c28190832
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/231129
Reviewed-by: Harald Alvestrand
Commit-Queue: Niels Moller
Cr-Commit-Position: refs/heads/main@{#34918}
---
 rtc_base/string_encode.cc          | 46 ----------------------------
 rtc_base/string_encode.h           | 18 -----------
 rtc_base/string_encode_unittest.cc | 51 ------------------------------
 3 files changed, 115 deletions(-)

diff --git a/rtc_base/string_encode.cc b/rtc_base/string_encode.cc
index 91a66292f2..f30eda2dc5 100644
--- a/rtc_base/string_encode.cc
+++ b/rtc_base/string_encode.cc
@@ -186,52 +186,6 @@ size_t tokenize_with_empty_tokens(const std::string& source,
   return fields->size();
 }
 
-size_t tokenize_append(const std::string& source,
-                       char delimiter,
-                       std::vector<std::string>* fields) {
-  if (!fields)
-    return 0;
-
-  std::vector<std::string> new_fields;
-  tokenize(source, delimiter, &new_fields);
-  fields->insert(fields->end(), new_fields.begin(), new_fields.end());
-  return fields->size();
-}
-
-size_t tokenize(const std::string& source,
-                char delimiter,
-                char start_mark,
-                char end_mark,
-                std::vector<std::string>* fields) {
-  if (!fields)
-    return 0;
-  fields->clear();
-
-  std::string remain_source = source;
-  while (!remain_source.empty()) {
-    size_t start_pos = remain_source.find(start_mark);
-    if (std::string::npos == start_pos)
-      break;
-    std::string pre_mark;
-    if (start_pos > 0) {
-      pre_mark = remain_source.substr(0, start_pos - 1);
-    }
-
-    ++start_pos;
-    size_t end_pos = remain_source.find(end_mark, start_pos);
-    if (std::string::npos == end_pos)
-      break;
-
-    // We have found the matching marks. First tokenize the pre-mark. Then add
-    // the marked part as a single field. Finally, loop back for the post-mark.
-    tokenize_append(pre_mark, delimiter, fields);
-    fields->push_back(remain_source.substr(start_pos, end_pos - start_pos));
-    remain_source = remain_source.substr(end_pos + 1);
-  }
-
-  return tokenize_append(remain_source, delimiter, fields);
-}
-
 bool tokenize_first(const std::string& source,
                     const char delimiter,
                     std::string* token,
diff --git a/rtc_base/string_encode.h b/rtc_base/string_encode.h
index 7f3345bb5e..fc48cac7d1 100644
--- a/rtc_base/string_encode.h
+++ b/rtc_base/string_encode.h
@@ -77,24 +77,6 @@ size_t tokenize_with_empty_tokens(const std::string& source,
                                   char delimiter,
                                   std::vector<std::string>* fields);
 
-// Tokenize and append the tokens to fields. Return the new size of fields.
-size_t tokenize_append(const std::string& source,
-                       char delimiter,
-                       std::vector<std::string>* fields);
-
-// Splits the source string into multiple fields separated by delimiter, with
-// duplicates of delimiter ignored. Trailing delimiter ignored. A substring in
-// between the start_mark and the end_mark is treated as a single field. Return
-// the size of fields. For example, if source is "filename
-// \"/Library/Application Support/media content.txt\"", delimiter is ' ', and
-// the start_mark and end_mark are '"', this method returns two fields:
-// "filename" and "/Library/Application Support/media content.txt".
-size_t tokenize(const std::string& source,
-                char delimiter,
-                char start_mark,
-                char end_mark,
-                std::vector<std::string>* fields);
-
 // Extract the first token from source as separated by delimiter, with
 // duplicates of delimiter ignored. Return false if the delimiter could not be
 // found, otherwise return true.
diff --git a/rtc_base/string_encode_unittest.cc b/rtc_base/string_encode_unittest.cc
index 19d293bc2f..96b6445539 100644
--- a/rtc_base/string_encode_unittest.cc
+++ b/rtc_base/string_encode_unittest.cc
@@ -169,57 +169,6 @@ TEST(TokenizeTest, CompareSubstrings) {
   ASSERT_EQ(0ul, fields.size());
 }
 
-TEST(TokenizeTest, TokenizeAppend) {
-  ASSERT_EQ(0ul, tokenize_append("A B C", ' ', nullptr));
-
-  std::vector<std::string> fields;
-
-  tokenize_append("A B C", ' ', &fields);
-  ASSERT_EQ(3ul, fields.size());
-  ASSERT_STREQ("B", fields.at(1).c_str());
-
-  tokenize_append("D E", ' ', &fields);
-  ASSERT_EQ(5ul, fields.size());
-  ASSERT_STREQ("B", fields.at(1).c_str());
-  ASSERT_STREQ("E", fields.at(4).c_str());
-}
-
-TEST(TokenizeTest, TokenizeWithMarks) {
-  ASSERT_EQ(0ul, tokenize("D \"A B", ' ', '(', ')', nullptr));
-
-  std::vector<std::string> fields;
-  tokenize("A B C", ' ', '"', '"', &fields);
-  ASSERT_EQ(3ul, fields.size());
-  ASSERT_STREQ("C", fields.at(2).c_str());
-
-  tokenize("\"A B\" C", ' ', '"', '"', &fields);
-  ASSERT_EQ(2ul, fields.size());
-  ASSERT_STREQ("A B", fields.at(0).c_str());
-
-  tokenize("D \"A B\" C", ' ', '"', '"', &fields);
-  ASSERT_EQ(3ul, fields.size());
-  ASSERT_STREQ("D", fields.at(0).c_str());
-  ASSERT_STREQ("A B", fields.at(1).c_str());
-
-  tokenize("D \"A B\" C \"E F\"", ' ', '"', '"', &fields);
-  ASSERT_EQ(4ul, fields.size());
-  ASSERT_STREQ("D", fields.at(0).c_str());
-  ASSERT_STREQ("A B", fields.at(1).c_str());
-  ASSERT_STREQ("E F", fields.at(3).c_str());
-
-  // No matching marks.
-  tokenize("D \"A B", ' ', '"', '"', &fields);
-  ASSERT_EQ(3ul, fields.size());
-  ASSERT_STREQ("D", fields.at(0).c_str());
-  ASSERT_STREQ("\"A", fields.at(1).c_str());
-
-  tokenize("D (A B) C (E F) G", ' ', '(', ')', &fields);
-  ASSERT_EQ(5ul, fields.size());
-  ASSERT_STREQ("D", fields.at(0).c_str());
-  ASSERT_STREQ("A B", fields.at(1).c_str());
-  ASSERT_STREQ("E F", fields.at(3).c_str());
-}
-
 TEST(TokenizeTest, TokenizeWithEmptyTokens) {
   std::vector<std::string> fields;
  EXPECT_EQ(3ul, tokenize_with_empty_tokens("a.b.c", '.', &fields));
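
For any downstream caller that still depended on the removed mark-aware overload, the deleted implementation above is straightforward to reproduce locally. Below is a minimal, self-contained sketch in standard C++ (the names SplitSkippingEmpty and TokenizeWithMarks are hypothetical, not WebRTC APIs) that mirrors the deleted logic: plain-split the text before each start mark, emit the marked span as a single field, then continue after the end mark. Instead of the deleted code's substr(0, start_pos - 1) trim, it passes the full prefix to the plain split and lets empty-token skipping absorb the trailing delimiter; on the inputs from the deleted tests the results are the same.

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Split on `delimiter`, skipping empty tokens, appending to `fields` (the
// role the deleted tokenize_append played on top of rtc::tokenize).
size_t SplitSkippingEmpty(const std::string& source,
                          char delimiter,
                          std::vector<std::string>* fields) {
  size_t last = 0;
  for (size_t i = 0; i < source.length(); ++i) {
    if (source[i] == delimiter) {
      if (i != last)
        fields->push_back(source.substr(last, i - last));
      last = i + 1;
    }
  }
  if (last != source.length())
    fields->push_back(source.substr(last));
  return fields->size();
}

// Mark-aware split: a substring between start_mark and end_mark becomes a
// single field; everything else is split on the delimiter.
size_t TokenizeWithMarks(const std::string& source,
                         char delimiter,
                         char start_mark,
                         char end_mark,
                         std::vector<std::string>* fields) {
  fields->clear();
  std::string remaining = source;
  while (true) {
    size_t start = remaining.find(start_mark);
    if (start == std::string::npos)
      break;
    size_t end = remaining.find(end_mark, start + 1);
    if (end == std::string::npos)
      break;  // No closing mark: fall through and split the rest plainly.
    // Split the text before the start mark; the delimiter preceding the
    // mark is absorbed because empty tokens are skipped.
    SplitSkippingEmpty(remaining.substr(0, start), delimiter, fields);
    // The marked span becomes one field, marks excluded.
    fields->push_back(remaining.substr(start + 1, end - start - 1));
    remaining = remaining.substr(end + 1);
  }
  return SplitSkippingEmpty(remaining, delimiter, fields);
}

int main() {
  std::vector<std::string> fields;
  TokenizeWithMarks(
      "filename \"/Library/Application Support/media content.txt\"",
      ' ', '"', '"', &fields);
  for (const std::string& f : fields)
    std::cout << '[' << f << "]\n";
}

Compiled and run, the example prints [filename] and [/Library/Application Support/media content.txt], the two fields described in the deleted header comment; the unmatched-mark and parenthesis cases from the deleted unit test behave the same way as the removed rtc::tokenize overload.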