Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Do some tactical inlining across lexer and parser. #4307

Merged
merged 2 commits into the target branch from the source branch
Sep 16, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 0 additions & 28 deletions toolchain/lex/tokenized_buffer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -20,10 +20,6 @@

namespace Carbon::Lex {

// Returns the lexical kind recorded for `token`.
auto TokenizedBuffer::GetKind(TokenIndex token) const -> TokenKind {
  const auto& info = GetTokenInfo(token);
  return info.kind();
}

// Maps `token` to the line containing it by locating its byte offset.
auto TokenizedBuffer::GetLine(TokenIndex token) const -> LineIndex {
  const auto offset = GetTokenInfo(token).byte_offset();
  return FindLineIndex(offset);
}
Expand Down Expand Up @@ -159,16 +155,6 @@ auto TokenizedBuffer::GetMatchedOpeningToken(TokenIndex closing_token) const
return closing_token_info.opening_token_index();
}

// Reports whether whitespace immediately precedes `token`.
auto TokenizedBuffer::HasLeadingWhitespace(TokenIndex token) const -> bool {
  const auto& info = GetTokenInfo(token);
  return info.has_leading_space();
}

// A token has trailing whitespace exactly when a successor token exists and
// that successor has leading whitespace.
auto TokenizedBuffer::HasTrailingWhitespace(TokenIndex token) const -> bool {
  TokenIterator next(token);
  ++next;
  if (next == tokens().end()) {
    return false;
  }
  return GetTokenInfo(*next).has_leading_space();
}

auto TokenizedBuffer::IsRecoveryToken(TokenIndex token) const -> bool {
if (recovery_tokens_.empty()) {
return false;
Expand Down Expand Up @@ -359,20 +345,6 @@ auto TokenizedBuffer::AddLine(LineInfo info) -> LineIndex {
return LineIndex(static_cast<int>(line_infos_.size()) - 1);
}

// Fetches the mutable stored info record for `token`.
auto TokenizedBuffer::GetTokenInfo(TokenIndex token) -> TokenInfo& {
  auto& info = token_infos_[token.index];
  return info;
}

// Fetches the read-only stored info record for `token`.
auto TokenizedBuffer::GetTokenInfo(TokenIndex token) const -> const TokenInfo& {
  const auto& info = token_infos_[token.index];
  return info;
}

// Appends `info` to the token stream and returns the new token's index.
auto TokenizedBuffer::AddToken(TokenInfo info) -> TokenIndex {
  // Fold the token's parse-tree size estimate into the running total; the
  // accumulation is independent of the push_back, so order doesn't matter.
  expected_max_parse_tree_size_ += info.kind().expected_max_parse_tree_size();
  token_infos_.push_back(info);
  int new_index = static_cast<int>(token_infos_.size()) - 1;
  return TokenIndex(new_index);
}

auto TokenizedBuffer::CollectMemUsage(MemUsage& mem_usage,
llvm::StringRef label) const -> void {
mem_usage.Add(MemUsage::ConcatLabel(label, "allocator_"), allocator_);
Expand Down
32 changes: 32 additions & 0 deletions toolchain/lex/tokenized_buffer.h
Original file line number Diff line number Diff line change
Expand Up @@ -477,6 +477,38 @@ using LexerDiagnosticEmitter = DiagnosticEmitter<const char*>;
// A diagnostic emitter that uses tokens as its source of location information.
using TokenDiagnosticEmitter = DiagnosticEmitter<TokenIndex>;

// Returns the lexical kind recorded for `token`. Defined inline in the header
// so hot callers in the parser can avoid a call.
inline auto TokenizedBuffer::GetKind(TokenIndex token) const -> TokenKind {
  const auto& info = GetTokenInfo(token);
  return info.kind();
}

// Reports whether whitespace immediately precedes `token`.
inline auto TokenizedBuffer::HasLeadingWhitespace(TokenIndex token) const
    -> bool {
  const auto& info = GetTokenInfo(token);
  return info.has_leading_space();
}

// A token has trailing whitespace exactly when a successor token exists and
// that successor has leading whitespace.
inline auto TokenizedBuffer::HasTrailingWhitespace(TokenIndex token) const
    -> bool {
  TokenIterator next(token);
  ++next;
  if (next == tokens().end()) {
    return false;
  }
  return GetTokenInfo(*next).has_leading_space();
}

// Fetches the mutable stored info record for `token`.
inline auto TokenizedBuffer::GetTokenInfo(TokenIndex token) -> TokenInfo& {
  auto& info = token_infos_[token.index];
  return info;
}

// Fetches the read-only stored info record for `token`.
inline auto TokenizedBuffer::GetTokenInfo(TokenIndex token) const
    -> const TokenInfo& {
  const auto& info = token_infos_[token.index];
  return info;
}

// Appends `info` to the token stream and returns the new token's index.
inline auto TokenizedBuffer::AddToken(TokenInfo info) -> TokenIndex {
  // `size()` is a `size_t` but `TokenIndex` stores an `int`; cast explicitly
  // to avoid an implicit narrowing conversion, matching the casts used by
  // `AddLine` and the previous out-of-line definition of this function.
  TokenIndex index(static_cast<int>(token_infos_.size()));
  token_infos_.push_back(info);
  // Fold the token's parse-tree size estimate into the running total.
  expected_max_parse_tree_size_ += info.kind().expected_max_parse_tree_size();
  return index;
}

} // namespace Carbon::Lex

#endif // CARBON_TOOLCHAIN_LEX_TOKENIZED_BUFFER_H_
17 changes: 0 additions & 17 deletions toolchain/parse/context.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -66,16 +66,6 @@ Context::Context(Tree& tree, Lex::TokenizedBuffer& tokens,
tokens_->GetKind(*end_));
}

// Appends a childless (leaf) node for `token` to the parse tree.
auto Context::AddLeafNode(NodeKind kind, Lex::TokenIndex token, bool has_error)
    -> void {
  Tree::NodeImpl node(kind, has_error, token);
  tree_->node_impls_.push_back(node);
}

// Appends a node with children for `token` to the parse tree.
auto Context::AddNode(NodeKind kind, Lex::TokenIndex token, bool has_error)
    -> void {
  Tree::NodeImpl node(kind, has_error, token);
  tree_->node_impls_.push_back(node);
}

auto Context::ReplacePlaceholderNode(int32_t position, NodeKind kind,
Lex::TokenIndex token, bool has_error)
-> void {
Expand Down Expand Up @@ -143,13 +133,6 @@ auto Context::ConsumeChecked(Lex::TokenKind kind) -> Lex::TokenIndex {
return Consume();
}

// Consumes and returns the current token when it matches `kind`; otherwise
// leaves the position unchanged and returns an empty optional.
auto Context::ConsumeIf(Lex::TokenKind kind) -> std::optional<Lex::TokenIndex> {
  if (PositionIs(kind)) {
    return Consume();
  }
  return std::nullopt;
}

auto Context::FindNextOf(std::initializer_list<Lex::TokenKind> desired_kinds)
-> std::optional<Lex::TokenIndex> {
auto new_position = position_;
Expand Down
15 changes: 12 additions & 3 deletions toolchain/parse/context.h
Original file line number Diff line number Diff line change
Expand Up @@ -97,10 +97,14 @@ class Context {

// Adds a node to the parse tree that has no children (a leaf).
// `has_error` marks the node as erroneous; it defaults to false so the common
// success path can omit it.
// NOTE(review): this hunk interleaves the removed declaration with the added
// inline definition — read `-> void;` as the old line and `-> void {` as new.
auto AddLeafNode(NodeKind kind, Lex::TokenIndex token, bool has_error = false)
-> void;
-> void {
tree_->node_impls_.push_back(Tree::NodeImpl(kind, has_error, token));
}

// Adds a node to the parse tree that has children. Unlike `AddLeafNode`,
// `has_error` has no default and must be passed explicitly.
// NOTE(review): the first `auto AddNode(...)` line below is the removed
// out-of-line declaration; the second is the added inline definition.
auto AddNode(NodeKind kind, Lex::TokenIndex token, bool has_error) -> void;
auto AddNode(NodeKind kind, Lex::TokenIndex token, bool has_error) -> void {
tree_->node_impls_.push_back(Tree::NodeImpl(kind, has_error, token));
}
josh11b marked this conversation as resolved.
Show resolved Hide resolved

// Replaces the placeholder node at the indicated position with a leaf node.
//
Expand Down Expand Up @@ -154,7 +158,12 @@ class Context {

// If the current position's token matches this `Kind`, returns it and
// advances to the next position. Otherwise returns an empty optional.
// On no match the position is left untouched, so callers may probe for
// several kinds in sequence.
// NOTE(review): the lone `... -> std::optional<Lex::TokenIndex>;` line is the
// removed declaration; the braced version that follows is the added inline
// definition.
auto ConsumeIf(Lex::TokenKind kind) -> std::optional<Lex::TokenIndex>;
auto ConsumeIf(Lex::TokenKind kind) -> std::optional<Lex::TokenIndex> {
if (!PositionIs(kind)) {
return std::nullopt;
}
return Consume();
}

// Find the next token of any of the given kinds at the current bracketing
// level.
Expand Down
Loading