[analyzer] Initial LSP server e2e benchmark
A benchmark that launches the Dart analyzer server in language server
protocol mode and communicates with it as an IDE would.
The benchmark generates between 16 and 1024 copies of the abstract
scanner (to get a large amount of code), wires the files together via
imports and exports in cycles and/or chains, performs an edit and
requests completion. It times initial startup (with no cache),
completion after the change, and the point at which the analyzer is
done analyzing after the change.
It does this in several modes that change the way the files are
imported and exported (a generated file is sketched after the list):
* ImportCycle where file1 imports file2 etc and the last file imports
file1. There are no exports.
* ImportChain where file1 imports file2 etc and the last file doesn't
import anything. There are no exports.
* ImportExportCycle where file1 imports and exports file2 etc and the
last file imports and exports file1.
* ImportExportChain where file1 imports and exports file2 etc and the
last file doesn't import or export anything.
* ImportCycleExportChain where file1 imports and exports file2 etc and
the last file imports file1 but doesn't export anything.
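To make that concrete, in ImportExportCycle mode the first generated
file (file00001.dart) comes out roughly like this; the other modes
blank the import and/or export line in some or all of the files:
  import 'file00002.dart' as nextFile;
  export 'file00002.dart';

  // ...the contents of copy_me.dart (the abstract scanner copy)...

  String foo(int i) {
    if (i == 0) return "foo";
    return nextFile.foo(i-1);
  }

  String get1() {
    return "1";
  }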
For ImportCycle, ImportChain and ImportExportChain things appear to
scale ~linearly and - using AOT - have timings (in seconds) in this
ballpark (this is specifically for ImportCycle):
+------+-----------+------------+------------+
| Size | Initial | Completion | Fully done |
+------+-----------+------------+------------+
| 16 | 0.46561 | 0.158765 | 0.40474 |
| 32 | 0.901167 | 0.268819 | 0.859874 |
| 64 | 1.657207 | 0.428747 | 1.488365 |
| 128 | 3.178606 | 0.843576 | 3.040237 |
| 256 | 6.015557 | 1.737661 | 6.010487 |
| 512 | 12.08567 | 2.979242 | 11.736878 |
| 1024 | 24.273368 | 6.101671 | 24.018495 |
+------+-----------+------------+------------+
For ImportExportCycle and ImportCycleExportChain scaling is worse:
going from 512 to 1024 files roughly doubles the timings in the table
above but more than triples them in the one below. ImportExportCycle
looks like this:
+------+-----------+------------+------------+
| Size | Initial | Completion | Fully done |
+------+-----------+------------+------------+
| 16 | 0.46673 | 0.169486 | 0.406448 |
| 32 | 0.875871 | 0.242876 | 0.85543 |
| 64 | 1.583077 | 0.465915 | 1.506953 |
| 128 | 3.198071 | 0.903894 | 3.09165 |
| 256 | 6.786677 | 2.149489 | 6.779569 |
| 512 | 17.346131 | 8.92149 | 17.971033 |
| 1024 | 63.358453 | 46.152089 | 65.401559 |
+------+-----------+------------+------------+
(In the tables 'Initial' is the time until the first analysis finishes,
'Completion' is the time until the completion request is answered after
a top-level change, and 'Fully done' is the time until the analyzer
stops analyzing after that change.)
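(To reproduce: the tool has a regular main() and should be runnable
directly from an SDK checkout, e.g.
  dart pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/big_chain_benchmark.dart
though, as noted, the numbers above were gathered using AOT.)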
Change-Id: Id7214c0d6c14199f39c0c8a6a8b4941a0e575dc3
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/413401
Reviewed-by: Brian Wilkerson <brianwilkerson@google.com>
Commit-Queue: Jens Johansen <jensj@google.com>
diff --git a/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/big_chain_benchmark.dart b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/big_chain_benchmark.dart
new file mode 100644
index 0000000..d8f4823
--- /dev/null
+++ b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/big_chain_benchmark.dart
@@ -0,0 +1,272 @@
+// Copyright (c) 2025, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+import 'dart:io';
+
+import '../lsp_benchmark.dart';
+import '../messages.dart';
+
+Future<void> main() async {
+ StringBuffer sb = StringBuffer();
+ for (CodeType codeType in CodeType.values) {
+ for (int numFiles in [16, 32, 64, 128, 256, 512, 1024]) {
+ Directory tmpDir = Directory.systemTemp.createTempSync('lsp_benchmark');
+ try {
+ Directory cacheDir = Directory.fromUri(tmpDir.uri.resolve('cache/'))
+ ..createSync(recursive: true);
+ Directory dartDir = Directory.fromUri(tmpDir.uri.resolve('dart/'))
+ ..createSync(recursive: true);
+ copyData(dartDir.uri, numFiles, codeType);
+ BigChainBenchmark benchmark = BigChainBenchmark(
+ dartDir.uri,
+ cacheDir.uri,
+ numFiles: numFiles,
+ );
+ try {
+ await benchmark.run();
+ } finally {
+ benchmark.exit();
+ }
+
+ print('====================');
+ print('$numFiles files / $codeType:');
+ print(
+ 'Initial: '
+ '${formatDuration(benchmark.firstAnalyzingDuration)}',
+ );
+ print(
+ 'Completion after change: '
+ '${formatDuration(benchmark.completionAfterChange)}',
+ );
+ print(
+ 'Fully done after change: '
+ '${formatDuration(benchmark.doneAfterChange)}',
+ );
+ print('====================');
+ sb.writeln('$numFiles files / $codeType:');
+ sb.writeln(
+ 'Initial: '
+ '${formatDuration(benchmark.firstAnalyzingDuration)}',
+ );
+ sb.writeln(
+ 'Completion after change: '
+ '${formatDuration(benchmark.completionAfterChange)}',
+ );
+ sb.writeln(
+ 'Fully done after change: '
+ '${formatDuration(benchmark.doneAfterChange)}',
+ );
+ sb.writeln();
+ } finally {
+ tmpDir.deleteSync(recursive: true);
+ }
+ }
+ }
+
+ print('==================================');
+ print(sb.toString().trim());
+ print('==================================');
+}
+
+void copyData(Uri tmp, int numFiles, CodeType copyType) {
+ Uri filesUri = Platform.script.resolve('files/');
+ Uri tmpLib = tmp.resolve('lib/');
+ Directory.fromUri(tmpLib).createSync();
+ Directory files = Directory.fromUri(filesUri);
+ for (var file in files.listSync()) {
+ if (file is! File) continue;
+ String filename = file.uri.pathSegments.last;
+ file.copySync(tmpLib.resolve(filename).toFilePath());
+ }
+ File copyMe = File.fromUri(filesUri.resolve('copy_me/copy_me.dart'));
+ String copyMeData = copyMe.readAsStringSync();
+ Uri copyToDir = tmpLib.resolve('copies/');
+ Directory.fromUri(copyToDir).createSync(recursive: true);
+
+ for (int i = 1; i <= numFiles; i++) {
+ String nextFile = getFilenameFor(i == numFiles ? 1 : i + 1);
+ String import = "import '$nextFile' as nextFile;";
+ String export = "export '$nextFile';";
+ switch (copyType) {
+ case CodeType.ImportCycle:
+ export = '';
+ case CodeType.ImportChain:
+ export = '';
+ if (i == numFiles) {
+ import = '';
+ }
+ case CodeType.ImportExportChain:
+ if (i == numFiles) {
+ import = '';
+ export = '';
+ }
+ case CodeType.ImportCycleExportChain:
+ if (i == numFiles) {
+ export = '';
+ }
+ case CodeType.ImportExportCycle:
+        // Keep the default import and export.
+ }
+
+ String fooMethod;
+ if (import.isEmpty) {
+ fooMethod = '''
+String foo(int i) {
+ if (i == 0) return "foo";
+ return "bar";
+}''';
+ } else {
+ fooMethod = '''
+String foo(int i) {
+ if (i == 0) return "foo";
+ return nextFile.foo(i-1);
+}''';
+ }
+ File.fromUri(copyToDir.resolve(getFilenameFor(i))).writeAsStringSync('''
+$import
+$export
+
+$copyMeData
+
+$fooMethod
+
+String get$i() {
+ return "$i";
+}
+
+''');
+ }
+
+ File.fromUri(copyToDir.resolve('main.dart')).writeAsStringSync("""
+import '${getFilenameFor(1)}';
+
+void main(List<String> arguments) {
+
+}
+""");
+}
+
+/// Formats [duration] as seconds with six fractional digits
+/// (microseconds), e.g. '1.657207'; returns 'N/A' if null.
+String formatDuration(Duration? duration) {
+  if (duration == null) return 'N/A';
+  int seconds = duration.inSeconds;
+  int micros =
+      duration.inMicroseconds - seconds * Duration.microsecondsPerSecond;
+  return '$seconds.${micros.toString().padLeft(6, '0')}';
+}
+
+String getFilenameFor(int i) {
+ return "file${i.toString().padLeft(5, '0')}.dart";
+}
+
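+/// Opens main.dart and the last generated file, edits both, and records
+/// completion latency and time-until-done after the change; the initial
+/// analysis time is recorded by the [LspBenchmark] base class.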
+class BigChainBenchmark extends LspBenchmark {
+ @override
+ final Uri rootUri;
+ @override
+ final Uri cacheFolder;
+
+ Duration? completionAfterChange;
+ Duration? doneAfterChange;
+
+ int numFiles;
+
+ BigChainBenchmark(this.rootUri, this.cacheFolder, {required this.numFiles});
+
+ @override
+ LaunchFrom get launchFrom => LaunchFrom.Dart;
+
+ @override
+ Future<void> afterInitialization() async {
+ Uri tmpLib = rootUri.resolve('lib/');
+ Uri lastFileUri = tmpLib.resolve('copies/${getFilenameFor(numFiles)}');
+ Uri mainFileUri = tmpLib.resolve('copies/main.dart');
+ var mainFileContent = File.fromUri(mainFileUri).readAsStringSync();
+ var lastFileContent = File.fromUri(lastFileUri).readAsStringSync();
+ var lastFileContentLines = lastFileContent.split('\n');
+
+ Future<void> openFile(Uri uri, String content) async {
+ await send(Messages.open(uri, 1, content));
+ await (await send(
+ Messages.documentColor(uri, largestIdSeen + 1),
+ ))?.completer.future;
+    await (await send(
+      // Mirror the documentColor request for the file being opened.
+      Messages.documentSymbol(uri, largestIdSeen + 1),
+    ))?.completer.future;
+
+ // TODO(jensj): Possibly send this - as the IDE does - too?
+ // textDocument/semanticTokens/full
+ // textDocument/codeAction
+ // textDocument/documentLink
+ // textDocument/codeAction
+ // textDocument/semanticTokens/range
+ // textDocument/inlayHint
+ // textDocument/foldingRange
+ // textDocument/codeAction
+ // textDocument/documentHighlight
+ // textDocument/codeAction
+ // textDocument/codeLens
+ // textDocument/codeAction
+ }
+
+ // Open main file.
+ await openFile(mainFileUri, mainFileContent);
+
+ // Open last file.
+ await openFile(lastFileUri, lastFileContent);
+
+ // Change last file: Add a top-level method.
+ await send(
+ Messages.didChange(
+ lastFileUri,
+ version: 2,
+ insertAtLine: lastFileContentLines.length - 1 /* line 0-indexed */,
+ insert: '\nString bar() {\n return "bar";\n}',
+ ),
+ );
+
+    // Request the symbols (the response is deliberately not awaited and so
+    // is effectively ignored).
+ await send(Messages.documentSymbol(lastFileUri, largestIdSeen + 1));
+
+ // Start typing in the main file and request auto-completion.
+ await send(
+ Messages.didChange(
+ mainFileUri,
+ version: 2,
+ insertAtLine: 3 /* line 0-indexed; at blank line inside main */,
+ insertAtCharacter: 2,
+ insert: 'ge',
+ ),
+ );
+ Future<Map<String, dynamic>> completionFuture =
+ (await send(
+ Messages.completion(
+ mainFileUri,
+ largestIdSeen + 1,
+ line: 3,
+ character: 4 /* after the 'ge' just typed */,
+ ),
+ ))!.completer.future;
+
+ Stopwatch stopwatch = Stopwatch()..start();
+ var completionResponse = await completionFuture;
+ List<dynamic> completionItems =
+ completionResponse['result']['items'] as List;
+ completionAfterChange = stopwatch.elapsed;
+ print(
+ 'Got ${completionItems.length} completion items '
+ 'in $completionAfterChange',
+ );
+ await waitWhileAnalyzing();
+ stopwatch.stop();
+ doneAfterChange = stopwatch.elapsed;
+ print('Fully done after $doneAfterChange');
+ }
+}
+
+enum CodeType {
+ ImportCycle,
+ ImportChain,
+ ImportExportCycle,
+ ImportExportChain,
+ ImportCycleExportChain,
+}
diff --git a/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/analysis_options.yaml b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/analysis_options.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/analysis_options.yaml
diff --git a/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/characters.dart b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/characters.dart
new file mode 100644
index 0000000..3cb7f14
--- /dev/null
+++ b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/characters.dart
@@ -0,0 +1,134 @@
+// Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+library _fe_analyzer_shared.scanner.characters;
+
+const int $$ = 36;
+const int $0 = 48;
+const int $1 = 49;
+const int $2 = 50;
+const int $3 = 51;
+const int $4 = 52;
+const int $5 = 53;
+const int $6 = 54;
+const int $7 = 55;
+const int $8 = 56;
+const int $9 = 57;
+const int $_ = 95;
+const int $A = 65;
+const int $a = 97;
+const int $AMPERSAND = 38;
+const int $AT = 64;
+const int $B = 66;
+const int $b = 98;
+const int $BACKPING = 96;
+const int $BACKSLASH = 92;
+const int $BANG = 33;
+const int $BAR = 124;
+const int $BS = 8;
+const int $C = 67;
+const int $c = 99;
+const int $CARET = 94;
+const int $CLOSE_CURLY_BRACKET = 125;
+const int $CLOSE_PAREN = 41;
+const int $CLOSE_SQUARE_BRACKET = 93;
+const int $COLON = 58;
+const int $COMMA = 44;
+const int $CR = 13;
+const int $D = 68;
+const int $d = 100;
+const int $DEL = 127;
+const int $DQ = 34;
+const int $E = 69;
+const int $e = 101;
+const int $EOF = -1;
+const int $EQ = 61;
+const int $F = 70;
+const int $f = 102;
+const int $FF = 12;
+const int $FIRST_SURROGATE = 0xd800;
+const int $G = 71;
+const int $g = 103;
+const int $GT = 62;
+const int $H = 72;
+const int $h = 104;
+const int $HASH = 35;
+const int $I = 73;
+const int $i = 105;
+const int $J = 74;
+const int $j = 106;
+const int $K = 75;
+const int $k = 107;
+const int $L = 76;
+const int $l = 108;
+const int $LAST_CODE_POINT = 0x10ffff;
+const int $LAST_SURROGATE = 0xdfff;
+const int $LF = 10;
+const int $LS = 0x2028;
+const int $LT = 60;
+const int $M = 77;
+const int $m = 109;
+const int $MINUS = 45;
+const int $N = 78;
+const int $n = 110;
+const int $NBSP = 160;
+const int $O = 79;
+const int $o = 111;
+const int $OPEN_CURLY_BRACKET = 123;
+const int $OPEN_PAREN = 40;
+const int $OPEN_SQUARE_BRACKET = 91;
+const int $P = 80;
+const int $p = 112;
+const int $PERCENT = 37;
+const int $PERIOD = 46;
+const int $PLUS = 43;
+const int $PS = 0x2029;
+const int $Q = 81;
+const int $q = 113;
+const int $QUESTION = 63;
+const int $R = 82;
+const int $r = 114;
+const int $S = 83;
+const int $s = 115;
+const int $SEMICOLON = 59;
+const int $SLASH = 47;
+const int $SPACE = 32;
+const int $SQ = 39;
+const int $STAR = 42;
+const int $STX = 2;
+const int $T = 84;
+const int $t = 116;
+const int $TAB = 9;
+const int $TILDE = 126;
+const int $U = 85;
+const int $u = 117;
+const int $V = 86;
+const int $v = 118;
+const int $VTAB = 11;
+const int $W = 87;
+const int $w = 119;
+const int $X = 88;
+const int $x = 120;
+const int $Y = 89;
+
+const int $y = 121;
+const int $Z = 90;
+const int $z = 122;
+
+int hexDigitValue(int hexDigit) {
+ assert(isHexDigit(hexDigit));
+ // hexDigit is one of '0'..'9', 'A'..'F' and 'a'..'f'.
+ if (hexDigit <= $9) return hexDigit - $0;
+ return (hexDigit | ($a ^ $A)) - ($a - 10);
+}
+
+bool isDigit(int characterCode) {
+ return $0 <= characterCode && characterCode <= $9;
+}
+
+bool isHexDigit(int characterCode) {
+ if (characterCode <= $9) return $0 <= characterCode;
+ characterCode |= $a ^ $A;
+ return ($a <= characterCode && characterCode <= $f);
+}
diff --git a/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/copy_me/copy_me.dart b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/copy_me/copy_me.dart
new file mode 100644
index 0000000..b78abd7
--- /dev/null
+++ b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/copy_me/copy_me.dart
@@ -0,0 +1,2173 @@
+import 'dart:collection' show ListMixin;
+import 'dart:typed_data' show Uint16List, Uint32List;
+
+import '../characters.dart';
+import '../internal_utils.dart' show isIdentifierChar;
+import '../keyword_state.dart' show KeywordState, KeywordStateHelper;
+import '../link.dart' show Link;
+import '../token.dart'
+ show
+ BeginToken,
+ CommentToken,
+ Keyword,
+ KeywordToken,
+ LanguageVersionToken,
+ SyntheticToken,
+ Token,
+ TokenType;
+import '../token.dart' as analyzer show StringToken;
+import '../token_constants.dart';
+
+TokenType closeBraceInfoFor(BeginToken begin) {
+ return const {
+ '(': TokenType.CLOSE_PAREN,
+ '[': TokenType.CLOSE_SQUARE_BRACKET,
+ '{': TokenType.CLOSE_CURLY_BRACKET,
+ '<': TokenType.GT,
+ r'${': TokenType.CLOSE_CURLY_BRACKET,
+ }[begin.lexeme]!;
+}
+
+typedef void LanguageVersionChanged(
+ AbstractScanner scanner,
+ LanguageVersionToken languageVersion,
+);
+
+abstract class AbstractScanner {
+ /**
+ * A flag indicating whether character sequences `&&=` and `||=`
+ * should be tokenized as the assignment operators
+ * [AMPERSAND_AMPERSAND_EQ_TOKEN] and [BAR_BAR_EQ_TOKEN] respectively.
+ * See issue https://github.com/dart-lang/sdk/issues/30340
+ */
+ static const bool LAZY_ASSIGNMENT_ENABLED = false;
+
+ final bool includeComments;
+
+ /// Called when the scanner detects a language version comment
+ /// so that the listener can update the scanner configuration
+ /// based upon the specified language version.
+ final LanguageVersionChanged? languageVersionChanged;
+
+ /// Experimental flag for enabling scanning of `>>>`.
+ /// See https://github.com/dart-lang/language/issues/61
+ /// and https://github.com/dart-lang/language/issues/60
+ bool _enableTripleShift = false;
+
+ /// If `true`, 'augment' is treated as a built-in identifier.
+ bool _forAugmentationLibrary = false;
+
+ /**
+ * The string offset for the next token that will be created.
+ *
+ * Note that in the [Utf8BytesScanner], [stringOffset] and [scanOffset] values
+ * are different. One string character can be encoded using multiple UTF-8
+ * bytes.
+ */
+ int tokenStart = -1;
+
+ /**
+ * A pointer to the token stream created by this scanner. The first token
+ * is a special token and not part of the source file. This is an
+   * implementation detail to avoid special cases in the scanner. This token
+ * is not exposed to clients of the scanner, which are expected to invoke
+ * [firstToken] to access the token stream.
+ */
+ final Token tokens = new Token.eof(/* offset = */ -1);
+
+ /**
+ * A pointer to the last scanned token.
+ */
+ late Token tail;
+
+ /**
+ * A pointer to the last prepended error token.
+ */
+ late Token errorTail;
+
+ bool hasErrors = false;
+
+ /**
+ * A pointer to the stream of comment tokens created by this scanner
+ * before they are assigned to the [Token] precedingComments field
+ * of a non-comment token. A value of `null` indicates no comment tokens.
+ */
+ CommentToken? comments;
+
+ /**
+ * A pointer to the last scanned comment token or `null` if none.
+ */
+ Token? commentsTail;
+
+ final List<int> lineStarts;
+
+ /**
+ * The stack of open groups, e.g [: { ... ( .. :]
+ * Each BeginToken has a pointer to the token where the group
+ * ends. This field is set when scanning the end group token.
+ */
+ Link<BeginToken> groupingStack = const Link<BeginToken>();
+
+ final bool inRecoveryOption;
+ int recoveryCount = 0;
+ final bool allowLazyStrings;
+
+ AbstractScanner(
+ ScannerConfiguration? config,
+ this.includeComments,
+ this.languageVersionChanged, {
+ required int numberOfBytesHint,
+ this.allowLazyStrings = true,
+ }) : lineStarts = new LineStarts(numberOfBytesHint),
+ inRecoveryOption = false {
+ this.tail = this.tokens;
+ this.errorTail = this.tokens;
+ this.configuration = config;
+ }
+
+ AbstractScanner.recoveryOptionScanner(AbstractScanner copyFrom)
+ : lineStarts = [],
+ includeComments = false,
+ languageVersionChanged = null,
+ inRecoveryOption = true,
+ allowLazyStrings = true {
+ this.tail = this.tokens;
+ this.errorTail = this.tokens;
+ this._enableTripleShift = copyFrom._enableTripleShift;
+ this.tokenStart = copyFrom.tokenStart;
+ this.groupingStack = copyFrom.groupingStack;
+ }
+
+ set configuration(ScannerConfiguration? config) {
+ if (config != null) {
+ _enableTripleShift = config.enableTripleShift;
+ _forAugmentationLibrary = config.forAugmentationLibrary;
+ }
+ }
+
+ /**
+ * Returns the current scan offset.
+ *
+ * In the [Utf8BytesScanner] this is the offset into the byte list, in the
+ * [StringScanner] the offset in the source string.
+ */
+ int get scanOffset;
+
+ /**
+ * Returns the current string offset.
+ *
+ * In the [StringScanner] this is identical to the [scanOffset]. In the
+ * [Utf8BytesScanner] it is computed based on encountered UTF-8 characters.
+ */
+ int get stringOffset;
+
+ /**
+ * Advances and returns the next character.
+ *
+ * If the next character is non-ASCII, then the returned value depends on the
+ * scanner implementation. The [Utf8BytesScanner] returns a UTF-8 byte, while
+ * the [StringScanner] returns a UTF-16 code unit.
+ *
+ * The scanner ensures that [advance] is not invoked after it returned [$EOF].
+ * This allows implementations to omit bound checks if the data structure ends
+ * with '0'.
+ */
+ int advance();
+
+ int advanceAfterError() {
+ if (atEndOfFile()) return $EOF;
+ return advance(); // Ensure progress.
+ }
+
+ /**
+ * Appends a token that begins a new group, represented by [type].
+ * Group begin tokens are '{', '(', '[', '<' and '${'.
+ */
+ void appendBeginGroup(TokenType type) {
+ BeginToken token = new BeginToken(type, tokenStart, comments);
+ appendToken(token);
+
+ // { [ ${ cannot appear inside a type parameters / arguments.
+ if (type.kind != LT_TOKEN && type.kind != OPEN_PAREN_TOKEN) {
+ discardOpenLt();
+ }
+ groupingStack = groupingStack.prepend(token);
+ }
+
+ void appendComment(int start, TokenType type, bool asciiOnly) {
+ if (!includeComments) return;
+ CommentToken newComment = createCommentToken(type, start, asciiOnly);
+ _appendToCommentStream(newComment);
+ }
+
+ void appendDartDoc(int start, TokenType type, bool asciiOnly) {
+ if (!includeComments) return;
+ throw "removed";
+ }
+
+ /**
+ * Appends a token that begins an end group, represented by [type].
+ * It handles the group end tokens '}', ')' and ']'. The tokens '>',
+ * '>>' and '>>>' are handled separately by [appendGt], [appendGtGt]
+ * and [appendGtGtGt].
+ */
+ int appendEndGroup(TokenType type, int openKind) {
+ assert(openKind != LT_TOKEN); // openKind is < for > and >>
+ bool foundMatchingBrace = discardBeginGroupUntil(openKind);
+ return appendEndGroupInternal(foundMatchingBrace, type, openKind);
+ }
+
+ /// Append the end group (parenthesis, bracket etc).
+ /// If [foundMatchingBrace] is true the grouping stack (stack of parenthesis
+ /// etc) is updated, otherwise it's left alone.
+ /// In effect, if [foundMatchingBrace] is false this end token is basically
+ /// ignored, i.e. not really seen as an end group.
+ int appendEndGroupInternal(
+ bool foundMatchingBrace,
+ TokenType type,
+ int openKind,
+ ) {
+ if (!foundMatchingBrace) {
+ // No begin group. Leave the grouping stack alone and just continue.
+ appendPrecedenceToken(type);
+ return advance();
+ }
+ appendPrecedenceToken(type);
+ Token close = tail;
+ BeginToken begin = groupingStack.head;
+ if (begin.kind != openKind) {
+ assert(
+ begin.kind == STRING_INTERPOLATION_TOKEN &&
+ openKind == OPEN_CURLY_BRACKET_TOKEN,
+ );
+ // We're ending an interpolated expression.
+ begin.endGroup = close;
+ groupingStack = groupingStack.tail!;
+ // Using "start-of-text" to signal that we're back in string
+ // scanning mode.
+ return $STX;
+ }
+ begin.endGroup = close;
+ groupingStack = groupingStack.tail!;
+ return advance();
+ }
+
+ void appendEofToken() {
+ beginToken();
+ discardOpenLt();
+ while (!groupingStack.isEmpty) {
+ unmatchedBeginGroup(groupingStack.head);
+ groupingStack = groupingStack.tail!;
+ }
+ appendToken(new Token.eof(tokenStart, comments));
+ }
+
+ /**
+ * Appends a token for '>'.
+ * This method does not issue unmatched errors, because > is also the
+ * greater-than operator. It does not necessarily have to close a group.
+ */
+ void appendGt(TokenType type) {
+ appendPrecedenceToken(type);
+ if (groupingStack.isEmpty) return;
+ if (groupingStack.head.kind == LT_TOKEN) {
+ groupingStack.head.endGroup = tail;
+ groupingStack = groupingStack.tail!;
+ }
+ }
+
+ /**
+ * Appends a token for '>>'.
+ * This method does not issue unmatched errors, because >> is also the
+ * shift operator. It does not necessarily have to close a group.
+ */
+ void appendGtGt(TokenType type) {
+ appendPrecedenceToken(type);
+ if (groupingStack.isEmpty) return;
+ if (groupingStack.head.kind == LT_TOKEN) {
+ // Don't assign endGroup: in "T<U<V>>", the '>>' token closes the outer
+ // '<', the inner '<' is left without endGroup.
+ groupingStack = groupingStack.tail!;
+ }
+ if (groupingStack.isEmpty) return;
+ if (groupingStack.head.kind == LT_TOKEN) {
+ groupingStack.head.endGroup = tail;
+ groupingStack = groupingStack.tail!;
+ }
+ }
+
+ /// Appends a token for '>>>'.
+ ///
+ /// This method does not issue unmatched errors, because >>> is also the
+ /// triple shift operator. It does not necessarily have to close a group.
+ void appendGtGtGt(TokenType type) {
+ appendPrecedenceToken(type);
+ if (groupingStack.isEmpty) return;
+
+ // Don't assign endGroup: in "T<U<V<X>>>", the '>>>' token closes the
+ // outer '<', all the inner '<' are left without endGroups.
+ if (groupingStack.head.kind == LT_TOKEN) {
+ groupingStack = groupingStack.tail!;
+ }
+ if (groupingStack.isEmpty) return;
+ if (groupingStack.head.kind == LT_TOKEN) {
+ groupingStack = groupingStack.tail!;
+ }
+ if (groupingStack.isEmpty) return;
+ if (groupingStack.head.kind == LT_TOKEN) {
+ groupingStack.head.endGroup = tail;
+ groupingStack = groupingStack.tail!;
+ }
+ }
+
+ /**
+ * Appends a keyword token whose kind is determined by [keyword].
+ */
+ void appendKeywordToken(Keyword keyword) {
+ String syntax = keyword.lexeme;
+ // Type parameters and arguments cannot contain 'this'.
+ if (identical(syntax, 'this')) {
+ discardOpenLt();
+ }
+ appendToken(new KeywordToken(keyword, tokenStart, comments));
+ }
+
+ /**
+ * Appends a fixed token whose kind and content is determined by [type].
+ * Appends an *operator* token from [type].
+ *
+   * An operator token represents operators like ':', '.', ';', '&&', '==', '--',
+ * '=>', etc.
+ */
+ void appendPrecedenceToken(TokenType type) {
+ appendToken(new Token(type, tokenStart, comments));
+ }
+
+ /**
+ * Appends a substring from the scan offset [:start:] to the current
+ * [:scanOffset:] plus the [:extraOffset:]. For example, if the current
+ * scanOffset is 10, then [:appendSubstringToken(5, -1):] will append the
+ * substring string [5,9).
+ *
+ * Note that [extraOffset] can only be used if the covered character(s) are
+ * known to be ASCII.
+ */
+ void appendSubstringToken(
+ TokenType type,
+ int start,
+ bool asciiOnly, [
+ int extraOffset = 0,
+ ]) {
+ appendToken(
+ createSubstringToken(
+ type,
+ start,
+ asciiOnly,
+ extraOffset,
+ allowLazyStrings,
+ ),
+ );
+ }
+
+ /**
+ * Appends a substring from the scan offset [start] to the current
+ * [scanOffset] plus [syntheticChars]. The additional char(s) will be added
+ * to the unterminated string literal's lexeme but the returned
+ * token's length will *not* include those additional char(s)
+ * so as to be true to the original source.
+ */
+ void appendSyntheticSubstringToken(
+ TokenType type,
+ int start,
+ bool asciiOnly,
+ String syntheticChars,
+ ) {
+ appendToken(
+ createSyntheticSubstringToken(type, start, asciiOnly, syntheticChars),
+ );
+ }
+
+ /**
+ * Append the given token to the [tail] of the current stream of tokens.
+ */
+ void appendToken(Token token) {
+ tail.next = token;
+ token.previous = tail;
+ tail = token;
+ if (comments != null && comments == token.precedingComments) {
+ comments = null;
+ commentsTail = null;
+ } else {
+ // It is the responsibility of the caller to construct the token
+ // being appended with preceding comments if any
+ assert(comments == null || token.isSynthetic);
+ }
+ }
+
+ /// Return true when at EOF.
+ bool atEndOfFile();
+
+ /**
+ * Notifies that a new token starts at current offset.
+ */
+ @pragma("vm:prefer-inline")
+ void beginToken() {
+ tokenStart = stringOffset;
+ }
+
+ int bigHeaderSwitch(int next) {
+ if (next != $SLASH) {
+ return bigSwitch(next);
+ }
+ beginToken();
+ if ($SLASH != peek()) {
+ return tokenizeSlashOrComment(next);
+ }
+ return tokenizeLanguageVersionOrSingleLineComment(next);
+ }
+
+ int bigSwitch(int next) {
+ beginToken();
+ if (next == $SPACE || next == $TAB || next == $CR) {
+ return skipSpaces();
+ }
+ if (next == $LF) {
+ lineStarts.add(stringOffset + 1); // +1, the line starts after the $LF.
+ return skipSpaces();
+ }
+
+ int nextLower = next | 0x20;
+
+ if ($a <= nextLower && nextLower <= $z) {
+ if ($r == next) {
+ return tokenizeRawStringKeywordOrIdentifier(next);
+ }
+ return tokenizeKeywordOrIdentifier(next, /* allowDollar = */ true);
+ }
+
+ if (next == $CLOSE_PAREN) {
+ return appendEndGroup(TokenType.CLOSE_PAREN, OPEN_PAREN_TOKEN);
+ }
+
+ if (next == $OPEN_PAREN) {
+ appendBeginGroup(TokenType.OPEN_PAREN);
+ return advance();
+ }
+
+ if (next == $SEMICOLON) {
+ appendPrecedenceToken(TokenType.SEMICOLON);
+ // Type parameters and arguments cannot contain semicolon.
+ discardOpenLt();
+ return advance();
+ }
+
+ if (next == $PERIOD) {
+ return tokenizeDotsOrNumber(next);
+ }
+
+ if (next == $COMMA) {
+ appendPrecedenceToken(TokenType.COMMA);
+ return advance();
+ }
+
+ if (next == $EQ) {
+ return tokenizeEquals(next);
+ }
+
+ if (next == $CLOSE_CURLY_BRACKET) {
+ return appendEndGroup(
+ TokenType.CLOSE_CURLY_BRACKET,
+ OPEN_CURLY_BRACKET_TOKEN,
+ );
+ }
+
+ if (next == $SLASH) {
+ return tokenizeSlashOrComment(next);
+ }
+
+ if (next == $OPEN_CURLY_BRACKET) {
+ appendBeginGroup(TokenType.OPEN_CURLY_BRACKET);
+ return advance();
+ }
+
+ if (next == $DQ || next == $SQ) {
+ return tokenizeString(next, scanOffset, /* raw = */ false);
+ }
+
+ if (next == $_) {
+ return tokenizeKeywordOrIdentifier(next, /* allowDollar = */ true);
+ }
+
+ if (next == $COLON) {
+ appendPrecedenceToken(TokenType.COLON);
+ return advance();
+ }
+
+ if (next == $LT) {
+ return tokenizeLessThan(next);
+ }
+
+ if (next == $GT) {
+ return tokenizeGreaterThan(next);
+ }
+
+ if (next == $BANG) {
+ return tokenizeExclamation(next);
+ }
+
+ if (next == $OPEN_SQUARE_BRACKET) {
+ return tokenizeOpenSquareBracket(next);
+ }
+
+ if (next == $CLOSE_SQUARE_BRACKET) {
+ return appendEndGroup(
+ TokenType.CLOSE_SQUARE_BRACKET,
+ OPEN_SQUARE_BRACKET_TOKEN,
+ );
+ }
+
+ if (next == $AT) {
+ return tokenizeAt(next);
+ }
+
+ if (next >= $1 && next <= $9) {
+ return tokenizeNumber(next);
+ }
+
+ if (next == $AMPERSAND) {
+ return tokenizeAmpersand(next);
+ }
+
+ if (next == $0) {
+ return tokenizeHexOrNumber(next);
+ }
+
+ if (next == $QUESTION) {
+ return tokenizeQuestion(next);
+ }
+
+ if (next == $BAR) {
+ return tokenizeBar(next);
+ }
+
+ if (next == $PLUS) {
+ return tokenizePlus(next);
+ }
+
+ if (next == $$) {
+ return tokenizeKeywordOrIdentifier(next, /* allowDollar = */ true);
+ }
+
+ if (next == $MINUS) {
+ return tokenizeMinus(next);
+ }
+
+ if (next == $STAR) {
+ return tokenizeMultiply(next);
+ }
+
+ if (next == $CARET) {
+ return tokenizeCaret(next);
+ }
+
+ if (next == $TILDE) {
+ return tokenizeTilde(next);
+ }
+
+ if (next == $PERCENT) {
+ return tokenizePercent(next);
+ }
+
+ if (next == $BACKPING) {
+ appendPrecedenceToken(TokenType.BACKPING);
+ return advance();
+ }
+
+ if (next == $BACKSLASH) {
+ appendPrecedenceToken(TokenType.BACKSLASH);
+ return advance();
+ }
+
+ if (next == $HASH) {
+ return tokenizeTag(next);
+ }
+
+ if (next < 0x1f) {
+ return unexpected(next);
+ }
+
+ next = currentAsUnicode(next);
+
+ return unexpected(next);
+ }
+
+ /**
+ * Returns a new comment from the scan offset [start] to the current
+ * [scanOffset] plus the [extraOffset]. For example, if the current
+ * scanOffset is 10, then [appendSubstringToken(5, -1)] will append the
+ * substring string [5,9).
+ *
+ * Note that [extraOffset] can only be used if the covered character(s) are
+ * known to be ASCII.
+ */
+ CommentToken createCommentToken(
+ TokenType type,
+ int start,
+ bool asciiOnly, [
+ int extraOffset = 0,
+ ]);
+
+ /**
+ * Returns a new language version token from the scan offset [start]
+ * to the current [scanOffset] similar to createCommentToken.
+ */
+ LanguageVersionToken createLanguageVersionToken(
+ int start,
+ int major,
+ int minor,
+ );
+
+ AbstractScanner createRecoveryOptionScanner();
+
+ /**
+ * Returns a new substring from the scan offset [start] to the current
+ * [scanOffset] plus the [extraOffset]. For example, if the current
+ * scanOffset is 10, then [appendSubstringToken(5, -1)] will append the
+ * substring string [5,9).
+ *
+ * Note that [extraOffset] can only be used if the covered character(s) are
+ * known to be ASCII.
+ */
+ analyzer.StringToken createSubstringToken(
+ TokenType type,
+ int start,
+ bool asciiOnly,
+ int extraOffset,
+ bool allowLazy,
+ );
+
+ /**
+ * Returns a new synthetic substring from the scan offset [start]
+ * to the current [scanOffset] plus the [syntheticChars].
+ * The [syntheticChars] are appended to the unterminated string
+ * literal's lexeme but the returned token's length will *not* include
+ * those additional characters so as to be true to the original source.
+ */
+ analyzer.StringToken createSyntheticSubstringToken(
+ TokenType type,
+ int start,
+ bool asciiOnly,
+ String syntheticChars,
+ );
+
+ /// Get the current character, i.e. the latest response from [advance].
+ int current();
+
+ /**
+ * Returns the current unicode character.
+ *
+ * If the current character is ASCII, then it is returned unchanged.
+ *
+ * The [Utf8BytesScanner] decodes the next unicode code point starting at the
+ * current position. Note that every unicode character is returned as a single
+ * code point, that is, for '\u{1d11e}' it returns 119070, and the following
+ * [advance] returns the next character.
+ *
+ * The [StringScanner] returns the current character unchanged, which might
+ * be a surrogate character. In the case of '\u{1d11e}', it returns the first
+ * code unit 55348, and the following [advance] returns the second code unit
+ * 56606.
+ *
+ * Invoking [currentAsUnicode] multiple times is safe, i.e.,
+ * [:currentAsUnicode(next) == currentAsUnicode(currentAsUnicode(next)):].
+ */
+ int currentAsUnicode(int next);
+
+ /**
+ * If a begin group token matches [openKind],
+ * then discard begin group tokens up to that match and return `true`,
+ * otherwise return `false`.
+ * This recovers nicely from situations like "{[}" and "{foo());}",
+ * but not "foo(() {bar());});"
+ */
+ bool discardBeginGroupUntil(int openKind) {
+ Link<BeginToken> originalStack = groupingStack;
+
+ bool first = true;
+ do {
+ // Don't report unmatched errors for <; it is also the less-than operator.
+ discardOpenLt();
+ if (groupingStack.isEmpty) break; // recover
+ BeginToken begin = groupingStack.head;
+ if (openKind == begin.kind ||
+ (openKind == OPEN_CURLY_BRACKET_TOKEN &&
+ begin.kind == STRING_INTERPOLATION_TOKEN)) {
+ if (first) {
+ // If the expected opener has been found on the first pass
+ // then no recovery necessary.
+ return true;
+ }
+ break; // recover
+ }
+ first = false;
+ groupingStack = groupingStack.tail!;
+ } while (!groupingStack.isEmpty);
+
+ recoveryCount++;
+
+ // If the stack does not have any opener of the given type,
+ // then return without discarding anything.
+ // This recovers nicely from situations like "{foo());}".
+ if (groupingStack.isEmpty) {
+ groupingStack = originalStack;
+ return false;
+ }
+
+ // We found a matching group somewhere in the stack, but generally don't
+ // know if we should recover by inserting synthetic closers or
+ // basically ignore the current token.
+ // We're in a recovery setting so we're allowed to be 'relatively slow' ---
+ // try both and see which is better (i.e. gives fewest rewrites later).
+ // To not get exponential runtime we will not do this nested though.
+ // E.g. we can recover "{[}" as "{[]}" (better) or (with . for ignored
+ // tokens) "{[.".
+ // Or we can recover "[(])]" as "[()].." or "[(.)]" (better).
+ if (!inRecoveryOption) {
+ TokenType type;
+ switch (openKind) {
+ case OPEN_SQUARE_BRACKET_TOKEN:
+ type = TokenType.CLOSE_SQUARE_BRACKET;
+ break;
+ case OPEN_CURLY_BRACKET_TOKEN:
+ type = TokenType.CLOSE_CURLY_BRACKET;
+ break;
+ case OPEN_PAREN_TOKEN:
+ type = TokenType.CLOSE_PAREN;
+ break;
+ default:
+ throw new StateError("Unexpected openKind");
+ }
+
+ // Option #1: Insert synthetic closers.
+ int option1Recoveries;
+ {
+ AbstractScanner option1 = createRecoveryOptionScanner();
+ option1.insertSyntheticClosers(originalStack, groupingStack);
+ option1Recoveries = option1.recoveryOptionTokenizer(
+ option1.appendEndGroupInternal(
+ /* foundMatchingBrace = */ true,
+ type,
+ openKind,
+ ),
+ );
+ option1Recoveries += option1.groupingStack.slowLength();
+ }
+
+ // Option #2: ignore this token.
+ int option2Recoveries;
+ {
+ AbstractScanner option2 = createRecoveryOptionScanner();
+ option2.groupingStack = originalStack;
+ option2Recoveries = option2.recoveryOptionTokenizer(
+ option2.appendEndGroupInternal(
+ /* foundMatchingBrace = */ false,
+ type,
+ openKind,
+ ),
+ );
+ // We add 1 to make this option pay for ignoring this token.
+ option2Recoveries += option2.groupingStack.slowLength() + 1;
+ }
+
+ // The option-runs might have set invalid endGroup pointers. Reset them.
+ for (
+ Link<BeginToken> link = originalStack;
+ link.isNotEmpty;
+ link = link.tail!
+ ) {
+ link.head.endToken = null;
+ }
+
+ if (option2Recoveries < option1Recoveries) {
+ // Perform option #2 recovery.
+ groupingStack = originalStack;
+ return false;
+ }
+      // Option #1 is the default, so fall through.
+ }
+
+ // Insert synthetic closers and report errors for any unbalanced openers.
+ // This recovers nicely from situations like "{[}".
+ insertSyntheticClosers(originalStack, groupingStack);
+ return true;
+ }
+
+ /**
+ * This method is called to discard '${' from the "grouping" stack.
+ *
+ * This method is called when the scanner finds an unterminated
+ * interpolation expression.
+ */
+ void discardInterpolation() {
+ while (!groupingStack.isEmpty) {
+ BeginToken beginToken = groupingStack.head;
+ unmatchedBeginGroup(beginToken);
+ groupingStack = groupingStack.tail!;
+ if (beginToken.kind == STRING_INTERPOLATION_TOKEN) break;
+ }
+ }
+
+ /**
+ * This method is called to discard '<' from the "grouping" stack.
+ *
+ * [ClassMemberParser.skipExpression] relies on the fact that we do not
+ * create groups for stuff like:
+ * [:a = b < c, d = e > f:].
+ *
+ * In other words, this method is called when the scanner recognizes
+ * something which cannot possibly be part of a type parameter/argument
+ * list, like the '=' in the above example.
+ */
+ void discardOpenLt() {
+ while (!groupingStack.isEmpty && groupingStack.head.kind == LT_TOKEN) {
+ groupingStack = groupingStack.tail!;
+ }
+ }
+
+ /**
+ * Returns the first token scanned by this [Scanner].
+ */
+ Token firstToken() => tokens.next!;
+
+ /**
+ * Notifies the scanner that unicode characters were detected in either a
+ * comment or a string literal between [startScanOffset] and the current
+ * scan offset.
+ */
+ void handleUnicode(int startScanOffset);
+
+ void insertSyntheticClosers(
+ Link<BeginToken> originalStack,
+ Link<BeginToken> entryToUse,
+ ) {
+ // Insert synthetic closers and report errors for any unbalanced openers.
+ // This recovers nicely from situations like "{[}".
+ while (!identical(originalStack, entryToUse)) {
+ // Don't report unmatched errors for <; it is also the less-than operator.
+ if (entryToUse.head.kind != LT_TOKEN) {
+ unmatchedBeginGroup(originalStack.head);
+ }
+ originalStack = originalStack.tail!;
+ }
+ }
+
+ /**
+ * Notifies on [$LF] characters in multi-line comments or strings.
+ *
+ * This method is used by the scanners to track line breaks and create the
+ * [lineStarts] map.
+ */
+ void lineFeedInMultiline() {
+ lineStarts.add(stringOffset + 1);
+ }
+
+ int passIdentifierCharAllowDollar();
+
+ /**
+ * Returns the character at the next position. Like in [advance], the
+ * [Utf8BytesScanner] returns a UTF-8 byte, while the [StringScanner] returns
+ * a UTF-16 code unit.
+ */
+ int peek();
+
+ /// Tokenize a (small) part of the data. Used for recovery "option testing".
+ ///
+ /// Returns the number of recoveries performed.
+ int recoveryOptionTokenizer(int next) {
+ int iterations = 0;
+ while (!atEndOfFile()) {
+ while (next != $EOF) {
+ // TODO(jensj): Look at number of lines, tokens, parenthesis stack,
+ // semi-colon etc, not just number of iterations.
+ next = bigSwitch(next);
+ iterations++;
+
+ if (iterations > 100) {
+ return recoveryCount;
+ }
+ }
+ if (!atEndOfFile()) {
+        // $EOF in the middle of the file. Skip it as `tokenize` does.
+ next = advance();
+ iterations++;
+
+ if (iterations > 100) {
+ return recoveryCount;
+ }
+ }
+ }
+ return recoveryCount;
+ }
+
+ /// Scan until line end (or eof). Returns true if the skipped data is ascii
+ /// only and false otherwise. To get the end-of-line (or eof) character call
+ /// [current].
+ bool scanUntilLineEnd();
+
+ /**
+ * Appends a fixed token based on whether the current char is [choice] or not.
+ * If the current char is [choice] a fixed token whose kind and content
+ * is determined by [yes] is appended, otherwise a fixed token whose kind
+ * and content is determined by [no] is appended.
+ */
+ int select(int choice, TokenType yes, TokenType no) {
+ int next = advance();
+ if (next == choice) {
+ appendPrecedenceToken(yes);
+ return advance();
+ } else {
+ appendPrecedenceToken(no);
+ return next;
+ }
+ }
+
+ /// Skip past spaces. Returns the latest character not consumed
+ /// (i.e. the latest character that is not a space).
+ int skipSpaces();
+
+ Token tokenize() {
+ while (!atEndOfFile()) {
+ int next = advance();
+
+ // Scan the header looking for a language version
+ if (next != $EOF) {
+ Token oldTail = tail;
+ next = bigHeaderSwitch(next);
+ if (next != $EOF && tail.kind == SCRIPT_TOKEN) {
+ oldTail = tail;
+ next = bigHeaderSwitch(next);
+ }
+ while (next != $EOF && tail == oldTail) {
+ next = bigHeaderSwitch(next);
+ }
+ next = next;
+ }
+
+ while (next != $EOF) {
+ next = bigSwitch(next);
+ }
+ if (atEndOfFile()) {
+ appendEofToken();
+ } else {
+ unexpectedEof();
+ }
+ }
+
+ // Always pretend that there's a line at the end of the file.
+ lineStarts.add(stringOffset + 1);
+
+ return firstToken();
+ }
+
+ int tokenizeAmpersand(int next) {
+ // && &= & &&=
+ next = advance();
+ if (next == $AMPERSAND) {
+ next = advance();
+ if (LAZY_ASSIGNMENT_ENABLED && next == $EQ) {
+ appendPrecedenceToken(TokenType.AMPERSAND_AMPERSAND_EQ);
+ return advance();
+ }
+ appendPrecedenceToken(TokenType.AMPERSAND_AMPERSAND);
+ return next;
+ } else if (next == $EQ) {
+ appendPrecedenceToken(TokenType.AMPERSAND_EQ);
+ return advance();
+ } else {
+ appendPrecedenceToken(TokenType.AMPERSAND);
+ return next;
+ }
+ }
+
+ int tokenizeAt(int next) {
+ appendPrecedenceToken(TokenType.AT);
+ return advance();
+ }
+
+ int tokenizeBar(int next) {
+ // | || |= ||=
+ next = advance();
+ if (next == $BAR) {
+ next = advance();
+ if (LAZY_ASSIGNMENT_ENABLED && next == $EQ) {
+ appendPrecedenceToken(TokenType.BAR_BAR_EQ);
+ return advance();
+ }
+ appendPrecedenceToken(TokenType.BAR_BAR);
+ return next;
+ } else if (next == $EQ) {
+ appendPrecedenceToken(TokenType.BAR_EQ);
+ return advance();
+ } else {
+ appendPrecedenceToken(TokenType.BAR);
+ return next;
+ }
+ }
+
+ int tokenizeCaret(int next) {
+ // ^ ^=
+ return select($EQ, TokenType.CARET_EQ, TokenType.CARET);
+ }
+
+ int tokenizeDotsOrNumber(int next) {
+ int start = scanOffset;
+ next = advance();
+ if (($0 <= next && next <= $9)) {
+ return tokenizeFractionPart(next, start, /* hasSeparators = */ false);
+ } else if ($PERIOD == next) {
+ next = advance();
+ if (next == $PERIOD) {
+ next = advance();
+ if (next == $QUESTION) {
+ appendPrecedenceToken(TokenType.PERIOD_PERIOD_PERIOD_QUESTION);
+ return advance();
+ } else {
+ appendPrecedenceToken(TokenType.PERIOD_PERIOD_PERIOD);
+ return next;
+ }
+ } else {
+ appendPrecedenceToken(TokenType.PERIOD_PERIOD);
+ return next;
+ }
+ } else {
+ appendPrecedenceToken(TokenType.PERIOD);
+ return next;
+ }
+ }
+
+ int tokenizeEquals(int next) {
+ // = == =>
+ // === is kept for user-friendly error reporting.
+
+ // Type parameters and arguments cannot contain any token that
+ // starts with '='.
+ discardOpenLt();
+
+ next = advance();
+ if (next == $EQ) {
+ // was `return select($EQ, TokenType.EQ_EQ_EQ, TokenType.EQ_EQ);`
+ int next = advance();
+ if (next == $EQ) {
+ appendPrecedenceToken(TokenType.EQ_EQ_EQ);
+ throw "removed";
+ } else {
+ appendPrecedenceToken(TokenType.EQ_EQ);
+ return next;
+ }
+ } else if (next == $GT) {
+ appendPrecedenceToken(TokenType.FUNCTION);
+ return advance();
+ }
+ appendPrecedenceToken(TokenType.EQ);
+ return next;
+ }
+
+ int tokenizeExclamation(int next) {
+ // ! !=
+ // !== is kept for user-friendly error reporting.
+
+ next = advance();
+ if (next == $EQ) {
+ //was `return select($EQ, TokenType.BANG_EQ_EQ, TokenType.BANG_EQ);`
+ int next = advance();
+ if (next == $EQ) {
+ appendPrecedenceToken(TokenType.BANG_EQ_EQ);
+ throw "removed";
+ } else {
+ appendPrecedenceToken(TokenType.BANG_EQ);
+ return next;
+ }
+ }
+ appendPrecedenceToken(TokenType.BANG);
+ return next;
+ }
+
+ int tokenizeFractionPart(int next, int start, bool hasSeparators) {
+ bool done = false;
+ bool hasDigit = false;
+ bool previousWasSeparator = false;
+ LOOP:
+ while (!done) {
+ if ($0 <= next && next <= $9) {
+ hasDigit = true;
+ previousWasSeparator = false;
+ } else if ($_ == next) {
+ if (!hasDigit) {
+ throw "removed";
+ }
+ hasSeparators = true;
+ previousWasSeparator = true;
+ } else if ($e == next || $E == next) {
+ if (previousWasSeparator) {
+ throw "removed";
+ }
+ hasDigit = true;
+ previousWasSeparator = false;
+ next = advance();
+ while (next == $_) {
+ throw "removed";
+ }
+ if (next == $PLUS || next == $MINUS) {
+ previousWasSeparator = false;
+ next = advance();
+ }
+ bool hasExponentDigits = false;
+ while (true) {
+ if ($0 <= next && next <= $9) {
+ hasExponentDigits = true;
+ previousWasSeparator = false;
+ } else if (next == $_) {
+ if (!hasExponentDigits) {
+ throw "removed";
+ }
+ hasSeparators = true;
+ previousWasSeparator = true;
+ } else {
+ if (!hasExponentDigits) {
+ throw "removed";
+ }
+ break;
+ }
+ next = advance();
+ }
+ if (previousWasSeparator) {
+ // End of the number is a separator; not allowed.
+ throw "removed";
+ }
+
+ done = true;
+ continue LOOP;
+ } else {
+ if (previousWasSeparator) {
+ throw "removed";
+ }
+ done = true;
+ continue LOOP;
+ }
+ next = advance();
+ }
+ if (!hasDigit) {
+ // Reduce offset, we already advanced to the token past the period.
+ appendSubstringToken(
+ TokenType.INT,
+ start,
+ /* asciiOnly = */ true,
+ /* extraOffset = */ -1,
+ );
+
+ // TODO(ahe): Wrong offset for the period. Cannot call beginToken because
+ // the scanner already advanced past the period.
+ if ($PERIOD == next) {
+ return select(
+ $PERIOD,
+ TokenType.PERIOD_PERIOD_PERIOD,
+ TokenType.PERIOD_PERIOD,
+ );
+ }
+ appendPrecedenceToken(TokenType.PERIOD);
+ return next;
+ }
+ TokenType tokenType =
+ hasSeparators ? TokenType.DOUBLE_WITH_SEPARATORS : TokenType.DOUBLE;
+ appendSubstringToken(tokenType, start, /* asciiOnly = */ true);
+ return next;
+ }
+
+ int tokenizeGreaterThan(int next) {
+ // > >= >> >>= >>> >>>=
+ next = advance();
+ if ($EQ == next) {
+ // Saw `>=` only.
+ appendPrecedenceToken(TokenType.GT_EQ);
+ return advance();
+ } else if ($GT == next) {
+ // Saw `>>` so far.
+ next = advance();
+ if ($EQ == next) {
+ // Saw `>>=` only.
+ appendPrecedenceToken(TokenType.GT_GT_EQ);
+ return advance();
+ } else if (_enableTripleShift && $GT == next) {
+ // Saw `>>>` so far.
+ next = advance();
+ if ($EQ == next) {
+ // Saw `>>>=` only.
+ appendPrecedenceToken(TokenType.GT_GT_GT_EQ);
+ return advance();
+ } else {
+ // Saw `>>>` only.
+ appendGtGtGt(TokenType.GT_GT_GT);
+ return next;
+ }
+ } else {
+ // Saw `>>` only.
+ appendGtGt(TokenType.GT_GT);
+ return next;
+ }
+ } else {
+ // Saw `>` only.
+ appendGt(TokenType.GT);
+ return next;
+ }
+ }
+
+ int tokenizeHex(int next) {
+ int start = scanOffset;
+ next = advance(); // Advance past the $x or $X.
+ bool hasDigits = false;
+ bool hasSeparators = false;
+ bool previousWasSeparator = false;
+ while (true) {
+ next = advance();
+ if (($0 <= next && next <= $9) ||
+ ($A <= next && next <= $F) ||
+ ($a <= next && next <= $f)) {
+ hasDigits = true;
+ previousWasSeparator = false;
+ } else if (next == $_) {
+ if (!hasDigits) {
+ throw "removed";
+ }
+ hasSeparators = true;
+ previousWasSeparator = true;
+ } else {
+ if (!hasDigits) {
+ throw "removed";
+ }
+ if (previousWasSeparator) {
+ throw "removed";
+ }
+ TokenType tokenType =
+ hasSeparators
+ ? TokenType.HEXADECIMAL_WITH_SEPARATORS
+ : TokenType.HEXADECIMAL;
+ appendSubstringToken(tokenType, start, /* asciiOnly = */ true);
+ return next;
+ }
+ }
+ }
+
+ int tokenizeHexOrNumber(int next) {
+ int x = peek();
+ if (x == $x || x == $X) {
+ return tokenizeHex(next);
+ }
+ return tokenizeNumber(next);
+ }
+
+ /**
+ * [allowDollar] can exclude '$', which is not allowed as part of a string
+ * interpolation identifier.
+ */
+ int tokenizeIdentifier(int next, int start, bool allowDollar) {
+ if (allowDollar) {
+ // Normal case is to allow dollar.
+ if (isIdentifierChar(next, /* allowDollar = */ true)) {
+ next = passIdentifierCharAllowDollar();
+ appendSubstringToken(
+ TokenType.IDENTIFIER,
+ start,
+ /* asciiOnly = */ true,
+ );
+ } else {
+ // Identifier ends here.
+ if (start == scanOffset) {
+ return unexpected(next);
+ } else {
+ appendSubstringToken(
+ TokenType.IDENTIFIER,
+ start,
+ /* asciiOnly = */ true,
+ );
+ }
+ }
+ } else {
+ while (true) {
+ if (isIdentifierChar(next, /* allowDollar = */ false)) {
+ next = advance();
+ } else {
+ // Identifier ends here.
+ if (start == scanOffset) {
+ return unexpected(next);
+ } else {
+ appendSubstringToken(
+ TokenType.IDENTIFIER,
+ start,
+ /* asciiOnly = */ true,
+ );
+ }
+ break;
+ }
+ }
+ }
+ return next;
+ }
+
+ int tokenizeInterpolatedExpression(int next) {
+ appendBeginGroup(TokenType.STRING_INTERPOLATION_EXPRESSION);
+ beginToken(); // The expression starts here.
+ next = advance(); // Move past the curly bracket.
+ while (next != $EOF && next != $STX) {
+ next = bigSwitch(next);
+ }
+ if (next == $EOF) {
+ beginToken();
+ discardInterpolation();
+ return next;
+ }
+ next = advance(); // Move past the $STX.
+ beginToken(); // The string interpolation suffix starts here.
+ return next;
+ }
+
+ int tokenizeInterpolatedIdentifier(int next) {
+ appendPrecedenceToken(TokenType.STRING_INTERPOLATION_IDENTIFIER);
+
+ if ($a <= next && next <= $z || $A <= next && next <= $Z || next == $_) {
+ beginToken(); // The identifier starts here.
+ next = tokenizeKeywordOrIdentifier(next, /* allowDollar = */ false);
+ } else {
+ beginToken(); // The synthetic identifier starts here.
+ appendSyntheticSubstringToken(
+ TokenType.IDENTIFIER,
+ scanOffset,
+ /* asciiOnly = */ true,
+ '',
+ );
+ throw "removed";
+ }
+ beginToken(); // The string interpolation suffix starts here.
+ return next;
+ }
+
+ int tokenizeKeywordOrIdentifier(int next, bool allowDollar) {
+ KeywordState state = KeywordStateHelper.table;
+ int start = scanOffset;
+ // We allow a leading capital character.
+ if ($A <= next && next <= $z) {
+ state = state.next(next);
+ next = advance();
+ }
+ while (!state.isNull && $a <= next && next <= $z) {
+ state = state.next(next);
+ next = advance();
+ }
+ if (state.isNull) {
+ return tokenizeIdentifier(next, start, allowDollar);
+ }
+ Keyword? keyword = state.keyword;
+ if (keyword == null) {
+ return tokenizeIdentifier(next, start, allowDollar);
+ }
+ if (!_forAugmentationLibrary && keyword == Keyword.AUGMENT) {
+ return tokenizeIdentifier(next, start, allowDollar);
+ }
+ if (($A <= next && next <= $Z) ||
+ ($0 <= next && next <= $9) ||
+ next == $_ ||
+ (allowDollar && next == $$)) {
+ return tokenizeIdentifier(next, start, allowDollar);
+ } else {
+ appendKeywordToken(keyword);
+ return next;
+ }
+ }
+
+ int tokenizeLanguageVersionOrSingleLineComment(int next) {
+ assert(next == $SLASH);
+ int start = scanOffset;
+ next = advance();
+ assert(next == $SLASH);
+
+ // Dart doc
+ if ($SLASH == peek()) {
+ return tokenizeSingleLineComment(next, start);
+ }
+
+ // "@dart"
+ next = advance();
+ while ($SPACE == next) {
+ next = advance();
+ }
+ if ($AT != next) {
+ return tokenizeSingleLineCommentRest(next, start, /* dartdoc = */ false);
+ }
+ next = advance();
+ if ($d != next) {
+ return tokenizeSingleLineCommentRest(next, start, /* dartdoc = */ false);
+ }
+ next = advance();
+ if ($a != next) {
+ return tokenizeSingleLineCommentRest(next, start, /* dartdoc = */ false);
+ }
+ next = advance();
+ if ($r != next) {
+ return tokenizeSingleLineCommentRest(next, start, /* dartdoc = */ false);
+ }
+ next = advance();
+ if ($t != next) {
+ return tokenizeSingleLineCommentRest(next, start, /* dartdoc = */ false);
+ }
+ next = advance();
+
+ // "="
+ while ($SPACE == next) {
+ next = advance();
+ }
+ if ($EQ != next) {
+ return tokenizeSingleLineCommentRest(next, start, /* dartdoc = */ false);
+ }
+ next = advance();
+
+ // major
+ while ($SPACE == next) {
+ next = advance();
+ }
+ int major = 0;
+ int majorStart = scanOffset;
+ while (isDigit(next)) {
+ major = major * 10 + next - $0;
+ next = advance();
+ }
+ if (scanOffset == majorStart) {
+ return tokenizeSingleLineCommentRest(next, start, /* dartdoc = */ false);
+ }
+
+ // minor
+ if ($PERIOD != next) {
+ return tokenizeSingleLineCommentRest(next, start, /* dartdoc = */ false);
+ }
+ next = advance();
+ int minor = 0;
+ int minorStart = scanOffset;
+ while (isDigit(next)) {
+ minor = minor * 10 + next - $0;
+ next = advance();
+ }
+ if (scanOffset == minorStart) {
+ return tokenizeSingleLineCommentRest(next, start, /* dartdoc = */ false);
+ }
+
+ // trailing spaces
+ while ($SPACE == next) {
+ next = advance();
+ }
+ if (next != $LF && next != $CR && next != $EOF) {
+ return tokenizeSingleLineCommentRest(next, start, /* dartdoc = */ false);
+ }
+
+ LanguageVersionToken languageVersion = createLanguageVersionToken(
+ start,
+ major,
+ minor,
+ );
+ if (languageVersionChanged != null) {
+ // TODO(danrubel): make this required and remove the languageVersion field
+ languageVersionChanged!(this, languageVersion);
+ }
+ if (includeComments) {
+ _appendToCommentStream(languageVersion);
+ }
+ return next;
+ }
+
+ int tokenizeLessThan(int next) {
+ // < <= << <<=
+ next = advance();
+ if ($EQ == next) {
+ appendPrecedenceToken(TokenType.LT_EQ);
+ return advance();
+ } else if ($LT == next) {
+ return select($EQ, TokenType.LT_LT_EQ, TokenType.LT_LT);
+ } else {
+ appendBeginGroup(TokenType.LT);
+ return next;
+ }
+ }
+
+ int tokenizeMinus(int next) {
+ // - -- -=
+ next = advance();
+ if (next == $MINUS) {
+ appendPrecedenceToken(TokenType.MINUS_MINUS);
+ return advance();
+ } else if (next == $EQ) {
+ appendPrecedenceToken(TokenType.MINUS_EQ);
+ return advance();
+ } else {
+ appendPrecedenceToken(TokenType.MINUS);
+ return next;
+ }
+ }
+
+ int tokenizeMultiLineComment(int next, int start) {
+ bool asciiOnlyComment = true; // Track if the entire comment is ASCII.
+ bool asciiOnlyLines = true; // Track ASCII since the last handleUnicode.
+ int unicodeStart = start;
+ int nesting = 1;
+ next = advance();
+ bool dartdoc = $STAR == next;
+ while (true) {
+ if ($EOF == next) {
+ if (!asciiOnlyLines) handleUnicode(unicodeStart);
+ throw "removed";
+ } else if ($STAR == next) {
+ next = advance();
+ if ($SLASH == next) {
+ --nesting;
+ if (0 == nesting) {
+ if (!asciiOnlyLines) handleUnicode(unicodeStart);
+ next = advance();
+ if (dartdoc) {
+ appendDartDoc(
+ start,
+ TokenType.MULTI_LINE_COMMENT,
+ asciiOnlyComment,
+ );
+ } else {
+ appendComment(
+ start,
+ TokenType.MULTI_LINE_COMMENT,
+ asciiOnlyComment,
+ );
+ }
+ break;
+ } else {
+ next = advance();
+ }
+ }
+ } else if ($SLASH == next) {
+ next = advance();
+ if ($STAR == next) {
+ next = advance();
+ ++nesting;
+ }
+ } else if (next == $LF) {
+ if (!asciiOnlyLines) {
+ // Synchronize the string offset in the utf8 scanner.
+ handleUnicode(unicodeStart);
+ asciiOnlyLines = true;
+ unicodeStart = scanOffset;
+ }
+ lineFeedInMultiline();
+ next = advance();
+ } else {
+ if (next > 127) {
+ asciiOnlyLines = false;
+ asciiOnlyComment = false;
+ }
+ next = advance();
+ }
+ }
+ return next;
+ }
+
+ int tokenizeMultiLineRawString(int quoteChar, int quoteStart) {
+ bool asciiOnlyString = true;
+ bool asciiOnlyLine = true;
+ int unicodeStart = quoteStart;
+ int next = advance(); // Advance past the (last) quote (of three).
+ outer:
+ while (next != $EOF) {
+ while (next != quoteChar) {
+ if (next == $LF) {
+ if (!asciiOnlyLine) {
+ // Synchronize the string offset in the utf8 scanner.
+ handleUnicode(unicodeStart);
+ asciiOnlyLine = true;
+ unicodeStart = scanOffset;
+ }
+ lineFeedInMultiline();
+ } else if (next > 127) {
+ asciiOnlyLine = false;
+ asciiOnlyString = false;
+ }
+ next = advance();
+ if (next == $EOF) break outer;
+ }
+ next = advance();
+ if (next == quoteChar) {
+ next = advance();
+ if (next == quoteChar) {
+ if (!asciiOnlyLine) handleUnicode(unicodeStart);
+ next = advance();
+ appendSubstringToken(TokenType.STRING, quoteStart, asciiOnlyString);
+ return next;
+ }
+ }
+ }
+ if (!asciiOnlyLine) handleUnicode(unicodeStart);
+ unterminatedString(
+ quoteChar,
+ quoteStart,
+ quoteStart,
+ asciiOnly: asciiOnlyLine,
+ isMultiLine: true,
+ isRaw: true,
+ );
+ return next;
+ }
+
+ int tokenizeMultiLineString(int quoteChar, int quoteStart, bool raw) {
+ if (raw) return tokenizeMultiLineRawString(quoteChar, quoteStart);
+ int start = quoteStart;
+ bool asciiOnlyString = true;
+ bool asciiOnlyLine = true;
+ int unicodeStart = start;
+ int next = advance(); // Advance past the (last) quote (of three).
+ while (next != $EOF) {
+ if (next == $$) {
+ if (!asciiOnlyLine) handleUnicode(unicodeStart);
+ next = tokenizeStringInterpolation(start, asciiOnlyString);
+ start = scanOffset;
+ unicodeStart = start;
+ asciiOnlyString = true; // A new string token is created for the rest.
+ asciiOnlyLine = true;
+ continue;
+ }
+ if (next == quoteChar) {
+ next = advance();
+ if (next == quoteChar) {
+ next = advance();
+ if (next == quoteChar) {
+ if (!asciiOnlyLine) handleUnicode(unicodeStart);
+ next = advance();
+ appendSubstringToken(TokenType.STRING, start, asciiOnlyString);
+ return next;
+ }
+ }
+ continue;
+ }
+ if (next == $BACKSLASH) {
+ next = advance();
+ if (next == $EOF) break;
+ }
+ if (next == $LF) {
+ if (!asciiOnlyLine) {
+ // Synchronize the string offset in the utf8 scanner.
+ handleUnicode(unicodeStart);
+ asciiOnlyLine = true;
+ unicodeStart = scanOffset;
+ }
+ lineFeedInMultiline();
+ } else if (next > 127) {
+ asciiOnlyString = false;
+ asciiOnlyLine = false;
+ }
+ next = advance();
+ }
+ if (!asciiOnlyLine) handleUnicode(unicodeStart);
+ unterminatedString(
+ quoteChar,
+ quoteStart,
+ start,
+ asciiOnly: asciiOnlyString,
+ isMultiLine: true,
+ isRaw: false,
+ );
+ return next;
+ }
+
+ int tokenizeMultiply(int next) {
+ // * *=
+ return select($EQ, TokenType.STAR_EQ, TokenType.STAR);
+ }
+
+ int tokenizeNumber(int next) {
+ int start = scanOffset;
+ bool hasSeparators = false;
+ bool previousWasSeparator = false;
+ while (true) {
+ next = advance();
+ if ($0 <= next && next <= $9) {
+ previousWasSeparator = false;
+ continue;
+ } else if (next == $_) {
+ hasSeparators = true;
+ previousWasSeparator = true;
+ continue;
+ } else if (next == $e || next == $E) {
+ if (previousWasSeparator) {
+ throw "removed";
+ }
+ return tokenizeFractionPart(next, start, hasSeparators);
+ } else {
+ if (next == $PERIOD) {
+ if (previousWasSeparator) {
+ throw "removed";
+ }
+ int nextnext = peek();
+ if ($0 <= nextnext && nextnext <= $9) {
+ // Use the peeked character.
+ advance();
+ return tokenizeFractionPart(nextnext, start, hasSeparators);
+ } else {
+ TokenType tokenType =
+ hasSeparators ? TokenType.INT_WITH_SEPARATORS : TokenType.INT;
+ appendSubstringToken(tokenType, start, /* asciiOnly = */ true);
+ return next;
+ }
+ }
+ if (previousWasSeparator) {
+ throw "removed";
+ }
+ TokenType tokenType =
+ hasSeparators ? TokenType.INT_WITH_SEPARATORS : TokenType.INT;
+ appendSubstringToken(tokenType, start, /* asciiOnly = */ true);
+ return next;
+ }
+ }
+ }
+
+ int tokenizeOpenSquareBracket(int next) {
+ // [ [] []=
+ next = advance();
+ if (next == $CLOSE_SQUARE_BRACKET) {
+ return select($EQ, TokenType.INDEX_EQ, TokenType.INDEX);
+ }
+ appendBeginGroup(TokenType.OPEN_SQUARE_BRACKET);
+ return next;
+ }
+
+ int tokenizePercent(int next) {
+ // % %=
+ return select($EQ, TokenType.PERCENT_EQ, TokenType.PERCENT);
+ }
+
+ int tokenizePlus(int next) {
+ // + ++ +=
+ next = advance();
+ if ($PLUS == next) {
+ appendPrecedenceToken(TokenType.PLUS_PLUS);
+ return advance();
+ } else if ($EQ == next) {
+ appendPrecedenceToken(TokenType.PLUS_EQ);
+ return advance();
+ } else {
+ appendPrecedenceToken(TokenType.PLUS);
+ return next;
+ }
+ }
+
+ int tokenizeQuestion(int next) {
+ // ? ?. ?.. ?? ??=
+ next = advance();
+ if (next == $QUESTION) {
+ return select(
+ $EQ,
+ TokenType.QUESTION_QUESTION_EQ,
+ TokenType.QUESTION_QUESTION,
+ );
+ } else if (next == $PERIOD) {
+ next = advance();
+ if ($PERIOD == next) {
+ appendPrecedenceToken(TokenType.QUESTION_PERIOD_PERIOD);
+ return advance();
+ }
+ appendPrecedenceToken(TokenType.QUESTION_PERIOD);
+ return next;
+ } else {
+ appendPrecedenceToken(TokenType.QUESTION);
+ return next;
+ }
+ }
+
+ int tokenizeRawStringKeywordOrIdentifier(int next) {
+ // [next] is $r.
+ int nextnext = peek();
+ if (nextnext == $DQ || nextnext == $SQ) {
+ int start = scanOffset;
+ next = advance();
+ return tokenizeString(next, start, /* raw = */ true);
+ }
+ return tokenizeKeywordOrIdentifier(next, /* allowDollar = */ true);
+ }
+
+ int tokenizeSingleLineComment(int next, int start) {
+ next = advance();
+ bool dartdoc = $SLASH == next;
+ return tokenizeSingleLineCommentRest(next, start, dartdoc);
+ }
+
+ int tokenizeSingleLineCommentRest(int next, int start, bool dartdoc) {
+ bool asciiOnly = true;
+ if (next > 127) asciiOnly = false;
+ if ($LF == next || $CR == next || $EOF == next) {
+ _tokenizeSingleLineCommentAppend(asciiOnly, start, dartdoc);
+ return next;
+ }
+ asciiOnly &= scanUntilLineEnd();
+ _tokenizeSingleLineCommentAppend(asciiOnly, start, dartdoc);
+ return current();
+ }
+
+ int tokenizeSingleLineRawString(int next, int quoteChar, int quoteStart) {
+ bool asciiOnly = true;
+ while (next != $EOF) {
+ if (next == quoteChar) {
+ if (!asciiOnly) handleUnicode(quoteStart);
+ next = advance();
+ appendSubstringToken(TokenType.STRING, quoteStart, asciiOnly);
+ return next;
+ } else if (next == $LF || next == $CR) {
+ if (!asciiOnly) handleUnicode(quoteStart);
+ unterminatedString(
+ quoteChar,
+ quoteStart,
+ quoteStart,
+ asciiOnly: asciiOnly,
+ isMultiLine: false,
+ isRaw: true,
+ );
+ return next;
+ } else if (next > 127) {
+ asciiOnly = false;
+ }
+ next = advance();
+ }
+ if (!asciiOnly) handleUnicode(quoteStart);
+ unterminatedString(
+ quoteChar,
+ quoteStart,
+ quoteStart,
+ asciiOnly: asciiOnly,
+ isMultiLine: false,
+ isRaw: true,
+ );
+ return next;
+ }
+
+ /**
+ * [next] is the first character after the quote.
+ * [quoteStart] is the scanOffset of the quote.
+ *
+ * The token contains a substring of the source file, including the string
+ * quotes and the backslashes used for escaping. For interpolated strings,
+ * the parts before and after the interpolation are separate tokens.
+ *
+ * "a $b c"
+ *
+ * gives StringToken("a $), StringToken(b) and StringToken( c").
+ */
+ int tokenizeSingleLineString(int next, int quoteChar, int quoteStart) {
+ int start = quoteStart;
+ bool asciiOnly = true;
+ while (next != quoteChar) {
+ if (next == $BACKSLASH) {
+ next = advance();
+ } else if (next == $$) {
+ if (!asciiOnly) handleUnicode(start);
+ next = tokenizeStringInterpolation(start, asciiOnly);
+ start = scanOffset;
+ asciiOnly = true;
+ continue;
+ }
+ if (next <= $CR && (next == $LF || next == $CR || next == $EOF)) {
+ if (!asciiOnly) handleUnicode(start);
+ unterminatedString(
+ quoteChar,
+ quoteStart,
+ start,
+ asciiOnly: asciiOnly,
+ isMultiLine: false,
+ isRaw: false,
+ );
+ return next;
+ }
+ if (next > 127) asciiOnly = false;
+ next = advance();
+ }
+ if (!asciiOnly) handleUnicode(start);
+ // Advance past the quote character.
+ next = advance();
+ appendSubstringToken(TokenType.STRING, start, asciiOnly);
+ return next;
+ }
+
+ int tokenizeSlashOrComment(int next) {
+ int start = scanOffset;
+ next = advance();
+ if ($STAR == next) {
+ return tokenizeMultiLineComment(next, start);
+ } else if ($SLASH == next) {
+ return tokenizeSingleLineComment(next, start);
+ } else if ($EQ == next) {
+ appendPrecedenceToken(TokenType.SLASH_EQ);
+ return advance();
+ } else {
+ appendPrecedenceToken(TokenType.SLASH);
+ return next;
+ }
+ }
+
+ int tokenizeString(int next, int start, bool raw) {
+ int quoteChar = next;
+ next = advance();
+ if (quoteChar == next) {
+ next = advance();
+ if (quoteChar == next) {
+ // Multiline string.
+ return tokenizeMultiLineString(quoteChar, start, raw);
+ } else {
+ // Empty string.
+ appendSubstringToken(TokenType.STRING, start, /* asciiOnly = */ true);
+ return next;
+ }
+ }
+ if (raw) {
+ return tokenizeSingleLineRawString(next, quoteChar, start);
+ } else {
+ return tokenizeSingleLineString(next, quoteChar, start);
+ }
+ }
+
+ int tokenizeStringInterpolation(int start, bool asciiOnly) {
+ appendSubstringToken(TokenType.STRING, start, asciiOnly);
+ beginToken(); // $ starts here.
+ int next = advance();
+ if (next == $OPEN_CURLY_BRACKET) {
+ return tokenizeInterpolatedExpression(next);
+ } else {
+ return tokenizeInterpolatedIdentifier(next);
+ }
+ }
+
+ int tokenizeTag(int next) {
+ // # or #!.*[\n\r]
+ if (scanOffset == 0) {
+ if (peek() == $BANG) {
+ int start = scanOffset;
+ bool asciiOnly = true;
+ do {
+ next = advance();
+ if (next > 127) asciiOnly = false;
+ } while (next != $LF && next != $CR && next != $EOF);
+ if (!asciiOnly) handleUnicode(start);
+ appendSubstringToken(TokenType.SCRIPT_TAG, start, asciiOnly);
+ return next;
+ }
+ }
+ appendPrecedenceToken(TokenType.HASH);
+ return advance();
+ }
+
+ int tokenizeTilde(int next) {
+ // ~ ~/ ~/=
+ next = advance();
+ if (next == $SLASH) {
+ return select($EQ, TokenType.TILDE_SLASH_EQ, TokenType.TILDE_SLASH);
+ } else {
+ appendPrecedenceToken(TokenType.TILDE);
+ return next;
+ }
+ }
+
+ int unexpected(int character) {
+ throw "removed";
+ }
+
+ void unexpectedEof() {
+ throw "removed";
+ }
+
+ void unmatchedBeginGroup(BeginToken begin) {
+ // We want to ensure that unmatched BeginTokens are reported as
+ // errors. However, the diet parser assumes that groups are well-balanced
+ // and will never look at the endGroup token. This is a nice property that
+ // allows us to skip quickly over correct code. By inserting an additional
+ // synthetic token in the stream, we can keep ignoring endGroup tokens.
+ //
+ // [begin] --next--> [tail]
+ // [begin] --endG--> [synthetic] --next--> [next] --next--> [tail]
+ //
+ // This allows the diet parser to skip from [begin] via endGroup to
+ // [synthetic] and ignore the [synthetic] token (assuming it's correct);
+ // the error will then be reported when parsing the [next] token.
+ //
+ // For example, tokenize("{[1};") produces:
+ //
+ // SymbolToken({) --endGroup------------------------+
+ // | |
+ // next |
+ // v |
+ // SymbolToken([) --endGroup--+ |
+ // | | |
+ // next | |
+ // v | |
+ // StringToken(1) | |
+ // | | |
+ // next | |
+ // v | |
+ // SymbolToken(])<------------+ <-- Synthetic token |
+ // | |
+ // next |
+ // v |
+ // UnmatchedToken([) |
+ // | |
+ // next |
+ // v |
+ // SymbolToken(})<----------------------------------+
+ // |
+ // next
+ // v
+ // SymbolToken(;)
+ // |
+ // next
+ // v
+ // EOF
+ TokenType type = closeBraceInfoFor(begin);
+ appendToken(new SyntheticToken(type, tokenStart)..beforeSynthetic = tail);
+ begin.endGroup = tail;
+ throw "removed";
+ }
+
+ void unterminatedString(
+ int quoteChar,
+ int quoteStart,
+ int start, {
+ required bool asciiOnly,
+ required bool isMultiLine,
+ required bool isRaw,
+ }) {
+ String suffix = new String.fromCharCodes(
+ isMultiLine ? [quoteChar, quoteChar, quoteChar] : [quoteChar],
+ );
+ appendSyntheticSubstringToken(TokenType.STRING, start, asciiOnly, suffix);
+ throw "removed";
+ }
+
+ void _appendToCommentStream(CommentToken newComment) {
+ if (comments == null) {
+ comments = newComment;
+ commentsTail = comments;
+ } else {
+ commentsTail!.next = newComment;
+ commentsTail!.next!.previous = commentsTail;
+ commentsTail = commentsTail!.next;
+ }
+ }
+
+ void _tokenizeSingleLineCommentAppend(
+ bool asciiOnly,
+ int start,
+ bool dartdoc,
+ ) {
+ if (!asciiOnly) handleUnicode(start);
+ if (dartdoc) {
+ appendDartDoc(start, TokenType.SINGLE_LINE_COMMENT, asciiOnly);
+ } else {
+ appendComment(start, TokenType.SINGLE_LINE_COMMENT, asciiOnly);
+ }
+ }
+}
+
+class LineStarts extends Object with ListMixin<int> {
+ List<int> array;
+ int arrayLength = 0;
+
+ LineStarts(int numberOfBytesHint)
+ : array = _createInitialArray(numberOfBytesHint) {
+ // The first line starts at character offset 0.
+ add(/* value = */ 0);
+ }
+
+ // Implement abstract members used by [ListMixin]
+
+ @override
+ int get length => arrayLength;
+
+ @override
+ void set length(int newLength) {
+ if (newLength > array.length) {
+ grow(newLength);
+ }
+ arrayLength = newLength;
+ }
+
+ @override
+ int operator [](int index) {
+ assert(index < arrayLength);
+ return array[index];
+ }
+
+ @override
+ void operator []=(int index, int value) {
+ if (value > 65535 && array is! Uint32List) {
+ switchToUint32(array.length);
+ }
+ array[index] = value;
+ }
+
+ // Specialize methods from [ListMixin].
+ @override
+ void add(int value) {
+ if (arrayLength >= array.length) {
+ grow(/* newLengthMinimum = */ 0);
+ }
+ if (value > 65535 && array is! Uint32List) {
+ switchToUint32(array.length);
+ }
+ array[arrayLength++] = value;
+ }
+
+ // Helper methods.
+
+ void grow(int newLengthMinimum) {
+ int newLength = array.length * 2;
+ if (newLength < newLengthMinimum) newLength = newLengthMinimum;
+
+ if (array is Uint16List) {
+ final Uint16List newArray = new Uint16List(newLength);
+ newArray.setRange(/* start = */ 0, arrayLength, array);
+ array = newArray;
+ } else {
+ switchToUint32(newLength);
+ }
+ }
+
+ void switchToUint32(int newLength) {
+ final Uint32List newArray = new Uint32List(newLength);
+ newArray.setRange(/* start = */ 0, arrayLength, array);
+ array = newArray;
+ }
+
+ static List<int> _createInitialArray(int numberOfBytesHint) {
+ // Let's assume we have on average 22 bytes per line.
+ final int expectedNumberOfLines = 1 + (numberOfBytesHint ~/ 22);
+
+ if (numberOfBytesHint > 65535) {
+ return new Uint32List(expectedNumberOfLines);
+ } else {
+ return new Uint16List(expectedNumberOfLines);
+ }
+ }
+}
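+
+// A minimal illustrative sketch of [LineStarts], following the DartDocTest
+// comment convention used elsewhere in these files: the list starts out as
+// [0], and adding a value above 65535 transparently switches the backing
+// store from a Uint16List to a Uint32List.
+// DartDocTest(() {
+//   LineStarts lineStarts = new LineStarts(/* numberOfBytesHint = */ 100);
+//   lineStarts.add(10);
+//   lineStarts.add(70000); // Forces the switch to Uint32List.
+//   return lineStarts.toList().join(',');
+// }(), '0,10,70000')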
+
+/// [ScannerConfiguration] contains information for configuring which tokens
+/// the scanner produces based upon the Dart language level.
+class ScannerConfiguration {
+ static const ScannerConfiguration nonNullable = const ScannerConfiguration();
+
+ /// Experimental flag for enabling scanning of `>>>`.
+ /// See https://github.com/dart-lang/language/issues/61
+ /// and https://github.com/dart-lang/language/issues/60
+ final bool enableTripleShift;
+
+ /// If `true`, 'augment' is treated as a built-in identifier.
+ final bool forAugmentationLibrary;
+
+ const ScannerConfiguration({
+ this.enableTripleShift = false,
+ this.forAugmentationLibrary = false,
+ });
+}
diff --git a/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/internal_utils.dart b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/internal_utils.dart
new file mode 100644
index 0000000..b287852
--- /dev/null
+++ b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/internal_utils.dart
@@ -0,0 +1,64 @@
+import 'characters.dart' show $$, $0, $9, $A, $Z, $_, $a, $z;
+
+@pragma("vm:prefer-inline")
+bool isIdentifierChar(int next, bool allowDollar) {
+ return ($a <= next && next <= $z) ||
+ ($A <= next && next <= $Z) ||
+ ($0 <= next && next <= $9) ||
+ next == $_ ||
+ (next == $$ && allowDollar);
+}
+
+/// Checks if the character [next] is an identifier character (allowing the
+/// dollar sign) using a table lookup, utilizing the fact that the input is from
+/// a Uint8List and therefore between 0 and 255.
+/// It is the caller's responsibility to ensure that this is the case.
+// DartDocTest(() {
+// for (int i = 0; i < 256; i++) {
+// if (isIdentifierCharAllowDollarTableLookup(i) !=
+// isIdentifierChar(i, true)) {
+// return false;
+// }
+// }
+// return true;
+// }(), true);
+@pragma("vm:prefer-inline")
+bool isIdentifierCharAllowDollarTableLookup(int next) {
+ const List<bool> table = [
+ // format hack.
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, true, false, false, false,
+ false, false, false, false, false, false, false, false,
+ true, true, true, true, true, true, true, true,
+ true, true, false, false, false, false, false, false,
+ false, true, true, true, true, true, true, true,
+ true, true, true, true, true, true, true, true,
+ true, true, true, true, true, true, true, true,
+ true, true, true, false, false, false, false, true,
+ false, true, true, true, true, true, true, true,
+ true, true, true, true, true, true, true, true,
+ true, true, true, true, true, true, true, true,
+ true, true, true, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ false, false, false, false, false, false, false, false,
+ // format hack.
+ ];
+ return table[next];
+}
diff --git a/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/interner.dart b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/interner.dart
new file mode 100644
index 0000000..9f248c9
--- /dev/null
+++ b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/interner.dart
@@ -0,0 +1,24 @@
+// Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+/**
+ * The interface `Interner` defines the behavior of objects that can intern
+ * strings.
+ */
+abstract class Interner {
+ /**
+ * Return a string that is identical to all of the other strings that have
+ * been interned that are equal to the given [string].
+ */
+ String intern(String string);
+}
+
+/**
+ * The class `NullInterner` implements an interner that does nothing (does not
+ * actually intern any strings).
+ */
+class NullInterner implements Interner {
+ @override
+ String intern(String string) => string;
+}
diff --git a/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/keyword_state.dart b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/keyword_state.dart
new file mode 100644
index 0000000..201434f
--- /dev/null
+++ b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/keyword_state.dart
@@ -0,0 +1,73 @@
+// Copyright (c) 2025, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+import 'dart:typed_data';
+
+import 'characters.dart';
+import 'token.dart';
+
+final class KeywordStateHelper {
+ static Uint16List? _table;
+ static KeywordState get table {
+ if (_table == null) {
+ // This is a fixed calculation, though if more keywords are added this
+ // number of (two-byte) entries might have to change.
+ Uint16List table = _table = new Uint16List(297 * KeywordState.blockSize);
+ int nextEmpty = 2 * KeywordState.blockSize;
+ for (int i = 0; i < Keyword.values.length; i++) {
+ Keyword keyword = Keyword.values[i];
+ String lexeme = keyword.lexeme;
+ // At this point we're looking at the $blockSize entries
+ // $blockSize..(2 * $blockSize - 1).
+ // The first $blockSize entries (0..($blockSize - 1)) are all 0s,
+ // being the "null leaf".
+ int offset = KeywordState.blockSize;
+ // For an offset, the 0'th entry is a link to the keyword
+ // (+1, so 0 means no keyword) and the remaining 58 entries are
+ // indexed by codeUnit - $A.
+ for (int j = 0; j < lexeme.length; j++) {
+ int charOffset = lexeme.codeUnitAt(j) - $A;
+ int link = table[offset + 1 + charOffset];
+ if (link == 0) {
+ // New one
+ table[offset + 1 + charOffset] = nextEmpty;
+ offset = nextEmpty;
+ nextEmpty += KeywordState.blockSize;
+ } else {
+ // Existing one.
+ offset = link;
+ }
+ }
+ // This offset's position 0 points to the (i + 1)'th keyword.
+ table[offset + 0] = i + 1;
+ }
+ assert(nextEmpty == table.length);
+ }
+ return new KeywordState._(KeywordState.blockSize);
+ }
+}
+
+extension type KeywordState._(int _offset) {
+ static const int blockSize = 59;
+
+ @pragma("vm:prefer-inline")
+ bool get isNull => _offset == 0;
+
+ @pragma("vm:prefer-inline")
+ Keyword? get keyword {
+ // The 0'th index at the offset.
+ int keywordIndexPlusOne = KeywordStateHelper._table![_offset];
+ if (keywordIndexPlusOne == 0) return null;
+ return Keyword.values[keywordIndexPlusOne - 1];
+ }
+
+ @pragma("vm:prefer-inline")
+ KeywordState next(int next) {
+ // The entry for [next] starts at index offset + 1 (with $A first),
+ // because offset + 0 holds the (possible) keyword.
+ return new KeywordState._(
+ KeywordStateHelper._table![_offset + next - $A + 1],
+ );
+ }
+}
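+
+// An illustrative keyword lookup, written as a DartDocTest comment like the
+// ones elsewhere in these files: walking the state table one code unit at a
+// time for the lexeme "class" ends in a state whose keyword is
+// Keyword.CLASS.
+// DartDocTest(() {
+//   KeywordState state = KeywordStateHelper.table;
+//   for (int unit in "class".codeUnits) {
+//     state = state.next(unit);
+//     if (state.isNull) return null;
+//   }
+//   return state.keyword;
+// }(), Keyword.CLASS)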
diff --git a/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/link.dart b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/link.dart
new file mode 100644
index 0000000..6ba6499
--- /dev/null
+++ b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/link.dart
@@ -0,0 +1,214 @@
+// Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+library _fe_analyzer_shared.util.link;
+
+import 'link_implementation.dart'
+ show LinkBuilderImplementation, LinkEntry, LinkIterator, MappedLinkIterable;
+
+class Link<T> implements Iterable<T> {
+ const Link();
+ // TODO(ahe): Remove this method?
+ @override
+ T get first {
+ if (isEmpty) throw new StateError('No elements');
+ return head;
+ }
+
+ @override
+ int get hashCode => throw new UnsupportedError('Link.hashCode');
+
+ T get head => throw new StateError("no elements");
+
+ @override
+ bool get isEmpty => true;
+
+ @override
+ bool get isNotEmpty => false;
+
+ @override
+ Iterator<T> get iterator => new LinkIterator<T>(this);
+
+ @override
+ T get last => _unsupported('get:last');
+
+ @override
+ get length {
+ throw new UnsupportedError('get:length');
+ }
+
+ // TODO(ahe): Remove this method?
+ @override
+ T get single {
+ if (isEmpty) throw new StateError('No elements');
+ if (!tail!.isEmpty) throw new StateError('More than one element');
+ return head;
+ }
+
+ Link<T>? get tail => null;
+
+ @override
+ bool operator ==(other) {
+ if (other is! Link<T>) return false;
+ return other.isEmpty;
+ }
+
+ //
+ // Unsupported Iterable<T> methods.
+ //
+ @override
+ bool any(bool f(T e)) => _unsupported('any');
+
+ @override
+ Iterable<T> cast<T>() => _unsupported('cast');
+
+ // TODO(ahe): Remove this method?
+ @override
+ bool contains(Object? element) {
+ for (Link<T> link = this; !link.isEmpty; link = link.tail!) {
+ if (link.head == element) return true;
+ }
+ return false;
+ }
+
+ @override
+ T elementAt(int i) => _unsupported('elementAt');
+
+ /// Returns true if f returns true for all elements of this list.
+ ///
+ /// Returns true for the empty list.
+ @override
+ bool every(bool f(T e)) {
+ for (Link<T> link = this; !link.isEmpty; link = link.tail!) {
+ if (!f(link.head)) return false;
+ }
+ return true;
+ }
+
+ @override
+ Iterable<K> expand<K>(Iterable<K> f(T e)) => _unsupported('expand');
+
+ @override
+ T firstWhere(bool f(T e), {T orElse()?}) => _unsupported('firstWhere');
+
+ @override
+ K fold<K>(K initialValue, K combine(K value, T element)) {
+ return _unsupported('fold');
+ }
+
+ @override
+ Iterable<T> followedBy(Iterable<T> other) => _unsupported('followedBy');
+
+ @override
+ void forEach(void f(T element)) {}
+
+ @override
+ String join([separator = '']) => _unsupported('join');
+
+ @override
+ T lastWhere(bool f(T e), {T orElse()?}) => _unsupported('lastWhere');
+
+ /// Lazily maps over this linked list, returning an [Iterable].
+ @override
+ Iterable<K> map<K>(K fn(T item)) {
+ return new MappedLinkIterable<T, K>(this, fn);
+ }
+
+ /// Invokes `fn` for every item in the linked list and returns the results
+ /// in a [List].
+ /// TODO(scheglov) Rewrite to `List<E>`, or remove.
+ List<E?> mapToList<E>(E fn(T item), {bool growable = true}) {
+ List<E?> result;
+ if (!growable) {
+ result = new List<E?>.filled(slowLength(), null);
+ } else {
+ result = <E?>[];
+ result.length = slowLength();
+ }
+ int i = 0;
+ for (Link<T> link = this; !link.isEmpty; link = link.tail!) {
+ result[i++] = fn(link.head);
+ }
+ return result;
+ }
+
+ Link<T> prepend(T element) {
+ return new LinkEntry<T>(element, this);
+ }
+
+ void printOn(StringBuffer buffer, [separatedBy]) {}
+ @override
+ T reduce(T combine(T a, T b)) => _unsupported('reduce');
+ Iterable<T> retype<T>() => _unsupported('retype');
+
+ Link<T> reverse(Link<T> tail) => this;
+ Link<T> reversePrependAll(Link<T> from) {
+ if (from.isEmpty) return this;
+ return this.prepend(from.head).reversePrependAll(from.tail!);
+ }
+
+ @override
+ T singleWhere(bool f(T e), {T orElse()?}) => _unsupported('singleWhere');
+ @override
+ Link<T> skip(int n) {
+ if (n == 0) return this;
+ throw new RangeError('Index $n out of range');
+ }
+
+ @override
+ Iterable<T> skipWhile(bool f(T e)) => _unsupported('skipWhile');
+ int slowLength() => 0;
+ @override
+ Iterable<T> take(int n) => _unsupported('take');
+ @override
+ Iterable<T> takeWhile(bool f(T e)) => _unsupported('takeWhile');
+ @override
+ List<T> toList({bool growable = true}) {
+ List<T> result = <T>[];
+ for (Link<T> link = this; !link.isEmpty; link = link.tail!) {
+ result.add(link.head);
+ }
+ return result;
+ }
+
+ @override
+ Set<T> toSet() => _unsupported('toSet');
+ @override
+ String toString() => "[]";
+ @override
+ Iterable<T> where(bool f(T e)) => _unsupported('where');
+ @override
+ Iterable<T> whereType<T>() => _unsupported('whereType');
+
+ Never _unsupported(String method) => throw new UnsupportedError(method);
+}
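+
+// A small illustrative example in the same DartDocTest style: [prepend] adds
+// to the front, so prepending 3, then 2, then 1 onto the empty link yields
+// the list [ 1, 2, 3 ].
+// DartDocTest(() {
+//   Link<int> link = const Link<int>().prepend(3).prepend(2).prepend(1);
+//   return link.toString();
+// }(), '[ 1, 2, 3 ]')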
+
+/// Builder object for creating linked lists using [Link] or fixed-length [List]
+/// objects.
+abstract class LinkBuilder<T> {
+ factory LinkBuilder() = LinkBuilderImplementation<T>;
+
+ /// Returns the first element in the list being built.
+ T get first;
+
+ /// Returns `true` if the list being built is empty.
+ bool get isEmpty;
+
+ /// Returns the number of elements in the list being built.
+ int get length;
+
+ /// Adds the element [t] to the end of the list being built.
+ Link<T> addLast(T t);
+
+ /// Removes all added elements and resets the builder.
+ void clear();
+
+ /// Prepends all elements added to the builder to [tail]. The resulting list
+ /// is returned and the builder is cleared.
+ Link<T> toLink(Link<T> tail);
+
+ /// Creates a new fixed-length list containing all added elements. The
+ /// resulting list is returned and the builder is cleared.
+ List<T> toList();
+}
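+
+// Another illustrative DartDocTest-style sketch: elements added with
+// [addLast] come back in insertion order once the builder is finished off
+// with [toLink].
+// DartDocTest(() {
+//   LinkBuilder<int> builder = new LinkBuilder<int>();
+//   builder.addLast(1);
+//   builder.addLast(2);
+//   return builder.toLink(const Link<int>()).toList().join(',');
+// }(), '1,2')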
diff --git a/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/link_implementation.dart b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/link_implementation.dart
new file mode 100644
index 0000000..1132fb0
--- /dev/null
+++ b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/link_implementation.dart
@@ -0,0 +1,237 @@
+// Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+library _fe_analyzer_shared.util.link_implementation;
+
+import 'dart:collection' show IterableBase;
+
+import 'link.dart' show Link, LinkBuilder;
+
+typedef T Transformation<S, T>(S input);
+
+class LinkBuilderImplementation<T> implements LinkBuilder<T> {
+ LinkEntry<T>? head = null;
+ LinkEntry<T>? lastLink = null;
+ @override
+ int length = 0;
+
+ LinkBuilderImplementation();
+
+ @override
+ T get first {
+ if (head != null) {
+ return head!.head;
+ }
+ throw new StateError("no elements");
+ }
+
+ @override
+ bool get isEmpty => length == 0;
+
+ @override
+ Link<T> addLast(T t) {
+ length++;
+ LinkEntry<T> entry = new LinkEntry<T>(t, null);
+ if (head == null) {
+ head = entry;
+ } else {
+ lastLink!.tail = entry;
+ }
+ lastLink = entry;
+ return entry;
+ }
+
+ @override
+ void clear() {
+ head = null;
+ lastLink = null;
+ length = 0;
+ }
+
+ @override
+ Link<T> toLink(Link<T> tail) {
+ if (head == null) return tail;
+ lastLink!.tail = tail;
+ Link<T> link = head!;
+ lastLink = null;
+ head = null;
+ length = 0;
+ return link;
+ }
+
+ @override
+ List<T> toList() {
+ if (length == 0) return <T>[];
+
+ List<T> list = <T>[];
+ Link<T> link = head!;
+ while (link.isNotEmpty) {
+ list.add(link.head);
+ link = link.tail!;
+ }
+ lastLink = null;
+ head = null;
+ length = 0;
+ return list;
+ }
+}
+
+class LinkEntry<T> extends Link<T> {
+ @override
+ final T head;
+ @override
+ Link<T> tail;
+
+ LinkEntry(this.head, [Link<T>? tail]) : tail = tail ?? const Link<Never>();
+
+ @override
+ int get hashCode => throw new UnsupportedError('LinkEntry.hashCode');
+
+ @override
+ bool get isEmpty => false;
+
+ @override
+ bool get isNotEmpty => true;
+
+ @override
+ bool operator ==(other) {
+ if (other is! Link<T>) return false;
+ Link<T> myElements = this;
+ Link<T> otherElements = other;
+ while (myElements.isNotEmpty && otherElements.isNotEmpty) {
+ if (myElements.head != otherElements.head) {
+ return false;
+ }
+ myElements = myElements.tail!;
+ otherElements = otherElements.tail!;
+ }
+ return myElements.isEmpty && otherElements.isEmpty;
+ }
+
+ @override
+ void forEach(void f(T element)) {
+ for (Link<T> link = this; link.isNotEmpty; link = link.tail!) {
+ f(link.head);
+ }
+ }
+
+ @override
+ Link<T> prepend(T element) {
+ // TODO(ahe): Use new Link<T>, but this costs 8% performance on the VM.
+ return new LinkEntry<T>(element, this);
+ }
+
+ @override
+ void printOn(StringBuffer buffer, [separatedBy]) {
+ buffer.write(head);
+ if (separatedBy == null) separatedBy = '';
+ for (Link<T> link = tail; link.isNotEmpty; link = link.tail!) {
+ buffer.write(separatedBy);
+ buffer.write(link.head);
+ }
+ }
+
+ @override
+ Link<T> reverse(Link<T> tail) {
+ Link<T> result = tail;
+ for (Link<T> link = this; link.isNotEmpty; link = link.tail!) {
+ result = result.prepend(link.head);
+ }
+ return result;
+ }
+
+ @override
+ Link<T> reversePrependAll(Link<T> from) {
+ Link<T> result;
+ for (result = this; from.isNotEmpty; from = from.tail!) {
+ result = result.prepend(from.head);
+ }
+ return result;
+ }
+
+ @override
+ Link<T> skip(int n) {
+ Link<T> link = this;
+ for (int i = 0; i < n; i++) {
+ if (link.isEmpty) {
+ throw new RangeError('Index $n out of range');
+ }
+ link = link.tail!;
+ }
+ return link;
+ }
+
+ @override
+ int slowLength() {
+ int length = 0;
+ for (Link<T> current = this; current.isNotEmpty; current = current.tail!) {
+ ++length;
+ }
+ return length;
+ }
+
+ @override
+ String toString() {
+ StringBuffer buffer = new StringBuffer();
+ buffer.write('[ ');
+ printOn(buffer, ', ');
+ buffer.write(' ]');
+ return buffer.toString();
+ }
+}
+
+class LinkIterator<T> implements Iterator<T> {
+ T? _current;
+ Link<T> _link;
+
+ LinkIterator(this._link);
+
+ @override
+ T get current => _current!;
+
+ @override
+ bool moveNext() {
+ if (_link.isEmpty) {
+ _current = null;
+ return false;
+ }
+ _current = _link.head;
+ _link = _link.tail!;
+ return true;
+ }
+}
+
+class MappedLinkIterable<S, T> extends IterableBase<T> {
+ Transformation<S, T> _transformation;
+ Link<S> _link;
+
+ MappedLinkIterable(this._link, this._transformation);
+
+ @override
+ Iterator<T> get iterator {
+ return new MappedLinkIterator<S, T>(_link, _transformation);
+ }
+}
+
+class MappedLinkIterator<S, T> implements Iterator<T> {
+ Transformation<S, T> _transformation;
+ Link<S> _link;
+ T? _current;
+
+ MappedLinkIterator(this._link, this._transformation);
+
+ @override
+ T get current => _current!;
+
+ @override
+ bool moveNext() {
+ if (_link.isEmpty) {
+ _current = null;
+ return false;
+ }
+ _current = _transformation(_link.head);
+ _link = _link.tail!;
+ return true;
+ }
+}
diff --git a/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/string_utilities.dart b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/string_utilities.dart
new file mode 100644
index 0000000..3b5540b
--- /dev/null
+++ b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/string_utilities.dart
@@ -0,0 +1,11 @@
+// Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+import 'interner.dart';
+
+class StringUtilities {
+ static Interner INTERNER = new NullInterner();
+
+ static String intern(String string) => INTERNER.intern(string);
+}
diff --git a/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/syntactic_entity.dart b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/syntactic_entity.dart
new file mode 100644
index 0000000..4a51ebe
--- /dev/null
+++ b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/syntactic_entity.dart
@@ -0,0 +1,22 @@
+/**
+ * Interface representing a syntactic entity (either a token or an AST node)
+ * which has a location and extent in the source file.
+ */
+abstract class SyntacticEntity {
+ /**
+ * Return the offset from the beginning of the file to the character after the
+ * last character of the syntactic entity.
+ */
+ int get end;
+
+ /**
+ * Return the number of characters in the syntactic entity's source range.
+ */
+ int get length;
+
+ /**
+ * Return the offset from the beginning of the file to the first character in
+ * the syntactic entity.
+ */
+ int get offset;
+}
diff --git a/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/token.dart b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/token.dart
new file mode 100644
index 0000000..c319da2
--- /dev/null
+++ b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/token.dart
@@ -0,0 +1,2920 @@
+// Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+/**
+ * Defines the tokens that are produced by the scanner, used by the parser, and
+ * referenced from the [AST structure](ast.dart).
+ */
+import 'dart:collection';
+
+import 'string_utilities.dart';
+import 'syntactic_entity.dart';
+import 'token_constants.dart';
+
+const int ADDITIVE_PRECEDENCE = 13;
+const int ASSIGNMENT_PRECEDENCE = 1;
+const int BITWISE_AND_PRECEDENCE = 11;
+const int BITWISE_OR_PRECEDENCE = 9;
+const int BITWISE_XOR_PRECEDENCE = 10;
+const int CASCADE_PRECEDENCE = 2;
+const int CONDITIONAL_PRECEDENCE = 3;
+const int EQUALITY_PRECEDENCE = 7;
+const int IF_NULL_PRECEDENCE = 4;
+const int LOGICAL_AND_PRECEDENCE = 6;
+const int LOGICAL_OR_PRECEDENCE = 5;
+const int MULTIPLICATIVE_PRECEDENCE = 14;
+const int NO_PRECEDENCE = 0;
+const int POSTFIX_PRECEDENCE = 16;
+const int PREFIX_PRECEDENCE = 15;
+const int RELATIONAL_PRECEDENCE = 8;
+const int SELECTOR_PRECEDENCE = 17;
+const int SHIFT_PRECEDENCE = 12;
+
+/// Constant list of [TokenType] and [Keyword] ordered by index.
+///
+/// This list should always have length 256 to avoid bounds checks in
+/// SimpleToken.type:
+/// DartDocTest(_tokenTypesByIndex.length, 256)
+const List<TokenType> _tokenTypesByIndex = [
+ TokenType.EOF,
+ TokenType.DOUBLE,
+ TokenType.DOUBLE_WITH_SEPARATORS,
+ TokenType.HEXADECIMAL,
+ TokenType.HEXADECIMAL_WITH_SEPARATORS,
+ TokenType.IDENTIFIER,
+ TokenType.INT,
+ TokenType.INT_WITH_SEPARATORS,
+ TokenType.MULTI_LINE_COMMENT,
+ TokenType.SCRIPT_TAG,
+ TokenType.SINGLE_LINE_COMMENT,
+ TokenType.STRING,
+ TokenType.AMPERSAND,
+ TokenType.AMPERSAND_AMPERSAND,
+ TokenType.AMPERSAND_AMPERSAND_EQ,
+ TokenType.AMPERSAND_EQ,
+ TokenType.AT,
+ TokenType.BANG,
+ TokenType.BANG_EQ,
+ TokenType.BANG_EQ_EQ,
+ TokenType.BAR,
+ TokenType.BAR_BAR,
+ TokenType.BAR_BAR_EQ,
+ TokenType.BAR_EQ,
+ TokenType.COLON,
+ TokenType.COMMA,
+ TokenType.CARET,
+ TokenType.CARET_EQ,
+ TokenType.CLOSE_CURLY_BRACKET,
+ TokenType.CLOSE_PAREN,
+ TokenType.CLOSE_SQUARE_BRACKET,
+ TokenType.EQ,
+ TokenType.EQ_EQ,
+ TokenType.EQ_EQ_EQ,
+ TokenType.FUNCTION,
+ TokenType.GT,
+ TokenType.GT_EQ,
+ TokenType.GT_GT,
+ TokenType.GT_GT_EQ,
+ TokenType.GT_GT_GT,
+ TokenType.GT_GT_GT_EQ,
+ TokenType.HASH,
+ TokenType.INDEX,
+ TokenType.INDEX_EQ,
+ TokenType.LT,
+ TokenType.LT_EQ,
+ TokenType.LT_LT,
+ TokenType.LT_LT_EQ,
+ TokenType.MINUS,
+ TokenType.MINUS_EQ,
+ TokenType.MINUS_MINUS,
+ TokenType.OPEN_CURLY_BRACKET,
+ TokenType.OPEN_PAREN,
+ TokenType.OPEN_SQUARE_BRACKET,
+ TokenType.PERCENT,
+ TokenType.PERCENT_EQ,
+ TokenType.PERIOD,
+ TokenType.PERIOD_PERIOD,
+ TokenType.PLUS,
+ TokenType.PLUS_EQ,
+ TokenType.PLUS_PLUS,
+ TokenType.QUESTION,
+ TokenType.QUESTION_PERIOD,
+ TokenType.QUESTION_QUESTION,
+ TokenType.QUESTION_QUESTION_EQ,
+ TokenType.SEMICOLON,
+ TokenType.SLASH,
+ TokenType.SLASH_EQ,
+ TokenType.STAR,
+ TokenType.STAR_EQ,
+ TokenType.STRING_INTERPOLATION_EXPRESSION,
+ TokenType.STRING_INTERPOLATION_IDENTIFIER,
+ TokenType.TILDE,
+ TokenType.TILDE_SLASH,
+ TokenType.TILDE_SLASH_EQ,
+ TokenType.BACKPING,
+ TokenType.BACKSLASH,
+ TokenType.PERIOD_PERIOD_PERIOD,
+ TokenType.PERIOD_PERIOD_PERIOD_QUESTION,
+ TokenType.QUESTION_PERIOD_PERIOD,
+ TokenType.BAD_INPUT,
+ TokenType.RECOVERY,
+ Keyword.ABSTRACT,
+ Keyword.AS,
+ Keyword.ASSERT,
+ Keyword.ASYNC,
+ Keyword.AUGMENT,
+ Keyword.AWAIT,
+ Keyword.BASE,
+ Keyword.BREAK,
+ Keyword.CASE,
+ Keyword.CATCH,
+ Keyword.CLASS,
+ Keyword.CONST,
+ Keyword.CONTINUE,
+ Keyword.COVARIANT,
+ Keyword.DEFAULT,
+ Keyword.DEFERRED,
+ Keyword.DO,
+ Keyword.DYNAMIC,
+ Keyword.ELSE,
+ Keyword.ENUM,
+ Keyword.EXPORT,
+ Keyword.EXTENDS,
+ Keyword.EXTENSION,
+ Keyword.EXTERNAL,
+ Keyword.FACTORY,
+ Keyword.FALSE,
+ Keyword.FINAL,
+ Keyword.FINALLY,
+ Keyword.FOR,
+ Keyword.FUNCTION,
+ Keyword.GET,
+ Keyword.HIDE,
+ Keyword.IF,
+ Keyword.IMPLEMENTS,
+ Keyword.IMPORT,
+ Keyword.IN,
+ Keyword.INOUT,
+ Keyword.INTERFACE,
+ Keyword.IS,
+ Keyword.LATE,
+ Keyword.LIBRARY,
+ Keyword.MIXIN,
+ Keyword.NATIVE,
+ Keyword.NEW,
+ Keyword.NULL,
+ Keyword.OF,
+ Keyword.ON,
+ Keyword.OPERATOR,
+ Keyword.OUT,
+ Keyword.PART,
+ Keyword.PATCH,
+ Keyword.REQUIRED,
+ Keyword.RETHROW,
+ Keyword.RETURN,
+ Keyword.SEALED,
+ Keyword.SET,
+ Keyword.SHOW,
+ Keyword.SOURCE,
+ Keyword.STATIC,
+ Keyword.SUPER,
+ Keyword.SWITCH,
+ Keyword.SYNC,
+ Keyword.THIS,
+ Keyword.THROW,
+ Keyword.TRUE,
+ Keyword.TRY,
+ Keyword.TYPEDEF,
+ Keyword.VAR,
+ Keyword.VOID,
+ Keyword.WHEN,
+ Keyword.WHILE,
+ Keyword.WITH,
+ Keyword.YIELD,
+ // Fill to length 256 to avoid bounds check in SimpleToken.type.
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+ TokenType.UNUSED,
+];
+
+/**
+ * The opening half of a grouping pair of tokens. This is used for curly
+ * brackets ('{'), parentheses ('('), and square brackets ('[').
+ */
+class BeginToken extends SimpleToken {
+ /**
+ * The token that corresponds to this token.
+ */
+ Token? endToken;
+
+ /**
+ * Initialize a newly created token to have the given [type] at the given
+ * [offset].
+ */
+ BeginToken(TokenType type, int offset, [CommentToken? precedingComment])
+ : super(type, offset, precedingComment) {
+ assert(
+ type == TokenType.LT ||
+ type == TokenType.OPEN_CURLY_BRACKET ||
+ type == TokenType.OPEN_PAREN ||
+ type == TokenType.OPEN_SQUARE_BRACKET ||
+ type == TokenType.STRING_INTERPOLATION_EXPRESSION,
+ );
+ }
+
+ @override
+ Token? get endGroup => endToken;
+
+ /**
+ * Set the token that corresponds to this token.
+ */
+ set endGroup(Token? token) {
+ endToken = token;
+ }
+}
+
+/**
+ * A token representing a comment.
+ */
+class CommentToken extends StringToken {
+ /**
+ * The token that contains this comment.
+ */
+ SimpleToken? parent;
+
+ /**
+ * Initialize a newly created token to represent a token of the given [type]
+ * with the given [value] at the given [offset].
+ */
+ CommentToken(super.type, super.value, super.offset);
+}
+
+/**
+ * A documentation comment token.
+ */
+class DocumentationCommentToken extends CommentToken {
+ /**
+ * Initialize a newly created token to represent a token of the given [type]
+ * with the given [value] at the given [offset].
+ */
+ DocumentationCommentToken(super.type, super.value, super.offset);
+}
+
+/**
+ * The keywords in the Dart programming language.
+ *
+ * Clients may not extend, implement or mix-in this class.
+ */
+class Keyword extends TokenType {
+ static const Keyword ABSTRACT = const Keyword(
+ /* index = */ 82,
+ "abstract",
+ "ABSTRACT",
+ KeywordStyle.builtIn,
+ isModifier: true,
+ );
+
+ static const Keyword AS = const Keyword(
+ /* index = */ 83,
+ "as",
+ "AS",
+ KeywordStyle.builtIn,
+ precedence: RELATIONAL_PRECEDENCE,
+ );
+
+ static const Keyword ASSERT = const Keyword(
+ /* index = */ 84,
+ "assert",
+ "ASSERT",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword ASYNC = const Keyword(
+ /* index = */ 85,
+ "async",
+ "ASYNC",
+ KeywordStyle.pseudo,
+ );
+
+ static const Keyword AUGMENT = const Keyword(
+ /* index = */ 86,
+ "augment",
+ "AUGMENT",
+ KeywordStyle.builtIn,
+ isModifier: true,
+ );
+
+ static const Keyword AWAIT = const Keyword(
+ /* index = */ 87,
+ "await",
+ "AWAIT",
+ KeywordStyle.pseudo,
+ );
+
+ static const Keyword BASE = const Keyword(
+ /* index = */ 88,
+ "base",
+ "BASE",
+ KeywordStyle.pseudo,
+ );
+
+ static const Keyword BREAK = const Keyword(
+ /* index = */ 89,
+ "break",
+ "BREAK",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword CASE = const Keyword(
+ /* index = */ 90,
+ "case",
+ "CASE",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword CATCH = const Keyword(
+ /* index = */ 91,
+ "catch",
+ "CATCH",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword CLASS = const Keyword(
+ /* index = */ 92,
+ "class",
+ "CLASS",
+ KeywordStyle.reserved,
+ isTopLevelKeyword: true,
+ );
+
+ static const Keyword CONST = const Keyword(
+ /* index = */ 93,
+ "const",
+ "CONST",
+ KeywordStyle.reserved,
+ isModifier: true,
+ );
+
+ static const Keyword CONTINUE = const Keyword(
+ /* index = */ 94,
+ "continue",
+ "CONTINUE",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword COVARIANT = const Keyword(
+ /* index = */ 95,
+ "covariant",
+ "COVARIANT",
+ KeywordStyle.builtIn,
+ isModifier: true,
+ );
+
+ static const Keyword DEFAULT = const Keyword(
+ /* index = */ 96,
+ "default",
+ "DEFAULT",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword DEFERRED = const Keyword(
+ /* index = */ 97,
+ "deferred",
+ "DEFERRED",
+ KeywordStyle.builtIn,
+ );
+
+ static const Keyword DO = const Keyword(
+ /* index = */ 98,
+ "do",
+ "DO",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword DYNAMIC = const Keyword(
+ /* index = */ 99,
+ "dynamic",
+ "DYNAMIC",
+ KeywordStyle.builtIn,
+ );
+
+ static const Keyword ELSE = const Keyword(
+ /* index = */ 100,
+ "else",
+ "ELSE",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword ENUM = const Keyword(
+ /* index = */ 101,
+ "enum",
+ "ENUM",
+ KeywordStyle.reserved,
+ isTopLevelKeyword: true,
+ );
+
+ static const Keyword EXPORT = const Keyword(
+ /* index = */ 102,
+ "export",
+ "EXPORT",
+ KeywordStyle.builtIn,
+ isTopLevelKeyword: true,
+ );
+
+ static const Keyword EXTENDS = const Keyword(
+ /* index = */ 103,
+ "extends",
+ "EXTENDS",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword EXTENSION = const Keyword(
+ /* index = */ 104,
+ "extension",
+ "EXTENSION",
+ KeywordStyle.builtIn,
+ isTopLevelKeyword: true,
+ );
+
+ static const Keyword EXTERNAL = const Keyword(
+ /* index = */ 105,
+ "external",
+ "EXTERNAL",
+ KeywordStyle.builtIn,
+ isModifier: true,
+ );
+
+ static const Keyword FACTORY = const Keyword(
+ /* index = */ 106,
+ "factory",
+ "FACTORY",
+ KeywordStyle.builtIn,
+ );
+
+ static const Keyword FALSE = const Keyword(
+ /* index = */ 107,
+ "false",
+ "FALSE",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword FINAL = const Keyword(
+ /* index = */ 108,
+ "final",
+ "FINAL",
+ KeywordStyle.reserved,
+ isModifier: true,
+ );
+
+ static const Keyword FINALLY = const Keyword(
+ /* index = */ 109,
+ "finally",
+ "FINALLY",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword FOR = const Keyword(
+ /* index = */ 110,
+ "for",
+ "FOR",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword FUNCTION = const Keyword(
+ /* index = */ 111,
+ "Function",
+ "FUNCTION",
+ KeywordStyle.builtIn,
+ );
+
+ static const Keyword GET = const Keyword(
+ /* index = */ 112,
+ "get",
+ "GET",
+ KeywordStyle.builtIn,
+ );
+
+ static const Keyword HIDE = const Keyword(
+ /* index = */ 113,
+ "hide",
+ "HIDE",
+ KeywordStyle.pseudo,
+ );
+
+ static const Keyword IF = const Keyword(
+ /* index = */ 114,
+ "if",
+ "IF",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword IMPLEMENTS = const Keyword(
+ /* index = */ 115,
+ "implements",
+ "IMPLEMENTS",
+ KeywordStyle.builtIn,
+ );
+
+ static const Keyword IMPORT = const Keyword(
+ /* index = */ 116,
+ "import",
+ "IMPORT",
+ KeywordStyle.builtIn,
+ isTopLevelKeyword: true,
+ );
+
+ static const Keyword IN = const Keyword(
+ /* index = */ 117,
+ "in",
+ "IN",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword INOUT = const Keyword(
+ /* index = */ 118,
+ "inout",
+ "INOUT",
+ KeywordStyle.pseudo,
+ );
+
+ static const Keyword INTERFACE = const Keyword(
+ /* index = */ 119,
+ "interface",
+ "INTERFACE",
+ KeywordStyle.builtIn,
+ );
+
+ static const Keyword IS = const Keyword(
+ /* index = */ 120,
+ "is",
+ "IS",
+ KeywordStyle.reserved,
+ precedence: RELATIONAL_PRECEDENCE,
+ );
+
+ static const Keyword LATE = const Keyword(
+ /* index = */ 121,
+ "late",
+ "LATE",
+ KeywordStyle.builtIn,
+ isModifier: true,
+ );
+
+ static const Keyword LIBRARY = const Keyword(
+ /* index = */ 122,
+ "library",
+ "LIBRARY",
+ KeywordStyle.builtIn,
+ isTopLevelKeyword: true,
+ );
+
+ static const Keyword MIXIN = const Keyword(
+ /* index = */ 123,
+ "mixin",
+ "MIXIN",
+ KeywordStyle.builtIn,
+ isTopLevelKeyword: true,
+ );
+
+ static const Keyword NATIVE = const Keyword(
+ /* index = */ 124,
+ "native",
+ "NATIVE",
+ KeywordStyle.pseudo,
+ );
+
+ static const Keyword NEW = const Keyword(
+ /* index = */ 125,
+ "new",
+ "NEW",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword NULL = const Keyword(
+ /* index = */ 126,
+ "null",
+ "NULL",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword OF = const Keyword(
+ /* index = */ 127,
+ "of",
+ "OF",
+ KeywordStyle.pseudo,
+ );
+
+ static const Keyword ON = const Keyword(
+ /* index = */ 128,
+ "on",
+ "ON",
+ KeywordStyle.pseudo,
+ );
+
+ static const Keyword OPERATOR = const Keyword(
+ /* index = */ 129,
+ "operator",
+ "OPERATOR",
+ KeywordStyle.builtIn,
+ );
+
+ static const Keyword OUT = const Keyword(
+ /* index = */ 130,
+ "out",
+ "OUT",
+ KeywordStyle.pseudo,
+ );
+
+ static const Keyword PART = const Keyword(
+ /* index = */ 131,
+ "part",
+ "PART",
+ KeywordStyle.builtIn,
+ isTopLevelKeyword: true,
+ );
+
+ static const Keyword PATCH = const Keyword(
+ /* index = */ 132,
+ "patch",
+ "PATCH",
+ KeywordStyle.pseudo,
+ );
+
+ static const Keyword REQUIRED = const Keyword(
+ /* index = */ 133,
+ "required",
+ "REQUIRED",
+ KeywordStyle.builtIn,
+ isModifier: true,
+ );
+
+ static const Keyword RETHROW = const Keyword(
+ /* index = */ 134,
+ "rethrow",
+ "RETHROW",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword RETURN = const Keyword(
+ /* index = */ 135,
+ "return",
+ "RETURN",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword SEALED = const Keyword(
+ /* index = */ 136,
+ "sealed",
+ "SEALED",
+ KeywordStyle.pseudo,
+ );
+
+ static const Keyword SET = const Keyword(
+ /* index = */ 137,
+ "set",
+ "SET",
+ KeywordStyle.builtIn,
+ );
+
+ static const Keyword SHOW = const Keyword(
+ /* index = */ 138,
+ "show",
+ "SHOW",
+ KeywordStyle.pseudo,
+ );
+
+ static const Keyword SOURCE = const Keyword(
+ /* index = */ 139,
+ "source",
+ "SOURCE",
+ KeywordStyle.pseudo,
+ );
+
+ static const Keyword STATIC = const Keyword(
+ /* index = */ 140,
+ "static",
+ "STATIC",
+ KeywordStyle.builtIn,
+ isModifier: true,
+ );
+
+ static const Keyword SUPER = const Keyword(
+ /* index = */ 141,
+ "super",
+ "SUPER",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword SWITCH = const Keyword(
+ /* index = */ 142,
+ "switch",
+ "SWITCH",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword SYNC = const Keyword(
+ /* index = */ 143,
+ "sync",
+ "SYNC",
+ KeywordStyle.pseudo,
+ );
+
+ static const Keyword THIS = const Keyword(
+ /* index = */ 144,
+ "this",
+ "THIS",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword THROW = const Keyword(
+ /* index = */ 145,
+ "throw",
+ "THROW",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword TRUE = const Keyword(
+ /* index = */ 146,
+ "true",
+ "TRUE",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword TRY = const Keyword(
+ /* index = */ 147,
+ "try",
+ "TRY",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword TYPEDEF = const Keyword(
+ /* index = */ 148,
+ "typedef",
+ "TYPEDEF",
+ KeywordStyle.builtIn,
+ isTopLevelKeyword: true,
+ );
+
+ static const Keyword VAR = const Keyword(
+ /* index = */ 149,
+ "var",
+ "VAR",
+ KeywordStyle.reserved,
+ isModifier: true,
+ );
+
+ static const Keyword VOID = const Keyword(
+ /* index = */ 150,
+ "void",
+ "VOID",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword WHEN = const Keyword(
+ /* index = */ 151,
+ "when",
+ 'WHEN',
+ KeywordStyle.pseudo,
+ );
+
+ static const Keyword WHILE = const Keyword(
+ /* index = */ 152,
+ "while",
+ "WHILE",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword WITH = const Keyword(
+ /* index = */ 153,
+ "with",
+ "WITH",
+ KeywordStyle.reserved,
+ );
+
+ static const Keyword YIELD = const Keyword(
+ /* index = */ 154,
+ "yield",
+ "YIELD",
+ KeywordStyle.pseudo,
+ );
+
+ static const List<Keyword> values = const <Keyword>[
+ ABSTRACT,
+ AS,
+ ASSERT,
+ ASYNC,
+ AUGMENT,
+ AWAIT,
+ BASE,
+ BREAK,
+ CASE,
+ CATCH,
+ CLASS,
+ CONST,
+ CONTINUE,
+ COVARIANT,
+ DEFAULT,
+ DEFERRED,
+ DO,
+ DYNAMIC,
+ ELSE,
+ ENUM,
+ EXPORT,
+ EXTENDS,
+ EXTENSION,
+ EXTERNAL,
+ FACTORY,
+ FALSE,
+ FINAL,
+ FINALLY,
+ FOR,
+ FUNCTION,
+ GET,
+ HIDE,
+ IF,
+ IMPLEMENTS,
+ IMPORT,
+ IN,
+ INOUT,
+ INTERFACE,
+ IS,
+ LATE,
+ LIBRARY,
+ MIXIN,
+ NATIVE,
+ NEW,
+ NULL,
+ OF,
+ ON,
+ OPERATOR,
+ OUT,
+ PART,
+ PATCH,
+ REQUIRED,
+ RETHROW,
+ RETURN,
+ SEALED,
+ SET,
+ SHOW,
+ SOURCE,
+ STATIC,
+ SUPER,
+ SWITCH,
+ SYNC,
+ THIS,
+ THROW,
+ TRUE,
+ TRY,
+ TYPEDEF,
+ VAR,
+ VOID,
+ WHEN,
+ WHILE,
+ WITH,
+ YIELD,
+ ];
+
+ /**
+ * A table mapping the lexemes of keywords to the corresponding keyword.
+ */
+ static final Map<String, Keyword> keywords = _createKeywordMap();
+
+ final KeywordStyle keywordStyle;
+
+ /**
+ * Initialize a newly created keyword.
+ */
+ const Keyword(
+ int index,
+ String lexeme,
+ String name,
+ this.keywordStyle, {
+ bool isModifier = false,
+ bool isTopLevelKeyword = false,
+ int precedence = NO_PRECEDENCE,
+ }) : super(
+ index,
+ lexeme,
+ name,
+ precedence,
+ KEYWORD_TOKEN,
+ isModifier: isModifier,
+ isTopLevelKeyword: isTopLevelKeyword,
+ );
+
+ @override
+ bool get isBuiltIn => keywordStyle == KeywordStyle.builtIn;
+
+ bool get isBuiltInOrPseudo => isBuiltIn || isPseudo;
+
+ @override
+ bool get isPseudo => keywordStyle == KeywordStyle.pseudo;
+
+ @override
+ bool get isReservedWord => keywordStyle == KeywordStyle.reserved;
+
+ /**
+ * The name of the keyword type.
+ */
+ @override
+ String get name => lexeme.toUpperCase();
+
+ @override
+ String toString() => name;
+
+ /**
+ * Create a table mapping the lexemes of keywords to the corresponding keyword
+ * and return the table that was created.
+ */
+ static Map<String, Keyword> _createKeywordMap() {
+ LinkedHashMap<String, Keyword> result =
+ new LinkedHashMap<String, Keyword>();
+ for (Keyword keyword in values) {
+ result[keyword.lexeme] = keyword;
+ }
+ return result;
+ }
+}
+
+enum KeywordStyle { reserved, builtIn, pseudo }
+
+/**
+ * A token representing a keyword in the language.
+ */
+class KeywordToken extends SimpleToken {
+ /**
+ * Initialize a newly created token to represent the given [keyword] at the
+ * given [offset].
+ */
+ KeywordToken(super.keyword, super.offset, [super.precedingComment]);
+
+ @override
+ bool get isIdentifier => keyword.isPseudo || keyword.isBuiltIn;
+
+ @override
+ bool get isKeyword => true;
+
+ @override
+ bool get isKeywordOrIdentifier => true;
+
+ @override
+ Keyword get keyword => type as Keyword;
+
+ @override
+ Object value() => keyword;
+}
+
+/**
+ * A specialized comment token representing a language version
+ * (e.g. '// @dart = 2.1').
+ */
+class LanguageVersionToken extends CommentToken {
+ /**
+ * The major language version.
+ */
+ final int major;
+
+ /**
+ * The minor language version.
+ */
+ final int minor;
+
+ LanguageVersionToken.from(String text, int offset, this.major, this.minor)
+ : super(TokenType.SINGLE_LINE_COMMENT, text, offset);
+}
+
+/// A token used to replace another token in the stream, while still keeping the
+/// old token around (in [replacedToken]). Automatically sets the offset and
+/// precedingComments from the data available on [replacedToken].
+class ReplacementToken extends SyntheticToken {
+ /// The token that this token replaces. This will normally be the token
+ /// representing what the user actually wrote.
+ final Token replacedToken;
+
+ @override
+ Token? beforeSynthetic;
+
+ ReplacementToken(TokenType type, this.replacedToken)
+ : super(type, replacedToken.offset) {
+ precedingComments = replacedToken.precedingComments;
+ }
+
+ @override
+ bool get isSynthetic => true;
+
+ @override
+ int get length => 0;
+}
+
+/**
+ * A token that was scanned from the input. Each token knows which tokens
+ * precede and follow it, acting as a link in a doubly linked list of tokens.
+ */
+class SimpleToken implements Token {
+ /**
+ * The previous token in the token stream.
+ */
+ @override
+ Token? previous;
+
+ @override
+ Token? next;
+
+ /**
+ * The first comment in the list of comments that precede this token.
+ */
+ CommentToken? _precedingComment;
+
+ /**
+ * The combined encoding of token type and offset.
+ */
+ int _typeAndOffset;
+
+ /**
+ * Initialize a newly created token to have the given [type] and [offset].
+ */
+ SimpleToken(TokenType type, int offset, [this._precedingComment])
+ : _typeAndOffset = (((offset + 1) << 8) | type.index) {
+ // See https://github.com/dart-lang/sdk/issues/50048 for details.
+ assert(offset >= -1);
+
+ // Assert the encoding of the [type] is fully reversible.
+ assert(type.index < 256 && _tokenTypesByIndex.length == 256);
+ assert(identical(offset, this.offset));
+ assert(identical(type, this.type), '$type != ${this.type}');
+
+ _setCommentParent(_precedingComment);
+ }
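+
+ // A worked example of the packing above (illustrative only): for offset 41
+ // and a type with index 0 (TokenType.EOF), the stored value is
+ // ((41 + 1) << 8) | 0 == 10752. The getters below recover the offset as
+ // (10752 >> 8) - 1 == 41 and the type index as 10752 & 0xff == 0.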
+
+ @override
+ Token? get beforeSynthetic => null;
+
+ @override
+ set beforeSynthetic(Token? previous) {
+ // ignored
+ }
+
+ @override
+ int get charCount => length;
+
+ @override
+ int get charEnd => end;
+
+ @override
+ int get charOffset => offset;
+
+ @override
+ int get end => offset + length;
+
+ @override
+ Token? get endGroup => null;
+
+ @override
+ bool get isEof => type == TokenType.EOF;
+
+ @override
+ bool get isIdentifier => false;
+
+ @override
+ bool get isKeyword => false;
+
+ @override
+ bool get isKeywordOrIdentifier => isIdentifier;
+
+ @override
+ bool get isModifier => type.isModifier;
+
+ @override
+ bool get isOperator => type.isOperator;
+
+ @override
+ bool get isSynthetic => length == 0;
+
+ @override
+ bool get isTopLevelKeyword => type.isTopLevelKeyword;
+
+ @override
+ bool get isUserDefinableOperator => type.isUserDefinableOperator;
+
+ @override
+ Keyword? get keyword => null;
+
+ @override
+ int get kind => type.kind;
+
+ @override
+ int get length => lexeme.length;
+
+ @override
+ String get lexeme => type.lexeme;
+
+ /**
+ * The offset from the beginning of the file to the first character in the
+ * token.
+ */
+ @override
+ int get offset => (_typeAndOffset >> 8) - 1;
+
+ /**
+ * Set the offset from the beginning of the file to the first character in
+ * the token to the given [offset].
+ */
+ @override
+ void set offset(int value) {
+ assert(_tokenTypesByIndex.length == 256);
+ // See https://github.com/dart-lang/sdk/issues/50048 for details.
+ assert(value >= -1);
+ _typeAndOffset = ((value + 1) << 8) | (_typeAndOffset & 0xff);
+ }
+
+ @override
+ CommentToken? get precedingComments => _precedingComment;
+
+ void set precedingComments(CommentToken? comment) {
+ _precedingComment = comment;
+ _setCommentParent(_precedingComment);
+ }
+
+ @override
+ String? get stringValue => type.stringValue;
+
+ /**
+ * The type of the token.
+ */
+ @override
+ TokenType get type => _tokenTypesByIndex[_typeAndOffset & 0xff];
+
+ /**
+ * The index of the type.
+ */
+ @override
+ int get typeIndex => _typeAndOffset & 0xff;
+
+ @override
+ bool matchesAny(List<TokenType> types) {
+ // [type] is a getter that accesses [_tokenTypesByIndex]:
+ TokenType type = this.type;
+ for (TokenType t in types) {
+ if (type == t) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @override
+ Token setNext(Token token) {
+ next = token;
+ token.previous = this;
+ token.beforeSynthetic = this;
+ return token;
+ }
+
+ @override
+ Token? setNextWithoutSettingPrevious(Token? token) {
+ next = token;
+ return token;
+ }
+
+ @override
+ String toString() => lexeme;
+
+ @override
+ Object value() => lexeme;
+
+ /**
+ * Sets the `parent` property to `this` for the given [comment] and all the
+ * next tokens.
+ */
+ @pragma("vm:prefer-inline")
+ void _setCommentParent(CommentToken? comment) {
+ while (comment != null) {
+ comment.parent = this;
+ comment = comment.next as CommentToken?;
+ }
+ }
+}
+
+/**
+ * A token whose value is independent of its type.
+ */
+class StringToken extends SimpleToken {
+ /**
+ * The lexeme represented by this token.
+ */
+ final String _value;
+
+ /**
+ * Initialize a newly created token to represent a token of the given [type]
+ * with the given [value] at the given [offset].
+ */
+ StringToken(super.type, String value, super.offset, [super.precedingComment])
+ : _value = StringUtilities.intern(value);
+
+ @override
+ bool get isIdentifier => kind == IDENTIFIER_TOKEN;
+
+ @override
+ String get lexeme => _value;
+
+ @override
+ String value() => _value;
+}
+
+/**
+ * A synthetic begin token.
+ */
+class SyntheticBeginToken extends BeginToken {
+ @override
+ Token? beforeSynthetic;
+
+ /**
+ * Initialize a newly created token to have the given [type] at the given
+ * [offset].
+ */
+ SyntheticBeginToken(super.type, super.offset, [super.precedingComment]);
+
+ @override
+ bool get isSynthetic => true;
+
+ @override
+ int get length => 0;
+}
+
+/**
+ * A synthetic keyword token.
+ */
+class SyntheticKeywordToken extends KeywordToken {
+ @override
+ Token? beforeSynthetic;
+
+ /**
+ * Initialize a newly created token to represent the given [keyword] at the
+ * given [offset].
+ */
+ SyntheticKeywordToken(super.keyword, super.offset);
+
+ @override
+ int get length => 0;
+}
+
+/**
+ * A token whose value is independent of its type.
+ */
+class SyntheticStringToken extends StringToken {
+ final int? _length;
+
+ @override
+ Token? beforeSynthetic;
+
+ /**
+ * Initialize a newly created token to represent a token of the given [type]
+ * with the given [value] at the given [offset]. If the [length] is
+ * not specified, then it defaults to the length of [value].
+ */
+ SyntheticStringToken(super.type, super.value, super.offset, [this._length]);
+
+ @override
+ bool get isSynthetic => true;
+
+ @override
+ int get length => _length ?? super.length;
+}
+
+/**
+ * A synthetic token.
+ */
+class SyntheticToken extends SimpleToken {
+ @override
+ Token? beforeSynthetic;
+
+ SyntheticToken(super.type, super.offset);
+
+ @override
+ bool get isSynthetic => true;
+
+ @override
+ int get length => 0;
+}
+
+/**
+ * A token that was scanned from the input. Each token knows which tokens
+ * precede and follow it, acting as a link in a doubly linked list of tokens.
+ *
+ * Clients may not extend, implement or mix-in this class.
+ */
+abstract class Token implements SyntacticEntity {
+ /**
+ * Initialize a newly created token to have the given [type] and [offset].
+ */
+ factory Token(TokenType type, int offset, [CommentToken? precedingComment]) =
+ SimpleToken;
+
+ /**
+ * Initialize a newly created end-of-file token to have the given [offset].
+ */
+ factory Token.eof(int offset, [CommentToken? precedingComments]) {
+ Token eof = new SimpleToken(TokenType.EOF, offset, precedingComments);
+ // EOF points to itself so there's always infinite look-ahead.
+ eof.previous = eof;
+ eof.next = eof;
+ return eof;
+ }
+
+ /**
+ * The token before this synthetic token,
+ * or `null` if this is not a synthetic `)`, `]`, `}`, or `>` token.
+ */
+ Token? get beforeSynthetic;
+
+ /**
+ * Set the token before this synthetic `)`, `]`, `}`, or `>` token;
+ * ignored for other tokens.
+ */
+ set beforeSynthetic(Token? previous);
+
+ /**
+ * The number of characters parsed by this token.
+ */
+ int get charCount;
+
+ /**
+ * The character offset of the end of this token within the source text.
+ */
+ int get charEnd;
+
+ /**
+ * The character offset of the start of this token within the source text.
+ */
+ int get charOffset;
+
+ @override
+ int get end;
+
+ /**
+ * The token that corresponds to this token, or `null` if this token is not
+ * the first of a pair of matching tokens (such as parentheses).
+ */
+ Token? get endGroup => null;
+
+ /**
+ * Return `true` if this token represents an end of file.
+ */
+ bool get isEof;
+
+ /**
+ * True if this token is an identifier. Some keywords are allowed as
+ * identifiers; see the implementation in [KeywordToken].
+ */
+ bool get isIdentifier;
+
+ /**
+ * True if this token is a keyword. Some keywords are allowed as
+ * identifiers; see the implementation in [KeywordToken].
+ */
+ bool get isKeyword;
+
+ /**
+ * True if this token is a keyword or an identifier.
+ */
+ bool get isKeywordOrIdentifier;
+
+ /**
+ * Return `true` if this token is a modifier such as `abstract` or `const`.
+ */
+ bool get isModifier;
+
+ /**
+ * Return `true` if this token represents an operator.
+ */
+ bool get isOperator;
+
+ /**
+ * Return `true` if this token is a synthetic token. A synthetic token is a
+ * token that was introduced by the parser in order to recover from an error
+ * in the code.
+ */
+ bool get isSynthetic;
+
+ /**
+ * Return `true` if this token is a keyword starting a top level declaration
+ * such as `class`, `enum`, `import`, etc.
+ */
+ bool get isTopLevelKeyword;
+
+ /**
+ * Return `true` if this token represents an operator that can be defined by
+ * users.
+ */
+ bool get isUserDefinableOperator;
+
+ /**
+ * Return the keyword, if a keyword token, or `null` otherwise.
+ */
+ Keyword? get keyword;
+
+ /**
+ * The kind enum of this token as determined by its [type].
+ */
+ int get kind;
+
+ @override
+ int get length;
+
+ /**
+ * Return the lexeme that represents this token.
+ *
+ * For [StringToken]s the [lexeme] includes the quotes, explicit escapes, etc.
+ */
+ String get lexeme;
+
+ /**
+ * Return the next token in the token stream.
+ */
+ Token? get next;
+
+ /**
+ * Set the next token in the token stream to the given [next] token.
+ */
+ void set next(Token? next);
+
+ @override
+ int get offset;
+
+ /**
+ * Set the offset from the beginning of the file to the first character in
+ * the token to the given [offset].
+ */
+ void set offset(int offset);
+
+ /**
+ * Return the first comment in the list of comments that precede this token,
+ * or `null` if there are no comments preceding this token. Additional
+ * comments can be reached by following the token stream using [next] until
+ * `null` is returned.
+ *
+ * For example, if the original contents were `/* one */ /* two */ id`, then
+ * the first preceding comment token will have a lexeme of `/* one */` and
+ * the next comment token will have a lexeme of `/* two */`.
+ */
+ CommentToken? get precedingComments;
+
+ /**
+ * Return the previous token in the token stream.
+ */
+ Token? get previous;
+
+ /**
+ * Set the previous token in the token stream to the given [token].
+ */
+ void set previous(Token? token);
+
+ /**
+ * For symbol and keyword tokens, returns the string value represented by this
+ * token. For [StringToken]s this method returns [:null:].
+ *
+ * For symbol [Token]s and [KeywordToken]s, the string value is a compile-time
+ * constant originating in the [TokenType] or in the [Keyword] instance.
+ * This allows testing for keywords and symbols using [:identical:], e.g.,
+ * [:identical('class', token.value):].
+ *
+ * Note that returning [:null:] for string tokens is important to identify
+ * symbols and keywords; we cannot use [lexeme] instead. The string literal
+ * "$a($b"
+ * produces ..., SymbolToken($), StringToken(a), StringToken((), ...
+ *
+ * After parsing the identifier 'a', the parser tests for a function
+ * declaration using [:identical(next.stringValue, '('):], which (rightfully)
+ * returns false because stringValue returns [:null:].
+ */
+ String? get stringValue;
+
+ /**
+ * Return the type of the token.
+ */
+ TokenType get type;
+
+ /**
+ * Return the index of the type of the token.
+ */
+ int get typeIndex;
+
+ /**
+ * Return `true` if this token has any one of the given [types].
+ */
+ bool matchesAny(List<TokenType> types);
+
+ /**
+ * Set the next token in the token stream to the given [token]. This has the
+ * side-effect of setting this token to be the previous token for the given
+ * token. Return the token that was passed in.
+ */
+ Token setNext(Token token);
+
+ /**
+ * Set the next token in the token stream to the given token without changing
+ * which token is the previous token for the given token. Return the token
+ * that was passed in.
+ */
+ Token? setNextWithoutSettingPrevious(Token? token);
+
+ /**
+ * Returns a textual representation of this token to be used for debugging
+ * purposes. The resulting string might contain information about the
+ * structure of the token, for example 'StringToken(foo)' for the identifier
+ * token 'foo'.
+ *
+ * Use [lexeme] for the text actually parsed by the token.
+ */
+ @override
+ String toString();
+
+ /**
+ * Return the value of this token. For keyword tokens, this is the keyword
+ * associated with the token, for other tokens it is the lexeme associated
+ * with the token.
+ */
+ Object value();
+
+ /**
+ * Compare the given tokens to find the token that appears first in the
+ * source being parsed. That is, return the left-most of all of the tokens.
+ * Return the token with the smallest offset, or `null` if all of the
+ * tokens are `null`.
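+ * For example, `lexicallyFirst(a, b)` returns `a` when both tokens are
+ * non-null and `a.offset <= b.offset`.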
+ */
+ static Token? lexicallyFirst([
+ Token? t1,
+ Token? t2,
+ Token? t3,
+ Token? t4,
+ Token? t5,
+ ]) {
+ Token? result = t1;
+ if (result == null || t2 != null && t2.offset < result.offset) {
+ result = t2;
+ }
+ if (result == null || t3 != null && t3.offset < result.offset) {
+ result = t3;
+ }
+ if (result == null || t4 != null && t4.offset < result.offset) {
+ result = t4;
+ }
+ if (result == null || t5 != null && t5.offset < result.offset) {
+ result = t5;
+ }
+ return result;
+ }
+}
+
+/**
+ * The classes (or groups) of tokens with a similar use.
+ */
+class TokenClass {
+ /**
+ * A value used to indicate that the token type is not part of any specific
+ * class of token.
+ */
+ static const TokenClass NO_CLASS = const TokenClass('NO_CLASS');
+
+ /**
+ * A value used to indicate that the token type is an additive operator.
+ */
+ static const TokenClass ADDITIVE_OPERATOR = const TokenClass(
+ 'ADDITIVE_OPERATOR',
+ ADDITIVE_PRECEDENCE,
+ );
+
+ /**
+ * A value used to indicate that the token type is an assignment operator.
+ */
+ static const TokenClass ASSIGNMENT_OPERATOR = const TokenClass(
+ 'ASSIGNMENT_OPERATOR',
+ ASSIGNMENT_PRECEDENCE,
+ );
+
+ /**
+ * A value used to indicate that the token type is a bitwise-and operator.
+ */
+ static const TokenClass BITWISE_AND_OPERATOR = const TokenClass(
+ 'BITWISE_AND_OPERATOR',
+ BITWISE_AND_PRECEDENCE,
+ );
+
+ /**
+ * A value used to indicate that the token type is a bitwise-or operator.
+ */
+ static const TokenClass BITWISE_OR_OPERATOR = const TokenClass(
+ 'BITWISE_OR_OPERATOR',
+ BITWISE_OR_PRECEDENCE,
+ );
+
+ /**
+ * A value used to indicate that the token type is a bitwise-xor operator.
+ */
+ static const TokenClass BITWISE_XOR_OPERATOR = const TokenClass(
+ 'BITWISE_XOR_OPERATOR',
+ BITWISE_XOR_PRECEDENCE,
+ );
+
+ /**
+ * A value used to indicate that the token type is a cascade operator.
+ */
+ static const TokenClass CASCADE_OPERATOR = const TokenClass(
+ 'CASCADE_OPERATOR',
+ CASCADE_PRECEDENCE,
+ );
+
+ /**
+ * A value used to indicate that the token type is a conditional operator.
+ */
+ static const TokenClass CONDITIONAL_OPERATOR = const TokenClass(
+ 'CONDITIONAL_OPERATOR',
+ CONDITIONAL_PRECEDENCE,
+ );
+
+ /**
+ * A value used to indicate that the token type is an equality operator.
+ */
+ static const TokenClass EQUALITY_OPERATOR = const TokenClass(
+ 'EQUALITY_OPERATOR',
+ EQUALITY_PRECEDENCE,
+ );
+
+ /**
+ * A value used to indicate that the token type is an if-null operator.
+ */
+ static const TokenClass IF_NULL_OPERATOR = const TokenClass(
+ 'IF_NULL_OPERATOR',
+ IF_NULL_PRECEDENCE,
+ );
+
+ /**
+ * A value used to indicate that the token type is a logical-and operator.
+ */
+ static const TokenClass LOGICAL_AND_OPERATOR = const TokenClass(
+ 'LOGICAL_AND_OPERATOR',
+ LOGICAL_AND_PRECEDENCE,
+ );
+
+ /**
+ * A value used to indicate that the token type is a logical-or operator.
+ */
+ static const TokenClass LOGICAL_OR_OPERATOR = const TokenClass(
+ 'LOGICAL_OR_OPERATOR',
+ LOGICAL_OR_PRECEDENCE,
+ );
+
+ /**
+ * A value used to indicate that the token type is a multiplicative operator.
+ */
+ static const TokenClass MULTIPLICATIVE_OPERATOR = const TokenClass(
+ 'MULTIPLICATIVE_OPERATOR',
+ MULTIPLICATIVE_PRECEDENCE,
+ );
+
+ /**
+ * A value used to indicate that the token type is a relational operator.
+ */
+ static const TokenClass RELATIONAL_OPERATOR = const TokenClass(
+ 'RELATIONAL_OPERATOR',
+ RELATIONAL_PRECEDENCE,
+ );
+
+ /**
+ * A value used to indicate that the token type is a shift operator.
+ */
+ static const TokenClass SHIFT_OPERATOR = const TokenClass(
+ 'SHIFT_OPERATOR',
+ SHIFT_PRECEDENCE,
+ );
+
+ /**
+ * A value used to indicate that the token type is a unary operator.
+ */
+ static const TokenClass UNARY_POSTFIX_OPERATOR = const TokenClass(
+ 'UNARY_POSTFIX_OPERATOR',
+ POSTFIX_PRECEDENCE,
+ );
+
+ /**
+ * A value used to indicate that the token type is a unary operator.
+ */
+ static const TokenClass UNARY_PREFIX_OPERATOR = const TokenClass(
+ 'UNARY_PREFIX_OPERATOR',
+ PREFIX_PRECEDENCE,
+ );
+
+ /**
+ * The name of the token class.
+ */
+ final String name;
+
+ /**
+ * The precedence of tokens of this class, or `0` if such tokens do not
+ * represent an operator.
+ */
+ final int precedence;
+
+ /**
+ * Initialize a newly created class of tokens to have the given [name] and
+ * [precedence].
+ */
+ const TokenClass(this.name, [this.precedence = NO_PRECEDENCE]);
+
+ @override
+ String toString() => name;
+}
+
+/**
+ * The types of tokens that can be returned by the scanner.
+ *
+ * Clients may not extend, implement or mix-in this class.
+ */
+class TokenType {
+ static const TokenType UNUSED = const TokenType(
+ /* index = */ 255,
+ '',
+ 'UNUSED',
+ NO_PRECEDENCE,
+ EOF_TOKEN,
+ );
+
+ /**
+ * The type of the token that marks the start or end of the input.
+ */
+ static const TokenType EOF = const TokenType(
+ /* index = */ 0,
+ '',
+ 'EOF',
+ NO_PRECEDENCE,
+ EOF_TOKEN,
+ );
+
+ static const TokenType DOUBLE = const TokenType(
+ /* index = */ 1,
+ 'double',
+ 'DOUBLE',
+ NO_PRECEDENCE,
+ DOUBLE_TOKEN,
+ stringValueShouldBeNull: true,
+ );
+
+ static const TokenType DOUBLE_WITH_SEPARATORS = const TokenType(
+ /* index = */ 2,
+ 'double',
+ 'DOUBLE_WITH_SEPARATORS',
+ NO_PRECEDENCE,
+ DOUBLE_TOKEN,
+ stringValueShouldBeNull: true,
+ );
+
+ static const TokenType HEXADECIMAL = const TokenType(
+ /* index = */ 3,
+ 'hexadecimal',
+ 'HEXADECIMAL',
+ NO_PRECEDENCE,
+ HEXADECIMAL_TOKEN,
+ stringValueShouldBeNull: true,
+ );
+
+ static const TokenType HEXADECIMAL_WITH_SEPARATORS = const TokenType(
+ /* index = */ 4,
+ 'hexadecimal',
+ 'HEXADECIMAL_WITH_SEPARATORS',
+ NO_PRECEDENCE,
+ HEXADECIMAL_TOKEN,
+ stringValueShouldBeNull: true,
+ );
+
+ static const TokenType IDENTIFIER = const TokenType(
+ /* index = */ 5,
+ 'identifier',
+ 'IDENTIFIER',
+ NO_PRECEDENCE,
+ IDENTIFIER_TOKEN,
+ stringValueShouldBeNull: true,
+ );
+
+ static const TokenType INT = const TokenType(
+ /* index = */ 6,
+ 'int',
+ 'INT',
+ NO_PRECEDENCE,
+ INT_TOKEN,
+ stringValueShouldBeNull: true,
+ );
+
+ static const TokenType INT_WITH_SEPARATORS = const TokenType(
+ /* index = */ 7,
+ 'int',
+ 'INT_WITH_SEPARATORS',
+ NO_PRECEDENCE,
+ INT_TOKEN,
+ stringValueShouldBeNull: true,
+ );
+
+ static const TokenType MULTI_LINE_COMMENT = const TokenType(
+ /* index = */ 8,
+ 'comment',
+ 'MULTI_LINE_COMMENT',
+ NO_PRECEDENCE,
+ COMMENT_TOKEN,
+ stringValueShouldBeNull: true,
+ );
+
+ static const TokenType SCRIPT_TAG = const TokenType(
+ /* index = */ 9,
+ 'script',
+ 'SCRIPT_TAG',
+ NO_PRECEDENCE,
+ SCRIPT_TOKEN,
+ );
+
+ static const TokenType SINGLE_LINE_COMMENT = const TokenType(
+ /* index = */ 10,
+ 'comment',
+ 'SINGLE_LINE_COMMENT',
+ NO_PRECEDENCE,
+ COMMENT_TOKEN,
+ stringValueShouldBeNull: true,
+ );
+
+ static const TokenType STRING = const TokenType(
+ /* index = */ 11,
+ 'string',
+ 'STRING',
+ NO_PRECEDENCE,
+ STRING_TOKEN,
+ stringValueShouldBeNull: true,
+ );
+
+ static const TokenType AMPERSAND = const TokenType(
+ /* index = */ 12,
+ '&',
+ 'AMPERSAND',
+ BITWISE_AND_PRECEDENCE,
+ AMPERSAND_TOKEN,
+ isOperator: true,
+ isBinaryOperator: true,
+ isUserDefinableOperator: true,
+ );
+
+ static const TokenType AMPERSAND_AMPERSAND = const TokenType(
+ /* index = */ 13,
+ '&&',
+ 'AMPERSAND_AMPERSAND',
+ LOGICAL_AND_PRECEDENCE,
+ AMPERSAND_AMPERSAND_TOKEN,
+ isOperator: true,
+ isBinaryOperator: true,
+ );
+
+ // This is not yet part of the language and not supported by the scanner.
+ static const TokenType AMPERSAND_AMPERSAND_EQ = const TokenType(
+ /* index = */ 14,
+ '&&=',
+ 'AMPERSAND_AMPERSAND_EQ',
+ ASSIGNMENT_PRECEDENCE,
+ AMPERSAND_AMPERSAND_EQ_TOKEN,
+ binaryOperatorOfCompoundAssignment: TokenType.AMPERSAND_AMPERSAND,
+ isOperator: true,
+ );
+
+ static const TokenType AMPERSAND_EQ = const TokenType(
+ /* index = */ 15,
+ '&=',
+ 'AMPERSAND_EQ',
+ ASSIGNMENT_PRECEDENCE,
+ AMPERSAND_EQ_TOKEN,
+ binaryOperatorOfCompoundAssignment: TokenType.AMPERSAND,
+ isOperator: true,
+ );
+
+ static const TokenType AT = const TokenType(
+ /* index = */ 16,
+ '@',
+ 'AT',
+ NO_PRECEDENCE,
+ AT_TOKEN,
+ );
+
+ static const TokenType BANG = const TokenType(
+ /* index = */ 17,
+ '!',
+ 'BANG',
+ PREFIX_PRECEDENCE,
+ BANG_TOKEN,
+ isOperator: true,
+ );
+
+ static const TokenType BANG_EQ = const TokenType(
+ /* index = */ 18,
+ '!=',
+ 'BANG_EQ',
+ EQUALITY_PRECEDENCE,
+ BANG_EQ_TOKEN,
+ isOperator: true,
+ );
+
+ static const TokenType BANG_EQ_EQ = const TokenType(
+ /* index = */ 19,
+ '!==',
+ 'BANG_EQ_EQ',
+ EQUALITY_PRECEDENCE,
+ BANG_EQ_EQ_TOKEN,
+ );
+
+ static const TokenType BAR = const TokenType(
+ /* index = */ 20,
+ '|',
+ 'BAR',
+ BITWISE_OR_PRECEDENCE,
+ BAR_TOKEN,
+ isOperator: true,
+ isBinaryOperator: true,
+ isUserDefinableOperator: true,
+ );
+
+ static const TokenType BAR_BAR = const TokenType(
+ /* index = */ 21,
+ '||',
+ 'BAR_BAR',
+ LOGICAL_OR_PRECEDENCE,
+ BAR_BAR_TOKEN,
+ isOperator: true,
+ isBinaryOperator: true,
+ );
+
+ // This is not yet part of the language and not supported by the scanner.
+ static const TokenType BAR_BAR_EQ = const TokenType(
+ /* index = */ 22,
+ '||=',
+ 'BAR_BAR_EQ',
+ ASSIGNMENT_PRECEDENCE,
+ BAR_BAR_EQ_TOKEN,
+ binaryOperatorOfCompoundAssignment: TokenType.BAR_BAR,
+ isOperator: true,
+ );
+
+ static const TokenType BAR_EQ = const TokenType(
+ /* index = */ 23,
+ '|=',
+ 'BAR_EQ',
+ ASSIGNMENT_PRECEDENCE,
+ BAR_EQ_TOKEN,
+ binaryOperatorOfCompoundAssignment: TokenType.BAR,
+ isOperator: true,
+ );
+
+ static const TokenType COLON = const TokenType(
+ /* index = */ 24,
+ ':',
+ 'COLON',
+ NO_PRECEDENCE,
+ COLON_TOKEN,
+ );
+
+ static const TokenType COMMA = const TokenType(
+ /* index = */ 25,
+ ',',
+ 'COMMA',
+ NO_PRECEDENCE,
+ COMMA_TOKEN,
+ );
+
+ static const TokenType CARET = const TokenType(
+ /* index = */ 26,
+ '^',
+ 'CARET',
+ BITWISE_XOR_PRECEDENCE,
+ CARET_TOKEN,
+ isOperator: true,
+ isBinaryOperator: true,
+ isUserDefinableOperator: true,
+ );
+
+ static const TokenType CARET_EQ = const TokenType(
+ /* index = */ 27,
+ '^=',
+ 'CARET_EQ',
+ ASSIGNMENT_PRECEDENCE,
+ CARET_EQ_TOKEN,
+ binaryOperatorOfCompoundAssignment: TokenType.CARET,
+ isOperator: true,
+ );
+
+ static const TokenType CLOSE_CURLY_BRACKET = const TokenType(
+ /* index = */ 28,
+ '}',
+ 'CLOSE_CURLY_BRACKET',
+ NO_PRECEDENCE,
+ CLOSE_CURLY_BRACKET_TOKEN,
+ );
+
+ static const TokenType CLOSE_PAREN = const TokenType(
+ /* index = */ 29,
+ ')',
+ 'CLOSE_PAREN',
+ NO_PRECEDENCE,
+ CLOSE_PAREN_TOKEN,
+ );
+
+ static const TokenType CLOSE_SQUARE_BRACKET = const TokenType(
+ /* index = */ 30,
+ ']',
+ 'CLOSE_SQUARE_BRACKET',
+ NO_PRECEDENCE,
+ CLOSE_SQUARE_BRACKET_TOKEN,
+ );
+
+ static const TokenType EQ = const TokenType(
+ /* index = */ 31,
+ '=',
+ 'EQ',
+ ASSIGNMENT_PRECEDENCE,
+ EQ_TOKEN,
+ isOperator: true,
+ );
+
+ static const TokenType EQ_EQ = const TokenType(
+ /* index = */ 32,
+ '==',
+ 'EQ_EQ',
+ EQUALITY_PRECEDENCE,
+ EQ_EQ_TOKEN,
+ isOperator: true,
+ isBinaryOperator: true,
+ isUserDefinableOperator: true,
+ );
+
+ /// The `===` operator is not supported in the Dart language
+ /// but is parsed as such by the scanner to support better recovery
+ /// when a JavaScript code snippet is pasted into a Dart file.
+ static const TokenType EQ_EQ_EQ = const TokenType(
+ /* index = */ 33,
+ '===',
+ 'EQ_EQ_EQ',
+ EQUALITY_PRECEDENCE,
+ EQ_EQ_EQ_TOKEN,
+ );
+
+ static const TokenType FUNCTION = const TokenType(
+ /* index = */ 34,
+ '=>',
+ 'FUNCTION',
+ NO_PRECEDENCE,
+ FUNCTION_TOKEN,
+ );
+
+ static const TokenType GT = const TokenType(
+ /* index = */ 35,
+ '>',
+ 'GT',
+ RELATIONAL_PRECEDENCE,
+ GT_TOKEN,
+ isOperator: true,
+ isBinaryOperator: true,
+ isUserDefinableOperator: true,
+ );
+
+ static const TokenType GT_EQ = const TokenType(
+ /* index = */ 36,
+ '>=',
+ 'GT_EQ',
+ RELATIONAL_PRECEDENCE,
+ GT_EQ_TOKEN,
+ isOperator: true,
+ isBinaryOperator: true,
+ isUserDefinableOperator: true,
+ );
+
+ static const TokenType GT_GT = const TokenType(
+ /* index = */ 37,
+ '>>',
+ 'GT_GT',
+ SHIFT_PRECEDENCE,
+ GT_GT_TOKEN,
+ isOperator: true,
+ isBinaryOperator: true,
+ isUserDefinableOperator: true,
+ );
+
+ static const TokenType GT_GT_EQ = const TokenType(
+ /* index = */ 38,
+ '>>=',
+ 'GT_GT_EQ',
+ ASSIGNMENT_PRECEDENCE,
+ GT_GT_EQ_TOKEN,
+ binaryOperatorOfCompoundAssignment: TokenType.GT_GT,
+ isOperator: true,
+ );
+
+ static const TokenType GT_GT_GT = const TokenType(
+ /* index = */ 39,
+ '>>>',
+ 'GT_GT_GT',
+ SHIFT_PRECEDENCE,
+ GT_GT_GT_TOKEN,
+ isOperator: true,
+ isBinaryOperator: true,
+ isUserDefinableOperator: true,
+ );
+
+ static const TokenType GT_GT_GT_EQ = const TokenType(
+ /* index = */ 40,
+ '>>>=',
+ 'GT_GT_GT_EQ',
+ ASSIGNMENT_PRECEDENCE,
+ GT_GT_GT_EQ_TOKEN,
+ binaryOperatorOfCompoundAssignment: TokenType.GT_GT_GT,
+ isOperator: true,
+ );
+
+ static const TokenType HASH = const TokenType(
+ /* index = */ 41,
+ '#',
+ 'HASH',
+ NO_PRECEDENCE,
+ HASH_TOKEN,
+ );
+
+ static const TokenType INDEX = const TokenType(
+ /* index = */ 42,
+ '[]',
+ 'INDEX',
+ SELECTOR_PRECEDENCE,
+ INDEX_TOKEN,
+ isOperator: true,
+ isUserDefinableOperator: true,
+ );
+
+ static const TokenType INDEX_EQ = const TokenType(
+ /* index = */ 43,
+ '[]=',
+ 'INDEX_EQ',
+ NO_PRECEDENCE,
+ INDEX_EQ_TOKEN,
+ isOperator: true,
+ isUserDefinableOperator: true,
+ );
+
+ static const TokenType LT = const TokenType(
+ /* index = */ 44,
+ '<',
+ 'LT',
+ RELATIONAL_PRECEDENCE,
+ LT_TOKEN,
+ isOperator: true,
+ isBinaryOperator: true,
+ isUserDefinableOperator: true,
+ );
+
+ static const TokenType LT_EQ = const TokenType(
+ /* index = */ 45,
+ '<=',
+ 'LT_EQ',
+ RELATIONAL_PRECEDENCE,
+ LT_EQ_TOKEN,
+ isOperator: true,
+ isBinaryOperator: true,
+ isUserDefinableOperator: true,
+ );
+
+ static const TokenType LT_LT = const TokenType(
+ /* index = */ 46,
+ '<<',
+ 'LT_LT',
+ SHIFT_PRECEDENCE,
+ LT_LT_TOKEN,
+ isOperator: true,
+ isBinaryOperator: true,
+ isUserDefinableOperator: true,
+ );
+
+ static const TokenType LT_LT_EQ = const TokenType(
+ /* index = */ 47,
+ '<<=',
+ 'LT_LT_EQ',
+ ASSIGNMENT_PRECEDENCE,
+ LT_LT_EQ_TOKEN,
+ binaryOperatorOfCompoundAssignment: TokenType.LT_LT,
+ isOperator: true,
+ );
+
+ static const TokenType MINUS = const TokenType(
+ /* index = */ 48,
+ '-',
+ 'MINUS',
+ ADDITIVE_PRECEDENCE,
+ MINUS_TOKEN,
+ isOperator: true,
+ isBinaryOperator: true,
+ isUserDefinableOperator: true,
+ );
+
+ static const TokenType MINUS_EQ = const TokenType(
+ /* index = */ 49,
+ '-=',
+ 'MINUS_EQ',
+ ASSIGNMENT_PRECEDENCE,
+ MINUS_EQ_TOKEN,
+ binaryOperatorOfCompoundAssignment: TokenType.MINUS,
+ isOperator: true,
+ );
+
+ static const TokenType MINUS_MINUS = const TokenType(
+ /* index = */ 50,
+ '--',
+ 'MINUS_MINUS',
+ POSTFIX_PRECEDENCE,
+ MINUS_MINUS_TOKEN,
+ isOperator: true,
+ );
+
+ static const TokenType OPEN_CURLY_BRACKET = const TokenType(
+ /* index = */ 51,
+ '{',
+ 'OPEN_CURLY_BRACKET',
+ NO_PRECEDENCE,
+ OPEN_CURLY_BRACKET_TOKEN,
+ );
+
+ static const TokenType OPEN_PAREN = const TokenType(
+ /* index = */ 52,
+ '(',
+ 'OPEN_PAREN',
+ SELECTOR_PRECEDENCE,
+ OPEN_PAREN_TOKEN,
+ );
+
+ static const TokenType OPEN_SQUARE_BRACKET = const TokenType(
+ /* index = */ 53,
+ '[',
+ 'OPEN_SQUARE_BRACKET',
+ SELECTOR_PRECEDENCE,
+ OPEN_SQUARE_BRACKET_TOKEN,
+ );
+
+ static const TokenType PERCENT = const TokenType(
+ /* index = */ 54,
+ '%',
+ 'PERCENT',
+ MULTIPLICATIVE_PRECEDENCE,
+ PERCENT_TOKEN,
+ isOperator: true,
+ isBinaryOperator: true,
+ isUserDefinableOperator: true,
+ );
+
+ static const TokenType PERCENT_EQ = const TokenType(
+ /* index = */ 55,
+ '%=',
+ 'PERCENT_EQ',
+ ASSIGNMENT_PRECEDENCE,
+ PERCENT_EQ_TOKEN,
+ binaryOperatorOfCompoundAssignment: TokenType.PERCENT,
+ isOperator: true,
+ );
+
+ static const TokenType PERIOD = const TokenType(
+ /* index = */ 56,
+ '.',
+ 'PERIOD',
+ SELECTOR_PRECEDENCE,
+ PERIOD_TOKEN,
+ );
+
+ static const TokenType PERIOD_PERIOD = const TokenType(
+ /* index = */ 57,
+ '..',
+ 'PERIOD_PERIOD',
+ CASCADE_PRECEDENCE,
+ PERIOD_PERIOD_TOKEN,
+ isOperator: true,
+ );
+
+ static const TokenType PLUS = const TokenType(
+ /* index = */ 58,
+ '+',
+ 'PLUS',
+ ADDITIVE_PRECEDENCE,
+ PLUS_TOKEN,
+ isOperator: true,
+ isBinaryOperator: true,
+ isUserDefinableOperator: true,
+ );
+
+ static const TokenType PLUS_EQ = const TokenType(
+ /* index = */ 59,
+ '+=',
+ 'PLUS_EQ',
+ ASSIGNMENT_PRECEDENCE,
+ PLUS_EQ_TOKEN,
+ binaryOperatorOfCompoundAssignment: TokenType.PLUS,
+ isOperator: true,
+ );
+
+ static const TokenType PLUS_PLUS = const TokenType(
+ /* index = */ 60,
+ '++',
+ 'PLUS_PLUS',
+ POSTFIX_PRECEDENCE,
+ PLUS_PLUS_TOKEN,
+ isOperator: true,
+ );
+
+ static const TokenType QUESTION = const TokenType(
+ /* index = */ 61,
+ '?',
+ 'QUESTION',
+ CONDITIONAL_PRECEDENCE,
+ QUESTION_TOKEN,
+ isOperator: true,
+ );
+
+ static const TokenType QUESTION_PERIOD = const TokenType(
+ /* index = */ 62,
+ '?.',
+ 'QUESTION_PERIOD',
+ SELECTOR_PRECEDENCE,
+ QUESTION_PERIOD_TOKEN,
+ isOperator: true,
+ );
+
+ static const TokenType QUESTION_QUESTION = const TokenType(
+ /* index = */ 63,
+ '??',
+ 'QUESTION_QUESTION',
+ IF_NULL_PRECEDENCE,
+ QUESTION_QUESTION_TOKEN,
+ isOperator: true,
+ isBinaryOperator: true,
+ );
+
+ static const TokenType QUESTION_QUESTION_EQ = const TokenType(
+ /* index = */ 64,
+ '??=',
+ 'QUESTION_QUESTION_EQ',
+ ASSIGNMENT_PRECEDENCE,
+ QUESTION_QUESTION_EQ_TOKEN,
+ binaryOperatorOfCompoundAssignment: TokenType.QUESTION_QUESTION,
+ isOperator: true,
+ );
+
+ static const TokenType SEMICOLON = const TokenType(
+ /* index = */ 65,
+ ';',
+ 'SEMICOLON',
+ NO_PRECEDENCE,
+ SEMICOLON_TOKEN,
+ );
+
+ static const TokenType SLASH = const TokenType(
+ /* index = */ 66,
+ '/',
+ 'SLASH',
+ MULTIPLICATIVE_PRECEDENCE,
+ SLASH_TOKEN,
+ isOperator: true,
+ isBinaryOperator: true,
+ isUserDefinableOperator: true,
+ );
+
+ static const TokenType SLASH_EQ = const TokenType(
+ /* index = */ 67,
+ '/=',
+ 'SLASH_EQ',
+ ASSIGNMENT_PRECEDENCE,
+ SLASH_EQ_TOKEN,
+ binaryOperatorOfCompoundAssignment: TokenType.SLASH,
+ isOperator: true,
+ );
+
+ static const TokenType STAR = const TokenType(
+ /* index = */ 68,
+ '*',
+ 'STAR',
+ MULTIPLICATIVE_PRECEDENCE,
+ STAR_TOKEN,
+ isOperator: true,
+ isBinaryOperator: true,
+ isUserDefinableOperator: true,
+ );
+
+ static const TokenType STAR_EQ = const TokenType(
+ /* index = */ 69,
+ '*=',
+ 'STAR_EQ',
+ ASSIGNMENT_PRECEDENCE,
+ STAR_EQ_TOKEN,
+ binaryOperatorOfCompoundAssignment: TokenType.STAR,
+ isOperator: true,
+ );
+
+ static const TokenType STRING_INTERPOLATION_EXPRESSION = const TokenType(
+ /* index = */ 70,
+ '\${',
+ 'STRING_INTERPOLATION_EXPRESSION',
+ NO_PRECEDENCE,
+ STRING_INTERPOLATION_TOKEN,
+ );
+
+ static const TokenType STRING_INTERPOLATION_IDENTIFIER = const TokenType(
+ /* index = */ 71,
+ '\$',
+ 'STRING_INTERPOLATION_IDENTIFIER',
+ NO_PRECEDENCE,
+ STRING_INTERPOLATION_IDENTIFIER_TOKEN,
+ );
+
+ static const TokenType TILDE = const TokenType(
+ /* index = */ 72,
+ '~',
+ 'TILDE',
+ PREFIX_PRECEDENCE,
+ TILDE_TOKEN,
+ isOperator: true,
+ isUserDefinableOperator: true,
+ );
+
+ static const TokenType TILDE_SLASH = const TokenType(
+ /* index = */ 73,
+ '~/',
+ 'TILDE_SLASH',
+ MULTIPLICATIVE_PRECEDENCE,
+ TILDE_SLASH_TOKEN,
+ isOperator: true,
+ isBinaryOperator: true,
+ isUserDefinableOperator: true,
+ );
+
+ static const TokenType TILDE_SLASH_EQ = const TokenType(
+ /* index = */ 74,
+ '~/=',
+ 'TILDE_SLASH_EQ',
+ ASSIGNMENT_PRECEDENCE,
+ TILDE_SLASH_EQ_TOKEN,
+ binaryOperatorOfCompoundAssignment: TokenType.TILDE_SLASH,
+ isOperator: true,
+ );
+
+ static const TokenType BACKPING = const TokenType(
+ /* index = */ 75,
+ '`',
+ 'BACKPING',
+ NO_PRECEDENCE,
+ BACKPING_TOKEN,
+ );
+
+ static const TokenType BACKSLASH = const TokenType(
+ /* index = */ 76,
+ '\\',
+ 'BACKSLASH',
+ NO_PRECEDENCE,
+ BACKSLASH_TOKEN,
+ );
+
+ static const TokenType PERIOD_PERIOD_PERIOD = const TokenType(
+ /* index = */ 77,
+ '...',
+ 'PERIOD_PERIOD_PERIOD',
+ NO_PRECEDENCE,
+ PERIOD_PERIOD_PERIOD_TOKEN,
+ );
+
+ static const TokenType PERIOD_PERIOD_PERIOD_QUESTION = const TokenType(
+ /* index = */ 78,
+ '...?',
+ 'PERIOD_PERIOD_PERIOD_QUESTION',
+ NO_PRECEDENCE,
+ PERIOD_PERIOD_PERIOD_QUESTION_TOKEN,
+ );
+
+ static const TokenType QUESTION_PERIOD_PERIOD = const TokenType(
+ /* index = */ 79,
+ '?..',
+ 'QUESTION_PERIOD_PERIOD',
+ CASCADE_PRECEDENCE,
+ QUESTION_PERIOD_PERIOD_TOKEN,
+ );
+
+ static const TokenType AS = Keyword.AS;
+
+ static const TokenType IS = Keyword.IS;
+
+ /**
+ * Token type used by error tokens.
+ */
+ static const TokenType BAD_INPUT = const TokenType(
+ /* index = */ 80,
+ 'malformed input',
+ 'BAD_INPUT',
+ NO_PRECEDENCE,
+ BAD_INPUT_TOKEN,
+ stringValueShouldBeNull: true,
+ );
+
+ /**
+ * Token type used by synthetic tokens that are created during parser
+ * recovery (non-analyzer use case).
+ */
+ static const TokenType RECOVERY = const TokenType(
+ /* index = */ 81,
+ 'recovery',
+ 'RECOVERY',
+ NO_PRECEDENCE,
+ RECOVERY_TOKEN,
+ stringValueShouldBeNull: true,
+ );
+
+ // TODO(danrubel): "all" is misleading
+ // because this list does not include all TokenType instances.
+ static const List<TokenType> all = const <TokenType>[
+ TokenType.EOF,
+ TokenType.DOUBLE,
+ TokenType.DOUBLE_WITH_SEPARATORS,
+ TokenType.HEXADECIMAL,
+ TokenType.HEXADECIMAL_WITH_SEPARATORS,
+ TokenType.IDENTIFIER,
+ TokenType.INT,
+ TokenType.INT_WITH_SEPARATORS,
+ TokenType.MULTI_LINE_COMMENT,
+ TokenType.SCRIPT_TAG,
+ TokenType.SINGLE_LINE_COMMENT,
+ TokenType.STRING,
+ TokenType.AMPERSAND,
+ TokenType.AMPERSAND_AMPERSAND,
+ TokenType.AMPERSAND_EQ,
+ TokenType.AT,
+ TokenType.BANG,
+ TokenType.BANG_EQ,
+ TokenType.BAR,
+ TokenType.BAR_BAR,
+ TokenType.BAR_EQ,
+ TokenType.COLON,
+ TokenType.COMMA,
+ TokenType.CARET,
+ TokenType.CARET_EQ,
+ TokenType.CLOSE_CURLY_BRACKET,
+ TokenType.CLOSE_PAREN,
+ TokenType.CLOSE_SQUARE_BRACKET,
+ TokenType.EQ,
+ TokenType.EQ_EQ,
+ TokenType.FUNCTION,
+ TokenType.GT,
+ TokenType.GT_EQ,
+ TokenType.GT_GT,
+ TokenType.GT_GT_EQ,
+ TokenType.HASH,
+ TokenType.INDEX,
+ TokenType.INDEX_EQ,
+ TokenType.LT,
+ TokenType.LT_EQ,
+ TokenType.LT_LT,
+ TokenType.LT_LT_EQ,
+ TokenType.MINUS,
+ TokenType.MINUS_EQ,
+ TokenType.MINUS_MINUS,
+ TokenType.OPEN_CURLY_BRACKET,
+ TokenType.OPEN_PAREN,
+ TokenType.OPEN_SQUARE_BRACKET,
+ TokenType.PERCENT,
+ TokenType.PERCENT_EQ,
+ TokenType.PERIOD,
+ TokenType.PERIOD_PERIOD,
+ TokenType.PLUS,
+ TokenType.PLUS_EQ,
+ TokenType.PLUS_PLUS,
+ TokenType.QUESTION,
+ TokenType.QUESTION_PERIOD,
+ TokenType.QUESTION_QUESTION,
+ TokenType.QUESTION_QUESTION_EQ,
+ TokenType.SEMICOLON,
+ TokenType.SLASH,
+ TokenType.SLASH_EQ,
+ TokenType.STAR,
+ TokenType.STAR_EQ,
+ TokenType.STRING_INTERPOLATION_EXPRESSION,
+ TokenType.STRING_INTERPOLATION_IDENTIFIER,
+ TokenType.TILDE,
+ TokenType.TILDE_SLASH,
+ TokenType.TILDE_SLASH_EQ,
+ TokenType.BACKPING,
+ TokenType.BACKSLASH,
+ TokenType.PERIOD_PERIOD_PERIOD,
+ TokenType.PERIOD_PERIOD_PERIOD_QUESTION,
+
+ // TODO(danrubel): Should these be added to the "all" list?
+ //TokenType.IS,
+ //TokenType.AS,
+
+ // These are not yet part of the language and not supported by the scanner:
+ //TokenType.AMPERSAND_AMPERSAND_EQ,
+ //TokenType.BAR_BAR_EQ,
+
+ // Supported by the scanner but not part of the language
+ //TokenType.BANG_EQ_EQ,
+ //TokenType.EQ_EQ_EQ,
+
+ // Used by synthetic tokens generated during recovery
+ //TokenType.BAD_INPUT,
+ //TokenType.RECOVERY,
+ ];
+
+ /**
+ * A unique index identifying this [TokenType].
+ */
+ final int index;
+
+ /**
+ * The binary operator that is invoked by this compound assignment operator,
+ * or `null` if this is not a compound assignment operator.
+ */
+ final TokenType? binaryOperatorOfCompoundAssignment;
+
+ final int kind;
+
+ /**
+ * `true` if this token type represents a modifier
+ * such as `abstract` or `const`.
+ */
+ final bool isModifier;
+
+ /**
+ * `true` if this token type represents an operator.
+ */
+ final bool isOperator;
+
+ /**
+ * `true` if this token type represents a binary operator.
+ */
+ final bool isBinaryOperator;
+
+ /**
+ * `true` if this token type represents a keyword starting a top level
+ * declaration such as `class`, `enum`, `import`, etc.
+ */
+ final bool isTopLevelKeyword;
+
+ /**
+ * `true` if this token type represents an operator
+ * that can be defined by users.
+ */
+ final bool isUserDefinableOperator;
+
+ /**
+ * The lexeme that defines this type of token, or a descriptive placeholder
+ * (such as 'identifier') when there is more than one possible lexeme for
+ * this type of token.
+ */
+ final String lexeme;
+
+ /**
+ * The name of the token type.
+ */
+ final String name;
+
+ /**
+ * The precedence of this type of token,
+ * or `0` if the token does not represent an operator.
+ */
+ final int precedence;
+
+ /**
+ * See [Token.stringValue] for an explanation.
+ */
+ final String? stringValue;
+
+ const TokenType(
+ this.index,
+ this.lexeme,
+ this.name,
+ this.precedence,
+ this.kind, {
+ this.binaryOperatorOfCompoundAssignment,
+ this.isBinaryOperator = false,
+ this.isModifier = false,
+ this.isOperator = false,
+ this.isTopLevelKeyword = false,
+ this.isUserDefinableOperator = false,
+ bool stringValueShouldBeNull = false,
+ }) : this.stringValue = stringValueShouldBeNull ? null : lexeme;
+
+ /**
+ * Return `true` if this type of token represents an additive operator.
+ */
+ bool get isAdditiveOperator => precedence == ADDITIVE_PRECEDENCE;
+
+ /**
+ * Return `true` if this type of token represents an assignment operator.
+ */
+ bool get isAssignmentOperator => precedence == ASSIGNMENT_PRECEDENCE;
+
+ /**
+ * Return `true` if this type of token represents an associative operator. An
+ * associative operator is an operator for which the following equality is
+ * true: `(a * b) * c == a * (b * c)`. In other words, the result of
+ * applying the operator to multiple operands does not depend on the order
+ * in which those applications occur.
+ *
+ * Note: This method considers the logical-and and logical-or operators to be
+ * associative, even though the order in which those operators are applied
+ * can have an effect, because evaluation of the right-hand operand is
+ * conditional.
+ */
+ bool get isAssociativeOperator =>
+ this == TokenType.AMPERSAND ||
+ this == TokenType.AMPERSAND_AMPERSAND ||
+ this == TokenType.BAR ||
+ this == TokenType.BAR_BAR ||
+ this == TokenType.CARET ||
+ this == TokenType.PLUS ||
+ this == TokenType.STAR;
+
+ /**
+ * A flag indicating whether the keyword is a "built-in" identifier.
+ */
+ bool get isBuiltIn => false;
+
+ /**
+ * Return `true` if this type of token represents an equality operator.
+ */
+ bool get isEqualityOperator =>
+ this == TokenType.BANG_EQ || this == TokenType.EQ_EQ;
+
+ /**
+ * Return `true` if this type of token represents an increment operator.
+ */
+ bool get isIncrementOperator =>
+ this == TokenType.PLUS_PLUS || this == TokenType.MINUS_MINUS;
+
+ /**
+ * Return `true` if this type of token is a keyword.
+ */
+ bool get isKeyword => kind == KEYWORD_TOKEN;
+
+ /**
+ * Return `true` if this type of token represents a multiplicative operator.
+ */
+ bool get isMultiplicativeOperator => precedence == MULTIPLICATIVE_PRECEDENCE;
+
+ /**
+ * A flag indicating whether the keyword can be used as an identifier
+ * in some situations.
+ */
+ bool get isPseudo => false;
+
+ /**
+ * Return `true` if this type of token represents a relational operator.
+ */
+ bool get isRelationalOperator =>
+ this == TokenType.LT ||
+ this == TokenType.LT_EQ ||
+ this == TokenType.GT ||
+ this == TokenType.GT_EQ;
+
+ /// A flag indicating whether the keyword is a "reserved word".
+ bool get isReservedWord => false;
+
+ /**
+ * Return `true` if this type of token represents a selector operator
+ * (starting token of a selector).
+ */
+ bool get isSelectorOperator => precedence == SELECTOR_PRECEDENCE;
+
+ /**
+ * Return `true` if this type of token represents a shift operator.
+ */
+ bool get isShiftOperator => precedence == SHIFT_PRECEDENCE;
+
+ /**
+ * Return `true` if this type of token represents a unary postfix operator.
+ */
+ bool get isUnaryPostfixOperator => precedence == POSTFIX_PRECEDENCE;
+
+ /**
+ * Return `true` if this type of token represents a unary prefix operator.
+ */
+ bool get isUnaryPrefixOperator =>
+ precedence == PREFIX_PRECEDENCE ||
+ this == TokenType.MINUS ||
+ this == TokenType.PLUS_PLUS ||
+ this == TokenType.MINUS_MINUS;
+
+ @override
+ String toString() => name;
+}
+
+extension TokenIsAExtension on Token {
+ /// Returns `true` if this token's type is [value].
+ @pragma("vm:prefer-inline")
+ bool isA(TokenType value) {
+ return value.index == typeIndex;
+ }
+}
+
+extension TokenTypeIsAExtension on TokenType {
+ /// Returns `true` if this is the same token type as [value].
+ @pragma("vm:prefer-inline")
+ bool isA(TokenType value) {
+ return identical(value, this);
+ }
+}
diff --git a/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/token_constants.dart b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/token_constants.dart
new file mode 100644
index 0000000..50c2494
--- /dev/null
+++ b/pkg/analysis_server/tool/benchmark_tools/big_chain_benchmark/files/token_constants.dart
@@ -0,0 +1,92 @@
+// Copyright (c) 2015, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+library _fe_analyzer_shared.scanner.token_constants;
+
+import 'characters.dart';
+
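+// Note on the numbering scheme (as reconstructed from the values below):
+// single-character tokens reuse their character code (e.g. AMPERSAND_TOKEN is
+// $AMPERSAND), literal and keyword kinds use a mnemonic letter code (e.g. $d
+// for DOUBLE_TOKEN, $k for KEYWORD_TOKEN), and multi-character operators
+// chain consecutively upward from STRING_INTERPOLATION_TOKEN (128), keeping
+// every kind unique.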
+const int AMPERSAND_AMPERSAND_EQ_TOKEN = AMPERSAND_AMPERSAND_TOKEN + 1;
+
+const int AMPERSAND_AMPERSAND_TOKEN = BANG_EQ_TOKEN + 1;
+const int AMPERSAND_EQ_TOKEN = AMPERSAND_AMPERSAND_EQ_TOKEN + 1;
+const int AMPERSAND_TOKEN = $AMPERSAND;
+const int AT_TOKEN = $AT;
+const int BACKPING_TOKEN = $BACKPING;
+const int BACKSLASH_TOKEN = $BACKSLASH;
+const int BAD_INPUT_TOKEN = $X;
+const int BANG_EQ_EQ_TOKEN = INDEX_TOKEN + 1;
+const int BANG_EQ_TOKEN = BANG_EQ_EQ_TOKEN + 1;
+
+const int BANG_TOKEN = $BANG;
+const int BAR_BAR_EQ_TOKEN = BAR_BAR_TOKEN + 1;
+const int BAR_BAR_TOKEN = AMPERSAND_EQ_TOKEN + 1;
+const int BAR_EQ_TOKEN = BAR_BAR_EQ_TOKEN + 1;
+const int BAR_TOKEN = $BAR;
+const int CARET_EQ_TOKEN = GT_GT_TOKEN + 1;
+const int CARET_TOKEN = $CARET;
+const int CLOSE_CURLY_BRACKET_TOKEN = $CLOSE_CURLY_BRACKET;
+const int CLOSE_PAREN_TOKEN = $CLOSE_PAREN;
+const int CLOSE_SQUARE_BRACKET_TOKEN = $CLOSE_SQUARE_BRACKET;
+const int COLON_TOKEN = $COLON;
+const int COMMA_TOKEN = $COMMA;
+const int COMMENT_TOKEN = CARET_EQ_TOKEN + 1;
+const int DOUBLE_TOKEN = $d;
+const int EOF_TOKEN = 0;
+const int EQ_EQ_EQ_TOKEN = PERIOD_PERIOD_TOKEN + 1;
+const int EQ_EQ_TOKEN = EQ_EQ_EQ_TOKEN + 1;
+const int EQ_TOKEN = $EQ;
+const int FUNCTION_TOKEN = LT_EQ_TOKEN + 1;
+const int GENERIC_METHOD_TYPE_ASSIGN_TOKEN = QUESTION_QUESTION_EQ_TOKEN + 1;
+const int GENERIC_METHOD_TYPE_LIST_TOKEN = GENERIC_METHOD_TYPE_ASSIGN_TOKEN + 1;
+const int GT_EQ_TOKEN = LT_LT_TOKEN + 1;
+const int GT_GT_EQ_TOKEN = GT_EQ_TOKEN + 1;
+const int GT_GT_GT_EQ_TOKEN = PERIOD_PERIOD_PERIOD_QUESTION_TOKEN + 1;
+const int GT_GT_GT_TOKEN = GENERIC_METHOD_TYPE_LIST_TOKEN + 1;
+const int GT_GT_TOKEN = PERCENT_EQ_TOKEN + 1;
+const int GT_TOKEN = $GT;
+const int HASH_TOKEN = $HASH;
+
+const int HEXADECIMAL_TOKEN = $x;
+const int IDENTIFIER_TOKEN = $a;
+const int INDEX_EQ_TOKEN = GT_GT_EQ_TOKEN + 1;
+const int INDEX_TOKEN = INDEX_EQ_TOKEN + 1;
+const int INT_TOKEN = $i;
+const int KEYWORD_TOKEN = $k;
+const int LT_EQ_TOKEN = STRING_INTERPOLATION_TOKEN + 1;
+const int LT_LT_EQ_TOKEN = EQ_EQ_TOKEN + 1;
+const int LT_LT_TOKEN = LT_LT_EQ_TOKEN + 1;
+const int LT_TOKEN = $LT;
+const int MINUS_EQ_TOKEN = MINUS_MINUS_TOKEN + 1;
+const int MINUS_MINUS_TOKEN = PLUS_EQ_TOKEN + 1;
+const int MINUS_TOKEN = $MINUS;
+const int OPEN_CURLY_BRACKET_TOKEN = $OPEN_CURLY_BRACKET;
+const int OPEN_PAREN_TOKEN = $OPEN_PAREN;
+const int OPEN_SQUARE_BRACKET_TOKEN = $OPEN_SQUARE_BRACKET;
+const int PERCENT_EQ_TOKEN = TILDE_SLASH_TOKEN + 1;
+const int PERCENT_TOKEN = $PERCENT;
+const int PERIOD_PERIOD_PERIOD_QUESTION_TOKEN = GT_GT_GT_TOKEN + 1;
+const int PERIOD_PERIOD_PERIOD_TOKEN = SLASH_EQ_TOKEN + 1;
+const int PERIOD_PERIOD_TOKEN = PERIOD_PERIOD_PERIOD_TOKEN + 1;
+const int PERIOD_TOKEN = $PERIOD;
+const int PLUS_EQ_TOKEN = PLUS_PLUS_TOKEN + 1;
+const int PLUS_PLUS_TOKEN = STAR_EQ_TOKEN + 1;
+const int PLUS_TOKEN = $PLUS;
+const int QUESTION_PERIOD_PERIOD_TOKEN = GT_GT_GT_EQ_TOKEN + 1;
+const int QUESTION_PERIOD_TOKEN = STRING_INTERPOLATION_IDENTIFIER_TOKEN + 1;
+const int QUESTION_QUESTION_EQ_TOKEN = QUESTION_QUESTION_TOKEN + 1;
+const int QUESTION_QUESTION_TOKEN = QUESTION_PERIOD_TOKEN + 1;
+const int QUESTION_TOKEN = $QUESTION;
+const int RECOVERY_TOKEN = $r;
+const int SCRIPT_TOKEN = $b;
+const int SEMICOLON_TOKEN = $SEMICOLON;
+const int SLASH_EQ_TOKEN = FUNCTION_TOKEN + 1;
+const int SLASH_TOKEN = $SLASH;
+const int STAR_EQ_TOKEN = BAR_EQ_TOKEN + 1;
+const int STAR_TOKEN = $STAR;
+const int STRING_INTERPOLATION_IDENTIFIER_TOKEN = COMMENT_TOKEN + 1;
+const int STRING_INTERPOLATION_TOKEN = 128;
+const int STRING_TOKEN = $SQ;
+const int TILDE_SLASH_EQ_TOKEN = MINUS_EQ_TOKEN + 1;
+const int TILDE_SLASH_TOKEN = TILDE_SLASH_EQ_TOKEN + 1;
+const int TILDE_TOKEN = $TILDE;
diff --git a/pkg/analysis_server/tool/benchmark_tools/io_utils.dart b/pkg/analysis_server/tool/benchmark_tools/io_utils.dart
new file mode 100644
index 0000000..8a8f69c
--- /dev/null
+++ b/pkg/analysis_server/tool/benchmark_tools/io_utils.dart
@@ -0,0 +1,49 @@
+// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+import 'dart:io' show Directory, File, Platform, Process, ProcessResult;
+
+import 'package:_fe_analyzer_shared/src/util/filenames.dart';
+
+// This file is a copy of pkg/front_end/test/utils/io_utils.dart
+
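+/// Returns the path of the enclosing git repository by running
+/// `git rev-parse --show-toplevel` from the directory of the running script
+/// (falling back to the package config when no script path is available).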
+String computeRepoDir() {
+ Uri uri;
+ if (Platform.script.hasAbsolutePath) {
+ uri = Platform.script;
+ } else if (Platform.packageConfig != null) {
+ String packageConfig = Platform.packageConfig!;
+ String prefix = 'file://';
+ if (packageConfig.startsWith(prefix)) {
+ uri = Uri.parse(packageConfig);
+ } else {
+ uri = Uri.base.resolve(nativeToUriPath(packageConfig));
+ }
+ } else {
+ throw "Can't obtain the path to the SDK either via "
+ 'Platform.script or Platform.packageConfig';
+ }
+ String path = File.fromUri(uri).parent.path;
+ ProcessResult result = Process.runSync(
+ 'git',
+ ['rev-parse', '--show-toplevel'],
+ runInShell: true,
+ workingDirectory: path,
+ );
+ if (result.exitCode != 0) {
+ throw 'Git returned non-zero error code (${result.exitCode}):\n\n'
+ 'stdout: ${result.stdout}\n\n'
+ 'stderr: ${result.stderr}';
+ }
+ String dirPath = (result.stdout as String).trim();
+ if (!Directory(dirPath).existsSync()) {
+ throw 'The path returned by git ($dirPath) does not actually exist.';
+ }
+ return dirPath;
+}
+
+Uri computeRepoDirUri() {
+ String dirPath = computeRepoDir();
+ return Directory(dirPath).uri;
+}
diff --git a/pkg/analysis_server/tool/benchmark_tools/lsp_benchmark.dart b/pkg/analysis_server/tool/benchmark_tools/lsp_benchmark.dart
new file mode 100644
index 0000000..88444c5
--- /dev/null
+++ b/pkg/analysis_server/tool/benchmark_tools/lsp_benchmark.dart
@@ -0,0 +1,329 @@
+// Copyright (c) 2025, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+import 'dart:async';
+import 'dart:convert';
+import 'dart:io';
+
+import 'io_utils.dart';
+import 'messages.dart';
+
+enum LaunchFrom { Source, Dart, Aot }
+
+abstract class LspBenchmark {
+ static const int verbosity = 0;
+ final Uri repoDir = computeRepoDirUri();
+ late final Process p;
+ late final Timer longRunningRequestsTimer;
+ bool _launched = false;
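+ /// Completed with the latest `isAnalyzing` value whenever the server sends
+ /// a `$/analyzerStatus` notification, then replaced with a fresh completer
+ /// so each status change can be awaited in turn.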
+ Completer<bool> _analyzingCompleter = Completer();
+ final _buffer = <int>[];
+ int? _headerContentLength;
+ bool _printedVmServiceStuff = false;
+
+ /// There's something odd about receiving (several) responses with id 3 that
+ /// weren't requested...
+ int largestIdSeen = 3;
+
+ final RegExp _newLineRegExp = RegExp('\r?\n');
+ final Map<int, OutstandingRequest> _outstandingRequestsWithId = {};
+
+ Duration? firstAnalyzingDuration;
+
+ LspBenchmark() {
+ _checkCorrectDart();
+ }
+ List<Uri> get additionalWorkspaceUris => const [];
+ Uri? get cacheFolder => null;
+ LaunchFrom get launchFrom => LaunchFrom.Source;
+
+ Uri get rootUri;
+
+ Future<void> afterInitialization();
+
+ void exit() {
+ longRunningRequestsTimer.cancel();
+ p.kill();
+ }
+
+ Future<void> run() async {
+ await _launch();
+ p.stdout.listen(_listenToStdout);
+ p.stderr.listen(stderr.add);
+ longRunningRequestsTimer = Timer.periodic(
+ const Duration(seconds: 1),
+ _checkLongRunningRequests,
+ );
+
+ await _initialize(p);
+ print('Should now be initialized.');
+
+ await afterInitialization();
+ }
+
+ Future<OutstandingRequest?> send(Map<String, dynamic> json) async {
+ // Mostly copied from
+ // pkg/analysis_server/lib/src/lsp/channel/lsp_byte_stream_channel.dart
+ var jsonEncodedBody = jsonEncode(json);
+ var utf8EncodedBody = utf8.encode(jsonEncodedBody);
+ var header =
+ 'Content-Length: ${utf8EncodedBody.length}\r\n'
+ 'Content-Type: application/vscode-jsonrpc; charset=utf-8\r\n\r\n';
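+ // Example frame: a 2-byte body '{}' is sent as
+ // 'Content-Length: 2\r\n<content-type header>\r\n\r\n{}'; the blank line
+ // separates the header block from the JSON body.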
+ var asciiEncodedHeader = ascii.encode(header);
+
+ OutstandingRequest? result;
+
+ dynamic possibleId = json['id'];
+ if (possibleId is int) {
+ if (possibleId > largestIdSeen) {
+ largestIdSeen = possibleId;
+ }
+ result = OutstandingRequest();
+ _outstandingRequestsWithId[possibleId] = result;
+ if (verbosity > 2) {
+ print('Sending message with id $possibleId');
+ }
+ }
+
+ // Header is always ascii, body is always utf8!
+ p.stdin.add(asciiEncodedHeader);
+ p.stdin.add(utf8EncodedBody);
+ await p.stdin.flush();
+ if (verbosity > 2) {
+ print('\n\nMessage sent\n\n');
+ }
+ return result;
+ }
+
+ Future<void> waitWhileAnalyzing() async {
+ // Wait until it's done analyzing.
+ bool isAnalyzing = await _analyzingCompleter.future;
+ Stopwatch stopwatch = Stopwatch()..start();
+ while (isAnalyzing) {
+ isAnalyzing = await _analyzingCompleter.future;
+ }
+ print('Analyzing finished after ${stopwatch.elapsed}');
+ firstAnalyzingDuration ??= stopwatch.elapsed;
+ }
+
+ void _checkCorrectDart() {
+ Uri exe = Uri.base.resolveUri(Uri.file(Platform.resolvedExecutable));
+ Uri librariesDart = exe.resolve(
+ '../lib/_internal/sdk_library_metadata/lib/libraries.dart',
+ );
+ if (!File.fromUri(librariesDart).existsSync()) {
+ throw 'Execute with a dart that has '
+ "'../lib/_internal/sdk_library_metadata/lib/libraries.dart' "
+ 'available (e.g. out/ReleaseX64/dart-sdk/bin/dart)';
+ }
+ }
+
+ void _checkLongRunningRequests(Timer timer) {
+ bool reportedSomething = false;
+ for (MapEntry<int, OutstandingRequest> waitingFor
+ in _outstandingRequestsWithId.entries) {
+ if (waitingFor.value.stopwatch.elapsed > const Duration(seconds: 1)) {
+ if (!reportedSomething) {
+ print('----');
+ reportedSomething = true;
+ }
+ print(
+ '==> Has been waiting for request #${waitingFor.key} for '
+ '${waitingFor.value.stopwatch.elapsed}',
+ );
+ }
+ }
+ if (reportedSomething) {
+ print('----');
+ } else {
+ // print(" -- not waiting for anything -- ");
+ }
+ }
+
+ /// Copied from pkg/analysis_server/lib/src/lsp/lsp_packet_transformer.dart.
+ bool _endsWithCrLfCrLf() {
+ var l = _buffer.length;
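+ // 13 == '\r' and 10 == '\n', so this checks whether the buffer ends with
+ // the "\r\n\r\n" blank line that terminates an LSP header block.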
+ return l > 4 &&
+ _buffer[l - 1] == 10 &&
+ _buffer[l - 2] == 13 &&
+ _buffer[l - 3] == 10 &&
+ _buffer[l - 4] == 13;
+ }
+
+ Future<void> _initialize(Process p) async {
+ OutstandingRequest? request = await send(
+ Messages.initMessage(pid, rootUri, additionalWorkspaceUris),
+ );
+ await request?.completer.future;
+ await send(Messages.initNotification);
+
+ // Wait until it's done analyzing.
+ await waitWhileAnalyzing();
+ }
+
+ Future<void> _launch() async {
+ if (_launched) throw 'Already launched';
+ _launched = true;
+
+ List<String> cacheFolderArgs = [];
+ if (cacheFolder != null) {
+ cacheFolderArgs.add('--cache');
+ cacheFolderArgs.add(cacheFolder!.toFilePath());
+ }
+
+ switch (launchFrom) {
+ case LaunchFrom.Source:
+ File serverFile = File.fromUri(
+ repoDir.resolve('pkg/analysis_server/bin/server.dart'),
+ );
+ if (!serverFile.existsSync()) {
+ throw "Couldn't find 'analysis_server/bin/server.dart' "
+ 'expected it at $serverFile';
+ }
+
+ // TODO(jensj): Option of passing --profiler
+ p = await Process.start(Platform.resolvedExecutable, [
+ '--enable-vm-service',
+ '--profiler',
+ serverFile.path,
+ '--lsp',
+ '--port=9102',
+ ...cacheFolderArgs,
+ ]);
+ case LaunchFrom.Dart:
+ // TODO(jensj): Option of wrapping in `perf record -g` call.
+ p = await Process.start(Platform.resolvedExecutable, [
+ 'language-server',
+ '--lsp',
+ '--port=9102',
+ ...cacheFolderArgs,
+ ]);
+ case LaunchFrom.Aot:
+ File serverFile = File.fromUri(
+ repoDir.resolve('pkg/analysis_server/bin/server.aot'),
+ );
+ if (!serverFile.existsSync()) {
+ throw "Couldn't find 'analysis_server/bin/server.aot' "
+ 'expected it at $serverFile';
+ }
+
+ // TODO(jensj): Option of passing --profiler
+ Uri dart = Uri.base.resolveUri(Uri.file(Platform.resolvedExecutable));
+ File aotRuntime = File.fromUri(dart.resolve('dartaotruntime'));
+ if (!aotRuntime.existsSync()) {
+ throw "Couldn't find 'dartaotruntime' expected it at $aotRuntime";
+ }
+ p = await Process.start(aotRuntime.path, [
+ serverFile.path,
+ '--lsp',
+ '--port=9102',
+ ...cacheFolderArgs,
+ ]);
+ }
+
+ print('Launched with pid ${p.pid}');
+ }
+
+ void _listenToStdout(List<int> event) {
+ // General idea taken from
+ // pkg/analysis_server/lib/src/lsp/lsp_packet_transformer.dart
+ for (int element in event) {
+ _buffer.add(element);
+ if (verbosity > 3 &&
+ _buffer.length >= 1000 &&
+ _buffer.length % 1000 == 0) {
+ print(
+ 'DEBUG MESSAGE: Stdout buffer with length ${_buffer.length} so far: '
+ '${utf8.decode(_buffer)}',
+ );
+ }
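+ // Two-phase parse: while _headerContentLength is null we accumulate header
+ // bytes until the blank line arrives; afterwards we accumulate exactly
+ // _headerContentLength bytes of JSON body.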
+ if (_headerContentLength == null && _endsWithCrLfCrLf()) {
+ String headerRaw = utf8.decode(_buffer);
+ _buffer.clear();
+ // Split with a regex that makes the '\r' optional: the "The Dart VM service
+ // is listening on [..." message is '\n'-terminated (at least on Linux), and
+ // without this we'd get stuck because no message would ever start with
+ // 'Content-Length:'.
+ List<String> headers = headerRaw.split(_newLineRegExp);
+ for (String header in headers) {
+ if (!_printedVmServiceStuff &&
+ header.startsWith('The Dart VM service')) {
+ print('\n\n$header\n\n');
+ _printedVmServiceStuff = true;
+ }
+ if (header.startsWith('Content-Length:')) {
+ String contentLength =
+ header.substring('Content-Length:'.length).trim();
+ _headerContentLength = int.parse(contentLength);
+ break;
+ }
+ }
+ } else if (_headerContentLength != null &&
+ _buffer.length == _headerContentLength!) {
+ String messageString = utf8.decode(_buffer);
+ _buffer.clear();
+ _headerContentLength = null;
+ Map<String, dynamic> message =
+ json.decode(messageString) as Map<String, dynamic>;
+
+ // {"jsonrpc":"2.0","method":"$/analyzerStatus","params":{"isAnalyzing":false}}
+ dynamic method = message['method'];
+ if (method == r'$/analyzerStatus') {
+ dynamic params = message['params'];
+ if (params is Map) {
+ dynamic isAnalyzing = params['isAnalyzing'];
+ if (isAnalyzing is bool) {
+ _analyzingCompleter.complete(isAnalyzing);
+ _analyzingCompleter = Completer<bool>();
+ if (verbosity > 0) {
+ print('Got analyzerStatus isAnalyzing = $isAnalyzing');
+ }
+ }
+ }
+ }
+ dynamic possibleId = message['id'];
+ if (possibleId is int) {
+ if (possibleId > largestIdSeen) {
+ largestIdSeen = possibleId;
+ }
+
+ if (verbosity > 0) {
+ if (messageString.length > 100) {
+ print('Got message ${messageString.substring(0, 100)}...');
+ } else {
+ print('Got message $messageString');
+ }
+ }
+
+ OutstandingRequest? outstandingRequest = _outstandingRequestsWithId
+ .remove(possibleId);
+ if (outstandingRequest != null) {
+ outstandingRequest.stopwatch.stop();
+ outstandingRequest.completer.complete(message);
+ if (verbosity > 2) {
+ print(
+ ' => Got response for $possibleId in '
+ '${outstandingRequest.stopwatch.elapsed}',
+ );
+ }
+ }
+ } else if (verbosity > 1) {
+ if (messageString.length > 100) {
+ print('Got message ${messageString.substring(0, 100)}...');
+ } else {
+ print('Got message $messageString');
+ }
+ }
+ }
+ }
+ }
+}
+
+class OutstandingRequest {
+ final Stopwatch stopwatch = Stopwatch();
+ final Completer<Map<String, dynamic>> completer = Completer();
+ OutstandingRequest() {
+ stopwatch.start();
+ }
+}
diff --git a/pkg/analysis_server/tool/benchmark_tools/messages.dart b/pkg/analysis_server/tool/benchmark_tools/messages.dart
new file mode 100644
index 0000000..c769775
--- /dev/null
+++ b/pkg/analysis_server/tool/benchmark_tools/messages.dart
@@ -0,0 +1,181 @@
+// Copyright (c) 2025, the Dart project authors. Please see the AUTHORS file
+// for details. All rights reserved. Use of this source code is governed by a
+// BSD-style license that can be found in the LICENSE file.
+
+class Location {
+ final Uri uri;
+ final int line;
+ final int column;
+
+ Location(this.uri, this.line, this.column);
+
+ @override
+ String toString() => 'Location[$uri:$line:$column]';
+}
+
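+/// Builders for the raw JSON-RPC maps this benchmark sends to the server.
+///
+/// Note that 'clientRequestTime' is not part of standard LSP; it is an
+/// extension accepted by the Dart analysis server for latency tracking.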
+class Messages {
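+  /// The 'initialized' notification, sent after the 'initialize' response
+  /// to complete the handshake.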
+ static Map<String, dynamic> initNotification = {
+ 'jsonrpc': '2.0',
+ 'method': 'initialized',
+ 'params': {},
+ };
+
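+  /// A completion request at the given position. A 'triggerKind' of 1 means
+  /// 'Invoked': completion was requested explicitly, not via a trigger
+  /// character.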
+ static Map<String, dynamic> completion(
+ Uri uri,
+ int id, {
+ required int line,
+ required int character,
+ }) {
+ return {
+ 'jsonrpc': '2.0',
+ 'id': id,
+ 'method': 'textDocument/completion',
+ 'params': {
+ 'textDocument': {'uri': '$uri'},
+ 'position': {'line': line, 'character': character},
+ 'context': {'triggerKind': 1},
+ },
+ 'clientRequestTime': DateTime.now().millisecondsSinceEpoch,
+ };
+ }
+
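+  /// An incremental didChange notification. The zero-length range makes the
+  /// edit a pure insertion of [insert] at the given position.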
+ static Map<String, dynamic> didChange(
+ Uri uri, {
+ required int version,
+ required int insertAtLine,
+ int insertAtCharacter = 0,
+ required String insert,
+ }) {
+ return {
+ 'jsonrpc': '2.0',
+ 'method': 'textDocument/didChange',
+ 'params': {
+ 'textDocument': {'uri': '$uri', 'version': version},
+ 'contentChanges': [
+ {
+ 'range': {
+ 'start': {'line': insertAtLine, 'character': insertAtCharacter},
+ 'end': {'line': insertAtLine, 'character': insertAtCharacter},
+ },
+ 'rangeLength': 0,
+ 'text': insert,
+ },
+ ],
+ },
+ 'clientRequestTime': DateTime.now().millisecondsSinceEpoch,
+ };
+ }
+
+ static Map<String, dynamic> documentColor(Uri uri, int id) {
+ return {
+ 'jsonrpc': '2.0',
+ 'id': id,
+ 'method': 'textDocument/documentColor',
+ 'params': {
+ 'textDocument': {'uri': '$uri'},
+ },
+ 'clientRequestTime': DateTime.now().millisecondsSinceEpoch,
+ };
+ }
+
+ static Map<String, dynamic> documentSymbol(Uri uri, int id) {
+ return {
+ 'jsonrpc': '2.0',
+ 'id': id,
+ 'method': 'textDocument/documentSymbol',
+ 'params': {
+ 'textDocument': {'uri': '$uri'},
+ },
+ 'clientRequestTime': DateTime.now().millisecondsSinceEpoch,
+ };
+ }
+
+ static Map<String, dynamic> gotoDef(int id, Location location) {
+ return {
+ 'jsonrpc': '2.0',
+ 'id': id,
+ 'method': 'textDocument/definition',
+ 'params': {
+ 'textDocument': {'uri': '${location.uri}'},
+ 'position': {'line': location.line, 'character': location.column},
+ },
+ };
+ }
+
+ static Map<String, dynamic> implementation(int id, Location location) {
+ return {
+ 'jsonrpc': '2.0',
+ 'id': id,
+ 'method': 'textDocument/implementation',
+ 'params': {
+ 'textDocument': {'uri': '${location.uri}'},
+ 'position': {'line': location.line, 'character': location.column},
+ },
+ };
+ }
+
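+  /// The 'initialize' request. Workspace folder names are derived from the
+  /// last path segment, falling back to the second-to-last one when the URI
+  /// ends in a slash (making the last segment empty).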
+ static Map<String, dynamic> initMessage(
+ int processId,
+ Uri rootUri,
+ List<Uri> additionalWorkspaceUris,
+ ) {
+ String rootPath = rootUri.toFilePath();
+ String name = rootUri.pathSegments.last;
+ if (name.isEmpty) {
+ name = rootUri.pathSegments[rootUri.pathSegments.length - 2];
+ }
+ return {
+ 'id': 0,
+ 'jsonrpc': '2.0',
+ 'method': 'initialize',
+ 'params': {
+ 'processId': processId,
+ 'clientInfo': {'name': 'lspTestScript', 'version': '0.0.1'},
+ 'locale': 'en',
+ 'rootPath': rootPath,
+ 'rootUri': '$rootUri',
+ 'capabilities': {},
+ 'initializationOptions': {},
+ 'workspaceFolders': [
+ {'uri': '$rootUri', 'name': name},
+ ...additionalWorkspaceUris.map((uri) {
+ String name = uri.pathSegments.last;
+ if (name.isEmpty) {
+ name = uri.pathSegments[uri.pathSegments.length - 2];
+ }
+ return {'uri': '$uri', 'name': name};
+ }),
+ ],
+ },
+ };
+ }
+
+ static Map<String, dynamic> open(Uri uri, int version, String content) {
+ return {
+ 'jsonrpc': '2.0',
+ 'method': 'textDocument/didOpen',
+ 'params': {
+ 'textDocument': {
+ 'uri': '$uri',
+ 'languageId': 'dart',
+ 'version': version,
+ 'text': content,
+ },
+ },
+ 'clientRequestTime': DateTime.now().millisecondsSinceEpoch,
+ };
+ }
+
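+  /// A references request; 'includeDeclaration' asks the server to also
+  /// return the symbol's declaration.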
+ static Map<String, dynamic> references(int id, Location location) {
+ return {
+ 'jsonrpc': '2.0',
+ 'id': id,
+ 'method': 'textDocument/references',
+ 'params': {
+ 'textDocument': {'uri': '${location.uri}'},
+ 'position': {'line': location.line, 'character': location.column},
+ 'context': {'includeDeclaration': true},
+ },
+ };
+ }
+}