Version 2.12.0-47.0.dev

Merge commit 'f5e26456bfa6feed59136fee66c93d3096710094' into 'dev'
diff --git a/.dart_tool/package_config.json b/.dart_tool/package_config.json
index 7e58d34..c75c95d3 100644
--- a/.dart_tool/package_config.json
+++ b/.dart_tool/package_config.json
@@ -689,12 +689,6 @@
       "languageVersion": "2.0"
     },
     {
-      "name": "tflite_native",
-      "rootUri": "../third_party/pkg/tflite_native",
-      "packageUri": "lib/",
-      "languageVersion": "2.6"
-    },
-    {
       "name": "typed_data",
       "rootUri": "../third_party/pkg/typed_data",
       "packageUri": "lib/",
diff --git a/.packages b/.packages
index f264745..d31bc9b 100644
--- a/.packages
+++ b/.packages
@@ -107,7 +107,6 @@
 test_reflective_loader:third_party/pkg/test_reflective_loader/lib
 test_runner:pkg/test_runner/lib
 testing:pkg/testing/lib
-tflite_native:third_party/pkg/tflite_native/lib
 typed_data:third_party/pkg/typed_data/lib
 usage:third_party/pkg/usage/lib
 vector_math:third_party/pkg/vector_math/lib
diff --git a/DEPS b/DEPS
index 4003414..f53c12e 100644
--- a/DEPS
+++ b/DEPS
@@ -157,7 +157,6 @@
   "term_glyph_rev": "6a0f9b6fb645ba75e7a00a4e20072678327a0347",
   "test_reflective_loader_tag": "0.1.9",
   "test_rev": "e37a93bbeae23b215972d1659ac865d71287ff6a",
-  "tflite_native_rev": "0.4.0+1",
   "typed_data_tag": "f94fc57b8e8c0e4fe4ff6cfd8290b94af52d3719",
   "usage_tag": "16fbfd90c58f16e016a295a880bc722d2547d2c9",
   "vector_math_rev": "0c9f5d68c047813a6dcdeb88ba7a42daddf25025",
@@ -431,8 +430,6 @@
       Var("dart_git") + "term_glyph.git" + "@" + Var("term_glyph_rev"),
   Var("dart_root") + "/third_party/pkg/test":
       Var("dart_git") + "test.git" + "@" + Var("test_rev"),
-  Var("dart_root") + "/third_party/pkg/tflite_native":
-      Var("dart_git") + "tflite_native.git" + "@" + Var("tflite_native_rev"),
   Var("dart_root") + "/third_party/pkg/test_descriptor":
       Var("dart_git") + "test_descriptor.git" + "@" + Var("test_descriptor_tag"),
   Var("dart_root") + "/third_party/pkg/test_process":
diff --git a/pkg/analysis_server/lib/src/analysis_server.dart b/pkg/analysis_server/lib/src/analysis_server.dart
index a6a5a4e..4599f0a 100644
--- a/pkg/analysis_server/lib/src/analysis_server.dart
+++ b/pkg/analysis_server/lib/src/analysis_server.dart
@@ -625,11 +625,6 @@
   /// Whether to use the Language Server Protocol.
   bool useLanguageServerProtocol = false;
 
-  /// Base path to locate trained completion language model files.
-  ///
-  /// ML completion is enabled if this is non-null.
-  String completionModelFolder;
-
   /// The set of enabled features.
   FeatureSet featureSet = FeatureSet();
 }
diff --git a/pkg/analysis_server/lib/src/server/driver.dart b/pkg/analysis_server/lib/src/server/driver.dart
index 642f895..734177d 100644
--- a/pkg/analysis_server/lib/src/server/driver.dart
+++ b/pkg/analysis_server/lib/src/server/driver.dart
@@ -3,7 +3,6 @@
 // BSD-style license that can be found in the LICENSE file.
 
 import 'dart:async';
-import 'dart:ffi' as ffi;
 import 'dart:io';
 import 'dart:isolate';
 import 'dart:math';
@@ -24,11 +23,9 @@
 import 'package:analysis_server/src/server/lsp_stdio_server.dart';
 import 'package:analysis_server/src/server/sdk_configuration.dart';
 import 'package:analysis_server/src/server/stdio_server.dart';
-import 'package:analysis_server/src/services/completion/dart/completion_ranking.dart';
 import 'package:analysis_server/src/socket_server.dart';
 import 'package:analysis_server/src/utilities/request_statistics.dart';
 import 'package:analysis_server/starter.dart';
-import 'package:analyzer/exception/exception.dart';
 import 'package:analyzer/file_system/physical_file_system.dart';
 import 'package:analyzer/instrumentation/file_instrumentation.dart';
 import 'package:analyzer/instrumentation/instrumentation.dart';
@@ -38,7 +35,6 @@
 import 'package:args/args.dart';
 import 'package:cli_util/cli_util.dart';
 import 'package:linter/src/rules.dart' as linter;
-import 'package:path/path.dart' as path;
 import 'package:telemetry/crash_reporting.dart';
 import 'package:telemetry/telemetry.dart' as telemetry;
 
@@ -265,13 +261,6 @@
   /// The name of the flag to use the Language Server Protocol (LSP).
   static const String USE_LSP = 'lsp';
 
-  /// Whether or not to enable ML ranking for code completion.
-  static const String ENABLE_COMPLETION_MODEL = 'enable-completion-model';
-
-  /// The path on disk to a directory containing language model files for smart
-  /// code completion.
-  static const String COMPLETION_MODEL_FOLDER = 'completion-model';
-
   /// A directory to analyze in order to train an analysis server snapshot.
   static const String TRAIN_USING = 'train-using';
 
@@ -318,35 +307,6 @@
     var sdkConfig = SdkConfiguration.readFromSdk();
     analysisServerOptions.configurationOverrides = sdkConfig;
 
-    // ML model configuration.
-    // TODO(brianwilkerson) Uncomment the line below and delete the second line
-    //  when there is a new completion model to query. Until then we ignore the
-    //  flag to enable the model so that we can't try to read from a file that
-    //  doesn't exist.
-//    final bool enableCompletionModel = results[ENABLE_COMPLETION_MODEL];
-    final enableCompletionModel = false;
-    analysisServerOptions.completionModelFolder =
-        results[COMPLETION_MODEL_FOLDER];
-    if (results.wasParsed(ENABLE_COMPLETION_MODEL) && !enableCompletionModel) {
-      // This is the case where the user has explicitly turned off model-based
-      // code completion.
-      analysisServerOptions.completionModelFolder = null;
-    }
-    // TODO(devoncarew): Simplify this logic and use the value from sdkConfig.
-    if (enableCompletionModel &&
-        analysisServerOptions.completionModelFolder == null) {
-      // The user has enabled ML code completion without explicitly setting a
-      // model for us to choose, so use the default one. We need to walk over
-      // from $SDK/bin/snapshots/analysis_server.dart.snapshot to
-      // $SDK/bin/model/lexeme.
-      analysisServerOptions.completionModelFolder = path.join(
-        File.fromUri(Platform.script).parent.path,
-        '..',
-        'model',
-        'lexeme',
-      );
-    }
-
     // Analytics
     bool disableAnalyticsForSession = results[SUPPRESS_ANALYTICS_FLAG];
     if (results.wasParsed(TRAIN_USING)) {
@@ -601,7 +561,6 @@
           socketServer.analysisServer.shutdown();
           if (sendPort == null) exit(0);
         });
-        startCompletionRanking(socketServer, null, analysisServerOptions);
       },
           print: results[INTERNAL_PRINT_TO_CONSOLE]
               ? null
@@ -609,36 +568,6 @@
     }
   }
 
-  /// This will be invoked after createAnalysisServer has been called on the
-  /// socket server. At that point, we'll be able to send a server.error
-  /// notification in case model startup fails.
-  void startCompletionRanking(
-      SocketServer socketServer,
-      LspSocketServer lspSocketServer,
-      AnalysisServerOptions analysisServerOptions) {
-    // If ML completion is not enabled, or we're on a 32-bit machine, don't try
-    // and start the completion model.
-    if (analysisServerOptions.completionModelFolder == null ||
-        ffi.sizeOf<ffi.IntPtr>() == 4) {
-      return;
-    }
-
-    // Start completion model isolate if this is a 64 bit system and analysis
-    // server was configured to load a language model on disk.
-    CompletionRanking.instance =
-        CompletionRanking(analysisServerOptions.completionModelFolder);
-    CompletionRanking.instance.start().catchError((exception, stackTrace) {
-      // Disable smart ranking if model startup fails.
-      analysisServerOptions.completionModelFolder = null;
-      // TODO(brianwilkerson) Shutdown the isolates that have already been
-      //  started.
-      CompletionRanking.instance = null;
-      AnalysisEngine.instance.instrumentationService.logException(
-          CaughtException.withMessage(
-              'Failed to start ranking model isolate', exception, stackTrace));
-    });
-  }
-
   void startLspServer(
     ArgResults args,
     AnalysisServerOptions analysisServerOptions,
@@ -681,7 +610,6 @@
           exit(0);
         }
       });
-      startCompletionRanking(null, socketServer, analysisServerOptions);
     });
   }
 
@@ -772,18 +700,16 @@
         help: 'Pass in a directory to analyze for purposes of training an '
             'analysis server snapshot.');
 
-    parser.addFlag(ENABLE_COMPLETION_MODEL,
-        help: 'Whether or not to turn on ML ranking for code completion.');
-    parser.addOption(COMPLETION_MODEL_FOLDER,
-        valueHelp: 'path',
-        help: 'Path to the location of a code completion model.');
-
     //
     // Deprecated options - no longer read from.
     //
 
+    // Removed 11/15/2020.
+    parser.addOption('completion-model', hide: true);
     // Removed 11/8/2020.
     parser.addFlag('dartpad', hide: true);
+    // Removed 11/15/2020.
+    parser.addFlag('enable-completion-model', hide: true);
     // Removed 10/30/2020.
     parser.addMultiOption('enable-experiment', hide: true);
     // Removed 9/23/2020.
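
The ML options themselves are gone, but the driver keeps them registered as hidden, ignored options (alongside `dartpad` and `enable-experiment`) so that older command lines passing `--completion-model` or `--enable-completion-model` still parse. A minimal sketch of that deprecated-flag pattern with `package:args`, using a stand-alone parser rather than the server's real option set:

```dart
// Sketch only: keep retired flags registered but hidden so stale command
// lines such as `--enable-completion-model` still parse instead of throwing.
import 'package:args/args.dart';

void main(List<String> arguments) {
  final parser = ArgParser()
    ..addFlag('lsp', help: 'Use the Language Server Protocol.')
    // Deprecated options - still accepted, but no longer read from.
    ..addOption('completion-model', hide: true)
    ..addFlag('enable-completion-model', hide: true);

  final results = parser.parse(arguments);

  // The retired options parse without error but are simply never consulted.
  final useLsp = results['lsp'] as bool;
  print('lsp: $useLsp');

  // `hide: true` also keeps the retired options out of the usage text.
  print(parser.usage);
}
```
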
diff --git a/pkg/analysis_server/lib/src/server/sdk_configuration.dart b/pkg/analysis_server/lib/src/server/sdk_configuration.dart
index d952a21..08105f8 100644
--- a/pkg/analysis_server/lib/src/server/sdk_configuration.dart
+++ b/pkg/analysis_server/lib/src/server/sdk_configuration.dart
@@ -61,9 +61,6 @@
   /// Returns whether this SDK configuration has any configured values.
   bool get hasAnyOverrides => _values.isNotEmpty;
 
-  /// Return an override value for the analysis server's ML model file path.
-  String get mlModelPath => _values['server.ml.model.path'];
-
   @override
   String toString() => displayString;
 
diff --git a/pkg/analysis_server/lib/src/services/completion/dart/completion_manager.dart b/pkg/analysis_server/lib/src/services/completion/dart/completion_manager.dart
index 67150b8..0b05d08 100644
--- a/pkg/analysis_server/lib/src/services/completion/dart/completion_manager.dart
+++ b/pkg/analysis_server/lib/src/services/completion/dart/completion_manager.dart
@@ -10,7 +10,6 @@
 import 'package:analysis_server/src/services/completion/completion_performance.dart';
 import 'package:analysis_server/src/services/completion/dart/arglist_contributor.dart';
 import 'package:analysis_server/src/services/completion/dart/combinator_contributor.dart';
-import 'package:analysis_server/src/services/completion/dart/completion_ranking.dart';
 import 'package:analysis_server/src/services/completion/dart/extension_member_contributor.dart';
 import 'package:analysis_server/src/services/completion/dart/feature_computer.dart';
 import 'package:analysis_server/src/services/completion/dart/field_formal_contributor.dart';
@@ -36,7 +35,6 @@
 import 'package:analyzer/dart/ast/token.dart';
 import 'package:analyzer/dart/element/element.dart';
 import 'package:analyzer/dart/element/type.dart';
-import 'package:analyzer/exception/exception.dart';
 import 'package:analyzer/file_system/file_system.dart';
 import 'package:analyzer/src/dart/analysis/driver_based_analysis_context.dart';
 import 'package:analyzer/src/dart/ast/ast.dart';
@@ -116,10 +114,6 @@
 
     request.checkAborted();
 
-    final ranking = CompletionRanking.instance;
-    var probabilityFuture =
-        ranking != null ? ranking.predict(dartRequest) : Future.value(null);
-
     var range = dartRequest.target.computeReplacementRange(dartRequest.offset);
     (request as CompletionRequestImpl)
       ..replacementOffset = range.offset
@@ -169,35 +163,7 @@
       throw AbortCompletion();
     }
 
-    // Adjust suggestion relevance before returning
-    var suggestions = builder.suggestions.toList();
-    const SORT_TAG = 'DartCompletionManager - sort';
-    await performance.runAsync(SORT_TAG, (_) async {
-      if (ranking != null) {
-        request.checkAborted();
-        try {
-          suggestions = await ranking.rerank(
-              probabilityFuture,
-              suggestions,
-              includedElementNames,
-              includedSuggestionRelevanceTags,
-              dartRequest,
-              request.result.unit.featureSet);
-        } catch (exception, stackTrace) {
-          // TODO(brianwilkerson) Shutdown the isolates that have already been
-          //  started.
-          // Disable smart ranking if prediction fails.
-          CompletionRanking.instance = null;
-          AnalysisEngine.instance.instrumentationService.logException(
-              CaughtException.withMessage(
-                  'Failed to rerank completion suggestions',
-                  exception,
-                  stackTrace));
-        }
-      }
-    });
-    request.checkAborted();
-    return suggestions;
+    return builder.suggestions.toList();
   }
 
   void _addIncludedElementKinds(DartCompletionRequestImpl request) {
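
The re-ranking block removed above had a common shape: an optional asynchronous post-processing pass, bounded by a one-second timeout, that falls back to the unmodified suggestions on timeout or error. A self-contained sketch of that shape, with a hypothetical `Reranker` type standing in for the deleted `CompletionRanking` API:

```dart
// Illustrative only: an optional async post-processing step that falls back
// to the original results on timeout or failure, mirroring the deleted hook.
typedef Reranker = Future<List<String>> Function(List<String> suggestions);

Future<List<String>> applyOptionalReranker(
    List<String> suggestions, Reranker? reranker) async {
  if (reranker == null) {
    // Feature disabled: return the static-analysis suggestions untouched.
    return suggestions;
  }
  try {
    // Bound the extra work; fall back to the original list on timeout.
    return await reranker(suggestions)
        .timeout(const Duration(seconds: 1), onTimeout: () => suggestions);
  } catch (_) {
    // Any failure disables the enrichment for this request only.
    return suggestions;
  }
}

Future<void> main() async {
  final ranked = await applyOptionalReranker(
      ['length', 'isEmpty', 'first'], (s) async => s.reversed.toList());
  print(ranked); // [first, isEmpty, length]
}
```
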
diff --git a/pkg/analysis_server/lib/src/services/completion/dart/completion_ranking.dart b/pkg/analysis_server/lib/src/services/completion/dart/completion_ranking.dart
deleted file mode 100644
index 021aaf8..0000000
--- a/pkg/analysis_server/lib/src/services/completion/dart/completion_ranking.dart
+++ /dev/null
@@ -1,309 +0,0 @@
-// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
-// for details. All rights reserved. Use of this source code is governed by a
-// BSD-style license that can be found in the LICENSE file.
-
-import 'dart:collection';
-import 'dart:isolate';
-
-import 'package:analysis_server/src/protocol_server.dart';
-import 'package:analysis_server/src/provisional/completion/dart/completion_dart.dart';
-import 'package:analysis_server/src/services/completion/completion_performance.dart';
-import 'package:analysis_server/src/services/completion/dart/completion_ranking_internal.dart';
-import 'package:analysis_server/src/services/completion/dart/language_model.dart';
-import 'package:analyzer/dart/analysis/features.dart';
-
-/// Number of code completion isolates.
-// TODO(devoncarew): We need to explore the memory costs of running multiple ML
-// isolates.
-const int _ISOLATE_COUNT = 2;
-
-/// Number of lookback tokens.
-const int _LOOKBACK = 100;
-
-/// Minimum probability to prioritize model-only suggestion.
-const double _MODEL_RELEVANCE_CUTOFF = 0.5;
-
-/// Prediction service run by the model isolate.
-void entrypoint(SendPort sendPort) {
-  LanguageModel model;
-  final port = ReceivePort();
-  sendPort.send(port.sendPort);
-  port.listen((message) {
-    var response = <String, Map<String, double>>{};
-    switch (message['method']) {
-      case 'load':
-        model = LanguageModel.load(message['args'][0]);
-        break;
-      case 'predict':
-        response['data'] = model.predictWithScores(message['args']);
-        break;
-    }
-
-    message['port'].send(response);
-  });
-}
-
-class CompletionRanking {
-  /// Singleton instance.
-  static CompletionRanking instance;
-
-  /// Filesystem location of model files.
-  final String _directory;
-
-  /// Ports to communicate from main to model isolates.
-  List<SendPort> _writes;
-
-  /// Pointer for round robin load balancing over isolates.
-  int _index;
-
-  /// General performance metrics around ML completion.
-  final PerformanceMetrics performanceMetrics = PerformanceMetrics._();
-
-  CompletionRanking(this._directory);
-
-  /// Send an RPC to the isolate worker requesting that it load the model and
-  /// wait for it to respond.
-  Future<Map<String, Map<String, double>>> makeLoadRequest(
-      SendPort sendPort, List<String> args) async {
-    final receivePort = ReceivePort();
-    sendPort.send({
-      'method': 'load',
-      'args': args,
-      'port': receivePort.sendPort,
-    });
-    return await receivePort.first;
-  }
-
-  /// Send an RPC to the isolate worker requesting that it make a prediction and
-  /// wait for it to respond.
-  Future<Map<String, Map<String, double>>> makePredictRequest(
-      List<String> args) async {
-    final receivePort = ReceivePort();
-    _writes[_index].send({
-      'method': 'predict',
-      'args': args,
-      'port': receivePort.sendPort,
-    });
-    _index = (_index + 1) % _writes.length;
-    return await receivePort.first;
-  }
-
-  /// Return a next-token prediction starting at the completion request cursor
-  /// and walking back to find previous input tokens, or `null` if the
-  /// prediction isolates are not running.
-  Future<Map<String, double>> predict(DartCompletionRequest request) async {
-    if (_writes == null || _writes.isEmpty) {
-      // The field `_writes` is initialized in `start`, but the code that
-      // invokes `start` doesn't wait for it to complete. That means that it's
-      // possible for this method to be invoked before `_writes` is initialized.
-      // In those cases we return `null`.
-      return null;
-    }
-    final query = constructQuery(request, _LOOKBACK);
-    if (query == null) {
-      return Future.value();
-    }
-
-    performanceMetrics._incrementPredictionRequestCount();
-
-    var timer = Stopwatch()..start();
-    var response = await makePredictRequest(query);
-    timer.stop();
-
-    var result = response['data'];
-
-    performanceMetrics._addPredictionResult(PredictionResult(
-      result,
-      timer.elapsed,
-      request.source.fullName,
-      computeCompletionSnippet(request.sourceContents, request.offset),
-    ));
-
-    return result;
-  }
-
-  /// Transforms [CompletionSuggestion] relevances and
-  /// [IncludedSuggestionRelevanceTag] relevanceBoosts based on language model
-  /// predicted next-token probability distribution.
-  Future<List<CompletionSuggestion>> rerank(
-      Future<Map<String, double>> probabilityFuture,
-      List<CompletionSuggestion> suggestions,
-      Set<String> includedElementNames,
-      List<IncludedSuggestionRelevanceTag> includedSuggestionRelevanceTags,
-      DartCompletionRequest request,
-      FeatureSet featureSet) async {
-    assert((includedElementNames != null &&
-            includedSuggestionRelevanceTags != null) ||
-        (includedElementNames == null &&
-            includedSuggestionRelevanceTags == null));
-    final probability = await probabilityFuture
-        .timeout(const Duration(seconds: 1), onTimeout: () => null);
-    if (probability == null || probability.isEmpty) {
-      // Failed to compute probability distribution, don't rerank.
-      return suggestions;
-    }
-
-    // Discard the type-based relevance boosts.
-    if (includedSuggestionRelevanceTags != null) {
-      includedSuggestionRelevanceTags.forEach((tag) {
-        tag.relevanceBoost = 0;
-      });
-    }
-
-    // Intersection between static analysis and model suggestions.
-    var middle = DART_RELEVANCE_HIGH + probability.length;
-    // Up to one suggestion from model with very high confidence.
-    var high = middle + probability.length;
-    // Lower relevance, model-only suggestions (perhaps literals).
-    var low = DART_RELEVANCE_LOW - 1;
-
-    List<MapEntry> entries = probability.entries.toList()
-      ..sort((a, b) => b.value.compareTo(a.value));
-
-    if (testInsideQuotes(request)) {
-      // If completion is requested inside of quotes, remove any suggestions
-      // which are not string literals.
-      entries = selectStringLiterals(entries);
-    } else if (request.opType.includeVarNameSuggestions &&
-        suggestions.every((CompletionSuggestion suggestion) =>
-            suggestion.kind == CompletionSuggestionKind.IDENTIFIER)) {
-      // If analysis server thinks this is a declaration context,
-      // remove all of the model-suggested literals.
-      // TODO(lambdabaa): Ask Brian for help leveraging
-      //     SimpleIdentifier#inDeclarationContext.
-      entries.retainWhere((MapEntry entry) => !isLiteral(entry.key));
-    }
-
-    var allowModelOnlySuggestions =
-        !testNamedArgument(suggestions) && !testFollowingDot(request);
-    for (var entry in entries) {
-      // There may be multiple like
-      // CompletionSuggestion and CompletionSuggestion().
-      final completionSuggestions = suggestions.where((suggestion) =>
-          areCompletionsEquivalent(suggestion.completion, entry.key));
-      List<IncludedSuggestionRelevanceTag> includedSuggestions;
-      final isIncludedElementName = includedElementNames != null &&
-          includedElementNames.contains(entry.key);
-      if (includedSuggestionRelevanceTags != null) {
-        includedSuggestions = includedSuggestionRelevanceTags
-            .where((tag) => areCompletionsEquivalent(
-                elementNameFromRelevanceTag(tag.tag), entry.key))
-            .toList();
-      } else {
-        includedSuggestions = [];
-      }
-      if (allowModelOnlySuggestions && entry.value > _MODEL_RELEVANCE_CUTOFF) {
-        final relevance = high--;
-        if (completionSuggestions.isNotEmpty ||
-            includedSuggestions.isNotEmpty) {
-          completionSuggestions.forEach((completionSuggestion) {
-            completionSuggestion.relevance = relevance;
-          });
-          includedSuggestions.forEach((includedSuggestion) {
-            includedSuggestion.relevanceBoost = relevance;
-          });
-        } else if (isIncludedElementName) {
-          if (includedSuggestionRelevanceTags != null) {
-            includedSuggestionRelevanceTags
-                .add(IncludedSuggestionRelevanceTag(entry.key, relevance));
-          }
-        } else {
-          suggestions
-              .add(createCompletionSuggestion(entry.key, featureSet, high--));
-        }
-      } else if (completionSuggestions.isNotEmpty ||
-          includedSuggestions.isNotEmpty ||
-          isIncludedElementName) {
-        final relevance = middle--;
-        completionSuggestions.forEach((completionSuggestion) {
-          completionSuggestion.relevance = relevance;
-        });
-        if (includedSuggestions.isNotEmpty) {
-          includedSuggestions.forEach((includedSuggestion) {
-            includedSuggestion.relevanceBoost = relevance;
-          });
-        } else if (includedSuggestionRelevanceTags != null) {
-          includedSuggestionRelevanceTags
-              .add(IncludedSuggestionRelevanceTag(entry.key, relevance));
-        }
-      } else if (allowModelOnlySuggestions) {
-        final relevance = low--;
-        suggestions
-            .add(createCompletionSuggestion(entry.key, featureSet, relevance));
-        if (includedSuggestionRelevanceTags != null) {
-          includedSuggestionRelevanceTags
-              .add(IncludedSuggestionRelevanceTag(entry.key, relevance));
-        }
-      }
-    }
-    return suggestions;
-  }
-
-  /// Spin up the model isolates and load the tflite model.
-  Future<void> start() async {
-    _writes = [];
-    _index = 0;
-    final initializations = <Future<void>>[];
-
-    // Start the first isolate.
-    await _startIsolate();
-
-    // Start the 2nd and later isolates.
-    for (var i = 1; i < _ISOLATE_COUNT; i++) {
-      initializations.add(_startIsolate());
-    }
-
-    return Future.wait(initializations);
-  }
-
-  Future<void> _startIsolate() async {
-    var timer = Stopwatch()..start();
-    var port = ReceivePort();
-    await Isolate.spawn(entrypoint, port.sendPort);
-    SendPort sendPort = await port.first;
-    return makeLoadRequest(sendPort, [_directory]).whenComplete(() {
-      timer.stop();
-      performanceMetrics._isolateInitTimes.add(timer.elapsed);
-      _writes.add(sendPort);
-    });
-  }
-}
-
-class PerformanceMetrics {
-  static const int _maxResultBuffer = 50;
-
-  final Queue<PredictionResult> _predictionResults = Queue();
-  int _predictionRequestCount = 0;
-  final List<Duration> _isolateInitTimes = [];
-
-  PerformanceMetrics._();
-
-  List<Duration> get isolateInitTimes => _isolateInitTimes;
-
-  /// The total prediction requests to ML Complete.
-  int get predictionRequestCount => _predictionRequestCount;
-
-  /// An iterable of the last `n` prediction results.
-  Iterable<PredictionResult> get predictionResults => _predictionResults;
-
-  void _addPredictionResult(PredictionResult request) {
-    _predictionResults.addFirst(request);
-    if (_predictionResults.length > _maxResultBuffer) {
-      _predictionResults.removeLast();
-    }
-  }
-
-  void _incrementPredictionRequestCount() {
-    _predictionRequestCount++;
-  }
-}
-
-class PredictionResult {
-  final Map<String, double> results;
-  final Duration elapsedTime;
-  final String sourcePath;
-  final String snippet;
-
-  PredictionResult(
-      this.results, this.elapsedTime, this.sourcePath, this.snippet);
-}
diff --git a/pkg/analysis_server/lib/src/services/completion/dart/language_model.dart b/pkg/analysis_server/lib/src/services/completion/dart/language_model.dart
deleted file mode 100644
index 8a2e044..0000000
--- a/pkg/analysis_server/lib/src/services/completion/dart/language_model.dart
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
-// for details. All rights reserved. Use of this source code is governed by a
-// BSD-style license that can be found in the LICENSE file.
-
-import 'dart:convert';
-import 'dart:io';
-import 'dart:typed_data';
-
-import 'package:path/path.dart' as path;
-import 'package:tflite_native/tflite.dart' as tfl;
-
-/// Interface to TensorFlow-based Dart language model for next-token prediction.
-class LanguageModel {
-  static const _probabilityThreshold = 0.0001;
-  static final _numeric = RegExp(r'^\d+(\.\d+)?$');
-  static final _alphanumeric = RegExp(r"^['\w]+$");
-  static final _doubleQuote = '"'.codeUnitAt(0);
-
-  final tfl.Interpreter _interpreter;
-  final Map<String, int> _word2idx;
-  final Map<int, String> _idx2word;
-  final int _lookback;
-
-  /// Load model from directory.
-  factory LanguageModel.load(String directory) {
-    // Load model.
-    final interpreter =
-        tfl.Interpreter.fromFile(path.join(directory, 'model.tflite'));
-    interpreter.allocateTensors();
-
-    // Load word2idx mapping for input.
-    final word2idx = json
-        .decode(File(path.join(directory, 'word2idx.json')).readAsStringSync())
-        .cast<String, int>();
-
-    // Load idx2word mapping for output.
-    final idx2word = json
-        .decode(File(path.join(directory, 'idx2word.json')).readAsStringSync())
-        .map<int, String>((k, v) => MapEntry<int, String>(int.parse(k), v));
-
-    // Get lookback size from model input tensor shape.
-    final tensorShape = interpreter.getInputTensors().single.shape;
-    if (tensorShape.length != 2 || tensorShape.first != 1) {
-      throw ArgumentError(
-          'tensor shape $tensorShape does not match the expected [1, X]');
-    }
-    final lookback = tensorShape.last;
-
-    return LanguageModel._(interpreter, word2idx, idx2word, lookback);
-  }
-
-  LanguageModel._(
-      this._interpreter, this._word2idx, this._idx2word, this._lookback);
-
-  /// Number of previous tokens to look at during predictions.
-  int get lookback => _lookback;
-
-  /// Tear down the interpreter.
-  void close() {
-    _interpreter.delete();
-  }
-
-  bool isNumber(String token) {
-    return _numeric.hasMatch(token) || token.startsWith('0x');
-  }
-
-  /// Predicts the next token to follow a list of preceding tokens.
-  ///
-  /// Returns a list of tokens, sorted by most probable first.
-  List<String> predict(List<String> tokens) =>
-      predictWithScores(tokens).keys.toList();
-
-  /// Predicts the next token with confidence scores.
-  ///
-  /// Returns an ordered map of tokens to scores, sorted by most probable first.
-  Map<String, double> predictWithScores(List<String> tokens) {
-    final tensorIn = _interpreter.getInputTensors().single;
-    tensorIn.data = _transformInput(tokens);
-    _interpreter.invoke();
-    final tensorOut = _interpreter.getOutputTensors().single;
-    return _transformOutput(tensorOut.data, tokens);
-  }
-
-  bool _isAlphanumeric(String token) {
-    // Note that _numeric covers integral and decimal values whereas
-    // _alphanumeric only matches integral values. Check both.
-    return _alphanumeric.hasMatch(token) || _numeric.hasMatch(token);
-  }
-
-  bool _isString(String token) {
-    return token.contains('"') || token.contains("'");
-  }
-
-  /// Transforms tokens to data bytes that can be used as interpreter input.
-  List<int> _transformInput(List<String> tokens) {
-    // Replace out of vocabulary tokens.
-    final sanitizedTokens = tokens.map((token) {
-      if (_word2idx.containsKey(token)) {
-        return token;
-      }
-      if (isNumber(token)) {
-        return '<num>';
-      }
-      if (_isString(token)) {
-        return '<str>';
-      }
-      return '<unk>';
-    });
-    // Get indexes (as floats).
-    final indexes = Float32List(lookback)
-      ..setAll(0, sanitizedTokens.map((token) => _word2idx[token].toDouble()));
-    // Get bytes
-    return Uint8List.view(indexes.buffer);
-  }
-
-  /// Transforms interpreter output data to map of tokens to scores.
-  Map<String, double> _transformOutput(
-      List<int> databytes, List<String> tokens) {
-    // Get bytes.
-    final bytes = Uint8List.fromList(databytes);
-
-    // Get scores (as floats)
-    final probabilities = Float32List.view(bytes.buffer);
-
-    final scores = <String, double>{};
-    final scoresAboveThreshold = <String, double>{};
-    probabilities.asMap().forEach((k, v) {
-      // x in 0, 1, ..., |V| - 1 correspond to specific members of the vocabulary.
-      // x in |V|, |V| + 1, ..., |V| + 49 are pointers to reference positions along the
-      // network input.
-      if (k >= _idx2word.length + tokens.length) {
-        return;
-      }
-      // Find the name corresponding to this position along the network output.
-      final lexeme =
-          k < _idx2word.length ? _idx2word[k] : tokens[k - _idx2word.length];
-      // Normalize double to single quotes.
-      final sanitized = lexeme.codeUnitAt(0) != _doubleQuote
-          ? lexeme
-          : lexeme.replaceAll('"', '\'');
-      final score = (scores[sanitized] ?? 0.0) + v;
-      scores[sanitized] = score;
-      if (score < _probabilityThreshold ||
-          k >= _idx2word.length && !_isAlphanumeric(sanitized)) {
-        // Discard names below a fixed likelihood, and
-        // don't assign probability to punctuation by reference.
-        return;
-      }
-      scoresAboveThreshold[sanitized] = score;
-    });
-
-    return Map.fromEntries(scoresAboveThreshold.entries.toList()
-      ..sort((a, b) => b.value.compareTo(a.value)));
-  }
-}
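
The deleted wrapper fed the interpreter by viewing a fixed-length `Float32List` of token indexes as raw bytes, and read scores back by viewing the output bytes as floats again. A small `dart:typed_data` sketch of that round trip, with illustrative names and no tflite dependency:

```dart
import 'dart:typed_data';

/// Encodes token indexes as the raw float32 bytes a fixed-size input tensor
/// expects; unfilled positions stay 0.0 (the shape is [1, lookback]).
Uint8List encodeIndexesAsFloat32Bytes(List<int> indexes, int lookback) {
  final floats = Float32List(lookback)
    ..setAll(0, indexes.map((i) => i.toDouble()));
  return Uint8List.view(floats.buffer);
}

/// Decodes output bytes back into float32 scores (host byte order).
Float32List decodeFloat32Scores(List<int> bytes) {
  // Copy into a fresh Uint8List so the view gets a dedicated, aligned buffer.
  return Float32List.view(Uint8List.fromList(bytes).buffer);
}

void main() {
  final input = encodeIndexesAsFloat32Bytes([7, 42, 3], 5);
  print(input.length); // 20 bytes: 5 floats * 4 bytes each

  final scores = decodeFloat32Scores(input);
  print(scores); // [7.0, 42.0, 3.0, 0.0, 0.0]
}
```
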
diff --git a/pkg/analysis_server/lib/src/status/diagnostics.dart b/pkg/analysis_server/lib/src/status/diagnostics.dart
index bd39067..8f04747 100644
--- a/pkg/analysis_server/lib/src/status/diagnostics.dart
+++ b/pkg/analysis_server/lib/src/status/diagnostics.dart
@@ -16,7 +16,6 @@
 import 'package:analysis_server/src/plugin/plugin_manager.dart';
 import 'package:analysis_server/src/server/http_server.dart';
 import 'package:analysis_server/src/services/completion/completion_performance.dart';
-import 'package:analysis_server/src/services/completion/dart/completion_ranking.dart';
 import 'package:analysis_server/src/socket_server.dart';
 import 'package:analysis_server/src/status/ast_writer.dart';
 import 'package:analysis_server/src/status/element_writer.dart';
@@ -759,7 +758,6 @@
     // Add server-specific pages. Ordering doesn't matter as the items are
     // sorted later.
     var server = socketServer.analysisServer;
-    pages.add(MLCompletionPage(this, server));
     pages.add(PluginsPage(this, server));
 
     if (server is AnalysisServer) {
@@ -1074,100 +1072,6 @@
   }
 }
 
-class MLCompletionPage extends DiagnosticPageWithNav {
-  @override
-  final AbstractAnalysisServer server;
-
-  MLCompletionPage(DiagnosticsSite site, this.server)
-      : super(site, 'ml-completion', 'ML Completion',
-            description: 'Statistics for ML code completion.');
-
-  path.Context get pathContext => server.resourceProvider.pathContext;
-
-  @override
-  Future<void> generateContent(Map<String, String> params) async {
-    var hasMLComplete = CompletionRanking.instance != null;
-    if (!hasMLComplete) {
-      blankslate('''ML code completion is not enabled (see <a
-href="https://github.com/dart-lang/sdk/wiki/Previewing-Dart-code-completions-powered-by-machine-learning"
->previewing Dart ML completion</a> for how to enable it).''');
-      return;
-    }
-
-    buf.writeln('ML completion enabled.<br>');
-
-    var isolateTimes = CompletionRanking
-        .instance.performanceMetrics.isolateInitTimes
-        .map((Duration time) {
-      return '${time.inMilliseconds}ms';
-    }).join(', ');
-    p('ML isolate init times: $isolateTimes');
-
-    var predictions = CompletionRanking
-        .instance.performanceMetrics.predictionResults
-        .toList();
-
-    if (predictions.isEmpty) {
-      blankslate('No completions recorded.');
-      return;
-    }
-
-    p('${CompletionRanking.instance.performanceMetrics.predictionRequestCount} '
-        'requests');
-
-    // draw a chart
-    buf.writeln(
-        '<div id="chart-div" style="width: 700px; height: 300px;"></div>');
-    var rowData = StringBuffer();
-    for (var prediction in predictions.reversed) {
-      // [' ', 101.5]
-      if (rowData.isNotEmpty) {
-        rowData.write(',');
-      }
-      rowData.write("[' ', ${prediction.elapsedTime.inMilliseconds}]");
-    }
-    buf.writeln('''
-      <script type="text/javascript">
-      google.charts.load('current', {'packages':['bar']});
-      google.charts.setOnLoadCallback(drawChart);
-      function drawChart() {
-        var data = google.visualization.arrayToDataTable([
-          ['Completions', 'Time'],
-          $rowData
-        ]);
-        var options = { bars: 'vertical', vAxis: {format: 'decimal'}, height: 300 };
-        var chart = new google.charts.Bar(document.getElementById('chart-div'));
-        chart.draw(data, google.charts.Bar.convertOptions(options));
-      }
-      </script>
-''');
-
-    String summarize(PredictionResult prediction) {
-      var entries = prediction.results.entries.toList();
-      entries.sort((a, b) => b.value.compareTo(a.value));
-      var summary = entries
-          .take(3)
-          .map((entry) => '"${entry.key}":${entry.value.toStringAsFixed(3)}')
-          .join('<br>');
-      return summary;
-    }
-
-    // emit the data as a table
-    buf.writeln('<table>');
-    buf.writeln(
-        '<tr><th>Time</th><th>Results</th><th>Snippet</th><th>Top suggestions</th></tr>');
-    for (var prediction in predictions) {
-      buf.writeln('<tr>'
-          '<td class="pre right">${printMilliseconds(prediction.elapsedTime.inMilliseconds)}</td>'
-          '<td class="right">${prediction.results.length}</td>'
-          '<td><code>${escape(prediction.snippet)}</code></td>'
-          '<td class="right">${summarize(prediction)}</td>'
-          '</tr>');
-    }
-    buf.writeln('</table>');
-  }
-}
-
 class NotFoundPage extends DiagnosticPage {
   @override
   final String path;
diff --git a/pkg/analysis_server/pubspec.yaml b/pkg/analysis_server/pubspec.yaml
index 2b64203..9a94f7d 100644
--- a/pkg/analysis_server/pubspec.yaml
+++ b/pkg/analysis_server/pubspec.yaml
@@ -25,7 +25,6 @@
     path: ../telemetry
   test: any
   path: any
-  tflite_native: any
   watcher: any
   yaml: any
 
diff --git a/pkg/analysis_server/test/services/completion/dart/completion_ranking_test.dart b/pkg/analysis_server/test/services/completion/dart/completion_ranking_test.dart
deleted file mode 100644
index 841ad47..0000000
--- a/pkg/analysis_server/test/services/completion/dart/completion_ranking_test.dart
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
-// for details. All rights reserved. Use of this source code is governed by a
-// BSD-style license that can be found in the LICENSE file.
-
-import 'dart:io';
-
-import 'package:analysis_server/src/services/completion/dart/completion_ranking.dart';
-import 'package:path/path.dart' as path;
-import 'package:test/test.dart';
-
-void main() {
-  CompletionRanking ranking;
-
-  setUp(() async {
-    ranking = CompletionRanking(directory);
-    await ranking.start();
-  });
-
-  test('make request to isolate', () async {
-    final tokens =
-        tokenize('if (list == null) { return; } for (final i = 0; i < list.');
-    final response = await ranking.makePredictRequest(tokens);
-    expect(response['data']['length'], greaterThan(0.9));
-  }, skip: 'https://github.com/dart-lang/sdk/issues/42988');
-}
-
-final directory = path.join(File.fromUri(Platform.script).parent.path, '..',
-    '..', '..', '..', 'language_model', 'lexeme');
-
-/// Tokenizes the input string.
-///
-/// The input is split by word boundaries and trimmed of whitespace.
-List<String> tokenize(String input) =>
-    input.split(RegExp(r'\b|\s')).map((t) => t.trim()).toList()
-      ..removeWhere((t) => t.isEmpty);
diff --git a/pkg/analysis_server/test/services/completion/dart/language_model_test.dart b/pkg/analysis_server/test/services/completion/dart/language_model_test.dart
deleted file mode 100644
index 7a2434e..0000000
--- a/pkg/analysis_server/test/services/completion/dart/language_model_test.dart
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
-// for details. All rights reserved. Use of this source code is governed by a
-// BSD-style license that can be found in the LICENSE file.
-
-import 'dart:ffi';
-import 'dart:io';
-
-import 'package:analysis_server/src/services/completion/dart/language_model.dart';
-import 'package:path/path.dart' as path;
-import 'package:test/test.dart';
-
-void main() {
-  if (sizeOf<IntPtr>() == 4) {
-    // We don't yet support running tflite on 32-bit systems.
-    return;
-  }
-
-  group('LanguageModel', () {
-    LanguageModel model;
-
-    setUp(() {
-      model = LanguageModel.load(directory);
-    });
-
-    tearDown(() {
-      model.close();
-    });
-
-    test('calculates lookback', () {
-      expect(model.lookback, expectedLookback);
-    });
-
-    test('predict with defaults', () {
-      final tokens =
-          tokenize('if (list == null) { return; } for (final i = 0; i < list.');
-      final suggestions = model.predict(tokens);
-      expect(suggestions.first, 'length');
-    });
-
-    test('predict with confidence scores', () {
-      final tokens =
-          tokenize('if (list == null) { return; } for (final i = 0; i < list.');
-      final suggestions = model.predictWithScores(tokens);
-      final best = suggestions.entries.first;
-      expect(best.key, 'length');
-      expect(best.value, greaterThan(0.9));
-    });
-
-    test('predict when no previous tokens', () {
-      final tokens = <String>[];
-      final suggestions = model.predict(tokens);
-      expect(suggestions.first, isNotEmpty);
-    });
-
-    test('load fail', () {
-      try {
-        LanguageModel.load('doesnotexist');
-        fail('Failure to load language model should throw an exception');
-      } catch (e) {
-        expect(e.toString(),
-            equals('Invalid argument(s): Unable to create model.'));
-      }
-    });
-
-    test('isNumber', () {
-      expect(model.isNumber('0xCAb005E'), true);
-      expect(model.isNumber('foo'), false);
-      expect(model.isNumber('3.1415'), true);
-      expect(model.isNumber('1337'), true);
-      expect(model.isNumber('"four score and seven years ago"'), false);
-      expect(model.isNumber('0.0'), true);
-    });
-  }, skip: 'https://github.com/dart-lang/sdk/issues/42988');
-}
-
-const expectedLookback = 100;
-
-final directory = path.join(File.fromUri(Platform.script).parent.path, '..',
-    '..', '..', '..', 'language_model', 'lexeme');
-
-/// Tokenizes the input string.
-///
-/// The input is split by word boundaries and trimmed of whitespace.
-List<String> tokenize(String input) =>
-    input.split(RegExp(r'\b|\s')).map((t) => t.trim()).toList()
-      ..removeWhere((t) => t.isEmpty);
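
Both deleted test files share the same `tokenize` helper: split on word boundaries or whitespace, trim, and drop empty strings, so identifiers, operators, and punctuation become separate tokens. A stand-alone copy that prints the tokens for one of the test inputs:

```dart
// A standalone copy of the deleted `tokenize` helper, printed on one of the
// test inputs so the splitting behaviour is visible.
List<String> tokenize(String input) =>
    input.split(RegExp(r'\b|\s')).map((t) => t.trim()).toList()
      ..removeWhere((t) => t.isEmpty);

void main() {
  print(tokenize('if (list == null) { return; } for (final i = 0; i < list.'));
}
```
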
diff --git a/pkg/analysis_server/test/services/completion/dart/test_all.dart b/pkg/analysis_server/test/services/completion/dart/test_all.dart
index 3509cde..bf9d30b 100644
--- a/pkg/analysis_server/test/services/completion/dart/test_all.dart
+++ b/pkg/analysis_server/test/services/completion/dart/test_all.dart
@@ -9,15 +9,11 @@
 import 'completion_manager_test.dart' as completion_manager;
 import 'completion_ranking_internal_test.dart'
     as completion_ranking_internal_test;
-// ignore: unused_import
-import 'completion_ranking_test.dart' as completion_ranking_test;
 import 'extension_member_contributor_test.dart' as extension_member_contributor;
 import 'field_formal_contributor_test.dart' as field_formal_contributor_test;
 import 'imported_reference_contributor_test.dart' as imported_ref_test;
 import 'keyword_contributor_test.dart' as keyword_test;
 import 'label_contributor_test.dart' as label_contributor_test;
-// ignore: unused_import
-import 'language_model_test.dart' as language_model_test;
 import 'library_member_contributor_test.dart' as library_member_test;
 import 'library_prefix_contributor_test.dart' as library_prefix_test;
 import 'local_library_contributor_test.dart' as local_lib_test;
@@ -35,18 +31,12 @@
     arglist_test.main();
     combinator_test.main();
     completion_manager.main();
-    // TODO(lambdabaa): Run this test once we figure out how to suppress
-    //   output from the tflite shared library
-    // completion_ranking_test.main();
     completion_ranking_internal_test.main();
     extension_member_contributor.main();
     field_formal_contributor_test.main();
     imported_ref_test.main();
     keyword_test.main();
     label_contributor_test.main();
-    // TODO(brianwilkerson) Run this test when it's been updated to not rely on
-    //   the location of the 'script' being run.
-    // language_model_test.main();
     library_member_test.main();
     library_prefix_test.main();
     local_lib_test.main();
diff --git a/pkg/analysis_server/test/src/server/sdk_configuration_test.dart b/pkg/analysis_server/test/src/server/sdk_configuration_test.dart
index a961393..b007d66 100644
--- a/pkg/analysis_server/test/src/server/sdk_configuration_test.dart
+++ b/pkg/analysis_server/test/src/server/sdk_configuration_test.dart
@@ -43,7 +43,6 @@
       expect(config.analyticsForceEnabled, isNull);
       expect(config.crashReportingId, isNull);
       expect(config.crashReportingForceEnabled, isNull);
-      expect(config.mlModelPath, isNull);
     });
 
     test('is configured', () {
@@ -55,9 +54,7 @@
   "server.analytics.forceEnabled": true,
 
   "server.crash.reporting.id": "Test_crash_id",
-  "server.crash.reporting.forceEnabled": true,
-
-  "server.ml.model.path": "/foo/bar/baz.ml"
+  "server.crash.reporting.forceEnabled": true
 }
 ''');
 
@@ -68,7 +65,6 @@
       expect(config.analyticsForceEnabled, isTrue);
       expect(config.crashReportingId, 'Test_crash_id');
       expect(config.crashReportingForceEnabled, isTrue);
-      expect(config.mlModelPath, '/foo/bar/baz.ml');
     });
   });
 }
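
The configuration these tests read is a flat JSON object of dotted keys, and each `SdkConfiguration` getter (including the removed `mlModelPath`) simply indexes that map. A hypothetical, self-contained sketch of that shape (not the server's actual class):

```dart
import 'dart:convert';
import 'dart:io';

/// Hypothetical sketch of the override-map pattern: the file is a flat JSON
/// object of dotted keys, and each getter just indexes the decoded map.
class ConfigOverrides {
  final Map<String, Object?> _values;

  ConfigOverrides(this._values);

  factory ConfigOverrides.fromFile(String filePath) {
    final decoded = json.decode(File(filePath).readAsStringSync()) as Map;
    return ConfigOverrides(decoded.cast<String, Object?>());
  }

  bool get hasAnyOverrides => _values.isNotEmpty;

  String? get crashReportingId =>
      _values['server.crash.reporting.id'] as String?;

  bool? get crashReportingForceEnabled =>
      _values['server.crash.reporting.forceEnabled'] as bool?;
}

void main() {
  final config = ConfigOverrides({'server.crash.reporting.id': 'Test_crash_id'});
  print(config.hasAnyOverrides); // true
  print(config.crashReportingId); // Test_crash_id
}
```
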
diff --git a/sdk/BUILD.gn b/sdk/BUILD.gn
index 39c0c0a..4e61649 100644
--- a/sdk/BUILD.gn
+++ b/sdk/BUILD.gn
@@ -315,45 +315,6 @@
   ]
 }
 
-if (target_cpu == "x64") {
-  if (is_linux || is_android || is_fuchsia) {
-    copy_tree_specs += [
-      {
-        target = "copy_libtensorflowlite_c"
-        visibility = [ ":create_common_sdk" ]
-        deps = [ ":copy_libraries" ]
-        source = "../third_party/pkg/tflite_native/lib/src/blobs"
-        dest = "$root_out_dir/dart-sdk/bin/snapshots"
-        ignore_patterns = "*.dll,*mac64.so"
-      },
-    ]
-  }
-  if (is_mac) {
-    copy_tree_specs += [
-      {
-        target = "copy_libtensorflowlite_c"
-        visibility = [ ":create_common_sdk" ]
-        deps = [ ":copy_libraries" ]
-        source = "../third_party/pkg/tflite_native/lib/src/blobs"
-        dest = "$root_out_dir/dart-sdk/bin/snapshots"
-        ignore_patterns = "*.dll,*linux64.so"
-      },
-    ]
-  }
-  if (is_win) {
-    copy_tree_specs += [
-      {
-        target = "copy_libtensorflowlite_c"
-        visibility = [ ":create_common_sdk" ]
-        deps = [ ":copy_libraries" ]
-        source = "../third_party/pkg/tflite_native/lib/src/blobs"
-        dest = "$root_out_dir/dart-sdk/bin/snapshots"
-        ignore_patterns = "*.so"
-      },
-    ]
-  }
-}
-
 # This generates targets for everything in copy_tree_specs. The targets have the
 # same name as the "target" fields in the scopes of copy_tree_specs.
 copy_trees("copy_trees") {
@@ -918,9 +879,6 @@
   if (is_win) {
     public_deps += [ ":copy_7zip" ]
   }
-  if (target_cpu == "x64") {
-    public_deps += [ ":copy_libtensorflowlite_c" ]
-  }
 
   # CIPD only has versions of the Rust compiler for linux and mac x64 hosts.
   # We also disallow cross-compilation (it may be possible in future, but it
diff --git a/tools/VERSION b/tools/VERSION
index e3a1704..7f57bc0 100644
--- a/tools/VERSION
+++ b/tools/VERSION
@@ -27,5 +27,5 @@
 MAJOR 2
 MINOR 12
 PATCH 0
-PRERELEASE 46
+PRERELEASE 47
 PRERELEASE_PATCH 0
\ No newline at end of file