Vendor package:tar and package:chunked_stream (#2932)

diff --git a/analysis_options.yaml b/analysis_options.yaml
index 3257c3a..e920b41 100644
--- a/analysis_options.yaml
+++ b/analysis_options.yaml
@@ -1,6 +1,7 @@
 include: package:pedantic/analysis_options.yaml
 
 analyzer:
+  exclude: [lib/src/third_party/**]
   errors:
     unused_import: error
     unused_local_variable: error
diff --git a/lib/src/io.dart b/lib/src/io.dart
index a53663a..15828a2 100644
--- a/lib/src/io.dart
+++ b/lib/src/io.dart
@@ -15,12 +15,13 @@
 import 'package:pedantic/pedantic.dart';
 import 'package:pool/pool.dart';
 import 'package:stack_trace/stack_trace.dart';
-import 'package:tar/tar.dart';
 
 import 'error_group.dart';
 import 'exceptions.dart';
 import 'exit_codes.dart' as exit_codes;
 import 'log.dart' as log;
+// ignore: avoid_relative_lib_imports
+import 'third_party/tar/lib/tar.dart';
 import 'utils.dart';
 
 export 'package:http/http.dart' show ByteStream;
diff --git a/lib/src/third_party/chunked_stream/CHANGELOG.md b/lib/src/third_party/chunked_stream/CHANGELOG.md
new file mode 100644
index 0000000..1139fab
--- /dev/null
+++ b/lib/src/third_party/chunked_stream/CHANGELOG.md
@@ -0,0 +1,33 @@
+## v1.4.0
+
+- Stable null-safety release
+
+## v1.4.0-nullsafety.0
+
+- Added `readByteStream` which uses `BytesBuilder` from `dart:typed_data` under
+  the hood.
+- Added `readBytes` to `ChunkedStreamIterator<int>` for reading byte streams
+  into `Uint8List`.
+- Added `@sealed` annotation to all exported classes.
+
+## v1.3.0-nullsafety.0
+
+- Migrated to null safety
+
+## v1.2.0
+
+- Changed `ChunkedStreamIterator` implementation to fix bugs related to
+  stream pausing and resuming.
+
+## v1.1.0
+
+- Added `asChunkedStream(N, input)` for wrapping a `Stream<T>` as a
+  chunked stream `Stream<List<T>>`, which is useful when batch processing
+  chunks of a stream.
+
+## v1.0.1
+
+- Fixed lints reported by pana.
+
+## v1.0.0
+
+- Initial release.
diff --git a/lib/src/third_party/chunked_stream/LICENSE b/lib/src/third_party/chunked_stream/LICENSE
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/lib/src/third_party/chunked_stream/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/lib/src/third_party/chunked_stream/METADATA b/lib/src/third_party/chunked_stream/METADATA
new file mode 100644
index 0000000..236d49e
--- /dev/null
+++ b/lib/src/third_party/chunked_stream/METADATA
@@ -0,0 +1,17 @@
+name: "chunked_stream"
+description:
+    "Utilities for working with chunked streams, such as byte streams which is"
+    "often given as a stream of byte chunks with type `Stream<List<int>>`."
+
+third_party {
+  url {
+    type: GIT
+    value: "https://github.com/google/dart-neats/"
+  }
+  version: "1.4.0"
+  last_upgrade_date { year: 2021 month: 4 day: 23 }
+  license_type: NOTICE
+  local_modifications:
+    "Only extracted directory chunked_stream."
+    "Added @dart=2.12" headers to all dart files.
+}
diff --git a/lib/src/third_party/chunked_stream/README.md b/lib/src/third_party/chunked_stream/README.md
new file mode 100644
index 0000000..410f7a5
--- /dev/null
+++ b/lib/src/third_party/chunked_stream/README.md
@@ -0,0 +1,57 @@
+Chunked Stream Utilities
+========================
+Utilities for working with chunked streams, such as `Stream<List<int>>`.
+
+**Disclaimer:** This is not an officially supported Google product.
+
+A _chunked stream_ is a stream where the data arrives in chunks. The most
+common example is a byte stream, which conventionally has the type
+`Stream<List<int>>`. We say a byte stream is chunked because bytes arrive in
+chunks, rather than individually.
+
+A byte stream could technically have the type `Stream<int>`; however, this
+would be very inefficient, as each byte would be passed as an individual event.
+Instead, bytes arrive in chunks (`List<int>`), and the type of a byte stream
+is `Stream<List<int>>`.
+
+To easily convert a byte stream `Stream<List<int>>` into a single byte
+buffer `Uint8List` (which implements `List<int>`), this package provides
+`readByteStream(stream, maxSize: 1024*1024)`, which takes an optional
+`maxSize` parameter to help avoid running out of memory.
+
+**Example**
+```dart
+import 'dart:io';
+import 'dart:convert';
+import 'dart:typed_data';
+import 'package:chunked_stream/chunked_stream.dart';
+
+Future<void> main() async {
+  // Open README.md as a byte stream
+  Stream<List<int>> fileStream = File('README.md').openRead();
+
+  // Read all bytes from the stream
+  final Uint8List bytes = await readByteStream(fileStream);
+  
+  // Convert content to string using utf8 codec from dart:convert and print
+  print(utf8.decode(bytes));
+}
+```
+
+To make it easy to process chunked streams, such as `Stream<List<int>>`,
+this package provides `ChunkedStreamIterator`, which allows you to specify how
+many elements you want and buffers unconsumed elements, making it easy to work
+with chunked streams one element at a time.
+
+**Example**
+```dart
+final reader = ChunkedStreamIterator(File('my-file.txt').openRead());
+// Read one byte at a time until the stream is exhausted
+while (true) {
+  var data = await reader.read(1);  // read one byte
+  if (data.isEmpty) {
+    print('End of file reached');
+    break;
+  }
+  print('next byte: ${data[0]}');
+}
+```
diff --git a/lib/src/third_party/chunked_stream/analysis_options.yaml b/lib/src/third_party/chunked_stream/analysis_options.yaml
new file mode 100644
index 0000000..108d105
--- /dev/null
+++ b/lib/src/third_party/chunked_stream/analysis_options.yaml
@@ -0,0 +1 @@
+include: package:pedantic/analysis_options.yaml
diff --git a/lib/src/third_party/chunked_stream/example/main.dart b/lib/src/third_party/chunked_stream/example/main.dart
new file mode 100644
index 0000000..8cc2e96
--- /dev/null
+++ b/lib/src/third_party/chunked_stream/example/main.dart
@@ -0,0 +1,66 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// @dart = 2.12
+
+import 'dart:io' show stdout;
+import 'dart:convert' show utf8;
+import 'dart:typed_data' show Uint8List;
+import 'package:chunked_stream/chunked_stream.dart';
+
+void main() async {
+  // Input consisting of: *([uint32 length] [blob of size length])
+  // This is a series of blocks, each consisting of a uint32 length followed by
+  // that many bytes.
+  //
+  // Many formats, including tar, msgpack, and protobuf, have binary
+  // encodings that consist of an integer indicating the length of a blob of bytes.
+  // ChunkedStreamIterator can be useful when decoding such formats.
+  final inputStream = () async* {
+    // Yield blob1 from stream
+    final blob1 = utf8.encode('hello world');
+    yield [blob1.length, 0, 0, 0]; // little-endian uint32 encoding of length
+    yield blob1;
+
+    // Yield blob2 from stream
+    final blob2 = utf8.encode('small blob');
+    yield [blob2.length, 0, 0, 0];
+    yield blob2;
+  }();
+
+  // To ensure efficient reading, we buffer the stream up to 4096 bytes; for
+  // I/O, buffering can improve performance (in some cases).
+  final bufferedStream = bufferChunkedStream(inputStream, bufferSize: 4096);
+
+  // Create a chunked stream iterator over the buffered stream.
+  final iterator = ChunkedStreamIterator(bufferedStream);
+
+  while (true) {
+    // Read the first 4 bytes
+    final lengthBytes = await iterator.read(4);
+
+    // We have reached EOF if there are no more bytes.
+    if (lengthBytes.isEmpty) {
+      break;
+    }
+
+    // Read those 4 bytes as a Uint32 (assumes a little-endian host).
+    final length = Uint8List.fromList(lengthBytes).buffer.asUint32List()[0];
+
+    // Read the next [length] bytes, and write them to stdout
+    print('Blob of $length bytes:');
+    await stdout.addStream(iterator.substream(length));
+    print('\n');
+  }
+}
diff --git a/lib/src/third_party/chunked_stream/lib/chunked_stream.dart b/lib/src/third_party/chunked_stream/lib/chunked_stream.dart
new file mode 100644
index 0000000..3a9d2e4
--- /dev/null
+++ b/lib/src/third_party/chunked_stream/lib/chunked_stream.dart
@@ -0,0 +1,45 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// @dart = 2.12
+
+/// Utilities for working with chunked streams.
+///
+/// This library provides the following utilities:
+///  * [ChunkedStreamIterator], for reading a chunked stream by iterating over
+///  chunks and splitting into substreams.
+///  * [readByteStream], for reading a byte stream into a single [Uint8List].
+///  Often useful for converting [Stream<List<int>>] to [Uint8List].
+///  * [readChunkedStream], for reading a chunked stream into a single big list.
+///  * [limitChunkedStream], for wrapping a chunked stream as a new stream with
+///  a length limit, useful when accepting input streams from an untrusted network.
+///  * [bufferChunkedStream], for buffering a chunked stream. This can be useful
+///  to improve I/O performance if reading the stream chunk by chunk with
+///  frequent pause/resume calls, as is the case when using
+///  [ChunkedStreamIterator].
+///  * [asChunkedStream], for wrapping a [Stream<T>] as [Stream<List<T>>],
+///  useful for batch processing elements from a stream.
+library chunked_stream;
+
+import 'dart:typed_data';
+
+import 'src/chunk_stream.dart';
+import 'src/chunked_stream_buffer.dart';
+import 'src/chunked_stream_iterator.dart';
+import 'src/read_chunked_stream.dart';
+
+export 'src/chunk_stream.dart';
+export 'src/chunked_stream_buffer.dart';
+export 'src/chunked_stream_iterator.dart';
+export 'src/read_chunked_stream.dart';
diff --git a/lib/src/third_party/chunked_stream/lib/src/chunk_stream.dart b/lib/src/third_party/chunked_stream/lib/src/chunk_stream.dart
new file mode 100644
index 0000000..60874c8
--- /dev/null
+++ b/lib/src/third_party/chunked_stream/lib/src/chunk_stream.dart
@@ -0,0 +1,44 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// @dart = 2.12
+
+import 'dart:async';
+
+/// Wrap [input] as a chunked stream with chunks the size of [N].
+///
+/// This function returns a [Stream<List<T>>] where each event is a [List<T>]
+/// with [N] elements. The last chunk of the resulting stream may contain less
+/// than [N] elements.
+///
+/// This is useful for batch processing elements from a stream.
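+///
+/// For example, batching a stream of integers into pairs:
+/// ```dart
+/// final chunks =
+///     await asChunkedStream(2, Stream.fromIterable([1, 2, 3, 4, 5])).toList();
+/// // chunks is [[1, 2], [3, 4], [5]]
+/// ```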
+Stream<List<T>> asChunkedStream<T>(int N, Stream<T> input) async* {
+  if (N <= 0) {
+    throw ArgumentError.value(N, 'N', 'chunk size must be positive');
+  }
+
+  var events = <T>[];
+  await for (final event in input) {
+    events.add(event);
+    if (events.length >= N) {
+      assert(events.length == N);
+      yield events;
+      events = <T>[];
+    }
+  }
+  assert(events.length <= N);
+  if (events.isNotEmpty) {
+    yield events;
+  }
+}
diff --git a/lib/src/third_party/chunked_stream/lib/src/chunked_stream_buffer.dart b/lib/src/third_party/chunked_stream/lib/src/chunked_stream_buffer.dart
new file mode 100644
index 0000000..bcc9ef1
--- /dev/null
+++ b/lib/src/third_party/chunked_stream/lib/src/chunked_stream_buffer.dart
@@ -0,0 +1,68 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// @dart = 2.12
+
+import 'dart:async';
+
+/// Buffer a chunked stream.
+///
+/// This reads [input] into an internal buffer of size [bufferSize] elements.
+/// When the internal buffer is full, the [input] stream is _paused_; as elements
+/// are consumed from the returned stream, the [input] stream is _resumed_.
+///
+/// When reading a chunked stream as it arrives from disk or network, it can be
+/// useful to buffer the stream internally to avoid blocking disk or network
+/// reads while waiting for the CPU to process the bytes read.
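+///
+/// A minimal sketch, wrapping a file's byte stream before iterating over it:
+/// ```dart
+/// final buffered = bufferChunkedStream(
+///   File('my-file.txt').openRead(),
+///   bufferSize: 16 * 1024,
+/// );
+/// // Consume `buffered` chunk by chunk, e.g. with a ChunkedStreamIterator.
+/// ```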
+Stream<List<T>> bufferChunkedStream<T>(
+  Stream<List<T>> input, {
+  int bufferSize = 16 * 1024,
+}) async* {
+  if (bufferSize <= 0) {
+    throw ArgumentError.value(
+        bufferSize, 'bufferSize', 'bufferSize must be positive');
+  }
+
+  late final StreamController<List<T>> c;
+  StreamSubscription? sub;
+
+  c = StreamController(
+    onListen: () {
+      sub = input.listen((chunk) {
+        bufferSize -= chunk.length;
+        c.add(chunk);
+
+        final currentSub = sub;
+        if (bufferSize <= 0 && currentSub != null && !currentSub.isPaused) {
+          currentSub.pause();
+        }
+      }, onDone: () {
+        c.close();
+      }, onError: (e, st) {
+        c.addError(e, st);
+      });
+    },
+    onCancel: () => sub!.cancel(),
+  );
+
+  await for (final chunk in c.stream) {
+    yield chunk;
+    bufferSize += chunk.length;
+
+    final currentSub = sub;
+    if (bufferSize > 0 && currentSub != null && currentSub.isPaused) {
+      currentSub.resume();
+    }
+  }
+}
diff --git a/lib/src/third_party/chunked_stream/lib/src/chunked_stream_iterator.dart b/lib/src/third_party/chunked_stream/lib/src/chunked_stream_iterator.dart
new file mode 100644
index 0000000..3e83754
--- /dev/null
+++ b/lib/src/third_party/chunked_stream/lib/src/chunked_stream_iterator.dart
@@ -0,0 +1,244 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// @dart = 2.12
+
+import 'dart:async';
+import 'dart:typed_data';
+
+import 'package:meta/meta.dart' show sealed;
+import 'read_chunked_stream.dart';
+
+/// Auxiliary class for iterating over the items in a chunked stream.
+///
+/// A _chunked stream_ is a stream in which items arrive in chunks with each
+/// event from the stream. A common example is a byte stream with the type
+/// `Stream<List<int>>`. In such a byte stream, bytes arrive in chunks
+/// (`List<int>`) with each event.
+///
+/// Note: methods on this class may not be called concurrently.
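+///
+/// A short sketch of typical use, reading a fixed-size header from a file:
+/// ```dart
+/// final iterator = ChunkedStreamIterator(File('my-file.bin').openRead());
+/// final header = await iterator.read(4); // first four bytes
+/// await iterator.cancel();
+/// ```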
+@sealed
+abstract class ChunkedStreamIterator<T> {
+  factory ChunkedStreamIterator(Stream<List<T>> stream) {
+    return _ChunkedStreamIterator<T>(stream);
+  }
+
+  /// Returns a list of the next [size] elements.
+  ///
+  /// Returns a list with less than [size] elements if the end of stream is
+  /// encountered before [size] elements are read.
+  ///
+  /// If an error is encountered before reading [size] elements, the error
+  /// will be thrown.
+  Future<List<T>> read(int size);
+
+  /// Cancels the stream iterator (and the underlying stream subscription)
+  /// early.
+  ///
+  /// Users should call [cancel] to ensure that the stream is properly closed
+  /// if they need to stop listening earlier than the end of the stream.
+  Future<void> cancel();
+
+  /// Returns a sub-[Stream] with the next [size] elements.
+  ///
+  /// A sub-[Stream] is a [Stream] consisting of the next [size] elements
+  /// in the same order they occur in the stream used to create this iterator.
+  ///
+  /// If [read] is called before the sub-[Stream] is fully read, a [StateError]
+  /// will be thrown.
+  ///
+  /// ```dart
+  /// final s = ChunkedStreamIterator(_chunkedStream([
+  ///   ['a', 'b', 'c'],
+  ///   ['1', '2'],
+  /// ]));
+  /// expect(await s.read(1), equals(['a']));
+  ///
+  /// // creates a substream from the chunks holding the
+  /// // next three elements (['b', 'c'], ['1'])
+  /// final i = StreamIterator(s.substream(3));
+  /// expect(await i.moveNext(), isTrue);
+  /// expect(await i.current, equals(['b', 'c']));
+  /// expect(await i.moveNext(), isTrue);
+  /// expect(await i.current, equals(['1']));
+  ///
+  /// // Since the substream has been read till the end, we can continue reading
+  /// // from the initial stream.
+  /// expect(await s.read(1), equals(['2']));
+  /// ```
+  ///
+  /// The resulting stream may contain less than [size] elements if the
+  /// underlying stream has less than [size] elements before the end of stream.
+  ///
+  /// When the substream is cancelled, the remaining elements in the substream
+  /// are drained.
+  Stream<List<T>> substream(int size);
+}
+
+/// General purpose _chunked stream iterator_.
+class _ChunkedStreamIterator<T> implements ChunkedStreamIterator<T> {
+  /// Underlying iterator that iterates through the original stream.
+  final StreamIterator<List<T>> _iterator;
+
+  /// Keeps track of the number of elements left in the current substream.
+  int _toRead = 0;
+
+  /// Buffered items from a previous chunk. Items in this list should not have
+  /// been read by the user.
+  late List<T> _buffered;
+
+  /// Instance variable representing an empty list object, used as the empty
+  /// default state for [_buffered]. Take caution not to write code that
+  /// directly modify the [_buffered] list by adding elements to it.
+  final List<T> _emptyList = [];
+
+  _ChunkedStreamIterator(Stream<List<T>> stream)
+      : _iterator = StreamIterator(stream) {
+    _buffered = _emptyList;
+  }
+
+  /// Returns a list of the next [size] elements.
+  ///
+  /// Returns a list with less than [size] elements if the end of stream is
+  /// encountered before [size] elements are read.
+  ///
+  /// If an error is encountered before reading [size] elements, the error
+  /// will be thrown.
+  @override
+  Future<List<T>> read(int size) async =>
+      await readChunkedStream(substream(size));
+
+  /// Cancels the stream iterator (and the underlying stream subscription)
+  /// early.
+  ///
+  /// Users should call [cancel] to ensure that the stream is properly closed
+  /// if they need to stop listening earlier than the end of the stream.
+  @override
+  Future<void> cancel() async => await _iterator.cancel();
+
+  /// Returns a sub-[Stream] with the next [size] elements.
+  ///
+  /// A sub-[Stream] is a [Stream] consisting of the next [size] elements
+  /// in the same order they occur in the stream used to create this iterator.
+  ///
+  /// If [read] is called before the sub-[Stream] is fully read, a [StateError]
+  /// will be thrown.
+  ///
+  /// ```dart
+  /// final s = ChunkedStreamIterator(_chunkedStream([
+  ///   ['a', 'b', 'c'],
+  ///   ['1', '2'],
+  /// ]));
+  /// expect(await s.read(1), equals(['a']));
+  ///
+  /// // creates a substream from the chunks holding the
+  /// // next three elements (['b', 'c'], ['1'])
+  /// final i = StreamIterator(s.substream(3));
+  /// expect(await i.moveNext(), isTrue);
+  /// expect(await i.current, equals(['b', 'c']));
+  /// expect(await i.moveNext(), isTrue);
+  /// expect(await i.current, equals(['1']));
+  ///
+  /// // Since the substream has been read till the end, we can continue reading
+  /// // from the initial stream.
+  /// expect(await s.read(1), equals(['2']));
+  /// ```
+  ///
+  /// The resulting stream may contain less than [size] elements if the
+  /// underlying stream has less than [size] elements before the end of stream.
+  ///
+  /// When the substream is cancelled, the remaining elements in the substream
+  /// are drained.
+  @override
+  Stream<List<T>> substream(int size) {
+    if (size < 0) {
+      throw ArgumentError.value(size, 'size', 'must be non-negative');
+    }
+    if (_toRead > 0) {
+      throw StateError('Concurrent invocations are not supported!');
+    }
+
+    _toRead = size;
+
+    // Creates a new [StreamController] made out of the elements from
+    // [_iterator].
+    final substream = _substream();
+    final newController = StreamController<List<T>>();
+
+    // When [newController]'s stream is cancelled, drain all the remaining
+    // elements.
+    newController.onCancel = () async {
+      await _substream().drain();
+    };
+
+    // Since the controller should only have [size] elements, we close
+    // [newController]'s stream once all the elements in [substream] have
+    // been added. This is necessary so that await-for loops on
+    // [newController.stream] will complete.
+    final future = newController.addStream(substream);
+    future.whenComplete(() {
+      newController.close();
+    });
+
+    return newController.stream;
+  }
+
+  /// Asynchronous generator implementation for [substream].
+  Stream<List<T>> _substream() async* {
+    // Only yield when there are elements to be read.
+    while (_toRead > 0) {
+      // If [_buffered] is empty, set it to the next element in the stream if
+      // possible.
+      if (_buffered.isEmpty) {
+        if (!(await _iterator.moveNext())) {
+          break;
+        }
+
+        _buffered = _iterator.current;
+      }
+
+      List<T> toYield;
+      if (_toRead < _buffered.length) {
+        // If there are less than [_buffered.length] elements left to be read
+        // in the substream, sublist the chunk from [_buffered] accordingly.
+        toYield = _buffered.sublist(0, _toRead);
+        _buffered = _buffered.sublist(_toRead);
+        _toRead = 0;
+      } else {
+        // Otherwise prepare to yield the full [_buffered] chunk, updating
+        // the other variables accordingly
+        toYield = _buffered;
+        _toRead -= _buffered.length;
+        _buffered = _emptyList;
+      }
+
+      yield toYield;
+    }
+
+    // Set [_toRead] to be 0. This line is necessary if the size that is passed
+    // in is greater than the number of elements in [_iterator].
+    _toRead = 0;
+  }
+}
+
+/// Extension methods for [ChunkedStreamIterator] when working with byte-streams
+/// [Stream<List<int>>].
+extension ChunkedStreamIteratorByteStreamExt on ChunkedStreamIterator<int> {
+  /// Read bytes as [Uint8List].
+  ///
+  /// This does the same as [read], except it uses [readByteStream] to create
+  /// a [Uint8List], which offers better performance.
+  Future<Uint8List> readBytes(int size) async =>
+      await readByteStream(substream(size));
+}
diff --git a/lib/src/third_party/chunked_stream/lib/src/read_chunked_stream.dart b/lib/src/third_party/chunked_stream/lib/src/read_chunked_stream.dart
new file mode 100644
index 0000000..4b283c2
--- /dev/null
+++ b/lib/src/third_party/chunked_stream/lib/src/read_chunked_stream.dart
@@ -0,0 +1,136 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// @dart = 2.12
+
+import 'dart:async' show Stream, Future;
+import 'dart:typed_data';
+
+import 'package:meta/meta.dart' show sealed;
+
+/// Read all chunks from [input] and return a list consisting of items from all
+/// chunks.
+///
+/// If the number of items exceeds [maxSize], this will stop reading and
+/// throw [MaximumSizeExceeded].
+///
+/// **Example**
+/// ```dart
+/// import 'dart:io';
+///
+/// Future<List<int>> readFile(String filePath) async {
+///   Stream<List<int>> fileStream = File(filePath).openRead();
+///   List<int> contents = await readChunkedStream(fileStream);
+///   return contents;
+/// }
+/// ```
+///
+/// If reading a byte stream of type [Stream<List<int>>], consider using
+/// [readByteStream] instead.
+Future<List<T>> readChunkedStream<T>(
+  Stream<List<T>> input, {
+  int? maxSize,
+}) async {
+  if (maxSize != null && maxSize < 0) {
+    throw ArgumentError.value(maxSize, 'maxSize', 'must be non-negative, if given');
+  }
+
+  final result = <T>[];
+  await for (final chunk in input) {
+    result.addAll(chunk);
+    if (maxSize != null && result.length > maxSize) {
+      throw MaximumSizeExceeded(maxSize);
+    }
+  }
+  return result;
+}
+
+/// Read all bytes from [input] and return a [Uint8List] consisting of all bytes
+/// from [input].
+///
+/// If the number of bytes exceeds [maxSize], this will stop reading and
+/// throw [MaximumSizeExceeded].
+///
+/// **Example**
+/// ```dart
+/// import 'dart:io';
+/// import 'dart:typed_data';
+///
+/// Future<Uint8List> readFile(String filePath) async {
+///   Stream<List<int>> fileStream = File(filePath).openRead();
+///   Uint8List contents = await readByteStream(fileStream);
+///   return contents;
+/// }
+/// ```
+///
+/// This method does the same as [readChunkedStream], except it returns a
+/// [Uint8List] which can be faster when working with bytes.
+///
+/// **Remark** The returned [Uint8List] might be a view on a
+/// larger [ByteBuffer]. Do not use [Uint8List.buffer] without taking into
+/// account [Uint8List.lengthInBytes] and [Uint8List.offsetInBytes].
+/// Doing so is never correct, but in many common cases an instance of
+/// [Uint8List] will not be a view on a larger buffer, so such mistakes can go
+/// undetected. Consider using [Uint8List.sublistView], to create subviews if
+/// necessary.
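+///
+/// For example, to take a prefix of the result safely, without copying and
+/// without touching [Uint8List.buffer] directly:
+/// ```dart
+/// final bytes = await readByteStream(input);
+/// final prefix = Uint8List.sublistView(bytes, 0, 4); // first four bytes
+/// ```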
+Future<Uint8List> readByteStream(
+  Stream<List<int>> input, {
+  int? maxSize,
+}) async {
+  if (maxSize != null && maxSize < 0) {
+    throw ArgumentError.value(maxSize, 'maxSize', 'must be non-negative, if given');
+  }
+
+  final result = BytesBuilder();
+  await for (final chunk in input) {
+    result.add(chunk);
+    if (maxSize != null && result.length > maxSize) {
+      throw MaximumSizeExceeded(maxSize);
+    }
+  }
+  return result.takeBytes();
+}
+
+/// Create a _chunked stream_ limited to the first [maxSize] items from [input].
+///
+/// Throws [MaximumSizeExceeded] if [input] contains more than [maxSize] items.
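+///
+/// A sketch of guarding against unbounded input, where `byteStream` is any
+/// untrusted `Stream<List<int>>`:
+/// ```dart
+/// final limited = limitChunkedStream(byteStream, maxSize: 1024 * 1024);
+/// final bytes = await readByteStream(limited); // throws if input > 1 MiB
+/// ```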
+Stream<List<T>> limitChunkedStream<T>(
+  Stream<List<T>> input, {
+  int? maxSize,
+}) async* {
+  if (maxSize != null && maxSize < 0) {
+    throw ArgumentError.value(maxSize, 'maxSize', 'must be non-negative, if given');
+  }
+
+  var count = 0;
+  await for (final chunk in input) {
+    if (maxSize != null && maxSize - count < chunk.length) {
+      yield chunk.sublist(0, maxSize - count);
+      throw MaximumSizeExceeded(maxSize);
+    }
+    count += chunk.length;
+    yield chunk;
+  }
+}
+
+/// Exception thrown if [maxSize] was exceeded while reading a _chunked stream_.
+///
+/// This is typically thrown by [readChunkedStream] or [readByteStream].
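+///
+/// For example, to reject oversized input gracefully (`input` being any byte
+/// stream):
+/// ```dart
+/// try {
+///   await readByteStream(input, maxSize: 1024 * 1024);
+/// } on MaximumSizeExceeded catch (e) {
+///   print('Input exceeded ${e.maxSize} bytes');
+/// }
+/// ```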
+@sealed
+class MaximumSizeExceeded implements Exception {
+  final int maxSize;
+  const MaximumSizeExceeded(this.maxSize);
+
+  @override
+  String toString() => 'Input stream exceeded the maxSize: $maxSize';
+}
diff --git a/lib/src/third_party/chunked_stream/mono_pkg.yaml b/lib/src/third_party/chunked_stream/mono_pkg.yaml
new file mode 100644
index 0000000..bb2c0c0
--- /dev/null
+++ b/lib/src/third_party/chunked_stream/mono_pkg.yaml
@@ -0,0 +1,10 @@
+dart:
+# todo: Uncomment once Dart 2.12 is stable
+#  - stable
+  - dev
+stages:
+  - analyze:
+      - dartanalyzer
+      - dartfmt
+  - tests:
+      - test
diff --git a/lib/src/third_party/chunked_stream/pubspec.yaml b/lib/src/third_party/chunked_stream/pubspec.yaml
new file mode 100644
index 0000000..5cfb79b
--- /dev/null
+++ b/lib/src/third_party/chunked_stream/pubspec.yaml
@@ -0,0 +1,15 @@
+name: chunked_stream
+version: 1.4.0
+description: |
+  Utilities for working with chunked streams, such as byte streams, which are
+  often given as a stream of byte chunks with type `Stream<List<int>>`.
+homepage: https://github.com/google/dart-neats/tree/master/chunked_stream
+repository: https://github.com/google/dart-neats.git
+issue_tracker: https://github.com/google/dart-neats/labels/pkg:chunked_stream
+dependencies:
+  meta: ^1.3.0
+dev_dependencies:
+  test: ^1.16.0
+  pedantic: ^1.4.0
+environment:
+  sdk: ">=2.12.0-259.9.beta <3.0.0"
diff --git a/lib/src/third_party/chunked_stream/test/chunk_stream_test.dart b/lib/src/third_party/chunked_stream/test/chunk_stream_test.dart
new file mode 100644
index 0000000..ff3648e
--- /dev/null
+++ b/lib/src/third_party/chunked_stream/test/chunk_stream_test.dart
@@ -0,0 +1,49 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// @dart = 2.12
+
+import 'package:test/test.dart';
+import 'package:chunked_stream/chunked_stream.dart';
+
+void main() {
+  for (var N = 1; N < 6; N++) {
+    test('asChunkedStream (N = $N) preserves elements', () async {
+      final s = (() async* {
+        for (var j = 0; j < 97; j++) {
+          yield j;
+        }
+      })();
+
+      final result = await readChunkedStream(asChunkedStream(N, s));
+      expect(result, hasLength(97));
+      expect(result, equals(List.generate(97, (j) => j)));
+    });
+
+    test('asChunkedStream (N = $N) has chunk size N', () async {
+      final s = (() async* {
+        for (var j = 0; j < 97; j++) {
+          yield j;
+        }
+      })();
+
+      final chunks = await asChunkedStream(N, s).toList();
+
+      // Last chunk may be smaller than N
+      expect(chunks.removeLast(), hasLength(lessThanOrEqualTo(N)));
+      // Last chunk must be N
+      expect(chunks, everyElement(hasLength(N)));
+    });
+  }
+}
diff --git a/lib/src/third_party/chunked_stream/test/chunked_stream_buffer_test.dart b/lib/src/third_party/chunked_stream/test/chunked_stream_buffer_test.dart
new file mode 100644
index 0000000..d5c6329
--- /dev/null
+++ b/lib/src/third_party/chunked_stream/test/chunked_stream_buffer_test.dart
@@ -0,0 +1,34 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// @dart = 2.12
+
+import 'package:test/test.dart';
+import 'package:chunked_stream/chunked_stream.dart';
+
+void main() {
+  for (var i = 1; i < 6; i++) {
+    test('bufferChunkedStream (bufferSize: $i)', () async {
+      final s = (() async* {
+        yield ['a'];
+        yield ['b'];
+        yield ['c'];
+      })();
+
+      final bs = bufferChunkedStream(s, bufferSize: i);
+      final result = await readChunkedStream(bs);
+      expect(result, equals(['a', 'b', 'c']));
+    });
+  }
+}
diff --git a/lib/src/third_party/chunked_stream/test/chunked_stream_iterator_test.dart b/lib/src/third_party/chunked_stream/test/chunked_stream_iterator_test.dart
new file mode 100644
index 0000000..197a2e1
--- /dev/null
+++ b/lib/src/third_party/chunked_stream/test/chunked_stream_iterator_test.dart
@@ -0,0 +1,382 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// @dart = 2.12
+
+import 'dart:async';
+import 'dart:typed_data';
+import 'package:test/test.dart';
+import 'package:chunked_stream/chunked_stream.dart';
+
+Stream<List<T>> _chunkedStream<T>(List<List<T>> chunks) async* {
+  for (final chunk in chunks) {
+    yield chunk;
+  }
+}
+
+Stream<List<T>> _chunkedStreamWithError<T>(List<List<T>> chunks) async* {
+  for (final chunk in chunks) {
+    yield chunk;
+  }
+
+  throw StateError('test generated error');
+}
+
+void main() {
+  test('read() -- chunk in given size', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+    ]));
+    expect(await s.read(3), equals(['a', 'b', 'c']));
+    expect(await s.read(2), equals(['1', '2']));
+    expect(await s.read(1), equals([]));
+  });
+
+  test('read() propagates stream error', () async {
+    final s = ChunkedStreamIterator(_chunkedStreamWithError([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+    ]));
+    expect(await s.read(3), equals(['a', 'b', 'c']));
+    expect(() async => await s.read(3), throwsStateError);
+  });
+
+  test('read() -- reads across chunk boundaries', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+    ]));
+    expect(await s.read(2), equals(['a', 'b']));
+    expect(await s.read(3), equals(['c', '1', '2']));
+    expect(await s.read(1), equals([]));
+  });
+
+  test('read() -- chunks one item at a time', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+    ]));
+    expect(await s.read(1), equals(['a']));
+    expect(await s.read(1), equals(['b']));
+    expect(await s.read(1), equals(['c']));
+    expect(await s.read(1), equals(['1']));
+    expect(await s.read(1), equals(['2']));
+    expect(await s.read(1), equals([]));
+  });
+
+  test('read() -- one big chunk', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+    ]));
+    expect(await s.read(6), equals(['a', 'b', 'c', '1', '2']));
+  });
+
+  test('substream() propagates stream error', () async {
+    final s = ChunkedStreamIterator(_chunkedStreamWithError([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+    ]));
+    expect(await s.read(3), equals(['a', 'b', 'c']));
+    final substream = s.substream(3);
+    final subChunkedStreamIterator = ChunkedStreamIterator(substream);
+    expect(
+        () async => await subChunkedStreamIterator.read(3), throwsStateError);
+  });
+
+  test('substream() + readChunkedStream()', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+    ]));
+    expect(await readChunkedStream(s.substream(5)),
+        equals(['a', 'b', 'c', '1', '2']));
+    expect(await s.read(1), equals([]));
+  });
+
+  test('(substream() + readChunkedStream()) x 2', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+    ]));
+    expect(await readChunkedStream(s.substream(2)), equals(['a', 'b']));
+    expect(await readChunkedStream(s.substream(3)), equals(['c', '1', '2']));
+  });
+
+  test('substream() + readChunkedStream() -- past end', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+    ]));
+    expect(await readChunkedStream(s.substream(6)),
+        equals(['a', 'b', 'c', '1', '2']));
+    expect(await s.read(1), equals([]));
+  });
+
+  test('read() substream() + readChunkedStream() read()', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+    ]));
+    expect(await s.read(1), equals(['a']));
+    expect(await readChunkedStream(s.substream(3)), equals(['b', 'c', '1']));
+    expect(await s.read(2), equals(['2']));
+  });
+
+  test(
+      'read() StreamIterator(substream()).cancel() read() '
+      '-- one item at a time', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+    ]));
+    expect(await s.read(1), equals(['a']));
+    final i = StreamIterator(s.substream(3));
+    expect(await i.moveNext(), isTrue);
+    await i.cancel();
+    expect(await s.read(1), equals(['2']));
+    expect(await s.read(1), equals([]));
+  });
+
+  test(
+      'read() StreamIterator(substream()) read() '
+      '-- one item at a time', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+    ]));
+    expect(await s.read(1), equals(['a']));
+    final i = StreamIterator(s.substream(3));
+    expect(await i.moveNext(), isTrue);
+    expect(await i.current, equals(['b', 'c']));
+    expect(await i.moveNext(), isTrue);
+    expect(await i.current, equals(['1']));
+    expect(await i.moveNext(), isFalse);
+    expect(await s.read(1), equals(['2']));
+    expect(await s.read(1), equals([]));
+  });
+
+  test('substream() x 2', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+    ]));
+    expect(
+        await s.substream(2).toList(),
+        equals([
+          ['a', 'b']
+        ]));
+    expect(
+        await s.substream(3).toList(),
+        equals([
+          ['c'],
+          ['1', '2']
+        ]));
+  });
+
+  test(
+      'read() StreamIterator(substream()).cancel() read() -- '
+      'cancellation after reading', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+    ]));
+    expect(await s.read(1), equals(['a']));
+    final i = StreamIterator(s.substream(3));
+    expect(await i.moveNext(), isTrue);
+    await i.cancel();
+    expect(await s.read(1), equals(['2']));
+    expect(await s.read(1), equals([]));
+  });
+
+  test(
+      'read() StreamIterator(substream()).cancel() read() -- '
+      'cancellation after reading (2)', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2', '3'],
+      ['4', '5', '6']
+    ]));
+    expect(await s.read(1), equals(['a']));
+    final i = StreamIterator(s.substream(6));
+    expect(await i.moveNext(), isTrue);
+    await i.cancel();
+    expect(await s.read(1), equals(['5']));
+    expect(await s.read(1), equals(['6']));
+  });
+
+  // The following test fails because before the first `moveNext` is called,
+  // the [StreamIterator] is not initialized to the correct
+  // [StreamSubscription], thus calling `cancel` does not correctly cancel the
+  // underlying stream, resulting in an error.
+  //
+  // test(
+  //     'read() substream().cancel() read() -- '
+  //     'cancellation without reading', () async {
+  //   final s = ChunkedStreamIterator(_chunkedStream([
+  //     ['a', 'b', 'c'],
+  //     ['1', '2'],
+  //   ]));
+  //   expect(await s.read(1), equals(['a']));
+  //   final i = StreamIterator(s.substream(3));
+  //   await i.cancel();
+  //   expect(await s.read(1), equals(['1']));
+  //   expect(await s.read(1), equals(['2']));
+  // });
+
+  test(
+      'read() StreamIterator(substream()) read() -- '
+      'not cancelling produces StateError', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+    ]));
+    expect(await s.read(1), equals(['a']));
+    final i = StreamIterator(s.substream(3));
+    expect(await i.moveNext(), isTrue);
+    expect(() async => await s.read(1), throwsStateError);
+  });
+
+  test(
+      'read() StreamIterator(substream()) read() -- '
+      'not cancelling produces StateError (2)', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+    ]));
+    expect(await s.read(1), equals(['a']));
+
+    // ignore: unused_local_variable
+    final i = StreamIterator(s.substream(3));
+    expect(() async => await s.read(1), throwsStateError);
+  });
+
+  test(
+      'read() substream() that ends with first chunk + '
+      'readChunkedStream() read()', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+    ]));
+    expect(await s.read(1), equals(['a']));
+    expect(
+        await s.substream(2).toList(),
+        equals([
+          ['b', 'c']
+        ]));
+    expect(await s.read(3), equals(['1', '2']));
+  });
+
+  test(
+      'read() substream() that ends with first chunk + drain() '
+      'read()', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+    ]));
+    expect(await s.read(1), equals(['a']));
+    final sub = s.substream(2);
+    await sub.drain();
+    expect(await s.read(3), equals(['1', '2']));
+  });
+
+  test(
+      'read() substream() that ends with second chunk + '
+      'readChunkedStream() read()', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+      ['3', '4']
+    ]));
+    expect(await s.read(1), equals(['a']));
+    expect(
+        await s.substream(4).toList(),
+        equals([
+          ['b', 'c'],
+          ['1', '2']
+        ]));
+    expect(await s.read(3), equals(['3', '4']));
+  });
+
+  test(
+      'read() substream() that ends with second chunk + '
+      'drain() read()', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+      ['3', '4'],
+    ]));
+    expect(await s.read(1), equals(['a']));
+    final substream = s.substream(4);
+    await substream.drain();
+    expect(await s.read(3), equals(['3', '4']));
+  });
+
+  test(
+      'read() substream() read() before '
+      'draining substream produces StateError', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+      ['3', '4'],
+    ]));
+    expect(await s.read(1), equals(['a']));
+    // ignore: unused_local_variable
+    final substream = s.substream(4);
+    expect(() async => await s.read(3), throwsStateError);
+  });
+
+  test('creating two substreams simultaneously causes a StateError', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b', 'c'],
+      ['1', '2'],
+      ['3', '4'],
+    ]));
+    expect(await s.read(1), equals(['a']));
+    // ignore: unused_local_variable
+    final substream = s.substream(4);
+    expect(() async {
+      // ignore: unused_local_variable
+      final substream2 = s.substream(3);
+    }, throwsStateError);
+  });
+
+  test('nested ChunkedStreamIterator', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      ['a', 'b'],
+      ['1', '2'],
+      ['3', '4'],
+    ]));
+    expect(await s.read(1), equals(['a']));
+    final substream = s.substream(4);
+    final nested = ChunkedStreamIterator(substream);
+    expect(await nested.read(2), equals(['b', '1']));
+    expect(await nested.read(3), equals(['2', '3']));
+    expect(await nested.read(2), equals([]));
+    expect(await s.read(1), equals(['4']));
+  });
+
+  test('ByteStreamIterator', () async {
+    final s = ChunkedStreamIterator(_chunkedStream([
+      [1, 2, 3],
+      [4],
+    ]));
+    expect(await s.readBytes(1), equals([1]));
+    expect(await s.readBytes(1), isA<Uint8List>());
+    expect(await s.readBytes(1), equals([3]));
+    expect(await s.readBytes(1), equals([4]));
+    expect(await s.readBytes(1), equals([]));
+  });
+}
diff --git a/lib/src/third_party/chunked_stream/test/read_chunked_stream_test.dart b/lib/src/third_party/chunked_stream/test/read_chunked_stream_test.dart
new file mode 100644
index 0000000..2a4b143
--- /dev/null
+++ b/lib/src/third_party/chunked_stream/test/read_chunked_stream_test.dart
@@ -0,0 +1,42 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// @dart = 2.12
+
+import 'dart:typed_data';
+
+import 'package:test/test.dart';
+import 'package:chunked_stream/chunked_stream.dart';
+
+void main() {
+  test('readChunkedStream', () async {
+    final s = (() async* {
+      yield ['a'];
+      yield ['b'];
+      yield ['c'];
+    })();
+    expect(await readChunkedStream(s), equals(['a', 'b', 'c']));
+  });
+
+  test('readByteStream', () async {
+    final s = (() async* {
+      yield [1, 2];
+      yield Uint8List.fromList([3]);
+      yield [4];
+    })();
+    final result = await readByteStream(s);
+    expect(result, equals([1, 2, 3, 4]));
+    expect(result, isA<Uint8List>());
+  });
+}
diff --git a/lib/src/third_party/tar/.gitignore b/lib/src/third_party/tar/.gitignore
new file mode 100644
index 0000000..e8735c4
--- /dev/null
+++ b/lib/src/third_party/tar/.gitignore
@@ -0,0 +1,17 @@
+# Files and directories created by pub
+.dart_tool/
+.packages
+
+# Omit committing pubspec.lock for library packages:
+# https://dart.dev/guides/libraries/private-files#pubspeclock
+pubspec.lock
+
+# Conventional directory for build outputs
+build/
+
+# Directory created by dartdoc
+doc/api/
+
+# Generated in the example
+test.tar
+.vscode
\ No newline at end of file
diff --git a/lib/src/third_party/tar/CHANGELOG.md b/lib/src/third_party/tar/CHANGELOG.md
new file mode 100644
index 0000000..3bda04b
--- /dev/null
+++ b/lib/src/third_party/tar/CHANGELOG.md
@@ -0,0 +1,29 @@
+## 0.3.0
+
+- Remove outdated references in the documentation
+
+## 0.3.0-nullsafety.0
+
+- Remove `TarReader.contents` and `TarReader.header`. Use `current.contents` and `current.header`, respectively.
+- Fix some minor implementation details
+
+## 0.2.0-nullsafety
+
+Most of the tar package has been rewritten; it is now based on the
+implementation written by [Garett Tok Ern Liang](https://github.com/walnutdust)
+during GSoC 2020.
+
+- Added `tar` prefix to exported symbols.
+- Remove `MemoryEntry`. Use `TarEntry.data` to create a tar entry from bytes.
+- Make `WritingSink` private. Use `tarWritingSink` to create a general `StreamSink<tar.Entry>`.
+- `TarReader` is now a [`StreamIterator`](https://api.dart.dev/stable/2.10.4/dart-async/StreamIterator-class.html);
+  the previous transformer-based API had some design flaws.
+
+## 0.1.0-nullsafety.1
+
+- Support writing user and group names
+- Better support for PAX-headers and large files
+
+## 0.1.0-nullsafety.0
+
+- Initial version
diff --git a/lib/src/third_party/tar/LICENSE b/lib/src/third_party/tar/LICENSE
new file mode 100644
index 0000000..ed92ded
--- /dev/null
+++ b/lib/src/third_party/tar/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Simon Binder
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/lib/src/third_party/tar/METADATA b/lib/src/third_party/tar/METADATA
new file mode 100644
index 0000000..922c4b9
--- /dev/null
+++ b/lib/src/third_party/tar/METADATA
@@ -0,0 +1,15 @@
+name: "tar"
+description:
+    "Memory-efficient, streaming implementation of the tar file format"
+
+third_party {
+  url {
+    type: GIT
+    value: "https://github.com/simolus3/tar/"
+  }
+  version: "0.3.0"
+  last_upgrade_date { year: 2021 month: 3 day: 23 }
+  license_type: NOTICE
+  local_modifications:
+    "Added @dart=2.12 headers to all dart files."
+}
diff --git a/lib/src/third_party/tar/README.md b/lib/src/third_party/tar/README.md
new file mode 100644
index 0000000..65e40b3
--- /dev/null
+++ b/lib/src/third_party/tar/README.md
@@ -0,0 +1,108 @@
+# tar
+
+![Build status](https://github.com/simolus3/tar/workflows/build/badge.svg)
+
+This package provides stream-based readers and writers for tar files.
+
+When working with large tar files, this library consumes considerably less memory
+than [package:archive](https://pub.dev/packages/archive), although it is slightly slower.
+
+## Reading
+
+To read entries from a tar file, use
+
+```dart
+import 'dart:convert';
+import 'dart:io';
+import 'package:tar/tar.dart';
+
+Future<void> main() async {
+  final reader = TarReader(File('file.tar').openRead());
+
+  while (await reader.moveNext()) {
+    final entry = reader.current;
+    // Use entry.header to inspect the header of the current tar entry
+    print(entry.header.name);
+    // and entry.contents to read its content as a stream
+    print(await entry.contents.transform(utf8.decoder).first);
+  }
+  // Note that the reader will automatically close if moveNext() returns false or
+  // throws. If you want to close a tar stream before that happens, use
+  // reader.cancel();
+}
+```
+
+To read `.tar.gz` files, transform the stream with `gzip.decoder` before
+passing it to the `TarReader`.
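+
+For instance, a minimal sketch (assuming a local `file.tar.gz` exists):
+
+```dart
+import 'dart:io';
+import 'package:tar/tar.dart';
+
+Future<void> main() async {
+  // Decompress the gzip layer first, then hand the raw tar bytes to the reader.
+  final input = File('file.tar.gz').openRead().transform(gzip.decoder);
+
+  final reader = TarReader(input);
+  while (await reader.moveNext()) {
+    print(reader.current.header.name);
+  }
+}
+```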
+
+To easily go through all entries in a tar file, use `TarReader.forEach`:
+
+```dart
+Future<void> main() async {
+  final inputStream = File('file.tar').openRead();
+
+  await TarReader.forEach(inputStream, (entry) async {
+    print(entry.header.name);
+    print(await entry.contents.transform(utf8.decoder).first);
+  });
+}
+```
+
+__Warning__: Since the reader is backed by a single stream, concurrent calls to
+`moveNext()` are not allowed! Similarly, if you're reading from an entry's
+`contents`, make sure to fully drain the stream before calling `moveNext()`
+again.
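+
+For instance, a minimal sketch (assuming a local `file.tar` exists) that fully
+consumes each entry before advancing:
+
+```dart
+import 'dart:io';
+import 'package:tar/tar.dart';
+
+Future<void> main() async {
+  final reader = TarReader(File('file.tar').openRead());
+  while (await reader.moveNext()) {
+    // Drain the contents completely; advancing past a partially-listened
+    // entry would throw a StateError.
+    final size = await reader.current.contents
+        .fold<int>(0, (sum, chunk) => sum + chunk.length);
+    print('${reader.current.name}: $size bytes');
+  }
+}
+```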
+
+## Writing
+
+You can write tar files into a `StreamSink<List<int>>`, such as an `IOSink`:
+
+```dart
+import 'dart:convert';
+import 'dart:io';
+import 'package:tar/tar.dart';
+
+Future<void> main() async {
+  final output = File('test.tar').openWrite();
+
+  await Stream<TarEntry>.value(
+    TarEntry.data(
+      TarHeader(
+        name: 'hello.txt',
+        mode: int.parse('644', radix: 8),
+      ),
+      utf8.encode('Hello world'),
+    ),
+  ).pipe(tarWritingSink(output));
+}
+```
+
+Note that tar files are always written in the pax format defined by the POSIX.1-2001 specification
+(`--format=posix` in GNU tar).
+When all entries have file names shorter than 100 chars and a size smaller than 8 GB, this is
+equivalent to the `ustar` format. This library won't write PAX headers when there is no reason to do so.
+
+To write `.tar.gz` files, transform the stream twice: first with the tar
+writer, then with the gzip encoder:
+
+```dart
+import 'dart:io';
+import 'package:tar/tar.dart';
+
+Future<void> write(Stream<TarEntry> entries) {
+  return entries
+      .transform(tarWriter)
+      .transform(gzip.encoder)
+      .pipe(File('output.tar.gz').openWrite());
+}
+```
+
+## Features
+
+- Supports v7, ustar, pax, gnu and star archives
+- Supports extended pax headers for long file or link names
+- Supports long file and link names generated by GNU-tar
+- Hardened against denial-of-service attacks with invalid tar files
+
+-----
+
+Big thanks to [Garett Tok Ern Liang](https://github.com/walnutdust) for writing the initial 
+Dart tar reader that this library is based on.
\ No newline at end of file
diff --git a/lib/src/third_party/tar/analysis_options.yaml b/lib/src/third_party/tar/analysis_options.yaml
new file mode 100644
index 0000000..a4e40fa
--- /dev/null
+++ b/lib/src/third_party/tar/analysis_options.yaml
@@ -0,0 +1,17 @@
+include: package:extra_pedantic/analysis_options.yaml
+
+analyzer:
+  strong-mode:
+    implicit-casts: false
+    implicit-dynamic: false
+  language:
+    strict-inference: true
+    strict-raw-types: true
+
+linter:
+  rules:
+    close_sinks: false # This rule has just too many false-positives...
+    comment_references: true
+    literal_only_boolean_expressions: false # Nothing wrong with a little while(true)
+    parameter_assignments: false
+    unnecessary_await_in_return: false
diff --git a/lib/src/third_party/tar/example/main.dart b/lib/src/third_party/tar/example/main.dart
new file mode 100644
index 0000000..65cab46
--- /dev/null
+++ b/lib/src/third_party/tar/example/main.dart
@@ -0,0 +1,37 @@
+import 'dart:convert';
+import 'dart:io';
+
+import 'package:tar/tar.dart';
+
+Future<void> main() async {
+  // Start reading a tar file
+  final reader = TarReader(File('reference/gnu.tar').openRead());
+
+  while (await reader.moveNext()) {
+    final header = reader.current.header;
+    print('${header.name}: ');
+
+    // Print the output if it's a regular file
+    if (header.typeFlag == TypeFlag.reg) {
+      await reader.current.contents.transform(utf8.decoder).forEach(print);
+    }
+  }
+
+  // We can write tar files to any stream sink like this:
+  final output = File('test.tar').openWrite();
+
+  await Stream<TarEntry>.value(
+    TarEntry.data(
+      TarHeader(
+          name: 'hello_dart.txt',
+          mode: int.parse('644', radix: 8),
+          userName: 'Dart',
+          groupName: 'Dartgroup'),
+      utf8.encode('Hello world'),
+    ),
+  )
+      // transform tar entries back to a byte stream
+      .transform(tarWriter)
+      // and then write that to the file
+      .pipe(output);
+}
diff --git a/lib/src/third_party/tar/lib/src/charcodes.dart b/lib/src/third_party/tar/lib/src/charcodes.dart
new file mode 100644
index 0000000..f6c2d79
--- /dev/null
+++ b/lib/src/third_party/tar/lib/src/charcodes.dart
@@ -0,0 +1,73 @@
+// @dart = 2.12
+
+/// "Line feed" control character.
+const int $lf = 0x0a;
+
+/// Space character.
+const int $space = 0x20;
+
+/// Character `0`.
+const int $0 = 0x30;
+
+/// Character `1`.
+const int $1 = 0x31;
+
+/// Character `2`.
+const int $2 = 0x32;
+
+/// Character `3`.
+const int $3 = 0x33;
+
+/// Character `4`.
+const int $4 = 0x34;
+
+/// Character `5`.
+const int $5 = 0x35;
+
+/// Character `6`.
+const int $6 = 0x36;
+
+/// Character `7`.
+const int $7 = 0x37;
+
+/// Character `8`.
+const int $8 = 0x38;
+
+/// Character `9`.
+const int $9 = 0x39;
+
+/// Character `=`.
+const int $equal = 0x3d;
+
+/// Character `A`.
+const int $A = 0x41;
+
+/// Character `K`.
+const int $K = 0x4b;
+
+/// Character `L`.
+const int $L = 0x4c;
+
+/// Character `S`.
+const int $S = 0x53;
+
+/// Character `a`.
+const int $a = 0x61;
+
+/// Character `g`.
+const int $g = 0x67;
+
+/// Character `r`.
+const int $r = 0x72;
+
+/// Character `s`.
+const int $s = 0x73;
+
+/// Character `t`.
+const int $t = 0x74;
+
+/// Character `u`.
+const int $u = 0x75;
+
+/// Character `x`.
+const int $x = 0x78;
diff --git a/lib/src/third_party/tar/lib/src/constants.dart b/lib/src/third_party/tar/lib/src/constants.dart
new file mode 100644
index 0000000..9180a0a
--- /dev/null
+++ b/lib/src/third_party/tar/lib/src/constants.dart
@@ -0,0 +1,262 @@
+// @dart = 2.12
+
+import 'dart:typed_data';
+
+import 'charcodes.dart';
+import 'exception.dart';
+import 'header.dart' show TarHeader; // for dartdoc
+
+// Magic values to help us identify the TAR header type.
+const magicGnu = [$u, $s, $t, $a, $r, $space]; // 'ustar '
+const versionGnu = [$space, 0]; // ' \x00'
+const magicUstar = [$u, $s, $t, $a, $r, 0]; // 'ustar\x00'
+const versionUstar = [$0, $0]; // '00'
+const trailerStar = [$t, $a, $r, 0]; // 'tar\x00'
+
+/// Type flags for [TarHeader].
+///
+/// The type flag of a header indicates the kind of file associated with the
+/// entry. This enum contains the various type flags over the different TAR
+/// formats, and users should be careful that the type flag corresponds to the
+/// TAR format they are working with.
+enum TypeFlag {
+  /// [reg] indicates regular files.
+  ///
+  /// Old tar implementations have a separate `TypeRegA` value. This library
+  /// will transparently read those as [reg].
+  reg,
+
+  /// Legacy-version of [reg] in old tar implementations.
+  ///
+  /// This is only used internally.
+  regA,
+
+  /// Hard link - header-only, may not have a data body
+  link,
+
+  /// Symbolic link - header-only, may not have a data body
+  symlink,
+
+  /// Character device node - header-only, may not have a data body
+  char,
+
+  /// Block device node - header-only, may not have a data body
+  block,
+
+  /// Directory - header-only, may not have a data body
+  dir,
+
+  /// FIFO node - header-only, may not have a data body
+  fifo,
+
+  /// Currently does not have any meaning, but is reserved for the future.
+  reserved,
+
+  /// Used by the PAX format to store key-value records that are only relevant
+  /// to the next file.
+  ///
+  /// This package transparently handles these types.
+  xHeader,
+
+  /// Used by the PAX format to store key-value records that are relevant to all
+  /// subsequent files.
+  ///
+  /// This package only supports parsing and composing such headers,
+  /// but does not currently support persisting the global state across files.
+  xGlobalHeader,
+
+  /// Indicates a sparse file in the GNU format.
+  gnuSparse,
+
+  /// Used by the GNU format for a meta file to store the path or link name for
+  /// the next file.
+  /// This package transparently handles these types.
+  gnuLongName,
+  gnuLongLink,
+
+  /// Vendor-specific typeflag, as defined in POSIX.1-1998. Considered
+  /// outdated, but may still appear in old files.
+  ///
+  /// This library uses a single enum value to catch them all.
+  vendor
+}
+
+/// Returns the [TypeFlag] corresponding to [byte].
+TypeFlag typeflagFromByte(int byte) {
+  switch (byte) {
+    case $0:
+      return TypeFlag.reg;
+    case 0:
+      return TypeFlag.regA;
+    case $1:
+      return TypeFlag.link;
+    case $2:
+      return TypeFlag.symlink;
+    case $3:
+      return TypeFlag.char;
+    case $4:
+      return TypeFlag.block;
+    case $5:
+      return TypeFlag.dir;
+    case $6:
+      return TypeFlag.fifo;
+    case $7:
+      return TypeFlag.reserved;
+    case $x:
+      return TypeFlag.xHeader;
+    case $g:
+      return TypeFlag.xGlobalHeader;
+    case $S:
+      return TypeFlag.gnuSparse;
+    case $L:
+      return TypeFlag.gnuLongName;
+    case $K:
+      return TypeFlag.gnuLongLink;
+    default:
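+      // Bytes 65 through 90 ('A' to 'Z') are reserved for vendor-specific
+      // extensions (see TypeFlag.vendor).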
+      if (64 < byte && byte < 91) {
+        return TypeFlag.vendor;
+      }
+      throw TarException.header('Invalid typeflag value $byte');
+  }
+}
+
+int typeflagToByte(TypeFlag flag) {
+  switch (flag) {
+    case TypeFlag.reg:
+    case TypeFlag.regA:
+      return $0;
+    case TypeFlag.link:
+      return $1;
+    case TypeFlag.symlink:
+      return $2;
+    case TypeFlag.char:
+      return $3;
+    case TypeFlag.block:
+      return $4;
+    case TypeFlag.dir:
+      return $5;
+    case TypeFlag.fifo:
+      return $6;
+    case TypeFlag.reserved:
+      return $7;
+    case TypeFlag.xHeader:
+      return $x;
+    case TypeFlag.xGlobalHeader:
+      return $g;
+    case TypeFlag.gnuSparse:
+      return $S;
+    case TypeFlag.gnuLongName:
+      return $L;
+    case TypeFlag.gnuLongLink:
+      return $K;
+    case TypeFlag.vendor:
+      throw ArgumentError("Can't write vendor-specific type-flags");
+  }
+}
+
+/// Keywords for PAX extended header records.
+const paxPath = 'path';
+const paxLinkpath = 'linkpath';
+const paxSize = 'size';
+const paxUid = 'uid';
+const paxGid = 'gid';
+const paxUname = 'uname';
+const paxGname = 'gname';
+const paxMtime = 'mtime';
+const paxAtime = 'atime';
+const paxCtime =
+    'ctime'; // Removed from later revision of PAX spec, but was valid
+const paxComment = 'comment';
+const paxSchilyXattr = 'SCHILY.xattr.';
+
+/// Keywords for GNU sparse files in a PAX extended header.
+const paxGNUSparse = 'GNU.sparse.';
+const paxGNUSparseNumBlocks = 'GNU.sparse.numblocks';
+const paxGNUSparseOffset = 'GNU.sparse.offset';
+const paxGNUSparseNumBytes = 'GNU.sparse.numbytes';
+const paxGNUSparseMap = 'GNU.sparse.map';
+const paxGNUSparseName = 'GNU.sparse.name';
+const paxGNUSparseMajor = 'GNU.sparse.major';
+const paxGNUSparseMinor = 'GNU.sparse.minor';
+const paxGNUSparseSize = 'GNU.sparse.size';
+const paxGNUSparseRealSize = 'GNU.sparse.realsize';
+
+/// A set of pax header keys supported by this library.
+///
+/// The reader will ignore pax headers not listed in this set.
+const supportedPaxHeaders = {
+  paxPath,
+  paxLinkpath,
+  paxSize,
+  paxUid,
+  paxGid,
+  paxUname,
+  paxGname,
+  paxMtime,
+  paxAtime,
+  paxCtime,
+  paxComment,
+  paxSchilyXattr,
+  paxGNUSparse,
+  paxGNUSparseNumBlocks,
+  paxGNUSparseOffset,
+  paxGNUSparseNumBytes,
+  paxGNUSparseMap,
+  paxGNUSparseName,
+  paxGNUSparseMajor,
+  paxGNUSparseMinor,
+  paxGNUSparseSize,
+  paxGNUSparseRealSize
+};
+
+/// User ID bit
+const c_ISUID = 2048;
+
+/// Group ID bit
+const c_ISGID = 1024;
+
+/// Sticky bit
+const c_ISVTX = 512;
+
+/// **********************
+///  Convenience constants
+/// **********************
+/// 64-bit integer max and min values
+const int64MaxValue = 9223372036854775807;
+const int64MinValue = -9223372036854775808;
+
+/// Constants to determine file modes.
+const modeType = 2401763328;
+const modeSymLink = 134217728;
+const modeDevice = 67108864;
+const modeCharDevice = 2097152;
+const modeNamedPipe = 33554432;
+const modeSocket = 16777216;
+const modeSetUid = 8388608;
+const modeSetGid = 4194304;
+const modeSticky = 1048576;
+const modeDirectory = 2147483648;
+
+/// The offset of the checksum in the header
+const checksumOffset = 148;
+const checksumLength = 8;
+const magicOffset = 257;
+const versionOffset = 263;
+const starTrailerOffset = 508;
+
+/// Size constants from various TAR specifications.
+/// Size of each block in a TAR stream.
+const blockSize = 512;
+const blockSizeLog2 = 9;
+const maxIntFor12CharOct = 0x1ffffffff; // 777 7777 7777 in oct
+
+const defaultSpecialLength = 4 * blockSize;
+
+/// Max length of the name field in USTAR format.
+const nameSize = 100;
+
+/// Max length of the prefix field in USTAR format.
+const prefixSize = 155;
+
+/// A full TAR block of zeros.
+final zeroBlock = Uint8List(blockSize);
diff --git a/lib/src/third_party/tar/lib/src/entry.dart b/lib/src/third_party/tar/lib/src/entry.dart
new file mode 100644
index 0000000..1a36e0e
--- /dev/null
+++ b/lib/src/third_party/tar/lib/src/entry.dart
@@ -0,0 +1,61 @@
+// @dart = 2.12
+
+import 'dart:async';
+
+import 'package:meta/meta.dart';
+
+import 'constants.dart';
+import 'header.dart';
+
+/// An entry in a tar file.
+///
+/// Usually, tar entries are read from a stream, and they're bound to the stream
+/// from which they've been read. This means that they can only be read once,
+/// and that only one [TarEntry] is active at a time.
+@sealed
+class TarEntry {
+  /// The parsed [TarHeader] of this tar entry.
+  final TarHeader header;
+
+  /// The content stream of the active tar entry.
+  ///
+  /// For tar entries read through the reader provided by this library,
+  /// [contents] is a single-subscription stream backed by the original stream
+  /// used to create the reader.
+  /// When listening on [contents], the stream needs to be fully drained before
+  /// the next call to [StreamIterator.moveNext]. It's acceptable to not listen
+  /// to [contents] at all before calling [StreamIterator.moveNext] again.
+  /// In that case, this library will take care of draining the stream to get to
+  /// the next entry.
+  final Stream<List<int>> contents;
+
+  /// The name of this entry, as indicated in the header or a previous pax
+  /// entry.
+  String get name => header.name;
+
+  /// The type of tar entry (file, directory, etc.).
+  TypeFlag get type => header.typeFlag;
+
+  /// The content size of this entry, in bytes.
+  int get size => header.size;
+
+  /// Time of the last modification of this file, as indicated in the [header].
+  DateTime get modified => header.modified;
+
+  /// Creates a tar entry from a [header] and the [contents] stream.
+  ///
+  /// If the total length of [contents] is known, consider setting the
+  /// [header]'s [TarHeader.size] property to the appropriate value.
+  /// Otherwise, the tar writer needs to buffer contents to determine the right
+  /// size.
+  // factory so that this class can't be extended
+  factory TarEntry(TarHeader header, Stream<List<int>> contents) = TarEntry._;
+
+  TarEntry._(this.header, this.contents);
+
+  /// Creates an in-memory tar entry from the [header] and the [data] to store.
+  factory TarEntry.data(TarHeader header, List<int> data) {
+    (header as HeaderImpl).size = data.length;
+    return TarEntry(header, Stream.value(data));
+  }
+}
diff --git a/lib/src/third_party/tar/lib/src/exception.dart b/lib/src/third_party/tar/lib/src/exception.dart
new file mode 100644
index 0000000..e5bf826
--- /dev/null
+++ b/lib/src/third_party/tar/lib/src/exception.dart
@@ -0,0 +1,15 @@
+// @dart = 2.12
+
+import 'package:meta/meta.dart';
+
+/// An exception indicating that there was an issue parsing a `.tar` file.
+/// Intended to be seen by the user.
+class TarException extends FormatException {
+  @internal
+  TarException(String message) : super(message);
+
+  @internal
+  factory TarException.header(String message) {
+    return TarException('Invalid header: $message');
+  }
+}
diff --git a/lib/src/third_party/tar/lib/src/format.dart b/lib/src/third_party/tar/lib/src/format.dart
new file mode 100644
index 0000000..8d1c7d3
--- /dev/null
+++ b/lib/src/third_party/tar/lib/src/format.dart
@@ -0,0 +1,294 @@
+// @dart = 2.12
+
+import 'package:meta/meta.dart';
+
+/// Handy map to help us translate [TarFormat] values to their names.
+/// Be sure to keep this consistent with the constant initializers in
+/// [TarFormat].
+const _formatNames = {
+  1: 'V7',
+  2: 'USTAR',
+  4: 'PAX',
+  8: 'GNU',
+  16: 'STAR',
+};
+
+/// Holds the possible TAR formats that a file could take.
+///
+/// This library only supports the V7, USTAR, PAX, GNU, and STAR formats.
+@sealed
+class TarFormat {
+  /// The TAR formats are encoded in powers of two in [_value], such that we
+  /// can refine our guess via bit operations as we discover more information
+  /// about the TAR file.
+  /// A value of 0 means that the format is invalid.
+  final int _value;
+
+  const TarFormat._internal(this._value);
+
+  @override
+  int get hashCode => _value;
+
+  @override
+  bool operator ==(Object? other) {
+    if (other is! TarFormat) return false;
+
+    return _value == other._value;
+  }
+
+  @override
+  String toString() {
+    if (!isValid()) return 'Invalid';
+
+    final possibleNames = _formatNames.entries
+        .where((e) => _value & e.key != 0)
+        .map((e) => e.value);
+
+    return possibleNames.join(' or ');
+  }
+
+  /// Returns whether [other] is a possible resolution of `this`.
+  ///
+  /// For example, a [TarFormat] with a value of 6 means that we do not have
+  /// enough information to determine if it is [TarFormat.ustar] or
+  /// [TarFormat.pax], so either of them could be possible resolutions of
+  /// `this`.
+  bool has(TarFormat other) => _value & other._value != 0;
+
+  /// Returns a new [TarFormat] that signifies that it can be either
+  /// `this` or [other]'s format.
+  ///
+  /// **Example:**
+  /// ```dart
+  /// TarFormat format = TarFormat.USTAR | TarFormat.PAX;
+  /// ```
+  ///
+  /// The above code would signify that we have limited `format` to either
+  /// the USTAR or PAX format, but need further information to refine the guess.
+  TarFormat operator |(TarFormat other) {
+    return mayBe(other);
+  }
+
+  /// Returns a new [TarFormat] that signifies that it can be either
+  /// `this` or [other]'s format.
+  ///
+  /// **Example:**
+  /// ```dart
+  /// TarFormat format = TarFormat.PAX;
+  /// format = format.mayBe(TarFormat.USTAR);
+  /// ```
+  ///
+  /// The above code would signify that we learnt that in addition to being a
+  /// PAX format, it could also be of the USTAR format.
+  TarFormat mayBe(TarFormat? other) {
+    if (other == null) return this;
+    return TarFormat._internal(_value | other._value);
+  }
+
+  /// Returns a new [TarFormat] that signifies that it can only be [other]'s
+  /// format.
+  ///
+  /// **Example:**
+  /// ```dart
+  /// TarFormat format = TarFormat.PAX | TarFormat.USTAR;
+  /// ...
+  /// format = format.mayOnlyBe(TarFormat.USTAR);
+  /// ```
+  ///
+  /// In the above example, we found that `format` could either be PAX or USTAR,
+  /// but later learnt that it can only be the USTAR format.
+  ///
+  /// If [has(other) == false], [mayOnlyBe] will result in an invalid
+  /// [TarFormat].
+  TarFormat mayOnlyBe(TarFormat other) {
+    return TarFormat._internal(_value & other._value);
+  }
+
+  /// Returns whether this format might be valid.
+  ///
+  /// This returns true even if we have yet to fully determine the exact
+  /// format.
+  bool isValid() => _value > 0;
+
+  /// Original Unix Version 7 (V7) AT&T tar tool prior to standardization.
+  ///
+  /// The structure of the V7 Header consists of the following:
+  ///
+  /// Start | End | Field
+  /// =========================================================================
+  /// 0     | 100 | Path name, stored as null-terminated string.
+  /// 100   | 108 | File mode, stored as an octal number in ASCII.
+  /// 108   | 116 | User id of owner, as octal number in ASCII.
+  /// 116   | 124 | Group id of owner, as octal number in ASCII.
+  /// 124   | 136 | Size of file, as octal number in ASCII.
+  /// 136   | 148 | Modification time of file, number of seconds from epoch,
+  ///               stored as an octal number in ASCII.
+  /// 148   | 156 | Header checksum, stored as an octal number in ASCII. See
+  ///               [computeUnsignedChecksum] for more details.
+  /// 156   | 157 | Link flag, determines the kind of header.
+  /// 157   | 257 | Link name, stored as a string.
+  /// 257   | 512 | NUL pad.
+  ///
+  /// Unused bytes are set to NUL ('\x00').
+  ///
+  /// Reference:
+  /// https://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5&format=html
+  /// https://www.gnu.org/software/tar/manual/html_chapter/tar_15.html#SEC188
+  /// http://cdrtools.sourceforge.net/private/man/star/star.4.html
+  static const v7 = TarFormat._internal(1);
+
+  /// USTAR (Unix Standard TAR) header format defined in POSIX.1-1988.
+  ///
+  /// The structure of the USTAR Header consists of the following:
+  ///
+  /// Start | End | Field
+  /// =========================================================================
+  /// 0     | 100 | Path name, stored as null-terminated string.
+  /// 100   | 108 | File mode, stored as an octal number in ASCII.
+  /// 108   | 116 | User id of owner, as octal number in ASCII.
+  /// 116   | 124 | Group id of owner, as octal number in ASCII.
+  /// 124   | 136 | Size of file, as octal number in ASCII.
+  /// 136   | 148 | Modification time of file, number of seconds from epoch,
+  ///               stored as an octal number in ASCII.
+  /// 148   | 156 | Header checksum, stored as an octal number in ASCII. See
+  ///               [computeUnsignedChecksum] for more details.
+  /// 156   | 157 | Type flag, determines the kind of header.
+  ///               Note that the meaning of the size field depends on the type.
+  /// 157   | 257 | Link name, stored as a string.
+  /// 257   | 263 | Contains the magic value "ustar\x00" to indicate that this is
+  ///               the USTAR format. Full compliance requires user name and
+  ///               group name fields to be set.
+  /// 263   | 265 | Version. "00" for POSIX standard archives.
+  /// 265   | 297 | User name, as null-terminated ASCII string.
+  /// 297   | 329 | Group name, as null-terminated ASCII string.
+  /// 329   | 337 | Major number for character or block device entry.
+  /// 337   | 345 | Minor number for character or block device entry.
+  /// 345   | 500 | Prefix. If the pathname is too long to fit in the 100 bytes
+  ///               provided at the start, it can be split at any / character
+  ///               with the first portion going here.
+  /// 500   | 512 | NUL pad.
+  ///
+  /// Unused bytes are set to NUL ('\x00').
+  ///
+  /// User and group names should be used in preference to uid/gid values when
+  /// they are set and the corresponding names exist on the system.
+  ///
+  /// While this format is compatible with most tar readers, the format has
+  /// several limitations making it unsuitable for some usages. Most notably, it
+  /// cannot support sparse files, files larger than 8 GiB, filenames longer
+  /// than 256 characters, and non-ASCII filenames.
+  ///
+  /// Reference:
+  /// https://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5&format=html
+  /// https://www.gnu.org/software/tar/manual/html_chapter/tar_15.html#SEC188
+  ///	http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
+  static const ustar = TarFormat._internal(2);
+
+  /// PAX header format defined in POSIX.1-2001.
+  ///
+  /// PAX extends USTAR by writing a special file with either the `x` or `g`
+  /// type flags to allow for attributes that are not conveniently stored in a
+  /// POSIX ustar archive to be held.
+  ///
+  /// Some newer formats add their own extensions to PAX by defining their
+  /// own keys and assigning certain semantic meaning to the associated values.
+  /// For example, sparse file support in PAX is implemented using keys
+  /// defined by the GNU manual (e.g., "GNU.sparse.map").
+  ///
+  /// Reference:
+  /// https://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5&format=html
+  /// https://www.gnu.org/software/tar/manual/html_chapter/tar_15.html#SEC188
+  /// http://cdrtools.sourceforge.net/private/man/star/star.4.html
+  ///	http://pubs.opengroup.org/onlinepubs/009695399/utilities/pax.html
+  static const pax = TarFormat._internal(4);
+
+  /// GNU header format.
+  ///
+  /// The GNU header format is older than the USTAR and PAX standards and
+  /// is not compatible with them. The GNU format supports
+  /// arbitrary file sizes, filenames of arbitrary encoding and length,
+  /// sparse files, and other features.
+  ///
+  /// Start | End | Field
+  /// =========================================================================
+  /// 0     | 100 | Path name, stored as null-terminated string.
+  /// 100   | 108 | File mode, stored as an octal number in ASCII.
+  /// 108   | 116 | User id of owner, as octal number in ASCII.
+  /// 116   | 124 | Group id of owner, as octal number in ASCII.
+  /// 124   | 136 | Size of file, as octal number in ASCII.
+  /// 136   | 148 | Modification time of file, number of seconds from epoch,
+  ///               stored as an octal number in ASCII.
+  /// 148   | 156 | Header checksum, stored as an octal number in ASCII. See
+  ///               [computeUnsignedChecksum] for more details.
+  /// 156   | 157 | Type flag, determines the kind of header.
+  ///               Note that the meaning of the size field depends on the type.
+  /// 157   | 257 | Link name, stored as a string.
+  /// 257   | 263 | Contains the magic value "ustar " to indicate that this is
+  ///               the GNU format.
+  /// 263   | 265 | Version. " \x00" for POSIX standard archives.
+  /// 265   | 297 | User name, as null-terminated ASCII string.
+  /// 297   | 329 | Group name, as null-terminated ASCII string.
+  /// 329   | 337 | Major number for character or block device entry.
+  /// 337   | 345 | Minor number for character or block device entry.
+  /// 345   | 357 | Last Access time of file, number of seconds from epoch,
+  ///               stored as an octal number in ASCII.
+  /// 357   | 369 | Last Changed time of file, number of seconds from epoch,
+  ///               stored as an octal number in ASCII.
+  /// 369   | 381 | Offset - not used.
+  /// 381   | 385 | Longnames - deprecated
+  /// 385   | 386 | Unused.
+  /// 386   | 482 | Sparse data - 4 sets of (offset, numbytes) stored as
+  ///               octal numbers in ASCII.
+  /// 482   | 483 | isExtended - if this field is non-zero, this header is
+  ///               followed by  additional sparse records, which are in the
+  ///               same format as above.
+  /// 483   | 495 | Binary representation of the file's complete size, inclusive
+  ///               of the sparse data.
+  /// 495   | 512 | NUL pad.
+  ///
+  /// It is recommended that PAX be chosen over GNU unless the target
+  /// application can only parse GNU formatted archives.
+  ///
+  /// Reference:
+  ///	https://www.gnu.org/software/tar/manual/html_node/Standard.html
+  static const gnu = TarFormat._internal(8);
+
+  /// Schily's TAR format, which is incompatible with USTAR.
+  /// This does not cover STAR extensions to the PAX format; these fall under
+  /// the PAX format.
+  ///
+  /// Start | End | Field
+  /// =========================================================================
+  /// 0     | 100 | Path name, stored as null-terminated string.
+  /// 100   | 108 | File mode, stored as an octal number in ASCII.
+  /// 108   | 116 | User id of owner, as octal number in ASCII.
+  /// 116   | 124 | Group id of owner, as octal number in ASCII.
+  /// 124   | 136 | Size of file, as octal number in ASCII.
+  /// 136   | 148 | Modification time of file, number of seconds from epoch,
+  ///               stored as an octal number in ASCII.
+  /// 148   | 156 | Header checksum, stored as an octal number in ASCII. See
+  ///               [computeUnsignedChecksum] for more details.
+  /// 156   | 157 | Type flag, determines the kind of header.
+  ///               Note that the meaning of the size field depends on the type.
+  /// 157   | 257 | Link name, stored as a string.
+  /// 257   | 263 | Contains the magic value "ustar\x00", which STAR shares
+  ///               with USTAR; STAR is recognized by the trailer field.
+  /// 263   | 265 | Version. "00" for STAR archives.
+  /// 265   | 297 | User name, as null-terminated ASCII string.
+  /// 297   | 329 | Group name, as null-terminated ASCII string.
+  /// 329   | 337 | Major number for character or block device entry.
+  /// 337   | 345 | Minor number for character or block device entry.
+  /// 345   | 476 | Prefix. If the pathname is too long to fit in the 100 bytes
+  ///               provided at the start, it can be split at any / character
+  ///               with the first portion going here.
+  /// 476   | 488 | Last Access time of file, number of seconds from epoch,
+  ///               stored as an octal number in ASCII.
+  /// 488   | 500 | Last Changed time of file, number of seconds from epoch,
+  ///               stored as an octal number in ASCII.
+  /// 500   | 508 | NUL pad.
+  /// 508   | 512 | Trailer - "tar\x00".
+  ///
+  /// Reference:
+  /// http://cdrtools.sourceforge.net/private/man/star/star.4.html
+  static const star = TarFormat._internal(16);
+}
diff --git a/lib/src/third_party/tar/lib/src/header.dart b/lib/src/third_party/tar/lib/src/header.dart
new file mode 100644
index 0000000..d394db6
--- /dev/null
+++ b/lib/src/third_party/tar/lib/src/header.dart
@@ -0,0 +1,337 @@
+// @dart = 2.12
+
+import 'dart:typed_data';
+
+import 'package:meta/meta.dart';
+
+import 'constants.dart';
+import 'exception.dart';
+import 'format.dart';
+import 'utils.dart';
+
+/// Header of a tar entry
+///
+/// A tar header stores meta-information about the matching tar entry, such as
+/// its name.
+@sealed
+abstract class TarHeader {
+  /// Type of header entry. In the V7 TAR format, this field was known as the
+  /// link flag.
+  TypeFlag get typeFlag;
+
+  /// Name of file or directory entry.
+  String get name;
+
+  /// Target name of link (valid for hard links or symbolic links).
+  String? get linkName;
+
+  /// Permission and mode bits.
+  int get mode;
+
+  /// User ID of owner.
+  int get userId;
+
+  /// Group ID of owner.
+  int get groupId;
+
+  /// User name of owner.
+  String? get userName;
+
+  /// Group name of owner.
+  String? get groupName;
+
+  /// Logical file size in bytes.
+  int get size;
+
+  /// The time of the last change to the data of the TAR file.
+  DateTime get modified;
+
+  /// The time of the last access to the data of the TAR file.
+  DateTime? get accessed;
+
+  /// The time of the last change to the data or metadata of the TAR file.
+  DateTime? get changed;
+
+  /// Major device number
+  int get devMajor;
+
+  /// Minor device number
+  int get devMinor;
+
+  /// The TAR format of the header.
+  TarFormat get format;
+
+  /// Checks if this header indicates that the file will have content.
+  bool get hasContent {
+    switch (typeFlag) {
+      case TypeFlag.link:
+      case TypeFlag.symlink:
+      case TypeFlag.block:
+      case TypeFlag.dir:
+      case TypeFlag.char:
+      case TypeFlag.fifo:
+        return false;
+      default:
+        return true;
+    }
+  }
+
+  /// Creates a tar header from the individual fields.
+  factory TarHeader({
+    required String name,
+    TarFormat? format,
+    TypeFlag? typeFlag,
+    DateTime? modified,
+    String? linkName,
+    int mode = 0,
+    int size = -1,
+    String? userName,
+    int userId = 0,
+    int groupId = 0,
+    String? groupName,
+    DateTime? accessed,
+    DateTime? changed,
+    int devMajor = 0,
+    int devMinor = 0,
+  }) {
+    return HeaderImpl.internal(
+      name: name,
+      modified: modified ?? DateTime.fromMillisecondsSinceEpoch(0),
+      format: format ?? TarFormat.pax,
+      typeFlag: typeFlag ?? TypeFlag.reg,
+      linkName: linkName,
+      mode: mode,
+      size: size,
+      userName: userName,
+      userId: userId,
+      groupId: groupId,
+      groupName: groupName,
+      accessed: accessed,
+      changed: changed,
+      devMajor: devMajor,
+      devMinor: devMinor,
+    );
+  }
+
+  TarHeader._();
+}
+
+@internal
+class HeaderImpl extends TarHeader {
+  TypeFlag internalTypeFlag;
+
+  @override
+  String name;
+
+  @override
+  String? linkName;
+
+  @override
+  int mode;
+
+  @override
+  int userId;
+
+  @override
+  int groupId;
+
+  @override
+  String? userName;
+
+  @override
+  String? groupName;
+
+  @override
+  int size;
+
+  @override
+  DateTime modified;
+
+  @override
+  DateTime? accessed;
+
+  @override
+  DateTime? changed;
+
+  @override
+  int devMajor;
+
+  @override
+  int devMinor;
+
+  @override
+  TarFormat format;
+
+  @override
+  TypeFlag get typeFlag {
+    return internalTypeFlag == TypeFlag.regA ? TypeFlag.reg : internalTypeFlag;
+  }
+
+  /// This constructor is meant to help us deal with header-only headers (i.e.
+  /// meta-headers that only describe the next file instead of being headers
+  /// for files themselves).
+  HeaderImpl.internal({
+    required this.name,
+    required this.modified,
+    required this.format,
+    required TypeFlag typeFlag,
+    this.linkName,
+    this.mode = 0,
+    this.size = -1,
+    this.userName,
+    this.userId = 0,
+    this.groupId = 0,
+    this.groupName,
+    this.accessed,
+    this.changed,
+    this.devMajor = 0,
+    this.devMinor = 0,
+  })  : internalTypeFlag = typeFlag,
+        super._();
+
+  factory HeaderImpl.parseBlock(Uint8List headerBlock,
+      {Map<String, String> paxHeaders = const {}}) {
+    assert(headerBlock.length == 512);
+
+    final format = _getFormat(headerBlock);
+    final size = paxHeaders.size ?? headerBlock.readOctal(124, 12);
+
+    // Start by reading data available in every format.
+    final header = HeaderImpl.internal(
+      format: format,
+      name: headerBlock.readString(0, 100),
+      mode: headerBlock.readOctal(100, 8),
+      // These should be octal, but some weird tar implementations ignore that?!
+      // Encountered with package:RAL, version 1.28.0 on pub
+      userId: headerBlock.readNumeric(108, 8),
+      groupId: headerBlock.readNumeric(116, 8),
+      size: size,
+      modified: secondsSinceEpoch(headerBlock.readOctal(136, 12)),
+      typeFlag: typeflagFromByte(headerBlock[156]),
+      linkName: headerBlock.readStringOrNullIfEmpty(157, 100),
+    );
+
+    if (header.hasContent && size < 0) {
+      throw TarException.header('Indicates an invalid size of $size');
+    }
+
+    if (format.isValid() && format != TarFormat.v7) {
+      // If it's a valid header that is not of the v7 format, it will have the
+      // USTAR fields
+      header
+        ..userName ??= headerBlock.readStringOrNullIfEmpty(265, 32)
+        ..groupName ??= headerBlock.readStringOrNullIfEmpty(297, 32)
+        ..devMajor = headerBlock.readNumeric(329, 8)
+        ..devMinor = headerBlock.readNumeric(337, 8);
+
+      // Prefix to the file name
+      var prefix = '';
+      if (format.has(TarFormat.ustar) || format.has(TarFormat.pax)) {
+        prefix = headerBlock.readString(345, 155);
+
+        if (headerBlock.any(isNotAscii)) {
+          header.format = format.mayOnlyBe(TarFormat.pax);
+        }
+      } else if (format.has(TarFormat.star)) {
+        prefix = headerBlock.readString(345, 131);
+        header
+          ..accessed = secondsSinceEpoch(headerBlock.readNumeric(476, 12))
+          ..changed = secondsSinceEpoch(headerBlock.readNumeric(488, 12));
+      } else if (format.has(TarFormat.gnu)) {
+        header.format = TarFormat.gnu;
+
+        if (headerBlock[345] != 0) {
+          header.accessed = secondsSinceEpoch(headerBlock.readNumeric(345, 12));
+        }
+
+        if (headerBlock[357] != 0) {
+          header.changed = secondsSinceEpoch(headerBlock.readNumeric(357, 12));
+        }
+      }
+
+      if (prefix.isNotEmpty) {
+        header.name = '$prefix/${header.name}';
+      }
+    }
+
+    return header.._applyPaxHeaders(paxHeaders);
+  }
+
+  void _applyPaxHeaders(Map<String, String> headers) {
+    for (final entry in headers.entries) {
+      if (entry.value == '') {
+        continue; // Keep the original USTAR value
+      }
+
+      switch (entry.key) {
+        case paxPath:
+          name = entry.value;
+          break;
+        case paxLinkpath:
+          linkName = entry.value;
+          break;
+        case paxUname:
+          userName = entry.value;
+          break;
+        case paxGname:
+          groupName = entry.value;
+          break;
+        case paxUid:
+          userId = parseInt(entry.value);
+          break;
+        case paxGid:
+          groupId = parseInt(entry.value);
+          break;
+        case paxAtime:
+          accessed = parsePaxTime(entry.value);
+          break;
+        case paxMtime:
+          modified = parsePaxTime(entry.value);
+          break;
+        case paxCtime:
+          changed = parsePaxTime(entry.value);
+          break;
+        case paxSize:
+          size = parseInt(entry.value);
+          break;
+        default:
+          break;
+      }
+    }
+  }
+}
+
+/// Checks that [rawHeader] represents a valid tar header based on the
+/// checksum, and then attempts to guess the specific format based
+/// on magic values. If the checksum fails, then an error is thrown.
+TarFormat _getFormat(Uint8List rawHeader) {
+  final checksum = rawHeader.readOctal(checksumOffset, checksumLength);
+
+  // Modern TAR archives use the unsigned checksum, but we check the signed
+  // checksum as well for compatibility.
+  if (checksum != rawHeader.computeUnsignedHeaderChecksum() &&
+      checksum != rawHeader.computeSignedHeaderChecksum()) {
+    throw TarException.header('Checksum does not match');
+  }
+
+  final hasUstarMagic = rawHeader.matchesHeader(magicUstar);
+  if (hasUstarMagic) {
+    return rawHeader.matchesHeader(trailerStar, offset: starTrailerOffset)
+        ? TarFormat.star
+        : TarFormat.ustar | TarFormat.pax;
+  }
+
+  if (rawHeader.matchesHeader(magicGnu) &&
+      rawHeader.matchesHeader(versionGnu, offset: versionOffset)) {
+    return TarFormat.gnu;
+  }
+
+  return TarFormat.v7;
+}
+
+extension _ReadPaxHeaders on Map<String, String> {
+  int? get size {
+    final sizeStr = this[paxSize];
+    return sizeStr == null ? null : int.tryParse(sizeStr);
+  }
+}
diff --git a/lib/src/third_party/tar/lib/src/reader.dart b/lib/src/third_party/tar/lib/src/reader.dart
new file mode 100644
index 0000000..e34b242
--- /dev/null
+++ b/lib/src/third_party/tar/lib/src/reader.dart
@@ -0,0 +1,868 @@
+// @dart = 2.12
+
+import 'dart:async';
+import 'dart:collection';
+import 'dart:convert';
+import 'dart:typed_data';
+
+import '../../../chunked_stream/lib/chunked_stream.dart';
+import 'package:meta/meta.dart';
+import 'package:typed_data/typed_data.dart';
+
+import 'charcodes.dart';
+import 'constants.dart';
+import 'entry.dart';
+import 'exception.dart';
+import 'format.dart';
+import 'header.dart';
+import 'sparse.dart';
+import 'utils.dart';
+
+/// [TarReader] provides sequential access to the entries in a TAR archive.
+/// It is designed to read from a stream and to emit substreams for
+/// individual file contents in order to minimize the amount of memory needed
+/// to read each archive where possible.
+@sealed
+class TarReader implements StreamIterator<TarEntry> {
+  /// A chunked stream iterator to enable us to get our data.
+  final ChunkedStreamIterator<int> _chunkedStream;
+  final PaxHeaders _paxHeaders = PaxHeaders();
+  final int _maxSpecialFileSize;
+
+  /// Skip the next [_skipNext] elements when reading in the stream.
+  int _skipNext = 0;
+
+  TarEntry? _current;
+
+  /// The underlying content stream for the [_current] entry. Draining this
+  /// stream will move the tar reader to the beginning of the next file.
+  ///
+  /// This is not the same as `_current.contents` for sparse files, which are
+  /// reported as expanded through [TarEntry.contents].
+  /// For that reason, we prefer to drain this stream when skipping a tar entry.
+  /// When we know we're skipping data, there's no point expanding sparse holes.
+  ///
+  /// This stream is always set to null after being drained, and there can only
+  /// be one [_underlyingContentStream] at a time.
+  Stream<List<int>>? _underlyingContentStream;
+
+  /// Whether [_current] has ever been listened to.
+  bool _listenedToContentsOnce = false;
+
+  /// Whether we're in the process of reading tar headers.
+  bool _isReadingHeaders = false;
+
+  /// Whether this tar reader is terminally done.
+  ///
+  /// That is the case if:
+  ///  - [cancel] was called
+  ///  - [moveNext] completed to `false` once.
+  ///  - [moveNext] completed to an error
+  ///  - an error was emitted through a tar entry's content stream
+  bool _isDone = false;
+
+  /// Creates a tar reader reading from the raw [tarStream].
+  ///
+  /// The [maxSpecialFileSize] parameter can be used to limit the maximum length
+  /// of hidden entries in the tar stream. These entries include extended PAX
+  /// headers or long names in GNU tar. The content of those entries has to be
+  /// buffered in the parser to properly read the following tar entries. To
+  /// avoid memory-based denial-of-service attacks, this library limits their
+  /// maximum length. Changing the default of 2 KiB is rarely necessary.
+  TarReader(Stream<List<int>> tarStream,
+      {int maxSpecialFileSize = defaultSpecialLength})
+      : _chunkedStream = ChunkedStreamIterator(tarStream),
+        _maxSpecialFileSize = maxSpecialFileSize;
+
+  @override
+  TarEntry get current {
+    final current = _current;
+
+    if (current == null) {
+      throw StateError('Invalid call to TarReader.current. \n'
+          'Did you call and await moveNext() and check that it returned true?');
+    }
+
+    return current;
+  }
+
+  /// Reads the tar stream up until the beginning of the next logical file.
+  ///
+  /// If such a file exists, the returned future will complete with `true`.
+  /// After the future completes, the next tar entry will be available in
+  /// [current].
+  ///
+  /// If no such file exists, the future will complete with `false`.
+  /// The future might complete with a [TarException] if the tar stream is
+  /// malformed or ends unexpectedly.
+  /// If the future completes with `false` or an exception, the reader will
+  /// [cancel] itself and release associated resources. Thus, it is invalid to
+  /// call [moveNext] again in that case.
+  @override
+  Future<bool> moveNext() async {
+    await _prepareToReadHeaders();
+    try {
+      return await _moveNextInternal();
+    } on Object {
+      await cancel();
+      rethrow;
+    }
+  }
+
+  /// Consumes the stream up to the contents of the next logical tar entry.
+  /// Will cancel the underlying subscription when returning false, but not when
+  /// it throws.
+  Future<bool> _moveNextInternal() async {
+    // We're reading a new logical file, so clear the local pax headers
+    _paxHeaders.clearLocals();
+
+    var gnuLongName = '';
+    var gnuLongLink = '';
+    var eofAcceptable = true;
+
+    var format = TarFormat.ustar |
+        TarFormat.pax |
+        TarFormat.gnu |
+        TarFormat.v7 |
+        TarFormat.star;
+
+    HeaderImpl? nextHeader;
+
+    /// Externally, [moveNext] iterates through the tar archive as if it were a
+    /// series of files. Internally, the tar format often uses fake "files" to
+    /// add metadata that describes the next file. These metadata "files" should
+    /// not normally be visible to the outside. As such, this loop iterates
+    /// through one or more "header files" until it finds a "normal file".
+    while (true) {
+      if (_skipNext > 0) {
+        await _readFullBlock(_skipNext);
+        _skipNext = 0;
+      }
+
+      final rawHeader =
+          await _readFullBlock(blockSize, allowEmpty: eofAcceptable);
+
+      nextHeader = await _readHeader(rawHeader);
+      if (nextHeader == null) {
+        if (eofAcceptable) {
+          await cancel();
+          return false;
+        } else {
+          _unexpectedEof();
+        }
+      }
+
+      // We're beginning to read a file; if the tar file ends now, something is
+      // wrong.
+      eofAcceptable = false;
+      format = format.mayOnlyBe(nextHeader.format);
+
+      // Check for PAX/GNU special headers and files.
+      if (nextHeader.typeFlag == TypeFlag.xHeader ||
+          nextHeader.typeFlag == TypeFlag.xGlobalHeader) {
+        format = format.mayOnlyBe(TarFormat.pax);
+        final paxHeaderSize = _checkSpecialSize(nextHeader.size);
+        final rawPaxHeaders = await _readFullBlock(paxHeaderSize);
+
+        _paxHeaders.readPaxHeaders(
+            rawPaxHeaders, nextHeader.typeFlag == TypeFlag.xGlobalHeader);
+        _markPaddingToSkip(paxHeaderSize);
+
+        // This is a meta header affecting the next header.
+        continue;
+      } else if (nextHeader.typeFlag == TypeFlag.gnuLongLink ||
+          nextHeader.typeFlag == TypeFlag.gnuLongName) {
+        format = format.mayOnlyBe(TarFormat.gnu);
+        final realName = await _readFullBlock(
+            _checkSpecialSize(nextBlockSize(nextHeader.size)));
+
+        final readName = realName.readString(0, realName.length);
+        if (nextHeader.typeFlag == TypeFlag.gnuLongName) {
+          gnuLongName = readName;
+        } else {
+          gnuLongLink = readName;
+        }
+
+        // This is a meta header affecting the next header.
+        continue;
+      } else {
+        // The old GNU sparse format is handled here since it is technically
+        // just a regular file with additional attributes.
+
+        if (gnuLongName.isNotEmpty) nextHeader.name = gnuLongName;
+        if (gnuLongLink.isNotEmpty) nextHeader.linkName = gnuLongLink;
+
+        if (nextHeader.internalTypeFlag == TypeFlag.regA) {
+          /// Legacy archives use trailing slash for directories
+          if (nextHeader.name.endsWith('/')) {
+            nextHeader.internalTypeFlag = TypeFlag.dir;
+          } else {
+            nextHeader.internalTypeFlag = TypeFlag.reg;
+          }
+        }
+
+        final content = await _handleFile(nextHeader, rawHeader);
+
+        // Set the final guess at the format
+        if (format.has(TarFormat.ustar) && format.has(TarFormat.pax)) {
+          format = format.mayOnlyBe(TarFormat.ustar);
+        }
+        nextHeader.format = format;
+
+        _current = TarEntry(nextHeader, content);
+        _listenedToContentsOnce = false;
+        _isReadingHeaders = false;
+        return true;
+      }
+    }
+  }
+
+  @override
+  Future<void> cancel() async {
+    if (_isDone) return;
+
+    _isDone = true;
+    _current = null;
+    _underlyingContentStream = null;
+    _listenedToContentsOnce = false;
+    _isReadingHeaders = false;
+
+    return _chunkedStream.cancel();
+  }
+
+  /// Utility function for quickly iterating through all entries in [tarStream].
+  static Future<void> forEach(Stream<List<int>> tarStream,
+      FutureOr<void> Function(TarEntry entry) action) async {
+    final reader = TarReader(tarStream);
+    try {
+      while (await reader.moveNext()) {
+        await action(reader.current);
+      }
+    } finally {
+      await reader.cancel();
+    }
+  }
+
+  /// Ensures that this reader can safely read headers now.
+  ///
+  /// This method prevents:
+  ///  * concurrent calls to [moveNext]
+  ///  * a call to [moveNext] while a stream is active:
+  ///    * if [contents] has never been listened to, we drain the stream
+  ///    * otherwise, throws a [StateError]
+  Future<void> _prepareToReadHeaders() async {
+    if (_isDone) {
+      throw StateError('Tried to call TarReader.moveNext() on a canceled '
+          'reader. \n'
+          'Note that a reader is canceled when moveNext() throws or returns '
+          'false.');
+    }
+
+    if (_isReadingHeaders) {
+      throw StateError('Concurrent call to TarReader.moveNext() detected. \n'
+          'Please await all calls to Reader.moveNext().');
+    }
+    _isReadingHeaders = true;
+
+    final underlyingStream = _underlyingContentStream;
+    if (underlyingStream != null) {
+      if (_listenedToContentsOnce) {
+        throw StateError(
+            'Illegal call to TarReader.moveNext() while a previous stream was '
+            'active.\n'
+            'When listening to tar contents, make sure the stream is '
+            'complete or cancelled before calling TarReader.moveNext() again.');
+      } else {
+        await underlyingStream.drain<void>();
+        // The stream should reset when drained (we do this in _publishStream)
+        assert(_underlyingContentStream == null);
+      }
+    }
+  }
+
+  int _checkSpecialSize(int size) {
+    if (size > _maxSpecialFileSize) {
+      throw TarException(
+          'TAR file contains hidden entry with an invalid size of $size.');
+    }
+
+    return size;
+  }
+
+  Never _unexpectedEof() {
+    throw TarException.header('Unexpected end of file');
+  }
+
+  /// Reads a block with the requested [size], or throws an unexpected EoF
+  /// exception.
+  Future<Uint8List> _readFullBlock(int size, {bool allowEmpty = false}) async {
+    final block = await _chunkedStream.readBytes(size);
+    if (block.length != size && !(allowEmpty && block.isEmpty)) {
+      _unexpectedEof();
+    }
+
+    return block;
+  }
+
+  /// Reads the next block header and assumes that the underlying reader
+  /// is already aligned to a block boundary. It returns the raw block of the
+  /// header in case further processing is required.
+  ///
+  /// EOF is hit when one of the following occurs:
+  ///	* Exactly 0 bytes are read and EOF is hit.
+  ///	* Exactly 1 block of zeros is read and EOF is hit.
+  ///	* At least 2 blocks of zeros are read.
+  Future<HeaderImpl?> _readHeader(Uint8List rawHeader) async {
+    // Exactly 0 bytes are read and EOF is hit.
+    if (rawHeader.isEmpty) return null;
+
+    if (rawHeader.isAllZeroes) {
+      rawHeader = await _chunkedStream.readBytes(blockSize);
+
+      // Exactly 1 block of zeroes is read and EOF is hit.
+      if (rawHeader.isEmpty) return null;
+
+      if (rawHeader.isAllZeroes) {
+        // Two blocks of zeros are read - Normal EOF.
+        return null;
+      }
+
+      throw TarException('Encountered a non-zero block after a zero block');
+    }
+
+    return HeaderImpl.parseBlock(rawHeader, paxHeaders: _paxHeaders);
+  }
+
+  /// Creates a stream of the next entry's content
+  Future<Stream<List<int>>> _handleFile(
+      HeaderImpl header, Uint8List rawHeader) async {
+    List<SparseEntry>? sparseData;
+    if (header.typeFlag == TypeFlag.gnuSparse) {
+      sparseData = await _readOldGNUSparseMap(header, rawHeader);
+    } else {
+      sparseData = await _readGNUSparsePAXHeaders(header);
+    }
+
+    if (sparseData != null) {
+      if (header.hasContent &&
+          !validateSparseEntries(sparseData, header.size)) {
+        throw TarException.header('Invalid sparse file header.');
+      }
+
+      final sparseHoles = invertSparseEntries(sparseData, header.size);
+      final sparseDataLength =
+          sparseData.fold<int>(0, (value, element) => value + element.length);
+
+      final streamLength = nextBlockSize(sparseDataLength);
+      final safeStream =
+          _publishStream(_chunkedStream.substream(streamLength), streamLength);
+      return sparseStream(safeStream, sparseHoles, header.size);
+    } else {
+      var size = header.size;
+      if (!header.hasContent) size = 0;
+
+      if (size < 0) {
+        throw TarException.header('Invalid size ($size) detected!');
+      }
+
+      if (size == 0) {
+        return _publishStream(const Stream<Never>.empty(), 0);
+      } else {
+        _markPaddingToSkip(size);
+        return _publishStream(
+            _chunkedStream.substream(header.size), header.size);
+      }
+    }
+  }
+
+  /// Publishes a library-internal stream for users.
+  ///
+  /// This adds a check to ensure that the stream we're exposing has the
+  /// expected length. It also sets the [_underlyingContentStream] field when
+  /// the stream starts and resets it when it's done.
+  Stream<List<int>> _publishStream(Stream<List<int>> stream, int length) {
+    // There can only be one content stream at a time. This precondition is
+    // checked by _prepareToReadHeaders.
+    assert(_underlyingContentStream == null);
+    return _underlyingContentStream = Stream.eventTransformed(stream, (sink) {
+      _listenedToContentsOnce = true;
+
+      late _OutgoingStreamGuard guard;
+      return guard = _OutgoingStreamGuard(
+        length,
+        sink,
+        // Reset state when the stream is done. This will only be called when
+        // the stream is done, not when a listener cancels.
+        () {
+          _underlyingContentStream = null;
+          if (guard.hadError) {
+            cancel();
+          }
+        },
+      );
+    });
+  }
+
+  /// Skips to the next block after reading [readSize] bytes from the beginning
+  /// of a previous block.
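+  ///
+  /// For example, with 512-byte blocks, after an entry of 10 bytes we still
+  /// have to skip 502 bytes of padding to reach the next block boundary.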
+  void _markPaddingToSkip(int readSize) {
+    final offsetInLastBlock = readSize.toUnsigned(blockSizeLog2);
+    if (offsetInLastBlock != 0) {
+      _skipNext = blockSize - offsetInLastBlock;
+    }
+  }
+
+  /// Checks the PAX headers for GNU sparse headers.
+  /// If they are found, then this function reads the sparse map and returns it.
+  /// This assumes that 0.0 headers have already been converted to 0.1 headers
+  /// by the PAX header parsing logic.
+  Future<List<SparseEntry>?> _readGNUSparsePAXHeaders(HeaderImpl header) async {
+    /// Identify the version of GNU headers.
+    var isVersion1 = false;
+    final major = _paxHeaders[paxGNUSparseMajor];
+    final minor = _paxHeaders[paxGNUSparseMinor];
+
+    final sparseMapHeader = _paxHeaders[paxGNUSparseMap];
+    if (major == '0' && (minor == '0' || minor == '1') ||
+        // assume 0.0 or 0.1 if no version header is set
+        sparseMapHeader != null && sparseMapHeader.isNotEmpty) {
+      isVersion1 = false;
+    } else if (major == '1' && minor == '0') {
+      isVersion1 = true;
+    } else {
+      // Unknown version that we don't support
+      return null;
+    }
+
+    header.format |= TarFormat.pax;
+
+    /// Update [header] from GNU sparse PAX headers.
+    final possibleName = _paxHeaders[paxGNUSparseName] ?? '';
+    if (possibleName.isNotEmpty) {
+      header.name = possibleName;
+    }
+
+    final possibleSize =
+        _paxHeaders[paxGNUSparseSize] ?? _paxHeaders[paxGNUSparseRealSize];
+
+    if (possibleSize != null && possibleSize.isNotEmpty) {
+      final size = int.tryParse(possibleSize, radix: 10);
+      if (size == null) {
+        throw TarException.header('Invalid PAX size ($possibleSize) detected');
+      }
+
+      header.size = size;
+    }
+
+    // Read the sparse map according to the appropriate format.
+    if (isVersion1) {
+      return await _readGNUSparseMap1x0();
+    }
+
+    return _readGNUSparseMap0x1(header);
+  }
+
+  /// Reads the sparse map as stored in GNU's PAX sparse format version 1.0.
+  /// The format of the sparse map consists of a series of newline-terminated
+  /// numeric fields. The first field is the number of entries and is always
+  /// present. Following this are the entries, consisting of two fields
+  /// (offset, length). This function must stop reading at the end boundary of
+  /// the block containing the last newline.
+  ///
+  /// Note that the GNU manual says that numeric values should be encoded in
+  /// octal format. However, the GNU tar utility itself outputs these values in
+  /// decimal. As such, this library treats values as being encoded in decimal.
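+  ///
+  /// For example (an illustrative encoding, not from a reference archive), a
+  /// map with two entries could be stored as `2\n0\n1024\n2048\n512\n`,
+  /// describing the data fragments (offset 0, length 1024) and
+  /// (offset 2048, length 512).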
+  Future<List<SparseEntry>> _readGNUSparseMap1x0() async {
+    var newLineCount = 0;
+    final block = Uint8Queue();
+
+    /// Ensures that [block] has at least [n] newline-terminated tokens.
+    Future<void> feedTokens(int n) async {
+      while (newLineCount < n) {
+        final newBlock = await _chunkedStream.readBytes(blockSize);
+        if (newBlock.length < blockSize) {
+          throw TarException.header(
+              'GNU Sparse Map does not have enough lines!');
+        }
+
+        block.addAll(newBlock);
+        newLineCount += newBlock.where((byte) => byte == $lf).length;
+      }
+    }
+
+    /// Get the next token delimited by a newline. This assumes that
+    /// at least one newline exists in the buffer.
+    String nextToken() {
+      newLineCount--;
+      final nextNewLineIndex = block.indexOf($lf);
+      final result = block.sublist(0, nextNewLineIndex);
+      block.removeRange(0, nextNewLineIndex + 1);
+      return result.readString(0, nextNewLineIndex);
+    }
+
+    await feedTokens(1);
+
+    // Parse for the number of entries.
+    // Use integer overflow resistant math to check this.
+    final numEntriesString = nextToken();
+    final numEntries = int.tryParse(numEntriesString);
+    if (numEntries == null || numEntries < 0 || 2 * numEntries < numEntries) {
+      throw TarException.header(
+          'Invalid sparse map number of entries: $numEntriesString!');
+    }
+
+    // Parse for all member entries.
+    // [numEntries] is trusted after this since a potential attacker must have
+    // committed resources proportional to what this library used.
+    await feedTokens(2 * numEntries);
+
+    final sparseData = <SparseEntry>[];
+
+    for (var i = 0; i < numEntries; i++) {
+      final offsetToken = nextToken();
+      final lengthToken = nextToken();
+
+      final offset = int.tryParse(offsetToken);
+      final length = int.tryParse(lengthToken);
+
+      if (offset == null || length == null) {
+        throw TarException.header(
+            'Failed to read a GNU sparse map entry. Encountered '
+            'offset: $offsetToken, length: $lengthToken');
+      }
+
+      sparseData.add(SparseEntry(offset, length));
+    }
+    return sparseData;
+  }
+
+  /// Reads the sparse map as stored in GNU's PAX sparse format version 0.1.
+  /// The sparse map is stored in the PAX headers and is stored like this:
+  /// `offset₀,size₀,offset₁,size₁...`
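+  ///
+  /// For example, `0,2,8,2` describes the two fragments (offset 0, length 2)
+  /// and (offset 8, length 2).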
+  List<SparseEntry> _readGNUSparseMap0x1(TarHeader header) {
+    // Get number of entries, check for integer overflows
+    final numEntriesString = _paxHeaders[paxGNUSparseNumBlocks];
+    final numEntries =
+        numEntriesString != null ? int.tryParse(numEntriesString) : null;
+
+    if (numEntries == null || numEntries < 0 || 2 * numEntries < numEntries) {
+      throw TarException.header('Invalid GNU version 0.1 map');
+    }
+
+    // There should be two numbers in [sparseMap] for each entry.
+    final sparseMap = _paxHeaders[paxGNUSparseMap]?.split(',');
+    if (sparseMap == null) {
+      throw TarException.header('Invalid GNU version 0.1 map');
+    }
+
+    if (sparseMap.length != 2 * numEntries) {
+      throw TarException.header(
+          'Detected sparse map length ${sparseMap.length} '
+          'that is not twice the number of entries $numEntries');
+    }
+
+    /// Loop through sparse map entries.
+    /// [numEntries] is now trusted.
+    final sparseData = <SparseEntry>[];
+    for (var i = 0; i < sparseMap.length; i += 2) {
+      final offset = int.tryParse(sparseMap[i]);
+      final length = int.tryParse(sparseMap[i + 1]);
+
+      if (offset == null || length == null) {
+        throw TarException.header(
+            'Failed to read a GNU sparse map entry. Encountered '
+            'offset: ${sparseMap[i]}, length: ${sparseMap[i + 1]}');
+      }
+
+      sparseData.add(SparseEntry(offset, length));
+    }
+
+    return sparseData;
+  }
+
+  /// Reads the sparse map from the old GNU sparse format.
+  /// The sparse map is stored in the tar header if it's small enough.
+  /// If it's larger than four entries, then one or more extension headers are
+  /// used to store the rest of the sparse map.
+  ///
+  /// [TarHeader.size] does not reflect the size of any extended headers used.
+  /// Thus, this function will read from the chunked stream iterator to fetch
+  /// extra headers.
+  ///
+  /// See also: https://www.gnu.org/software/tar/manual/html_section/tar_94.html#SEC191
+  Future<List<SparseEntry>> _readOldGNUSparseMap(
+      HeaderImpl header, Uint8List rawHeader) async {
+    // Make sure that the input format is GNU.
+    // Unfortunately, the STAR format also has a sparse header format that uses
+    // the same type flag but has a completely different layout.
+    if (header.format != TarFormat.gnu) {
+      throw TarException.header('Tried to read sparse map of non-GNU header');
+    }
+
+    header.size = rawHeader.readNumeric(483, 12);
+    final sparseMaps = <Uint8List>[];
+
+    var sparse = rawHeader.sublistView(386, 483);
+    sparseMaps.add(sparse);
+
+    while (true) {
+      final maxEntries = sparse.length ~/ 24;
+      if (sparse[24 * maxEntries] > 0) {
+        // If there are more entries, read an extension header and parse its
+        // entries.
+        sparse = await _chunkedStream.readBytes(blockSize);
+        sparseMaps.add(sparse);
+        continue;
+      }
+
+      break;
+    }
+
+    try {
+      return _processOldGNUSparseMap(sparseMaps);
+    } on FormatException {
+      throw TarException('Invalid old GNU Sparse Map');
+    }
+  }
+
+  /// Processes [sparseMaps], which is known to be an old GNU sparse map.
+  ///
+  /// For details, see https://www.gnu.org/software/tar/manual/html_section/tar_94.html#SEC191
+  List<SparseEntry> _processOldGNUSparseMap(List<Uint8List> sparseMaps) {
+    final sparseData = <SparseEntry>[];
+
+    for (final sparseMap in sparseMaps) {
+      final maxEntries = sparseMap.length ~/ 24;
+      for (var i = 0; i < maxEntries; i++) {
+        // This termination condition is identical to GNU and BSD tar.
+        if (sparseMap[i * 24] == 0) {
+          // Don't return, need to process extended headers (even if empty)
+          break;
+        }
+
+        final offset = sparseMap.readNumeric(i * 24, 12);
+        final length = sparseMap.readNumeric(i * 24 + 12, 12);
+
+        sparseData.add(SparseEntry(offset, length));
+      }
+    }
+    return sparseData;
+  }
+}
+
+@internal
+class PaxHeaders extends UnmodifiableMapBase<String, String> {
+  final Map<String, String> _globalHeaders = {};
+  Map<String, String> _localHeaders = {};
+
+  /// Applies new global PAX-headers from the map.
+  ///
+  /// The [headers] will replace global headers with the same key, but leave
+  /// others intact.
+  void newGlobals(Map<String, String> headers) {
+    _globalHeaders.addAll(headers);
+  }
+
+  void addLocal(String key, String value) => _localHeaders[key] = value;
+
+  void removeLocal(String key) => _localHeaders.remove(key);
+
+  /// Applies new local PAX-headers from the map.
+  ///
+  /// This replaces all currently active local headers.
+  void newLocals(Map<String, String> headers) {
+    _localHeaders = headers;
+  }
+
+  /// Clears local headers.
+  ///
+  /// This is used by the reader after a file has ended, as local headers only
+  /// apply to the next entry.
+  void clearLocals() {
+    _localHeaders = {};
+  }
+
+  @override
+  String? operator [](Object? key) {
+    return _localHeaders[key] ?? _globalHeaders[key];
+  }
+
+  @override
+  Iterable<String> get keys => {..._globalHeaders.keys, ..._localHeaders.keys};
+
+  /// Decodes the content of an extended pax header entry.
+  ///
+  /// Semantically, a [PAX Header][posix pax] is a map with string keys and
+  /// values, where both keys and values are encoded with utf8.
+  ///
+  /// However, [old GNU Versions][gnu sparse00] used to repeat keys to store
+  /// sparse file information in sparse headers. This method will transparently
+  /// rewrite the PAX format of version 0.0 to version 0.1.
+  ///
+  /// [posix pax]: https://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_03
+  /// [gnu sparse00]: https://www.gnu.org/software/tar/manual/html_section/tar_94.html#SEC192
+  void readPaxHeaders(List<int> data, bool isGlobal,
+      {bool ignoreUnknown = true}) {
+    var offset = 0;
+    final map = <String, String>{};
+    final sparseMap = <String>[];
+
+    Never error() => throw TarException.header('Invalid PAX record');
+
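+    // Each PAX record has the form "<length> <key>=<value>\n", where <length>
+    // counts the entire record, including the length digits themselves. For
+    // example: "30 mtime=1350244992.023960108\n".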
+    while (offset < data.length) {
+      // At the start of an entry, expect its length which is terminated by a
+      // space char.
+      final space = data.indexOf($space, offset);
+      if (space == -1) break;
+
+      var length = 0;
+      var currentChar = data[offset];
+      var charsInLength = 0;
+      while (currentChar >= $0 && currentChar <= $9) {
+        length = length * 10 + currentChar - $0;
+        charsInLength++;
+        currentChar = data[++offset];
+      }
+
+      if (length == 0) {
+        error();
+      }
+
+      // Skip the whitespace
+      if (currentChar != $space) {
+        error();
+      }
+      offset++;
+
+      // Length also includes the length description and a space we just read
+      final endOfEntry = offset + length - 1 - charsInLength;
+      // Checking against endOfEntry - 1 because the trailing newline is
+      // optional for the last entry.
+      if (endOfEntry < offset || endOfEntry - 1 > data.length) {
+        error();
+      }
+
+      // Read the key
+      final nextEquals = data.indexOf($equal, offset);
+      if (nextEquals == -1 || nextEquals >= endOfEntry) {
+        error();
+      }
+
+      final key = utf8.decoder.convert(data, offset, nextEquals);
+      // Skip over the equals sign
+      offset = nextEquals + 1;
+
+      // Subtract one for trailing newline
+      final endOfValue = endOfEntry - 1;
+      final value = utf8.decoder.convert(data, offset, endOfValue);
+
+      if (!_isValidPaxRecord(key, value)) {
+        error();
+      }
+
+      // If we're seeing weird PAX Version 0.0 sparse keys, expect alternating
+      // GNU.sparse.offset and GNU.sparse.numbytes headers.
+      if (key == paxGNUSparseNumBytes || key == paxGNUSparseOffset) {
+        if ((sparseMap.length % 2 == 0 && key != paxGNUSparseOffset) ||
+            (sparseMap.length % 2 == 1 && key != paxGNUSparseNumBytes) ||
+            value.contains(',')) {
+          error();
+        }
+
+        sparseMap.add(value);
+      } else if (!ignoreUnknown || supportedPaxHeaders.contains(key)) {
+        // Ignore unrecognized headers to avoid unbounded growth of the global
+        // header map.
+        map[key] = value;
+      }
+
+      // Skip over value
+      offset = endOfValue;
+      // and the trailing newline
+      final hasNewline = offset < data.length;
+      if (hasNewline && data[offset] != $lf) {
+        throw TarException('Invalid PAX Record (missing trailing newline)');
+      }
+      offset++;
+    }
+
+    if (sparseMap.isNotEmpty) {
+      map[paxGNUSparseMap] = sparseMap.join(',');
+    }
+
+    if (isGlobal) {
+      newGlobals(map);
+    } else {
+      newLocals(map);
+    }
+  }
+
+  /// Checks whether [key], [value] is a valid entry in a pax header.
+  ///
+  /// This is adapted from the Golang tar reader (`validPAXRecord`), which says
+  /// that "Keys and values should be UTF-8, but the number of bad writers out
+  /// there forces us to be a more liberal."
+  static bool _isValidPaxRecord(String key, String value) {
+    // These limitations are documented in the PAX standard.
+    if (key.isEmpty || key.contains('=')) return false;
+
+    // These aren't, but Golang's tar has them and got away with it.
+    switch (key) {
+      case paxPath:
+      case paxLinkpath:
+      case paxUname:
+      case paxGname:
+        return !value.codeUnits.contains(0);
+      default:
+        return !key.codeUnits.contains(0);
+    }
+  }
+}
+
+/// Event-sink tracking the length of emitted tar entry streams.
+///
+/// [ChunkedStreamIterator.substream] might return a stream shorter than
+/// expected. That would indicate an invalid tar file, since the correct size
+/// is stored in the header.
+class _OutgoingStreamGuard extends EventSink<List<int>> {
+  final int expectedSize;
+  final EventSink<List<int>> out;
+  void Function() onDone;
+
+  int emittedSize = 0;
+  bool hadError = false;
+
+  _OutgoingStreamGuard(this.expectedSize, this.out, this.onDone);
+
+  @override
+  void add(List<int> event) {
+    emittedSize += event.length;
+    // We have checks limiting the length of outgoing streams. If the stream is
+    // larger than expected, that's a bug in pkg:tar.
+    assert(
+        emittedSize <= expectedSize,
+        'Stream now emitted $emittedSize bytes, but only expected '
+        '$expectedSize');
+
+    out.add(event);
+  }
+
+  @override
+  void addError(Object error, [StackTrace? stackTrace]) {
+    hadError = true;
+    out.addError(error, stackTrace);
+  }
+
+  @override
+  void close() {
+    onDone();
+
+    // If the stream stopped after an error, the user is already aware that
+    // something is wrong.
+    if (emittedSize < expectedSize && !hadError) {
+      out.addError(
+          TarException('Unexpected end of tar file'), StackTrace.current);
+    }
+
+    out.close();
+  }
+}
diff --git a/lib/src/third_party/tar/lib/src/sparse.dart b/lib/src/third_party/tar/lib/src/sparse.dart
new file mode 100644
index 0000000..7f35313
--- /dev/null
+++ b/lib/src/third_party/tar/lib/src/sparse.dart
@@ -0,0 +1,155 @@
+// @dart = 2.12
+
+import 'package:meta/meta.dart';
+
+import '../../../chunked_stream/lib/chunked_stream.dart';
+import 'exception.dart';
+import 'utils.dart';
+
+/// Represents a [length]-sized fragment at [offset] in a file.
+///
+/// [SparseEntry]s can represent either data or holes, and we can easily
+/// convert between the two if we know the size of the file: all the sparse
+/// data and all the sparse holes combined must cover the full file size.
+class SparseEntry {
+  final int offset;
+  final int length;
+
+  SparseEntry(this.offset, this.length);
+
+  int get end => offset + length;
+
+  @override
+  String toString() => 'offset: $offset, length $length';
+
+  @override
+  bool operator ==(Object? other) {
+    if (other is! SparseEntry) return false;
+
+    return offset == other.offset && length == other.length;
+  }
+
+  @override
+  int get hashCode => offset ^ length;
+}
+
+/// Generates a stream of the sparse file contents of size [size], given
+/// [sparseHoles] and the raw content in [source].
+@internal
+Stream<List<int>> sparseStream(
+    Stream<List<int>> source, List<SparseEntry> sparseHoles, int size) {
+  if (sparseHoles.isEmpty) {
+    return ChunkedStreamIterator(source).substream(size);
+  }
+
+  return _sparseStream(source, sparseHoles, size);
+}
+
+/// Generates a stream of the sparse file contents of size [size], given
+/// [sparseHoles] and the raw content in [source].
+///
+/// [sparseHoles] has to be non-empty.
+Stream<List<int>> _sparseStream(
+    Stream<List<int>> source, List<SparseEntry> sparseHoles, int size) async* {
+  // Current logical position in sparse file.
+  var position = 0;
+
+  // Index of the next sparse hole in [sparseHoles] to be processed.
+  var sparseHoleIndex = 0;
+
+  // Iterator through [source] to obtain the data bytes.
+  final iterator = ChunkedStreamIterator(source);
+
+  while (position < size) {
+    // Yield all the necessary sparse holes.
+    while (sparseHoleIndex < sparseHoles.length &&
+        sparseHoles[sparseHoleIndex].offset == position) {
+      final sparseHole = sparseHoles[sparseHoleIndex];
+      yield* zeroes(sparseHole.length);
+      position += sparseHole.length;
+      sparseHoleIndex++;
+    }
+
+    if (position == size) break;
+
+    /// Yield up to the next sparse hole's offset, or all the way to the end
+    /// if there are no sparse holes left.
+    var yieldTo = size;
+    if (sparseHoleIndex < sparseHoles.length) {
+      yieldTo = sparseHoles[sparseHoleIndex].offset;
+    }
+
+    // Yield data as substream, but make sure that we have enough data.
+    var checkedPosition = position;
+    await for (final chunk in iterator.substream(yieldTo - position)) {
+      yield chunk;
+      checkedPosition += chunk.length;
+    }
+
+    if (checkedPosition != yieldTo) {
+      throw TarException('Invalid sparse data: Unexpected end of input stream');
+    }
+
+    position = yieldTo;
+  }
+}
+
+/// Reports whether [sparseEntries] is a valid sparse map.
+/// It does not matter whether [sparseEntries] represents data fragments or
+/// hole fragments.
+bool validateSparseEntries(List<SparseEntry> sparseEntries, int size) {
+  // Validate all sparse entries. These are the same checks as performed by
+  // the BSD tar utility.
+  if (size < 0) return false;
+
+  SparseEntry? previous;
+
+  for (final current in sparseEntries) {
+    // Negative values are never okay.
+    if (current.offset < 0 || current.length < 0) return false;
+
+    // Integer overflow with large length.
+    if (current.offset + current.length < current.offset) return false;
+
+    // Region extends beyond the actual size.
+    if (current.end > size) return false;
+
+    // Regions cannot overlap and must be in order.
+    if (previous != null && previous.end > current.offset) return false;
+
+    previous = current;
+  }
+
+  return true;
+}
+
+/// Converts a sparse map ([source]) from one form to the other.
+/// If the input is sparse holes, then it will output sparse data and
+/// vice-versa. The input must already have been validated.
+///
+/// Returns a normalized map where:
+///  * adjacent fragments are coalesced together
+///  * only the last fragment may be empty
+///  * the end offset of the last fragment is the total size
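+///
+/// For example, inverting the data fragments of a 10-byte file given as
+/// `[SparseEntry(0, 2), SparseEntry(8, 2)]` yields the holes
+/// `[SparseEntry(2, 6), SparseEntry(10, 0)]`.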
+List<SparseEntry> invertSparseEntries(List<SparseEntry> source, int size) {
+  final result = <SparseEntry>[];
+  var previous = SparseEntry(0, 0);
+  for (final current in source) {
+    /// Skip empty fragments
+    if (current.length == 0) continue;
+
+    final newLength = current.offset - previous.offset;
+    if (newLength > 0) {
+      result.add(SparseEntry(previous.offset, newLength));
+    }
+
+    previous = SparseEntry(current.end, 0);
+  }
+  final lastLength = size - previous.offset;
+  result.add(SparseEntry(previous.offset, lastLength));
+  return result;
+}
diff --git a/lib/src/third_party/tar/lib/src/utils.dart b/lib/src/third_party/tar/lib/src/utils.dart
new file mode 100644
index 0000000..c456966
--- /dev/null
+++ b/lib/src/third_party/tar/lib/src/utils.dart
@@ -0,0 +1,242 @@
+// @dart = 2.12
+
+import 'dart:convert';
+import 'dart:math';
+import 'dart:typed_data';
+
+import 'charcodes.dart';
+import 'constants.dart';
+import 'exception.dart';
+
+const _checksumEnd = checksumOffset + checksumLength;
+const _checksumPlaceholder = $space;
+
+extension ByteBufferUtils on Uint8List {
+  String readString(int offset, int maxLength) {
+    return readStringOrNullIfEmpty(offset, maxLength) ?? '';
+  }
+
+  Uint8List sublistView(int start, [int? end]) {
+    return Uint8List.sublistView(this, start, end);
+  }
+
+  String? readStringOrNullIfEmpty(int offset, int maxLength) {
+    var data = sublistView(offset, offset + maxLength);
+    var contentLength = data.indexOf(0);
+    // If there's no \0, assume that the string fills the whole segment
+    if (contentLength.isNegative) contentLength = maxLength;
+
+    if (contentLength == 0) return null;
+
+    data = data.sublistView(0, contentLength);
+    try {
+      return utf8.decode(data);
+    } on FormatException {
+      return String.fromCharCodes(data).trim();
+    }
+  }
+
+  /// Parse an octal string encoded from index [offset] with the maximum length
+  /// [length].
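+  ///
+  /// For example, the ASCII bytes of `0000644` decode to 420 (octal 644).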
+  int readOctal(int offset, int length) {
+    var result = 0;
+    var multiplier = 1;
+
+    for (var i = length - 1; i >= 0; i--) {
+      final charCode = this[offset + i];
+      // Some tar implementations add a \0 or space at the end, ignore that
+      if (charCode == 0 || charCode == $space) continue;
+      if (charCode < $0 || charCode > $9) {
+        throw TarException('Invalid octal value');
+      }
+
+      // Obtain the numerical value of this digit
+      final digit = charCode - $0;
+      result += digit * multiplier;
+      multiplier <<= 3; // Multiply by the base, 8
+    }
+
+    return result;
+  }
+
+  /// Parses an encoded int, either as base-256 or octal.
+  ///
+  /// This function may return negative numbers.
+  int readNumeric(int offset, int length) {
+    if (length == 0) return 0;
+
+    // Check for base-256 (binary) format first. If the first bit is set, then
+    // all following bits constitute a two's complement encoded number in big-
+    // endian byte order.
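+    // For example, `[0x80, 0x01]` encodes 1 and `[0xff, 0xff]` encodes -1.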
+    final firstByte = this[offset];
+    if (firstByte & 0x80 != 0) {
+      // Handling negative numbers relies on the following identity:
+      // -a-1 == ~a
+      //
+      // If the number is negative, we use an inversion mask to invert the
+      // data bytes and treat the value as an unsigned number.
+      final inverseMask = firstByte & 0x40 != 0 ? 0xff : 0x00;
+
+      // Ignore the sign bit in the first byte
+      var x = (firstByte ^ inverseMask) & 0x7f;
+
+      for (var i = 1; i < length; i++) {
+        var byte = this[offset + i];
+        byte ^= inverseMask;
+
+        x = x << 8 | byte;
+      }
+
+      return inverseMask == 0xff ? ~x : x;
+    }
+
+    return readOctal(offset, length);
+  }
+
+  int computeUnsignedHeaderChecksum() {
+    var result = 0;
+
+    for (var i = 0; i < length; i++) {
+      result += (i < checksumOffset || i >= _checksumEnd)
+          ? this[i] // Not in range of where the checksum is written
+          : _checksumPlaceholder;
+    }
+
+    return result;
+  }
+
+  int computeSignedHeaderChecksum() {
+    var result = 0;
+
+    for (var i = 0; i < length; i++) {
+      // Note that _checksumPlaceholder.toSigned(8) == _checksumPlaceholder
+      result += (i < checksumOffset || i >= _checksumEnd)
+          ? this[i].toSigned(8)
+          : _checksumPlaceholder;
+    }
+
+    return result;
+  }
+
+  bool matchesHeader(List<int> header, {int offset = magicOffset}) {
+    for (var i = 0; i < header.length; i++) {
+      if (this[offset + i] != header[i]) return false;
+    }
+
+    return true;
+  }
+}
+
+bool isNotAscii(int i) => i > 127;
+
+/// Like [int.parse], but throwing a [TarException] instead of the more-general
+/// [FormatException] when it fails.
+int parseInt(String source) {
+  return int.tryParse(source, radix: 10) ??
+      (throw TarException('Not an int: $source'));
+}
+
+/// Parses a [paxTimeString] of the form `%d.%d` as described in the PAX
+/// specification. Note that this implementation allows negative timestamps,
+/// which the PAX specification permits, but which are not always portable.
+///
+/// Note that Dart's [DateTime] class only supports microsecond precision,
+/// which implies that we cannot keep all digits of a timestamp, since PAX
+/// allows nanosecond-level encoding.
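+///
+/// For example, `1350244992.023960108` is truncated to 1350244992 seconds and
+/// 23960 microseconds since the epoch.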
+DateTime parsePaxTime(String paxTimeString) {
+  const maxMicroSecondDigits = 6;
+
+  /// Split [paxTimeString] into seconds and sub-seconds parts.
+  var secondsString = paxTimeString;
+  var microSecondsString = '';
+  final position = paxTimeString.indexOf('.');
+  if (position >= 0) {
+    secondsString = paxTimeString.substring(0, position);
+    microSecondsString = paxTimeString.substring(position + 1);
+  }
+
+  /// Parse the seconds.
+  final seconds = int.tryParse(secondsString);
+  if (seconds == null) {
+    throw TarException.header('Invalid PAX time $paxTimeString detected!');
+  }
+
+  if (microSecondsString.replaceAll(RegExp('[0-9]'), '') != '') {
+    throw TarException.header(
+        'Invalid nanoseconds $microSecondsString detected');
+  }
+
+  microSecondsString = microSecondsString.padRight(maxMicroSecondDigits, '0');
+  microSecondsString = microSecondsString.substring(0, maxMicroSecondDigits);
+
+  var microSeconds =
+      microSecondsString.isEmpty ? 0 : int.parse(microSecondsString);
+  if (paxTimeString.startsWith('-')) microSeconds = -microSeconds;
+
+  return microsecondsSinceEpoch(microSeconds + seconds * pow(10, 6).toInt());
+}
+
+DateTime secondsSinceEpoch(int timestamp) {
+  return DateTime.fromMillisecondsSinceEpoch(timestamp * 1000, isUtc: true);
+}
+
+DateTime millisecondsSinceEpoch(int milliseconds) {
+  return DateTime.fromMillisecondsSinceEpoch(milliseconds, isUtc: true);
+}
+
+DateTime microsecondsSinceEpoch(int microseconds) {
+  return DateTime.fromMicrosecondsSinceEpoch(microseconds, isUtc: true);
+}
+
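+/// The number of 512-byte blocks needed to store [fileSize] bytes, rounding
+/// up. For example, `numBlocks(0) == 0`, `numBlocks(512) == 1` and
+/// `numBlocks(513) == 2`.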
+int numBlocks(int fileSize) {
+  if (fileSize % blockSize == 0) return fileSize ~/ blockSize;
+
+  return fileSize ~/ blockSize + 1;
+}
+
+int nextBlockSize(int fileSize) => numBlocks(fileSize) * blockSize;
+
+extension ToTyped on List<int> {
+  Uint8List asUint8List() {
+    // Type promotion doesn't work on `this`, so assign it to a local first.
+    final $this = this;
+    return $this is Uint8List ? $this : Uint8List.fromList(this);
+  }
+
+  bool get isAllZeroes {
+    for (var i = 0; i < length; i++) {
+      if (this[i] != 0) return false;
+    }
+
+    return true;
+  }
+}
+
+/// Generates a chunked stream of [length] zeroes.
+Stream<List<int>> zeroes(int length) async* {
+  // Emit data in chunks for efficiency
+  const chunkSize = 4 * 1024;
+  if (length < chunkSize) {
+    yield Uint8List(length);
+    return;
+  }
+
+  final chunk = Uint8List(chunkSize);
+  for (var i = 0; i < length ~/ chunkSize; i++) {
+    yield chunk;
+  }
+
+  final remainingBytes = length % chunkSize;
+  if (remainingBytes != 0) {
+    yield Uint8List(remainingBytes);
+  }
+}
diff --git a/lib/src/third_party/tar/lib/src/writer.dart b/lib/src/third_party/tar/lib/src/writer.dart
new file mode 100644
index 0000000..4ec9c43
--- /dev/null
+++ b/lib/src/third_party/tar/lib/src/writer.dart
@@ -0,0 +1,305 @@
+// @dart = 2.12
+
+import 'dart:async';
+import 'dart:convert';
+import 'dart:typed_data';
+
+import 'charcodes.dart';
+import 'constants.dart';
+import 'entry.dart';
+import 'format.dart';
+import 'header.dart';
+
+class _WritingTransformer extends StreamTransformerBase<TarEntry, List<int>> {
+  const _WritingTransformer();
+
+  @override
+  Stream<List<int>> bind(Stream<TarEntry> stream) {
+    // sync because the controller proxies another stream
+    final controller = StreamController<List<int>>(sync: true);
+    controller.onListen = () {
+      stream.pipe(tarWritingSink(controller));
+    };
+
+    return controller.stream;
+  }
+}
+
+/// A stream transformer writing tar entries as byte streams.
+///
+/// Regardless of the input stream, the stream returned by this
+/// [StreamTransformer.bind] is a single-subscription stream.
+/// Apart from that, subscriptions, cancellations, pauses and resumes are
+/// propagated as one would expect from a [StreamTransformer].
+///
+/// When piping the resulting stream into a [StreamConsumer], consider using
+/// [tarWritingSink] directly.
+const StreamTransformer<TarEntry, List<int>> tarWriter = _WritingTransformer();
+
+/// Create a sink emitting encoded tar files to the [output] sink.
+///
+/// For instance, you can use this to write a tar file:
+///
+/// ```dart
+/// import 'dart:convert';
+/// import 'dart:io';
+/// import 'package:tar/tar.dart';
+///
+/// Future<void> main() async {
+///   Stream<TarEntry> entries = Stream.value(
+///     TarEntry.data(
+///       TarHeader(
+///         name: 'example.txt',
+///         mode: int.parse('644', radix: 8),
+///       ),
+///       utf8.encode('This is the content of the tar file'),
+///     ),
+///   );
+///
+///   final output = File('/tmp/test.tar').openWrite();
+///   await entries.pipe(tarWritingSink(output));
+///  }
+/// ```
+///
+/// Note that, if you don't set the [TarHeader.size], outgoing tar entries need
+/// to be buffered once to determine their size, which decreases performance.
+///
+/// See also:
+///  - [tarWriter], a stream transformer using this sink
+///  - [StreamSink]
+StreamSink<TarEntry> tarWritingSink(StreamSink<List<int>> output) {
+  return _WritingSink(output);
+}
+
+class _WritingSink extends StreamSink<TarEntry> {
+  final StreamSink<List<int>> _output;
+
+  int _paxHeaderCount = 0;
+  bool _closed = false;
+  final Completer<Object?> _done = Completer();
+
+  int _pendingOperations = 0;
+  Future<void> _ready = Future.value();
+
+  _WritingSink(this._output);
+
+  @override
+  Future<void> get done => _done.future;
+
+  @override
+  Future<void> add(TarEntry event) {
+    if (_closed) {
+      throw StateError('Cannot add event after close was called');
+    }
+    return _doWork(() => _safeAdd(event));
+  }
+
+  Future<void> _doWork(FutureOr<void> Function() work) {
+    _pendingOperations++;
+    // Chain futures to make sure we only write one entry at a time.
+    return _ready = _ready
+        .then((_) => work())
+        .catchError(_output.addError)
+        .whenComplete(() {
+      _pendingOperations--;
+
+      if (_closed && _pendingOperations == 0) {
+        _done.complete(_output.close());
+      }
+    });
+  }
+
+  Future<void> _safeAdd(TarEntry event) async {
+    final header = event.header;
+    var size = header.size;
+    Uint8List? bufferedData;
+    if (size < 0) {
+      final builder = BytesBuilder();
+      await event.contents.forEach(builder.add);
+      bufferedData = builder.takeBytes();
+      size = bufferedData.length;
+    }
+
+    var nameBytes = utf8.encode(header.name);
+    var linkBytes = utf8.encode(header.linkName ?? '');
+    var gnameBytes = utf8.encode(header.groupName ?? '');
+    var unameBytes = utf8.encode(header.userName ?? '');
+
+    // We only get 100 chars for the name and link name. If they are longer, we
+    // have to insert an entry just to store the names. Some tar implementations
+    // expect them to be zero-terminated, so use 99 chars to be safe.
+    final paxHeader = <String, List<int>>{};
+    if (nameBytes.length > 99) {
+      paxHeader[paxPath] = nameBytes;
+      nameBytes = nameBytes.sublist(0, 99);
+    }
+    if (linkBytes.length > 99) {
+      paxHeader[paxLinkpath] = linkBytes;
+      linkBytes = linkBytes.sublist(0, 99);
+    }
+
+    // It's even worse for users and groups, where we only get 31 usable chars.
+    if (gnameBytes.length > 31) {
+      paxHeader[paxGname] = gnameBytes;
+      gnameBytes = gnameBytes.sublist(0, 31);
+    }
+    if (unameBytes.length > 31) {
+      paxHeader[paxUname] = unameBytes;
+      unameBytes = unameBytes.sublist(0, 31);
+    }
+
+    if (size > maxIntFor12CharOct) {
+      paxHeader[paxSize] = ascii.encode(size.toString());
+    }
+
+    if (paxHeader.isNotEmpty) {
+      await _writePaxHeader(paxHeader);
+    }
+
+    final headerBlock = Uint8List(blockSize)
+      ..setAll(0, nameBytes)
+      ..setUint(header.mode, 100, 8)
+      ..setUint(header.userId, 108, 8)
+      ..setUint(header.groupId, 116, 8)
+      ..setUint(size, 124, 12)
+      ..setUint(header.modified.millisecondsSinceEpoch ~/ 1000, 136, 12)
+      ..[156] = typeflagToByte(header.typeFlag)
+      ..setAll(157, linkBytes)
+      ..setAll(257, magicUstar)
+      ..setUint(0, 263, 2) // version
+      ..setAll(265, unameBytes)
+      ..setAll(297, gnameBytes)
+      // To calculate the checksum, we first fill the checksum range with spaces
+      ..setAll(148, List.filled(8, $space));
+
+    // Then, we take the sum of the header
+    var checksum = 0;
+    for (final byte in headerBlock) {
+      checksum += byte;
+    }
+    headerBlock.setUint(checksum, 148, 8);
+
+    _output.add(headerBlock);
+
+    // Write content.
+    if (bufferedData != null) {
+      _output.add(bufferedData);
+    } else {
+      await event.contents.forEach(_output.add);
+    }
+
+    final padding = -size % blockSize;
+    _output.add(Uint8List(padding));
+  }
+
+  /// Writes an extended pax header.
+  ///
+  /// https://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_03
+  Future<void> _writePaxHeader(Map<String, List<int>> values) {
+    final buffer = BytesBuilder();
+    // format of each entry: "%d %s=%s\n", <length>, <keyword>, <value>
+    // note that the length includes the trailing \n and the length description
+    // itself.
+    values.forEach((key, value) {
+      final encodedKey = utf8.encode(key);
+      // +3 for the whitespace, the equals and the \n
+      final payloadLength = encodedKey.length + value.length + 3;
+      var indicatedLength = payloadLength;
+
+      // The indicated length contains the length (in decimals) itself. So if
+      // we had payloadLength=9, then we'd prefix a 9 at which point the whole
+      // string would have a length of 10. If that happens, increment length.
+      var actualLength = payloadLength + indicatedLength.toString().length;
+
+      while (actualLength != indicatedLength) {
+        indicatedLength++;
+        actualLength = payloadLength + indicatedLength.toString().length;
+      }
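+
+      // For example, for key `path` and value `foo`, the payload ` path=foo\n`
+      // is 10 bytes long, so the record becomes `12 path=foo\n` (12 bytes in
+      // total, including the two length digits themselves).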
+
+      // With that sorted out, let's add the line
+      buffer
+        ..add(utf8.encode(indicatedLength.toString()))
+        ..addByte($space)
+        ..add(encodedKey)
+        ..addByte($equal)
+        ..add(value)
+        ..addByte($lf); // \n
+    });
+
+    final paxData = buffer.takeBytes();
+    final file = TarEntry.data(
+      HeaderImpl.internal(
+        format: TarFormat.pax,
+        modified: DateTime.fromMillisecondsSinceEpoch(0),
+        name: 'PaxHeader/${_paxHeaderCount++}',
+        mode: 0,
+        size: paxData.length,
+        typeFlag: TypeFlag.xHeader,
+      ),
+      paxData,
+    );
+    return _safeAdd(file);
+  }
+
+  @override
+  void addError(Object error, [StackTrace? stackTrace]) {
+    _output.addError(error, stackTrace);
+  }
+
+  @override
+  Future<void> addStream(Stream<TarEntry> stream) async {
+    await for (final entry in stream) {
+      await add(entry);
+    }
+  }
+
+  @override
+  Future<void> close() async {
+    if (!_closed) {
+      _closed = true;
+
+      // Add two empty blocks at the end.
+      await _doWork(() {
+        _output.add(zeroBlock);
+        _output.add(zeroBlock);
+      });
+    }
+
+    return done;
+  }
+}
+
+extension on Uint8List {
+  void setUint(int value, int position, int length) {
+    // Values are encoded as octal string, terminated and left-padded with
+    // space chars.
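+    // For example, setUint(420, 0, 8) writes `    644 `: the octal digits
+    // of 420, right-aligned before the terminating space.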
+
+    // Set terminating space char.
+    this[position + length - 1] = $space;
+
+    // Write as octal value, we write from right to left
+    var number = value;
+    var needsExplicitZero = number == 0;
+
+    for (var pos = position + length - 2; pos >= position; pos--) {
+      if (number != 0) {
+        // Write the last octal digit of the number (i.e. the last 3 bits)
+        this[pos] = (number & 7) + $0;
+        // then drop the last digit (divide by 8 = 2³)
+        number >>= 3;
+      } else if (needsExplicitZero) {
+        this[pos] = $0;
+        needsExplicitZero = false;
+      } else {
+        // done, left-pad with spaces
+        this[pos] = $space;
+      }
+    }
+  }
+}
diff --git a/lib/src/third_party/tar/lib/tar.dart b/lib/src/third_party/tar/lib/tar.dart
new file mode 100644
index 0000000..d1ea938
--- /dev/null
+++ b/lib/src/third_party/tar/lib/tar.dart
@@ -0,0 +1,19 @@
+// @dart = 2.12
+
+/// Streaming tar implementation for Dart.
+///
+/// To read tar files, see [TarReader]. To write tar files, use [tarWritingSink]
+/// or [tarWriter].
+library tar;
+
+// For dartdoc.
+import 'src/reader.dart';
+import 'src/writer.dart';
+
+export 'src/constants.dart' show TypeFlag;
+export 'src/entry.dart';
+export 'src/exception.dart';
+export 'src/format.dart';
+export 'src/header.dart' show TarHeader;
+export 'src/reader.dart' show TarReader;
+export 'src/writer.dart' show tarWritingSink, tarWriter;
diff --git a/lib/src/third_party/tar/pubspec.yaml b/lib/src/third_party/tar/pubspec.yaml
new file mode 100644
index 0000000..b2467e0
--- /dev/null
+++ b/lib/src/third_party/tar/pubspec.yaml
@@ -0,0 +1,17 @@
+name: tar
+description: Memory-efficient, streaming implementation of the tar file format
+version: 0.3.0
+repository: https://github.com/simolus3/tar/
+
+environment:
+  sdk: '>=2.12.0-29.10.beta <3.0.0'
+
+dependencies:
+  chunked_stream: ^1.4.0
+  meta: ^1.3.0
+  typed_data: ^1.3.0
+
+dev_dependencies:
+  charcode: ^1.2.0
+  extra_pedantic: ^1.2.0
+  test: ^1.16.0
diff --git a/lib/src/third_party/tar/reference/bad/truncated_in_body.tar b/lib/src/third_party/tar/reference/bad/truncated_in_body.tar
new file mode 100644
index 0000000..f185e40
--- /dev/null
+++ b/lib/src/third_party/tar/reference/bad/truncated_in_body.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/bad/truncated_in_header.tar b/lib/src/third_party/tar/reference/bad/truncated_in_header.tar
new file mode 100644
index 0000000..e9cf06c
--- /dev/null
+++ b/lib/src/third_party/tar/reference/bad/truncated_in_header.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/gnu.tar b/lib/src/third_party/tar/reference/gnu.tar
new file mode 100644
index 0000000..1b7876c
--- /dev/null
+++ b/lib/src/third_party/tar/reference/gnu.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/headers/evil_large_header.tar b/lib/src/third_party/tar/reference/headers/evil_large_header.tar
new file mode 100644
index 0000000..b00f176
--- /dev/null
+++ b/lib/src/third_party/tar/reference/headers/evil_large_header.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/headers/large_posix.tar b/lib/src/third_party/tar/reference/headers/large_posix.tar
new file mode 100644
index 0000000..5499e33
--- /dev/null
+++ b/lib/src/third_party/tar/reference/headers/large_posix.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/gnu-incremental.tar b/lib/src/third_party/tar/reference/neats_test/gnu-incremental.tar
new file mode 100644
index 0000000..df063e5
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/gnu-incremental.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/gnu-long-nul.tar b/lib/src/third_party/tar/reference/neats_test/gnu-long-nul.tar
new file mode 100644
index 0000000..c3a88d0
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/gnu-long-nul.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/gnu-multi-hdrs.tar b/lib/src/third_party/tar/reference/neats_test/gnu-multi-hdrs.tar
new file mode 100644
index 0000000..846358e
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/gnu-multi-hdrs.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/gnu-nil-sparse-data.tar b/lib/src/third_party/tar/reference/neats_test/gnu-nil-sparse-data.tar
new file mode 100644
index 0000000..9cdcb60
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/gnu-nil-sparse-data.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/gnu-nil-sparse-hole.tar b/lib/src/third_party/tar/reference/neats_test/gnu-nil-sparse-hole.tar
new file mode 100644
index 0000000..d82eb40
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/gnu-nil-sparse-hole.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/gnu-non-utf8-name.tar b/lib/src/third_party/tar/reference/neats_test/gnu-non-utf8-name.tar
new file mode 100644
index 0000000..0fa6503
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/gnu-non-utf8-name.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/gnu-utf8.tar b/lib/src/third_party/tar/reference/neats_test/gnu-utf8.tar
new file mode 100644
index 0000000..86195c0
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/gnu-utf8.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/gnu.tar b/lib/src/third_party/tar/reference/neats_test/gnu.tar
new file mode 100644
index 0000000..d71480d
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/gnu.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/invalid-pax-headers.tar b/lib/src/third_party/tar/reference/neats_test/invalid-pax-headers.tar
new file mode 100644
index 0000000..4d71fa1
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/invalid-pax-headers.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/invalid-uid.tar b/lib/src/third_party/tar/reference/neats_test/invalid-uid.tar
new file mode 100644
index 0000000..3542dd8
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/invalid-uid.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/malformed-sparse-file.tar b/lib/src/third_party/tar/reference/neats_test/malformed-sparse-file.tar
new file mode 100644
index 0000000..1cc837b
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/malformed-sparse-file.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/nil-gid-uid.tar b/lib/src/third_party/tar/reference/neats_test/nil-gid-uid.tar
new file mode 100644
index 0000000..e4f14a6
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/nil-gid-uid.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/pax-bad-mtime.tar b/lib/src/third_party/tar/reference/neats_test/pax-bad-mtime.tar
new file mode 100644
index 0000000..61343c5
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/pax-bad-mtime.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/pax-bad-record-length.tar b/lib/src/third_party/tar/reference/neats_test/pax-bad-record-length.tar
new file mode 100644
index 0000000..5e4098b
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/pax-bad-record-length.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/pax-multi-hdrs.tar b/lib/src/third_party/tar/reference/neats_test/pax-multi-hdrs.tar
new file mode 100644
index 0000000..adbe77f
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/pax-multi-hdrs.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/pax-nil-sparse-data.tar b/lib/src/third_party/tar/reference/neats_test/pax-nil-sparse-data.tar
new file mode 100644
index 0000000..f14aac2
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/pax-nil-sparse-data.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/pax-nil-sparse-hole.tar b/lib/src/third_party/tar/reference/neats_test/pax-nil-sparse-hole.tar
new file mode 100644
index 0000000..22f933b
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/pax-nil-sparse-hole.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/pax-non-ascii-name.tar b/lib/src/third_party/tar/reference/neats_test/pax-non-ascii-name.tar
new file mode 100644
index 0000000..ffc0cac
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/pax-non-ascii-name.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/pax-nul-path.tar b/lib/src/third_party/tar/reference/neats_test/pax-nul-path.tar
new file mode 100644
index 0000000..6e96673
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/pax-nul-path.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/pax-nul-xattrs.tar b/lib/src/third_party/tar/reference/neats_test/pax-nul-xattrs.tar
new file mode 100644
index 0000000..b2b00dd
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/pax-nul-xattrs.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/pax-pos-size-file.tar b/lib/src/third_party/tar/reference/neats_test/pax-pos-size-file.tar
new file mode 100644
index 0000000..a62e630
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/pax-pos-size-file.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/pax-records.tar b/lib/src/third_party/tar/reference/neats_test/pax-records.tar
new file mode 100644
index 0000000..2f94fbc
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/pax-records.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/pax.tar b/lib/src/third_party/tar/reference/neats_test/pax.tar
new file mode 100644
index 0000000..6a412bb
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/pax.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/sparse-formats.tar b/lib/src/third_party/tar/reference/neats_test/sparse-formats.tar
new file mode 100644
index 0000000..d42bae1
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/sparse-formats.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/star.tar b/lib/src/third_party/tar/reference/neats_test/star.tar
new file mode 100644
index 0000000..1f0319e
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/star.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/trailing-slash.tar b/lib/src/third_party/tar/reference/neats_test/trailing-slash.tar
new file mode 100644
index 0000000..1aac752
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/trailing-slash.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/ustar-nonzero-device-numbers.tar b/lib/src/third_party/tar/reference/neats_test/ustar-nonzero-device-numbers.tar
new file mode 100644
index 0000000..718079f
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/ustar-nonzero-device-numbers.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/ustar.tar b/lib/src/third_party/tar/reference/neats_test/ustar.tar
new file mode 100644
index 0000000..90430a3
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/ustar.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/v7.tar b/lib/src/third_party/tar/reference/neats_test/v7.tar
new file mode 100644
index 0000000..d27c2a4
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/v7.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/neats_test/xattrs.tar b/lib/src/third_party/tar/reference/neats_test/xattrs.tar
new file mode 100644
index 0000000..f974b15
--- /dev/null
+++ b/lib/src/third_party/tar/reference/neats_test/xattrs.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/posix.tar b/lib/src/third_party/tar/reference/posix.tar
new file mode 100644
index 0000000..c10d4f8
--- /dev/null
+++ b/lib/src/third_party/tar/reference/posix.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/pub/RAL-1.28.0.tar.gz b/lib/src/third_party/tar/reference/pub/RAL-1.28.0.tar.gz
new file mode 100644
index 0000000..47765a0
--- /dev/null
+++ b/lib/src/third_party/tar/reference/pub/RAL-1.28.0.tar.gz
Binary files differ
diff --git a/lib/src/third_party/tar/reference/pub/access_settings_menu-0.0.1.tar.gz b/lib/src/third_party/tar/reference/pub/access_settings_menu-0.0.1.tar.gz
new file mode 100644
index 0000000..c4b83fd
--- /dev/null
+++ b/lib/src/third_party/tar/reference/pub/access_settings_menu-0.0.1.tar.gz
Binary files differ
diff --git a/lib/src/third_party/tar/reference/pub/rikulo_commons-0.7.6.tar.gz b/lib/src/third_party/tar/reference/pub/rikulo_commons-0.7.6.tar.gz
new file mode 100644
index 0000000..29fa344
--- /dev/null
+++ b/lib/src/third_party/tar/reference/pub/rikulo_commons-0.7.6.tar.gz
Binary files differ
diff --git a/lib/src/third_party/tar/reference/res/subdirectory_with_a_long_name/file_with_a_path_length_of_more_than_100_characters_so_that_it_gets_split.txt b/lib/src/third_party/tar/reference/res/subdirectory_with_a_long_name/file_with_a_path_length_of_more_than_100_characters_so_that_it_gets_split.txt
new file mode 100644
index 0000000..6205f3a
--- /dev/null
+++ b/lib/src/third_party/tar/reference/res/subdirectory_with_a_long_name/file_with_a_path_length_of_more_than_100_characters_so_that_it_gets_split.txt
@@ -0,0 +1 @@
+ditto
\ No newline at end of file
diff --git a/lib/src/third_party/tar/reference/res/test.txt b/lib/src/third_party/tar/reference/res/test.txt
new file mode 100644
index 0000000..02a3542
--- /dev/null
+++ b/lib/src/third_party/tar/reference/res/test.txt
@@ -0,0 +1 @@
+Test file content!
diff --git a/lib/src/third_party/tar/reference/ustar.tar b/lib/src/third_party/tar/reference/ustar.tar
new file mode 100644
index 0000000..8612c5e
--- /dev/null
+++ b/lib/src/third_party/tar/reference/ustar.tar
Binary files differ
diff --git a/lib/src/third_party/tar/reference/v7.tar b/lib/src/third_party/tar/reference/v7.tar
new file mode 100644
index 0000000..c8dc14f
--- /dev/null
+++ b/lib/src/third_party/tar/reference/v7.tar
Binary files differ
diff --git a/lib/src/third_party/tar/test/pub_test.dart b/lib/src/third_party/tar/test/pub_test.dart
new file mode 100644
index 0000000..6cb7e70
--- /dev/null
+++ b/lib/src/third_party/tar/test/pub_test.dart
@@ -0,0 +1,27 @@
+// Simple tests to ensure that we can parse weird tars found on pub.
+//
+// The test cases were found by running an earlier version of this package
+// across all packages and versions found on pub.dev. This package needs to
+// be able to read every package version ever uploaded to pub.
+import 'dart:io';
+
+import 'package:tar/tar.dart';
+import 'package:test/test.dart';
+
+void main() {
+  const onceBroken = [
+    'access_settings_menu-0.0.1',
+    'RAL-1.28.0',
+    'rikulo_commons-0.7.6',
+  ];
+
+  for (final package in onceBroken) {
+    test('can read $package', () {
+      final file = File('reference/pub/$package.tar.gz');
+      final tarStream = file.openRead().transform(gzip.decoder);
+      return TarReader.forEach(tarStream, (entry) {
+        // do nothing, we just want to make sure that the package can be read.
+      });
+    });
+  }
+}
diff --git a/lib/src/third_party/tar/test/reader_test.dart b/lib/src/third_party/tar/test/reader_test.dart
new file mode 100644
index 0000000..3a9a3d8
--- /dev/null
+++ b/lib/src/third_party/tar/test/reader_test.dart
@@ -0,0 +1,915 @@
+import 'dart:async';
+import 'dart:convert';
+import 'dart:io';
+import 'dart:typed_data';
+
+import 'package:chunked_stream/chunked_stream.dart';
+import 'package:tar/src/reader.dart';
+import 'package:tar/src/utils.dart';
+import 'package:test/test.dart';
+
+import 'package:tar/tar.dart';
+
+void main() {
+  group('POSIX.1-2001', () {
+    test('reads files', () => _testWith('reference/posix.tar'));
+
+    test('reads large files',
+        () => _testLargeFile('reference/headers/large_posix.tar'));
+  });
+
+  test('(new) GNU Tar format', () => _testWith('reference/gnu.tar'));
+  test('ustar', () => _testWith('reference/ustar.tar'));
+  test('v7', () => _testWith('reference/v7.tar', ignoreLongFileName: true));
+
+  test('can skip tar files', () async {
+    final input = File('reference/posix.tar').openRead();
+    final reader = TarReader(input);
+
+    expect(await reader.moveNext(), isTrue);
+    expect(await reader.moveNext(), isTrue);
+    expect(reader.current.name, 'reference/res/subdirectory_with_a_long_name/');
+  });
+
+  test('getters throw before moveNext() is called', () {
+    final reader = TarReader(const Stream<Never>.empty());
+
+    expect(() => reader.current, throwsStateError);
+  });
+
+  test("can't use moveNext() concurrently", () {
+    final reader = TarReader(Stream.fromFuture(
+        Future.delayed(const Duration(seconds: 2), () => <int>[])));
+
+    expect(reader.moveNext(), completion(isFalse));
+    expect(() => reader.moveNext(), throwsStateError);
+    return reader.cancel();
+  });
+
+  test("can't use moveNext() while a stream is active", () async {
+    final input = File('reference/posix.tar').openRead();
+    final reader = TarReader(input);
+
+    expect(await reader.moveNext(), isTrue);
+    reader.current.contents.listen((event) {}).pause();
+
+    expect(() => reader.moveNext(), throwsStateError);
+    await reader.cancel();
+  });
+
+  test("can't use moveNext() after canceling the reader", () async {
+    final input = File('reference/posix.tar').openRead();
+    final reader = TarReader(input);
+    await reader.cancel();
+
+    expect(() => reader.moveNext(), throwsStateError);
+  });
+
+  group('the reader closes itself', () {
+    test("at the end of a file", () async {
+      // two zero blocks terminate a tar file
+      final zeroBlock = Uint8List(512);
+      final controller = StreamController<List<int>>();
+      controller.onListen = () {
+        controller..add(zeroBlock)..add(zeroBlock);
+      };
+
+      final reader = TarReader(controller.stream);
+      await expectLater(reader.moveNext(), completion(isFalse));
+
+      expect(controller.hasListener, isFalse);
+    });
+
+    test('if the stream emits an error in headers', () async {
+      final controller = StreamController<List<int>>();
+      controller.onListen = () {
+        controller.addError('foo');
+      };
+
+      final reader = TarReader(controller.stream);
+      await expectLater(reader.moveNext(), throwsA('foo'));
+
+      expect(controller.hasListener, isFalse);
+    });
+
+    test('if the stream emits an error in content', () async {
+      // Craft a stream that starts with a valid tar file, but then emits an
+      // error in the middle of an entry. First 512 bytes are headers.
+      final iterator =
+          ChunkedStreamIterator(File('reference/v7.tar').openRead());
+      final controller = StreamController<List<int>>();
+      controller.onListen = () async {
+        // headers + 3 bytes of content
+        await controller.addStream(iterator.substream(515));
+        controller.addError('foo');
+      };
+
+      final reader = TarReader(controller.stream);
+      await expectLater(reader.moveNext(), completion(isTrue));
+      await expectLater(
+          reader.current.contents, emitsThrough(emitsError('foo')));
+
+      expect(controller.hasListener, isFalse);
+      await iterator.cancel();
+    });
+  });
+
+  group('tests from dart-neats PR', () {
+    Stream<List<int>> open(String name) {
+      return File('reference/neats_test/$name').openRead();
+    }
+
+    final tests = [
+      {
+        'file': 'gnu.tar',
+        'headers': <TarHeader>[
+          TarHeader(
+            name: 'small.txt',
+            mode: 436,
+            userId: 1000,
+            groupId: 1000,
+            size: 3,
+            modified: millisecondsSinceEpoch(1597755680000),
+            typeFlag: TypeFlag.reg,
+            userName: 'garett',
+            groupName: 'garett',
+            format: TarFormat.gnu,
+          ),
+          TarHeader(
+            name: 'small2.txt',
+            mode: 436,
+            userId: 1000,
+            groupId: 1000,
+            size: 8,
+            modified: millisecondsSinceEpoch(1597755958000),
+            typeFlag: TypeFlag.reg,
+            userName: 'garett',
+            groupName: 'garett',
+            format: TarFormat.gnu,
+          )
+        ],
+      },
+      {
+        'file': 'sparse-formats.tar',
+        'headers': <TarHeader>[
+          TarHeader(
+            name: 'sparse-gnu',
+            mode: 420,
+            userId: 1000,
+            groupId: 1000,
+            size: 200,
+            modified: millisecondsSinceEpoch(1597756151000),
+            typeFlag: TypeFlag.gnuSparse,
+            userName: 'jonas',
+            groupName: 'jonas',
+            devMajor: 0,
+            devMinor: 0,
+            format: TarFormat.gnu,
+          ),
+          TarHeader(
+            name: 'sparse-posix-v-0-0',
+            mode: 420,
+            userId: 1000,
+            groupId: 1000,
+            size: 200,
+            modified: millisecondsSinceEpoch(1597756151000),
+            typeFlag: TypeFlag.reg,
+            userName: 'jonas',
+            groupName: 'jonas',
+            devMajor: 0,
+            devMinor: 0,
+            format: TarFormat.pax,
+          ),
+          TarHeader(
+            name: 'sparse-posix-0-1',
+            mode: 420,
+            userId: 1000,
+            groupId: 1000,
+            size: 200,
+            modified: millisecondsSinceEpoch(1597756151000),
+            typeFlag: TypeFlag.reg,
+            userName: 'jonas',
+            groupName: 'jonas',
+            devMajor: 0,
+            devMinor: 0,
+            format: TarFormat.pax,
+          ),
+          TarHeader(
+            name: 'sparse-posix-1-0',
+            mode: 420,
+            userId: 1000,
+            groupId: 1000,
+            size: 200,
+            modified: millisecondsSinceEpoch(1597756151000),
+            typeFlag: TypeFlag.reg,
+            userName: 'jonas',
+            groupName: 'jonas',
+            devMajor: 0,
+            devMinor: 0,
+            format: TarFormat.pax,
+          ),
+          TarHeader(
+            name: 'end',
+            mode: 420,
+            userId: 1000,
+            groupId: 1000,
+            size: 4,
+            modified: millisecondsSinceEpoch(1597756151000),
+            typeFlag: TypeFlag.reg,
+            userName: 'jonas',
+            groupName: 'jonas',
+            devMajor: 0,
+            devMinor: 0,
+            format: TarFormat.gnu,
+          )
+        ],
+      },
+      {
+        'file': 'star.tar',
+        'headers': [
+          TarHeader(
+            name: 'small.txt',
+            mode: 416,
+            userId: 1000,
+            groupId: 1000,
+            size: 3,
+            modified: millisecondsSinceEpoch(1597755680000),
+            typeFlag: TypeFlag.reg,
+            userName: 'garett',
+            groupName: 'garett',
+            accessed: millisecondsSinceEpoch(1597755680000),
+            changed: millisecondsSinceEpoch(1597755680000),
+            format: TarFormat.star,
+          ),
+          TarHeader(
+            name: 'small2.txt',
+            mode: 416,
+            userId: 1000,
+            groupId: 1000,
+            size: 7,
+            modified: millisecondsSinceEpoch(1597755958000),
+            typeFlag: TypeFlag.reg,
+            userName: 'garett',
+            groupName: 'garett',
+            accessed: millisecondsSinceEpoch(1597755958000),
+            changed: millisecondsSinceEpoch(1597755958000),
+            format: TarFormat.star,
+          )
+        ]
+      },
+      {
+        'file': 'v7.tar',
+        'headers': [
+          TarHeader(
+            name: 'small.txt',
+            mode: 436,
+            userId: 1000,
+            groupId: 1000,
+            size: 3,
+            modified: millisecondsSinceEpoch(1597755680000),
+            typeFlag: TypeFlag.reg,
+            format: TarFormat.v7,
+          ),
+          TarHeader(
+            name: 'small2.txt',
+            mode: 436,
+            userId: 1000,
+            groupId: 1000,
+            size: 8,
+            modified: millisecondsSinceEpoch(1597755958000),
+            typeFlag: TypeFlag.reg,
+            format: TarFormat.v7,
+          )
+        ],
+      },
+      {
+        'file': 'ustar.tar',
+        'headers': [
+          TarHeader(
+            name: 'small.txt',
+            mode: 436,
+            userId: 1000,
+            groupId: 1000,
+            size: 3,
+            modified: millisecondsSinceEpoch(1597755680000),
+            typeFlag: TypeFlag.reg,
+            userName: 'garett',
+            groupName: 'garett',
+            format: TarFormat.ustar,
+          ),
+          TarHeader(
+            name: 'small2.txt',
+            mode: 436,
+            userId: 1000,
+            groupId: 1000,
+            size: 8,
+            modified: millisecondsSinceEpoch(1597755958000),
+            typeFlag: TypeFlag.reg,
+            userName: 'garett',
+            groupName: 'garett',
+            format: TarFormat.ustar,
+          )
+        ],
+      },
+      {
+        'file': 'pax.tar',
+        'headers': [
+          TarHeader(
+            name:
+                'a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100',
+            mode: 436,
+            userId: 1000,
+            groupId: 1000,
+            userName: 'jonas',
+            groupName: 'fj',
+            size: 7,
+            modified: microsecondsSinceEpoch(1597823492427388),
+            changed: microsecondsSinceEpoch(1597823492427388),
+            accessed: microsecondsSinceEpoch(1597823492427388),
+            typeFlag: TypeFlag.reg,
+            format: TarFormat.pax,
+          ),
+          TarHeader(
+            name: 'a/b',
+            mode: 511,
+            userId: 1000,
+            groupId: 1000,
+            userName: 'garett',
+            groupName: 'tok',
+            size: 0,
+            modified: microsecondsSinceEpoch(1597823492427388),
+            changed: microsecondsSinceEpoch(1597823492427388),
+            accessed: microsecondsSinceEpoch(1597823492427388),
+            typeFlag: TypeFlag.symlink,
+            linkName:
+                '123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100',
+            format: TarFormat.pax,
+          ),
+        ]
+      },
+      {
+        // PAX record with bad record length.
+        'file': 'pax-bad-record-length.tar',
+        'error': true,
+      },
+      {
+        // PAX record with non-numeric mtime
+        'file': 'pax-bad-mtime.tar',
+        'error': true,
+      },
+      {
+        'file': 'pax-pos-size-file.tar',
+        'headers': [
+          TarHeader(
+            name: 'bar',
+            mode: 416,
+            userId: 143077,
+            groupId: 1000,
+            size: 999,
+            modified: millisecondsSinceEpoch(1597755680000),
+            typeFlag: TypeFlag.reg,
+            userName: 'jonasfj',
+            groupName: 'jfj',
+            format: TarFormat.pax,
+          )
+        ],
+      },
+      {
+        'file': 'pax-records.tar',
+        'headers': [
+          TarHeader(
+            typeFlag: TypeFlag.reg,
+            size: 0,
+            name: 'pax-records',
+            mode: 416,
+            userName: 'walnut',
+            modified: millisecondsSinceEpoch(0),
+            format: TarFormat.pax,
+          )
+        ],
+      },
+      {
+        'file': 'nil-gid-uid.tar',
+        'headers': [
+          TarHeader(
+            name: 'nil-gid.txt',
+            mode: 436,
+            userId: 1000,
+            groupId: 0,
+            size: 3,
+            modified: millisecondsSinceEpoch(1597755680000),
+            typeFlag: TypeFlag.reg,
+            userName: 'garett',
+            groupName: 'garett',
+            devMajor: 0,
+            devMinor: 0,
+            format: TarFormat.gnu,
+          ),
+          TarHeader(
+            name: 'nil-uid.txt',
+            mode: 436,
+            userId: 0,
+            groupId: 1000,
+            size: 7,
+            modified: millisecondsSinceEpoch(1597755958000),
+            typeFlag: TypeFlag.reg,
+            userName: 'garett',
+            groupName: 'garett',
+            devMajor: 0,
+            devMinor: 0,
+            format: TarFormat.gnu,
+          )
+        ]
+      },
+      {
+        'file': 'xattrs.tar',
+        'headers': [
+          TarHeader(
+            name: 'small.txt',
+            mode: 420,
+            userId: 1000,
+            groupId: 10,
+            size: 5,
+            modified: microsecondsSinceEpoch(1597823492427388),
+            typeFlag: TypeFlag.reg,
+            userName: 'garett',
+            groupName: 'tok',
+            accessed: microsecondsSinceEpoch(1597823492427388),
+            changed: microsecondsSinceEpoch(1597823492427388),
+            format: TarFormat.pax,
+          ),
+          TarHeader(
+            name: 'small2.txt',
+            mode: 420,
+            userId: 1000,
+            groupId: 10,
+            size: 11,
+            modified: microsecondsSinceEpoch(1597823492427388),
+            typeFlag: TypeFlag.reg,
+            userName: 'garett',
+            groupName: 'tok',
+            accessed: microsecondsSinceEpoch(1597823492427388),
+            changed: microsecondsSinceEpoch(1597823492427388),
+            format: TarFormat.pax,
+          )
+        ]
+      },
+      {
+        // Matches the behavior of GNU, BSD, and STAR tar utilities.
+        'file': 'gnu-multi-hdrs.tar',
+        'headers': [
+          TarHeader(
+            name: 'long-path-name',
+            size: 0,
+            linkName: 'long-linkpath-name',
+            userId: 1000,
+            groupId: 1000,
+            modified: millisecondsSinceEpoch(1597756829000),
+            typeFlag: TypeFlag.symlink,
+            format: TarFormat.gnu,
+          )
+        ],
+      },
+      {
+        // GNU tar file with atime and ctime fields set.
+        // Old GNU incremental backup.
+        //
+        // Created with GNU tar v1.27.1:
+        //   tar --incremental -S -cvf gnu-incremental.tar test2
+        'file': 'gnu-incremental.tar',
+        'headers': [
+          TarHeader(
+            name: 'incremental/',
+            mode: 16877,
+            userId: 1000,
+            groupId: 1000,
+            size: 14,
+            modified: millisecondsSinceEpoch(1597755680000),
+            typeFlag: TypeFlag.vendor,
+            userName: 'fizz',
+            groupName: 'foobar',
+            accessed: millisecondsSinceEpoch(1597755680000),
+            changed: millisecondsSinceEpoch(1597755033000),
+            format: TarFormat.gnu,
+          ),
+          TarHeader(
+            name: 'incremental/foo',
+            mode: 33188,
+            userId: 1000,
+            groupId: 1000,
+            size: 64,
+            modified: millisecondsSinceEpoch(1597755688000),
+            typeFlag: TypeFlag.reg,
+            userName: 'fizz',
+            groupName: 'foobar',
+            accessed: millisecondsSinceEpoch(1597759641000),
+            changed: millisecondsSinceEpoch(1597755793000),
+            format: TarFormat.gnu,
+          ),
+          TarHeader(
+            name: 'incremental/sparse',
+            mode: 33188,
+            userId: 1000,
+            groupId: 1000,
+            size: 536870912,
+            modified: millisecondsSinceEpoch(1597755776000),
+            typeFlag: TypeFlag.gnuSparse,
+            userName: 'fizz',
+            groupName: 'foobar',
+            accessed: millisecondsSinceEpoch(1597755703000),
+            changed: millisecondsSinceEpoch(1597755602000),
+            format: TarFormat.gnu,
+          )
+        ]
+      },
+      {
+        // Matches the behavior of GNU and BSD tar utilities.
+        'file': 'pax-multi-hdrs.tar',
+        'headers': [
+          TarHeader(
+            name: 'baz',
+            size: 0,
+            linkName: 'bzzt/bzzt/bzzt/bzzt/bzzt/baz',
+            modified: millisecondsSinceEpoch(0),
+            typeFlag: TypeFlag.symlink,
+            format: TarFormat.pax,
+          )
+        ]
+      },
+      {
+        // Both BSD and GNU tar truncate long names at the first NUL, even
+        // if there is data following that NUL character.
+        // This is reasonable, as GNU long names are C-strings.
+        'file': 'gnu-long-nul.tar',
+        'headers': [
+          TarHeader(
+            name: '9876543210',
+            size: 0,
+            mode: 420,
+            userId: 1000,
+            groupId: 1000,
+            modified: millisecondsSinceEpoch(1597755682000),
+            typeFlag: TypeFlag.reg,
+            format: TarFormat.gnu,
+            userName: 'jensen',
+            groupName: 'jensen',
+          )
+        ]
+      },
+      {
+        // This archive was generated by Go's tar.Writer but is readable by
+        // both GNU and BSD tar utilities.
+        // The archive generated by GNU is nearly byte-for-byte identical
+        // to the Go version, except that the Go version sets a negative
+        // devMinor just to force the GNU format.
+        'file': 'gnu-utf8.tar',
+        'headers': [
+          TarHeader(
+            name: '🧸',
+            size: 0,
+            mode: 420,
+            userId: 525,
+            groupId: 600,
+            modified: millisecondsSinceEpoch(0),
+            typeFlag: TypeFlag.reg,
+            userName: '🐻',
+            groupName: '🥭',
+            format: TarFormat.gnu,
+          )
+        ]
+      },
+      {
+        'file': 'gnu-non-utf8-name.tar',
+        'headers': [
+          TarHeader(
+            name: 'pub\x80\x81\x82\x83dev',
+            size: 0,
+            mode: 422,
+            userId: 1234,
+            groupId: 5678,
+            modified: millisecondsSinceEpoch(0),
+            typeFlag: TypeFlag.reg,
+            userName: 'walnut',
+            groupName: 'dust',
+            format: TarFormat.gnu,
+          )
+        ]
+      },
+      {
+        // BSD tar v3.1.2 and GNU tar v1.27.1 both reject PAX records
+        // with NULs in the key.
+        'file': 'pax-nul-xattrs.tar',
+        'error': true,
+      },
+      {
+        // BSD tar v3.1.2 rejects a PAX path with a NUL in the value, while
+        // GNU tar v1.27.1 simply truncates at the first NUL.
+        // We emulate the BSD behavior, since NUL truncation makes little
+        // sense for PAX records, which are length-prefixed strings rather
+        // than NUL-terminated C-strings.
+        'file': 'pax-nul-path.tar',
+        'error': true,
+      },
+      {
+        // Malformed sparse file
+        'file': 'malformed-sparse-file.tar',
+        'error': true,
+      },
+      {
+        // PAX records that do not have a new line at the end.
+        'file': 'invalid-pax-headers.tar',
+        'error': true,
+      },
+      {
+        // Invalid user id
+        'file': 'invalid-uid.tar',
+        'error': true,
+      },
+      {
+        // USTAR archive with a regular entry with non-zero device numbers.
+        'file': 'ustar-nonzero-device-numbers.tar',
+        'headers': [
+          TarHeader(
+            name: 'file',
+            size: 0,
+            mode: 420,
+            typeFlag: TypeFlag.reg,
+            modified: millisecondsSinceEpoch(0),
+            userName: 'Jonas',
+            groupName: 'Google',
+            devMajor: 1,
+            devMinor: 1,
+            format: TarFormat.ustar,
+          )
+        ]
+      },
+      {
+        // Works on BSD tar v3.1.2 and GNU tar v1.27.1.
+        'file': 'gnu-nil-sparse-data.tar',
+        'headers': [
+          TarHeader(
+            name: 'nil-sparse-data',
+            typeFlag: TypeFlag.gnuSparse,
+            userId: 1000,
+            groupId: 1000,
+            size: 1000,
+            modified: millisecondsSinceEpoch(1597756076000),
+            format: TarFormat.gnu,
+          )
+        ],
+      },
+      {
+        // Works on BSD tar v3.1.2 and GNU tar v1.27.1.
+        'file': 'gnu-nil-sparse-hole.tar',
+        'headers': [
+          TarHeader(
+            name: 'nil-sparse-hole',
+            typeFlag: TypeFlag.gnuSparse,
+            size: 1000,
+            userId: 1000,
+            groupId: 1000,
+            modified: millisecondsSinceEpoch(1597756079000),
+            format: TarFormat.gnu,
+          )
+        ]
+      },
+      {
+        // Works on BSD tar v3.1.2 and GNU tar v1.27.1.
+        'file': 'pax-nil-sparse-data.tar',
+        'headers': [
+          TarHeader(
+            name: 'sparse',
+            typeFlag: TypeFlag.reg,
+            size: 1000,
+            userId: 1000,
+            groupId: 1000,
+            modified: millisecondsSinceEpoch(1597756076000),
+            format: TarFormat.pax,
+          )
+        ]
+      },
+      {
+        // Works on BSD tar v3.1.2 and GNU tar v1.27.1.
+        'file': 'pax-nil-sparse-hole.tar',
+        'headers': [
+          TarHeader(
+            name: 'sparse.txt',
+            typeFlag: TypeFlag.reg,
+            size: 1000,
+            userId: 1000,
+            groupId: 1000,
+            modified: millisecondsSinceEpoch(1597756077000),
+            format: TarFormat.pax,
+          )
+        ]
+      },
+      {
+        'file': 'trailing-slash.tar',
+        'headers': [
+          TarHeader(
+            typeFlag: TypeFlag.dir,
+            size: 0,
+            name: '987654321/' * 30,
+            modified: millisecondsSinceEpoch(0),
+            format: TarFormat.pax,
+          )
+        ]
+      },
+      {
+        'file': 'pax-non-ascii-name.tar',
+        'headers': [
+          TarHeader(
+            name: 'æøå/',
+            mode: 493,
+            size: 0,
+            userName: 'sigurdm',
+            userId: 224757,
+            groupId: 89939,
+            groupName: 'primarygroup',
+            format: TarFormat.pax,
+            typeFlag: TypeFlag.dir,
+            modified: DateTime.utc(2020, 10, 13, 13, 04, 32, 608, 662),
+          ),
+          TarHeader(
+            name: 'æøå/æøå.dart',
+            mode: 420,
+            size: 1024,
+            userName: 'sigurdm',
+            userId: 224757,
+            groupId: 89939,
+            groupName: 'primarygroup',
+            format: TarFormat.pax,
+            typeFlag: TypeFlag.reg,
+            modified: DateTime.utc(2020, 10, 13, 13, 05, 12, 105, 884),
+          ),
+        ]
+      }
+    ];
+
+    Matcher matchesHeader(TarHeader expected) {
+      return isA<TarHeader>()
+          .having((e) => e.name, 'name', expected.name)
+          .having((e) => e.modified, 'modified', expected.modified)
+          .having((e) => e.linkName, 'linkName', expected.linkName)
+          .having((e) => e.mode, 'mode', expected.mode)
+          .having((e) => e.size, 'size', expected.size)
+          .having((e) => e.userName, 'userName', expected.userName)
+          .having((e) => e.userId, 'userId', expected.userId)
+          .having((e) => e.groupId, 'groupId', expected.groupId)
+          .having((e) => e.groupName, 'groupName', expected.groupName)
+          .having((e) => e.accessed, 'accessed', expected.accessed)
+          .having((e) => e.changed, 'changed', expected.changed)
+          .having((e) => e.devMajor, 'devMajor', expected.devMajor)
+          .having((e) => e.devMinor, 'devMinor', expected.devMinor)
+          .having((e) => e.format, 'format', expected.format)
+          .having((e) => e.typeFlag, 'typeFlag', expected.typeFlag);
+    }
+
+    for (final testInputs in tests) {
+      test('${testInputs['file']}', () async {
+        final tarReader = TarReader(open(testInputs['file']! as String),
+            maxSpecialFileSize: 16000);
+
+        if (testInputs['error'] == true) {
+          expect(tarReader.moveNext(), throwsFormatException);
+        } else {
+          final expectedHeaders = testInputs['headers']! as List<TarHeader>;
+
+          for (var i = 0; i < expectedHeaders.length; i++) {
+            expect(await tarReader.moveNext(), isTrue);
+            expect(tarReader.current.header, matchesHeader(expectedHeaders[i]));
+          }
+          expect(await tarReader.moveNext(), isFalse);
+        }
+      });
+    }
+
+    test('reader produces an empty stream if the entry has no size', () async {
+      final reader = TarReader(open('trailing-slash.tar'));
+      while (await reader.moveNext()) {
+        expect(await reader.current.contents.toList(), isEmpty);
+      }
+    });
+  });
+
+  test('does not read large headers', () {
+    final reader =
+        TarReader(File('reference/headers/evil_large_header.tar').openRead());
+
+    expect(
+      reader.moveNext(),
+      throwsA(
+        isFormatException.having((e) => e.message, 'message',
+            contains('hidden entry with an invalid size')),
+      ),
+    );
+  });
+
+  group('throws on unexpected EoF', () {
+    final expectedException = isA<TarException>()
+        .having((e) => e.message, 'message', contains('Unexpected end'));
+
+    test('at header', () {
+      final reader =
+          TarReader(File('reference/bad/truncated_in_header.tar').openRead());
+      expect(reader.moveNext(), throwsA(expectedException));
+    });
+
+    test('in content', () {
+      final reader =
+          TarReader(File('reference/bad/truncated_in_body.tar').openRead());
+      expect(reader.moveNext(), throwsA(expectedException));
+    });
+  });
+
+  group('PAX headers', () {
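+    // PAX stores extended attributes as key=value records. Records from a
+    // global extended header (typeflag 'g') apply to all subsequent entries,
+    // while records from a local one (typeflag 'x') apply to the next entry
+    // only and take precedence over the globals.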
+    test('locals overwrite globals', () {
+      final header = PaxHeaders()
+        ..newGlobals({'foo': 'foo', 'bar': 'bar'})
+        ..newLocals({'foo': 'local'});
+
+      expect(header.keys, containsAll(<String>['foo', 'bar']));
+      expect(header['foo'], 'local');
+    });
+
+    group('parse', () {
+      final mediumName = 'CD' * 50;
+      final longName = 'AB' * 100;
+
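+      // A PAX record has the form '<length> <key>=<value>\n', where <length>
+      // is the decimal size in bytes of the entire record, including the
+      // length digits themselves, the separating space, and the trailing
+      // newline. '6 k=v\n' is valid because the whole record is 6 bytes long.
+      // Each test case below is [raw record, key, value, isValid].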
+      final tests = [
+        ['6 k=v\n\n', 'k', 'v', true],
+        ['19 path=/etc/hosts\n', 'path', '/etc/hosts', true],
+        ['210 path=' + longName + '\nabc', 'path', longName, true],
+        ['110 path=' + mediumName + '\n', 'path', mediumName, true],
+        ['9 foo=ba\n', 'foo', 'ba', true],
+        ['11 foo=bar\n\x00', 'foo', 'bar', true],
+        ['18 foo=b=\nar=\n==\x00\n', 'foo', 'b=\nar=\n==\x00', true],
+        ['27 foo=hello9 foo=ba\nworld\n', 'foo', 'hello9 foo=ba\nworld', true],
+        ['27 ☺☻☹=日a本b語ç\n', '☺☻☹', '日a本b語ç', true],
+        ['17 \x00hello=\x00world\n', '', '', false],
+        ['1 k=1\n', '', '', false],
+        ['6 k~1\n', '', '', false],
+        ['6 k=1 ', '', '', false],
+        ['632 k=1\n', '', '', false],
+        ['16 longkeyname=hahaha\n', '', '', false],
+        ['3 somelongkey=\n', '', '', false],
+        ['50 tooshort=\n', '', '', false],
+      ];
+
+      for (var i = 0; i < tests.length; i++) {
+        final input = tests[i];
+
+        test('parsePax #$i', () {
+          final headers = PaxHeaders();
+
+          final raw = utf8.encode(input[0] as String);
+          final key = input[1];
+          final value = input[2];
+          final isValid = input[3] as bool;
+
+          if (isValid) {
+            headers.readPaxHeaders(raw, false, ignoreUnknown: false);
+            expect(headers.keys, [key]);
+            expect(headers[key], value);
+          } else {
+            expect(() => headers.readPaxHeaders(raw, false),
+                throwsA(isA<TarException>()));
+          }
+        });
+      }
+    });
+  });
+}
+
+Future<void> _testWith(String file, {bool ignoreLongFileName = false}) async {
+  final entries = <String, Uint8List>{};
+
+  await TarReader.forEach(File(file).openRead(), (entry) async {
+    entries[entry.name] = await entry.contents.readFully();
+  });
+
+  final testEntry = entries['reference/res/test.txt']!;
+  expect(utf8.decode(testEntry), 'Test file content!\n');
+
+  if (!ignoreLongFileName) {
+    final longName = entries['reference/res/'
+        'subdirectory_with_a_long_name/'
+        'file_with_a_path_length_of_more_than_100_characters_so_that_it_gets_split.txt']!;
+    expect(utf8.decode(longName), 'ditto');
+  }
+}
+
+Future<void> _testLargeFile(String file) async {
+  final reader = TarReader(File(file).openRead());
+  await reader.moveNext();
+
+  expect(reader.current.size, 9663676416);
+}
+
+extension on Stream<List<int>> {
+  Future<Uint8List> readFully() async {
+    final builder = BytesBuilder();
+    await forEach(builder.add);
+    return builder.takeBytes();
+  }
+}
diff --git a/lib/src/third_party/tar/test/sparse_test.dart b/lib/src/third_party/tar/test/sparse_test.dart
new file mode 100644
index 0000000..a819abd
--- /dev/null
+++ b/lib/src/third_party/tar/test/sparse_test.dart
@@ -0,0 +1,340 @@
+import 'dart:io';
+import 'dart:math';
+import 'dart:typed_data';
+
+import 'package:chunked_stream/chunked_stream.dart';
+import 'package:tar/src/sparse.dart';
+import 'package:tar/tar.dart';
+import 'package:test/test.dart';
+
+import 'system_tar.dart';
+
+/// Writes [size] random bytes to [path].
+Future<void> createTestFile(String path, int size) {
+  final random = Random();
+  final file = File(path);
+  final sink = file.openWrite();
+
+  const chunkSize = 1024;
+  for (var i = 0; i < size ~/ chunkSize; i++) {
+    final buffer = Uint8List(chunkSize);
+    fillRandomBytes(buffer, random);
+    sink.add(buffer);
+  }
+
+  final remaining = Uint8List(size % chunkSize);
+  fillRandomBytes(remaining, random);
+  sink.add(remaining);
+
+  return sink.close();
+}
+
+/// Creates a sparse file with a logical size of [size]. The file will be all
+/// zeroes.
+Future<void> createCleanSparseTestFile(String path, int size) async {
+  await Process.run('truncate', ['--size=$size', path]);
+}
+
+/// Creates a file of [size] bytes where some chunks are zeroes and others
+/// hold random data.
+Future<void> createSparseTestFile(String path, int size) {
+  final sink = File(path).openWrite();
+  final random = Random();
+
+  var remaining = size;
+  while (remaining > 0) {
+    final nextBlockSize = min(remaining, 512);
+    if (random.nextBool()) {
+      sink.add(Uint8List(nextBlockSize));
+    } else {
+      final block = Uint8List(nextBlockSize);
+      fillRandomBytes(block, random);
+      sink.add(block);
+    }
+
+    remaining -= nextBlockSize;
+  }
+
+  return sink.close();
+}
+
+void fillRandomBytes(List<int> bytes, Random random) {
+  for (var i = 0; i < bytes.length; i++) {
+    bytes[i] = random.nextInt(256);
+  }
+}
+
+Future<void> validate(Stream<List<int>> tar, Map<String, String> files) async {
+  final reader = TarReader(tar);
+
+  for (var i = 0; i < files.length; i++) {
+    expect(await reader.moveNext(), isTrue);
+
+    final fileName = reader.current.name;
+    final matchingFile = files[fileName];
+
+    if (matchingFile == null) {
+      fail('Unexpected file $fileName in tar file');
+    }
+
+    final actualContents = ChunkedStreamIterator(File(matchingFile).openRead());
+    final tarContents = ChunkedStreamIterator(reader.current.contents);
+
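+    // Compare both streams in lock step, 1024 bytes at a time. read(n)
+    // should return fewer than n bytes only at the end of a stream, and an
+    // empty list once the stream is exhausted, so equal chunks imply equal
+    // contents.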
+    while (true) {
+      final actualChunk = await actualContents.read(1024);
+      final tarChunk = await tarContents.read(1024);
+      expect(tarChunk, actualChunk);
+
+      if (actualChunk.isEmpty) break;
+    }
+  }
+}
+
+void main() {
+  // map from file names to desired size
+  const testFiles = {
+    'reg_1': 65023,
+    'reg_2': 65539,
+    'reg_3': 65534,
+    'sparse_1': 131076,
+    'sparse_2': 65534,
+    'clean_sparse_1': 131076,
+    'clean_sparse_2': 65534,
+  };
+  late String baseDirectory;
+
+  String path(String fileName) => '$baseDirectory/$fileName';
+
+  setUpAll(() async {
+    baseDirectory = Directory.systemTemp.path +
+        '/tar_test/${DateTime.now().millisecondsSinceEpoch}';
+    await Directory(baseDirectory).create(recursive: true);
+
+    for (final entry in testFiles.entries) {
+      final name = entry.key;
+      final size = entry.value;
+
+      if (name.contains('clean')) {
+        await createCleanSparseTestFile(path(name), size);
+      } else if (name.contains('sparse')) {
+        await createSparseTestFile(path(name), size);
+      } else {
+        await createTestFile(path(name), size);
+      }
+    }
+  });
+
+  tearDownAll(() {
+    return Directory(baseDirectory).delete(recursive: true);
+  });
+
+  Future<void> testSubset(
+      Iterable<String> keys, String format, String? sparse) {
+    final files = {for (final file in keys) file: path(file)};
+    final tar = createTarStream(files.keys,
+        baseDir: baseDirectory, archiveFormat: format, sparseVersion: sparse);
+    return validate(tar, files);
+  }
+
+  for (final format in ['gnu', 'v7', 'oldgnu', 'posix', 'ustar']) {
+    group('reads large files in $format', () {
+      test('single file', () {
+        return testSubset(['reg_1'], format, null);
+      });
+
+      test('reads multiple large files successfully', () {
+        return testSubset(['reg_1', 'reg_2', 'reg_3'], format, null);
+      });
+    });
+  }
+
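+  // GNU tar supports three sparse encodings: versions 0.0 and 0.1 store the
+  // sparse map in PAX extended-header records, while version 1.0 stores the
+  // map at the beginning of the entry's data.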
+  for (final format in ['gnu', 'posix']) {
+    for (final sparseVersion in ['0.0', '0.1', '1.0']) {
+      group('sparse format $format, version $sparseVersion', () {
+        test('reads a clean sparse file', () {
+          return testSubset(['clean_sparse_1'], format, sparseVersion);
+        });
+
+        test('reads a sparse file', () {
+          return testSubset(['sparse_1'], format, sparseVersion);
+        });
+
+        test('reads clean sparse / regular files', () {
+          return testSubset(
+            ['reg_1', 'clean_sparse_1', 'reg_3', 'clean_sparse_2'],
+            format,
+            sparseVersion,
+          );
+        });
+
+        test('reads mixed regular / sparse / clean sparse files', () {
+          return testSubset(
+            ['reg_1', 'sparse_2', 'clean_sparse_1', 'reg_3'],
+            format,
+            sparseVersion,
+          );
+        });
+      });
+    }
+  }
+
+  group('sparse entries', () {
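+    // A SparseEntry(offset, length) describes one region of a sparse file.
+    // validateSparseEntries checks that the entries are non-negative,
+    // non-overlapping, ordered, and within [size]; invertSparseEntries
+    // computes the complementary regions (judging by the expectations below,
+    // it always ends with a zero-length sentinel at offset [size]).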
+    final tests = [
+      _SparseTestcase(
+        input: [],
+        size: 0,
+        isValid: true,
+        inverted: [SparseEntry(0, 0)],
+      ),
+      _SparseTestcase(
+        input: [],
+        size: 5000,
+        isValid: true,
+        inverted: [SparseEntry(0, 5000)],
+      ),
+      _SparseTestcase(
+        input: [SparseEntry(0, 5000)],
+        size: 5000,
+        isValid: true,
+        inverted: [SparseEntry(5000, 0)],
+      ),
+      _SparseTestcase(
+        input: [SparseEntry(1000, 4000)],
+        size: 5000,
+        isValid: true,
+        inverted: [SparseEntry(0, 1000), SparseEntry(5000, 0)],
+      ),
+      _SparseTestcase(
+        input: [SparseEntry(0, 3000)],
+        size: 5000,
+        isValid: true,
+        inverted: [SparseEntry(3000, 2000)],
+      ),
+      _SparseTestcase(
+        input: [SparseEntry(3000, 2000)],
+        size: 5000,
+        isValid: true,
+        inverted: [SparseEntry(0, 3000), SparseEntry(5000, 0)],
+      ),
+      _SparseTestcase(
+        input: [SparseEntry(2000, 2000)],
+        size: 5000,
+        isValid: true,
+        inverted: [SparseEntry(0, 2000), SparseEntry(4000, 1000)],
+      ),
+      _SparseTestcase(
+        input: [SparseEntry(0, 2000), SparseEntry(8000, 2000)],
+        size: 10000,
+        isValid: true,
+        inverted: [SparseEntry(2000, 6000), SparseEntry(10000, 0)],
+      ),
+      _SparseTestcase(
+        input: [
+          SparseEntry(0, 2000),
+          SparseEntry(2000, 2000),
+          SparseEntry(4000, 0),
+          SparseEntry(4000, 3000),
+          SparseEntry(7000, 1000),
+          SparseEntry(8000, 0),
+          SparseEntry(8000, 2000)
+        ],
+        size: 10000,
+        isValid: true,
+        inverted: [SparseEntry(10000, 0)],
+      ),
+      _SparseTestcase(
+        input: [
+          SparseEntry(0, 0),
+          SparseEntry(1000, 0),
+          SparseEntry(2000, 0),
+          SparseEntry(3000, 0),
+          SparseEntry(4000, 0),
+          SparseEntry(5000, 0),
+        ],
+        size: 5000,
+        isValid: true,
+        inverted: [SparseEntry(0, 5000)],
+      ),
+      _SparseTestcase(
+        input: [SparseEntry(1, 0)],
+        size: 0,
+        isValid: false,
+      ),
+      _SparseTestcase(
+        input: [SparseEntry(-1, 0)],
+        size: 100,
+        isValid: false,
+      ),
+      _SparseTestcase(
+        input: [SparseEntry(0, -1)],
+        size: 100,
+        isValid: false,
+      ),
+      _SparseTestcase(
+        input: [SparseEntry(0, 1)],
+        size: -100,
+        isValid: false,
+      ),
+      _SparseTestcase(
+        input: [SparseEntry(9223372036854775807, 3), SparseEntry(6, -5)],
+        size: 35,
+        isValid: false,
+      ),
+      _SparseTestcase(
+        input: [SparseEntry(1, 3), SparseEntry(6, -5)],
+        size: 35,
+        isValid: false,
+      ),
+      _SparseTestcase(
+        input: [SparseEntry(9223372036854775807, 9223372036854775807)],
+        size: 9223372036854775807,
+        isValid: false,
+      ),
+      _SparseTestcase(
+        input: [SparseEntry(3, 3)],
+        size: 5,
+        isValid: false,
+      ),
+      _SparseTestcase(
+        input: [SparseEntry(2, 0), SparseEntry(1, 0), SparseEntry(0, 0)],
+        size: 3,
+        isValid: false,
+      ),
+      _SparseTestcase(
+        input: [SparseEntry(1, 3), SparseEntry(2, 2)],
+        size: 10,
+        isValid: false,
+      ),
+    ];
+
+    for (var i = 0; i < tests.length; i++) {
+      final testcase = tests[i];
+
+      test('validateSparseEntries #$i', () {
+        expect(validateSparseEntries(testcase.input, testcase.size),
+            testcase.isValid);
+      });
+
+      if (testcase.isValid) {
+        test('invertSparseEntries #$i', () {
+          expect(invertSparseEntries(testcase.input, testcase.size),
+              testcase.inverted);
+        });
+      }
+    }
+  });
+}
+
+class _SparseTestcase {
+  final List<SparseEntry> input;
+  final int size;
+  final bool isValid;
+  final List<SparseEntry>? inverted;
+
+  _SparseTestcase({
+    required this.input,
+    required this.size,
+    required this.isValid,
+    this.inverted,
+  });
+}
diff --git a/lib/src/third_party/tar/test/system_tar.dart b/lib/src/third_party/tar/test/system_tar.dart
new file mode 100644
index 0000000..4e7c897
--- /dev/null
+++ b/lib/src/third_party/tar/test/system_tar.dart
@@ -0,0 +1,57 @@
+// Wrapper around the `tar` command, for testing.
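+//
+// For example, createTarStream(['a.txt'], archiveFormat: 'posix') shells out
+// to `tar --format=posix --create a.txt` and yields the archive bytes from
+// the process's stdout.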
+
+import 'dart:async';
+import 'dart:convert';
+import 'dart:io';
+
+import 'package:tar/tar.dart' as tar;
+import 'package:test/test.dart';
+
+Future<Process> startTar(List<String> args, {String? baseDir}) {
+  return Process.start('tar', args, workingDirectory: baseDir).then((proc) {
+    expect(proc.exitCode, completion(0),
+        reason: 'tar ${args.join(' ')} should complete normally');
+
+    // Attach a stderr listener; we don't expect any output on it.
+    late List<int> data;
+    final sink = ByteConversionSink.withCallback((result) => data = result);
+    proc.stderr.forEach(sink.add).then((Object? _) {
+      sink.close();
+      const LineSplitter().convert(utf8.decode(data)).forEach(stderr.writeln);
+    });
+
+    return proc;
+  });
+}
+
+Stream<List<int>> createTarStream(Iterable<String> files,
+    {String archiveFormat = 'gnu',
+    String? sparseVersion,
+    String? baseDir}) async* {
+  final args = [
+    '--format=$archiveFormat',
+    '--create',
+    ...files,
+  ];
+
+  if (sparseVersion != null) {
+    args..add('--sparse')..add('--sparse-version=$sparseVersion');
+  }
+
+  final tar = await startTar(args, baseDir: baseDir);
+  yield* tar.stdout;
+}
+
+Future<Process> writeToTar(
+    List<String> args, Stream<tar.TarEntry> entries) async {
+  final proc = await startTar(args);
+  await entries.pipe(tar.tarWritingSink(proc.stdin));
+
+  return proc;
+}
+
+extension ProcessUtils on Process {
+  Stream<String> get lines {
+    return this.stdout.transform(utf8.decoder).transform(const LineSplitter());
+  }
+}
diff --git a/lib/src/third_party/tar/test/utils_test.dart b/lib/src/third_party/tar/test/utils_test.dart
new file mode 100644
index 0000000..32de010
--- /dev/null
+++ b/lib/src/third_party/tar/test/utils_test.dart
@@ -0,0 +1,190 @@
+import 'dart:convert';
+import 'dart:typed_data';
+
+import 'package:tar/src/exception.dart';
+import 'package:tar/src/utils.dart';
+import 'package:test/test.dart';
+
+// ignore_for_file: avoid_js_rounded_ints
+void main() {
+  group('readString', () {
+    test('can read empty strings', () {
+      expect(_bytes('').readString(0, 0), '');
+    });
+
+    test('does not include trailing null', () {
+      expect(_bytes('hello\x00').readString(0, 6), 'hello');
+    });
+
+    test('does not require a trailing null', () {
+      expect(_bytes('hello').readString(0, 5), 'hello');
+    });
+  });
+
+  group('readStringOrNullIfEmpty', () {
+    test('returns null if empty', () {
+      expect(_bytes('').readStringOrNullIfEmpty(0, 0), isNull);
+    });
+
+    test('can read non-empty strings', () {
+      expect(_bytes('hello').readStringOrNullIfEmpty(0, 5), 'hello');
+    });
+  });
+
+  group('generates stream of zeroes', () {
+    const lengths = [1024 * 1024 * 128 + 12, 12, 0];
+
+    for (final length in lengths) {
+      test('with length $length', () {
+        final stream = zeroes(length);
+
+        expect(
+          stream.fold<int>(0, (previous, element) => previous + element.length),
+          completion(length),
+        );
+      });
+    }
+  });
+
+  group('readNumeric', () {
+    void testValid(String value, int expected) {
+      test('readNumeric($value)', () {
+        expect(Uint8List.fromList(value.codeUnits).readNumeric(0, value.length),
+            expected);
+      });
+    }
+
+    void testValidBin(List<int> value, int expected) {
+      test('readNumeric($value)', () {
+        expect(
+            Uint8List.fromList(value).readNumeric(0, value.length), expected);
+      });
+    }
+
+    void testInvalid(String value) {
+      test('readNumeric($value)', () {
+        expect(() => _bytes(value).readNumeric(0, value.length),
+            throwsA(isA<TarException>()));
+      });
+    }
+
+    group('base-256', () {
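+      // In the GNU base-256 encoding, a numeric field whose first byte has
+      // the high bit (0x80) set stores a big-endian two's-complement number
+      // in the remaining bits: 0x80 followed by zeroes is 0, and a field of
+      // all 0xff bytes is -1.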
+      testValidBin([0x0], 0);
+      testValidBin([0x80], 0);
+      testValidBin([0x80, 0x00], 0);
+      testValidBin([0x80, 0x00, 0x00], 0);
+      testValidBin([0xbf], (1 << 6) - 1);
+      testValidBin([0xbf, 0xff], (1 << 14) - 1);
+      testValid('\xbf\xff\xff', (1 << 22) - 1);
+      testValidBin([0xff], -1);
+      testValidBin([0xff, 0xff], -1);
+      testValidBin([0xff, 0xff, 0xff], -1);
+      testValid('\xc0', -1 * (1 << 6));
+      testValid('\xc0\x00', -1 * (1 << 14));
+      testValid('\xc0\x00\x00', -1 * (1 << 22));
+      testValid('\x87\x76\xa2\x22\xeb\x8a\x72\x61', 537795476381659745);
+      testValid('\x80\x00\x00\x00\x07\x76\xa2\x22\xeb\x8a\x72\x61',
+          537795476381659745);
+      testValid('\xf7\x76\xa2\x22\xeb\x8a\x72\x61', -615126028225187231);
+      testValid('\xff\xff\xff\xff\xf7\x76\xa2\x22\xeb\x8a\x72\x61',
+          -615126028225187231);
+      testValid('\x80\x7f\xff\xff\xff\xff\xff\xff\xff', 9223372036854775807);
+      testValid('\xff\x80\x00\x00\x00\x00\x00\x00\x00', -9223372036854775808);
+    });
+
+    group('octal', () {
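+      // Classic tar numeric fields are ASCII octal, optionally padded with
+      // spaces or NULs: '644' in octal is 420 in decimal.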
+      testValid('', 0);
+      testValid('   \x00  ', 0);
+      testValid('0000000\x00', 0);
+      testValid(' \x0000000\x00', 0);
+      testValid(' \x0000003\x00', 3);
+      testValid('00000000644\x00', 420);
+      testValid('032033\x00 ', 13339);
+      testValid('320330\x00 ', 106712);
+      testValid('0000660\x00 ', 432);
+      testValid('\x00 0000660\x00 ', 432);
+
+      testInvalid('0123456789abcdef');
+      testInvalid('0123456789\x00abcdef');
+      testInvalid('0123\x7e\x5f\x264123');
+    });
+  });
+
+  group('parsePaxTime', () {
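+    // PAX time records are decimal seconds since the epoch with an optional
+    // fractional part. Judging by the expectations below, parsePaxTime
+    // truncates the fraction to microsecond precision.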
+    const validTimes = {
+      '1350244992.023960108': 1350244992023960,
+      '1350244992.02396010': 1350244992023960,
+      '1350244992.0239601089': 1350244992023960,
+      '1350244992.3': 1350244992300000,
+      '1350244992': 1350244992000000,
+      '-1.000000001': -1000000,
+      '-1.000001': -1000001,
+      '-1.001000': -1001000,
+      '-1': -1000000,
+      '-1.999000': -1999000,
+      '-1.999999': -1999999,
+      '-1.999999999': -1999999,
+      '0.000000001': 0,
+      '0.000001': 1,
+      '0.001000': 1000,
+      '0': 0,
+      '0.999000': 999000,
+      '0.999999': 999999,
+      '0.999999999': 999999,
+      '1.000000001': 1000000,
+      '1.000001': 1000001,
+      '1.001000': 1001000,
+      '1': 1000000,
+      '1.999000': 1999000,
+      '1.999999': 1999999,
+      '1.999999999': 1999999,
+      '-1350244992.023960108': -1350244992023960,
+      '-1350244992.02396010': -1350244992023960,
+      '-1350244992.0239601089': -1350244992023960,
+      '-1350244992.3': -1350244992300000,
+      '-1350244992': -1350244992000000,
+      '1.': 1000000,
+      '0.0': 0,
+      '-1.': -1000000,
+      '-1.0': -1000000,
+      '-0.0': 0,
+      '-0.1': -100000,
+      '-0.01': -10000,
+      '-0.99': -990000,
+      '-0.98': -980000,
+      '-1.1': -1100000,
+      '-1.01': -1010000,
+      '-2.99': -2990000,
+      '-5.98': -5980000,
+    };
+
+    validTimes.forEach((str, micros) {
+      test('parsePaxTime($str)', () {
+        expect(parsePaxTime(str), microsecondsSinceEpoch(micros));
+      });
+    });
+
+    const invalidTimes = {
+      '',
+      '.5',
+      '-',
+      '+',
+      '-1.-1',
+      '99999999999999999999999999999999999999999999999',
+      '0.123456789abcdef',
+      'foo',
+      '𝟵𝟴𝟳𝟲𝟱.𝟰𝟯𝟮𝟭𝟬', // Unicode numbers (U+1D7EC to U+1D7F5)
+      '98765﹒43210', // Unicode period (U+FE52)
+    };
+
+    for (final invalid in invalidTimes) {
+      test('parsePaxTime($invalid)', () {
+        expect(() => parsePaxTime(invalid), throwsA(isA<TarException>()));
+      });
+    }
+  });
+}
+
+Uint8List _bytes(String str) {
+  return Uint8List.fromList(utf8.encode(str));
+}
diff --git a/lib/src/third_party/tar/test/writer_test.dart b/lib/src/third_party/tar/test/writer_test.dart
new file mode 100644
index 0000000..7b742ed
--- /dev/null
+++ b/lib/src/third_party/tar/test/writer_test.dart
@@ -0,0 +1,69 @@
+import 'dart:async';
+import 'dart:typed_data';
+
+import 'package:tar/tar.dart' as tar;
+import 'package:test/test.dart';
+
+import 'system_tar.dart';
+
+void main() {
+  test('writes long file names', () async {
+    final name = '${'very' * 30} long name.txt';
+    final withLongName = tar.TarEntry.data(
+      tar.TarHeader(name: name, mode: 0, size: 0),
+      Uint8List(0),
+    );
+
+    final proc = await writeToTar(['--list'], Stream.value(withLongName));
+    expect(proc.lines, emits(name));
+  });
+
+  test('writes headers', () async {
+    final date = DateTime.parse('2020-12-30 12:34');
+    final entry = tar.TarEntry.data(
+      tar.TarHeader(
+        name: 'hello_dart.txt',
+        mode: int.parse('744', radix: 8),
+        size: 0,
+        userId: 3,
+        groupId: 4,
+        userName: 'my_user',
+        groupName: 'long group that exceeds 32 characters',
+        modified: date,
+      ),
+      Uint8List(0),
+    );
+
+    final proc = await writeToTar(['--list', '--verbose'], Stream.value(entry));
+    expect(
+      proc.lines,
+      emits(
+        allOf(
+          contains('-rwxr--r--'),
+          contains('my_user/long group that exceeds 32 characters'),
+          contains('2020-12-30 12:34'),
+        ),
+      ),
+    );
+  });
+
+  test('writes huge files', () async {
+    const oneMbSize = 1024 * 1024;
+    const tenGbSize = oneMbSize * 1024 * 10;
+
+    final oneMb = Uint8List(oneMbSize);
+    const count = tenGbSize ~/ oneMbSize;
+
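+    // Serve the 10 GiB body as 10240 lazily generated 1 MiB chunks, so the
+    // test never buffers the whole file in memory.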
+    final entry = tar.TarEntry(
+      tar.TarHeader(
+        name: 'file.blob',
+        mode: 0,
+        size: tenGbSize,
+      ),
+      Stream<List<int>>.fromIterable(Iterable.generate(count, (i) => oneMb)),
+    );
+
+    final proc = await writeToTar(['--list', '--verbose'], Stream.value(entry));
+    expect(proc.lines, emits(contains(tenGbSize.toString())));
+  });
+}
diff --git a/lib/src/third_party/tar/tool/generate_charcodes.sh b/lib/src/third_party/tar/tool/generate_charcodes.sh
new file mode 100755
index 0000000..4f139de
--- /dev/null
+++ b/lib/src/third_party/tar/tool/generate_charcodes.sh
@@ -0,0 +1 @@
+dart run charcode 'ustarxgASLK=\x20\x0a\d' > lib/src/charcodes.dart
\ No newline at end of file
diff --git a/lib/src/third_party/tar/tool/generate_evil.dart b/lib/src/third_party/tar/tool/generate_evil.dart
new file mode 100644
index 0000000..6882b16
--- /dev/null
+++ b/lib/src/third_party/tar/tool/generate_evil.dart
@@ -0,0 +1,28 @@
+import 'dart:io';
+import 'dart:typed_data';
+
+import 'package:tar/tar.dart';
+
+Future<void> main() async {
+  // Generate a tar archive whose PAX extended header entry claims to be 7 GiB.
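+  // A reader that trusted this size would buffer gigabytes of metadata;
+  // TarReader is instead expected to reject the entry (see the
+  // 'does not read large headers' test in reader_test.dart).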
+  await Stream<TarEntry>.fromIterable([
+    TarEntry.data(
+      TarHeader(
+        name: 'PaxHeader',
+        mode: 0,
+        typeFlag: TypeFlag.xHeader,
+        size: 1024 * 1024 * 1024 * 7,
+      ),
+      Uint8List(0),
+    ),
+    TarEntry.data(
+      TarHeader(
+        name: 'test.txt',
+        mode: 0,
+      ),
+      Uint8List(0),
+    ),
+  ])
+      .transform(tarWriter)
+      .pipe(File('reference/evil_large_header.tar').openWrite());
+}
diff --git a/lib/src/third_party/tar/tool/generate_reference_tars.sh b/lib/src/third_party/tar/tool/generate_reference_tars.sh
new file mode 100755
index 0000000..e09b71b
--- /dev/null
+++ b/lib/src/third_party/tar/tool/generate_reference_tars.sh
@@ -0,0 +1,21 @@
+echo "--format=posix"
+tar --create --verbose --file=reference/posix.tar --owner=1 --group=2 --format=posix reference/res/
+
+echo "--format=gnu"
+tar --create --verbose --file=reference/gnu.tar --owner=1 --group=2 --format=gnu reference/res/
+
+echo "--format=v7"
+# v7 can't store long names at all
+tar --create --verbose --file=reference/v7.tar --owner=1 --group=2 --format=v7 reference/res/test.txt
+
+echo "--format=ustar"
+tar --create --verbose --file=reference/ustar.tar --owner=1 --group=2 --format=ustar reference/res/
+
+echo "truncated --format=posix"
+tar --create --file - --owner=1 --group=2 --format=posix reference/res/ | head --bytes=1k > reference/bad/truncated_in_header.tar
+tar --create --file - --owner=1 --group=2 --format=posix reference/res/ | head --bytes=1050 > reference/bad/truncated_in_body.tar
+
+# Note: The large-file header fixture (reference/headers/large_posix.tar) was generated by creating a 9 GiB blob:
+#  dd if=/dev/zero of=zeroes ibs=1G count=9
+# Then, I ran
+#  tar --create --format=posix zeroes | head -q -c 1536 > reference/headers/large_posix.tar
diff --git a/pubspec.yaml b/pubspec.yaml
index d9970e3..fc380a0 100644
--- a/pubspec.yaml
+++ b/pubspec.yaml
@@ -24,7 +24,6 @@
   shelf: ^1.0.0
   source_span: ^1.8.0
   stack_trace: ^1.10.0
-  tar: ^0.3.0
   yaml: ^3.0.0
 
 dev_dependencies:
diff --git a/test/io_test.dart b/test/io_test.dart
index 8c627d1..9fc2f90 100644
--- a/test/io_test.dart
+++ b/test/io_test.dart
@@ -9,7 +9,7 @@
 import 'package:path/path.dart' as path;
 import 'package:pub/src/exceptions.dart';
 import 'package:pub/src/io.dart';
-import 'package:tar/tar.dart';
+import 'package:pub/src/third_party/tar/lib/tar.dart';
 import 'package:test/test.dart';
 
 import 'descriptor.dart' as d;