Vendor tar (#2987)

* Vendor libraries from package:tar
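
A minimal sketch of how the vendored reader is consumed, assuming a raw
(already gunzipped) tar byte stream; the relative import matches the one used
in `lib/src/io.dart` below:

```dart
import 'third_party/tar/tar.dart';

Future<void> listEntries(Stream<List<int>> tarBytes) async {
  final reader = TarReader(tarBytes);
  while (await reader.moveNext()) {
    final entry = reader.current;
    // Each entry's contents must be fully drained (or left untouched) before
    // the next moveNext() call.
    print('${entry.name} (${entry.size} bytes)');
  }
}
```
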
diff --git a/lib/src/io.dart b/lib/src/io.dart
index afb4ccc..b73312a 100644
--- a/lib/src/io.dart
+++ b/lib/src/io.dart
@@ -17,12 +17,12 @@
 import 'package:pedantic/pedantic.dart';
 import 'package:pool/pool.dart';
 import 'package:stack_trace/stack_trace.dart';
-import 'package:tar/tar.dart';
 
 import 'error_group.dart';
 import 'exceptions.dart';
 import 'exit_codes.dart' as exit_codes;
 import 'log.dart' as log;
+import 'third_party/tar/tar.dart';
 import 'utils.dart';
 
 export 'package:http/http.dart' show ByteStream;
diff --git a/lib/src/third_party/tar/LICENSE b/lib/src/third_party/tar/LICENSE
new file mode 100644
index 0000000..ed92ded
--- /dev/null
+++ b/lib/src/third_party/tar/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Simon Binder
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/lib/src/third_party/tar/README.md b/lib/src/third_party/tar/README.md
new file mode 100644
index 0000000..c2f429e
--- /dev/null
+++ b/lib/src/third_party/tar/README.md
@@ -0,0 +1,7 @@
+# package:tar
+
+Vendored elements from `package:tar` for use in the creation and extraction of
+tar archives.
+
+ * Repository: `https://github.com/simolus3/tar/`
+ * Revision: `e8bf3e828e1c4c89774fab1b9083a6879ed5eae9`
diff --git a/lib/src/third_party/tar/src/charcodes.dart b/lib/src/third_party/tar/src/charcodes.dart
new file mode 100644
index 0000000..1d34a6d
--- /dev/null
+++ b/lib/src/third_party/tar/src/charcodes.dart
@@ -0,0 +1,71 @@
+/// "Line feed" control character.
+const int $lf = 0x0a;
+
+/// Space character.
+const int $space = 0x20;
+
+/// Character `0`.
+const int $0 = 0x30;
+
+/// Character `1`.
+const int $1 = 0x31;
+
+/// Character `2`.
+const int $2 = 0x32;
+
+/// Character `3`.
+const int $3 = 0x33;
+
+/// Character `4`.
+const int $4 = 0x34;
+
+/// Character `5`.
+const int $5 = 0x35;
+
+/// Character `6`.
+const int $6 = 0x36;
+
+/// Character `7`.
+const int $7 = 0x37;
+
+/// Character `8`.
+const int $8 = 0x38;
+
+/// Character `9`.
+const int $9 = 0x39;
+
+/// Character `=`.
+const int $equal = 0x3d;
+
+/// Character `A`.
+const int $A = 0x41;
+
+/// Character `K`.
+const int $K = 0x4b;
+
+/// Character `L`.
+const int $L = 0x4c;
+
+/// Character `S`.
+const int $S = 0x53;
+
+/// Character `a`.
+const int $a = 0x61;
+
+/// Character `g`.
+const int $g = 0x67;
+
+/// Character `r`.
+const int $r = 0x72;
+
+/// Character `s`.
+const int $s = 0x73;
+
+/// Character `t`.
+const int $t = 0x74;
+
+/// Character `u`.
+const int $u = 0x75;
+
+/// Character `x`.
+const int $x = 0x78;
diff --git a/lib/src/third_party/tar/src/constants.dart b/lib/src/third_party/tar/src/constants.dart
new file mode 100644
index 0000000..aac7669
--- /dev/null
+++ b/lib/src/third_party/tar/src/constants.dart
@@ -0,0 +1,260 @@
+import 'dart:typed_data';
+
+import 'charcodes.dart';
+import 'exception.dart';
+import 'header.dart' show TarHeader; // for dartdoc
+
+// Magic values to help us identify the TAR header type.
+const magicGnu = [$u, $s, $t, $a, $r, $space]; // 'ustar '
+const versionGnu = [$space, 0]; // ' \x00'
+const magicUstar = [$u, $s, $t, $a, $r, 0]; // 'ustar\x00'
+const versionUstar = [$0, $0]; // '00'
+const trailerStar = [$t, $a, $r, 0]; // 'tar\x00'
+
+/// Type flags for [TarHeader].
+///
+/// The type flag of a header indicates the kind of file associated with the
+/// entry. This enum contains the various type flags over the different TAR
+/// formats, and users should be careful that the type flag corresponds to the
+/// TAR format they are working with.
+enum TypeFlag {
+  /// [reg] indicates regular files.
+  ///
+  /// Old tar implementations have a separate `TypeRegA` value. This library
+  /// transparently reads those as [reg].
+  reg,
+
+  /// Legacy-version of [reg] in old tar implementations.
+  ///
+  /// This is only used internally.
+  regA,
+
+  /// Hard link - header-only, may not have a data body
+  link,
+
+  /// Symbolic link - header-only, may not have a data body
+  symlink,
+
+  /// Character device node - header-only, may not have a data body
+  char,
+
+  /// Block device node - header-only, may not have a data body
+  block,
+
+  /// Directory - header-only, may not have a data body
+  dir,
+
+  /// FIFO node - header-only, may not have a data body
+  fifo,
+
+  /// Currently does not have any meaning, but is reserved for the future.
+  reserved,
+
+  /// Used by the PAX format to store key-value records that are only relevant
+  /// to the next file.
+  ///
+  /// This package transparently handles these types.
+  xHeader,
+
+  /// Used by the PAX format to store key-value records that are relevant to all
+  /// subsequent files.
+  ///
+  /// This package only supports parsing and composing such headers,
+  /// but does not currently support persisting the global state across files.
+  xGlobalHeader,
+
+  /// Indicates a sparse file in the GNU format.
+  gnuSparse,
+
+  /// Used by the GNU format for a meta file to store the path or link name for
+  /// the next file.
+  /// This package transparently handles these types.
+  gnuLongName,
+  gnuLongLink,
+
+  /// Vendor-specific typeflag, as defined in POSIX.1-1988. Seen as outdated but
+  /// may still exist on old files.
+  ///
+  /// This library uses a single enum to catch them all.
+  vendor
+}
+
+/// Generates the corresponding [TypeFlag] associated with [byte].
+TypeFlag typeflagFromByte(int byte) {
+  switch (byte) {
+    case $0:
+      return TypeFlag.reg;
+    case 0:
+      return TypeFlag.regA;
+    case $1:
+      return TypeFlag.link;
+    case $2:
+      return TypeFlag.symlink;
+    case $3:
+      return TypeFlag.char;
+    case $4:
+      return TypeFlag.block;
+    case $5:
+      return TypeFlag.dir;
+    case $6:
+      return TypeFlag.fifo;
+    case $7:
+      return TypeFlag.reserved;
+    case $x:
+      return TypeFlag.xHeader;
+    case $g:
+      return TypeFlag.xGlobalHeader;
+    case $S:
+      return TypeFlag.gnuSparse;
+    case $L:
+      return TypeFlag.gnuLongName;
+    case $K:
+      return TypeFlag.gnuLongLink;
+    default:
+      if (64 < byte && byte < 91) {
+        return TypeFlag.vendor;
+      }
+      throw TarException.header('Invalid typeflag value $byte');
+  }
+}
+
+int typeflagToByte(TypeFlag flag) {
+  switch (flag) {
+    case TypeFlag.reg:
+    case TypeFlag.regA:
+      return $0;
+    case TypeFlag.link:
+      return $1;
+    case TypeFlag.symlink:
+      return $2;
+    case TypeFlag.char:
+      return $3;
+    case TypeFlag.block:
+      return $4;
+    case TypeFlag.dir:
+      return $5;
+    case TypeFlag.fifo:
+      return $6;
+    case TypeFlag.reserved:
+      return $7;
+    case TypeFlag.xHeader:
+      return $x;
+    case TypeFlag.xGlobalHeader:
+      return $g;
+    case TypeFlag.gnuSparse:
+      return $S;
+    case TypeFlag.gnuLongName:
+      return $L;
+    case TypeFlag.gnuLongLink:
+      return $K;
+    case TypeFlag.vendor:
+      throw ArgumentError("Can't write vendor-specific type-flags");
+  }
+}
+
+/// Keywords for PAX extended header records.
+const paxPath = 'path';
+const paxLinkpath = 'linkpath';
+const paxSize = 'size';
+const paxUid = 'uid';
+const paxGid = 'gid';
+const paxUname = 'uname';
+const paxGname = 'gname';
+const paxMtime = 'mtime';
+const paxAtime = 'atime';
+const paxCtime =
+    'ctime'; // Removed from later revision of PAX spec, but was valid
+const paxComment = 'comment';
+const paxSchilyXattr = 'SCHILY.xattr.';
+
+/// Keywords for GNU sparse files in a PAX extended header.
+const paxGNUSparse = 'GNU.sparse.';
+const paxGNUSparseNumBlocks = 'GNU.sparse.numblocks';
+const paxGNUSparseOffset = 'GNU.sparse.offset';
+const paxGNUSparseNumBytes = 'GNU.sparse.numbytes';
+const paxGNUSparseMap = 'GNU.sparse.map';
+const paxGNUSparseName = 'GNU.sparse.name';
+const paxGNUSparseMajor = 'GNU.sparse.major';
+const paxGNUSparseMinor = 'GNU.sparse.minor';
+const paxGNUSparseSize = 'GNU.sparse.size';
+const paxGNUSparseRealSize = 'GNU.sparse.realsize';
+
+/// A set of pax header keys supported by this library.
+///
+/// The reader will ignore pax headers not listed in this map.
+const supportedPaxHeaders = {
+  paxPath,
+  paxLinkpath,
+  paxSize,
+  paxUid,
+  paxGid,
+  paxUname,
+  paxGname,
+  paxMtime,
+  paxAtime,
+  paxCtime,
+  paxComment,
+  paxSchilyXattr,
+  paxGNUSparse,
+  paxGNUSparseNumBlocks,
+  paxGNUSparseOffset,
+  paxGNUSparseNumBytes,
+  paxGNUSparseMap,
+  paxGNUSparseName,
+  paxGNUSparseMajor,
+  paxGNUSparseMinor,
+  paxGNUSparseSize,
+  paxGNUSparseRealSize
+};
+
+/// User ID bit
+const c_ISUID = 2048;
+
+/// Group ID bit
+const c_ISGID = 1024;
+
+/// Sticky bit
+const c_ISVTX = 512;
+
+/// **********************
+///  Convenience constants
+/// **********************
+/// 64-bit integer max and min values
+const int64MaxValue = 9223372036854775807;
+const int64MinValue = -9223372036854775808;
+
+/// Constants to determine file modes.
+const modeType = 2401763328;
+const modeSymLink = 134217728;
+const modeDevice = 67108864;
+const modeCharDevice = 2097152;
+const modeNamedPipe = 33554432;
+const modeSocket = 16777216;
+const modeSetUid = 8388608;
+const modeSetGid = 4194304;
+const modeSticky = 1048576;
+const modeDirectory = 2147483648;
+
+/// The offset of the checksum in the header
+const checksumOffset = 148;
+const checksumLength = 8;
+const magicOffset = 257;
+const versionOffset = 263;
+const starTrailerOffset = 508;
+
+/// Size constants from various TAR specifications.
+/// Size of each block in a TAR stream.
+const blockSize = 512;
+const blockSizeLog2 = 9;
+const maxIntFor12CharOct = 0x1ffffffff; // 777 7777 7777 in oct
+
+const defaultSpecialLength = 4 * blockSize;
+
+/// Max length of the name field in USTAR format.
+const nameSize = 100;
+
+/// Max length of the prefix field in USTAR format.
+const prefixSize = 155;
+
+/// A full TAR block of zeros.
+final zeroBlock = Uint8List(blockSize);
diff --git a/lib/src/third_party/tar/src/entry.dart b/lib/src/third_party/tar/src/entry.dart
new file mode 100644
index 0000000..f6b0a5a
--- /dev/null
+++ b/lib/src/third_party/tar/src/entry.dart
@@ -0,0 +1,59 @@
+import 'dart:async';
+
+import 'package:meta/meta.dart';
+
+import 'constants.dart';
+import 'header.dart';
+
+/// An entry in a tar file.
+///
+/// Usually, tar entries are read from a stream, and they're bound to the stream
+/// from which they've been read. This means that they can only be read once,
+/// and that only one [TarEntry] is active at a time.
+@sealed
+class TarEntry {
+  /// The parsed [TarHeader] of this tar entry.
+  final TarHeader header;
+
+  /// The content stream of the active tar entry.
+  ///
+  /// For tar entries read through the reader provided by this library,
+  /// [contents] is a single-subscription stream backed by the original stream
+  /// used to create the reader.
+  /// When listening on [contents], the stream needs to be fully drained before
+  /// the next call to [StreamIterator.moveNext]. It's acceptable to not listen
+  /// to [contents] at all before calling [StreamIterator.moveNext] again.
+  /// In that case, this library will take care of draining the stream to get to
+  /// the next entry.
+  final Stream<List<int>> contents;
+
+  /// The name of this entry, as indicated in the header or a previous pax
+  /// entry.
+  String get name => header.name;
+
+  /// The type of tar entry (file, directory, etc.).
+  TypeFlag get type => header.typeFlag;
+
+  /// The content size of this entry, in bytes.
+  int get size => header.size;
+
+  /// Time of the last modification of this file, as indicated in the [header].
+  DateTime get modified => header.modified;
+
+  /// Creates a tar entry from a [header] and the [contents] stream.
+  ///
+  /// If the total length of [contents] is known, consider setting the
+  /// [header]'s [TarHeader.size] property to the appropriate value.
+  /// Otherwise, the tar writer needs to buffer contents to determine the right
+  /// size.
+  // factory so that this class can't be extended
+  factory TarEntry(TarHeader header, Stream<List<int>> contents) = TarEntry._;
+
+  TarEntry._(this.header, this.contents);
+
+  /// Creates an in-memory tar entry from the [header] and the [data] to store.
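+  ///
+  /// For example, assuming `dart:convert` is imported for `utf8`:
+  ///
+  /// ```dart
+  /// final entry = TarEntry.data(
+  ///   TarHeader(name: 'hello.txt', mode: int.parse('644', radix: 8)),
+  ///   utf8.encode('Hello world'),
+  /// );
+  /// ```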
+  factory TarEntry.data(TarHeader header, List<int> data) {
+    (header as HeaderImpl).size = data.length;
+    return TarEntry(header, Stream.value(data));
+  }
+}
diff --git a/lib/src/third_party/tar/src/exception.dart b/lib/src/third_party/tar/src/exception.dart
new file mode 100644
index 0000000..3d9e614
--- /dev/null
+++ b/lib/src/third_party/tar/src/exception.dart
@@ -0,0 +1,13 @@
+import 'package:meta/meta.dart';
+
+/// An exception indicating that there was an issue parsing a `.tar` file.
+/// Intended to be seen by the user.
+class TarException extends FormatException {
+  @internal
+  TarException(String message) : super(message);
+
+  @internal
+  factory TarException.header(String message) {
+    return TarException('Invalid header: $message');
+  }
+}
diff --git a/lib/src/third_party/tar/src/format.dart b/lib/src/third_party/tar/src/format.dart
new file mode 100644
index 0000000..b6be2f5
--- /dev/null
+++ b/lib/src/third_party/tar/src/format.dart
@@ -0,0 +1,288 @@
+import 'package:meta/meta.dart';
+
+/// Handy map to help us translate [TarFormat] values to their names.
+/// Be sure to keep this consistent with the constant initializers in
+/// [TarFormat].
+const _formatNames = {
+  1: 'V7',
+  2: 'USTAR',
+  4: 'PAX',
+  8: 'GNU',
+  16: 'STAR',
+};
+
+/// Holds the possible TAR formats that a file could take.
+///
+/// This library only supports the V7, USTAR, PAX, GNU, and STAR formats.
+@sealed
+class TarFormat {
+  /// The TAR formats are encoded in powers of two in [_value], such that we
+  /// can refine our guess via bit operations as we discover more information
+  /// about the TAR file.
+  /// A value of 0 means that the format is invalid.
+  final int _value;
+
+  const TarFormat._internal(this._value);
+
+  @override
+  int get hashCode => _value;
+
+  @override
+  bool operator ==(Object? other) {
+    if (other is! TarFormat) return false;
+
+    return _value == other._value;
+  }
+
+  @override
+  String toString() {
+    if (!isValid()) return 'Invalid';
+
+    final possibleNames = _formatNames.entries
+        .where((e) => _value & e.key != 0)
+        .map((e) => e.value);
+
+    return possibleNames.join(' or ');
+  }
+
+  /// Returns whether [other] is a possible resolution of `this`.
+  ///
+  /// For example, a [TarFormat] with a value of 6 means that we do not have
+  /// enough information to determine if it is [TarFormat.ustar] or
+  /// [TarFormat.pax], so either of them could be possible resolutions of
+  /// `this`.
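+  ///
+  /// ```dart
+  /// (TarFormat.ustar | TarFormat.pax).has(TarFormat.pax); // true
+  /// (TarFormat.ustar | TarFormat.pax).has(TarFormat.gnu); // false
+  /// ```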
+  bool has(TarFormat other) => _value & other._value != 0;
+
+  /// Returns a new [TarFormat] that signifies that it can be either
+  /// `this` or [other]'s format.
+  ///
+  /// **Example:**
+  /// ```dart
+  /// TarFormat format = TarFormat.ustar | TarFormat.pax;
+  /// ```
+  ///
+  /// The above code would signify that we have limited `format` to either
+  /// the USTAR or PAX format, but need further information to refine the guess.
+  TarFormat operator |(TarFormat other) {
+    return mayBe(other);
+  }
+
+  /// Returns a new [TarFormat] that signifies that it can be either
+  /// `this` or [other]'s format.
+  ///
+  /// **Example:**
+  /// ```dart
+  /// TarFormat format = TarFormat.pax;
+  /// format = format.mayBe(TarFormat.ustar);
+  /// ```
+  ///
+  /// The above code would signify that we learnt that in addition to being a
+  /// PAX format, it could also be of the USTAR format.
+  TarFormat mayBe(TarFormat? other) {
+    if (other == null) return this;
+    return TarFormat._internal(_value | other._value);
+  }
+
+  /// Returns a new [TarFormat] that signifies that it can only be [other]'s
+  /// format.
+  ///
+  /// **Example:**
+  /// ```dart
+  /// TarFormat format = TarFormat.pax | TarFormat.ustar;
+  /// ...
+  /// format = format.mayOnlyBe(TarFormat.ustar);
+  /// ```
+  ///
+  /// In the above example, we found that `format` could either be PAX or USTAR,
+  /// but later learnt that it can only be the USTAR format.
+  ///
+  /// If `has(other) == false`, [mayOnlyBe] will result in an unknown
+  /// [TarFormat].
+  TarFormat mayOnlyBe(TarFormat other) {
+    return TarFormat._internal(_value & other._value);
+  }
+
+  /// Returns whether this format might be valid.
+  ///
+  /// This also returns true if we have yet to fully determine what the format
+  /// is.
+  bool isValid() => _value > 0;
+
+  /// Original Unix Version 7 (V7) AT&T tar tool prior to standardization.
+  ///
+  /// The structure of the V7 Header consists of the following:
+  ///
+  /// Start | End | Field
+  /// =========================================================================
+  /// 0     | 100 | Path name, stored as null-terminated string.
+  /// 100   | 108 | File mode, stored as an octal number in ASCII.
+  /// 108   | 116 | User id of owner, as octal number in ASCII.
+  /// 116   | 124 | Group id of owner, as octal number in ASCII.
+  /// 124   | 136 | Size of file, as octal number in ASCII.
+  /// 136   | 148 | Modification time of file, number of seconds from epoch,
+  ///               stored as an octal number in ASCII.
+  /// 148   | 156 | Header checksum, stored as an octal number in ASCII.
+  /// 156   | 157 | Link flag, determines the kind of header.
+  /// 157   | 257 | Link name, stored as a string.
+  /// 257   | 512 | NUL pad.
+  ///
+  /// Unused bytes are set to NUL ('\x00').
+  ///
+  /// Reference:
+  /// https://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5&format=html
+  /// https://www.gnu.org/software/tar/manual/html_chapter/tar_15.html#SEC188
+  /// http://cdrtools.sourceforge.net/private/man/star/star.4.html
+  static const v7 = TarFormat._internal(1);
+
+  /// USTAR (Unix Standard TAR) header format defined in POSIX.1-1988.
+  ///
+  /// The structure of the USTAR Header consists of the following:
+  ///
+  /// Start | End | Field
+  /// =========================================================================
+  /// 0     | 100 | Path name, stored as null-terminated string.
+  /// 100   | 108 | File mode, stored as an octal number in ASCII.
+  /// 108   | 116 | User id of owner, as octal number in ASCII.
+  /// 116   | 124 | Group id of owner, as octal number in ASCII.
+  /// 124   | 136 | Size of file, as octal number in ASCII.
+  /// 136   | 148 | Modification time of file, number of seconds from epoch,
+  ///               stored as an octal number in ASCII.
+  /// 148   | 156 | Header checksum, stored as an octal number in ASCII.
+  /// 156   | 157 | Type flag, determines the kind of header.
+  ///               Note that the meaning of the size field depends on the type.
+  /// 157   | 257 | Link name, stored as a string.
+  /// 257   | 263 | Contains the magic value "ustar\x00" to indicate that this is
+  ///               the USTAR format. Full compliance requires user name and
+  ///               group name fields to be set.
+  /// 263   | 265 | Version. "00" for POSIX standard archives.
+  /// 265   | 297 | User name, as null-terminated ASCII string.
+  /// 297   | 329 | Group name, as null-terminated ASCII string.
+  /// 329   | 337 | Major number for character or block device entry.
+  /// 337   | 345 | Minor number for character or block device entry.
+  /// 345   | 500 | Prefix. If the pathname is too long to fit in the 100 bytes
+  ///               provided at the start, it can be split at any / character
+  ///               with the first portion going here.
+  /// 500   | 512 | NUL pad.
+  ///
+  /// Unused bytes are set to NUL ('\x00').
+  ///
+  /// User and group names should be used in preference to uid/gid values when
+  /// they are set and the corresponding names exist on the system.
+  ///
+  /// While this format is compatible with most tar readers, the format has
+  /// several limitations making it unsuitable for some usages. Most notably, it
+  /// cannot support sparse files, files larger than 8GiB, filenames larger than
+  /// 256 characters, and non-ASCII filenames.
+  ///
+  /// Reference:
+  /// https://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5&format=html
+  /// https://www.gnu.org/software/tar/manual/html_chapter/tar_15.html#SEC188
+  /// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
+  static const ustar = TarFormat._internal(2);
+
+  /// PAX header format defined in POSIX.1-2001.
+  ///
+  /// PAX extends USTAR by writing a special file with either the `x` or `g`
+  /// type flags to allow for attributes that are not conveniently stored in a
+  /// POSIX ustar archive to be held.
+  ///
+  /// Some newer formats add their own extensions to PAX by defining their
+  /// own keys and assigning certain semantic meaning to the associated values.
+  /// For example, sparse file support in PAX is implemented using keys
+  /// defined by the GNU manual (e.g., "GNU.sparse.map").
+  ///
+  /// Reference:
+  /// https://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5&format=html
+  /// https://www.gnu.org/software/tar/manual/html_chapter/tar_15.html#SEC188
+  /// http://cdrtools.sourceforge.net/private/man/star/star.4.html
+  /// http://pubs.opengroup.org/onlinepubs/009695399/utilities/pax.html
+  static const pax = TarFormat._internal(4);
+
+  /// GNU header format.
+  ///
+  /// The GNU header format is older than the USTAR and PAX standards and
+  /// is not compatible with them. The GNU format supports
+  /// arbitrary file sizes, filenames of arbitrary encoding and length,
+  /// sparse files, and other features.
+  ///
+  /// Start | End | Field
+  /// =========================================================================
+  /// 0     | 100 | Path name, stored as null-terminated string.
+  /// 100   | 108 | File mode, stored as an octal number in ASCII.
+  /// 108   | 116 | User id of owner, as octal number in ASCII.
+  /// 116   | 124 | Group id of owner, as octal number in ASCII.
+  /// 124   | 136 | Size of file, as octal number in ASCII.
+  /// 136   | 148 | Modification time of file, number of seconds from epoch,
+  ///               stored as an octal number in ASCII.
+  /// 148   | 156 | Header checksum, stored as an octal number in ASCII.
+  /// 156   | 157 | Type flag, determines the kind of header.
+  ///               Note that the meaning of the size field depends on the type.
+  /// 157   | 257 | Link name, stored as a string.
+  /// 257   | 263 | Contains the magic value "ustar " to indicate that this is
+  ///               the GNU format.
+  /// 263   | 265 | Version. " \x00" for GNU archives.
+  /// 265   | 297 | User name, as null-terminated ASCII string.
+  /// 297   | 329 | Group name, as null-terminated ASCII string.
+  /// 329   | 337 | Major number for character or block device entry.
+  /// 337   | 345 | Minor number for character or block device entry.
+  /// 345   | 357 | Last Access time of file, number of seconds from epoch,
+  ///               stored as an octal number in ASCII.
+  /// 357   | 369 | Last Changed time of file, number of seconds from epoch,
+  ///               stored as an octal number in ASCII.
+  /// 369   | 381 | Offset - not used.
+  /// 381   | 385 | Longnames - deprecated
+  /// 385   | 386 | Unused.
+  /// 386   | 482 | Sparse data - 4 sets of (offset, numbytes) stored as
+  ///               octal numbers in ASCII.
+  /// 482   | 483 | isExtended - if this field is non-zero, this header is
+  ///               followed by additional sparse records, which are in the
+  ///               same format as above.
+  /// 483   | 495 | Binary representation of the file's complete size, inclusive
+  ///               of the sparse data.
+  /// 495   | 512 | NUL pad.
+  ///
+  /// It is recommended that PAX be chosen over GNU unless the target
+  /// application can only parse GNU formatted archives.
+  ///
+  /// Reference:
+  /// https://www.gnu.org/software/tar/manual/html_node/Standard.html
+  static const gnu = TarFormat._internal(8);
+
+  /// Schily's TAR format, which is incompatible with USTAR.
+  /// This does not cover STAR extensions to the PAX format; these fall under
+  /// the PAX format.
+  ///
+  /// Start | End | Field
+  /// =========================================================================
+  /// 0     | 100 | Path name, stored as null-terminated string.
+  /// 100   | 108 | File mode, stored as an octal number in ASCII.
+  /// 108   | 116 | User id of owner, as octal number in ASCII.
+  /// 116   | 124 | Group id of owner, as octal number in ASCII.
+  /// 124   | 136 | Size of file, as octal number in ASCII.
+  /// 136   | 148 | Modification time of file, number of seconds from epoch,
+  ///               stored as an octal number in ASCII.
+  /// 148   | 156 | Header checksum, stored as an octal number in ASCII.
+  /// 156   | 157 | Type flag, determines the kind of header.
+  ///               Note that the meaning of the size field depends on the type.
+  /// 157   | 257 | Link name, stored as a string.
+  /// 257   | 263 | Contains the magic value "ustar\x00" to indicate that this is
+  ///               the STAR format.
+  /// 263   | 265 | Version. "00" for STAR archives.
+  /// 265   | 297 | User name, as null-terminated ASCII string.
+  /// 297   | 329 | Group name, as null-terminated ASCII string.
+  /// 329   | 337 | Major number for character or block device entry.
+  /// 337   | 345 | Minor number for character or block device entry.
+  /// 345   | 476 | Prefix. If the pathname is too long to fit in the 100 bytes
+  ///               provided at the start, it can be split at any / character
+  ///               with the first portion going here.
+  /// 476   | 488 | Last Access time of file, number of seconds from epoch,
+  ///               stored as an octal number in ASCII.
+  /// 488   | 500 | Last Changed time of file, number of seconds from epoch,
+  ///               stored as an octal number in ASCII.
+  /// 500   | 508 | NUL pad.
+  /// 508   | 512 | Trailer - "tar\x00".
+  ///
+  /// Reference:
+  /// http://cdrtools.sourceforge.net/private/man/star/star.4.html
+  static const star = TarFormat._internal(16);
+}
diff --git a/lib/src/third_party/tar/src/header.dart b/lib/src/third_party/tar/src/header.dart
new file mode 100644
index 0000000..f28dc79
--- /dev/null
+++ b/lib/src/third_party/tar/src/header.dart
@@ -0,0 +1,335 @@
+import 'dart:typed_data';
+
+import 'package:meta/meta.dart';
+
+import 'constants.dart';
+import 'exception.dart';
+import 'format.dart';
+import 'utils.dart';
+
+/// Header of a tar entry
+///
+/// A tar header stores meta-information about the matching tar entry, such as
+/// its name.
+@sealed
+abstract class TarHeader {
+  /// Type of header entry. In the V7 TAR format, this field was known as the
+  /// link flag.
+  TypeFlag get typeFlag;
+
+  /// Name of file or directory entry.
+  String get name;
+
+  /// Target name of link (valid for hard links or symbolic links).
+  String? get linkName;
+
+  /// Permission and mode bits.
+  int get mode;
+
+  /// User ID of owner.
+  int get userId;
+
+  /// Group ID of owner.
+  int get groupId;
+
+  /// User name of owner.
+  String? get userName;
+
+  /// Group name of owner.
+  String? get groupName;
+
+  /// Logical file size in bytes.
+  int get size;
+
+  /// The time of the last change to the data of the TAR file.
+  DateTime get modified;
+
+  /// The time of the last access to the data of the TAR file.
+  DateTime? get accessed;
+
+  /// The time of the last change to the data or metadata of the TAR file.
+  DateTime? get changed;
+
+  /// Major device number
+  int get devMajor;
+
+  /// Minor device number
+  int get devMinor;
+
+  /// The TAR format of the header.
+  TarFormat get format;
+
+  /// Checks if this header indicates that the file will have content.
+  bool get hasContent {
+    switch (typeFlag) {
+      case TypeFlag.link:
+      case TypeFlag.symlink:
+      case TypeFlag.block:
+      case TypeFlag.dir:
+      case TypeFlag.char:
+      case TypeFlag.fifo:
+        return false;
+      default:
+        return true;
+    }
+  }
+
+  /// Creates a tar header from the individual fields.
+  factory TarHeader({
+    required String name,
+    TarFormat? format,
+    TypeFlag? typeFlag,
+    DateTime? modified,
+    String? linkName,
+    int mode = 0,
+    int size = -1,
+    String? userName,
+    int userId = 0,
+    int groupId = 0,
+    String? groupName,
+    DateTime? accessed,
+    DateTime? changed,
+    int devMajor = 0,
+    int devMinor = 0,
+  }) {
+    return HeaderImpl.internal(
+      name: name,
+      modified: modified ?? DateTime.fromMillisecondsSinceEpoch(0),
+      format: format ?? TarFormat.pax,
+      typeFlag: typeFlag ?? TypeFlag.reg,
+      linkName: linkName,
+      mode: mode,
+      size: size,
+      userName: userName,
+      userId: userId,
+      groupId: groupId,
+      groupName: groupName,
+      accessed: accessed,
+      changed: changed,
+      devMajor: devMajor,
+      devMinor: devMinor,
+    );
+  }
+
+  TarHeader._();
+}
+
+@internal
+class HeaderImpl extends TarHeader {
+  TypeFlag internalTypeFlag;
+
+  @override
+  String name;
+
+  @override
+  String? linkName;
+
+  @override
+  int mode;
+
+  @override
+  int userId;
+
+  @override
+  int groupId;
+
+  @override
+  String? userName;
+
+  @override
+  String? groupName;
+
+  @override
+  int size;
+
+  @override
+  DateTime modified;
+
+  @override
+  DateTime? accessed;
+
+  @override
+  DateTime? changed;
+
+  @override
+  int devMajor;
+
+  @override
+  int devMinor;
+
+  @override
+  TarFormat format;
+
+  @override
+  TypeFlag get typeFlag {
+    return internalTypeFlag == TypeFlag.regA ? TypeFlag.reg : internalTypeFlag;
+  }
+
+  /// This constructor is meant to help us deal with header-only headers (i.e.
+  /// meta-headers that only describe the next file instead of being headers
+  /// for files themselves).
+  HeaderImpl.internal({
+    required this.name,
+    required this.modified,
+    required this.format,
+    required TypeFlag typeFlag,
+    this.linkName,
+    this.mode = 0,
+    this.size = -1,
+    this.userName,
+    this.userId = 0,
+    this.groupId = 0,
+    this.groupName,
+    this.accessed,
+    this.changed,
+    this.devMajor = 0,
+    this.devMinor = 0,
+  })  : internalTypeFlag = typeFlag,
+        super._();
+
+  factory HeaderImpl.parseBlock(Uint8List headerBlock,
+      {Map<String, String> paxHeaders = const {}}) {
+    assert(headerBlock.length == 512);
+
+    final format = _getFormat(headerBlock);
+    final size = paxHeaders.size ?? headerBlock.readOctal(124, 12);
+
+    // Start by reading data available in every format.
+    final header = HeaderImpl.internal(
+      format: format,
+      name: headerBlock.readString(0, 100),
+      mode: headerBlock.readOctal(100, 8),
+      // These should be octal, but some weird tar implementations ignore that?!
+      // Encountered with package:RAL, version 1.28.0 on pub
+      userId: headerBlock.readNumeric(108, 8),
+      groupId: headerBlock.readNumeric(116, 8),
+      size: size,
+      modified: secondsSinceEpoch(headerBlock.readOctal(136, 12)),
+      typeFlag: typeflagFromByte(headerBlock[156]),
+      linkName: headerBlock.readStringOrNullIfEmpty(157, 100),
+    );
+
+    if (header.hasContent && size < 0) {
+      throw TarException.header('Indicates an invalid size of $size');
+    }
+
+    if (format.isValid() && format != TarFormat.v7) {
+      // If it's a valid header that is not of the v7 format, it will have the
+      // USTAR fields
+      header
+        ..userName ??= headerBlock.readStringOrNullIfEmpty(265, 32)
+        ..groupName ??= headerBlock.readStringOrNullIfEmpty(297, 32)
+        ..devMajor = headerBlock.readNumeric(329, 8)
+        ..devMinor = headerBlock.readNumeric(337, 8);
+
+      // Prefix to the file name
+      var prefix = '';
+      if (format.has(TarFormat.ustar) || format.has(TarFormat.pax)) {
+        prefix = headerBlock.readString(345, 155);
+
+        if (headerBlock.any(isNotAscii)) {
+          header.format = format.mayOnlyBe(TarFormat.pax);
+        }
+      } else if (format.has(TarFormat.star)) {
+        prefix = headerBlock.readString(345, 131);
+        header
+          ..accessed = secondsSinceEpoch(headerBlock.readNumeric(476, 12))
+          ..changed = secondsSinceEpoch(headerBlock.readNumeric(488, 12));
+      } else if (format.has(TarFormat.gnu)) {
+        header.format = TarFormat.gnu;
+
+        if (headerBlock[345] != 0) {
+          header.accessed = secondsSinceEpoch(headerBlock.readNumeric(345, 12));
+        }
+
+        if (headerBlock[357] != 0) {
+          header.changed = secondsSinceEpoch(headerBlock.readNumeric(357, 12));
+        }
+      }
+
+      if (prefix.isNotEmpty) {
+        header.name = '$prefix/${header.name}';
+      }
+    }
+
+    return header.._applyPaxHeaders(paxHeaders);
+  }
+
+  void _applyPaxHeaders(Map<String, String> headers) {
+    for (final entry in headers.entries) {
+      if (entry.value == '') {
+        continue; // Keep the original USTAR value
+      }
+
+      switch (entry.key) {
+        case paxPath:
+          name = entry.value;
+          break;
+        case paxLinkpath:
+          linkName = entry.value;
+          break;
+        case paxUname:
+          userName = entry.value;
+          break;
+        case paxGname:
+          groupName = entry.value;
+          break;
+        case paxUid:
+          userId = parseInt(entry.value);
+          break;
+        case paxGid:
+          groupId = parseInt(entry.value);
+          break;
+        case paxAtime:
+          accessed = parsePaxTime(entry.value);
+          break;
+        case paxMtime:
+          modified = parsePaxTime(entry.value);
+          break;
+        case paxCtime:
+          changed = parsePaxTime(entry.value);
+          break;
+        case paxSize:
+          size = parseInt(entry.value);
+          break;
+        default:
+          break;
+      }
+    }
+  }
+}
+
+/// Checks that [rawHeader] represents a valid tar header based on the
+/// checksum, and then attempts to guess the specific format based
+/// on magic values. If the checksum fails, then an error is thrown.
+TarFormat _getFormat(Uint8List rawHeader) {
+  final checksum = rawHeader.readOctal(checksumOffset, checksumLength);
+
+  // Modern TAR archives use the unsigned checksum, but we check the signed
+  // checksum as well for compatibility.
+  if (checksum != rawHeader.computeUnsignedHeaderChecksum() &&
+      checksum != rawHeader.computeSignedHeaderChecksum()) {
+    throw TarException.header('Checksum does not match');
+  }
+
+  final hasUstarMagic = rawHeader.matchesHeader(magicUstar);
+  if (hasUstarMagic) {
+    return rawHeader.matchesHeader(trailerStar, offset: starTrailerOffset)
+        ? TarFormat.star
+        : TarFormat.ustar | TarFormat.pax;
+  }
+
+  if (rawHeader.matchesHeader(magicGnu) &&
+      rawHeader.matchesHeader(versionGnu, offset: versionOffset)) {
+    return TarFormat.gnu;
+  }
+
+  return TarFormat.v7;
+}
+
+extension _ReadPaxHeaders on Map<String, String> {
+  int? get size {
+    final sizeStr = this[paxSize];
+    return sizeStr == null ? null : int.tryParse(sizeStr);
+  }
+}
diff --git a/lib/src/third_party/tar/src/reader.dart b/lib/src/third_party/tar/src/reader.dart
new file mode 100644
index 0000000..de84d6a
--- /dev/null
+++ b/lib/src/third_party/tar/src/reader.dart
@@ -0,0 +1,909 @@
+import 'dart:async';
+import 'dart:collection';
+import 'dart:convert';
+import 'dart:typed_data';
+
+import 'package:async/async.dart';
+import 'package:meta/meta.dart';
+import 'package:typed_data/typed_data.dart';
+
+import 'charcodes.dart';
+import 'constants.dart';
+import 'entry.dart';
+import 'exception.dart';
+import 'format.dart';
+import 'header.dart';
+import 'sparse.dart';
+import 'utils.dart';
+
+/// [TarReader] provides sequential access to the TAR files in a TAR archive.
+/// It is designed to read from a stream and to spit out substreams for
+/// individual file contents in order to minimize the amount of memory needed
+/// to read each archive where possible.
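+///
+/// For example, given a `Stream<List<int>>` named `tarBytes` containing a raw
+/// tar archive, every entry can be visited with [TarReader.forEach]:
+///
+/// ```dart
+/// await TarReader.forEach(tarBytes, (entry) {
+///   print(entry.name);
+/// });
+/// ```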
+@sealed
+class TarReader implements StreamIterator<TarEntry> {
+  /// A chunked stream iterator to enable us to get our data.
+  final ChunkedStreamReader<int> _chunkedStream;
+  final PaxHeaders _paxHeaders = PaxHeaders();
+  final int _maxSpecialFileSize;
+
+  /// Skip the next [_skipNext] elements when reading in the stream.
+  int _skipNext = 0;
+
+  TarEntry? _current;
+
+  /// The underlying content stream for the [_current] entry. Draining this
+  /// stream will move the tar reader to the beginning of the next file.
+  ///
+  /// This is not the same as `_current.contents` for sparse files, which are
+  /// reported as expanded through [TarEntry.contents].
+  /// For that reason, we prefer to drain this stream when skipping a tar entry.
+  /// When we know we're skipping data, there's no point expanding sparse holes.
+  ///
+  /// This stream is always set to null after being drained, and there can only
+  /// be one [_underlyingContentStream] at a time.
+  Stream<List<int>>? _underlyingContentStream;
+
+  /// Whether [_current] has ever been listened to.
+  bool _listenedToContentsOnce = false;
+
+  /// Whether we're in the process of reading tar headers.
+  bool _isReadingHeaders = false;
+
+  /// Whether this tar reader is terminally done.
+  ///
+  /// That is the case if:
+  ///  - [cancel] was called
+  ///  - [moveNext] completed to `false` once.
+  ///  - [moveNext] completed to an error
+  ///  - an error was emitted through a tar entry's content stream
+  bool _isDone = false;
+
+  /// Whether we should ensure that the stream emits no further data after the
+  /// end of the tar file was reached.
+  final bool _checkNoTrailingData;
+
+  /// Creates a tar reader reading from the raw [tarStream].
+  ///
+  /// The [disallowTrailingData] parameter can be enabled to assert that the
+  /// [tarStream] contains exactly one tar archive before ending.
+  /// When [disallowTrailingData] is disabled (which is the default), the reader
+  /// will automatically cancel its stream subscription when [moveNext] returns
+  /// `false`.
+  /// When it is enabled and a marker indicating the end of an archive is
+  /// encountered, [moveNext] will wait for further events on the stream. If
+  /// further data is received, a [TarException] will be thrown and the
+  /// subscription will be cancelled. Otherwise, [moveNext] effectively waits
+  /// for a done event, making a cancellation unnecessary.
+  /// Depending on the input stream, cancellations may cause unintended
+  /// side-effects. In that case, [disallowTrailingData] can be used to ensure
+  /// that the stream is only cancelled if it emits an invalid tar file.
+  ///
+  /// The [maxSpecialFileSize] parameter can be used to limit the maximum length
+  /// of hidden entries in the tar stream. These entries include extended PAX
+  /// headers or long names in GNU tar. The content of those entries has to be
+  /// buffered in the parser to properly read the following tar entries. To
+  /// avoid memory-based denial-of-service attacks, this library limits their
+  /// maximum length. Changing the default of 2 KiB is rarely necessary.
+  TarReader(Stream<List<int>> tarStream,
+      {int maxSpecialFileSize = defaultSpecialLength,
+      bool disallowTrailingData = false})
+      : _chunkedStream = ChunkedStreamReader(tarStream),
+        _checkNoTrailingData = disallowTrailingData,
+        _maxSpecialFileSize = maxSpecialFileSize;
+
+  @override
+  TarEntry get current {
+    final current = _current;
+
+    if (current == null) {
+      throw StateError('Invalid call to TarReader.current. \n'
+          'Did you await moveNext() and check that it returned true?');
+    }
+
+    return current;
+  }
+
+  /// Reads the tar stream up until the beginning of the next logical file.
+  ///
+  /// If such a file exists, the returned future will complete with `true`. After
+  /// the future completes, the next tar entry will be available in [current].
+  ///
+  /// If no such file exists, the future will complete with `false`.
+  /// The future might complete with a [TarException] if the tar stream is
+  /// malformed or ends unexpectedly.
+  /// If the future completes with `false` or an exception, the reader will
+  /// [cancel] itself and release associated resources. Thus, it is invalid to
+  /// call [moveNext] again in that case.
+  @override
+  Future<bool> moveNext() async {
+    await _prepareToReadHeaders();
+    try {
+      return await _moveNextInternal();
+    } on Object {
+      await cancel();
+      rethrow;
+    }
+  }
+
+  /// Consumes the stream up to the contents of the next logical tar entry.
+  /// Will cancel the underlying subscription when returning false, but not when
+  /// it throws.
+  Future<bool> _moveNextInternal() async {
+    // We're reading a new logical file, so clear the local pax headers
+    _paxHeaders.clearLocals();
+
+    var gnuLongName = '';
+    var gnuLongLink = '';
+    var eofAcceptable = true;
+
+    var format = TarFormat.ustar |
+        TarFormat.pax |
+        TarFormat.gnu |
+        TarFormat.v7 |
+        TarFormat.star;
+
+    HeaderImpl? nextHeader;
+
+    // Externally, [moveNext] iterates through the tar archive as if it is a
+    // series of files. Internally, the tar format often uses fake "files" to
+    // add meta data that describes the next file. These meta data "files"
+    // should not normally be visible to the outside. As such, this loop
+    // iterates through one or more "header files" until it finds a
+    // "normal file".
+    while (true) {
+      if (_skipNext > 0) {
+        await _readFullBlock(_skipNext);
+        _skipNext = 0;
+      }
+
+      final rawHeader =
+          await _readFullBlock(blockSize, allowEmpty: eofAcceptable);
+
+      nextHeader = await _readHeader(rawHeader);
+      if (nextHeader == null) {
+        if (eofAcceptable) {
+          await _handleExpectedEof();
+          return false;
+        } else {
+          _unexpectedEof();
+        }
+      }
+
+      // We're beginning to read a file; if the tar file ends now, something
+      // is wrong.
+      eofAcceptable = false;
+      format = format.mayOnlyBe(nextHeader.format);
+
+      // Check for PAX/GNU special headers and files.
+      if (nextHeader.typeFlag == TypeFlag.xHeader ||
+          nextHeader.typeFlag == TypeFlag.xGlobalHeader) {
+        format = format.mayOnlyBe(TarFormat.pax);
+        final paxHeaderSize = _checkSpecialSize(nextHeader.size);
+        final rawPaxHeaders = await _readFullBlock(paxHeaderSize);
+
+        _paxHeaders.readPaxHeaders(
+            rawPaxHeaders, nextHeader.typeFlag == TypeFlag.xGlobalHeader);
+        _markPaddingToSkip(paxHeaderSize);
+
+        // This is a meta header affecting the next header.
+        continue;
+      } else if (nextHeader.typeFlag == TypeFlag.gnuLongLink ||
+          nextHeader.typeFlag == TypeFlag.gnuLongName) {
+        format = format.mayOnlyBe(TarFormat.gnu);
+        final realName = await _readFullBlock(
+            _checkSpecialSize(nextBlockSize(nextHeader.size)));
+
+        final readName = realName.readString(0, realName.length);
+        if (nextHeader.typeFlag == TypeFlag.gnuLongName) {
+          gnuLongName = readName;
+        } else {
+          gnuLongLink = readName;
+        }
+
+        // This is a meta header affecting the next header.
+        continue;
+      } else {
+        // The old GNU sparse format is handled here since it is technically
+        // just a regular file with additional attributes.
+
+        if (gnuLongName.isNotEmpty) nextHeader.name = gnuLongName;
+        if (gnuLongLink.isNotEmpty) nextHeader.linkName = gnuLongLink;
+
+        if (nextHeader.internalTypeFlag == TypeFlag.regA) {
+          /// Legacy archives use trailing slash for directories
+          if (nextHeader.name.endsWith('/')) {
+            nextHeader.internalTypeFlag = TypeFlag.dir;
+          } else {
+            nextHeader.internalTypeFlag = TypeFlag.reg;
+          }
+        }
+
+        final content = await _handleFile(nextHeader, rawHeader);
+
+        // Set the final guess at the format
+        if (format.has(TarFormat.ustar) && format.has(TarFormat.pax)) {
+          format = format.mayOnlyBe(TarFormat.ustar);
+        }
+        nextHeader.format = format;
+
+        _current = TarEntry(nextHeader, content);
+        _listenedToContentsOnce = false;
+        _isReadingHeaders = false;
+        return true;
+      }
+    }
+  }
+
+  @override
+  Future<void> cancel() async {
+    if (_isDone) return;
+
+    _isDone = true;
+    _current = null;
+    _underlyingContentStream = null;
+    _listenedToContentsOnce = false;
+    _isReadingHeaders = false;
+
+    // Note: Calling cancel is safe when the stream has already been completed.
+    // It's a noop in that case, which is what we want.
+    return _chunkedStream.cancel();
+  }
+
+  /// Utility function for quickly iterating through all entries in [tarStream].
+  static Future<void> forEach(Stream<List<int>> tarStream,
+      FutureOr<void> Function(TarEntry entry) action) async {
+    final reader = TarReader(tarStream);
+    try {
+      while (await reader.moveNext()) {
+        await action(reader.current);
+      }
+    } finally {
+      await reader.cancel();
+    }
+  }
+
+  /// Ensures that this reader can safely read headers now.
+  ///
+  /// This method prevents:
+  ///  * concurrent calls to [moveNext]
+  ///  * a call to [moveNext] while a stream is active:
+  ///    * if [contents] has never been listened to, we drain the stream
+  ///    * otherwise, throws a [StateError]
+  Future<void> _prepareToReadHeaders() async {
+    if (_isDone) {
+      throw StateError('Tried to call TarReader.moveNext() on a canceled '
+          'reader. \n'
+          'Note that a reader is canceled when moveNext() throws or returns '
+          'false.');
+    }
+
+    if (_isReadingHeaders) {
+      throw StateError('Concurrent call to TarReader.moveNext() detected. \n'
+          'Please await all calls to Reader.moveNext().');
+    }
+    _isReadingHeaders = true;
+
+    final underlyingStream = _underlyingContentStream;
+    if (underlyingStream != null) {
+      if (_listenedToContentsOnce) {
+        throw StateError(
+            'Illegal call to TarReader.moveNext() while a previous stream was '
+            'active.\n'
+            'When listening to tar contents, make sure the stream is '
+            'complete or cancelled before calling TarReader.moveNext() again.');
+      } else {
+        await underlyingStream.drain<void>();
+        // The stream should reset when drained (we do this in _publishStream)
+        assert(_underlyingContentStream == null);
+      }
+    }
+  }
+
+  int _checkSpecialSize(int size) {
+    if (size > _maxSpecialFileSize) {
+      throw TarException(
+          'TAR file contains hidden entry with an invalid size of $size.');
+    }
+
+    return size;
+  }
+
+  /// After we've detected the end of a tar file, optionally check for trailing
+  /// data.
+  Future<void> _handleExpectedEof() async {
+    if (_checkNoTrailingData) {
+      // Trailing zeroes are okay, but don't allow any more data here.
+      Uint8List block;
+
+      do {
+        block = await _chunkedStream.readBytes(blockSize);
+        if (!block.isAllZeroes) {
+          throw TarException(
+              'Illegal content after the end of the tar archive.');
+        }
+      } while (block.length == blockSize);
+      // The stream is done when we couldn't read the full block.
+    }
+
+    await cancel();
+  }
+
+  Never _unexpectedEof() {
+    throw TarException.header('Unexpected end of file');
+  }
+
+  /// Reads a block with the requested [size], or throws an unexpected EoF
+  /// exception.
+  Future<Uint8List> _readFullBlock(int size, {bool allowEmpty = false}) async {
+    final block = await _chunkedStream.readBytes(size);
+    if (block.length != size && !(allowEmpty && block.isEmpty)) {
+      _unexpectedEof();
+    }
+
+    return block;
+  }
+
+  /// Reads the next block header and assumes that the underlying reader
+  /// is already aligned to a block boundary. It returns the raw block of the
+  /// header in case further processing is required.
+  ///
+  /// EOF is hit when one of the following occurs:
+  ///	* Exactly 0 bytes are read and EOF is hit.
+  ///	* Exactly 1 block of zeros is read and EOF is hit.
+  ///	* At least 2 blocks of zeros are read.
+  Future<HeaderImpl?> _readHeader(Uint8List rawHeader) async {
+    // Exactly 0 bytes are read and EOF is hit.
+    if (rawHeader.isEmpty) return null;
+
+    if (rawHeader.isAllZeroes) {
+      rawHeader = await _chunkedStream.readBytes(blockSize);
+
+      // Exactly 1 block of zeroes is read and EOF is hit.
+      if (rawHeader.isEmpty) return null;
+
+      if (rawHeader.isAllZeroes) {
+        // Two blocks of zeros are read - Normal EOF.
+        return null;
+      }
+
+      throw TarException('Encountered a non-zero block after a zero block');
+    }
+
+    return HeaderImpl.parseBlock(rawHeader, paxHeaders: _paxHeaders);
+  }
+
+  /// Creates a stream of the next entry's content
+  Future<Stream<List<int>>> _handleFile(
+      HeaderImpl header, Uint8List rawHeader) async {
+    List<SparseEntry>? sparseData;
+    if (header.typeFlag == TypeFlag.gnuSparse) {
+      sparseData = await _readOldGNUSparseMap(header, rawHeader);
+    } else {
+      sparseData = await _readGNUSparsePAXHeaders(header);
+    }
+
+    if (sparseData != null) {
+      if (header.hasContent &&
+          !validateSparseEntries(sparseData, header.size)) {
+        throw TarException.header('Invalid sparse file header.');
+      }
+
+      final sparseHoles = invertSparseEntries(sparseData, header.size);
+      final sparseDataLength =
+          sparseData.fold<int>(0, (value, element) => value + element.length);
+
+      final streamLength = nextBlockSize(sparseDataLength);
+      final safeStream =
+          _publishStream(_chunkedStream.readStream(streamLength), streamLength);
+      return sparseStream(safeStream, sparseHoles, header.size);
+    } else {
+      var size = header.size;
+      if (!header.hasContent) size = 0;
+
+      if (size < 0) {
+        throw TarException.header('Invalid size ($size) detected!');
+      }
+
+      if (size == 0) {
+        return _publishStream(const Stream<Never>.empty(), 0);
+      } else {
+        _markPaddingToSkip(size);
+        return _publishStream(
+            _chunkedStream.readStream(header.size), header.size);
+      }
+    }
+  }
+
+  /// Publishes a library-internal stream for users.
+  ///
+  /// This adds a check to ensure that the stream we're exposing has the
+  /// expected length. It also sets the [_underlyingContentStream] field when
+  /// the stream starts and resets it when it's done.
+  Stream<List<int>> _publishStream(Stream<List<int>> stream, int length) {
+    // There can only be one content stream at a time. This precondition is
+    // checked by _prepareToReadHeaders.
+    assert(_underlyingContentStream == null);
+    return _underlyingContentStream = Stream.eventTransformed(stream, (sink) {
+      _listenedToContentsOnce = true;
+
+      late _OutgoingStreamGuard guard;
+      return guard = _OutgoingStreamGuard(
+        length,
+        sink,
+        // Reset state when the stream is done. This will only be called when
+        // the stream is done, not when a listener cancels.
+        () {
+          _underlyingContentStream = null;
+          if (guard.hadError) {
+            cancel();
+          }
+        },
+      );
+    });
+  }
+
+  /// Skips to the next block after reading [readSize] bytes from the beginning
+  /// of a previous block.
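+  ///
+  /// For example, after 700 bytes have been read, 188 bytes fall into the
+  /// second block, so 324 padding bytes are skipped to align the reader with
+  /// the next block boundary at offset 1024.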
+  void _markPaddingToSkip(int readSize) {
+    final offsetInLastBlock = readSize.toUnsigned(blockSizeLog2);
+    if (offsetInLastBlock != 0) {
+      _skipNext = blockSize - offsetInLastBlock;
+    }
+  }
+
+  /// Checks the PAX headers for GNU sparse headers.
+  /// If they are found, then this function reads the sparse map and returns it.
+  /// This assumes that 0.0 headers have already been converted to 0.1 headers
+  /// by the PAX header parsing logic.
+  Future<List<SparseEntry>?> _readGNUSparsePAXHeaders(HeaderImpl header) async {
+    /// Identify the version of GNU headers.
+    var isVersion1 = false;
+    final major = _paxHeaders[paxGNUSparseMajor];
+    final minor = _paxHeaders[paxGNUSparseMinor];
+
+    final sparseMapHeader = _paxHeaders[paxGNUSparseMap];
+    if (major == '0' && (minor == '0' || minor == '1') ||
+        // assume 0.0 or 0.1 if no version header is set
+        sparseMapHeader != null && sparseMapHeader.isNotEmpty) {
+      isVersion1 = false;
+    } else if (major == '1' && minor == '0') {
+      isVersion1 = true;
+    } else {
+      // Unknown version that we don't support
+      return null;
+    }
+
+    header.format |= TarFormat.pax;
+
+    /// Update [header] from GNU sparse PAX headers.
+    final possibleName = _paxHeaders[paxGNUSparseName] ?? '';
+    if (possibleName.isNotEmpty) {
+      header.name = possibleName;
+    }
+
+    final possibleSize =
+        _paxHeaders[paxGNUSparseSize] ?? _paxHeaders[paxGNUSparseRealSize];
+
+    if (possibleSize != null && possibleSize.isNotEmpty) {
+      final size = int.tryParse(possibleSize, radix: 10);
+      if (size == null) {
+        throw TarException.header('Invalid PAX size ($possibleSize) detected');
+      }
+
+      header.size = size;
+    }
+
+    // Read the sparse map according to the appropriate format.
+    if (isVersion1) {
+      return await _readGNUSparseMap1x0();
+    }
+
+    return _readGNUSparseMap0x1(header);
+  }
+
+  /// Reads the sparse map as stored in GNU's PAX sparse format version 1.0.
+  /// The format of the sparse map consists of a series of newline-terminated
+  /// numeric fields. The first field is the number of entries and is always
+  /// present. Following this are the entries, consisting of two fields
+  /// (offset, length). This function must stop reading at the end boundary of
+  /// the block containing the last newline.
+  ///
+  /// Note that the GNU manual says that numeric values should be encoded in
+  /// octal format. However, the GNU tar utility itself outputs these values in
+  /// decimal. As such, this library treats values as being encoded in decimal.
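+  ///
+  /// For example, a map describing two 512-byte data regions at offsets 0 and
+  /// 1024 would be encoded as `2\n0\n512\n1024\n512\n`, padded with zero bytes
+  /// to a full 512-byte block.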
+  Future<List<SparseEntry>> _readGNUSparseMap1x0() async {
+    var newLineCount = 0;
+    final block = Uint8Queue();
+
+    /// Ensures that [block] has at least [n] tokens.
+    Future<void> feedTokens(int n) async {
+      while (newLineCount < n) {
+        final newBlock = await _chunkedStream.readBytes(blockSize);
+        if (newBlock.length < blockSize) {
+          throw TarException.header(
+              'GNU Sparse Map does not have enough lines!');
+        }
+
+        block.addAll(newBlock);
+        newLineCount += newBlock.where((byte) => byte == $lf).length;
+      }
+    }
+
+    /// Get the next token delimited by a newline. This assumes that
+    /// at least one newline exists in the buffer.
+    String nextToken() {
+      newLineCount--;
+      final nextNewLineIndex = block.indexOf($lf);
+      final result = block.sublist(0, nextNewLineIndex);
+      block.removeRange(0, nextNewLineIndex + 1);
+      return result.readString(0, nextNewLineIndex);
+    }
+
+    await feedTokens(1);
+
+    // Parse for the number of entries.
+    // Use integer overflow resistant math to check this.
+    final numEntriesString = nextToken();
+    final numEntries = int.tryParse(numEntriesString);
+    if (numEntries == null || numEntries < 0 || 2 * numEntries < numEntries) {
+      throw TarException.header(
+          'Invalid sparse map number of entries: $numEntriesString!');
+    }
+
+    // Parse for all member entries.
+    // [numEntries] is trusted after this since a potential attacker must have
+    // committed resources proportional to what this library used.
+    await feedTokens(2 * numEntries);
+
+    final sparseData = <SparseEntry>[];
+
+    for (var i = 0; i < numEntries; i++) {
+      final offsetToken = nextToken();
+      final lengthToken = nextToken();
+
+      final offset = int.tryParse(offsetToken);
+      final length = int.tryParse(lengthToken);
+
+      if (offset == null || length == null) {
+        throw TarException.header(
+            'Failed to read a GNU sparse map entry. Encountered '
+            'offset: $offsetToken, length: $lengthToken');
+      }
+
+      sparseData.add(SparseEntry(offset, length));
+    }
+    return sparseData;
+  }
+
+  /// Reads the sparse map as stored in GNU's PAX sparse format version 0.1.
+  /// The sparse map is stored in the PAX headers in the following form:
+  /// `offset₀,size₀,offset₁,size₁...`
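+  ///
+  /// For example (illustrative values only), these PAX headers describe the
+  /// entries `SparseEntry(0, 8)` and `SparseEntry(1024, 512)`:
+  ///
+  /// ```dart
+  /// const headers = {
+  ///   paxGNUSparseNumBlocks: '2',
+  ///   paxGNUSparseMap: '0,8,1024,512',
+  /// };
+  /// ```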
+  List<SparseEntry> _readGNUSparseMap0x1(TarHeader header) {
+    // Get number of entries, check for integer overflows
+    final numEntriesString = _paxHeaders[paxGNUSparseNumBlocks];
+    final numEntries =
+        numEntriesString != null ? int.tryParse(numEntriesString) : null;
+
+    if (numEntries == null || numEntries < 0 || 2 * numEntries < numEntries) {
+      throw TarException.header('Invalid GNU version 0.1 map');
+    }
+
+    // There should be two numbers in [sparseMap] for each entry.
+    final sparseMap = _paxHeaders[paxGNUSparseMap]?.split(',');
+    if (sparseMap == null) {
+      throw TarException.header('Invalid GNU version 0.1 map');
+    }
+
+    if (sparseMap.length != 2 * numEntries) {
+      throw TarException.header(
+          'Detected sparse map length ${sparseMap.length} '
+          'that is not twice the number of entries $numEntries');
+    }
+
+    /// Loop through sparse map entries.
+    /// [numEntries] is now trusted.
+    final sparseData = <SparseEntry>[];
+    for (var i = 0; i < sparseMap.length; i += 2) {
+      final offset = int.tryParse(sparseMap[i]);
+      final length = int.tryParse(sparseMap[i + 1]);
+
+      if (offset == null || length == null) {
+        throw TarException.header(
+            'Failed to read a GNU sparse map entry. Encountered '
+            'offset: $offset, length: $length');
+      }
+
+      sparseData.add(SparseEntry(offset, length));
+    }
+
+    return sparseData;
+  }
+
+  /// Reads the sparse map from the old GNU sparse format.
+  /// The sparse map is stored in the tar header if it's small enough.
+  /// If it's larger than four entries, then one or more extension headers are
+  /// used to store the rest of the sparse map.
+  ///
+  /// [TarHeader.size] does not reflect the size of any extended headers used.
+  /// Thus, this function will read from the chunked stream iterator to fetch
+  /// extra headers.
+  ///
+  /// See also: https://www.gnu.org/software/tar/manual/html_section/tar_94.html#SEC191
+  Future<List<SparseEntry>> _readOldGNUSparseMap(
+      HeaderImpl header, Uint8List rawHeader) async {
+    // Make sure that the input format is GNU.
+    // Unfortunately, the STAR format also has a sparse header format that uses
+    // the same type flag but has a completely different layout.
+    if (header.format != TarFormat.gnu) {
+      throw TarException.header('Tried to read sparse map of non-GNU header');
+    }
+
+    header.size = rawHeader.readNumeric(483, 12);
+    final sparseMaps = <Uint8List>[];
+
+    var sparse = rawHeader.sublistView(386, 483);
+    sparseMaps.add(sparse);
+
+    while (true) {
+      final maxEntries = sparse.length ~/ 24;
+      if (sparse[24 * maxEntries] > 0) {
+        // If there are more entries, read an extension header and parse its
+        // entries.
+        sparse = await _chunkedStream.readBytes(blockSize);
+        sparseMaps.add(sparse);
+        continue;
+      }
+
+      break;
+    }
+
+    try {
+      return _processOldGNUSparseMap(sparseMaps);
+    } on FormatException {
+      throw TarException('Invalid old GNU Sparse Map');
+    }
+  }
+
+  /// Process [sparseMaps], which is known to be an OLD GNU v0.1 sparse map.
+  ///
+  /// For details, see https://www.gnu.org/software/tar/manual/html_section/tar_94.html#SEC191
+  List<SparseEntry> _processOldGNUSparseMap(List<Uint8List> sparseMaps) {
+    final sparseData = <SparseEntry>[];
+
+    for (final sparseMap in sparseMaps) {
+      final maxEntries = sparseMap.length ~/ 24;
+      for (var i = 0; i < maxEntries; i++) {
+        // This termination condition is identical to GNU and BSD tar.
+        if (sparseMap[i * 24] == 0) {
+          // Don't return, need to process extended headers (even if empty)
+          break;
+        }
+
+        final offset = sparseMap.readNumeric(i * 24, 12);
+        final length = sparseMap.readNumeric(i * 24 + 12, 12);
+
+        sparseData.add(SparseEntry(offset, length));
+      }
+    }
+    return sparseData;
+  }
+}
+
+@internal
+class PaxHeaders extends UnmodifiableMapBase<String, String> {
+  final Map<String, String> _globalHeaders = {};
+  Map<String, String> _localHeaders = {};
+
+  /// Applies new global PAX-headers from the map.
+  ///
+  /// The [headers] will replace global headers with the same key, but leave
+  /// others intact.
+  void newGlobals(Map<String, String> headers) {
+    _globalHeaders.addAll(headers);
+  }
+
+  void addLocal(String key, String value) => _localHeaders[key] = value;
+
+  void removeLocal(String key) => _localHeaders.remove(key);
+
+  /// Applies new local PAX-headers from the map.
+  ///
+  /// This replaces all currently active local headers.
+  void newLocals(Map<String, String> headers) {
+    _localHeaders = headers;
+  }
+
+  /// Clears local headers.
+  ///
+  /// This is used by the reader after a file has ended, as local headers only
+  /// apply to the next entry.
+  void clearLocals() {
+    _localHeaders = {};
+  }
+
+  @override
+  String? operator [](Object? key) {
+    return _localHeaders[key] ?? _globalHeaders[key];
+  }
+
+  @override
+  Iterable<String> get keys => {..._globalHeaders.keys, ..._localHeaders.keys};
+
+  /// Decodes the content of an extended pax header entry.
+  ///
+  /// Semantically, a [PAX Header][posix pax] is a map with string keys and
+  /// values, where both keys and values are encoded with utf8.
+  ///
+  /// However, [old GNU Versions][gnu sparse00] used to repeat keys to store
+  /// sparse file information in sparse headers. This method will transparently
+  /// rewrite the PAX format of version 0.0 to version 0.1.
+  ///
+  /// [posix pax]: https://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_03
+  /// [gnu sparse00]: https://www.gnu.org/software/tar/manual/html_section/tar_94.html#SEC192
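+  ///
+  /// For example (a single, well-formed record with an illustrative value),
+  /// one `mtime` entry is encoded as:
+  ///
+  /// ```dart
+  /// // '30' is the total record length, counting itself, the space, the key,
+  /// // '=', the value and the trailing newline.
+  /// final record = utf8.encode('30 mtime=1593160620.972986609\n');
+  /// ```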
+  void readPaxHeaders(List<int> data, bool isGlobal,
+      {bool ignoreUnknown = true}) {
+    var offset = 0;
+    final map = <String, String>{};
+    final sparseMap = <String>[];
+
+    Never error() => throw TarException.header('Invalid PAX record');
+
+    while (offset < data.length) {
+      // At the start of an entry, expect its length which is terminated by a
+      // space char.
+      final space = data.indexOf($space, offset);
+      if (space == -1) break;
+
+      var length = 0;
+      var currentChar = data[offset];
+      var charsInLength = 0;
+      while (currentChar >= $0 && currentChar <= $9) {
+        length = length * 10 + currentChar - $0;
+        charsInLength++;
+        currentChar = data[++offset];
+      }
+
+      if (length == 0) {
+        error();
+      }
+
+      // Skip the whitespace
+      if (currentChar != $space) {
+        error();
+      }
+      offset++;
+
+      // Length also includes the length description and a space we just read
+      final endOfEntry = offset + length - 1 - charsInLength;
+      // Check against endOfEntry - 1 because the trailing newline is
+      // optional for the last entry.
+      if (endOfEntry < offset || endOfEntry - 1 > data.length) {
+        error();
+      }
+
+      // Read the key
+      final nextEquals = data.indexOf($equal, offset);
+      if (nextEquals == -1 || nextEquals >= endOfEntry) {
+        error();
+      }
+
+      final key = utf8.decoder.convert(data, offset, nextEquals);
+      // Skip over the equals sign
+      offset = nextEquals + 1;
+
+      // Subtract one for trailing newline
+      final endOfValue = endOfEntry - 1;
+      final value = utf8.decoder.convert(data, offset, endOfValue);
+
+      if (!_isValidPaxRecord(key, value)) {
+        error();
+      }
+
+      // If we're seeing weird PAX Version 0.0 sparse keys, expect alternating
+      // GNU.sparse.offset and GNU.sparse.numbytes headers.
+      if (key == paxGNUSparseNumBytes || key == paxGNUSparseOffset) {
+        if ((sparseMap.length % 2 == 0 && key != paxGNUSparseOffset) ||
+            (sparseMap.length % 2 == 1 && key != paxGNUSparseNumBytes) ||
+            value.contains(',')) {
+          error();
+        }
+
+        sparseMap.add(value);
+      } else if (!ignoreUnknown || supportedPaxHeaders.contains(key)) {
+        // Ignore unrecognized headers to avoid unbounded growth of the global
+        // header map.
+        map[key] = value;
+      }
+
+      // Skip over value
+      offset = endOfValue;
+      // and the trailing newline
+      final hasNewline = offset < data.length;
+      if (hasNewline && data[offset] != $lf) {
+        throw TarException('Invalid PAX Record (missing trailing newline)');
+      }
+      offset++;
+    }
+
+    if (sparseMap.isNotEmpty) {
+      map[paxGNUSparseMap] = sparseMap.join(',');
+    }
+
+    if (isGlobal) {
+      newGlobals(map);
+    } else {
+      newLocals(map);
+    }
+  }
+
+  /// Checks whether [key], [value] is a valid entry in a pax header.
+  ///
+  /// This is adopted from the Golang tar reader (`validPAXRecord`), which says
+  /// that "Keys and values should be UTF-8, but the number of bad writers out
+  /// there forces us to be more liberal."
+  static bool _isValidPaxRecord(String key, String value) {
+    // These limitations are documented in the PAX standard.
+    if (key.isEmpty || key.contains('=')) return false;
+
+    // These aren't, but Golang's tar has them and got away with it.
+    switch (key) {
+      case paxPath:
+      case paxLinkpath:
+      case paxUname:
+      case paxGname:
+        return !value.codeUnits.contains(0);
+      default:
+        return !key.codeUnits.contains(0);
+    }
+  }
+}
+
+/// Event-sink tracking the length of emitted tar entry streams.
+///
+/// [ChunkedStreamReader.readStream] might return a stream shorter than
+/// expected. That indicates an invalid tar file though, since the correct size
+/// is stored in the header.
+class _OutgoingStreamGuard extends EventSink<List<int>> {
+  final int expectedSize;
+  final EventSink<List<int>> out;
+  void Function() onDone;
+
+  int emittedSize = 0;
+  bool hadError = false;
+
+  _OutgoingStreamGuard(this.expectedSize, this.out, this.onDone);
+
+  @override
+  void add(List<int> event) {
+    emittedSize += event.length;
+    // We have checks limiting the length of outgoing streams. If the stream is
+    // larger than expected, that's a bug in pkg:tar.
+    assert(
+        emittedSize <= expectedSize,
+        'Stream now emitted $emittedSize bytes, but only expected '
+        '$expectedSize');
+
+    out.add(event);
+  }
+
+  @override
+  void addError(Object error, [StackTrace? stackTrace]) {
+    hadError = true;
+    out.addError(error, stackTrace);
+  }
+
+  @override
+  void close() {
+    onDone();
+
+    // If the stream stopped after an error, the user is already aware that
+    // something is wrong.
+    if (emittedSize < expectedSize && !hadError) {
+      out.addError(
+          TarException('Unexpected end of tar file'), StackTrace.current);
+    }
+
+    out.close();
+  }
+}
diff --git a/lib/src/third_party/tar/src/sparse.dart b/lib/src/third_party/tar/src/sparse.dart
new file mode 100644
index 0000000..06b88b1
--- /dev/null
+++ b/lib/src/third_party/tar/src/sparse.dart
@@ -0,0 +1,149 @@
+import 'package:async/async.dart';
+import 'package:meta/meta.dart';
+
+import 'exception.dart';
+import 'utils.dart';
+
+/// Represents a [length]-sized fragment at [offset] in a file.
+///
+/// [SparseEntry]s can represent either data or holes, and we can easily
+/// convert between the two if we know the size of the file: the data
+/// fragments and the hole fragments combined must cover the full size.
+class SparseEntry {
+  final int offset;
+  final int length;
+
+  SparseEntry(this.offset, this.length);
+
+  int get end => offset + length;
+
+  @override
+  String toString() => 'offset: $offset, length: $length';
+
+  @override
+  bool operator ==(Object? other) {
+    if (other is! SparseEntry) return false;
+
+    return offset == other.offset && length == other.length;
+  }
+
+  @override
+  int get hashCode => offset ^ length;
+}
+
+/// Generates a stream of the sparse file contents of size [size], given
+/// [sparseHoles] and the raw content in [source].
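+///
+/// A minimal sketch (illustrative values): for a 10-byte file whose only hole
+/// is `SparseEntry(0, 4)`, the remaining six data bytes come from [source]:
+///
+/// ```dart
+/// final expanded = sparseStream(
+///   Stream.value([1, 2, 3, 4, 5, 6]),
+///   [SparseEntry(0, 4)],
+///   10,
+/// );
+/// // Emits four zero bytes followed by 1, 2, 3, 4, 5, 6.
+/// ```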
+@internal
+Stream<List<int>> sparseStream(
+    Stream<List<int>> source, List<SparseEntry> sparseHoles, int size) {
+  if (sparseHoles.isEmpty) {
+    return ChunkedStreamReader(source).readStream(size);
+  }
+
+  return _sparseStream(source, sparseHoles, size);
+}
+
+/// Generates a stream of the sparse file contents of size [size], given
+/// [sparseHoles] and the raw content in [source].
+///
+/// [sparseHoles] has to be non-empty.
+Stream<List<int>> _sparseStream(
+    Stream<List<int>> source, List<SparseEntry> sparseHoles, int size) async* {
+  // Current logical position in sparse file.
+  var position = 0;
+
+  // Index of the next sparse hole in [sparseHoles] to be processed.
+  var sparseHoleIndex = 0;
+
+  // Iterator through [source] to obtain the data bytes.
+  final iterator = ChunkedStreamReader(source);
+
+  while (position < size) {
+    // Yield all the necessary sparse holes.
+    while (sparseHoleIndex < sparseHoles.length &&
+        sparseHoles[sparseHoleIndex].offset == position) {
+      final sparseHole = sparseHoles[sparseHoleIndex];
+      yield* zeroes(sparseHole.length);
+      position += sparseHole.length;
+      sparseHoleIndex++;
+    }
+
+    if (position == size) break;
+
+    /// Yield up to the next sparse hole's offset, or all the way to the end
+    /// if there are no sparse holes left.
+    var yieldTo = size;
+    if (sparseHoleIndex < sparseHoles.length) {
+      yieldTo = sparseHoles[sparseHoleIndex].offset;
+    }
+
+    // Yield data as substream, but make sure that we have enough data.
+    var checkedPosition = position;
+    await for (final chunk in iterator.readStream(yieldTo - position)) {
+      yield chunk;
+      checkedPosition += chunk.length;
+    }
+
+    if (checkedPosition != yieldTo) {
+      throw TarException('Invalid sparse data: Unexpected end of input stream');
+    }
+
+    position = yieldTo;
+  }
+}
+
+/// Reports whether [sparseEntries] is a valid sparse map.
+/// It does not matter whether [sparseEntries] represents data fragments or
+/// hole fragments.
+bool validateSparseEntries(List<SparseEntry> sparseEntries, int size) {
+  // Validate all sparse entries. These are the same checks as performed by
+  // the BSD tar utility.
+  if (size < 0) return false;
+
+  SparseEntry? previous;
+
+  for (final current in sparseEntries) {
+    // Negative values are never okay.
+    if (current.offset < 0 || current.length < 0) return false;
+
+    // Integer overflow with large length.
+    if (current.offset + current.length < current.offset) return false;
+
+    // Region extends beyond the actual size.
+    if (current.end > size) return false;
+
+    // Regions cannot overlap and must be in order.
+    if (previous != null && previous.end > current.offset) return false;
+
+    previous = current;
+  }
+
+  return true;
+}
+
+/// Converts a sparse map ([source]) from one form to the other.
+/// If the input describes sparse holes, the output describes sparse data, and
+/// vice-versa. The input must already have been validated.
+///
+/// This function does not modify [source]; it returns a normalized map where:
+///	* adjacent fragments are coalesced together
+///	* only the last fragment may be empty
+///	* the endOffset of the last fragment is the total size
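+///
+/// For instance (a sketch with illustrative values):
+///
+/// ```dart
+/// // Data fragments of a 100-byte file...
+/// final data = [SparseEntry(0, 25), SparseEntry(50, 25)];
+/// // ...become the complementary hole fragments (and vice versa):
+/// final holes = invertSparseEntries(data, 100);
+/// // => [SparseEntry(25, 25), SparseEntry(75, 25)]
+/// ```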
+List<SparseEntry> invertSparseEntries(List<SparseEntry> source, int size) {
+  final result = <SparseEntry>[];
+  var previous = SparseEntry(0, 0);
+  for (final current in source) {
+    /// Skip empty fragments
+    if (current.length == 0) continue;
+
+    final newLength = current.offset - previous.offset;
+    if (newLength > 0) {
+      result.add(SparseEntry(previous.offset, newLength));
+    }
+
+    previous = SparseEntry(current.end, 0);
+  }
+  final lastLength = size - previous.offset;
+  result.add(SparseEntry(previous.offset, lastLength));
+  return result;
+}
diff --git a/lib/src/third_party/tar/src/utils.dart b/lib/src/third_party/tar/src/utils.dart
new file mode 100644
index 0000000..a6b7e13
--- /dev/null
+++ b/lib/src/third_party/tar/src/utils.dart
@@ -0,0 +1,231 @@
+import 'dart:convert';
+import 'dart:math';
+import 'dart:typed_data';
+
+import 'charcodes.dart';
+import 'constants.dart';
+import 'exception.dart';
+
+const _checksumEnd = checksumOffset + checksumLength;
+const _checksumPlaceholder = $space;
+
+extension ByteBufferUtils on Uint8List {
+  String readString(int offset, int maxLength) {
+    return readStringOrNullIfEmpty(offset, maxLength) ?? '';
+  }
+
+  Uint8List sublistView(int start, [int? end]) {
+    return Uint8List.sublistView(this, start, end);
+  }
+
+  String? readStringOrNullIfEmpty(int offset, int maxLength) {
+    var data = sublistView(offset, offset + maxLength);
+    var contentLength = data.indexOf(0);
+    // If there's no \0, assume that the string fills the whole segment
+    if (contentLength.isNegative) contentLength = maxLength;
+
+    if (contentLength == 0) return null;
+
+    data = data.sublistView(0, contentLength);
+    try {
+      return utf8.decode(data);
+    } on FormatException {
+      return String.fromCharCodes(data).trim();
+    }
+  }
+
+  /// Parse an octal string encoded from index [offset] with the maximum length
+  /// [length].
+  int readOctal(int offset, int length) {
+    var result = 0;
+    var multiplier = 1;
+
+    for (var i = length - 1; i >= 0; i--) {
+      final charCode = this[offset + i];
+      // Some tar implementations add a \0 or space at the end, ignore that
+      if (charCode == 0 || charCode == $space) continue;
+      if (charCode < $0 || charCode > $9) {
+        throw TarException('Invalid octal value');
+      }
+
+      // Obtain the numerical value of this digit
+      final digit = charCode - $0;
+      result += digit * multiplier;
+      multiplier <<= 3; // Multiply by the base, 8
+    }
+
+    return result;
+  }
+
+  /// Parses an encoded int, either as base-256 or octal.
+  ///
+  /// This function may return negative numbers.
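+  ///
+  /// For instance (illustrative bytes):
+  ///
+  /// ```dart
+  /// // Base-256: the high bit of the first byte is set.
+  /// Uint8List.fromList([0x80, 0x00, 0x00, 0x01]).readNumeric(0, 4); // => 1
+  /// // Octal: plain ASCII digits, NUL- or space-terminated.
+  /// Uint8List.fromList('0000001\x00'.codeUnits).readNumeric(0, 8); // => 1
+  /// ```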
+  int readNumeric(int offset, int length) {
+    if (length == 0) return 0;
+
+    // Check for base-256 (binary) format first. If the first bit is set, then
+    // all following bits constitute a two's complement encoded number in big-
+    // endian byte order.
+    final firstByte = this[offset];
+    if (firstByte & 0x80 != 0) {
+      // Handling negative numbers relies on the following identity:
+      // -a-1 == ~a
+      //
+      // If the number is negative, we use an inversion mask to invert the
+      // data bytes and treat the value as an unsigned number.
+      final inverseMask = firstByte & 0x40 != 0 ? 0xff : 0x00;
+
+      // Ignore signal bit in the first byte
+      var x = (firstByte ^ inverseMask) & 0x7f;
+
+      for (var i = 1; i < length; i++) {
+        var byte = this[offset + i];
+        byte ^= inverseMask;
+
+        x = x << 8 | byte;
+      }
+
+      return inverseMask == 0xff ? ~x : x;
+    }
+
+    return readOctal(offset, length);
+  }
+
+  int computeUnsignedHeaderChecksum() {
+    var result = 0;
+
+    for (var i = 0; i < length; i++) {
+      result += (i < checksumOffset || i >= _checksumEnd)
+          ? this[i] // Not in range of where the checksum is written
+          : _checksumPlaceholder;
+    }
+
+    return result;
+  }
+
+  int computeSignedHeaderChecksum() {
+    var result = 0;
+
+    for (var i = 0; i < length; i++) {
+      // Note that _checksumPlaceholder.toSigned(8) == _checksumPlaceholder
+      result += (i < checksumOffset || i >= _checksumEnd)
+          ? this[i].toSigned(8)
+          : _checksumPlaceholder;
+    }
+
+    return result;
+  }
+
+  bool matchesHeader(List<int> header, {int offset = magicOffset}) {
+    for (var i = 0; i < header.length; i++) {
+      if (this[offset + i] != header[i]) return false;
+    }
+
+    return true;
+  }
+}
+
+bool isNotAscii(int i) => i > 127;
+
+/// Like [int.parse], but throwing a [TarException] instead of the more-general
+/// [FormatException] when it fails.
+int parseInt(String source) {
+  return int.tryParse(source, radix: 10) ??
+      (throw TarException('Not an int: $source'));
+}
+
+/// Takes a [paxTimeString] of the form %d.%d as described in the PAX
+/// specification. Note that this implementation accepts negative timestamps,
+/// which the PAX specification permits but which are not always portable.
+///
+/// Note that Dart's [DateTime] class only supports microsecond precision,
+/// which implies that we cannot preserve all digits, since PAX allows for
+/// nanosecond-level encoding.
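+///
+/// A minimal sketch (the timestamp is illustrative):
+///
+/// ```dart
+/// // Seconds '.' fractional seconds; digits beyond microseconds are dropped.
+/// final modified = parsePaxTime('1593160620.972986609');
+/// // => a UTC DateTime with microsecond precision.
+/// ```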
+DateTime parsePaxTime(String paxTimeString) {
+  const maxMicroSecondDigits = 6;
+
+  /// Split [paxTimeString] into seconds and sub-seconds parts.
+  var secondsString = paxTimeString;
+  var microSecondsString = '';
+  final position = paxTimeString.indexOf('.');
+  if (position >= 0) {
+    secondsString = paxTimeString.substring(0, position);
+    microSecondsString = paxTimeString.substring(position + 1);
+  }
+
+  /// Parse the seconds.
+  final seconds = int.tryParse(secondsString);
+  if (seconds == null) {
+    throw TarException.header('Invalid PAX time $paxTimeString detected!');
+  }
+
+  if (microSecondsString.replaceAll(RegExp('[0-9]'), '') != '') {
+    throw TarException.header(
+        'Invalid nanoseconds $microSecondsString detected');
+  }
+
+  microSecondsString = microSecondsString.padRight(maxMicroSecondDigits, '0');
+  microSecondsString = microSecondsString.substring(0, maxMicroSecondDigits);
+
+  var microSeconds =
+      microSecondsString.isEmpty ? 0 : int.parse(microSecondsString);
+  if (paxTimeString.startsWith('-')) microSeconds = -microSeconds;
+
+  return microsecondsSinceEpoch(microSeconds + seconds * pow(10, 6).toInt());
+}
+
+DateTime secondsSinceEpoch(int timestamp) {
+  return DateTime.fromMillisecondsSinceEpoch(timestamp * 1000, isUtc: true);
+}
+
+DateTime millisecondsSinceEpoch(int milliseconds) {
+  return DateTime.fromMillisecondsSinceEpoch(milliseconds, isUtc: true);
+}
+
+DateTime microsecondsSinceEpoch(int microseconds) {
+  return DateTime.fromMicrosecondsSinceEpoch(microseconds, isUtc: true);
+}
+
+int numBlocks(int fileSize) {
+  if (fileSize % blockSize == 0) return fileSize ~/ blockSize;
+
+  return fileSize ~/ blockSize + 1;
+}
+
+int nextBlockSize(int fileSize) => numBlocks(fileSize) * blockSize;
+
+extension ToTyped on List<int> {
+  Uint8List asUint8List() {
+    // Flow analysis doesn't work on this.
+    final $this = this;
+    return $this is Uint8List ? $this : Uint8List.fromList(this);
+  }
+
+  bool get isAllZeroes {
+    for (var i = 0; i < length; i++) {
+      if (this[i] != 0) return false;
+    }
+
+    return true;
+  }
+}
+
+/// Generates a chunked stream of [length] zeroes.
+Stream<List<int>> zeroes(int length) async* {
+  // Emit data in chunks for efficiency
+  const chunkSize = 4 * 1024;
+  if (length < chunkSize) {
+    yield Uint8List(length);
+    return;
+  }
+
+  final chunk = Uint8List(chunkSize);
+  for (var i = 0; i < length ~/ chunkSize; i++) {
+    yield chunk;
+  }
+
+  final remainingBytes = length % chunkSize;
+  if (remainingBytes != 0) {
+    yield Uint8List(remainingBytes);
+  }
+}
diff --git a/lib/src/third_party/tar/src/writer.dart b/lib/src/third_party/tar/src/writer.dart
new file mode 100644
index 0000000..8d8073f
--- /dev/null
+++ b/lib/src/third_party/tar/src/writer.dart
@@ -0,0 +1,297 @@
+import 'dart:async';
+import 'dart:convert';
+import 'dart:typed_data';
+
+import 'charcodes.dart';
+import 'constants.dart';
+import 'entry.dart';
+import 'format.dart';
+import 'header.dart';
+
+class _WritingTransformer extends StreamTransformerBase<TarEntry, List<int>> {
+  const _WritingTransformer();
+
+  @override
+  Stream<List<int>> bind(Stream<TarEntry> stream) {
+    // sync because the controller proxies another stream
+    final controller = StreamController<List<int>>(sync: true);
+    controller.onListen = () {
+      stream.pipe(tarWritingSink(controller));
+    };
+
+    return controller.stream;
+  }
+}
+
+/// A stream transformer writing tar entries as byte streams.
+///
+/// Regardless of the input stream, the stream returned by this
+/// [StreamTransformer.bind] is a single-subscription stream.
+/// Apart from that, subscriptions, cancellations, pauses and resumes are
+/// propagated as one would expect from a [StreamTransformer].
+///
+/// When piping the resulting stream into a [StreamConsumer], consider using
+/// [tarWritingSink] directly.
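+///
+/// A minimal sketch of composing this transformer with gzip compression (the
+/// file path is illustrative):
+///
+/// ```dart
+/// import 'dart:io';
+/// import 'package:tar/tar.dart';
+///
+/// Future<void> writeCompressedArchive(Stream<TarEntry> entries) async {
+///   final output = File('/tmp/out.tar.gz').openWrite();
+///   await entries.transform(tarWriter).transform(gzip.encoder).pipe(output);
+/// }
+/// ```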
+const StreamTransformer<TarEntry, List<int>> tarWriter = _WritingTransformer();
+
+/// Create a sink emitting encoded tar files to the [output] sink.
+///
+/// For instance, you can use this to write a tar file:
+///
+/// ```dart
+/// import 'dart:convert';
+/// import 'dart:io';
+/// import 'package:tar/tar.dart';
+///
+/// Future<void> main() async {
+///   Stream<TarEntry> entries = Stream.value(
+///     TarEntry.data(
+///       TarHeader(
+///         name: 'example.txt',
+///         mode: int.parse('644', radix: 8),
+///       ),
+///       utf8.encode('This is the content of the tar file'),
+///     ),
+///   );
+///
+///   final output = File('/tmp/test.tar').openWrite();
+///   await entries.pipe(tarWritingSink(output));
+/// }
+/// ```
+///
+/// Note that, if you don't set the [TarHeader.size], outgoing tar entries need
+/// to be buffered once, which decreases performance.
+///
+/// See also:
+///  - [tarWriter], a stream transformer using this sink
+///  - [StreamSink]
+StreamSink<TarEntry> tarWritingSink(StreamSink<List<int>> output) {
+  return _WritingSink(output);
+}
+
+class _WritingSink extends StreamSink<TarEntry> {
+  final StreamSink<List<int>> _output;
+
+  int _paxHeaderCount = 0;
+  bool _closed = false;
+  final Completer<Object?> _done = Completer();
+
+  int _pendingOperations = 0;
+  Future<void> _ready = Future.value();
+
+  _WritingSink(this._output);
+
+  @override
+  Future<void> get done => _done.future;
+
+  @override
+  Future<void> add(TarEntry event) {
+    if (_closed) {
+      throw StateError('Cannot add event after close was called');
+    }
+    return _doWork(() => _safeAdd(event));
+  }
+
+  Future<void> _doWork(FutureOr<void> Function() work) {
+    _pendingOperations++;
+    // Chain futures to make sure we only write one entry at a time.
+    return _ready = _ready
+        .then((_) => work())
+        .catchError(_output.addError)
+        .whenComplete(() {
+      _pendingOperations--;
+
+      if (_closed && _pendingOperations == 0) {
+        _done.complete(_output.close());
+      }
+    });
+  }
+
+  Future<void> _safeAdd(TarEntry event) async {
+    final header = event.header;
+    var size = header.size;
+    Uint8List? bufferedData;
+    if (size < 0) {
+      final builder = BytesBuilder();
+      await event.contents.forEach(builder.add);
+      bufferedData = builder.takeBytes();
+      size = bufferedData.length;
+    }
+
+    var nameBytes = utf8.encode(header.name);
+    var linkBytes = utf8.encode(header.linkName ?? '');
+    var gnameBytes = utf8.encode(header.groupName ?? '');
+    var unameBytes = utf8.encode(header.userName ?? '');
+
+    // We only get 100 chars for the name and link name. If they are longer, we
+    // have to insert an entry just to store the names. Some tar implementations
+    // expect them to be zero-terminated, so use 99 chars to be safe.
+    final paxHeader = <String, List<int>>{};
+    if (nameBytes.length > 99) {
+      paxHeader[paxPath] = nameBytes;
+      nameBytes = nameBytes.sublist(0, 99);
+    }
+    if (linkBytes.length > 99) {
+      paxHeader[paxLinkpath] = linkBytes;
+      linkBytes = linkBytes.sublist(0, 99);
+    }
+
+    // It's even worse for users and groups, where we only get 31 usable chars.
+    if (gnameBytes.length > 31) {
+      paxHeader[paxGname] = gnameBytes;
+      gnameBytes = gnameBytes.sublist(0, 31);
+    }
+    if (unameBytes.length > 31) {
+      paxHeader[paxUname] = unameBytes;
+      unameBytes = unameBytes.sublist(0, 31);
+    }
+
+    if (size > maxIntFor12CharOct) {
+      paxHeader[paxSize] = ascii.encode(size.toString());
+    }
+
+    if (paxHeader.isNotEmpty) {
+      await _writePaxHeader(paxHeader);
+    }
+
+    final headerBlock = Uint8List(blockSize)
+      ..setAll(0, nameBytes)
+      ..setUint(header.mode, 100, 8)
+      ..setUint(header.userId, 108, 8)
+      ..setUint(header.groupId, 116, 8)
+      ..setUint(size, 124, 12)
+      ..setUint(header.modified.millisecondsSinceEpoch ~/ 1000, 136, 12)
+      ..[156] = typeflagToByte(header.typeFlag)
+      ..setAll(157, linkBytes)
+      ..setAll(257, magicUstar)
+      ..setUint(0, 263, 2) // version
+      ..setAll(265, unameBytes)
+      ..setAll(297, gnameBytes)
+      // To calculate the checksum, we first fill the checksum range with spaces
+      ..setAll(148, List.filled(8, $space));
+
+    // Then, we take the sum of the header
+    var checksum = 0;
+    for (final byte in headerBlock) {
+      checksum += byte;
+    }
+    headerBlock.setUint(checksum, 148, 8);
+
+    _output.add(headerBlock);
+
+    // Write content.
+    if (bufferedData != null) {
+      _output.add(bufferedData);
+    } else {
+      await event.contents.forEach(_output.add);
+    }
+
+    final padding = -size % blockSize;
+    _output.add(Uint8List(padding));
+  }
+
+  /// Writes an extended pax header.
+  ///
+  /// https://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_03
+  Future<void> _writePaxHeader(Map<String, List<int>> values) {
+    final buffer = BytesBuilder();
+    // format of each entry: "%d %s=%s\n", <length>, <keyword>, <value>
+    // note that the length includes the trailing \n and the length description
+    // itself.
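+    //
+    // For example (illustrative values), the pair ('path',
+    // 'a/fairly/long/name.txt') becomes '31 path=a/fairly/long/name.txt\n':
+    // the leading 31 counts every byte of the record, itself included.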
+    values.forEach((key, value) {
+      final encodedKey = utf8.encode(key);
+      // +3 for the whitespace, the equals and the \n
+      final payloadLength = encodedKey.length + value.length + 3;
+      var indicatedLength = payloadLength;
+
+      // The indicated length contains the length (in decimals) itself. So if
+      // we had payloadLength=9, then we'd prefix a 9 at which point the whole
+      // string would have a length of 10. If that happens, increment length.
+      var actualLength = payloadLength + indicatedLength.toString().length;
+
+      while (actualLength != indicatedLength) {
+        indicatedLength++;
+        actualLength = payloadLength + indicatedLength.toString().length;
+      }
+
+      // With that sorted out, let's add the line
+      buffer
+        ..add(utf8.encode(indicatedLength.toString()))
+        ..addByte($space)
+        ..add(encodedKey)
+        ..addByte($equal)
+        ..add(value)
+        ..addByte($lf); // \n
+    });
+
+    final paxData = buffer.takeBytes();
+    final file = TarEntry.data(
+      HeaderImpl.internal(
+        format: TarFormat.pax,
+        modified: DateTime.fromMillisecondsSinceEpoch(0),
+        name: 'PaxHeader/${_paxHeaderCount++}',
+        mode: 0,
+        size: paxData.length,
+        typeFlag: TypeFlag.xHeader,
+      ),
+      paxData,
+    );
+    return _safeAdd(file);
+  }
+
+  @override
+  void addError(Object error, [StackTrace? stackTrace]) {
+    _output.addError(error, stackTrace);
+  }
+
+  @override
+  Future<void> addStream(Stream<TarEntry> stream) async {
+    await for (final entry in stream) {
+      await add(entry);
+    }
+  }
+
+  @override
+  Future<void> close() async {
+    if (!_closed) {
+      _closed = true;
+
+      // Add two empty blocks at the end.
+      await _doWork(() {
+        _output.add(zeroBlock);
+        _output.add(zeroBlock);
+      });
+    }
+
+    return done;
+  }
+}
+
+extension on Uint8List {
+  void setUint(int value, int position, int length) {
+    // Values are encoded as an octal string, terminated and left-padded with
+    // space chars.
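+    //
+    // For example (an illustrative call): setUint(8, position, 8) writes the
+    // eight bytes '     10 ' (five spaces, '1', '0', trailing space).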
+
+    // Set terminating space char.
+    this[position + length - 1] = $space;
+
+    // Write as octal value, we write from right to left
+    var number = value;
+    var needsExplicitZero = number == 0;
+
+    for (var pos = position + length - 2; pos >= position; pos--) {
+      if (number != 0) {
+        // Write the last octal digit of the number (i.e. the last 3 bits)
+        this[pos] = (number & 7) + $0;
+        // then drop the last digit (divide by 8 = 2³)
+        number >>= 3;
+      } else if (needsExplicitZero) {
+        this[pos] = $0;
+        needsExplicitZero = false;
+      } else {
+        // done, left-pad with spaces
+        this[pos] = $space;
+      }
+    }
+  }
+}
diff --git a/lib/src/third_party/tar/tar.dart b/lib/src/third_party/tar/tar.dart
new file mode 100644
index 0000000..dd5f896
--- /dev/null
+++ b/lib/src/third_party/tar/tar.dart
@@ -0,0 +1,17 @@
+/// Streaming tar implementation for Dart.
+///
+/// To read tar files, see [TarReader]. To write tar files, use [tarWritingSink]
+/// or [tarWriter].
+library tar;
+
+// For dartdoc.
+import 'src/reader.dart';
+import 'src/writer.dart';
+
+export 'src/constants.dart' show TypeFlag;
+export 'src/entry.dart';
+export 'src/exception.dart';
+export 'src/format.dart';
+export 'src/header.dart' show TarHeader;
+export 'src/reader.dart' show TarReader;
+export 'src/writer.dart' show tarWritingSink, tarWriter;
diff --git a/pubspec.yaml b/pubspec.yaml
index 2790971..67a8d94 100644
--- a/pubspec.yaml
+++ b/pubspec.yaml
@@ -24,7 +24,6 @@
   shelf: ^1.1.1
   source_span: ^1.8.1
   stack_trace: ^1.10.0
-  tar: ^0.3.3
   yaml: ^3.1.0
 
 dev_dependencies:
diff --git a/test/io_test.dart b/test/io_test.dart
index 985d21e..b54bc91 100644
--- a/test/io_test.dart
+++ b/test/io_test.dart
@@ -11,7 +11,7 @@
 import 'package:path/path.dart' as path;
 import 'package:pub/src/exceptions.dart';
 import 'package:pub/src/io.dart';
-import 'package:tar/tar.dart';
+import 'package:pub/src/third_party/tar/tar.dart';
 import 'package:test/test.dart';
 
 import 'descriptor.dart' as d;