diff --git a/SharpCompress.sln.DotSettings b/SharpCompress.sln.DotSettings
index 9570036fb..248b4c350 100644
--- a/SharpCompress.sln.DotSettings
+++ b/SharpCompress.sln.DotSettings
@@ -126,4 +126,7 @@
True
True
True
- True
+ True
+ <SessionState ContinuousTestingIsOn="False" ContinuousTestingMode="0" FrameworkVersion="{x:Null}" IsLocked="False" Name="All tests from Solution" PlatformMonoPreference="{x:Null}" PlatformType="{x:Null}" xmlns="urn:schemas-jetbrains-com:jetbrains-ut-session" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml">
+ <Solution />
+</SessionState>
diff --git a/src/SharpCompress/Common/GZip/GZipEntry.cs b/src/SharpCompress/Common/GZip/GZipEntry.cs
index 8b2d3e937..76b2b1c9f 100644
--- a/src/SharpCompress/Common/GZip/GZipEntry.cs
+++ b/src/SharpCompress/Common/GZip/GZipEntry.cs
@@ -1,7 +1,6 @@
using System;
using System.Collections.Generic;
using System.IO;
-using System.Text;
namespace SharpCompress.Common.GZip
{
diff --git a/src/SharpCompress/Common/IEntry.Extensions.cs b/src/SharpCompress/Common/IEntry.Extensions.cs
index 9c1e31af8..02acd59a6 100644
--- a/src/SharpCompress/Common/IEntry.Extensions.cs
+++ b/src/SharpCompress/Common/IEntry.Extensions.cs
@@ -1,5 +1,4 @@
using System.IO;
-using SharpCompress.Readers;
namespace SharpCompress.Common
{
diff --git a/src/SharpCompress/Common/Rar/Headers/AVHeader.cs b/src/SharpCompress/Common/Rar/Headers/AVHeader.cs
index 51dda2cf2..eb89804c5 100644
--- a/src/SharpCompress/Common/Rar/Headers/AVHeader.cs
+++ b/src/SharpCompress/Common/Rar/Headers/AVHeader.cs
@@ -7,8 +7,10 @@ internal class AvHeader : RarHeader
public AvHeader(RarHeader header, RarCrcBinaryReader reader)
: base(header, reader, HeaderType.Av)
{
- if (IsRar5)
+ if (IsRar5)
+ {
throw new InvalidFormatException("unexpected rar5 record");
+ }
}
protected override void ReadFinish(MarkingBinaryReader reader)
diff --git a/src/SharpCompress/Common/Rar/Headers/ArchiveHeader.cs b/src/SharpCompress/Common/Rar/Headers/ArchiveHeader.cs
index 72b4bd4b9..5e9497d3c 100644
--- a/src/SharpCompress/Common/Rar/Headers/ArchiveHeader.cs
+++ b/src/SharpCompress/Common/Rar/Headers/ArchiveHeader.cs
@@ -38,7 +38,11 @@ protected override void ReadFinish(MarkingBinaryReader reader)
private void ReadLocator(MarkingBinaryReader reader) {
var size = reader.ReadRarVIntUInt16();
var type = reader.ReadRarVIntUInt16();
- if (type != 1) throw new InvalidFormatException("expected locator record");
+ if (type != 1)
+ {
+ throw new InvalidFormatException("expected locator record");
+ }
+
var flags = reader.ReadRarVIntUInt16();
const ushort hasQuickOpenOffset = 0x01;
const ushort hasRecoveryOffset = 0x02;
diff --git a/src/SharpCompress/Common/Rar/Headers/CommentHeader.cs b/src/SharpCompress/Common/Rar/Headers/CommentHeader.cs
index 4845ce2bd..e03d46a5b 100644
--- a/src/SharpCompress/Common/Rar/Headers/CommentHeader.cs
+++ b/src/SharpCompress/Common/Rar/Headers/CommentHeader.cs
@@ -7,7 +7,10 @@ internal class CommentHeader : RarHeader
protected CommentHeader(RarHeader header, RarCrcBinaryReader reader)
: base(header, reader, HeaderType.Comment)
{
- if (IsRar5) throw new InvalidFormatException("unexpected rar5 record");
+ if (IsRar5)
+ {
+ throw new InvalidFormatException("unexpected rar5 record");
+ }
}
protected override void ReadFinish(MarkingBinaryReader reader)
diff --git a/src/SharpCompress/Common/Rar/Headers/MarkHeader.cs b/src/SharpCompress/Common/Rar/Headers/MarkHeader.cs
index 3a27cf69f..8af80c8c2 100644
--- a/src/SharpCompress/Common/Rar/Headers/MarkHeader.cs
+++ b/src/SharpCompress/Common/Rar/Headers/MarkHeader.cs
@@ -46,19 +46,38 @@ public static MarkHeader Read(Stream stream, bool leaveStreamOpen, bool lookForH
if (b == 0x61)
{
b = GetByte(stream); start++;
- if (b != 0x72) continue;
+ if (b != 0x72)
+ {
+ continue;
+ }
+
b = GetByte(stream); start++;
- if (b != 0x21) continue;
+ if (b != 0x21)
+ {
+ continue;
+ }
+
b = GetByte(stream); start++;
- if (b != 0x1a) continue;
+ if (b != 0x1a)
+ {
+ continue;
+ }
+
b = GetByte(stream); start++;
- if (b != 0x07) continue;
+ if (b != 0x07)
+ {
+ continue;
+ }
b = GetByte(stream); start++;
if (b == 1)
{
b = GetByte(stream); start++;
- if (b != 0) continue;
+ if (b != 0)
+ {
+ continue;
+ }
+
return new MarkHeader(true); // Rar5
}
else if (b == 0)
@@ -69,9 +88,17 @@ public static MarkHeader Read(Stream stream, bool leaveStreamOpen, bool lookForH
else if (b == 0x45)
{
b = GetByte(stream); start++;
- if (b != 0x7e) continue;
+ if (b != 0x7e)
+ {
+ continue;
+ }
+
b = GetByte(stream); start++;
- if (b != 0x5e) continue;
+ if (b != 0x5e)
+ {
+ continue;
+ }
+
throw new InvalidFormatException("Rar format version pre-4 is unsupported.");
}
}
diff --git a/src/SharpCompress/Common/Rar/Headers/ProtectHeader.cs b/src/SharpCompress/Common/Rar/Headers/ProtectHeader.cs
index f7f0e8ba9..9a43f10bc 100644
--- a/src/SharpCompress/Common/Rar/Headers/ProtectHeader.cs
+++ b/src/SharpCompress/Common/Rar/Headers/ProtectHeader.cs
@@ -8,7 +8,10 @@ internal class ProtectHeader : RarHeader
public ProtectHeader(RarHeader header, RarCrcBinaryReader reader)
: base(header, reader, HeaderType.Protect)
{
- if (IsRar5) throw new InvalidFormatException("unexpected rar5 record");
+ if (IsRar5)
+ {
+ throw new InvalidFormatException("unexpected rar5 record");
+ }
}
protected override void ReadFinish(MarkingBinaryReader reader)
diff --git a/src/SharpCompress/Common/Rar/Headers/RarHeaderFactory.cs b/src/SharpCompress/Common/Rar/Headers/RarHeaderFactory.cs
index d5b2b80b6..9a5bb2b42 100644
--- a/src/SharpCompress/Common/Rar/Headers/RarHeaderFactory.cs
+++ b/src/SharpCompress/Common/Rar/Headers/RarHeaderFactory.cs
@@ -1,4 +1,3 @@
-using System;
using System.Collections.Generic;
using System.IO;
using SharpCompress.IO;
diff --git a/src/SharpCompress/Common/Rar/Headers/SignHeader.cs b/src/SharpCompress/Common/Rar/Headers/SignHeader.cs
index 58b3baefb..3b95f6af8 100644
--- a/src/SharpCompress/Common/Rar/Headers/SignHeader.cs
+++ b/src/SharpCompress/Common/Rar/Headers/SignHeader.cs
@@ -7,7 +7,10 @@ internal class SignHeader : RarHeader
protected SignHeader(RarHeader header, RarCrcBinaryReader reader)
: base(header, reader, HeaderType.Sign)
{
- if (IsRar5) throw new InvalidFormatException("unexpected rar5 record");
+ if (IsRar5)
+ {
+ throw new InvalidFormatException("unexpected rar5 record");
+ }
}
protected override void ReadFinish(MarkingBinaryReader reader)
diff --git a/src/SharpCompress/Common/SevenZip/SevenZipVolume.cs b/src/SharpCompress/Common/SevenZip/SevenZipVolume.cs
index 32b50cc79..1609c8171 100644
--- a/src/SharpCompress/Common/SevenZip/SevenZipVolume.cs
+++ b/src/SharpCompress/Common/SevenZip/SevenZipVolume.cs
@@ -1,5 +1,4 @@
using System.IO;
-using SharpCompress.Archives;
using SharpCompress.Readers;
namespace SharpCompress.Common.SevenZip
diff --git a/src/SharpCompress/Common/Tar/TarEntry.cs b/src/SharpCompress/Common/Tar/TarEntry.cs
index 6ec5d3199..1d76e44d7 100644
--- a/src/SharpCompress/Common/Tar/TarEntry.cs
+++ b/src/SharpCompress/Common/Tar/TarEntry.cs
@@ -3,7 +3,6 @@
using System.IO;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.IO;
-using System.Text;
namespace SharpCompress.Common.Tar
{
diff --git a/src/SharpCompress/Common/Tar/TarHeaderFactory.cs b/src/SharpCompress/Common/Tar/TarHeaderFactory.cs
index 4671f0944..da8d63401 100644
--- a/src/SharpCompress/Common/Tar/TarHeaderFactory.cs
+++ b/src/SharpCompress/Common/Tar/TarHeaderFactory.cs
@@ -2,7 +2,6 @@
using System.IO;
using SharpCompress.Common.Tar.Headers;
using SharpCompress.IO;
-using System.Text;
namespace SharpCompress.Common.Tar
{
diff --git a/src/SharpCompress/Common/Zip/Headers/DirectoryEntryHeader.cs b/src/SharpCompress/Common/Zip/Headers/DirectoryEntryHeader.cs
index 6a2dd04ac..8d5e2db6f 100644
--- a/src/SharpCompress/Common/Zip/Headers/DirectoryEntryHeader.cs
+++ b/src/SharpCompress/Common/Zip/Headers/DirectoryEntryHeader.cs
@@ -1,6 +1,5 @@
using System.IO;
using System.Linq;
-using System.Text;
namespace SharpCompress.Common.Zip.Headers
{
diff --git a/src/SharpCompress/Common/Zip/Headers/IgnoreHeader.cs b/src/SharpCompress/Common/Zip/Headers/IgnoreHeader.cs
index 100b8f5ed..0a086ad7a 100644
--- a/src/SharpCompress/Common/Zip/Headers/IgnoreHeader.cs
+++ b/src/SharpCompress/Common/Zip/Headers/IgnoreHeader.cs
@@ -1,5 +1,4 @@
-using System;
-using System.IO;
+using System.IO;
namespace SharpCompress.Common.Zip.Headers
{
diff --git a/src/SharpCompress/Common/Zip/Headers/LocalEntryHeader.cs b/src/SharpCompress/Common/Zip/Headers/LocalEntryHeader.cs
index 3bc437522..36a1a93de 100644
--- a/src/SharpCompress/Common/Zip/Headers/LocalEntryHeader.cs
+++ b/src/SharpCompress/Common/Zip/Headers/LocalEntryHeader.cs
@@ -1,6 +1,5 @@
using System.IO;
using System.Linq;
-using System.Text;
namespace SharpCompress.Common.Zip.Headers
{
diff --git a/src/SharpCompress/Common/Zip/Headers/Zip64DirectoryEndHeader.cs b/src/SharpCompress/Common/Zip/Headers/Zip64DirectoryEndHeader.cs
index 443cbd8a0..5d6442161 100644
--- a/src/SharpCompress/Common/Zip/Headers/Zip64DirectoryEndHeader.cs
+++ b/src/SharpCompress/Common/Zip/Headers/Zip64DirectoryEndHeader.cs
@@ -1,5 +1,4 @@
-using System;
-using System.IO;
+using System.IO;
namespace SharpCompress.Common.Zip.Headers
{
diff --git a/src/SharpCompress/Common/Zip/PkwareTraditionalEncryptionData.cs b/src/SharpCompress/Common/Zip/PkwareTraditionalEncryptionData.cs
index 2a328003e..7f2421d1d 100644
--- a/src/SharpCompress/Common/Zip/PkwareTraditionalEncryptionData.cs
+++ b/src/SharpCompress/Common/Zip/PkwareTraditionalEncryptionData.cs
@@ -1,5 +1,4 @@
using System;
-using System.Text;
using SharpCompress.Common.Zip.Headers;
using SharpCompress.Compressors.Deflate;
diff --git a/src/SharpCompress/Common/Zip/SeekableZipHeaderFactory.cs b/src/SharpCompress/Common/Zip/SeekableZipHeaderFactory.cs
index 23f4cbe85..dc8e0cb03 100644
--- a/src/SharpCompress/Common/Zip/SeekableZipHeaderFactory.cs
+++ b/src/SharpCompress/Common/Zip/SeekableZipHeaderFactory.cs
@@ -3,7 +3,6 @@
using System.IO;
using SharpCompress.Common.Zip.Headers;
using SharpCompress.IO;
-using System.Text;
namespace SharpCompress.Common.Zip
{
@@ -35,7 +34,9 @@ internal IEnumerable<ZipHeader> ReadSeekableHeader(Stream stream)
stream.Seek(zip64Locator.RelativeOffsetOfTheEndOfDirectoryRecord, SeekOrigin.Begin);
uint zip64Signature = reader.ReadUInt32();
if (zip64Signature != ZIP64_END_OF_CENTRAL_DIRECTORY)
+ {
throw new ArchiveException("Failed to locate the Zip64 Header");
+ }
var zip64Entry = new Zip64DirectoryEndHeader();
zip64Entry.Read(reader);
@@ -55,7 +56,9 @@ internal IEnumerable<ZipHeader> ReadSeekableHeader(Stream stream)
position = stream.Position;
if (nextHeader == null)
+ {
yield break;
+ }
if (nextHeader is DirectoryEntryHeader entryHeader)
{
diff --git a/src/SharpCompress/Common/Zip/StreamingZipHeaderFactory.cs b/src/SharpCompress/Common/Zip/StreamingZipHeaderFactory.cs
index c7c69eb86..249a80fa3 100644
--- a/src/SharpCompress/Common/Zip/StreamingZipHeaderFactory.cs
+++ b/src/SharpCompress/Common/Zip/StreamingZipHeaderFactory.cs
@@ -2,7 +2,6 @@
using System.IO;
using SharpCompress.Common.Zip.Headers;
using SharpCompress.IO;
-using System.Text;
namespace SharpCompress.Common.Zip
{
diff --git a/src/SharpCompress/Common/Zip/ZipHeaderFactory.cs b/src/SharpCompress/Common/Zip/ZipHeaderFactory.cs
index 0d6b94022..ce1d6976a 100644
--- a/src/SharpCompress/Common/Zip/ZipHeaderFactory.cs
+++ b/src/SharpCompress/Common/Zip/ZipHeaderFactory.cs
@@ -3,7 +3,6 @@
using System.Linq;
using SharpCompress.Common.Zip.Headers;
using SharpCompress.IO;
-using System.Text;
namespace SharpCompress.Common.Zip
{
diff --git a/src/SharpCompress/Compressors/Deflate64/Deflate64Stream.cs b/src/SharpCompress/Compressors/Deflate64/Deflate64Stream.cs
index be7300c20..2bce90ea1 100644
--- a/src/SharpCompress/Compressors/Deflate64/Deflate64Stream.cs
+++ b/src/SharpCompress/Compressors/Deflate64/Deflate64Stream.cs
@@ -3,7 +3,6 @@
// See the LICENSE file in the project root for more information.
using SharpCompress.Common.Zip;
-using SharpCompress.Compressors.Deflate;
using System;
using System.Diagnostics;
using System.IO;
@@ -23,11 +22,19 @@ public sealed class Deflate64Stream : Stream
public Deflate64Stream(Stream stream, CompressionMode mode)
{
if (stream == null)
+ {
throw new ArgumentNullException(nameof(stream));
+ }
+
if (mode != CompressionMode.Decompress)
+ {
throw new NotImplementedException("Deflate64: this implementation only supports decompression");
+ }
+
if (!stream.CanRead)
+ {
throw new ArgumentException("Deflate64: input stream is not readable", nameof(stream));
+ }
InitializeInflater(stream, ZipCompressionMethod.Deflate64);
}
@@ -40,7 +47,9 @@ private void InitializeInflater(Stream stream, ZipCompressionMethod method = Zip
Debug.Assert(stream != null);
Debug.Assert(method == ZipCompressionMethod.Deflate || method == ZipCompressionMethod.Deflate64);
if (!stream.CanRead)
+ {
throw new ArgumentException("Deflate64: input stream is not readable", nameof(stream));
+ }
_inflater = new InflaterManaged(method == ZipCompressionMethod.Deflate64);
@@ -152,22 +161,32 @@ public override int Read(byte[] array, int offset, int count)
private void ValidateParameters(byte[] array, int offset, int count)
{
if (array == null)
+ {
throw new ArgumentNullException(nameof(array));
+ }
if (offset < 0)
+ {
throw new ArgumentOutOfRangeException(nameof(offset));
+ }
if (count < 0)
+ {
throw new ArgumentOutOfRangeException(nameof(count));
+ }
if (array.Length - offset < count)
+ {
throw new ArgumentException("Deflate64: invalid offset/count combination");
+ }
}
private void EnsureNotDisposed()
{
if (_stream == null)
+ {
ThrowStreamClosedException();
+ }
}
[MethodImpl(MethodImplOptions.NoInlining)]
@@ -179,7 +198,9 @@ private static void ThrowStreamClosedException()
private void EnsureDecompressionMode()
{
if (_mode != CompressionMode.Decompress)
+ {
ThrowCannotReadFromDeflateManagedStreamException();
+ }
}
[MethodImpl(MethodImplOptions.NoInlining)]
@@ -191,7 +212,9 @@ private static void ThrowCannotReadFromDeflateManagedStreamException()
private void EnsureCompressionMode()
{
if (_mode != CompressionMode.Compress)
+ {
ThrowCannotWriteToDeflateManagedStreamException();
+ }
}
[MethodImpl(MethodImplOptions.NoInlining)]
@@ -209,10 +232,14 @@ public override void Write(byte[] array, int offset, int count)
private void PurgeBuffers(bool disposing)
{
if (!disposing)
+ {
return;
+ }
if (_stream == null)
+ {
return;
+ }
Flush();
}
diff --git a/src/SharpCompress/Compressors/Deflate64/HuffmanTree.cs b/src/SharpCompress/Compressors/Deflate64/HuffmanTree.cs
index 62c24b59e..21cb46cb0 100644
--- a/src/SharpCompress/Compressors/Deflate64/HuffmanTree.cs
+++ b/src/SharpCompress/Compressors/Deflate64/HuffmanTree.cs
@@ -277,9 +277,14 @@ public int GetNextSymbol(InputBuffer input)
{
symbol = -symbol;
if ((bitBuffer & mask) == 0)
+ {
symbol = _left[symbol];
+ }
else
+ {
symbol = _right[symbol];
+ }
+
mask <<= 1;
} while (symbol < 0);
}
diff --git a/src/SharpCompress/Compressors/Deflate64/InflaterManaged.cs b/src/SharpCompress/Compressors/Deflate64/InflaterManaged.cs
index 7ce389b7c..a0f1f2c82 100644
--- a/src/SharpCompress/Compressors/Deflate64/InflaterManaged.cs
+++ b/src/SharpCompress/Compressors/Deflate64/InflaterManaged.cs
@@ -220,7 +220,9 @@ private bool Decode()
// reading bfinal bit
// Need 1 bit
if (!_input.EnsureBitsAvailable(1))
+ {
return false;
+ }
_bfinal = _input.GetBits(1);
_state = InflaterState.ReadingBType;
diff --git a/src/SharpCompress/Compressors/LZMA/AesDecoderStream.cs b/src/SharpCompress/Compressors/LZMA/AesDecoderStream.cs
index 41c45c78e..c2101f678 100644
--- a/src/SharpCompress/Compressors/LZMA/AesDecoderStream.cs
+++ b/src/SharpCompress/Compressors/LZMA/AesDecoderStream.cs
@@ -30,7 +30,9 @@ public AesDecoderStream(Stream input, byte[] info, IPasswordProvider pass, long
mLimit = limit;
if (((uint) input.Length & 15) != 0)
+ {
throw new NotSupportedException("AES decoder does not support padding.");
+ }
int numCyclesPower;
byte[] salt, seed;
@@ -90,10 +92,14 @@ public override int Read(byte[] buffer, int offset, int count)
{
if (count == 0
|| mWritten == mLimit)
+ {
return 0;
+ }
if (mUnderflow > 0)
+ {
return HandleUnderflow(buffer, offset, count);
+ }
// Need at least 16 bytes to proceed.
if (mEnding - mOffset < 16)
@@ -120,16 +126,22 @@ public override int Read(byte[] buffer, int offset, int count)
// Currently this is handled by forcing an underflow if
// the stream length is not a multiple of the block size.
if (count > mLimit - mWritten)
+ {
count = (int) (mLimit - mWritten);
+ }
// We cannot transform less than 16 bytes into the target buffer,
// but we also cannot return zero, so we need to handle this.
// We transform the data locally and use our own buffer as cache.
if (count < 16)
+ {
return HandleUnderflow(buffer, offset, count);
+ }
if (count > mEnding - mOffset)
+ {
count = mEnding - mOffset;
+ }
// Otherwise we transform directly into the target buffer.
int processed = mDecoder.TransformBlock(mBuffer, mOffset, count & ~15, buffer, offset);
@@ -157,13 +169,17 @@ private void Init(byte[] info, out int numCyclesPower, out byte[] salt, out byte
int saltSize = (bt >> 7) & 1;
int ivSize = (bt >> 6) & 1;
if (info.Length == 1)
+ {
throw new InvalidOperationException();
+ }
byte bt2 = info[1];
saltSize += (bt2 >> 4);
ivSize += (bt2 & 15);
if (info.Length < 2 + saltSize + ivSize)
+ {
throw new InvalidOperationException();
+ }
salt = new byte[saltSize];
for (int i = 0; i < saltSize; i++)
@@ -174,7 +190,9 @@ private void Init(byte[] info, out int numCyclesPower, out byte[] salt, out byte
iv[i] = info[i + saltSize + 2];
if (numCyclesPower > 24)
+ {
throw new NotSupportedException();
+ }
}
private byte[] InitKey(int mNumCyclesPower, byte[] salt, byte[] pass)
@@ -208,7 +226,9 @@ private byte[] InitKey(int mNumCyclesPower, byte[] salt, byte[] pass)
// (It also ensures the counter is little endian, which BitConverter does not.)
for (int i = 0; i < 8; i++)
if (++counter[i] != 0)
+ {
break;
+ }
}
return sha.GetHashAndReset();
}
@@ -227,7 +247,9 @@ private byte[] InitKey(int mNumCyclesPower, byte[] salt, byte[] pass)
// (It also ensures the counter is little endian, which BitConverter does not.)
for (int i = 0; i < 8; i++)
if (++counter[i] != 0)
+ {
break;
+ }
}
sha.TransformFinalBlock(counter, 0, 0);
@@ -248,7 +270,9 @@ private int HandleUnderflow(byte[] buffer, int offset, int count)
}
if (count > mUnderflow)
+ {
count = mUnderflow;
+ }
Buffer.BlockCopy(mBuffer, mOffset, buffer, offset, count);
mWritten += count;
diff --git a/src/SharpCompress/Compressors/Rar/RarCrcStream.cs b/src/SharpCompress/Compressors/Rar/RarCrcStream.cs
index 329587e72..127cef25c 100644
--- a/src/SharpCompress/Compressors/Rar/RarCrcStream.cs
+++ b/src/SharpCompress/Compressors/Rar/RarCrcStream.cs
@@ -1,4 +1,3 @@
-using System.IO;
using SharpCompress.Common;
using SharpCompress.Common.Rar.Headers;
diff --git a/src/SharpCompress/Compressors/Rar/UnpackV1/Unpack50.cs b/src/SharpCompress/Compressors/Rar/UnpackV1/Unpack50.cs
index 274692cfb..194fce273 100644
--- a/src/SharpCompress/Compressors/Rar/UnpackV1/Unpack50.cs
+++ b/src/SharpCompress/Compressors/Rar/UnpackV1/Unpack50.cs
@@ -2,8 +2,6 @@
using System;
using System.Collections.Generic;
using SharpCompress.Compressors.Rar.UnpackV1.Decode;
-using SharpCompress.Compressors.Rar.VM;
-
using size_t=System.UInt32;
using UnpackBlockHeader = SharpCompress.Compressors.Rar.UnpackV1;
@@ -139,14 +137,18 @@ public void Unpack5(bool Solid) {
{
UnpInitData(Solid);
if (!UnpReadBuf())
- return;
+ {
+ return;
+ }
// Check TablesRead5 to be sure that we read tables at least once
// regardless of current block header TablePresent flag.
// So we can safefly use these tables below.
if (!ReadBlockHeader() ||
!ReadTables() || !TablesRead5)
- return;
+ {
+ return;
+ }
}
while (true)
@@ -169,17 +171,24 @@ public void Unpack5(bool Solid) {
break;
}
if (!ReadBlockHeader() || !ReadTables())
+ {
return;
+ }
}
if (FileDone || !UnpReadBuf())
+ {
break;
+ }
}
if (((WriteBorder-UnpPtr) & MaxWinMask)<270 && WriteBorder!=UnpPtr)
{
UnpWriteBuf();
if (WrittenFileSize>DestUnpSize)
+ {
return;
+ }
+
if (Suspended)
{
FileExtracted=false;
@@ -243,7 +252,9 @@ public void Unpack5(bool Solid) {
{
Length++;
if (Distance>0x40000)
+ {
Length++;
+ }
}
}
@@ -259,7 +270,10 @@ public void Unpack5(bool Solid) {
{
UnpackFilter Filter = new UnpackFilter();
if (!ReadFilter(Filter) || !AddFilter(Filter))
+ {
break;
+ }
+
continue;
}
if (MainSlot==257)
@@ -269,7 +283,10 @@ public void Unpack5(bool Solid) {
// FragWindow.CopyString(LastLength,OldDist[0],UnpPtr,MaxWinMask);
// else
//CopyString(LastLength,OldDist[0]);
- CopyString(LastLength,OldDistN(0));
+ {
+ CopyString(LastLength,OldDistN(0));
+ }
+
continue;
}
if (MainSlot<262)
@@ -316,13 +333,19 @@ private uint ReadFilterData()
private bool ReadFilter(UnpackFilter Filter)
{
if (!Inp.ExternalBuffer && Inp.InAddr>ReadTop-16)
+ {
if (!UnpReadBuf())
+ {
return false;
+ }
+ }
Filter.uBlockStart=ReadFilterData();
Filter.uBlockLength=ReadFilterData();
if (Filter.BlockLength>MAX_FILTER_BLOCK_SIZE)
+ {
Filter.BlockLength=0;
+ }
//Filter.Type=Inp.fgetbits()>>13;
Filter.Type=(byte)(Inp.fgetbits()>>13);
@@ -344,7 +367,9 @@ private bool AddFilter(UnpackFilter Filter)
{
UnpWriteBuf(); // Write data, apply and flush filters.
if (Filters.Count>=MAX_UNPACK_FILTERS)
+ {
InitFilters(); // Still too many filters, prevent excessive memory use.
+ }
}
// If distance to filter start is that large that due to circular dictionary
@@ -361,7 +386,10 @@ private bool UnpReadBuf()
{
int DataSize=ReadTop-Inp.InAddr; // Data left to process.
if (DataSize<0)
+ {
return false;
+ }
+
BlockHeader.BlockSize-=Inp.InAddr-BlockHeader.BlockStart;
if (Inp.InAddr>MAX_SIZE/2)
{
@@ -373,21 +401,33 @@ private bool UnpReadBuf()
// to make it zero.
if (DataSize>0)
//memmove(Inp.InBuf,Inp.InBuf+Inp.InAddr,DataSize);
+ {
Array.Copy(InBuf, inAddr, InBuf, 0, DataSize);
- // TODO: perf
+ }
+
+ // TODO: perf
//Buffer.BlockCopy(InBuf, inAddr, InBuf, 0, DataSize);
Inp.InAddr=0;
ReadTop=DataSize;
}
else
+ {
DataSize=ReadTop;
+ }
+
int ReadCode=0;
if (MAX_SIZE!=DataSize)
//ReadCode=UnpIO->UnpRead(Inp.InBuf+DataSize,BitInput.MAX_SIZE-DataSize);
+ {
ReadCode = readStream.Read(InBuf, DataSize, MAX_SIZE-DataSize);
+ }
+
if (ReadCode>0) // Can be also -1.
+ {
ReadTop+=ReadCode;
+ }
+
ReadBorder=ReadTop-30;
BlockHeader.BlockStart=Inp.InAddr;
if (BlockHeader.BlockSize!=-1) // '-1' means not defined yet.
@@ -674,7 +714,9 @@ private bool UnpReadBuf()
private void UnpInitData50(bool Solid)
{
if (!Solid)
+ {
TablesRead5=false;
+ }
}
private bool ReadBlockHeader()
@@ -682,8 +724,13 @@ private bool ReadBlockHeader()
Header.HeaderSize=0;
if (!Inp.ExternalBuffer && Inp.InAddr>ReadTop-7)
+ {
if (!UnpReadBuf())
+ {
return false;
+ }
+ }
+
//Inp.faddbits((8-Inp.InBit)&7);
Inp.faddbits((uint)((8-Inp.InBit)&7));
@@ -693,7 +740,9 @@ private bool ReadBlockHeader()
uint ByteCount=(uint)(((BlockFlags>>3)&3)+1); // Block size byte count.
if (ByteCount==4)
+ {
return false;
+ }
//Header.HeaderSize=2+ByteCount;
Header.HeaderSize=(int)(2+ByteCount);
@@ -715,7 +764,9 @@ private bool ReadBlockHeader()
Header.BlockSize=BlockSize;
byte CheckSum=(byte)(0x5a^BlockFlags^BlockSize^(BlockSize>>8)^(BlockSize>>16));
if (CheckSum!=SavedCheckSum)
+ {
return false;
+ }
Header.BlockStart=Inp.InAddr;
ReadBorder=Math.Min(ReadBorder,Header.BlockStart+Header.BlockSize-1);
diff --git a/src/SharpCompress/Compressors/Rar/UnpackV2017/BitInput.getbits_cpp.cs b/src/SharpCompress/Compressors/Rar/UnpackV2017/BitInput.getbits_cpp.cs
index ff7b70d7f..52da28a16 100644
--- a/src/SharpCompress/Compressors/Rar/UnpackV2017/BitInput.getbits_cpp.cs
+++ b/src/SharpCompress/Compressors/Rar/UnpackV2017/BitInput.getbits_cpp.cs
@@ -30,7 +30,9 @@ public BitInput(bool AllocBuffer)
//memset(InBuf,0,BufSize);
}
else
- InBuf=null;
+ {
+ InBuf=null;
+ }
}
diff --git a/src/SharpCompress/Compressors/Rar/UnpackV2017/FragmentedWindow.unpack50frag_cpp.cs b/src/SharpCompress/Compressors/Rar/UnpackV2017/FragmentedWindow.unpack50frag_cpp.cs
index e36142176..dbedb0bcf 100644
--- a/src/SharpCompress/Compressors/Rar/UnpackV2017/FragmentedWindow.unpack50frag_cpp.cs
+++ b/src/SharpCompress/Compressors/Rar/UnpackV2017/FragmentedWindow.unpack50frag_cpp.cs
@@ -60,13 +60,18 @@ public void Init(size_t WinSize)
{
NewMem=new byte[Size];
if (NewMem!=null)
+ {
break;
+ }
+
Size-=Size/32;
}
if (NewMem==null)
//throw std::bad_alloc();
+ {
throw new InvalidOperationException();
-
+ }
+
// Clean the window to generate the same output when unpacking corrupt
// RAR files, which may access to unused areas of sliding dictionary.
// sharpcompress: don't need this, freshly allocated above
@@ -79,17 +84,25 @@ public void Init(size_t WinSize)
}
if (TotalSize=0)
{
@@ -75,9 +78,15 @@ private void Unpack15(bool Solid)
UnpPtr&=MaxWinMask;
if (Inp.InAddr>ReadTop-30 && !UnpReadBuf())
+ {
break;
+ }
+
if (((WrPtr-UnpPtr) & MaxWinMask)<270 && WrPtr!=UnpPtr)
+ {
UnpWriteBuf20();
+ }
+
if (StMode != 0)
{
HuffDecode();
@@ -94,9 +103,13 @@ private void Unpack15(bool Solid)
{
FlagBuf<<=1;
if (Nlzb > Nhfb)
+ {
LongLZ();
+ }
else
+ {
HuffDecode();
+ }
}
else
{
@@ -110,9 +123,13 @@ private void Unpack15(bool Solid)
{
FlagBuf<<=1;
if (Nlzb > Nhfb)
+ {
HuffDecode();
+ }
else
+ {
LongLZ();
+ }
}
else
{
@@ -169,14 +186,20 @@ private void ShortLZ()
{
for (Length=0;;Length++)
if (((BitField^ShortXor1[Length]) & (~(0xff>>(int)GetShortLen1(Length))))==0)
+ {
break;
+ }
+
Inp.faddbits(GetShortLen1(Length));
}
else
{
for (Length=0;;Length++)
if (((BitField^ShortXor2[Length]) & (~(0xff>>(int)GetShortLen2(Length))))==0)
+ {
break;
+ }
+
Inp.faddbits(GetShortLen2(Length));
}
@@ -210,9 +233,14 @@ private void ShortLZ()
return;
}
if (Distance > 256)
+ {
Length++;
+ }
+
if (Distance >= MaxDist3)
+ {
Length++;
+ }
OldDist[OldDistPtr++]=Distance;
OldDistPtr = OldDistPtr & 3;
@@ -260,10 +288,14 @@ private void LongLZ()
uint BitField=Inp.fgetbits();
if (AvrLn2 >= 122)
+ {
Length=DecodeNum(BitField,STARTL2,DecL2,PosL2);
+ }
else
if (AvrLn2 >= 64)
+ {
Length=DecodeNum(BitField,STARTL1,DecL1,PosL1);
+ }
else
if (BitField < 0x100)
{
@@ -282,12 +314,18 @@ private void LongLZ()
BitField=Inp.fgetbits();
if (AvrPlcB > 0x28ff)
+ {
DistancePlace=DecodeNum(BitField,STARTHF2,DecHf2,PosHf2);
+ }
else
if (AvrPlcB > 0x6ff)
+ {
DistancePlace=DecodeNum(BitField,STARTHF1,DecHf1,PosHf1);
+ }
else
+ {
DistancePlace=DecodeNum(BitField,STARTHF0,DecHf0,PosHf0);
+ }
AvrPlcB += DistancePlace;
AvrPlcB -= AvrPlcB >> 8;
@@ -296,9 +334,13 @@ private void LongLZ()
Distance = ChSetB[DistancePlace & 0xff];
NewDistancePlace = NToPlB[Distance++ & 0xff]++;
if ((Distance & 0xff) != 0)
+ {
CorrHuff(ChSetB,NToPlB);
+ }
else
+ {
break;
+ }
}
ChSetB[DistancePlace & 0xff]=ChSetB[NewDistancePlace];
@@ -309,23 +351,39 @@ private void LongLZ()
OldAvr3=AvrLn3;
if (Length!=1 && Length!=4)
+ {
if (Length==0 && Distance <= MaxDist3)
{
AvrLn3++;
AvrLn3 -= AvrLn3 >> 8;
}
else
- if (AvrLn3 > 0)
- AvrLn3--;
+ if (AvrLn3 > 0)
+ {
+ AvrLn3--;
+ }
+ }
+
Length+=3;
if (Distance >= MaxDist3)
+ {
Length++;
+ }
+
if (Distance <= 256)
+ {
Length+=8;
+ }
+
if (OldAvr3 > 0xb0 || AvrPlc >= 0x2a00 && OldAvr2 < 0x40)
+ {
MaxDist3=0x7f00;
+ }
else
+ {
MaxDist3=0x2001;
+ }
+
OldDist[OldDistPtr++]=Distance;
OldDistPtr = OldDistPtr & 3;
LastLength=Length;
@@ -343,23 +401,37 @@ private void HuffDecode()
uint BitField=Inp.fgetbits();
if (AvrPlc > 0x75ff)
+ {
BytePlace=(int)DecodeNum(BitField,STARTHF4,DecHf4,PosHf4);
+ }
else
if (AvrPlc > 0x5dff)
+ {
BytePlace=(int)DecodeNum(BitField,STARTHF3,DecHf3,PosHf3);
+ }
else
if (AvrPlc > 0x35ff)
+ {
BytePlace=(int)DecodeNum(BitField,STARTHF2,DecHf2,PosHf2);
+ }
else
if (AvrPlc > 0x0dff)
+ {
BytePlace=(int)DecodeNum(BitField,STARTHF1,DecHf1,PosHf1);
+ }
else
+ {
BytePlace=(int)DecodeNum(BitField,STARTHF0,DecHf0,PosHf0);
+ }
+
BytePlace&=0xff;
if (StMode != 0)
{
if (BytePlace==0 && BitField > 0xfff)
+ {
BytePlace=0x100;
+ }
+
if (--BytePlace==-1)
{
BitField=Inp.fgetbits();
@@ -383,7 +455,10 @@ private void HuffDecode()
}
else
if (NumHuf++ >= 16 && FlagsCnt==0)
+ {
StMode=1;
+ }
+
AvrPlc += (uint)BytePlace;
AvrPlc -= AvrPlc >> 8;
Nhfb+=16;
@@ -401,9 +476,13 @@ private void HuffDecode()
CurByte=ChSet[BytePlace];
NewBytePlace=NToPl[CurByte++ & 0xff]++;
if ((CurByte & 0xff) > 0xa1)
+ {
CorrHuff(ChSet,NToPl);
+ }
else
+ {
break;
+ }
}
ChSet[BytePlace]=ChSet[NewBytePlace];
@@ -421,7 +500,9 @@ private void GetFlagsBuf()
// we need to check for value 256 when unpacking in case we unpack
// a corrupt archive.
if (FlagsPlace>=ChSetC.Length)
+ {
return;
+ }
while (true)
{
@@ -429,7 +510,10 @@ private void GetFlagsBuf()
FlagBuf=Flags>>8;
NewFlagsPlace=NToPlC[Flags++ & 0xff]++;
if ((Flags & 0xff) != 0)
+ {
break;
+ }
+
CorrHuff(ChSetC,NToPlC);
}
diff --git a/src/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack20_cpp.cs b/src/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack20_cpp.cs
index 81e2c52d6..857c03b42 100644
--- a/src/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack20_cpp.cs
+++ b/src/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack20_cpp.cs
@@ -40,14 +40,22 @@ private void Unpack20(bool Solid)
uint Bits;
if (Suspended)
+ {
UnpPtr=WrPtr;
+ }
else
{
UnpInitData(Solid);
if (!UnpReadBuf())
+ {
return;
+ }
+
if ((!Solid || !TablesRead2) && !ReadTables20())
+ {
return;
+ }
+
--DestUnpSize;
}
@@ -56,13 +64,20 @@ private void Unpack20(bool Solid)
UnpPtr&=MaxWinMask;
if (Inp.InAddr>ReadTop-30)
+ {
if (!UnpReadBuf())
+ {
break;
+ }
+ }
+
if (((WrPtr-UnpPtr) & MaxWinMask)<270 && WrPtr!=UnpPtr)
{
UnpWriteBuf20();
if (Suspended)
+ {
return;
+ }
}
if (UnpAudioBlock)
{
@@ -71,12 +86,18 @@ private void Unpack20(bool Solid)
if (AudioNumber==256)
{
if (!ReadTables20())
+ {
break;
+ }
+
continue;
}
Window[UnpPtr++]=DecodeAudio((int)AudioNumber);
if (++UnpCurChannel==UnpChannels)
+ {
UnpCurChannel=0;
+ }
+
--DestUnpSize;
continue;
}
@@ -109,7 +130,9 @@ private void Unpack20(bool Solid)
{
Length++;
if (Distance>=0x40000L)
+ {
Length++;
+ }
}
CopyString20(Length,Distance);
@@ -118,7 +141,10 @@ private void Unpack20(bool Solid)
if (Number==269)
{
if (!ReadTables20())
+ {
break;
+ }
+
continue;
}
if (Number==256)
@@ -143,7 +169,9 @@ private void Unpack20(bool Solid)
{
Length++;
if (Distance>=0x40000)
+ {
Length++;
+ }
}
}
CopyString20(Length,Distance);
@@ -168,7 +196,10 @@ private void Unpack20(bool Solid)
private void UnpWriteBuf20()
{
if (UnpPtr!=WrPtr)
+ {
UnpSomeRead=true;
+ }
+
if (UnpPtrReadTop-25)
+ {
if (!UnpReadBuf())
+ {
return false;
+ }
+ }
+
uint BitField=Inp.getbits();
UnpAudioBlock=(BitField & 0x8000)!=0;
if ((BitField & 0x4000) != 0)
+ {
new Span<byte>(UnpOldTable20).Fill(0);
+ }
+
Inp.addbits(2);
uint TableSize;
@@ -199,12 +241,17 @@ private bool ReadTables20()
{
UnpChannels=((BitField>>12) & 3)+1;
if (UnpCurChannel>=UnpChannels)
+ {
UnpCurChannel=0;
+ }
+
Inp.addbits(2);
TableSize=MC20*UnpChannels;
}
else
+ {
TableSize=NC20+DC20+RC20;
+ }
for (uint I=0;IReadTop-5)
+ {
if (!UnpReadBuf())
+ {
return false;
+ }
+ }
+
uint Number=DecodeNumber(Inp,BlockTables.BD);
if (Number<16)
{
@@ -229,13 +281,17 @@ private bool ReadTables20()
uint N=(Inp.getbits() >> 14)+3;
Inp.addbits(2);
if (I==0)
+ {
return false; // We cannot have "repeat previous" code at the first position.
+ }
else
+ {
while (N-- > 0 && IReadTop)
+ {
return true;
+ }
+
if (UnpAudioBlock)
+ {
for (uint I=0;I=Inp.InAddr+5)
+ {
if (UnpAudioBlock)
{
if (DecodeNumber(Inp,MD[UnpCurChannel])==256)
+ {
ReadTables20();
+ }
}
else
- if (DecodeNumber(Inp,BlockTables.LD)==269)
- ReadTables20();
+ if (DecodeNumber(Inp,BlockTables.LD)==269)
+ {
+ ReadTables20();
+ }
+ }
}
private void UnpInitData20(bool Solid)
@@ -352,43 +419,73 @@ private byte DecodeAudio(int Delta)
{
case 1:
if (V.K1>=-16)
+ {
V.K1--;
+ }
+
break;
case 2:
if (V.K1<16)
+ {
V.K1++;
+ }
+
break;
case 3:
if (V.K2>=-16)
+ {
V.K2--;
+ }
+
break;
case 4:
if (V.K2<16)
+ {
V.K2++;
+ }
+
break;
case 5:
if (V.K3>=-16)
+ {
V.K3--;
+ }
+
break;
case 6:
if (V.K3<16)
+ {
V.K3++;
+ }
+
break;
case 7:
if (V.K4>=-16)
+ {
V.K4--;
+ }
+
break;
case 8:
if (V.K4<16)
+ {
V.K4++;
+ }
+
break;
case 9:
if (V.K5>=-16)
+ {
V.K5--;
+ }
+
break;
case 10:
if (V.K5<16)
+ {
V.K5++;
+ }
+
break;
}
}
diff --git a/src/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack30_cpp.cs b/src/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack30_cpp.cs
index 7e77c7d40..1647b902c 100644
--- a/src/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack30_cpp.cs
+++ b/src/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack30_cpp.cs
@@ -9,9 +9,6 @@
#endif
using int64 = System.Int64;
-using System;
-using static SharpCompress.Compressors.Rar.UnpackV2017.PackDef;
-using static SharpCompress.Compressors.Rar.UnpackV2017.UnpackGlobal;
//using static SharpCompress.Compressors.Rar.UnpackV2017.Unpack.Unpack30Local;
/*
namespace SharpCompress.Compressors.Rar.UnpackV2017
diff --git a/src/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack50_cpp.cs b/src/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack50_cpp.cs
index a48813154..e08368496 100644
--- a/src/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack50_cpp.cs
+++ b/src/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack50_cpp.cs
@@ -25,14 +25,18 @@ private void Unpack5(bool Solid)
{
UnpInitData(Solid);
if (!UnpReadBuf())
+ {
return;
+ }
// Check TablesRead5 to be sure that we read tables at least once
// regardless of current block header TablePresent flag.
// So we can safefly use these tables below.
if (!ReadBlockHeader(Inp,ref BlockHeader) ||
!ReadTables(Inp,ref BlockHeader, ref BlockTables) || !TablesRead5)
+ {
return;
+ }
}
while (true)
@@ -55,17 +59,24 @@ private void Unpack5(bool Solid)
break;
}
if (!ReadBlockHeader(Inp,ref BlockHeader) || !ReadTables(Inp, ref BlockHeader, ref BlockTables))
+ {
return;
+ }
}
if (FileDone || !UnpReadBuf())
+ {
break;
+ }
}
if (((WriteBorder-UnpPtr) & MaxWinMask)DestUnpSize)
+ {
return;
+ }
+
if (Suspended)
{
FileExtracted=false;
@@ -77,9 +88,14 @@ private void Unpack5(bool Solid)
if (MainSlot<256)
{
if (Fragmented)
+ {
FragWindow[UnpPtr++]=(byte)MainSlot;
+ }
else
+ {
Window[UnpPtr++]=(byte)MainSlot;
+ }
+
continue;
}
if (MainSlot>=262)
@@ -124,32 +140,49 @@ private void Unpack5(bool Solid)
{
Length++;
if (Distance>0x40000)
+ {
Length++;
+ }
}
}
InsertOldDist(Distance);
LastLength=Length;
if (Fragmented)
+ {
FragWindow.CopyString(Length,Distance,ref UnpPtr,MaxWinMask);
+ }
else
+ {
CopyString(Length,Distance);
+ }
+
continue;
}
if (MainSlot==256)
{
UnpackFilter Filter = new UnpackFilter();
if (!ReadFilter(Inp,Filter) || !AddFilter(Filter))
+ {
break;
+ }
+
continue;
}
if (MainSlot==257)
{
if (LastLength!=0)
+ {
if (Fragmented)
+ {
FragWindow.CopyString(LastLength,OldDist[0],ref UnpPtr,MaxWinMask);
+ }
else
+ {
CopyString(LastLength,OldDist[0]);
+ }
+ }
+
continue;
}
if (MainSlot<262)
@@ -164,9 +197,14 @@ private void Unpack5(bool Solid)
uint Length=SlotToLength(Inp,LengthSlot);
LastLength=Length;
if (Fragmented)
+ {
FragWindow.CopyString(Length,Distance,ref UnpPtr,MaxWinMask);
+ }
else
+ {
CopyString(Length,Distance);
+ }
+
continue;
}
}
@@ -190,13 +228,19 @@ private uint ReadFilterData(BitInput Inp)
private bool ReadFilter(BitInput Inp,UnpackFilter Filter)
{
if (!Inp.ExternalBuffer && Inp.InAddr>ReadTop-16)
+ {
if (!UnpReadBuf())
+ {
return false;
+ }
+ }
Filter.BlockStart=ReadFilterData(Inp);
Filter.BlockLength=ReadFilterData(Inp);
if (Filter.BlockLength>MAX_FILTER_BLOCK_SIZE)
+ {
Filter.BlockLength=0;
+ }
Filter.Type=(byte)(Inp.fgetbits()>>13);
Inp.faddbits(3);
@@ -216,7 +260,9 @@ private bool AddFilter(UnpackFilter Filter)
{
UnpWriteBuf(); // Write data, apply and flush filters.
if (Filters.Count>=MAX_UNPACK_FILTERS)
+ {
InitFilters(); // Still too many filters, prevent excessive memory use.
+ }
}
// If distance to filter start is that large that due to circular dictionary
@@ -233,7 +279,10 @@ private bool UnpReadBuf()
{
int DataSize=ReadTop-Inp.InAddr; // Data left to process.
if (DataSize<0)
+ {
return false;
+ }
+
BlockHeader.BlockSize-=Inp.InAddr-BlockHeader.BlockStart;
if (Inp.InAddr>MAX_SIZE/2)
{
@@ -245,17 +294,29 @@ private bool UnpReadBuf()
// to make it zero.
if (DataSize>0)
//x memmove(Inp.InBuf,Inp.InBuf+Inp.InAddr,DataSize);
+ {
Buffer.BlockCopy(Inp.InBuf, Inp.InAddr, Inp.InBuf, 0, DataSize);
+ }
+
Inp.InAddr=0;
ReadTop=DataSize;
}
else
+ {
DataSize=ReadTop;
+ }
+
int ReadCode=0;
if (MAX_SIZE!=DataSize)
+ {
ReadCode=UnpIO_UnpRead(Inp.InBuf,DataSize,MAX_SIZE-DataSize);
+ }
+
if (ReadCode>0) // Can be also -1.
+ {
ReadTop+=ReadCode;
+ }
+
ReadBorder=ReadTop-30;
BlockHeader.BlockStart=Inp.InAddr;
if (BlockHeader.BlockSize!=-1) // '-1' means not defined yet.
@@ -285,7 +346,10 @@ private void UnpWriteBuf()
UnpackFilter flt=Filters[I];
if (flt.Type==FILTER_NONE)
+ {
continue;
+ }
+
if (flt.NextWindow)
{
// Here we skip filters which have block start in current data range
@@ -301,7 +365,10 @@ private void UnpWriteBuf()
// our write here, we can safely assume that filter is applicable
// to next block on no further wrap arounds is possible.
if (((flt.BlockStart-WrPtr)&MaxWinMask)<=FullWriteSize)
+ {
flt.NextWindow=false;
+ }
+
continue;
}
uint BlockStart=flt.BlockStart;
@@ -326,10 +393,14 @@ private void UnpWriteBuf()
if (BlockStart0)
+ {
Filters[I-EmptyCount]=Filters[I];
+ }
+
if (Filters[I].Type==FILTER_NONE)
+ {
EmptyCount++;
+ }
}
if (EmptyCount>0)
//Filters.Alloc(Filters.Count-EmptyCount);
+ {
Filters.RemoveRange(Filters.Count-EmptyCount, EmptyCount);
+ }
if (!NotAllFiltersProcessed) // Only if all filters are processed.
{
@@ -417,7 +499,9 @@ private void UnpWriteBuf()
// If border is equal to UnpPtr, it means that we have MaxWinSize data ahead.
if (WriteBorder==UnpPtr ||
WrPtr!=UnpPtr && ((WrPtr-UnpPtr)&MaxWinMask)<((WriteBorder-UnpPtr)&MaxWinMask))
+ {
WriteBorder=WrPtr;
+ }
}
private byte[] ApplyFilter(byte[] __d,uint DataSize,UnpackFilter Flt)
@@ -450,11 +534,15 @@ private byte[] ApplyFilter(byte[] __d,uint DataSize,UnpackFilter Flt)
if ((Addr & 0x80000000)!=0) // Addr<0
{
if (((Addr+Offset) & 0x80000000)==0) // Addr+Offset>=0
+ {
RawPut4(Addr+FileSize,__d,Data);
+ }
}
else
if (((Addr-FileSize) & 0x80000000)!=0) // Addr=DestUnpSize)
+ {
return;
+ }
+
size_t WriteSize=Size;
int64 LeftToWrite=DestUnpSize-WrittenFileSize;
if ((int64)WriteSize>LeftToWrite)
+ {
WriteSize=(size_t)LeftToWrite;
+ }
+
UnpIO_UnpWrite(Data, offset, WriteSize);
WrittenFileSize+=Size;
}
@@ -552,7 +653,9 @@ private void UnpWriteData(byte[] Data, size_t offset, size_t Size)
private void UnpInitData50(bool Solid)
{
if (!Solid)
+ {
TablesRead5=false;
+ }
}
private bool ReadBlockHeader(BitInput Inp,ref UnpackBlockHeader Header)
@@ -560,8 +663,13 @@ private bool ReadBlockHeader(BitInput Inp,ref UnpackBlockHeader Header)
Header.HeaderSize=0;
if (!Inp.ExternalBuffer && Inp.InAddr>ReadTop-7)
+ {
if (!UnpReadBuf())
+ {
return false;
+ }
+ }
+
Inp.faddbits((uint)((8-Inp.InBit)&7));
byte BlockFlags=(byte)(Inp.fgetbits()>>8);
@@ -569,7 +677,9 @@ private bool ReadBlockHeader(BitInput Inp,ref UnpackBlockHeader Header)
uint ByteCount=(uint)(((BlockFlags>>3)&3)+1); // Block size byte count.
if (ByteCount==4)
+ {
return false;
+ }
Header.HeaderSize=(int)(2+ByteCount);
@@ -588,7 +698,9 @@ private bool ReadBlockHeader(BitInput Inp,ref UnpackBlockHeader Header)
Header.BlockSize=BlockSize;
byte CheckSum=(byte)(0x5a^BlockFlags^BlockSize^(BlockSize>>8)^(BlockSize>>16));
if (CheckSum!=SavedCheckSum)
+ {
return false;
+ }
Header.BlockStart=Inp.InAddr;
ReadBorder=Math.Min(ReadBorder,Header.BlockStart+Header.BlockSize-1);
@@ -601,11 +713,17 @@ private bool ReadBlockHeader(BitInput Inp,ref UnpackBlockHeader Header)
private bool ReadTables(BitInput Inp,ref UnpackBlockHeader Header, ref UnpackBlockTables Tables)
{
if (!Header.TablePresent)
+ {
return true;
+ }
if (!Inp.ExternalBuffer && Inp.InAddr>ReadTop-25)
+ {
if (!UnpReadBuf())
+ {
return false;
+ }
+ }
byte[] BitLength = new byte[BC];
for (uint I=0;I> 12);
Inp.faddbits(4);
if (ZeroCount==0)
+ {
BitLength[I]=15;
+ }
else
{
ZeroCount+=2;
@@ -627,7 +747,9 @@ private bool ReadTables(BitInput Inp,ref UnpackBlockHeader Header, ref UnpackBlo
}
}
else
+ {
BitLength[I]=(byte)Length;
+ }
}
MakeDecodeTables(BitLength,0,Tables.BD,BC);
@@ -637,8 +759,13 @@ private bool ReadTables(BitInput Inp,ref UnpackBlockHeader Header, ref UnpackBlo
for (uint I=0;IReadTop-5)
+ {
if (!UnpReadBuf())
+ {
return false;
+ }
+ }
+
uint Number=DecodeNumber(Inp,Tables.BD);
if (Number<16)
{
@@ -669,11 +796,13 @@ private bool ReadTables(BitInput Inp,ref UnpackBlockHeader Header, ref UnpackBlo
return false;
}
else
+ {
while (N-- > 0 && IReadTop)
+ {
return false;
+ }
+
MakeDecodeTables(Table, 0, Tables.LD,NC);
MakeDecodeTables(Table, (int)NC,Tables.DD,DC);
MakeDecodeTables(Table, (int)(NC+DC),Tables.LDD,LDC);
diff --git a/src/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack_cpp.cs b/src/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack_cpp.cs
index 029eba267..5d5121ea2 100644
--- a/src/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack_cpp.cs
+++ b/src/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpack_cpp.cs
@@ -70,7 +70,9 @@ private void Init(size_t WinSize,bool Solid)
// will be 0 because of size_t overflow. Let's issue the memory error.
if (WinSize==0)
//ErrHandler.MemoryError();
+ {
throw new InvalidFormatException("invalid window size (possibly due to a rar file with a 4GB being unpacked on a 32-bit platform)");
+ }
// Minimum window size must be at least twice more than maximum possible
// size of filter block, which is 0x10000 in RAR now. If window size is
@@ -79,12 +81,19 @@ private void Init(size_t WinSize,bool Solid)
// use 0x40000 for extra safety and possible filter area size expansion.
const size_t MinAllocSize=0x40000;
if (WinSize>16)>0x10000) // Window size must not exceed 4 GB.
+ {
return;
+ }
// Archiving code guarantees that window size does not grow in the same
// solid stream. So if we are here, we are either creating a new window
@@ -96,11 +105,14 @@ private void Init(size_t WinSize,bool Solid)
// We do not handle growth for existing fragmented window.
if (Grow && Fragmented)
//throw std::bad_alloc();
+ {
throw new InvalidFormatException("Grow && Fragmented");
+ }
byte[] NewWindow=Fragmented ? null : new byte[WinSize];
if (NewWindow==null)
+ {
if (Grow || WinSize<0x1000000)
{
// We do not support growth for new fragmented window.
@@ -118,6 +130,7 @@ private void Init(size_t WinSize,bool Solid)
FragWindow.Init(WinSize);
Fragmented=true;
}
+ }
if (!Fragmented)
{
@@ -132,8 +145,10 @@ private void Init(size_t WinSize,bool Solid)
// RAR archiving code does not allow it in solid streams now,
// but let's implement it anyway just in case we'll change it sometimes.
if (Grow)
+ {
for (size_t I=1;I<=MaxWinSize;I++)
NewWindow[(UnpPtr-I)&(WinSize-1)]=Window[(UnpPtr-I)&(MaxWinSize-1)];
+ }
//if (Window!=null)
// free(Window);
@@ -154,18 +169,27 @@ private void DoUnpack(uint Method,bool Solid)
#if !RarV2017_SFX_MODULE
case 15: // rar 1.5 compression
if (!Fragmented)
+ {
Unpack15(Solid);
+ }
+
break;
case 20: // rar 2.x compression
case 26: // files larger than 2GB
if (!Fragmented)
+ {
Unpack20(Solid);
+ }
+
break;
#endif
#if !RarV2017_RAR5ONLY
case 29: // rar 3.x compression
if (!Fragmented)
+ {
throw new NotImplementedException();
+ }
+
break;
#endif
case 50: // RAR 5.0 compression algorithm.
diff --git a/src/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpackinline_cpp.cs b/src/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpackinline_cpp.cs
index db7058073..53ef977da 100644
--- a/src/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpackinline_cpp.cs
+++ b/src/SharpCompress/Compressors/Rar/UnpackV2017/Unpack.unpackinline_cpp.cs
@@ -94,6 +94,7 @@ private void CopyString(uint Length,uint Distance)
// if (Length>6) { Dest[6]=Src[6]; } } } } } } } // Close all nested "if"s.
}
else
+ {
while (Length-- > 0) // Slow copying with all possible precautions.
{
Window[UnpPtr]=Window[SrcPtr++ & MaxWinMask];
@@ -101,6 +102,7 @@ private void CopyString(uint Length,uint Distance)
// be replaced with 'Window[UnpPtr++ & MaxWinMask]'
UnpPtr=(UnpPtr+1) & MaxWinMask;
}
+ }
}
private uint DecodeNumber(BitInput Inp,DecodeTable Dec)
@@ -140,7 +142,9 @@ private uint DecodeNumber(BitInput Inp,DecodeTable Dec)
// Out of bounds safety check required for damaged archives.
if (Pos>=Dec.MaxNum)
+ {
Pos=0;
+ }
// Convert the position in the code list to position in alphabet
// and return it.
diff --git a/src/SharpCompress/Compressors/Rar/UnpackV2017/unpack_hpp.cs b/src/SharpCompress/Compressors/Rar/UnpackV2017/unpack_hpp.cs
index 8586c98ba..84a57e9d2 100644
--- a/src/SharpCompress/Compressors/Rar/UnpackV2017/unpack_hpp.cs
+++ b/src/SharpCompress/Compressors/Rar/UnpackV2017/unpack_hpp.cs
@@ -432,7 +432,10 @@ internal partial class Unpack
private uint GetChar()
{
if (Inp.InAddr>MAX_SIZE-30)
- UnpReadBuf();
+ {
+ UnpReadBuf();
+ }
+
return(Inp.InBuf[Inp.InAddr++]);
}
diff --git a/src/SharpCompress/Compressors/Xz/BinaryUtils.cs b/src/SharpCompress/Compressors/Xz/BinaryUtils.cs
index f63770bef..73ae52035 100644
--- a/src/SharpCompress/Compressors/Xz/BinaryUtils.cs
+++ b/src/SharpCompress/Compressors/Xz/BinaryUtils.cs
@@ -36,7 +36,9 @@ internal static byte[] ToBigEndianBytes(this uint uint32)
var result = BitConverter.GetBytes(uint32);
if (BitConverter.IsLittleEndian)
+ {
Array.Reverse(result);
+ }
return result;
}
@@ -46,7 +48,9 @@ internal static byte[] ToLittleEndianBytes(this uint uint32)
var result = BitConverter.GetBytes(uint32);
if (!BitConverter.IsLittleEndian)
+ {
Array.Reverse(result);
+ }
return result;
}
diff --git a/src/SharpCompress/Compressors/Xz/Crc32.cs b/src/SharpCompress/Compressors/Xz/Crc32.cs
index 72c37ba1f..81bf8126e 100644
--- a/src/SharpCompress/Compressors/Xz/Crc32.cs
+++ b/src/SharpCompress/Compressors/Xz/Crc32.cs
@@ -28,7 +28,9 @@ public static UInt32 Compute(UInt32 polynomial, UInt32 seed, byte[] buffer)
private static UInt32[] InitializeTable(UInt32 polynomial)
{
if (polynomial == DefaultPolynomial && defaultTable != null)
+ {
return defaultTable;
+ }
var createTable = new UInt32[256];
for (var i = 0; i < 256; i++)
@@ -36,14 +38,21 @@ private static UInt32[] InitializeTable(UInt32 polynomial)
var entry = (UInt32)i;
for (var j = 0; j < 8; j++)
if ((entry & 1) == 1)
+ {
entry = (entry >> 1) ^ polynomial;
+ }
else
+ {
entry = entry >> 1;
+ }
+
createTable[i] = entry;
}
if (polynomial == DefaultPolynomial)
+ {
defaultTable = createTable;
+ }
return createTable;
}
diff --git a/src/SharpCompress/Compressors/Xz/Crc64.cs b/src/SharpCompress/Compressors/Xz/Crc64.cs
index bf6a0692f..639fcef08 100644
--- a/src/SharpCompress/Compressors/Xz/Crc64.cs
+++ b/src/SharpCompress/Compressors/Xz/Crc64.cs
@@ -19,7 +19,9 @@ public static UInt64 Compute(byte[] buffer)
public static UInt64 Compute(UInt64 seed, byte[] buffer)
{
if (Table == null)
+ {
Table = CreateTable(Iso3309Polynomial);
+ }
return CalculateHash(seed, Table, buffer, 0, buffer.Length);
}
@@ -45,9 +47,14 @@ public static ulong[] CreateTable(ulong polynomial)
var entry = (UInt64)i;
for (var j = 0; j < 8; ++j)
if ((entry & 1) == 1)
+ {
entry = (entry >> 1) ^ polynomial;
+ }
else
+ {
entry = entry >> 1;
+ }
+
createTable[i] = entry;
}
return createTable;
diff --git a/src/SharpCompress/Compressors/Xz/Filters/BlockFilter.cs b/src/SharpCompress/Compressors/Xz/Filters/BlockFilter.cs
index 8a13ec07f..05cd42311 100644
--- a/src/SharpCompress/Compressors/Xz/Filters/BlockFilter.cs
+++ b/src/SharpCompress/Compressors/Xz/Filters/BlockFilter.cs
@@ -35,12 +35,18 @@ public static BlockFilter Read(BinaryReader reader)
{
var filterType = (FilterTypes)reader.ReadXZInteger();
if (!FilterMap.ContainsKey(filterType))
+ {
throw new NotImplementedException($"Filter {filterType} has not yet been implemented");
+ }
+
var filter = Activator.CreateInstance(FilterMap[filterType]) as BlockFilter;
var sizeOfProperties = reader.ReadXZInteger();
if (sizeOfProperties > int.MaxValue)
+ {
throw new InvalidDataException("Block filter information too large");
+ }
+
byte[] properties = reader.ReadBytes((int)sizeOfProperties);
filter.Init(properties);
return filter;
diff --git a/src/SharpCompress/Compressors/Xz/Filters/Lzma2Filter.cs b/src/SharpCompress/Compressors/Xz/Filters/Lzma2Filter.cs
index cd2869c9e..0a842f687 100644
--- a/src/SharpCompress/Compressors/Xz/Filters/Lzma2Filter.cs
+++ b/src/SharpCompress/Compressors/Xz/Filters/Lzma2Filter.cs
@@ -16,7 +16,10 @@ public uint DictionarySize
get
{
if (_dictionarySize > 40)
+ {
throw new OverflowException("Dictionary size greater than UInt32.Max");
+ }
+
if (_dictionarySize == 40)
{
return uint.MaxValue;
@@ -30,12 +33,16 @@ public uint DictionarySize
public override void Init(byte[] properties)
{
if (properties.Length != 1)
+ {
throw new InvalidDataException("LZMA properties unexpected length");
+ }
_dictionarySize = (byte)(properties[0] & 0x3F);
var reserved = properties[0] & 0xC0;
if (reserved != 0)
+ {
throw new InvalidDataException("Reserved bits used in LZMA properties");
+ }
}
public override void ValidateFilter()
diff --git a/src/SharpCompress/Compressors/Xz/MultiByteIntegers.cs b/src/SharpCompress/Compressors/Xz/MultiByteIntegers.cs
index 247951288..3eb83449e 100644
--- a/src/SharpCompress/Compressors/Xz/MultiByteIntegers.cs
+++ b/src/SharpCompress/Compressors/Xz/MultiByteIntegers.cs
@@ -8,9 +8,14 @@ internal static class MultiByteIntegers
public static ulong ReadXZInteger(this BinaryReader reader, int MaxBytes = 9)
{
if (MaxBytes <= 0)
+ {
throw new ArgumentOutOfRangeException();
+ }
+
if (MaxBytes > 9)
+ {
MaxBytes = 9;
+ }
byte LastByte = reader.ReadByte();
ulong Output = (ulong)LastByte & 0x7F;
@@ -19,10 +24,15 @@ public static ulong ReadXZInteger(this BinaryReader reader, int MaxBytes = 9)
while ((LastByte & 0x80) != 0)
{
if (++i >= MaxBytes)
+ {
throw new InvalidDataException();
+ }
+
LastByte = reader.ReadByte();
if (LastByte == 0)
+ {
throw new InvalidDataException();
+ }
Output |= ((ulong)(LastByte & 0x7F)) << (i * 7);
}
diff --git a/src/SharpCompress/Compressors/Xz/XZBlock.cs b/src/SharpCompress/Compressors/Xz/XZBlock.cs
index 96ecef171..e4ff64a7d 100644
--- a/src/SharpCompress/Compressors/Xz/XZBlock.cs
+++ b/src/SharpCompress/Compressors/Xz/XZBlock.cs
@@ -33,17 +33,35 @@ public override int Read(byte[] buffer, int offset, int count)
{
int bytesRead = 0;
if (!HeaderIsLoaded)
+ {
LoadHeader();
+ }
+
if (!_streamConnected)
+ {
ConnectStream();
+ }
+
if (!_endOfStream)
+ {
bytesRead = _decomStream.Read(buffer, offset, count);
+ }
+
if (bytesRead != count)
+ {
_endOfStream = true;
+ }
+
if (_endOfStream && !_paddingSkipped)
+ {
SkipPadding();
+ }
+
if (_endOfStream && !_crcChecked)
+ {
CheckCrc();
+ }
+
_bytesRead += (ulong)bytesRead;
return bytesRead;
}
@@ -56,7 +74,9 @@ private void SkipPadding()
byte[] paddingBytes = new byte[4 - bytes];
BaseStream.Read(paddingBytes, 0, paddingBytes.Length);
if (paddingBytes.Any(b => b != 0))
+ {
throw new InvalidDataException("Padding bytes were non-null");
+ }
}
_paddingSkipped = true;
}
@@ -101,7 +121,9 @@ private void ReadHeaderSize()
{
_blockHeaderSizeByte = (byte)BaseStream.ReadByte();
if (_blockHeaderSizeByte == 0)
+ {
throw new XZIndexMarkerReachedException();
+ }
}
private byte[] CacheHeader()
@@ -110,12 +132,16 @@ private byte[] CacheHeader()
blockHeaderWithoutCrc[0] = _blockHeaderSizeByte;
var read = BaseStream.Read(blockHeaderWithoutCrc, 1, BlockHeaderSize - 5);
if (read != BlockHeaderSize - 5)
+ {
throw new EndOfStreamException("Reached end of stream unexectedly");
+ }
uint crc = BaseStream.ReadLittleEndianUInt32();
uint calcCrc = Crc32.Compute(blockHeaderWithoutCrc);
if (crc != calcCrc)
+ {
throw new InvalidDataException("Block header corrupt");
+ }
return blockHeaderWithoutCrc;
}
@@ -127,15 +153,22 @@ private void ReadBlockFlags(BinaryReader reader)
byte reserved = (byte)(blockFlags & 0x3C);
if (reserved != 0)
+ {
throw new InvalidDataException("Reserved bytes used, perhaps an unknown XZ implementation");
+ }
bool compressedSizePresent = (blockFlags & 0x40) != 0;
bool uncompressedSizePresent = (blockFlags & 0x80) != 0;
if (compressedSizePresent)
+ {
CompressedSize = reader.ReadXZInteger();
+ }
+
if (uncompressedSizePresent)
+ {
UncompressedSize = reader.ReadXZInteger();
+ }
}
private void ReadFilters(BinaryReader reader, long baseStreamOffset = 0)
@@ -146,20 +179,30 @@ private void ReadFilters(BinaryReader reader, long baseStreamOffset = 0)
var filter = BlockFilter.Read(reader);
if ((i + 1 == _numFilters && !filter.AllowAsLast)
|| (i + 1 < _numFilters && !filter.AllowAsNonLast))
+ {
throw new InvalidDataException("Block Filters in bad order");
+ }
+
if (filter.ChangesDataSize && i + 1 < _numFilters)
+ {
nonLastSizeChangers++;
+ }
+
filter.ValidateFilter();
Filters.Push(filter);
}
if (nonLastSizeChangers > 2)
+ {
throw new InvalidDataException("More than two non-last block filters cannot change stream size");
+ }
int blockHeaderPaddingSize = BlockHeaderSize -
- (4 + (int)(reader.BaseStream.Position - baseStreamOffset));
+ (4 + (int)(reader.BaseStream.Position - baseStreamOffset));
byte[] blockHeaderPadding = reader.ReadBytes(blockHeaderPaddingSize);
if (!blockHeaderPadding.All(b => b == 0))
+ {
throw new InvalidDataException("Block header contains unknown fields");
+ }
}
}
}
diff --git a/src/SharpCompress/Compressors/Xz/XZFooter.cs b/src/SharpCompress/Compressors/Xz/XZFooter.cs
index 8339bc65f..f4a6b9df9 100644
--- a/src/SharpCompress/Compressors/Xz/XZFooter.cs
+++ b/src/SharpCompress/Compressors/Xz/XZFooter.cs
@@ -32,7 +32,10 @@ public void Process()
byte[] footerBytes = _reader.ReadBytes(6);
uint myCrc = Crc32.Compute(footerBytes);
if (crc != myCrc)
+ {
throw new InvalidDataException("Footer corrupt");
+ }
+
using (var stream = new MemoryStream(footerBytes))
using (var reader = new BinaryReader(stream))
{
diff --git a/src/SharpCompress/Compressors/Xz/XZHeader.cs b/src/SharpCompress/Compressors/Xz/XZHeader.cs
index 16588d6e2..587f20da0 100644
--- a/src/SharpCompress/Compressors/Xz/XZHeader.cs
+++ b/src/SharpCompress/Compressors/Xz/XZHeader.cs
@@ -38,18 +38,24 @@ private void ProcessStreamFlags()
UInt32 crc = _reader.ReadLittleEndianUInt32();
UInt32 calcCrc = Crc32.Compute(streamFlags);
if (crc != calcCrc)
+ {
throw new InvalidDataException("Stream header corrupt");
+ }
BlockCheckType = (CheckType)(streamFlags[1] & 0x0F);
byte futureUse = (byte)(streamFlags[1] & 0xF0);
if (futureUse != 0 || streamFlags[0] != 0)
+ {
throw new InvalidDataException("Unknown XZ Stream Version");
+ }
}
private void CheckMagicBytes(byte[] header)
{
if (!header.SequenceEqual(MagicHeader))
+ {
throw new InvalidDataException("Invalid XZ Stream");
+ }
}
}
}
diff --git a/src/SharpCompress/Compressors/Xz/XZIndex.cs b/src/SharpCompress/Compressors/Xz/XZIndex.cs
index 736f978af..d634db0c6 100644
--- a/src/SharpCompress/Compressors/Xz/XZIndex.cs
+++ b/src/SharpCompress/Compressors/Xz/XZIndex.cs
@@ -23,7 +23,9 @@ public XZIndex(BinaryReader reader, bool indexMarkerAlreadyVerified)
_indexMarkerAlreadyVerified = indexMarkerAlreadyVerified;
StreamStartPosition = reader.BaseStream.Position;
if (indexMarkerAlreadyVerified)
+ {
StreamStartPosition--;
+ }
}
public static XZIndex FromStream(Stream stream, bool indexMarkerAlreadyVerified)
@@ -36,7 +38,10 @@ public static XZIndex FromStream(Stream stream, bool indexMarkerAlreadyVerified)
public void Process()
{
if (!_indexMarkerAlreadyVerified)
+ {
VerifyIndexMarker();
+ }
+
NumberOfRecords = _reader.ReadXZInteger();
for (ulong i = 0; i < NumberOfRecords; i++)
{
@@ -50,7 +55,9 @@ private void VerifyIndexMarker()
{
byte marker = _reader.ReadByte();
if (marker != 0)
+ {
throw new InvalidDataException("Not an index block");
+ }
}
private void SkipPadding()
@@ -60,7 +67,9 @@ private void SkipPadding()
{
byte[] paddingBytes = _reader.ReadBytes(4 - bytes);
if (paddingBytes.Any(b => b != 0))
+ {
throw new InvalidDataException("Padding bytes were non-null");
+ }
}
}
diff --git a/src/SharpCompress/Compressors/Xz/XZReadOnlyStream.cs b/src/SharpCompress/Compressors/Xz/XZReadOnlyStream.cs
index ae124b150..fa376c513 100644
--- a/src/SharpCompress/Compressors/Xz/XZReadOnlyStream.cs
+++ b/src/SharpCompress/Compressors/Xz/XZReadOnlyStream.cs
@@ -8,7 +8,9 @@ public XZReadOnlyStream(Stream stream)
{
BaseStream = stream;
if (!BaseStream.CanRead)
+ {
throw new InvalidDataException("Must be able to read from stream");
+ }
}
}
}
diff --git a/src/SharpCompress/Compressors/Xz/XZStream.cs b/src/SharpCompress/Compressors/Xz/XZStream.cs
index df755c268..8fd9d9bd3 100644
--- a/src/SharpCompress/Compressors/Xz/XZStream.cs
+++ b/src/SharpCompress/Compressors/Xz/XZStream.cs
@@ -50,9 +50,15 @@ public override int Read(byte[] buffer, int offset, int count)
{
int bytesRead = 0;
if (_endOfStream)
+ {
return bytesRead;
+ }
+
if (!HeaderIsRead)
+ {
ReadHeader();
+ }
+
bytesRead = ReadBlocks(buffer, offset, count);
if (bytesRead < count)
{
@@ -86,18 +92,27 @@ private int ReadBlocks(byte[] buffer, int offset, int count)
{
int bytesRead = 0;
if (_currentBlock == null)
+ {
NextBlock();
+ }
+
for (;;)
{
try
{
if (bytesRead >= count)
+ {
break;
+ }
+
int remaining = count - bytesRead;
int newOffset = offset + bytesRead;
int justRead = _currentBlock.Read(buffer, newOffset, remaining);
if (justRead < remaining)
+ {
NextBlock();
+ }
+
bytesRead += justRead;
}
catch (XZIndexMarkerReachedException)
diff --git a/src/SharpCompress/Crypto/Crc32Stream.cs b/src/SharpCompress/Crypto/Crc32Stream.cs
index 35dd85bdb..09bd100ca 100644
--- a/src/SharpCompress/Crypto/Crc32Stream.cs
+++ b/src/SharpCompress/Crypto/Crc32Stream.cs
@@ -1,5 +1,4 @@
using System;
-using System.Collections.Generic;
using System.IO;
namespace SharpCompress.Crypto
@@ -79,7 +78,9 @@ public static uint Compute(uint polynomial, uint seed, byte[] buffer)
private static uint[] InitializeTable(uint polynomial)
{
if (polynomial == DefaultPolynomial && defaultTable != null)
+ {
return defaultTable;
+ }
var createTable = new uint[256];
for (var i = 0; i < 256; i++)
@@ -87,14 +88,21 @@ private static uint[] InitializeTable(uint polynomial)
var entry = (uint)i;
for (var j = 0; j < 8; j++)
if ((entry & 1) == 1)
+ {
entry = (entry >> 1) ^ polynomial;
+ }
else
+ {
entry = entry >> 1;
+ }
+
createTable[i] = entry;
}
if (polynomial == DefaultPolynomial)
+ {
defaultTable = createTable;
+ }
return createTable;
}
diff --git a/src/SharpCompress/IO/RewindableStream.cs b/src/SharpCompress/IO/RewindableStream.cs
index 3cfc035cb..97bd0f4d3 100644
--- a/src/SharpCompress/IO/RewindableStream.cs
+++ b/src/SharpCompress/IO/RewindableStream.cs
@@ -1,6 +1,5 @@
using System;
using System.IO;
-using SharpCompress.Compressors.Filters;
namespace SharpCompress.IO
{
diff --git a/src/SharpCompress/Utility.cs b/src/SharpCompress/Utility.cs
index 2137c96eb..0717d7930 100644
--- a/src/SharpCompress/Utility.cs
+++ b/src/SharpCompress/Utility.cs
@@ -71,11 +71,19 @@ public static void ForEach(this IEnumerable items, Action action)
public static void Copy(Array sourceArray, long sourceIndex, Array destinationArray, long destinationIndex, long length)
{
if (sourceIndex > Int32.MaxValue || sourceIndex < Int32.MinValue)
+ {
throw new ArgumentOutOfRangeException();
+ }
+
if (destinationIndex > Int32.MaxValue || destinationIndex < Int32.MinValue)
+ {
throw new ArgumentOutOfRangeException();
+ }
+
if (length > Int32.MaxValue || length < Int32.MinValue)
+ {
throw new ArgumentOutOfRangeException();
+ }
Array.Copy(sourceArray, (int)sourceIndex, destinationArray, (int)destinationIndex, (int)length);
}
diff --git a/src/SharpCompress/Writers/Zip/ZipCentralDirectoryEntry.cs b/src/SharpCompress/Writers/Zip/ZipCentralDirectoryEntry.cs
index 15f62ae36..64a595c96 100644
--- a/src/SharpCompress/Writers/Zip/ZipCentralDirectoryEntry.cs
+++ b/src/SharpCompress/Writers/Zip/ZipCentralDirectoryEntry.cs
@@ -55,7 +55,9 @@ internal uint Write(Stream outputStream)
// so this extra guard is not required, but kept to simplify changing the code
// once the zip64 post-data issue is resolved
if (!zip64_stream)
+ {
flags |= HeaderFlags.UsePostDataDescriptor;
+ }
if (usedCompression == ZipCompressionMethod.LZMA)
{
@@ -65,7 +67,9 @@ internal uint Write(Stream outputStream)
// Support for zero byte files
if (Decompressed == 0 && Compressed == 0)
+ {
usedCompression = ZipCompressionMethod.None;
+ }
byte[] intBuf = new byte[] { 80, 75, 1, 2, version, 0, version, 0 };
//constant sig, then version made by, then version to extract
diff --git a/src/SharpCompress/Writers/Zip/ZipWriter.cs b/src/SharpCompress/Writers/Zip/ZipWriter.cs
index 016aec3e9..d0de08814 100644
--- a/src/SharpCompress/Writers/Zip/ZipWriter.cs
+++ b/src/SharpCompress/Writers/Zip/ZipWriter.cs
@@ -132,7 +132,9 @@ public Stream WriteToStream(string entryPath, ZipWriterEntryOptions options)
// Use the archive default setting for zip64 and allow overrides
var useZip64 = isZip64;
if (options.EnableZip64.HasValue)
+ {
useZip64 = options.EnableZip64.Value;
+ }
var headersize = (uint)WriteHeader(entryPath, options, entry, useZip64);
streamPosition += headersize;
@@ -157,7 +159,9 @@ private int WriteHeader(string filename, ZipWriterEntryOptions zipWriterEntryOpt
{
// We err on the side of caution until the zip specification clarifies how to support this
if (!OutputStream.CanSeek && useZip64)
+ {
throw new NotSupportedException("Zip64 extensions are not supported on non-seekable streams");
+ }
var explicitZipCompressionInfo = ToZipCompressionMethod(zipWriterEntryOptions.CompressionType ?? compressionType);
byte[] encodedFilename = WriterOptions.ArchiveEncoding.Encode(filename);
@@ -169,9 +173,13 @@ private int WriteHeader(string filename, ZipWriterEntryOptions zipWriterEntryOpt
if (explicitZipCompressionInfo == ZipCompressionMethod.Deflate)
{
if (OutputStream.CanSeek && useZip64)
+ {
OutputStream.Write(new byte[] { 45, 0 }, 0, 2); //smallest allowed version for zip64
+ }
else
+ {
OutputStream.Write(new byte[] { 20, 0 }, 0, 2); //older version which is more compatible
+ }
}
else
{
@@ -204,7 +212,9 @@ private int WriteHeader(string filename, ZipWriterEntryOptions zipWriterEntryOpt
var extralength = 0;
if (OutputStream.CanSeek && useZip64)
+ {
extralength = 2 + 2 + 8 + 8;
+ }
BinaryPrimitives.WriteUInt16LittleEndian(intBuf, (ushort)extralength);
OutputStream.Write(intBuf, 0, 2); // extra length
@@ -428,7 +438,9 @@ protected override void Dispose(bool disposing)
// Ideally, we should not throw from Dispose()
// We should not get here as the Write call checks the limits
if (zip64 && entry.Zip64HeaderOffset == 0)
+ {
throw new NotSupportedException("Attempted to write a stream that is larger than 4GiB without setting the zip64 option");
+ }
// If we have pre-allocated space for zip64 data,
// fill it out, even if it is not required
@@ -459,7 +471,9 @@ protected override void Dispose(bool disposing)
// Ideally, we should not throw from Dispose()
// We should not get here as the Write call checks the limits
if (zip64)
+ {
throw new NotSupportedException("Streams larger than 4GiB are not supported for non-seekable streams");
+ }
byte[] intBuf = new byte[4];
BinaryPrimitives.WriteUInt32LittleEndian(intBuf, ZipHeaderFactory.POST_DATA_DESCRIPTOR);
@@ -501,7 +515,9 @@ public override void Write(byte[] buffer, int offset, int count)
{
// Pre-check, the counting.Count is not exact, as we do not know the size before having actually compressed it
if (limitsExceeded || ((decompressed + (uint)count) > uint.MaxValue) || (counting.Count + (uint)count) > uint.MaxValue)
+ {
throw new NotSupportedException("Attempted to write a stream that is larger than 4GiB without setting the zip64 option");
+ }
}
decompressed += (uint)count;
diff --git a/src/SharpCompress/Writers/Zip/ZipWriterOptions.cs b/src/SharpCompress/Writers/Zip/ZipWriterOptions.cs
index 660e567a7..f12d0bd09 100644
--- a/src/SharpCompress/Writers/Zip/ZipWriterOptions.cs
+++ b/src/SharpCompress/Writers/Zip/ZipWriterOptions.cs
@@ -1,5 +1,4 @@
-using SharpCompress.Archives;
-using SharpCompress.Common;
+using SharpCompress.Common;
using SharpCompress.Compressors.Deflate;
namespace SharpCompress.Writers.Zip
diff --git a/tests/SharpCompress.Test/ADCTest.cs b/tests/SharpCompress.Test/ADCTest.cs
index 9c8998dc6..e5151bec8 100644
--- a/tests/SharpCompress.Test/ADCTest.cs
+++ b/tests/SharpCompress.Test/ADCTest.cs
@@ -23,7 +23,6 @@
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-using System;
using System.IO;
using SharpCompress.Compressors;
using SharpCompress.Compressors.ADC;
diff --git a/tests/SharpCompress.Test/WriterTests.cs b/tests/SharpCompress.Test/WriterTests.cs
index d9ceaf999..85b65c2cd 100644
--- a/tests/SharpCompress.Test/WriterTests.cs
+++ b/tests/SharpCompress.Test/WriterTests.cs
@@ -1,5 +1,4 @@
-using System;
-using System.IO;
+using System.IO;
using System.Text;
using SharpCompress.Common;
using SharpCompress.IO;
diff --git a/tests/SharpCompress.Test/Xz/XZBlockTests.cs b/tests/SharpCompress.Test/Xz/XZBlockTests.cs
index 7a528b44d..50ac0cb1f 100644
--- a/tests/SharpCompress.Test/Xz/XZBlockTests.cs
+++ b/tests/SharpCompress.Test/Xz/XZBlockTests.cs
@@ -22,7 +22,10 @@ private byte[] ReadBytes(XZBlock block, int bytesToRead)
byte[] buffer = new byte[bytesToRead];
var read = block.Read(buffer, 0, bytesToRead);
if (read != bytesToRead)
+ {
throw new EndOfStreamException();
+ }
+
return buffer;
}
diff --git a/tests/SharpCompress.Test/Zip/Zip64Tests.cs b/tests/SharpCompress.Test/Zip/Zip64Tests.cs
index bac9a7944..c5cb7c7ab 100644
--- a/tests/SharpCompress.Test/Zip/Zip64Tests.cs
+++ b/tests/SharpCompress.Test/Zip/Zip64Tests.cs
@@ -104,23 +104,36 @@ public void RunSingleTest(long files, long filesize, bool set_zip64, bool forwar
filename = Path.Combine(SCRATCH2_FILES_PATH, filename);
if (File.Exists(filename))
+ {
File.Delete(filename);
+ }
if (!File.Exists(filename))
+ {
CreateZipArchive(filename, files, filesize, write_chunk_size, set_zip64, forward_only);
+ }
var resForward = ReadForwardOnly(filename);
if (resForward.Item1 != files)
+ {
throw new Exception($"Incorrect number of items reported: {resForward.Item1}, should have been {files}");
+ }
- if (resForward.Item2 != files * filesize)
- throw new Exception($"Incorrect combined size reported: {resForward.Item2}, should have been {files * filesize}");
+ if (resForward.Item2 != files * filesize)
+ {
+ throw new Exception($"Incorrect combined size reported: {resForward.Item2}, should have been {files * filesize}");
+ }
var resArchive = ReadArchive(filename);
if (resArchive.Item1 != files)
+ {
throw new Exception($"Incorrect number of items reported: {resArchive.Item1}, should have been {files}");
+ }
+
if (resArchive.Item2 != files * filesize)
+ {
throw new Exception($"Incorrect number of items reported: {resArchive.Item2}, should have been {files * filesize}");
+ }
}
public void CreateZipArchive(string filename, long files, long filesize, long chunksize, bool set_zip64, bool forward_only)
@@ -166,13 +179,17 @@ public Tuple ReadForwardOnly(string filename)
count++;
if (prev != null)
- size += prev.Size;
-
- prev = rd.Entry;
+ {
+ size += prev.Size;
+ }
+
+ prev = rd.Entry;
}
if (prev != null)
- size += prev.Size;
+ {
+ size += prev.Size;
+ }
return new Tuple(count, size);
}