/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.commons.compress.archivers.zip;

import static org.apache.commons.compress.archivers.zip.ZipConstants.DWORD;
import static org.apache.commons.compress.archivers.zip.ZipConstants.SHORT;
import static org.apache.commons.compress.archivers.zip.ZipConstants.WORD;
import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MAGIC;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.EOFException;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.PushbackInputStream;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Objects;
import java.util.zip.CRC32;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
import java.util.zip.ZipEntry;
import java.util.zip.ZipException;

import org.apache.commons.compress.archivers.ArchiveEntry;
import org.apache.commons.compress.archivers.ArchiveInputStream;
import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
import org.apache.commons.compress.compressors.deflate64.Deflate64CompressorInputStream;
import org.apache.commons.compress.utils.ArchiveUtils;
import org.apache.commons.compress.utils.IOUtils;
import org.apache.commons.compress.utils.InputStreamStatistics;

/**
 * Implements an input stream that can read Zip archives.
 *
 * <p>As of Apache Commons Compress 1.3 it transparently supports Zip64
 * extensions and thus individual entries and archives larger than 4
 * GB or with more than 65536 entries.</p>
 *
 * <p>The {@link ZipFile} class is preferred when reading from files
 * as {@link ZipArchiveInputStream} is limited by not being able to
 * read the central directory header before returning entries.
In 061 * particular {@link ZipArchiveInputStream}</p> 062 * 063 * <ul> 064 * 065 * <li>may return entries that are not part of the central directory 066 * at all and shouldn't be considered part of the archive.</li> 067 * 068 * <li>may return several entries with the same name.</li> 069 * 070 * <li>will not return internal or external attributes.</li> 071 * 072 * <li>may return incomplete extra field data.</li> 073 * 074 * <li>may return unknown sizes and CRC values for entries until the 075 * next entry has been reached if the archive uses the data 076 * descriptor feature.</li> 077 * 078 * </ul> 079 * 080 * @see ZipFile 081 * @NotThreadSafe 082 */ 083public class ZipArchiveInputStream extends ArchiveInputStream implements InputStreamStatistics { 084 085 /** 086 * Bounded input stream adapted from commons-io 087 */ 088 private class BoundedInputStream extends FilterInputStream { 089 090 /** the max length to provide */ 091 private final long max; 092 093 /** the number of bytes already returned */ 094 private long pos; 095 096 /** 097 * Creates a new {@code BoundedInputStream} that wraps the given input 098 * stream and limits it to a certain size. 099 * 100 * @param in The wrapped input stream 101 * @param size The maximum number of bytes to return 102 */ 103 public BoundedInputStream(final InputStream in, final long size) { 104 super(in); 105 this.max = size; 106 } 107 108 @Override 109 public int available() throws IOException { 110 if (max >= 0 && pos >= max) { 111 return 0; 112 } 113 return in.available(); 114 } 115 116 @Override 117 public int read() throws IOException { 118 if (max >= 0 && pos >= max) { 119 return -1; 120 } 121 final int result = in.read(); 122 pos++; 123 count(1); 124 current.bytesReadFromStream++; 125 return result; 126 } 127 128 @Override 129 public int read(final byte[] b) throws IOException { 130 return this.read(b, 0, b.length); 131 } 132 133 @Override 134 public int read(final byte[] b, final int off, final int len) throws IOException { 135 if (len == 0) { 136 return 0; 137 } 138 if (max >= 0 && pos >= max) { 139 return -1; 140 } 141 final long maxRead = max >= 0 ? Math.min(len, max - pos) : len; 142 final int bytesRead = in.read(b, off, (int) maxRead); 143 144 if (bytesRead == -1) { 145 return -1; 146 } 147 148 pos += bytesRead; 149 count(bytesRead); 150 current.bytesReadFromStream += bytesRead; 151 return bytesRead; 152 } 153 154 @Override 155 public long skip(final long n) throws IOException { 156 final long toSkip = max >= 0 ? Math.min(n, max - pos) : n; 157 final long skippedBytes = IOUtils.skip(in, toSkip); 158 pos += skippedBytes; 159 return skippedBytes; 160 } 161 } 162 163 /** 164 * Structure collecting information for the entry that is 165 * currently being read. 166 */ 167 private static final class CurrentEntry { 168 169 /** 170 * Current ZIP entry. 171 */ 172 private final ZipArchiveEntry entry = new ZipArchiveEntry(); 173 174 /** 175 * Does the entry use a data descriptor? 176 */ 177 private boolean hasDataDescriptor; 178 179 /** 180 * Does the entry have a ZIP64 extended information extra field. 181 */ 182 private boolean usesZip64; 183 184 /** 185 * Number of bytes of entry content read by the client if the 186 * entry is STORED. 187 */ 188 private long bytesRead; 189 190 /** 191 * Number of bytes of entry content read from the stream. 
192 * 193 * <p>This may be more than the actual entry's length as some 194 * stuff gets buffered up and needs to be pushed back when the 195 * end of the entry has been reached.</p> 196 */ 197 private long bytesReadFromStream; 198 199 /** 200 * The checksum calculated as the current entry is read. 201 */ 202 private final CRC32 crc = new CRC32(); 203 204 /** 205 * The input stream decompressing the data for shrunk and imploded entries. 206 */ 207 private InputStream inputStream; 208 209 @SuppressWarnings("unchecked") // Caller beware 210 private <T extends InputStream> T checkInputStream() { 211 return (T) Objects.requireNonNull(inputStream, "inputStream"); 212 } 213 } 214 215 private static final int LFH_LEN = 30; 216 /* 217 local file header signature WORD 218 version needed to extract SHORT 219 general purpose bit flag SHORT 220 compression method SHORT 221 last mod file time SHORT 222 last mod file date SHORT 223 crc-32 WORD 224 compressed size WORD 225 uncompressed size WORD 226 file name length SHORT 227 extra field length SHORT 228 */ 229 230 private static final int CFH_LEN = 46; 231 /* 232 central file header signature WORD 233 version made by SHORT 234 version needed to extract SHORT 235 general purpose bit flag SHORT 236 compression method SHORT 237 last mod file time SHORT 238 last mod file date SHORT 239 crc-32 WORD 240 compressed size WORD 241 uncompressed size WORD 242 file name length SHORT 243 extra field length SHORT 244 file comment length SHORT 245 disk number start SHORT 246 internal file attributes SHORT 247 external file attributes WORD 248 relative offset of local header WORD 249 */ 250 251 private static final long TWO_EXP_32 = ZIP64_MAGIC + 1; 252 253 private static final String USE_ZIPFILE_INSTEAD_OF_STREAM_DISCLAIMER = 254 " while reading a stored entry using data descriptor. Either the archive is broken" 255 + " or it can not be read using ZipArchiveInputStream and you must use ZipFile." 256 + " A common cause for this is a ZIP archive containing a ZIP archive." 257 + " See http://commons.apache.org/proper/commons-compress/zip.html#ZipArchiveInputStream_vs_ZipFile"; 258 259 private static final byte[] LFH = ZipLong.LFH_SIG.getBytes(); 260 261 private static final byte[] CFH = ZipLong.CFH_SIG.getBytes(); 262 263 private static final byte[] DD = ZipLong.DD_SIG.getBytes(); 264 265 private static final byte[] APK_SIGNING_BLOCK_MAGIC = { 266 'A', 'P', 'K', ' ', 'S', 'i', 'g', ' ', 'B', 'l', 'o', 'c', 'k', ' ', '4', '2', 267 }; 268 269 private static final BigInteger LONG_MAX = BigInteger.valueOf(Long.MAX_VALUE); 270 271 private static boolean checksig(final byte[] signature, final byte[] expected) { 272 for (int i = 0; i < expected.length; i++) { 273 if (signature[i] != expected[i]) { 274 return false; 275 } 276 } 277 return true; 278 } 279 280 /** 281 * Checks if the signature matches what is expected for a ZIP file. 282 * Does not currently handle self-extracting ZIPs which may have arbitrary 283 * leading content. 
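     *
     * <p>A minimal sniffing sketch (the probed stream {@code in} is the caller's
     * own stream and must be rewound, or the bytes pushed back, afterwards):</p>
     * <pre>{@code
     * byte[] sig = new byte[4];
     * int read = in.read(sig);
     * boolean looksLikeZip = ZipArchiveInputStream.matches(sig, read);
     * }</pre>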
     *
     * @param signature the bytes to check
     * @param length the number of bytes to check
     * @return true, if this stream is a ZIP archive stream, false otherwise
     */
    public static boolean matches(final byte[] signature, final int length) {
        if (length < ZipArchiveOutputStream.LFH_SIG.length) {
            return false;
        }

        return checksig(signature, ZipArchiveOutputStream.LFH_SIG) // normal file
            || checksig(signature, ZipArchiveOutputStream.EOCD_SIG) // empty zip
            || checksig(signature, ZipArchiveOutputStream.DD_SIG) // split zip
            || checksig(signature, ZipLong.SINGLE_SEGMENT_SPLIT_MARKER.getBytes());
    }

    /** The ZIP encoding to use for file names and the file comment. */
    private final ZipEncoding zipEncoding;

    // the provided encoding (for unit tests)
    final String encoding;

    /** Whether to look for and use Unicode extra fields. */
    private final boolean useUnicodeExtraFields;

    /** Wrapped stream, will always be a PushbackInputStream. */
    private final InputStream inputStream;
    /** Inflater used for all deflated entries. */
    private final Inflater inf = new Inflater(true);
    /** Buffer used to read from the wrapped stream. */
    private final ByteBuffer buf = ByteBuffer.allocate(ZipArchiveOutputStream.BUFFER_SIZE);
    /** The entry that is currently being read. */
    private CurrentEntry current;
    /** Whether the stream has been closed. */
    private boolean closed;

    /** Whether the stream has reached the central directory - and thus found all entries. */
    private boolean hitCentralDirectory;

    /**
     * When reading a stored entry that uses the data descriptor this
     * stream has to read the full entry and caches it. This is the
     * cache.
     */
    private ByteArrayInputStream lastStoredEntry;

    /**
     * Whether the stream will try to read STORED entries that use a data descriptor.
     * Setting it to true means we will not stop reading an entry with the compressed
     * size, instead we will stop reading an entry when a data descriptor is met (by
     * finding the Data Descriptor Signature). This will completely break down in some
     * cases - like JARs in WARs.
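     * <p>
     * As a sketch, opting in looks like
     * {@code new ZipArchiveInputStream(rawStream, "UTF-8", true, true)}, where
     * {@code rawStream} stands for the caller's own input stream and the other
     * arguments keep their usual defaults.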
336 * <p> 337 * See also : 338 * https://issues.apache.org/jira/projects/COMPRESS/issues/COMPRESS-555 339 * https://github.com/apache/commons-compress/pull/137#issuecomment-690835644 340 */ 341 private final boolean allowStoredEntriesWithDataDescriptor; 342 343 /** Count decompressed bytes for current entry */ 344 private long uncompressedCount; 345 346 /** Whether the stream will try to skip the ZIP split signature(08074B50) at the beginning **/ 347 private final boolean skipSplitSig; 348 349 // cached buffers - must only be used locally in the class (COMPRESS-172 - reduce garbage collection) 350 private final byte[] lfhBuf = new byte[LFH_LEN]; 351 352 private final byte[] skipBuf = new byte[1024]; 353 354 private final byte[] shortBuf = new byte[SHORT]; 355 356 private final byte[] wordBuf = new byte[WORD]; 357 358 private final byte[] twoDwordBuf = new byte[2 * DWORD]; 359 360 private int entriesRead; 361 362 /** 363 * Create an instance using UTF-8 encoding 364 * @param inputStream the stream to wrap 365 */ 366 public ZipArchiveInputStream(final InputStream inputStream) { 367 this(inputStream, ZipEncodingHelper.UTF8); 368 } 369 370 /** 371 * Create an instance using the specified encoding 372 * @param inputStream the stream to wrap 373 * @param encoding the encoding to use for file names, use null 374 * for the platform's default encoding 375 * @since 1.5 376 */ 377 public ZipArchiveInputStream(final InputStream inputStream, final String encoding) { 378 this(inputStream, encoding, true); 379 } 380 381 /** 382 * Create an instance using the specified encoding 383 * @param inputStream the stream to wrap 384 * @param encoding the encoding to use for file names, use null 385 * for the platform's default encoding 386 * @param useUnicodeExtraFields whether to use InfoZIP Unicode 387 * Extra Fields (if present) to set the file names. 388 */ 389 public ZipArchiveInputStream(final InputStream inputStream, final String encoding, final boolean useUnicodeExtraFields) { 390 this(inputStream, encoding, useUnicodeExtraFields, false); 391 } 392 393 /** 394 * Create an instance using the specified encoding 395 * @param inputStream the stream to wrap 396 * @param encoding the encoding to use for file names, use null 397 * for the platform's default encoding 398 * @param useUnicodeExtraFields whether to use InfoZIP Unicode 399 * Extra Fields (if present) to set the file names. 400 * @param allowStoredEntriesWithDataDescriptor whether the stream 401 * will try to read STORED entries that use a data descriptor 402 * @since 1.1 403 */ 404 public ZipArchiveInputStream(final InputStream inputStream, 405 final String encoding, 406 final boolean useUnicodeExtraFields, 407 final boolean allowStoredEntriesWithDataDescriptor) { 408 this(inputStream, encoding, useUnicodeExtraFields, allowStoredEntriesWithDataDescriptor, false); 409 } 410 411 /** 412 * Create an instance using the specified encoding 413 * @param inputStream the stream to wrap 414 * @param encoding the encoding to use for file names, use null 415 * for the platform's default encoding 416 * @param useUnicodeExtraFields whether to use InfoZIP Unicode 417 * Extra Fields (if present) to set the file names. 418 * @param allowStoredEntriesWithDataDescriptor whether the stream 419 * will try to read STORED entries that use a data descriptor 420 * @param skipSplitSig Whether the stream will try to skip the zip 421 * split signature(08074B50) at the beginning. You will need to 422 * set this to true if you want to read a split archive. 
423 * @since 1.20 424 */ 425 public ZipArchiveInputStream(final InputStream inputStream, 426 final String encoding, 427 final boolean useUnicodeExtraFields, 428 final boolean allowStoredEntriesWithDataDescriptor, 429 final boolean skipSplitSig) { 430 this.encoding = encoding; 431 zipEncoding = ZipEncodingHelper.getZipEncoding(encoding); 432 this.useUnicodeExtraFields = useUnicodeExtraFields; 433 this.inputStream = new PushbackInputStream(inputStream, buf.capacity()); 434 this.allowStoredEntriesWithDataDescriptor = allowStoredEntriesWithDataDescriptor; 435 this.skipSplitSig = skipSplitSig; 436 // haven't read anything so far 437 buf.limit(0); 438 } 439 440 /** 441 * Checks whether the current buffer contains the signature of a 442 * "data descriptor", "local file header" or 443 * "central directory entry". 444 * 445 * <p>If it contains such a signature, reads the data descriptor 446 * and positions the stream right after the data descriptor.</p> 447 */ 448 private boolean bufferContainsSignature(final ByteArrayOutputStream bos, final int offset, final int lastRead, final int expectedDDLen) 449 throws IOException { 450 451 boolean done = false; 452 for (int i = 0; !done && i < offset + lastRead - 4; i++) { 453 if (buf.array()[i] == LFH[0] && buf.array()[i + 1] == LFH[1]) { 454 int expectDDPos = i; 455 if (i >= expectedDDLen && 456 (buf.array()[i + 2] == LFH[2] && buf.array()[i + 3] == LFH[3]) 457 || (buf.array()[i + 2] == CFH[2] && buf.array()[i + 3] == CFH[3])) { 458 // found an LFH or CFH: 459 expectDDPos = i - expectedDDLen; 460 done = true; 461 } 462 else if (buf.array()[i + 2] == DD[2] && buf.array()[i + 3] == DD[3]) { 463 // found DD: 464 done = true; 465 } 466 if (done) { 467 // * push back bytes read in excess as well as the data 468 // descriptor 469 // * copy the remaining bytes to cache 470 // * read data descriptor 471 pushback(buf.array(), expectDDPos, offset + lastRead - expectDDPos); 472 bos.write(buf.array(), 0, expectDDPos); 473 readDataDescriptor(); 474 } 475 } 476 } 477 return done; 478 } 479 480 /** 481 * If the last read bytes could hold a data descriptor and an 482 * incomplete signature then save the last bytes to the front of 483 * the buffer and cache everything in front of the potential data 484 * descriptor into the given ByteArrayOutputStream. 485 * 486 * <p>Data descriptor plus incomplete signature (3 bytes in the 487 * worst case) can be 20 bytes max.</p> 488 */ 489 private int cacheBytesRead(final ByteArrayOutputStream bos, int offset, final int lastRead, final int expectedDDLen) { 490 final int cacheable = offset + lastRead - expectedDDLen - 3; 491 if (cacheable > 0) { 492 bos.write(buf.array(), 0, cacheable); 493 System.arraycopy(buf.array(), cacheable, buf.array(), 0, expectedDDLen + 3); 494 offset = expectedDDLen + 3; 495 } else { 496 offset += lastRead; 497 } 498 return offset; 499 } 500 501 /** 502 * Whether this class is able to read the given entry. 
503 * 504 * <p>May return false if it is set up to use encryption or a 505 * compression method that hasn't been implemented yet.</p> 506 * @since 1.1 507 */ 508 @Override 509 public boolean canReadEntryData(final ArchiveEntry ae) { 510 if (ae instanceof ZipArchiveEntry) { 511 final ZipArchiveEntry ze = (ZipArchiveEntry) ae; 512 return ZipUtil.canHandleEntryData(ze) 513 && supportsDataDescriptorFor(ze) 514 && supportsCompressedSizeFor(ze); 515 } 516 return false; 517 } 518 519 @Override 520 public void close() throws IOException { 521 if (!closed) { 522 closed = true; 523 try { 524 inputStream.close(); 525 } finally { 526 inf.end(); 527 } 528 } 529 } 530 531 /** 532 * Closes the current ZIP archive entry and positions the underlying 533 * stream to the beginning of the next entry. All per-entry variables 534 * and data structures are cleared. 535 * <p> 536 * If the compressed size of this entry is included in the entry header, 537 * then any outstanding bytes are simply skipped from the underlying 538 * stream without uncompressing them. This allows an entry to be safely 539 * closed even if the compression method is unsupported. 540 * <p> 541 * In case we don't know the compressed size of this entry or have 542 * already buffered too much data from the underlying stream to support 543 * uncompression, then the uncompression process is completed and the 544 * end position of the stream is adjusted based on the result of that 545 * process. 546 * 547 * @throws IOException if an error occurs 548 */ 549 private void closeEntry() throws IOException { 550 if (closed) { 551 throw new IOException("The stream is closed"); 552 } 553 if (current == null) { 554 return; 555 } 556 557 // Ensure all entry bytes are read 558 if (currentEntryHasOutstandingBytes()) { 559 drainCurrentEntryData(); 560 } else { 561 // this is guaranteed to exhaust the stream 562 skip(Long.MAX_VALUE); //NOSONAR 563 564 final long inB = current.entry.getMethod() == ZipArchiveOutputStream.DEFLATED 565 ? getBytesInflated() : current.bytesRead; 566 567 // this is at most a single read() operation and can't 568 // exceed the range of int 569 final int diff = (int) (current.bytesReadFromStream - inB); 570 571 // Pushback any required bytes 572 if (diff > 0) { 573 pushback(buf.array(), buf.limit() - diff, diff); 574 current.bytesReadFromStream -= diff; 575 } 576 577 // Drain remainder of entry if not all data bytes were required 578 if (currentEntryHasOutstandingBytes()) { 579 drainCurrentEntryData(); 580 } 581 } 582 583 if (lastStoredEntry == null && current.hasDataDescriptor) { 584 readDataDescriptor(); 585 } 586 587 inf.reset(); 588 buf.clear().flip(); 589 current = null; 590 lastStoredEntry = null; 591 } 592 593 /** 594 * If the compressed size of the current entry is included in the entry header 595 * and there are any outstanding bytes in the underlying stream, then 596 * this returns true. 597 * 598 * @return true, if current entry is determined to have outstanding bytes, false otherwise 599 */ 600 private boolean currentEntryHasOutstandingBytes() { 601 return current.bytesReadFromStream <= current.entry.getCompressedSize() 602 && !current.hasDataDescriptor; 603 } 604 605 /** 606 * Read all data of the current entry from the underlying stream 607 * that hasn't been read, yet. 
608 */ 609 private void drainCurrentEntryData() throws IOException { 610 long remaining = current.entry.getCompressedSize() - current.bytesReadFromStream; 611 while (remaining > 0) { 612 final long n = inputStream.read(buf.array(), 0, (int) Math.min(buf.capacity(), remaining)); 613 if (n < 0) { 614 throw new EOFException("Truncated ZIP entry: " 615 + ArchiveUtils.sanitize(current.entry.getName())); 616 } 617 count(n); 618 remaining -= n; 619 } 620 } 621 622 private int fill() throws IOException { 623 if (closed) { 624 throw new IOException("The stream is closed"); 625 } 626 final int length = inputStream.read(buf.array()); 627 if (length > 0) { 628 buf.limit(length); 629 count(buf.limit()); 630 inf.setInput(buf.array(), 0, buf.limit()); 631 } 632 return length; 633 } 634 635 /** 636 * Reads forward until the signature of the "End of central 637 * directory" record is found. 638 */ 639 private boolean findEocdRecord() throws IOException { 640 int currentByte = -1; 641 boolean skipReadCall = false; 642 while (skipReadCall || (currentByte = readOneByte()) > -1) { 643 skipReadCall = false; 644 if (!isFirstByteOfEocdSig(currentByte)) { 645 continue; 646 } 647 currentByte = readOneByte(); 648 if (currentByte != ZipArchiveOutputStream.EOCD_SIG[1]) { 649 if (currentByte == -1) { 650 break; 651 } 652 skipReadCall = isFirstByteOfEocdSig(currentByte); 653 continue; 654 } 655 currentByte = readOneByte(); 656 if (currentByte != ZipArchiveOutputStream.EOCD_SIG[2]) { 657 if (currentByte == -1) { 658 break; 659 } 660 skipReadCall = isFirstByteOfEocdSig(currentByte); 661 continue; 662 } 663 currentByte = readOneByte(); 664 if (currentByte == -1) { 665 break; 666 } 667 if (currentByte == ZipArchiveOutputStream.EOCD_SIG[3]) { 668 return true; 669 } 670 skipReadCall = isFirstByteOfEocdSig(currentByte); 671 } 672 return false; 673 } 674 675 /** 676 * Get the number of bytes Inflater has actually processed. 677 * 678 * <p>for Java < Java7 the getBytes* methods in 679 * Inflater/Deflater seem to return unsigned ints rather than 680 * longs that start over with 0 at 2^32.</p> 681 * 682 * <p>The stream knows how many bytes it has read, but not how 683 * many the Inflater actually consumed - it should be between the 684 * total number of bytes read for the entry and the total number 685 * minus the last read operation. Here we just try to make the 686 * value close enough to the bytes we've read by assuming the 687 * number of bytes consumed must be smaller than (or equal to) the 688 * number of bytes read but not smaller by more than 2^32.</p> 689 */ 690 private long getBytesInflated() { 691 long inB = inf.getBytesRead(); 692 if (current.bytesReadFromStream >= TWO_EXP_32) { 693 while (inB + TWO_EXP_32 <= current.bytesReadFromStream) { 694 inB += TWO_EXP_32; 695 } 696 } 697 return inB; 698 } 699 700 /** 701 * @since 1.17 702 */ 703 @SuppressWarnings("resource") // checkInputStream() does not allocate. 
704 @Override 705 public long getCompressedCount() { 706 final int method = current.entry.getMethod(); 707 if (method == ZipArchiveOutputStream.STORED) { 708 return current.bytesRead; 709 } 710 if (method == ZipArchiveOutputStream.DEFLATED) { 711 return getBytesInflated(); 712 } 713 if (method == ZipMethod.UNSHRINKING.getCode() 714 || method == ZipMethod.IMPLODING.getCode() 715 || method == ZipMethod.ENHANCED_DEFLATED.getCode() 716 || method == ZipMethod.BZIP2.getCode()) { 717 return ((InputStreamStatistics) current.checkInputStream()).getCompressedCount(); 718 } 719 return -1; 720 } 721 722 @Override 723 public ArchiveEntry getNextEntry() throws IOException { 724 return getNextZipEntry(); 725 } 726 727 public ZipArchiveEntry getNextZipEntry() throws IOException { 728 uncompressedCount = 0; 729 730 boolean firstEntry = true; 731 if (closed || hitCentralDirectory) { 732 return null; 733 } 734 if (current != null) { 735 closeEntry(); 736 firstEntry = false; 737 } 738 739 final long currentHeaderOffset = getBytesRead(); 740 try { 741 if (firstEntry) { 742 // split archives have a special signature before the 743 // first local file header - look for it and fail with 744 // the appropriate error message if this is a split 745 // archive. 746 readFirstLocalFileHeader(); 747 } else { 748 readFully(lfhBuf); 749 } 750 } catch (final EOFException e) { //NOSONAR 751 return null; 752 } 753 754 final ZipLong sig = new ZipLong(lfhBuf); 755 if (!sig.equals(ZipLong.LFH_SIG)) { 756 if (sig.equals(ZipLong.CFH_SIG) || sig.equals(ZipLong.AED_SIG) || isApkSigningBlock(lfhBuf)) { 757 hitCentralDirectory = true; 758 skipRemainderOfArchive(); 759 return null; 760 } 761 throw new ZipException(String.format("Unexpected record signature: 0x%x", sig.getValue())); 762 } 763 764 int off = WORD; 765 current = new CurrentEntry(); 766 767 final int versionMadeBy = ZipShort.getValue(lfhBuf, off); 768 off += SHORT; 769 current.entry.setPlatform((versionMadeBy >> ZipFile.BYTE_SHIFT) & ZipFile.NIBLET_MASK); 770 771 final GeneralPurposeBit gpFlag = GeneralPurposeBit.parse(lfhBuf, off); 772 final boolean hasUTF8Flag = gpFlag.usesUTF8ForNames(); 773 final ZipEncoding entryEncoding = hasUTF8Flag ? 
ZipEncodingHelper.UTF8_ZIP_ENCODING : zipEncoding; 774 current.hasDataDescriptor = gpFlag.usesDataDescriptor(); 775 current.entry.setGeneralPurposeBit(gpFlag); 776 777 off += SHORT; 778 779 current.entry.setMethod(ZipShort.getValue(lfhBuf, off)); 780 off += SHORT; 781 782 final long time = ZipUtil.dosToJavaTime(ZipLong.getValue(lfhBuf, off)); 783 current.entry.setTime(time); 784 off += WORD; 785 786 ZipLong size = null, cSize = null; 787 if (!current.hasDataDescriptor) { 788 current.entry.setCrc(ZipLong.getValue(lfhBuf, off)); 789 off += WORD; 790 791 cSize = new ZipLong(lfhBuf, off); 792 off += WORD; 793 794 size = new ZipLong(lfhBuf, off); 795 off += WORD; 796 } else { 797 off += 3 * WORD; 798 } 799 800 final int fileNameLen = ZipShort.getValue(lfhBuf, off); 801 802 off += SHORT; 803 804 final int extraLen = ZipShort.getValue(lfhBuf, off); 805 off += SHORT; // NOSONAR - assignment as documentation 806 807 final byte[] fileName = readRange(fileNameLen); 808 current.entry.setName(entryEncoding.decode(fileName), fileName); 809 if (hasUTF8Flag) { 810 current.entry.setNameSource(ZipArchiveEntry.NameSource.NAME_WITH_EFS_FLAG); 811 } 812 813 final byte[] extraData = readRange(extraLen); 814 try { 815 current.entry.setExtra(extraData); 816 } catch (final RuntimeException ex) { 817 final ZipException z = new ZipException("Invalid extra data in entry " + current.entry.getName()); 818 z.initCause(ex); 819 throw z; 820 } 821 822 if (!hasUTF8Flag && useUnicodeExtraFields) { 823 ZipUtil.setNameAndCommentFromExtraFields(current.entry, fileName, null); 824 } 825 826 processZip64Extra(size, cSize); 827 828 current.entry.setLocalHeaderOffset(currentHeaderOffset); 829 current.entry.setDataOffset(getBytesRead()); 830 current.entry.setStreamContiguous(true); 831 832 final ZipMethod m = ZipMethod.getMethodByCode(current.entry.getMethod()); 833 if (current.entry.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN) { 834 if (ZipUtil.canHandleEntryData(current.entry) && m != ZipMethod.STORED && m != ZipMethod.DEFLATED) { 835 final InputStream bis = new BoundedInputStream(inputStream, current.entry.getCompressedSize()); 836 switch (m) { 837 case UNSHRINKING: 838 current.inputStream = new UnshrinkingInputStream(bis); 839 break; 840 case IMPLODING: 841 try { 842 current.inputStream = new ExplodingInputStream( 843 current.entry.getGeneralPurposeBit().getSlidingDictionarySize(), 844 current.entry.getGeneralPurposeBit().getNumberOfShannonFanoTrees(), 845 bis); 846 } catch (final IllegalArgumentException ex) { 847 throw new IOException("bad IMPLODE data", ex); 848 } 849 break; 850 case BZIP2: 851 current.inputStream = new BZip2CompressorInputStream(bis); 852 break; 853 case ENHANCED_DEFLATED: 854 current.inputStream = new Deflate64CompressorInputStream(bis); 855 break; 856 default: 857 // we should never get here as all supported methods have been covered 858 // will cause an error when read is invoked, don't throw an exception here so people can 859 // skip unsupported entries 860 break; 861 } 862 } 863 } else if (m == ZipMethod.ENHANCED_DEFLATED) { 864 current.inputStream = new Deflate64CompressorInputStream(inputStream); 865 } 866 867 entriesRead++; 868 return current.entry; 869 } 870 871 /** 872 * @since 1.17 873 */ 874 @Override 875 public long getUncompressedCount() { 876 return uncompressedCount; 877 } 878 879 /** 880 * Checks whether this might be an APK Signing Block. 881 * 882 * <p>Unfortunately the APK signing block does not start with some kind of signature, it rather ends with one. 
It 883 * starts with a length, so what we do is parse the suspect length, skip ahead far enough, look for the signature 884 * and if we've found it, return true.</p> 885 * 886 * @param suspectLocalFileHeader the bytes read from the underlying stream in the expectation that they would hold 887 * the local file header of the next entry. 888 * 889 * @return true if this looks like an APK signing block 890 * 891 * @see <a href="https://source.android.com/security/apksigning/v2">https://source.android.com/security/apksigning/v2</a> 892 */ 893 private boolean isApkSigningBlock(final byte[] suspectLocalFileHeader) throws IOException { 894 // length of block excluding the size field itself 895 final BigInteger len = ZipEightByteInteger.getValue(suspectLocalFileHeader); 896 // LFH has already been read and all but the first eight bytes contain (part of) the APK signing block, 897 // also subtract 16 bytes in order to position us at the magic string 898 BigInteger toSkip = len.add(BigInteger.valueOf(DWORD - suspectLocalFileHeader.length 899 - (long) APK_SIGNING_BLOCK_MAGIC.length)); 900 final byte[] magic = new byte[APK_SIGNING_BLOCK_MAGIC.length]; 901 902 try { 903 if (toSkip.signum() < 0) { 904 // suspectLocalFileHeader contains the start of suspect magic string 905 final int off = suspectLocalFileHeader.length + toSkip.intValue(); 906 // length was shorter than magic length 907 if (off < DWORD) { 908 return false; 909 } 910 final int bytesInBuffer = Math.abs(toSkip.intValue()); 911 System.arraycopy(suspectLocalFileHeader, off, magic, 0, Math.min(bytesInBuffer, magic.length)); 912 if (bytesInBuffer < magic.length) { 913 readFully(magic, bytesInBuffer); 914 } 915 } else { 916 while (toSkip.compareTo(LONG_MAX) > 0) { 917 realSkip(Long.MAX_VALUE); 918 toSkip = toSkip.add(LONG_MAX.negate()); 919 } 920 realSkip(toSkip.longValue()); 921 readFully(magic); 922 } 923 } catch (final EOFException ex) { //NOSONAR 924 // length was invalid 925 return false; 926 } 927 return Arrays.equals(magic, APK_SIGNING_BLOCK_MAGIC); 928 } 929 930 private boolean isFirstByteOfEocdSig(final int b) { 931 return b == ZipArchiveOutputStream.EOCD_SIG[0]; 932 } 933 934 /** 935 * Records whether a Zip64 extra is present and sets the size 936 * information from it if sizes are 0xFFFFFFFF and the entry 937 * doesn't use a data descriptor. 
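     *
     * <p>Concretely: when the local file header stores the sentinel value
     * 0xFFFFFFFF in its size fields, the 8-byte compressed and uncompressed
     * sizes carried by the Zip64 extended information extra field are the ones
     * copied into the entry.</p>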
938 */ 939 private void processZip64Extra(final ZipLong size, final ZipLong cSize) throws ZipException { 940 final ZipExtraField extra = 941 current.entry.getExtraField(Zip64ExtendedInformationExtraField.HEADER_ID); 942 if (extra != null && !(extra instanceof Zip64ExtendedInformationExtraField)) { 943 throw new ZipException("archive contains unparseable zip64 extra field"); 944 } 945 final Zip64ExtendedInformationExtraField z64 = 946 (Zip64ExtendedInformationExtraField) extra; 947 current.usesZip64 = z64 != null; 948 if (!current.hasDataDescriptor) { 949 if (z64 != null // same as current.usesZip64 but avoids NPE warning 950 && (ZipLong.ZIP64_MAGIC.equals(cSize) || ZipLong.ZIP64_MAGIC.equals(size)) ) { 951 if (z64.getCompressedSize() == null || z64.getSize() == null) { 952 // avoid NPE if it's a corrupted ZIP archive 953 throw new ZipException("archive contains corrupted zip64 extra field"); 954 } 955 long s = z64.getCompressedSize().getLongValue(); 956 if (s < 0) { 957 throw new ZipException("broken archive, entry with negative compressed size"); 958 } 959 current.entry.setCompressedSize(s); 960 s = z64.getSize().getLongValue(); 961 if (s < 0) { 962 throw new ZipException("broken archive, entry with negative size"); 963 } 964 current.entry.setSize(s); 965 } else if (cSize != null && size != null) { 966 if (cSize.getValue() < 0) { 967 throw new ZipException("broken archive, entry with negative compressed size"); 968 } 969 current.entry.setCompressedSize(cSize.getValue()); 970 if (size.getValue() < 0) { 971 throw new ZipException("broken archive, entry with negative size"); 972 } 973 current.entry.setSize(size.getValue()); 974 } 975 } 976 } 977 978 private void pushback(final byte[] buf, final int offset, final int length) throws IOException { 979 ((PushbackInputStream) inputStream).unread(buf, offset, length); 980 pushedBackBytes(length); 981 } 982 983 @Override 984 public int read(final byte[] buffer, final int offset, final int length) throws IOException { 985 if (length == 0) { 986 return 0; 987 } 988 if (closed) { 989 throw new IOException("The stream is closed"); 990 } 991 992 if (current == null) { 993 return -1; 994 } 995 996 // avoid int overflow, check null buffer 997 if (offset > buffer.length || length < 0 || offset < 0 || buffer.length - offset < length) { 998 throw new ArrayIndexOutOfBoundsException(); 999 } 1000 1001 ZipUtil.checkRequestedFeatures(current.entry); 1002 if (!supportsDataDescriptorFor(current.entry)) { 1003 throw new UnsupportedZipFeatureException(UnsupportedZipFeatureException.Feature.DATA_DESCRIPTOR, 1004 current.entry); 1005 } 1006 if (!supportsCompressedSizeFor(current.entry)) { 1007 throw new UnsupportedZipFeatureException(UnsupportedZipFeatureException.Feature.UNKNOWN_COMPRESSED_SIZE, 1008 current.entry); 1009 } 1010 1011 final int read; 1012 if (current.entry.getMethod() == ZipArchiveOutputStream.STORED) { 1013 read = readStored(buffer, offset, length); 1014 } else if (current.entry.getMethod() == ZipArchiveOutputStream.DEFLATED) { 1015 read = readDeflated(buffer, offset, length); 1016 } else if (current.entry.getMethod() == ZipMethod.UNSHRINKING.getCode() 1017 || current.entry.getMethod() == ZipMethod.IMPLODING.getCode() 1018 || current.entry.getMethod() == ZipMethod.ENHANCED_DEFLATED.getCode() 1019 || current.entry.getMethod() == ZipMethod.BZIP2.getCode()) { 1020 read = current.inputStream.read(buffer, offset, length); 1021 } else { 1022 throw new UnsupportedZipFeatureException(ZipMethod.getMethodByCode(current.entry.getMethod()), 1023 current.entry); 
1024 } 1025 1026 if (read >= 0) { 1027 current.crc.update(buffer, offset, read); 1028 uncompressedCount += read; 1029 } 1030 1031 return read; 1032 } 1033 private void readDataDescriptor() throws IOException { 1034 readFully(wordBuf); 1035 ZipLong val = new ZipLong(wordBuf); 1036 if (ZipLong.DD_SIG.equals(val)) { 1037 // data descriptor with signature, skip sig 1038 readFully(wordBuf); 1039 val = new ZipLong(wordBuf); 1040 } 1041 current.entry.setCrc(val.getValue()); 1042 1043 // if there is a ZIP64 extra field, sizes are eight bytes 1044 // each, otherwise four bytes each. Unfortunately some 1045 // implementations - namely Java7 - use eight bytes without 1046 // using a ZIP64 extra field - 1047 // https://bugs.sun.com/bugdatabase/view_bug.do?bug_id=7073588 1048 1049 // just read 16 bytes and check whether bytes nine to twelve 1050 // look like one of the signatures of what could follow a data 1051 // descriptor (ignoring archive decryption headers for now). 1052 // If so, push back eight bytes and assume sizes are four 1053 // bytes, otherwise sizes are eight bytes each. 1054 readFully(twoDwordBuf); 1055 final ZipLong potentialSig = new ZipLong(twoDwordBuf, DWORD); 1056 if (potentialSig.equals(ZipLong.CFH_SIG) || potentialSig.equals(ZipLong.LFH_SIG)) { 1057 pushback(twoDwordBuf, DWORD, DWORD); 1058 long size = ZipLong.getValue(twoDwordBuf); 1059 if (size < 0) { 1060 throw new ZipException("broken archive, entry with negative compressed size"); 1061 } 1062 current.entry.setCompressedSize(size); 1063 size = ZipLong.getValue(twoDwordBuf, WORD); 1064 if (size < 0) { 1065 throw new ZipException("broken archive, entry with negative size"); 1066 } 1067 current.entry.setSize(size); 1068 } else { 1069 long size = ZipEightByteInteger.getLongValue(twoDwordBuf); 1070 if (size < 0) { 1071 throw new ZipException("broken archive, entry with negative compressed size"); 1072 } 1073 current.entry.setCompressedSize(size); 1074 size = ZipEightByteInteger.getLongValue(twoDwordBuf, DWORD); 1075 if (size < 0) { 1076 throw new ZipException("broken archive, entry with negative size"); 1077 } 1078 current.entry.setSize(size); 1079 } 1080 } 1081 /** 1082 * Implementation of read for DEFLATED entries. 1083 */ 1084 private int readDeflated(final byte[] buffer, final int offset, final int length) throws IOException { 1085 final int read = readFromInflater(buffer, offset, length); 1086 if (read <= 0) { 1087 if (inf.finished()) { 1088 return -1; 1089 } 1090 if (inf.needsDictionary()) { 1091 throw new ZipException("This archive needs a preset dictionary" 1092 + " which is not supported by Commons" 1093 + " Compress."); 1094 } 1095 if (read == -1) { 1096 throw new IOException("Truncated ZIP file"); 1097 } 1098 } 1099 return read; 1100 } 1101 1102 /** 1103 * Fills the given array with the first local file header and 1104 * deals with splitting/spanning markers that may prefix the first 1105 * LFH. 1106 */ 1107 private void readFirstLocalFileHeader() throws IOException { 1108 readFully(lfhBuf); 1109 final ZipLong sig = new ZipLong(lfhBuf); 1110 1111 if (!skipSplitSig && sig.equals(ZipLong.DD_SIG)) { 1112 throw new UnsupportedZipFeatureException(UnsupportedZipFeatureException.Feature.SPLITTING); 1113 } 1114 1115 // the split ZIP signature(08074B50) should only be skipped when the skipSplitSig is set 1116 if (sig.equals(ZipLong.SINGLE_SEGMENT_SPLIT_MARKER) || sig.equals(ZipLong.DD_SIG)) { 1117 // Just skip over the marker. 
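            // The split marker consumed the first four bytes of lfhBuf, so the
            // buffer currently holds only the first 26 bytes of the real local
            // file header. Read the four missing bytes and shift the contents so
            // that lfhBuf again contains a complete 30-byte header.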
1118 final byte[] missedLfhBytes = new byte[4]; 1119 readFully(missedLfhBytes); 1120 System.arraycopy(lfhBuf, 4, lfhBuf, 0, LFH_LEN - 4); 1121 System.arraycopy(missedLfhBytes, 0, lfhBuf, LFH_LEN - 4, 4); 1122 } 1123 } 1124 1125 /** 1126 * Potentially reads more bytes to fill the inflater's buffer and 1127 * reads from it. 1128 */ 1129 private int readFromInflater(final byte[] buffer, final int offset, final int length) throws IOException { 1130 int read = 0; 1131 do { 1132 if (inf.needsInput()) { 1133 final int l = fill(); 1134 if (l > 0) { 1135 current.bytesReadFromStream += buf.limit(); 1136 } else if (l == -1) { 1137 return -1; 1138 } else { 1139 break; 1140 } 1141 } 1142 try { 1143 read = inf.inflate(buffer, offset, length); 1144 } catch (final DataFormatException e) { 1145 throw (IOException) new ZipException(e.getMessage()).initCause(e); 1146 } 1147 } while (read == 0 && inf.needsInput()); 1148 return read; 1149 } 1150 1151 private void readFully(final byte[] b) throws IOException { 1152 readFully(b, 0); 1153 } 1154 1155 // End of Central Directory Record 1156 // end of central dir signature WORD 1157 // number of this disk SHORT 1158 // number of the disk with the 1159 // start of the central directory SHORT 1160 // total number of entries in the 1161 // central directory on this disk SHORT 1162 // total number of entries in 1163 // the central directory SHORT 1164 // size of the central directory WORD 1165 // offset of start of central 1166 // directory with respect to 1167 // the starting disk number WORD 1168 // .ZIP file comment length SHORT 1169 // .ZIP file comment up to 64KB 1170 // 1171 1172 private void readFully(final byte[] b, final int off) throws IOException { 1173 final int len = b.length - off; 1174 final int count = IOUtils.readFully(inputStream, b, off, len); 1175 count(count); 1176 if (count < len) { 1177 throw new EOFException(); 1178 } 1179 } 1180 1181 /** 1182 * Reads bytes by reading from the underlying stream rather than 1183 * the (potentially inflating) archive stream - which {@link #read} would do. 1184 * 1185 * Also updates bytes-read counter. 1186 */ 1187 private int readOneByte() throws IOException { 1188 final int b = inputStream.read(); 1189 if (b != -1) { 1190 count(1); 1191 } 1192 return b; 1193 } 1194 1195 private byte[] readRange(final int len) throws IOException { 1196 final byte[] ret = IOUtils.readRange(inputStream, len); 1197 count(ret.length); 1198 if (ret.length < len) { 1199 throw new EOFException(); 1200 } 1201 return ret; 1202 } 1203 1204 /** 1205 * Implementation of read for STORED entries. 
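     *
     * <p>Entries that use a data descriptor are cached in full via
     * {@link #readStoredEntry()} and then served from {@code lastStoredEntry};
     * entries with a known size are copied from the internal buffer, which is
     * refilled from the wrapped stream as needed, until {@code bytesRead}
     * reaches the entry's size.</p>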
     */
    private int readStored(final byte[] buffer, final int offset, final int length) throws IOException {

        if (current.hasDataDescriptor) {
            if (lastStoredEntry == null) {
                readStoredEntry();
            }
            return lastStoredEntry.read(buffer, offset, length);
        }

        final long csize = current.entry.getSize();
        if (current.bytesRead >= csize) {
            return -1;
        }

        if (buf.position() >= buf.limit()) {
            buf.position(0);
            final int l = inputStream.read(buf.array());
            if (l == -1) {
                buf.limit(0);
                throw new IOException("Truncated ZIP file");
            }
            buf.limit(l);

            count(l);
            current.bytesReadFromStream += l;
        }

        int toRead = Math.min(buf.remaining(), length);
        if ((csize - current.bytesRead) < toRead) {
            // if it is smaller than toRead then it fits into an int
            toRead = (int) (csize - current.bytesRead);
        }
        buf.get(buffer, offset, toRead);
        current.bytesRead += toRead;
        return toRead;
    }

    /**
     * Caches a stored entry that uses the data descriptor.
     *
     * <ul>
     * <li>Reads a stored entry until the signature of a local file
     * header, central directory header or data descriptor has been
     * found.</li>
     * <li>Stores all entry data in lastStoredEntry.</li>
     * <li>Rewinds the stream to position at the data
     * descriptor.</li>
     * <li>reads the data descriptor</li>
     * </ul>
     *
     * <p>After calling this method the entry should know its size,
     * the entry's data is cached and the stream is positioned at the
     * next local file or central directory header.</p>
     */
    private void readStoredEntry() throws IOException {
        final ByteArrayOutputStream bos = new ByteArrayOutputStream();
        int off = 0;
        boolean done = false;

        // length of DD without signature
        final int ddLen = current.usesZip64 ? WORD + 2 * DWORD : 3 * WORD;

        while (!done) {
            final int r = inputStream.read(buf.array(), off, ZipArchiveOutputStream.BUFFER_SIZE - off);
            if (r <= 0) {
                // read the whole archive without ever finding a
                // central directory
                throw new IOException("Truncated ZIP file");
            }
            if (r + off < 4) {
                // buffer too small to check for a signature, loop
                off += r;
                continue;
            }

            done = bufferContainsSignature(bos, off, r, ddLen);
            if (!done) {
                off = cacheBytesRead(bos, off, r, ddLen);
            }
        }
        if (current.entry.getCompressedSize() != current.entry.getSize()) {
            throw new ZipException("compressed and uncompressed size don't match"
                + USE_ZIPFILE_INSTEAD_OF_STREAM_DISCLAIMER);
        }
        final byte[] b = bos.toByteArray();
        if (b.length != current.entry.getSize()) {
            throw new ZipException("actual and claimed size don't match"
                + USE_ZIPFILE_INSTEAD_OF_STREAM_DISCLAIMER);
        }
        lastStoredEntry = new ByteArrayInputStream(b);
    }

    /**
     * Skips bytes by reading from the underlying stream rather than
     * the (potentially inflating) archive stream - which {@link #skip} would do.
     *
     * Also updates bytes-read counter.
     */
    private void realSkip(final long value) throws IOException {
        if (value >= 0) {
            long skipped = 0;
            while (skipped < value) {
                final long rem = value - skipped;
                final int x = inputStream.read(skipBuf, 0, (int) (skipBuf.length > rem ?
                    rem : skipBuf.length));
                if (x == -1) {
                    return;
                }
                count(x);
                skipped += x;
            }
            return;
        }
        throw new IllegalArgumentException();
    }
    /**
     * Skips over and discards value bytes of data from this input
     * stream.
     *
     * <p>This implementation may end up skipping over some smaller
     * number of bytes, possibly 0, if and only if it reaches the end
     * of the underlying stream.</p>
     *
     * <p>The actual number of bytes skipped is returned.</p>
     *
     * @param value the number of bytes to be skipped.
     * @return the actual number of bytes skipped.
     * @throws IOException - if an I/O error occurs.
     * @throws IllegalArgumentException - if value is negative.
     */
    @Override
    public long skip(final long value) throws IOException {
        if (value >= 0) {
            long skipped = 0;
            while (skipped < value) {
                final long rem = value - skipped;
                final int x = read(skipBuf, 0, (int) (skipBuf.length > rem ? rem : skipBuf.length));
                if (x == -1) {
                    return skipped;
                }
                skipped += x;
            }
            return skipped;
        }
        throw new IllegalArgumentException();
    }

    /**
     * Reads the stream until it finds the "End of central directory
     * record" and consumes it as well.
     */
    private void skipRemainderOfArchive() throws IOException {
        // skip over central directory. One LFH has been read too much
        // already. The calculation discounts file names and extra
        // data, so it will be too short.
        if (entriesRead > 0) {
            realSkip((long) entriesRead * CFH_LEN - LFH_LEN);
            final boolean foundEocd = findEocdRecord();
            if (foundEocd) {
                realSkip((long) ZipFile.MIN_EOCD_SIZE - WORD /* signature */ - SHORT /* comment len */);
                readFully(shortBuf);
                // file comment
                final int commentLen = ZipShort.getValue(shortBuf);
                if (commentLen >= 0) {
                    realSkip(commentLen);
                    return;
                }
            }
        }
        throw new IOException("Truncated ZIP file");
    }

    /**
     * Whether the compressed size for the entry is either known or
     * not required by the compression method being used.
     */
    private boolean supportsCompressedSizeFor(final ZipArchiveEntry entry) {
        return entry.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN
            || entry.getMethod() == ZipEntry.DEFLATED
            || entry.getMethod() == ZipMethod.ENHANCED_DEFLATED.getCode()
            || (entry.getGeneralPurposeBit().usesDataDescriptor()
                && allowStoredEntriesWithDataDescriptor
                && entry.getMethod() == ZipEntry.STORED);
    }

    /**
     * Whether this entry requires a data descriptor this library can work with.
     *
     * @return true if allowStoredEntriesWithDataDescriptor is true,
     * the entry doesn't require any data descriptor or the method is
     * DEFLATED or ENHANCED_DEFLATED.
     */
    private boolean supportsDataDescriptorFor(final ZipArchiveEntry entry) {
        return !entry.getGeneralPurposeBit().usesDataDescriptor()
            || (allowStoredEntriesWithDataDescriptor && entry.getMethod() == ZipEntry.STORED)
            || entry.getMethod() == ZipEntry.DEFLATED
            || entry.getMethod() == ZipMethod.ENHANCED_DEFLATED.getCode();
    }
}
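
/*
 * Illustrative streaming use of this class (the names below are placeholders,
 * not part of this file's API):
 *
 *   try (ZipArchiveInputStream zin = new ZipArchiveInputStream(rawInput)) {
 *       ZipArchiveEntry entry;
 *       while ((entry = zin.getNextZipEntry()) != null) {
 *           if (!zin.canReadEntryData(entry)) {
 *               continue; // encrypted or unsupported compression method
 *           }
 *           // read from zin here to obtain the entry's uncompressed bytes
 *       }
 *   }
 */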