1 /* |
|
2 * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved. |
|
3 * |
|
4 * Redistribution and use in source and binary forms, with or without |
|
5 * modification, are permitted provided that the following conditions |
|
6 * are met: |
|
7 * |
|
8 * - Redistributions of source code must retain the above copyright |
|
9 * notice, this list of conditions and the following disclaimer. |
|
10 * |
|
11 * - Redistributions in binary form must reproduce the above copyright |
|
12 * notice, this list of conditions and the following disclaimer in the |
|
13 * documentation and/or other materials provided with the distribution. |
|
14 * |
|
15 * - Neither the name of Oracle nor the names of its |
|
16 * contributors may be used to endorse or promote products derived |
|
17 * from this software without specific prior written permission. |
|
18 * |
|
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS |
|
20 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, |
|
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR |
|
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
|
24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
|
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
|
26 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |
|
27 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
|
28 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
|
29 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
|
30 */ |
|
31 |
|
32 /* |
|
33 * This source code is provided to illustrate the usage of a given feature |
|
34 * or technique and has been deliberately simplified. Additional steps |
|
35 * required for a production-quality application, such as security checks, |
|
36 * input validation and proper error handling, might not be present in |
|
37 * this sample code. |
|
38 */ |
|
39 |
|
40 |
|
41 package com.sun.nio.zipfs; |
|
42 |
|
43 import java.io.BufferedOutputStream; |
|
44 import java.io.ByteArrayInputStream; |
|
45 import java.io.ByteArrayOutputStream; |
|
46 import java.io.EOFException; |
|
47 import java.io.File; |
|
48 import java.io.IOException; |
|
49 import java.io.InputStream; |
|
50 import java.io.OutputStream; |
|
51 import java.nio.ByteBuffer; |
|
52 import java.nio.MappedByteBuffer; |
|
53 import java.nio.channels.*; |
|
54 import java.nio.file.*; |
|
55 import java.nio.file.attribute.*; |
|
56 import java.nio.file.spi.*; |
|
57 import java.util.*; |
|
58 import java.util.concurrent.locks.ReadWriteLock; |
|
59 import java.util.concurrent.locks.ReentrantReadWriteLock; |
|
60 import java.util.regex.Pattern; |
|
61 import java.util.zip.CRC32; |
|
62 import java.util.zip.Inflater; |
|
63 import java.util.zip.Deflater; |
|
64 import java.util.zip.InflaterInputStream; |
|
65 import java.util.zip.DeflaterOutputStream; |
|
66 import java.util.zip.ZipException; |
|
67 import java.util.zip.ZipError; |
|
68 import static java.lang.Boolean.*; |
|
69 import static com.sun.nio.zipfs.ZipConstants.*; |
|
70 import static com.sun.nio.zipfs.ZipUtils.*; |
|
71 import static java.nio.file.StandardOpenOption.*; |
|
72 import static java.nio.file.StandardCopyOption.*; |
|
73 |
|
74 /** |
|
75 * A FileSystem built on a zip file |
|
76 * |
|
77 * @author Xueming Shen |
|
78 */ |
|
79 |
|
80 public class ZipFileSystem extends FileSystem { |
|
81 |
|
    // Provider that created this file system; returned by provider().
    private final ZipFileSystemProvider provider;
    // Default directory as a ZipPath (resolved from "default.dir" env entry).
    private final ZipPath defaultdir;
    // True when the underlying zip file is not writable.
    private boolean readOnly = false;
    // Path of the underlying zip file on the default file system.
    private final Path zfpath;
    // Coder used to convert entry names/comments between String and bytes.
    private final ZipCoder zc;

    // configurable by env map
    private final String defaultDir;    // default dir for the file system
    private final String nameEncoding;  // default encoding for name/comment
    private final boolean useTempFile;  // use a temp file for newOS, default
                                        // is to use BAOS for better performance
    private final boolean createNew;    // create a new zip if not exists
    // Checked once at class load; "os.name" is stable for the JVM lifetime.
    private static final boolean isWindows =
        System.getProperty("os.name").startsWith("Windows");
|
96 |
|
    /**
     * Creates a zip file system over the zip file at {@code zfpath}.
     *
     * Recognized env entries: "create" (the string "true" creates a new zip
     * when the file does not exist), "encoding" (charset for entry
     * names/comments, default UTF-8), "useTempFile" (Boolean.TRUE buffers new
     * entry data in a temp file instead of memory), and "default.dir" (must
     * be absolute, default "/").
     *
     * @throws IOException if the zip cannot be opened/created or its
     *         central directory cannot be read
     * @throws IllegalArgumentException if "default.dir" is not absolute
     * @throws FileSystemNotFoundException if the zip does not exist and
     *         "create" was not requested
     */
    ZipFileSystem(ZipFileSystemProvider provider,
                  Path zfpath,
                  Map<String, ?> env)
        throws IOException
    {
        // configurable env setup
        this.createNew = "true".equals(env.get("create"));
        this.nameEncoding = env.containsKey("encoding") ?
            (String)env.get("encoding") : "UTF-8";
        this.useTempFile = TRUE.equals(env.get("useTempFile"));
        this.defaultDir = env.containsKey("default.dir") ?
            (String)env.get("default.dir") : "/";
        if (this.defaultDir.charAt(0) != '/')
            throw new IllegalArgumentException("default dir should be absolute");

        this.provider = provider;
        this.zfpath = zfpath;
        if (Files.notExists(zfpath)) {
            if (createNew) {
                // write a lone END record so the file is a valid empty zip
                try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) {
                    new END().write(os, 0);
                }
            } else {
                throw new FileSystemNotFoundException(zfpath.toString());
            }
        }
        // sm and existence check
        zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ);
        if (!Files.isWritable(zfpath))
            this.readOnly = true;
        this.zc = ZipCoder.get(nameEncoding);
        this.defaultdir = new ZipPath(this, getBytes(defaultDir));
        this.ch = Files.newByteChannel(zfpath, READ); // updates are written by sync(), not via this channel
        this.cen = initCEN();                         // read the central directory into memory
    }
|
132 |
|
    @Override
    public FileSystemProvider provider() {
        // Provider instance supplied at construction time.
        return provider;
    }
|
137 |
|
    @Override
    public String getSeparator() {
        // Zip file systems always use '/' regardless of host platform.
        return "/";
    }
|
142 |
|
    @Override
    public boolean isOpen() {
        // Volatile read; flag is cleared by close().
        return isOpen;
    }
|
147 |
|
    @Override
    public boolean isReadOnly() {
        // Set in the constructor when the zip file itself is not writable.
        return readOnly;
    }
|
152 |
|
    // Rejects mutating operations when the underlying zip is read-only.
    private void checkWritable() throws IOException {
        if (readOnly)
            throw new ReadOnlyFileSystemException();
    }
|
157 |
|
158 @Override |
|
159 public Iterable<Path> getRootDirectories() { |
|
160 ArrayList<Path> pathArr = new ArrayList<>(); |
|
161 pathArr.add(new ZipPath(this, new byte[]{'/'})); |
|
162 return pathArr; |
|
163 } |
|
164 |
|
    // Default directory ("default.dir" env entry) used to resolve relative
    // paths within this file system.
    ZipPath getDefaultDir() {  // package private
        return defaultdir;
    }
|
168 |
|
169 @Override |
|
170 public ZipPath getPath(String first, String... more) { |
|
171 String path; |
|
172 if (more.length == 0) { |
|
173 path = first; |
|
174 } else { |
|
175 StringBuilder sb = new StringBuilder(); |
|
176 sb.append(first); |
|
177 for (String segment: more) { |
|
178 if (segment.length() > 0) { |
|
179 if (sb.length() > 0) |
|
180 sb.append('/'); |
|
181 sb.append(segment); |
|
182 } |
|
183 } |
|
184 path = sb.toString(); |
|
185 } |
|
186 return new ZipPath(this, getBytes(path)); |
|
187 } |
|
188 |
|
    @Override
    public UserPrincipalLookupService getUserPrincipalLookupService() {
        // Zip file systems have no user/group principals.
        throw new UnsupportedOperationException();
    }
|
193 |
|
    @Override
    public WatchService newWatchService() {
        // Watching entries inside a zip is not supported.
        throw new UnsupportedOperationException();
    }
|
198 |
|
    // Returns the (single) file store associated with the given path.
    FileStore getFileStore(ZipPath path) {
        return new ZipFileStore(path);
    }
|
202 |
|
203 @Override |
|
204 public Iterable<FileStore> getFileStores() { |
|
205 ArrayList<FileStore> list = new ArrayList<>(1); |
|
206 list.add(new ZipFileStore(new ZipPath(this, new byte[]{'/'}))); |
|
207 return list; |
|
208 } |
|
209 |
|
    // Attribute views implemented here: standard "basic" plus zip-specific "zip".
    private static final Set<String> supportedFileAttributeViews =
        Collections.unmodifiableSet(
            new HashSet<String>(Arrays.asList("basic", "zip")));
|
213 |
|
    @Override
    public Set<String> supportedFileAttributeViews() {
        // Immutable set; safe to hand out directly.
        return supportedFileAttributeViews;
    }
|
218 |
|
    @Override
    public String toString() {
        // Identify the file system by the zip file it is built on.
        return zfpath.toString();
    }
|
223 |
|
    // Path of the underlying zip file on the default file system.
    Path getZipFile() {
        return zfpath;
    }
|
227 |
|
    // PathMatcher syntaxes recognized by getPathMatcher().
    private static final String GLOB_SYNTAX = "glob";
    private static final String REGEX_SYNTAX = "regex";
|
230 |
|
231 @Override |
|
232 public PathMatcher getPathMatcher(String syntaxAndInput) { |
|
233 int pos = syntaxAndInput.indexOf(':'); |
|
234 if (pos <= 0 || pos == syntaxAndInput.length()) { |
|
235 throw new IllegalArgumentException(); |
|
236 } |
|
237 String syntax = syntaxAndInput.substring(0, pos); |
|
238 String input = syntaxAndInput.substring(pos + 1); |
|
239 String expr; |
|
240 if (syntax.equals(GLOB_SYNTAX)) { |
|
241 expr = toRegexPattern(input); |
|
242 } else { |
|
243 if (syntax.equals(REGEX_SYNTAX)) { |
|
244 expr = input; |
|
245 } else { |
|
246 throw new UnsupportedOperationException("Syntax '" + syntax + |
|
247 "' not recognized"); |
|
248 } |
|
249 } |
|
250 // return matcher |
|
251 final Pattern pattern = Pattern.compile(expr); |
|
252 return new PathMatcher() { |
|
253 @Override |
|
254 public boolean matches(Path path) { |
|
255 return pattern.matcher(path.toString()).matches(); |
|
256 } |
|
257 }; |
|
258 } |
|
259 |
|
    /**
     * Closes this file system: marks it closed, closes outstanding entry
     * streams, syncs pending updates back into the zip file, releases the
     * cached inflaters/deflaters and deletes any remaining temp files.
     * Idempotent: a second call returns immediately.
     */
    @Override
    public void close() throws IOException {
        beginWrite();
        try {
            if (!isOpen)
                return;          // already closed; nothing to do
            isOpen = false;      // set closed
        } finally {
            endWrite();
        }
        if (!streams.isEmpty()) {    // unlock and close all remaining streams
            // iterate over a snapshot: closing a stream may remove it
            // from 'streams' while we iterate
            Set<InputStream> copy = new HashSet<>(streams);
            for (InputStream is : copy)
                is.close();
        }
        beginWrite();                // lock and sync
        try {
            sync();
            ch.close();              // close the ch just in case no update
        } finally {                  // and sync does not close the ch
            endWrite();
        }

        // release native resources held by the cached (de)compressors
        synchronized (inflaters) {
            for (Inflater inf : inflaters)
                inf.end();
        }
        synchronized (deflaters) {
            for (Deflater def : deflaters)
                def.end();
        }

        // best-effort temp-file cleanup: first failure is rethrown at the
        // end, later failures are attached as suppressed exceptions
        IOException ioe = null;
        synchronized (tmppaths) {
            for (Path p : tmppaths) {
                try {
                    Files.deleteIfExists(p);
                } catch (IOException x) {
                    if (ioe == null)
                        ioe = x;
                    else
                        ioe.addSuppressed(x);
                }
            }
        }
        provider.removeFileSystem(zfpath, this);
        if (ioe != null)
            throw ioe;
    }
|
309 |
|
    /**
     * Returns the attributes of the entry at "path", or null when neither a
     * real entry nor a pseudo (implicit parent) directory exists for it.
     */
    ZipFileAttributes getFileAttributes(byte[] path)
        throws IOException
    {
        Entry e;
        beginRead();
        try {
            ensureOpen();
            e = getEntry0(path);
            if (e == null) {
                // no CEN entry; fall back to the in-memory index, which also
                // tracks directories that exist only implicitly
                IndexNode inode = getInode(path);
                if (inode == null)
                    return null;
                e = new Entry(inode.name);       // pseudo directory
                e.method = METHOD_STORED;        // STORED for dir
                e.mtime = e.atime = e.ctime = -1;// -1 for all times
            }
        } finally {
            endRead();
        }
        return new ZipFileAttributes(e);
    }
|
331 |
|
    /**
     * Sets the modification/access/creation times of the entry at "path".
     * A null FileTime leaves the corresponding timestamp unchanged.
     *
     * @throws NoSuchFileException if no entry exists for the path
     */
    void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime)
        throws IOException
    {
        checkWritable();
        beginWrite();
        try {
            ensureOpen();
            Entry e = getEntry0(path);    // ensureOpen checked
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.type == Entry.CEN)
                e.type = Entry.COPY;      // copy e, so the original CEN data
                                          // is rewritten with the new times
            if (mtime != null)
                e.mtime = mtime.toMillis();
            if (atime != null)
                e.atime = atime.toMillis();
            if (ctime != null)
                e.ctime = ctime.toMillis();
            update(e);                    // queue the metadata update
        } finally {
            endWrite();
        }
    }
|
355 |
|
356 boolean exists(byte[] path) |
|
357 throws IOException |
|
358 { |
|
359 beginRead(); |
|
360 try { |
|
361 ensureOpen(); |
|
362 return getInode(path) != null; |
|
363 } finally { |
|
364 endRead(); |
|
365 } |
|
366 } |
|
367 |
|
368 boolean isDirectory(byte[] path) |
|
369 throws IOException |
|
370 { |
|
371 beginRead(); |
|
372 try { |
|
373 IndexNode n = getInode(path); |
|
374 return n != null && n.isDir(); |
|
375 } finally { |
|
376 endRead(); |
|
377 } |
|
378 } |
|
379 |
|
380 private ZipPath toZipPath(byte[] path) { |
|
381 // make it absolute |
|
382 byte[] p = new byte[path.length + 1]; |
|
383 p[0] = '/'; |
|
384 System.arraycopy(path, 0, p, 1, path.length); |
|
385 return new ZipPath(this, p); |
|
386 } |
|
387 |
|
    // returns the list of child paths of "path"
    // NOTE(review): a non-null inode that is a plain file has no children and
    // simply yields an empty iterator here; callers are presumably expected
    // to reject non-directories before calling — confirm against the provider.
    Iterator<Path> iteratorOf(byte[] path,
                              DirectoryStream.Filter<? super Path> filter)
        throws IOException
    {
        beginWrite();    // iteration of inodes needs exclusive lock
        try {
            ensureOpen();
            IndexNode inode = getInode(path);
            if (inode == null)
                throw new NotDirectoryException(getString(path));
            // snapshot the children as a list so iteration happens
            // outside the lock
            List<Path> list = new ArrayList<>();
            IndexNode child = inode.child;
            while (child != null) {
                ZipPath zp = toZipPath(child.name);
                if (filter == null || filter.accept(zp))
                    list.add(zp);
                child = child.sibling;
            }
            return list.iterator();
        } finally {
            endWrite();
        }
    }
|
412 |
|
    /**
     * Creates a directory entry for "dir".
     *
     * @throws FileAlreadyExistsException for the root dir or an existing path
     * @throws NoSuchFileException        if a parent directory does not exist
     */
    void createDirectory(byte[] dir, FileAttribute<?>... attrs)
        throws IOException
    {
        checkWritable();
        dir = toDirectoryPath(dir);    // normalize to the trailing-'/' form
        beginWrite();
        try {
            ensureOpen();
            if (dir.length == 0 || exists(dir))  // root dir, or existing dir
                throw new FileAlreadyExistsException(getString(dir));
            checkParents(dir);
            Entry e = new Entry(dir, Entry.NEW);
            e.method = METHOD_STORED;    // STORED for dir
            update(e);
        } finally {
            endWrite();
        }
    }
|
431 |
|
    /**
     * Copies (or, when deletesrc is true, moves) the entry at "src" to "dst"
     * within this zip. Recognized options: REPLACE_EXISTING, COPY_ATTRIBUTES.
     *
     * @throws NoSuchFileException        if "src" does not exist
     * @throws FileAlreadyExistsException if "dst" exists without REPLACE_EXISTING
     */
    void copyFile(boolean deletesrc, byte[]src, byte[] dst, CopyOption... options)
        throws IOException
    {
        checkWritable();
        if (Arrays.equals(src, dst))
            return;    // do nothing, src and dst are the same

        beginWrite();
        try {
            ensureOpen();
            Entry eSrc = getEntry0(src);    // ensureOpen checked
            if (eSrc == null)
                throw new NoSuchFileException(getString(src));
            if (eSrc.isDir()) {    // spec says to create dst dir
                createDirectory(dst);
                return;
            }
            boolean hasReplace = false;
            boolean hasCopyAttrs = false;
            for (CopyOption opt : options) {
                if (opt == REPLACE_EXISTING)
                    hasReplace = true;
                else if (opt == COPY_ATTRIBUTES)
                    hasCopyAttrs = true;
            }
            Entry eDst = getEntry0(dst);
            if (eDst != null) {
                if (!hasReplace)
                    throw new FileAlreadyExistsException(getString(dst));
            } else {
                checkParents(dst);
            }
            Entry u = new Entry(eSrc, Entry.COPY);    // copy eSrc entry
            u.name(dst);                              // change name
            // NEW/FILECH entries are not in the zip yet: their data lives in
            // memory (bytes) or in a temp file (file) and must be transferred
            // or duplicated explicitly.
            if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH)
            {
                u.type = eSrc.type;    // make it the same type
                if (deletesrc) {       // if it's a "rename", take the data
                    u.bytes = eSrc.bytes;
                    u.file = eSrc.file;
                } else {               // if it's not "rename", copy the data
                    if (eSrc.bytes != null)
                        u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length);
                    else if (eSrc.file != null) {
                        u.file = getTempPathForEntry(null);
                        Files.copy(eSrc.file, u.file, REPLACE_EXISTING);
                    }
                }
            }
            if (!hasCopyAttrs)
                u.mtime = u.atime= u.ctime = System.currentTimeMillis();
            update(u);
            if (deletesrc)
                updateDelete(eSrc);
        } finally {
            endWrite();
        }
    }
|
490 |
|
491 // Returns an output stream for writing the contents into the specified |
|
492 // entry. |
|
493 OutputStream newOutputStream(byte[] path, OpenOption... options) |
|
494 throws IOException |
|
495 { |
|
496 checkWritable(); |
|
497 boolean hasCreateNew = false; |
|
498 boolean hasCreate = false; |
|
499 boolean hasAppend = false; |
|
500 for (OpenOption opt: options) { |
|
501 if (opt == READ) |
|
502 throw new IllegalArgumentException("READ not allowed"); |
|
503 if (opt == CREATE_NEW) |
|
504 hasCreateNew = true; |
|
505 if (opt == CREATE) |
|
506 hasCreate = true; |
|
507 if (opt == APPEND) |
|
508 hasAppend = true; |
|
509 } |
|
510 beginRead(); // only need a readlock, the "update()" will |
|
511 try { // try to obtain a writelock when the os is |
|
512 ensureOpen(); // being closed. |
|
513 Entry e = getEntry0(path); |
|
514 if (e != null) { |
|
515 if (e.isDir() || hasCreateNew) |
|
516 throw new FileAlreadyExistsException(getString(path)); |
|
517 if (hasAppend) { |
|
518 InputStream is = getInputStream(e); |
|
519 OutputStream os = getOutputStream(new Entry(e, Entry.NEW)); |
|
520 copyStream(is, os); |
|
521 is.close(); |
|
522 return os; |
|
523 } |
|
524 return getOutputStream(new Entry(e, Entry.NEW)); |
|
525 } else { |
|
526 if (!hasCreate && !hasCreateNew) |
|
527 throw new NoSuchFileException(getString(path)); |
|
528 checkParents(path); |
|
529 return getOutputStream(new Entry(path, Entry.NEW)); |
|
530 } |
|
531 } finally { |
|
532 endRead(); |
|
533 } |
|
534 } |
|
535 |
|
    // Returns an input stream for reading the contents of the specified
    // file entry.
    InputStream newInputStream(byte[] path) throws IOException {
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry0(path);
            if (e == null)
                throw new NoSuchFileException(getString(path));
            if (e.isDir())
                throw new FileSystemException(getString(path), "is a directory", null);
            return getInputStream(e);
        } finally {
            endRead();
        }
    }
|
552 |
|
553 private void checkOptions(Set<? extends OpenOption> options) { |
|
554 // check for options of null type and option is an intance of StandardOpenOption |
|
555 for (OpenOption option : options) { |
|
556 if (option == null) |
|
557 throw new NullPointerException(); |
|
558 if (!(option instanceof StandardOpenOption)) |
|
559 throw new IllegalArgumentException(); |
|
560 } |
|
561 } |
|
562 |
|
    // Returns a Writable/ReadByteChannel for now. Might consider using
    // newFileChannel() instead, which dumps the entry data into a regular
    // file on the default file system and creates a FileChannel on top of
    // it.
    SeekableByteChannel newByteChannel(byte[] path,
                                       Set<? extends OpenOption> options,
                                       FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        if (options.contains(StandardOpenOption.WRITE) ||
            options.contains(StandardOpenOption.APPEND)) {
            checkWritable();
            beginRead();
            try {
                final WritableByteChannel wbc = Channels.newChannel(
                    newOutputStream(path, options.toArray(new OpenOption[0])));
                long leftover = 0;
                if (options.contains(StandardOpenOption.APPEND)) {
                    // start position()/size() after the replayed entry data
                    Entry e = getEntry0(path);
                    if (e != null && e.size >= 0)
                        leftover = e.size;
                }
                final long offset = leftover;
                // Write-only channel: position() and size() track the byte
                // count written; seeking, reading and truncating are
                // unsupported.
                return new SeekableByteChannel() {
                    long written = offset;
                    public boolean isOpen() {
                        return wbc.isOpen();
                    }

                    public long position() throws IOException {
                        return written;
                    }

                    public SeekableByteChannel position(long pos)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int read(ByteBuffer dst) throws IOException {
                        throw new UnsupportedOperationException();
                    }

                    public SeekableByteChannel truncate(long size)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int write(ByteBuffer src) throws IOException {
                        int n = wbc.write(src);
                        written += n;
                        return n;
                    }

                    public long size() throws IOException {
                        return written;
                    }

                    public void close() throws IOException {
                        // commits the entry via the output stream's close
                        wbc.close();
                    }
                };
            } finally {
                endRead();
            }
        } else {
            beginRead();
            try {
                ensureOpen();
                Entry e = getEntry0(path);
                if (e == null || e.isDir())
                    throw new NoSuchFileException(getString(path));
                final ReadableByteChannel rbc =
                    Channels.newChannel(getInputStream(e));
                final long size = e.size;
                // Read-only channel: position() tracks bytes consumed;
                // seeking and writing are unsupported.
                return new SeekableByteChannel() {
                    long read = 0;
                    public boolean isOpen() {
                        return rbc.isOpen();
                    }

                    public long position() throws IOException {
                        return read;
                    }

                    public SeekableByteChannel position(long pos)
                        throws IOException
                    {
                        throw new UnsupportedOperationException();
                    }

                    public int read(ByteBuffer dst) throws IOException {
                        int n = rbc.read(dst);
                        if (n > 0) {
                            read += n;
                        }
                        return n;
                    }

                    public SeekableByteChannel truncate(long size)
                        throws IOException
                    {
                        throw new NonWritableChannelException();
                    }

                    public int write (ByteBuffer src) throws IOException {
                        throw new NonWritableChannelException();
                    }

                    public long size() throws IOException {
                        // uncompressed size taken from the entry metadata
                        return size;
                    }

                    public void close() throws IOException {
                        rbc.close();
                    }
                };
            } finally {
                endRead();
            }
        }
    }
|
687 |
|
    // Returns a FileChannel of the specified entry.
    //
    // This implementation creates a temporary file on the default file system,
    // copies the entry data into it if the entry exists, and then creates a
    // FileChannel on top of it.
    FileChannel newFileChannel(byte[] path,
                               Set<? extends OpenOption> options,
                               FileAttribute<?>... attrs)
        throws IOException
    {
        checkOptions(options);
        final boolean forWrite = (options.contains(StandardOpenOption.WRITE) ||
                                  options.contains(StandardOpenOption.APPEND));
        beginRead();
        try {
            ensureOpen();
            Entry e = getEntry0(path);
            if (forWrite) {
                checkWritable();
                if (e == null) {
                    if (!options.contains(StandardOpenOption.CREATE_NEW))
                        throw new NoSuchFileException(getString(path));
                } else {
                    if (options.contains(StandardOpenOption.CREATE_NEW))
                        throw new FileAlreadyExistsException(getString(path));
                    if (e.isDir())
                        throw new FileAlreadyExistsException("directory <"
                            + getString(path) + "> exists");
                }
                // NOTE(review): mutates the caller-supplied set (the temp
                // file is created separately, so CREATE_NEW must not reach
                // the real channel); assumes callers pass a mutable Set —
                // confirm at the call sites.
                options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile
            } else if (e == null || e.isDir()) {
                throw new NoSuchFileException(getString(path));
            }

            // FILECH entries already live in a temp file; reuse it.
            final boolean isFCH = (e != null && e.type == Entry.FILECH);
            final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path);
            final FileChannel fch = tmpfile.getFileSystem()
                                           .provider()
                                           .newFileChannel(tmpfile, options, attrs);
            final Entry u = isFCH ? e : new Entry(path, tmpfile, Entry.FILECH);
            if (forWrite) {
                u.flag = FLAG_DATADESCR;
                u.method = METHOD_DEFLATED;
            }
            // is there a better way to hook into the FileChannel's close method?
            // Every operation delegates to fch; implCloseChannel() additionally
            // pushes written data back into the zip (write case) or deletes
            // the temp file (fresh read case).
            return new FileChannel() {
                public int write(ByteBuffer src) throws IOException {
                    return fch.write(src);
                }
                public long write(ByteBuffer[] srcs, int offset, int length)
                    throws IOException
                {
                    return fch.write(srcs, offset, length);
                }
                public long position() throws IOException {
                    return fch.position();
                }
                public FileChannel position(long newPosition)
                    throws IOException
                {
                    fch.position(newPosition);
                    return this;
                }
                public long size() throws IOException {
                    return fch.size();
                }
                public FileChannel truncate(long size)
                    throws IOException
                {
                    fch.truncate(size);
                    return this;
                }
                public void force(boolean metaData)
                    throws IOException
                {
                    fch.force(metaData);
                }
                public long transferTo(long position, long count,
                                       WritableByteChannel target)
                    throws IOException
                {
                    return fch.transferTo(position, count, target);
                }
                public long transferFrom(ReadableByteChannel src,
                                         long position, long count)
                    throws IOException
                {
                    return fch.transferFrom(src, position, count);
                }
                public int read(ByteBuffer dst) throws IOException {
                    return fch.read(dst);
                }
                public int read(ByteBuffer dst, long position)
                    throws IOException
                {
                    return fch.read(dst, position);
                }
                public long read(ByteBuffer[] dsts, int offset, int length)
                    throws IOException
                {
                    return fch.read(dsts, offset, length);
                }
                public int write(ByteBuffer src, long position)
                    throws IOException
                {
                    return fch.write(src, position);
                }
                public MappedByteBuffer map(MapMode mode,
                                            long position, long size)
                    throws IOException
                {
                    // a mapping could outlive the temp file; not supported
                    throw new UnsupportedOperationException();
                }
                public FileLock lock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.lock(position, size, shared);
                }
                public FileLock tryLock(long position, long size, boolean shared)
                    throws IOException
                {
                    return fch.tryLock(position, size, shared);
                }
                protected void implCloseChannel() throws IOException {
                    fch.close();
                    if (forWrite) {
                        u.mtime = System.currentTimeMillis();
                        u.size = Files.size(u.file);

                        update(u);    // push the new entry data into the zip
                    } else {
                        if (!isFCH)    // if this is a new fch for reading
                            removeTempPathForEntry(tmpfile);
                    }
                }
            };
        } finally {
            endRead();
        }
    }
|
828 |
|
    // the outstanding input streams that need to be closed
    private Set<InputStream> streams =
        Collections.synchronizedSet(new HashSet<InputStream>());

    // the ex-channel and ex-path that need to close when their outstanding
    // input streams are all closed by the obtainers.
    private Set<ExChannelCloser> exChClosers = new HashSet<>();

    // temp files backing entry data; any survivors are deleted in close()
    private Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<Path>());
|
    // Creates a temp file next to the zip file; when "path" names an
    // existing entry, that entry's data is copied into the temp file.
    // A null "path" yields an empty temp file.
    private Path getTempPathForEntry(byte[] path) throws IOException {
        Path tmpPath = createTempFileInSameDirectoryAs(zfpath);
        if (path != null) {
            Entry e = getEntry0(path);
            if (e != null) {
                try (InputStream is = newInputStream(path)) {
                    Files.copy(is, tmpPath, REPLACE_EXISTING);
                }
            }
        }
        return tmpPath;
    }
|
850 |
|
    // Deletes a temp file and drops it from the cleanup set; if the delete
    // throws, the path stays in 'tmppaths' and is retried in close().
    private void removeTempPathForEntry(Path path) throws IOException {
        Files.delete(path);
        tmppaths.remove(path);
    }
|
855 |
|
    // check if all parents really exist. ZIP spec does not require
    // the existence of any "parent directory".
    // Throws NoSuchFileException for the first missing ancestor; the root
    // (zero-length parent) always passes.
    private void checkParents(byte[] path) throws IOException {
        beginRead();
        try {
            while ((path = getParent(path)) != null && path.length != 0) {
                if (!inodes.containsKey(IndexNode.keyOf(path))) {
                    throw new NoSuchFileException(getString(path));
                }
            }
        } finally {
            endRead();
        }
    }
|
870 |
|
871 private static byte[] ROOTPATH = new byte[0]; |
|
872 private static byte[] getParent(byte[] path) { |
|
873 int off = path.length - 1; |
|
874 if (off > 0 && path[off] == '/') // isDirectory |
|
875 off--; |
|
876 while (off > 0 && path[off] != '/') { off--; } |
|
877 if (off <= 0) |
|
878 return ROOTPATH; |
|
879 return Arrays.copyOf(path, off + 1); |
|
880 } |
|
881 |
|
    // Acquires the exclusive (write) lock guarding all metadata updates.
    private final void beginWrite() {
        rwlock.writeLock().lock();
    }
|
885 |
|
    // Releases the exclusive (write) lock.
    private final void endWrite() {
        rwlock.writeLock().unlock();
    }
|
889 |
|
    // Acquires the shared (read) lock.
    private final void beginRead() {
        rwlock.readLock().lock();
    }
|
893 |
|
    // Releases the shared (read) lock.
    private final void endRead() {
        rwlock.readLock().unlock();
    }
|
897 |
|
    ///////////////////////////////////////////////////////////////////

    private volatile boolean isOpen = true;    // cleared by close()
    private final SeekableByteChannel ch;      // channel to the zipfile
    final byte[] cen;                          // CEN & ENDHDR
    private END end;                           // END record located by findEND()
    private long locpos;                       // position of first LOC header (usually 0)

    // Guards the entry index and all update operations; see beginRead/beginWrite.
    private final ReadWriteLock rwlock = new ReentrantReadWriteLock();

    // name -> pos (in cen), IndexNode itself can be used as a "key"
    private LinkedHashMap<IndexNode, IndexNode> inodes;
|
910 |
|
    // Encodes a name with this file system's zip coder (see "encoding" env).
    final byte[] getBytes(String name) {
        return zc.getBytes(name);
    }
|
914 |
|
    // Decodes an entry name with this file system's zip coder.
    final String getString(byte[] name) {
        return zc.toString(name);
    }
|
918 |
|
    // Last-resort cleanup if the user never closed the file system;
    // close() is idempotent so a prior explicit close is harmless.
    // NOTE(review): finalization is unreliable (and deprecated in later
    // Java releases) — do not rely on it for flushing updates.
    protected void finalize() throws IOException {
        close();
    }
|
922 |
|
    // Returns the file position of the entry's data: its LOC header offset
    // plus the header size plus the name and extra-field lengths read from
    // the LOC header itself.
    private long getDataPos(Entry e) throws IOException {
        if (e.locoff == -1) {
            // no cached LOC offset; re-resolve the entry by name
            Entry e2 = getEntry0(e.name);
            if (e2 == null)
                throw new ZipException("invalid loc for entry <" + e.name + ">");
            e.locoff = e2.locoff;
        }
        byte[] buf = new byte[LOCHDR];
        if (readFullyAt(buf, 0, buf.length, e.locoff) != buf.length)
            throw new ZipException("invalid loc for entry <" + e.name + ">");
        return locpos + e.locoff + LOCHDR + LOCNAM(buf) + LOCEXT(buf);
    }
|
935 |
|
936 // Reads len bytes of data from the specified offset into buf. |
|
937 // Returns the total number of bytes read. |
|
938 // Each/every byte read from here (except the cen, which is mapped). |
|
939 final long readFullyAt(byte[] buf, int off, long len, long pos) |
|
940 throws IOException |
|
941 { |
|
942 ByteBuffer bb = ByteBuffer.wrap(buf); |
|
943 bb.position(off); |
|
944 bb.limit((int)(off + len)); |
|
945 return readFullyAt(bb, pos); |
|
946 } |
|
947 |
|
    // Positions the channel and reads into bb; serialized on 'ch' because
    // position() + read() must be atomic with respect to other readers.
    private final long readFullyAt(ByteBuffer bb, long pos)
        throws IOException
    {
        synchronized(ch) {
            return ch.position(pos).read(bb);
        }
    }
|
955 |
|
    // Searches for end of central directory (END) header. The contents of
    // the END header will be read and placed in endbuf. Returns the file
    // position of the END header, otherwise returns -1 if the END header
    // was not found or an error occurred.
    private END findEND() throws IOException
    {
        byte[] buf = new byte[READBLOCKSZ];
        long ziplen = ch.size();
        // the END record must lie within the last END_MAXLEN bytes
        long minHDR = (ziplen - END_MAXLEN) > 0 ? ziplen - END_MAXLEN : 0;
        long minPos = minHDR - (buf.length - ENDHDR);

        // scan backwards block by block, overlapping by ENDHDR bytes so a
        // signature straddling two blocks cannot be missed
        for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR))
        {
            int off = 0;
            if (pos < 0) {
                // Pretend there are some NUL bytes before start of file
                off = (int)-pos;
                Arrays.fill(buf, 0, off, (byte)0);
            }
            int len = buf.length - off;
            if (readFullyAt(buf, off, len, pos + off) != len)
                zerror("zip END header not found");

            // Now scan the block backwards for END header signature
            for (int i = buf.length - ENDHDR; i >= 0; i--) {
                if (buf[i+0] == (byte)'P' &&
                    buf[i+1] == (byte)'K' &&
                    buf[i+2] == (byte)'\005' &&
                    buf[i+3] == (byte)'\006' &&
                    // candidate is genuine only if its comment length puts
                    // the end of the record exactly at end-of-file
                    (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) {
                    // Found END header
                    buf = Arrays.copyOfRange(buf, i, i + ENDHDR);
                    END end = new END();
                    end.endsub = ENDSUB(buf);
                    end.centot = ENDTOT(buf);
                    end.cenlen = ENDSIZ(buf);
                    end.cenoff = ENDOFF(buf);
                    end.comlen = ENDCOM(buf);
                    end.endpos = pos + i;
                    // saturated 32-bit fields signal a ZIP64 archive
                    if (end.cenlen == ZIP64_MINVAL ||
                        end.cenoff == ZIP64_MINVAL ||
                        end.centot == ZIP64_MINVAL32)
                    {
                        // need to find the zip64 end;
                        byte[] loc64 = new byte[ZIP64_LOCHDR];
                        if (readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR)
                            != loc64.length) {
                            return end;    // no ZIP64 locator; keep 32-bit END
                        }
                        long end64pos = ZIP64_LOCOFF(loc64);
                        byte[] end64buf = new byte[ZIP64_ENDHDR];
                        if (readFullyAt(end64buf, 0, end64buf.length, end64pos)
                            != end64buf.length) {
                            return end;    // unreadable ZIP64 end; keep 32-bit END
                        }
                        // end64 found, re-calculate everything.
                        end.cenlen = ZIP64_ENDSIZ(end64buf);
                        end.cenoff = ZIP64_ENDOFF(end64buf);
                        end.centot = (int)ZIP64_ENDTOT(end64buf); // assume total < 2g
                        end.endpos = end64pos;
                    }
                    return end;
                }
            }
        }
        zerror("zip END header not found");
        return null; //make compiler happy
    }
|
1024 |
|
    // Reads the zip file central directory and (re)builds the in-memory
    // inode map and directory tree. Returns the raw CEN bytes (the CEN
    // table plus the trailing END header), or null when the file contains
    // only an END header. Structural problems are reported via zerror().
    private byte[] initCEN() throws IOException {
        end = findEND();
        if (end.endpos == 0) {
            inodes = new LinkedHashMap<>(10);
            locpos = 0;
            buildNodeTree();
            return null;  // only END header present
        }
        if (end.cenlen > end.endpos)
            zerror("invalid END header (bad central directory size)");
        long cenpos = end.endpos - end.cenlen;     // position of CEN table

        // Get position of first local file (LOC) header, taking into
        // account that there may be a stub prefixed to the zip file.
        locpos = cenpos - end.cenoff;
        if (locpos < 0)
            zerror("invalid END header (bad central directory offset)");

        // read in the CEN and END
        byte[] cen = new byte[(int)(end.cenlen + ENDHDR)];
        if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) {
            zerror("read CEN tables failed");
        }
        // Iterate through the entries in the central directory
        inodes = new LinkedHashMap<>(end.centot + 1);
        int pos = 0;
        int limit = cen.length - ENDHDR;
        while (pos < limit) {
            if (CENSIG(cen, pos) != CENSIG)
                zerror("invalid CEN header (bad signature)");
            int method = CENHOW(cen, pos);
            int nlen = CENNAM(cen, pos);
            int elen = CENEXT(cen, pos);
            int clen = CENCOM(cen, pos);
            if ((CENFLG(cen, pos) & 1) != 0)
                zerror("invalid CEN header (encrypted entry)");
            if (method != METHOD_STORED && method != METHOD_DEFLATED)
                zerror("invalid CEN header (unsupported compression method: " + method + ")");
            if (pos + CENHDR + nlen > limit)
                zerror("invalid CEN header (bad header size)");
            // Only the name/position pair is kept here; the full Entry is
            // materialized lazily from the cen bytes (Entry.readCEN).
            byte[] name = Arrays.copyOfRange(cen, pos + CENHDR, pos + CENHDR + nlen);
            IndexNode inode = new IndexNode(name, pos);
            inodes.put(inode, inode);
            // skip ext and comment
            pos += (CENHDR + nlen + elen + clen);
        }
        // after the loop, pos must land exactly on the END header
        if (pos + ENDHDR != cen.length) {
            zerror("invalid CEN header (bad header size)");
        }
        buildNodeTree();
        return cen;
    }
|
1081 |
|
1082 private void ensureOpen() throws IOException { |
|
1083 if (!isOpen) |
|
1084 throw new ClosedFileSystemException(); |
|
1085 } |
|
1086 |
|
1087 // Creates a new empty temporary file in the same directory as the |
|
1088 // specified file. A variant of Files.createTempFile. |
|
1089 private Path createTempFileInSameDirectoryAs(Path path) |
|
1090 throws IOException |
|
1091 { |
|
1092 Path parent = path.toAbsolutePath().getParent(); |
|
1093 Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent; |
|
1094 Path tmpPath = Files.createTempFile(dir, "zipfstmp", null); |
|
1095 tmppaths.add(tmpPath); |
|
1096 return tmpPath; |
|
1097 } |
|
1098 |
|
    ////////////////////update & sync //////////////////////////////////////

    // Set whenever an entry is added/updated/deleted; checked (and cleared)
    // by sync() to decide whether the zip file must be rewritten.
    private boolean hasUpdate = false;

    // Shared lookup key; the caller must hold the write lock before using it.
    private final IndexNode LOOKUPKEY = IndexNode.keyOf(null);
|
1105 |
|
    // Removes the given node from both the directory tree and the inode
    // map under the write lock, and marks the file system dirty so the
    // deletion is written out by the next sync().
    private void updateDelete(IndexNode inode) {
        beginWrite();
        try {
            removeFromTree(inode);
            inodes.remove(inode);
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }
|
1116 |
|
    // Installs (or replaces) an updated entry in the inode map under the
    // write lock, links new entries into the directory tree, and marks the
    // file system dirty for the next sync().
    private void update(Entry e) {
        beginWrite();
        try {
            IndexNode old = inodes.put(e, e);
            if (old != null) {
                removeFromTree(old);
            }
            if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) {
                // NOTE(review): assumes the parent directory node is always
                // present in 'inodes' -- a missing parent would NPE here;
                // confirm callers guarantee this.
                IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name)));
                e.sibling = parent.child;
                parent.child = e;
            }
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }
|
1134 |
|
    // Copies a whole LOC entry (header if necessary, data and ext) from
    // the old zip file into the new one being written at offset 'written'.
    // When updateHeader is true a fresh LOC header (reflecting e's current
    // name) is written instead of copying the stored one. Returns the
    // number of bytes written; also updates e.locoff to the new offset.
    private long copyLOCEntry(Entry e, boolean updateHeader,
                              OutputStream os,
                              long written, byte[] buf)
        throws IOException
    {
        long locoff = e.locoff;  // where to read
        e.locoff = written;      // update the e.locoff with new value

        // calculate the size that needs to be written out
        long size = 0;
        // if there is an ext record (data descriptor) after the data
        if ((e.flag & FLAG_DATADESCR) != 0) {
            if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL)
                size = 24;   // zip64 data descriptor
            else
                size = 16;
        }
        // read loc, use the original loc.elen/nlen
        if (readFullyAt(buf, 0, LOCHDR, locoff) != LOCHDR)
            throw new ZipException("loc: reading failed");
        if (updateHeader) {
            locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf);  // skip header
            size += e.csize;
            written = e.writeLOC(os) + size;
        } else {
            os.write(buf, 0, LOCHDR);    // write out the loc header
            locoff += LOCHDR;
            // use e.csize, LOCSIZ(buf) is zero if FLAG_DATADESCR is on
            // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf);
            size += LOCNAM(buf) + LOCEXT(buf) + e.csize;
            written = LOCHDR + size;
        }
        // bulk-copy 'size' bytes (name/ext/data/descriptor) from old to new
        int n;
        while (size > 0 &&
            (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1)
        {
            if (size < n)
                n = (int)size;
            os.write(buf, 0, n);
            size -= n;
            locoff += n;
        }
        return written;
    }
|
1181 |
|
    // Syncs the zip file system if there is any update: copies unchanged
    // entries from the old file, writes out new/updated entries, rebuilds
    // the CEN table and END record into a temp file, then replaces zfpath
    // with it.
    private void sync() throws IOException {
        //System.out.printf("->sync(%s) starting....!%n", toString());
        // Clean up ex-channel-closers whose outstanding streams have all
        // been closed; their backing file is no longer needed.
        // NOTE(review): removes from exChClosers while iterating it --
        // assumes the collection tolerates removal during iteration
        // (e.g. a concurrent set); confirm its declaration.
        if (!exChClosers.isEmpty()) {
            for (ExChannelCloser ecc : exChClosers) {
                if (ecc.streams.isEmpty()) {
                    ecc.ch.close();
                    Files.delete(ecc.path);
                    exChClosers.remove(ecc);
                }
            }
        }
        if (!hasUpdate)
            return;   // nothing changed since last sync
        Path tmpFile = createTempFileInSameDirectoryAs(zfpath);
        try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE)))
        {
            ArrayList<Entry> elist = new ArrayList<>(inodes.size());
            long written = 0;
            byte[] buf = new byte[8192];
            Entry e = null;

            // write loc
            for (IndexNode inode : inodes.values()) {
                if (inode instanceof Entry) {    // an updated inode
                    e = (Entry)inode;
                    try {
                        if (e.type == Entry.COPY) {
                            // entry copy: the only thing changed is the "name"
                            // and "nlen" in LOC header, so we update/rewrite the
                            // LOC in new file and simply copy the rest (data and
                            // ext) without inflating/deflating from the old zip
                            // file LOC entry.
                            written += copyLOCEntry(e, true, os, written, buf);
                        } else {                          // NEW, FILECH or CEN
                            e.locoff = written;
                            written += e.writeLOC(os);    // write loc header
                            if (e.bytes != null) {        // in-memory, deflated
                                os.write(e.bytes);        // already
                                written += e.bytes.length;
                            } else if (e.file != null) {  // tmp file
                                try (InputStream is = Files.newInputStream(e.file)) {
                                    int n;
                                    if (e.type == Entry.NEW) {  // deflated already
                                        while ((n = is.read(buf)) != -1) {
                                            os.write(buf, 0, n);
                                            written += n;
                                        }
                                    } else if (e.type == Entry.FILECH) {
                                        // the data are not deflated, use ZEOS
                                        try (OutputStream os2 = new EntryOutputStream(e, os)) {
                                            while ((n = is.read(buf)) != -1) {
                                                os2.write(buf, 0, n);
                                            }
                                        }
                                        written += e.csize;
                                        if ((e.flag & FLAG_DATADESCR) != 0)
                                            written += e.writeEXT(os);
                                    }
                                }
                                // the staging file has been consumed
                                Files.delete(e.file);
                                tmppaths.remove(e.file);
                            } else {
                                // dir, 0-length data
                            }
                        }
                        elist.add(e);
                    } catch (IOException x) {
                        x.printStackTrace();    // skip any inaccurate entry
                    }
                } else {                        // unchanged inode
                    if (inode.pos == -1) {
                        continue;               // pseudo directory node
                    }
                    e = Entry.readCEN(this, inode.pos);
                    try {
                        written += copyLOCEntry(e, false, os, written, buf);
                        elist.add(e);
                    } catch (IOException x) {
                        x.printStackTrace();    // skip any wrong entry
                    }
                }
            }

            // now write back the cen and end table
            end.cenoff = written;
            for (Entry entry : elist) {
                written += entry.writeCEN(os);
            }
            end.centot = elist.size();
            end.cenlen = written - end.cenoff;
            end.write(os, written);
        }
        if (!streams.isEmpty()) {
            //
            // TBD: ExChannelCloser should not be necessary if we only
            // sync when being closed, all streams should have been
            // closed already. Keep the logic here for now.
            //
            // There are outstanding input streams open on the existing "ch",
            // so don't close "ch" or delete the file for now; let the
            // "ex-channel-closer" handle them when the streams drain.
            ExChannelCloser ecc = new ExChannelCloser(
                                      createTempFileInSameDirectoryAs(zfpath),
                                      ch,
                                      streams);
            Files.move(zfpath, ecc.path, REPLACE_EXISTING);
            exChClosers.add(ecc);
            streams = Collections.synchronizedSet(new HashSet<InputStream>());
        } else {
            ch.close();
            Files.delete(zfpath);
        }

        // put the freshly written zip in place
        Files.move(tmpFile, zfpath, REPLACE_EXISTING);
        hasUpdate = false;    // clear
        /*
        if (isOpen) {
            ch = zfpath.newByteChannel(READ); // re-fresh "ch" and "cen"
            cen = initCEN();
        }
        */
        //System.out.printf("->sync(%s) done!%n", toString());
    }
|
1307 |
|
1308 private IndexNode getInode(byte[] path) { |
|
1309 if (path == null) |
|
1310 throw new NullPointerException("path"); |
|
1311 IndexNode key = IndexNode.keyOf(path); |
|
1312 IndexNode inode = inodes.get(key); |
|
1313 if (inode == null && |
|
1314 (path.length == 0 || path[path.length -1] != '/')) { |
|
1315 // if does not ends with a slash |
|
1316 path = Arrays.copyOf(path, path.length + 1); |
|
1317 path[path.length - 1] = '/'; |
|
1318 inode = inodes.get(key.as(path)); |
|
1319 } |
|
1320 return inode; |
|
1321 } |
|
1322 |
|
1323 private Entry getEntry0(byte[] path) throws IOException { |
|
1324 IndexNode inode = getInode(path); |
|
1325 if (inode instanceof Entry) |
|
1326 return (Entry)inode; |
|
1327 if (inode == null || inode.pos == -1) |
|
1328 return null; |
|
1329 return Entry.readCEN(this, inode.pos); |
|
1330 } |
|
1331 |
|
1332 public void deleteFile(byte[] path, boolean failIfNotExists) |
|
1333 throws IOException |
|
1334 { |
|
1335 checkWritable(); |
|
1336 |
|
1337 IndexNode inode = getInode(path); |
|
1338 if (inode == null) { |
|
1339 if (path != null && path.length == 0) |
|
1340 throw new ZipException("root directory </> can't not be delete"); |
|
1341 if (failIfNotExists) |
|
1342 throw new NoSuchFileException(getString(path)); |
|
1343 } else { |
|
1344 if (inode.isDir() && inode.child != null) |
|
1345 throw new DirectoryNotEmptyException(getString(path)); |
|
1346 updateDelete(inode); |
|
1347 } |
|
1348 } |
|
1349 |
|
1350 private static void copyStream(InputStream is, OutputStream os) |
|
1351 throws IOException |
|
1352 { |
|
1353 byte[] copyBuf = new byte[8192]; |
|
1354 int n; |
|
1355 while ((n = is.read(copyBuf)) != -1) { |
|
1356 os.write(copyBuf, 0, n); |
|
1357 } |
|
1358 } |
|
1359 |
|
    // Returns an output stream for either
    // (1) writing the contents of a new entry, if the entry exists, or
    // (2) updating/replacing the contents of the specified existing entry.
    // Data is staged in a temp file (useTempFile) or an in-memory buffer,
    // wrapped in an EntryOutputStream which registers the entry on close.
    private OutputStream getOutputStream(Entry e) throws IOException {

        if (e.mtime == -1)
            e.mtime = System.currentTimeMillis();
        if (e.method == -1)
            e.method = METHOD_DEFLATED;  // TBD: use default method
        // store size, compressed size, and crc-32 in LOC header
        e.flag = 0;
        if (zc.isUTF8())
            e.flag |= FLAG_EFS;
        OutputStream os;
        if (useTempFile) {
            // spill (possibly large) entry data to a temp file
            e.file = getTempPathForEntry(null);
            os = Files.newOutputStream(e.file, WRITE);
        } else {
            // buffer in memory; presized when the size is already known
            os = new ByteArrayOutputStream((e.size > 0)? (int)e.size : 8192);
        }
        return new EntryOutputStream(e, os);
    }
|
1382 |
|
    // Returns an input stream over the data of the given entry.
    // NEW entries are served from their pending bytes or temp file;
    // FILECH entries (already uncompressed) from their temp file; CEN/COPY
    // entries from the zip channel. DEFLATED data is wrapped in an
    // inflating stream. The stream is registered in 'streams' so sync()
    // knows about outstanding readers.
    private InputStream getInputStream(Entry e)
        throws IOException
    {
        InputStream eis = null;

        if (e.type == Entry.NEW) {
            if (e.bytes != null)
                eis = new ByteArrayInputStream(e.bytes);
            else if (e.file != null)
                eis = Files.newInputStream(e.file);
            else
                throw new ZipException("update entry data is missing");
        } else if (e.type == Entry.FILECH) {
            // FILECH result is un-compressed.
            eis = Files.newInputStream(e.file);
            // TBD: wrap to hook close()
            // streams.add(eis);
            return eis;
        } else {  // untouched CEN or COPY
            eis = new EntryInputStream(e, ch);
        }
        if (e.method == METHOD_DEFLATED) {
            // MORE: Compute good size for inflater stream:
            long bufSize = e.size + 2; // Inflater likes a bit of slack
            if (bufSize > 65536)
                bufSize = 8192;
            final long size = e.size;
            eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) {

                private boolean isClosed = false;
                // Returns the inflater to the cache and closes the
                // underlying stream exactly once.
                public void close() throws IOException {
                    if (!isClosed) {
                        releaseInflater(inf);
                        this.in.close();
                        isClosed = true;
                        streams.remove(this);
                    }
                }
                // Override fill() method to provide an extra "dummy" byte
                // at the end of the input stream. This is required when
                // using the "nowrap" Inflater option. (it appears the new
                // zlib in 7 does not need it, but keep it for now)
                protected void fill() throws IOException {
                    if (eof) {
                        throw new EOFException(
                            "Unexpected end of ZLIB input stream");
                    }
                    len = this.in.read(buf, 0, buf.length);
                    if (len == -1) {
                        buf[0] = 0;   // the dummy byte
                        len = 1;
                        eof = true;
                    }
                    inf.setInput(buf, 0, len);
                }
                private boolean eof;

                // Remaining uncompressed bytes, clamped to int range.
                public int available() throws IOException {
                    if (isClosed)
                        return 0;
                    long avail = size - inf.getBytesWritten();
                    return avail > (long) Integer.MAX_VALUE ?
                        Integer.MAX_VALUE : (int) avail;
                }
            };
        } else if (e.method == METHOD_STORED) {
            // TBD: wrap/ it does not seem necessary
        } else {
            throw new ZipException("invalid compression method");
        }
        streams.add(eis);
        return eis;
    }
|
1456 |
|
    // Inner class implementing the input stream used to read
    // a (possibly compressed) zip file entry. Streams the raw (csize)
    // bytes; inflation, when needed, is layered on top by getInputStream.
    private class EntryInputStream extends InputStream {
        private final SeekableByteChannel zfch; // local ref to zipfs's "ch". zipfs.ch might
                                                // point to a new channel after sync()
        private long pos;                       // current position within entry data
        protected long rem;                     // number of remaining bytes within entry
        protected final long size;              // uncompressed size of this entry

        EntryInputStream(Entry e, SeekableByteChannel zfch)
            throws IOException
        {
            this.zfch = zfch;
            rem = e.csize;       // raw/compressed byte count is what we stream
            size = e.size;
            pos = getDataPos(e);
        }
        // Reads up to len raw bytes; auto-closes once the entry is drained.
        public int read(byte b[], int off, int len) throws IOException {
            ensureOpen();
            if (rem == 0) {
                return -1;
            }
            if (len <= 0) {
                return 0;
            }
            if (len > rem) {
                len = (int) rem;
            }
            // readFullyAt()
            long n = 0;
            ByteBuffer bb = ByteBuffer.wrap(b);
            bb.position(off);
            bb.limit(off + len);
            // position()+read() is not atomic: serialize on the channel
            synchronized(zfch) {
                n = zfch.position(pos).read(bb);
            }
            if (n > 0) {
                pos += n;
                rem -= n;
            }
            if (rem == 0) {
                close();
            }
            return (int)n;
        }
        public int read() throws IOException {
            byte[] b = new byte[1];
            if (read(b, 0, 1) == 1) {
                return b[0] & 0xff;
            } else {
                return -1;
            }
        }
        // NOTE(review): a negative n is not rejected and would move 'pos'
        // backwards -- callers are assumed to pass n >= 0.
        public long skip(long n) throws IOException {
            ensureOpen();
            if (n > rem)
                n = rem;
            pos += n;
            rem -= n;
            if (rem == 0) {
                close();
            }
            return n;
        }
        public int available() {
            return rem > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) rem;
        }
        public long size() {
            return size;
        }
        public void close() {
            rem = 0;
            streams.remove(this);
        }
    }
|
1532 |
|
    // Output stream that deflates (or stores) entry data while tracking
    // the CRC-32 and sizes; on close() it finalizes the entry's metadata
    // and (except during sync's FILECH pass) registers it via update().
    class EntryOutputStream extends DeflaterOutputStream
    {
        private CRC32 crc;
        private Entry e;
        private long written;   // byte count for METHOD_STORED entries

        EntryOutputStream(Entry e, OutputStream os)
            throws IOException
        {
            super(os, getDeflater());
            if (e == null)
                throw new NullPointerException("Zip entry is null");
            this.e = e;
            crc = new CRC32();
        }

        @Override
        public void write(byte b[], int off, int len) throws IOException {
            if (e.type != Entry.FILECH)   // only from sync
                ensureOpen();
            if (off < 0 || len < 0 || off > b.length - len) {
                throw new IndexOutOfBoundsException();
            } else if (len == 0) {
                return;
            }
            switch (e.method) {
            case METHOD_DEFLATED:
                super.write(b, off, len);
                break;
            case METHOD_STORED:
                written += len;
                out.write(b, off, len);   // bypass the deflater
                break;
            default:
                throw new ZipException("invalid compression method");
            }
            crc.update(b, off, len);
        }

        @Override
        public void close() throws IOException {
            // TBD ensureOpen();
            switch (e.method) {
            case METHOD_DEFLATED:
                finish();
                e.size = def.getBytesRead();
                e.csize = def.getBytesWritten();
                e.crc = crc.getValue();
                break;
            case METHOD_STORED:
                // we already know that both e.size and e.csize are the same
                e.size = e.csize = written;
                e.crc = crc.getValue();
                break;
            default:
                throw new ZipException("invalid compression method");
            }
            //crc.reset();
            if (out instanceof ByteArrayOutputStream)
                e.bytes = ((ByteArrayOutputStream)out).toByteArray();

            if (e.type == Entry.FILECH) {
                // during sync(): the underlying stream belongs to sync(),
                // so don't close it here -- just return the deflater
                releaseDeflater(def);
                return;
            }
            super.close();
            releaseDeflater(def);
            update(e);
        }
    }
|
1603 |
|
    // Reports a structural problem in the zip file by throwing a ZipError
    // with the given message.
    static void zerror(String msg) {
        throw new ZipError(msg);
    }
|
1607 |
|
    // Maximum number of de/inflaters we cache
    private final int MAX_FLATER = 20;
    // List of available Inflater objects for decompression;
    // guarded by synchronized (inflaters)
    private final List<Inflater> inflaters = new ArrayList<>();
|
1612 |
|
1613 // Gets an inflater from the list of available inflaters or allocates |
|
1614 // a new one. |
|
1615 private Inflater getInflater() { |
|
1616 synchronized (inflaters) { |
|
1617 int size = inflaters.size(); |
|
1618 if (size > 0) { |
|
1619 Inflater inf = inflaters.remove(size - 1); |
|
1620 return inf; |
|
1621 } else { |
|
1622 return new Inflater(true); |
|
1623 } |
|
1624 } |
|
1625 } |
|
1626 |
|
1627 // Releases the specified inflater to the list of available inflaters. |
|
1628 private void releaseInflater(Inflater inf) { |
|
1629 synchronized (inflaters) { |
|
1630 if (inflaters.size() < MAX_FLATER) { |
|
1631 inf.reset(); |
|
1632 inflaters.add(inf); |
|
1633 } else { |
|
1634 inf.end(); |
|
1635 } |
|
1636 } |
|
1637 } |
|
1638 |
|
    // List of available Deflater objects for compression;
    // guarded by synchronized (deflaters)
    private final List<Deflater> deflaters = new ArrayList<>();
|
1641 |
|
1642 // Gets an deflater from the list of available deflaters or allocates |
|
1643 // a new one. |
|
1644 private Deflater getDeflater() { |
|
1645 synchronized (deflaters) { |
|
1646 int size = deflaters.size(); |
|
1647 if (size > 0) { |
|
1648 Deflater def = deflaters.remove(size - 1); |
|
1649 return def; |
|
1650 } else { |
|
1651 return new Deflater(Deflater.DEFAULT_COMPRESSION, true); |
|
1652 } |
|
1653 } |
|
1654 } |
|
1655 |
|
1656 // Releases the specified inflater to the list of available inflaters. |
|
1657 private void releaseDeflater(Deflater def) { |
|
1658 synchronized (deflaters) { |
|
1659 if (inflaters.size() < MAX_FLATER) { |
|
1660 def.reset(); |
|
1661 deflaters.add(def); |
|
1662 } else { |
|
1663 def.end(); |
|
1664 } |
|
1665 } |
|
1666 } |
|
1667 |
|
    // End of central directory record
    static class END {
        int disknum;
        int sdisknum;
        int endsub;     // endsub
        int centot;     // 4 bytes
        long cenlen;    // 4 bytes
        long cenoff;    // 4 bytes
        int comlen;     // comment length
        byte[] comment;

        /* members of Zip64 end of central directory locator */
        int diskNum;
        long endpos;
        int disktot;

        // Writes this END record to 'os'. When any field overflows its
        // 16/32-bit slot, a zip64 END record plus locator is emitted first
        // and the overflowing fields carry the ZIP64 marker values.
        // 'offset' is the file position of this record (i.e. just after
        // the central directory), recorded in the zip64 locator.
        void write(OutputStream os, long offset) throws IOException {
            boolean hasZip64 = false;
            long xlen = cenlen;
            long xoff = cenoff;
            if (xlen >= ZIP64_MINVAL) {
                xlen = ZIP64_MINVAL;   // real value goes in the zip64 record
                hasZip64 = true;
            }
            if (xoff >= ZIP64_MINVAL) {
                xoff = ZIP64_MINVAL;
                hasZip64 = true;
            }
            int count = centot;
            if (count >= ZIP64_MINVAL32) {
                count = ZIP64_MINVAL32;
                hasZip64 = true;
            }
            if (hasZip64) {
                long off64 = offset;
                //zip64 end of central directory record
                writeInt(os, ZIP64_ENDSIG);       // zip64 END record signature
                writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end
                writeShort(os, 45);               // version made by
                writeShort(os, 45);               // version needed to extract
                writeInt(os, 0);                  // number of this disk
                writeInt(os, 0);                  // central directory start disk
                writeLong(os, centot);            // number of directory entries on disk
                writeLong(os, centot);            // number of directory entries
                writeLong(os, cenlen);            // length of central directory
                writeLong(os, cenoff);            // offset of central directory

                //zip64 end of central directory locator
                writeInt(os, ZIP64_LOCSIG);       // zip64 END locator signature
                writeInt(os, 0);                  // zip64 END start disk
                writeLong(os, off64);             // offset of zip64 END
                writeInt(os, 1);                  // total number of disks (?)
            }
            writeInt(os, ENDSIG);                 // END record signature
            writeShort(os, 0);                    // number of this disk
            writeShort(os, 0);                    // central directory start disk
            writeShort(os, count);                // number of directory entries on disk
            writeShort(os, count);                // total number of directory entries
            writeInt(os, xlen);                   // length of central directory
            writeInt(os, xoff);                   // offset of central directory
            if (comment != null) {                // zip file comment
                writeShort(os, comment.length);
                writeBytes(os, comment);
            } else {
                writeShort(os, 0);
            }
        }
    }
|
1736 |
|
1737 // Internal node that links a "name" to its pos in cen table. |
|
1738 // The node itself can be used as a "key" to lookup itself in |
|
1739 // the HashMap inodes. |
|
1740 static class IndexNode { |
|
1741 byte[] name; |
|
1742 int hashcode; // node is hashable/hashed by its name |
|
1743 int pos = -1; // position in cen table, -1 menas the |
|
1744 // entry does not exists in zip file |
|
1745 IndexNode(byte[] name, int pos) { |
|
1746 name(name); |
|
1747 this.pos = pos; |
|
1748 } |
|
1749 |
|
1750 final static IndexNode keyOf(byte[] name) { // get a lookup key; |
|
1751 return new IndexNode(name, -1); |
|
1752 } |
|
1753 |
|
1754 final void name(byte[] name) { |
|
1755 this.name = name; |
|
1756 this.hashcode = Arrays.hashCode(name); |
|
1757 } |
|
1758 |
|
1759 final IndexNode as(byte[] name) { // reuse the node, mostly |
|
1760 name(name); // as a lookup "key" |
|
1761 return this; |
|
1762 } |
|
1763 |
|
1764 boolean isDir() { |
|
1765 return name != null && |
|
1766 (name.length == 0 || name[name.length - 1] == '/'); |
|
1767 } |
|
1768 |
|
1769 public boolean equals(Object other) { |
|
1770 if (!(other instanceof IndexNode)) { |
|
1771 return false; |
|
1772 } |
|
1773 return Arrays.equals(name, ((IndexNode)other).name); |
|
1774 } |
|
1775 |
|
1776 public int hashCode() { |
|
1777 return hashcode; |
|
1778 } |
|
1779 |
|
1780 IndexNode() {} |
|
1781 IndexNode sibling; |
|
1782 IndexNode child; // 1st child |
|
1783 } |
|
1784 |
|
1785 static class Entry extends IndexNode { |
|
1786 |
|
        static final int CEN    = 1;  // entry read from cen
        static final int NEW    = 2;  // updated contents in bytes or file
        static final int FILECH = 3;  // fch update in "file"
        static final int COPY   = 4;  // copy of a CEN entry


        byte[] bytes;        // updated content bytes
        Path   file;         // use tmp file to store bytes;
        int    type = CEN;   // default is the entry read from cen

        // entry attributes
        int    version;
        int    flag;
        int    method = -1;  // compression method
        long   mtime  = -1;  // last modification time (java millis;
                             // converted from DOS time by cen())
        long   atime  = -1;  // last access time
        long   ctime  = -1;  // create time
        long   crc    = -1;  // crc-32 of entry data
        long   csize  = -1;  // compressed size of entry data
        long   size   = -1;  // uncompressed size of entry data
        byte[] extra;        // raw extra-field bytes

        // cen
        int    versionMade;
        int    disk;
        int    attrs;
        long   attrsEx;
        long   locoff;       // offset of this entry's LOC header
        byte[] comment;

        // no-arg form used by readCEN: new Entry().cen(...)
        Entry() {}
|
1818 |
|
        // Creates a fresh, empty DEFLATED entry for the given name with
        // all timestamps set to "now" and zero-length content.
        Entry(byte[] name) {
            name(name);
            this.mtime = this.ctime = this.atime = System.currentTimeMillis();
            this.crc = 0;
            this.size = 0;
            this.csize = 0;
            this.method = METHOD_DEFLATED;
        }
|
1827 |
|
        // Creates a fresh entry of the given type (NEW, FILECH, ...).
        Entry(byte[] name, int type) {
            this(name);
            this.type = type;
        }
|
1832 |
|
        // Copy constructor: clones e's metadata under the same name with
        // the given type. NOTE(review): 'flag', 'bytes', 'file' and 'pos'
        // are not copied -- confirm callers rely on that.
        Entry (Entry e, int type) {
            name(e.name);
            this.version = e.version;
            this.ctime = e.ctime;
            this.atime = e.atime;
            this.mtime = e.mtime;
            this.crc = e.crc;
            this.size = e.size;
            this.csize = e.csize;
            this.method = e.method;
            this.extra = e.extra;
            this.versionMade = e.versionMade;
            this.disk = e.disk;
            this.attrs = e.attrs;
            this.attrsEx = e.attrsEx;
            this.locoff = e.locoff;
            this.comment = e.comment;
            this.type = type;
        }
|
1852 |
|
        // Entry whose data lives (uncompressed) in the given staging file;
        // used for FILECH-style updates. The method is STORED.
        Entry (byte[] name, Path file, int type) {
            this(name, type);
            this.file = file;
            this.method = METHOD_STORED;
        }
|
1858 |
|
1859 int version() throws ZipException { |
|
1860 if (method == METHOD_DEFLATED) |
|
1861 return 20; |
|
1862 else if (method == METHOD_STORED) |
|
1863 return 10; |
|
1864 throw new ZipException("unsupported compression method"); |
|
1865 } |
|
1866 |
|
        ///////////////////// CEN //////////////////////
        // Materializes a new Entry from the CEN record at offset 'pos'
        // inside zipfs's cached cen byte array.
        static Entry readCEN(ZipFileSystem zipfs, int pos)
            throws IOException
        {
            return new Entry().cen(zipfs, pos);
        }
|
1873 |
|
        // Populates this entry from the CEN record at offset 'pos' in
        // zipfs.cen: fixed header fields, then name, extra and comment.
        private Entry cen(ZipFileSystem zipfs, int pos)
            throws IOException
        {
            byte[] cen = zipfs.cen;
            if (CENSIG(cen, pos) != CENSIG)
                zerror("invalid CEN header (bad signature)");
            versionMade = CENVEM(cen, pos);
            version = CENVER(cen, pos);
            flag = CENFLG(cen, pos);
            method = CENHOW(cen, pos);
            // DOS time in the record, converted to java epoch millis
            mtime = dosToJavaTime(CENTIM(cen, pos));
            crc = CENCRC(cen, pos);
            csize = CENSIZ(cen, pos);
            size = CENLEN(cen, pos);
            int nlen = CENNAM(cen, pos);
            int elen = CENEXT(cen, pos);
            int clen = CENCOM(cen, pos);
            disk = CENDSK(cen, pos);
            attrs = CENATT(cen, pos);
            attrsEx = CENATX(cen, pos);
            locoff = CENOFF(cen, pos);

            pos += CENHDR;
            name(Arrays.copyOfRange(cen, pos, pos + nlen));

            pos += nlen;
            if (elen > 0) {
                extra = Arrays.copyOfRange(cen, pos, pos + elen);
                pos += elen;
                // parse known extra-field blocks (handled by readExtra)
                readExtra(zipfs);
            }
            if (clen > 0) {
                comment = Arrays.copyOfRange(cen, pos, pos + clen);
            }
            return this;
        }
|
1910 |
|
        // Writes this entry's central-directory (CEN) record to 'os' and
        // returns the number of bytes written.  ZIP64, NTFS or
        // Extended-Timestamp extra blocks are appended as needed, in
        // addition to any unrecognized extra data retained in 'extra'.
        int writeCEN(OutputStream os) throws IOException
        {
            int written = CENHDR;              // NOTE(review): dead store, never read
            int version0 = version();
            long csize0 = csize;
            long size0 = size;
            long locoff0 = locoff;
            int elen64 = 0;                    // extra for ZIP64
            int elenNTFS = 0;                  // extra for NTFS (a/c/mtime)
            int elenEXTT = 0;                  // extra for Extended Timestamp
            boolean foundExtraTime = false;    // if time stamp NTFS, EXTT present

            // confirm size/length
            int nlen = (name != null) ? name.length : 0;
            int elen = (extra != null) ? extra.length : 0;
            int eoff = 0;
            int clen = (comment != null) ? comment.length : 0;
            // Each 32-bit field that overflows is written as the ZIP64
            // marker value; its real value goes into the ZIP64 extra block.
            if (csize >= ZIP64_MINVAL) {
                csize0 = ZIP64_MINVAL;
                elen64 += 8;                   // csize(8)
            }
            if (size >= ZIP64_MINVAL) {
                size0 = ZIP64_MINVAL;          // size(8)
                elen64 += 8;
            }
            if (locoff >= ZIP64_MINVAL) {
                locoff0 = ZIP64_MINVAL;
                elen64 += 8;                   // offset(8)
            }
            if (elen64 != 0) {
                elen64 += 4;                   // header and data sz 4 bytes
            }
            // scan the carried-over extra data for an existing timestamp block
            while (eoff + 4 < elen) {
                int tag = SH(extra, eoff);
                int sz = SH(extra, eoff + 2);
                if (tag == EXTID_EXTT || tag == EXTID_NTFS) {
                    foundExtraTime = true;
                }
                eoff += (4 + sz);
            }
            if (!foundExtraTime) {
                if (isWindows) {               // use NTFS
                    elenNTFS = 36;             // total 36 bytes
                } else {                       // Extended Timestamp otherwise
                    elenEXTT = 9;              // only mtime in cen
                }
            }
            writeInt(os, CENSIG);              // CEN header signature
            if (elen64 != 0) {
                writeShort(os, 45);            // ver 4.5 for zip64
                writeShort(os, 45);
            } else {
                writeShort(os, version0);      // version made by
                writeShort(os, version0);      // version needed to extract
            }
            writeShort(os, flag);              // general purpose bit flag
            writeShort(os, method);            // compression method
            // last modification time
            writeInt(os, (int)javaToDosTime(mtime));
            writeInt(os, crc);                 // crc-32
            writeInt(os, csize0);              // compressed size
            writeInt(os, size0);               // uncompressed size
            writeShort(os, name.length);       // NOTE(review): 'nlen' above guards
                                               // against name == null but this line
                                               // would NPE — confirm name is always
                                               // non-null here
            writeShort(os, elen + elen64 + elenNTFS + elenEXTT);

            if (comment != null) {
                // NOTE(review): length field is clamped to 0xffff yet the
                // full comment bytes are written below — mismatch if
                // clen > 0xffff; confirm comments are bounded upstream
                writeShort(os, Math.min(clen, 0xffff));
            } else {
                writeShort(os, 0);
            }
            writeShort(os, 0);                 // starting disk number
            writeShort(os, 0);                 // internal file attributes (unused)
            writeInt(os, 0);                   // external file attributes (unused)
            writeInt(os, locoff0);             // relative offset of local header
            writeBytes(os, name);
            if (elen64 != 0) {
                writeShort(os, EXTID_ZIP64);   // Zip64 extra
                writeShort(os, elen64 - 4);    // size of "this" extra block
                if (size0 == ZIP64_MINVAL)
                    writeLong(os, size);
                if (csize0 == ZIP64_MINVAL)
                    writeLong(os, csize);
                if (locoff0 == ZIP64_MINVAL)
                    writeLong(os, locoff);
            }
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);               // reserved
                writeShort(os, 0x0001);        // NTFS attr tag
                writeShort(os, 24);            // attr size: 3 x 8-byte times
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);
                // flags byte advertises which times the LOC copy carries;
                // the CEN copy itself only stores mtime
                if (ctime == -1)
                    os.write(0x3);             // mtime and atime
                else
                    os.write(0x7);             // mtime, atime and ctime
                writeInt(os, javaToUnixTime(mtime));
            }
            if (extra != null)                 // whatever not recognized
                writeBytes(os, extra);
            if (comment != null)               //TBD: 0, Math.min(commentBytes.length, 0xffff));
                writeBytes(os, comment);
            return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT;
        }
|
2021 |
|
2022 ///////////////////// LOC ////////////////////// |
|
2023 static Entry readLOC(ZipFileSystem zipfs, long pos) |
|
2024 throws IOException |
|
2025 { |
|
2026 return readLOC(zipfs, pos, new byte[1024]); |
|
2027 } |
|
2028 |
|
2029 static Entry readLOC(ZipFileSystem zipfs, long pos, byte[] buf) |
|
2030 throws IOException |
|
2031 { |
|
2032 return new Entry().loc(zipfs, pos, buf); |
|
2033 } |
|
2034 |
|
        // Populates this Entry from the local file header (LOC) at 'pos',
        // read through zipfs with 'buf' (>= LOCHDR bytes) as scratch.
        // When the sizes are deferred to a data descriptor they are taken
        // from the matching CEN entry instead.  Returns 'this'.
        // The local 'pos' is advanced past the entry's data but the final
        // value is discarded — NOTE(review): apparently kept only for
        // symmetry/future use; confirm nothing expects it to be returned.
        Entry loc(ZipFileSystem zipfs, long pos, byte[] buf)
            throws IOException
        {
            assert (buf.length >= LOCHDR);
            if (zipfs.readFullyAt(buf, 0, LOCHDR , pos) != LOCHDR)
                throw new ZipException("loc: reading failed");
            if (LOCSIG(buf) != LOCSIG)
                throw new ZipException("loc: wrong sig ->"
                                       + Long.toString(LOCSIG(buf), 16));
            //startPos = pos;
            version = LOCVER(buf);
            flag = LOCFLG(buf);
            method = LOCHOW(buf);
            mtime = dosToJavaTime(LOCTIM(buf));
            crc = LOCCRC(buf);
            csize = LOCSIZ(buf);
            size = LOCLEN(buf);
            int nlen = LOCNAM(buf);
            int elen = LOCEXT(buf);

            // name and extra follow the fixed-size header
            name = new byte[nlen];
            if (zipfs.readFullyAt(name, 0, nlen, pos + LOCHDR) != nlen) {
                throw new ZipException("loc: name reading failed");
            }
            if (elen > 0) {
                extra = new byte[elen];
                if (zipfs.readFullyAt(extra, 0, elen, pos + LOCHDR + nlen)
                    != elen) {
                    throw new ZipException("loc: ext reading failed");
                }
            }
            pos += (LOCHDR + nlen + elen);
            if ((flag & FLAG_DATADESCR) != 0) {
                // Data Descriptor
                Entry e = zipfs.getEntry0(name); // get the size/csize from cen
                if (e == null)
                    throw new ZipException("loc: name not found in cen");
                size = e.size;
                csize = e.csize;
                pos += (method == METHOD_STORED ? size : csize);
                // skip the descriptor itself: 24 bytes in ZIP64 form, else 16
                if (size >= ZIP64_MINVAL || csize >= ZIP64_MINVAL)
                    pos += 24;
                else
                    pos += 16;
            } else {
                if (extra != null &&
                    (size == ZIP64_MINVAL || csize == ZIP64_MINVAL)) {
                    // zip64 ext: must include both size and csize
                    int off = 0;
                    while (off + 20 < elen) {    // HeaderID+DataSize+Data
                        int sz = SH(extra, off + 2);
                        if (SH(extra, off) == EXTID_ZIP64 && sz == 16) {
                            size = LL(extra, off + 4);
                            csize = LL(extra, off + 12);
                            break;
                        }
                        off += (sz + 4);
                    }
                }
                pos += (method == METHOD_STORED ? size : csize);
            }
            return this;
        }
|
2098 |
|
        // Writes this entry's local file header (LOC) to 'os' and returns
        // the number of bytes written.  When FLAG_DATADESCR is set the
        // crc/size fields are written as zero and deferred to the data
        // descriptor (see writeEXT); otherwise overflowing sizes go into
        // a ZIP64 extra block.  A timestamp extra (NTFS on Windows, EXTT
        // elsewhere) is appended when 'extra' carries none.
        int writeLOC(OutputStream os)
            throws IOException
        {
            writeInt(os, LOCSIG);              // LOC header signature
            int version = version();           // NOTE(review): dead local —
                                               // version() is called again below
            int nlen = (name != null) ? name.length : 0;
            int elen = (extra != null) ? extra.length : 0;
            boolean foundExtraTime = false;    // if extra timestamp present
            int eoff = 0;
            int elen64 = 0;
            int elenEXTT = 0;
            int elenNTFS = 0;
            if ((flag & FLAG_DATADESCR) != 0) {
                writeShort(os, version());     // version needed to extract
                writeShort(os, flag);          // general purpose bit flag
                writeShort(os, method);        // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                // store size, uncompressed size, and crc-32 in data descriptor
                // immediately following compressed entry data
                writeInt(os, 0);
                writeInt(os, 0);
                writeInt(os, 0);
            } else {
                if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) {
                    elen64 = 20;    //headid(2) + size(2) + size(8) + csize(8)
                    writeShort(os, 45);        // ver 4.5 for zip64
                } else {
                    writeShort(os, version()); // version needed to extract
                }
                writeShort(os, flag);          // general purpose bit flag
                writeShort(os, method);        // compression method
                // last modification time
                writeInt(os, (int)javaToDosTime(mtime));
                writeInt(os, crc);             // crc-32
                if (elen64 != 0) {
                    // real sizes are carried in the ZIP64 extra block below
                    writeInt(os, ZIP64_MINVAL);
                    writeInt(os, ZIP64_MINVAL);
                } else {
                    writeInt(os, csize);       // compressed size
                    writeInt(os, size);        // uncompressed size
                }
            }
            // scan the carried-over extra data for an existing timestamp block
            while (eoff + 4 < elen) {
                int tag = SH(extra, eoff);
                int sz = SH(extra, eoff + 2);
                if (tag == EXTID_EXTT || tag == EXTID_NTFS) {
                    foundExtraTime = true;
                }
                eoff += (4 + sz);
            }
            if (!foundExtraTime) {
                if (isWindows) {
                    elenNTFS = 36;             // NTFS, total 36 bytes
                } else {                       // on unix use "ext time"
                    elenEXTT = 9;              // 4(hdr) + 1(flags) + 4(mtime)
                    if (atime != -1)
                        elenEXTT += 4;
                    if (ctime != -1)
                        elenEXTT += 4;
                }
            }
            writeShort(os, name.length);       // NOTE(review): NPEs if name == null,
                                               // unlike the guarded 'nlen' above —
                                               // confirm name is always non-null
            writeShort(os, elen + elen64 + elenNTFS + elenEXTT);
            writeBytes(os, name);
            if (elen64 != 0) {
                writeShort(os, EXTID_ZIP64);
                writeShort(os, 16);            // data size: size(8) + csize(8)
                writeLong(os, size);
                writeLong(os, csize);
            }
            if (elenNTFS != 0) {
                writeShort(os, EXTID_NTFS);
                writeShort(os, elenNTFS - 4);
                writeInt(os, 0);               // reserved
                writeShort(os, 0x0001);        // NTFS attr tag
                writeShort(os, 24);            // attr size: 3 x 8-byte times
                writeLong(os, javaToWinTime(mtime));
                writeLong(os, javaToWinTime(atime));
                writeLong(os, javaToWinTime(ctime));
            }
            if (elenEXTT != 0) {
                writeShort(os, EXTID_EXTT);
                writeShort(os, elenEXTT - 4);// size for the following data block
                int fbyte = 0x1;
                if (atime != -1)               // mtime and atime
                    fbyte |= 0x2;
                if (ctime != -1)               // mtime, atime and ctime
                    fbyte |= 0x4;
                os.write(fbyte);               // flags byte
                writeInt(os, javaToUnixTime(mtime));
                if (atime != -1)
                    writeInt(os, javaToUnixTime(atime));
                if (ctime != -1)
                    writeInt(os, javaToUnixTime(ctime));
            }
            if (extra != null) {
                writeBytes(os, extra);         // whatever not recognized
            }
            return LOCHDR + name.length + elen + elen64 + elenNTFS + elenEXTT;
        }
|
2200 |
|
2201 // Data Descriptior |
|
2202 int writeEXT(OutputStream os) |
|
2203 throws IOException |
|
2204 { |
|
2205 writeInt(os, EXTSIG); // EXT header signature |
|
2206 writeInt(os, crc); // crc-32 |
|
2207 if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) { |
|
2208 writeLong(os, csize); |
|
2209 writeLong(os, size); |
|
2210 return 24; |
|
2211 } else { |
|
2212 writeInt(os, csize); // compressed size |
|
2213 writeInt(os, size); // uncompressed size |
|
2214 return 16; |
|
2215 } |
|
2216 } |
|
2217 |
|
2218 // read NTFS, UNIX and ZIP64 data from cen.extra |
|
2219 void readExtra(ZipFileSystem zipfs) throws IOException { |
|
2220 if (extra == null) |
|
2221 return; |
|
2222 int elen = extra.length; |
|
2223 int off = 0; |
|
2224 int newOff = 0; |
|
2225 while (off + 4 < elen) { |
|
2226 // extra spec: HeaderID+DataSize+Data |
|
2227 int pos = off; |
|
2228 int tag = SH(extra, pos); |
|
2229 int sz = SH(extra, pos + 2); |
|
2230 pos += 4; |
|
2231 if (pos + sz > elen) // invalid data |
|
2232 break; |
|
2233 switch (tag) { |
|
2234 case EXTID_ZIP64 : |
|
2235 if (size == ZIP64_MINVAL) { |
|
2236 if (pos + 8 > elen) // invalid zip64 extra |
|
2237 break; // fields, just skip |
|
2238 size = LL(extra, pos); |
|
2239 pos += 8; |
|
2240 } |
|
2241 if (csize == ZIP64_MINVAL) { |
|
2242 if (pos + 8 > elen) |
|
2243 break; |
|
2244 csize = LL(extra, pos); |
|
2245 pos += 8; |
|
2246 } |
|
2247 if (locoff == ZIP64_MINVAL) { |
|
2248 if (pos + 8 > elen) |
|
2249 break; |
|
2250 locoff = LL(extra, pos); |
|
2251 pos += 8; |
|
2252 } |
|
2253 break; |
|
2254 case EXTID_NTFS: |
|
2255 pos += 4; // reserved 4 bytes |
|
2256 if (SH(extra, pos) != 0x0001) |
|
2257 break; |
|
2258 if (SH(extra, pos + 2) != 24) |
|
2259 break; |
|
2260 // override the loc field, datatime here is |
|
2261 // more "accurate" |
|
2262 mtime = winToJavaTime(LL(extra, pos + 4)); |
|
2263 atime = winToJavaTime(LL(extra, pos + 12)); |
|
2264 ctime = winToJavaTime(LL(extra, pos + 20)); |
|
2265 break; |
|
2266 case EXTID_EXTT: |
|
2267 // spec says the Extened timestamp in cen only has mtime |
|
2268 // need to read the loc to get the extra a/ctime |
|
2269 byte[] buf = new byte[LOCHDR]; |
|
2270 if (zipfs.readFullyAt(buf, 0, buf.length , locoff) |
|
2271 != buf.length) |
|
2272 throw new ZipException("loc: reading failed"); |
|
2273 if (LOCSIG(buf) != LOCSIG) |
|
2274 throw new ZipException("loc: wrong sig ->" |
|
2275 + Long.toString(LOCSIG(buf), 16)); |
|
2276 |
|
2277 int locElen = LOCEXT(buf); |
|
2278 if (locElen < 9) // EXTT is at lease 9 bytes |
|
2279 break; |
|
2280 int locNlen = LOCNAM(buf); |
|
2281 buf = new byte[locElen]; |
|
2282 if (zipfs.readFullyAt(buf, 0, buf.length , locoff + LOCHDR + locNlen) |
|
2283 != buf.length) |
|
2284 throw new ZipException("loc extra: reading failed"); |
|
2285 int locPos = 0; |
|
2286 while (locPos + 4 < buf.length) { |
|
2287 int locTag = SH(buf, locPos); |
|
2288 int locSZ = SH(buf, locPos + 2); |
|
2289 locPos += 4; |
|
2290 if (locTag != EXTID_EXTT) { |
|
2291 locPos += locSZ; |
|
2292 continue; |
|
2293 } |
|
2294 int flag = CH(buf, locPos++); |
|
2295 if ((flag & 0x1) != 0) { |
|
2296 mtime = unixToJavaTime(LG(buf, locPos)); |
|
2297 locPos += 4; |
|
2298 } |
|
2299 if ((flag & 0x2) != 0) { |
|
2300 atime = unixToJavaTime(LG(buf, locPos)); |
|
2301 locPos += 4; |
|
2302 } |
|
2303 if ((flag & 0x4) != 0) { |
|
2304 ctime = unixToJavaTime(LG(buf, locPos)); |
|
2305 locPos += 4; |
|
2306 } |
|
2307 break; |
|
2308 } |
|
2309 break; |
|
2310 default: // unknown tag |
|
2311 System.arraycopy(extra, off, extra, newOff, sz + 4); |
|
2312 newOff += (sz + 4); |
|
2313 } |
|
2314 off += (sz + 4); |
|
2315 } |
|
2316 if (newOff != 0 && newOff != extra.length) |
|
2317 extra = Arrays.copyOf(extra, newOff); |
|
2318 else |
|
2319 extra = null; |
|
2320 } |
|
2321 } |
|
2322 |
|
2323 private static class ExChannelCloser { |
|
2324 Path path; |
|
2325 SeekableByteChannel ch; |
|
2326 Set<InputStream> streams; |
|
2327 ExChannelCloser(Path path, |
|
2328 SeekableByteChannel ch, |
|
2329 Set<InputStream> streams) |
|
2330 { |
|
2331 this.path = path; |
|
2332 this.ch = ch; |
|
2333 this.streams = streams; |
|
2334 } |
|
2335 } |
|
2336 |
|
    // ZIP directory has two issues:
    // (1) ZIP spec does not require the ZIP file to include
    //     directory entry
    // (2) all entries are not stored/organized in a "tree"
    //     structure.
    // A possible solution is to build the node tree ourself as
    // implemented below.
    //
    // NOTE(review): 'root' is never assigned or read in the code visible
    // here — buildNodeTree() declares a LOCAL of the same name that
    // shadows it.  Confirm the field is used elsewhere, or that the
    // shadowing is intentional.
    private IndexNode root;
|
2345 |
|
    // Links 'inode' into the directory tree: finds (or fabricates as a
    // pseudo entry) its parent node, recursively links the ancestors,
    // then prepends inode to the parent's child list.  'dirs' records
    // directory nodes already linked so each is processed only once.
    private void addToTree(IndexNode inode, HashSet<IndexNode> dirs) {
        if (dirs.contains(inode)) {
            return;
        }
        IndexNode parent;
        byte[] name = inode.name;
        byte[] pname = getParent(name);
        // LOOKUPKEY is a reusable probe key; as(pname) re-targets it, so
        // the containsKey/get pair below queries the same key object
        if (inodes.containsKey(LOOKUPKEY.as(pname))) {
            parent = inodes.get(LOOKUPKEY);
        } else {    // pseudo directory entry
            parent = new IndexNode(pname, -1);
            inodes.put(parent, parent);
        }
        addToTree(parent, dirs);
        // prepend to the parent's singly-linked child list
        inode.sibling = parent.child;
        parent.child = inode;
        // names ending in '/' are directories; mark them as linked
        if (name[name.length -1] == '/')
            dirs.add(inode);
    }
|
2365 |
|
2366 private void removeFromTree(IndexNode inode) { |
|
2367 IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name))); |
|
2368 IndexNode child = parent.child; |
|
2369 if (child.equals(inode)) { |
|
2370 parent.child = child.sibling; |
|
2371 } else { |
|
2372 IndexNode last = child; |
|
2373 while ((child = child.sibling) != null) { |
|
2374 if (child.equals(inode)) { |
|
2375 last.sibling = child.sibling; |
|
2376 break; |
|
2377 } else { |
|
2378 last = child; |
|
2379 } |
|
2380 } |
|
2381 } |
|
2382 } |
|
2383 |
|
2384 private void buildNodeTree() throws IOException { |
|
2385 beginWrite(); |
|
2386 try { |
|
2387 HashSet<IndexNode> dirs = new HashSet<>(); |
|
2388 IndexNode root = new IndexNode(ROOTPATH, -1); |
|
2389 inodes.put(root, root); |
|
2390 dirs.add(root); |
|
2391 for (IndexNode node : inodes.keySet().toArray(new IndexNode[0])) { |
|
2392 addToTree(node, dirs); |
|
2393 } |
|
2394 } finally { |
|
2395 endWrite(); |
|
2396 } |
|
2397 } |
|
2398 } |
|