[Pkg-javascript-commits] [node-zip-stream] 01/05: Imported Upstream version 0.3.5

Andrew Kelley andrewrk-guest at moszumanska.debian.org
Tue Jul 1 18:14:14 UTC 2014


This is an automated email from the git hooks/post-receive script.

andrewrk-guest pushed a commit to branch master
in repository node-zip-stream.

commit 40a14005a7ffd445c59c12d554f414d37941fae5
Author: Andrew Kelley <superjoe30 at gmail.com>
Date:   Tue Jul 1 18:02:46 2014 +0000

    Imported Upstream version 0.3.5
---
 .gitignore                                       |    3 +
 .travis.yml                                      |    9 +
 APPNOTE-2.0.txt                                  | 1003 ++++++++++++++++++++++
 CONTRIBUTING.md                                  |   14 +
 LICENSE-MIT                                      |   22 +
 README.md                                        |  120 +++
 lib/headers.js                                   |  279 ++++++
 lib/util/index.js                                |  105 +++
 lib/zip-stream.js                                |  307 +++++++
 package.json                                     |   54 ++
 test/fixtures/directory/level0.txt               |    1 +
 test/fixtures/directory/subdir/level1.txt        |    1 +
 test/fixtures/directory/subdir/subsub/level2.txt |    1 +
 test/fixtures/empty.txt                          |    0
 test/fixtures/headers/zip-centralfooter.bin      |  Bin 0 -> 22 bytes
 test/fixtures/headers/zip-centralheader.bin      |  Bin 0 -> 54 bytes
 test/fixtures/headers/zip-data.bin               |    1 +
 test/fixtures/headers/zip-file.bin               |  Bin 0 -> 38 bytes
 test/fixtures/headers/zip-filedescriptor.bin     |  Bin 0 -> 16 bytes
 test/fixtures/image.png                          |  Bin 0 -> 101108 bytes
 test/fixtures/test.txt                           |    1 +
 test/headers.js                                  |  118 +++
 test/helpers/index.js                            |  101 +++
 test/pack.js                                     |  356 ++++++++
 test/util.js                                     |  128 +++
 25 files changed, 2624 insertions(+)

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..c766b8b
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+npm-debug.log
+node_modules/
+tmp/
\ No newline at end of file
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..bdece26
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,9 @@
+language: node_js
+node_js:
+  - 0.11
+  - "0.10"
+  - "0.8"
+matrix:
+  fast_finish: true
+  allow_failures:
+    - node_js: 0.11
\ No newline at end of file
diff --git a/APPNOTE-2.0.txt b/APPNOTE-2.0.txt
new file mode 100644
index 0000000..e6b8105
--- /dev/null
+++ b/APPNOTE-2.0.txt
@@ -0,0 +1,1003 @@
+Copyright (c) 1990-1993 PKWARE, Inc.  All Rights Reserved
+Disclaimer
+----------
+
+Although PKWARE will attempt to supply current and accurate
+information relating to its file formats, algorithms, and the
+subject programs, the possibility of error can not be eliminated.
+PKWARE therefore expressly disclaims any warranty that the
+information contained in the associated materials relating to the
+subject programs and/or the format of the files created or
+accessed by the subject programs and/or the algorithms used by
+the subject programs, or any other matter, is current, correct or
+accurate as delivered.  Any risk of damage due to any possible
+inaccurate information is assumed by the user of the information.
+Furthermore, the information relating to the subject programs
+and/or the file formats created or accessed by the subject
+programs and/or the algorithms used by the subject programs is
+subject to change without notice.
+
+
+General Format of a ZIP file
+----------------------------
+
+  Files stored in arbitrary order.  Large zipfiles can span multiple
+  diskette media.
+
+  Overall zipfile format:
+
+    [local file header + file data + data_descriptor] . . .
+    [central directory] end of central directory record
+
+
+  A.  Local file header:
+
+  local file header signature     4 bytes  (0x04034b50)
+  version needed to extract       2 bytes
+  general purpose bit flag        2 bytes
+  compression method              2 bytes
+  last mod file time              2 bytes
+  last mod file date              2 bytes
+  crc-32                          4 bytes
+  compressed size                 4 bytes
+  uncompressed size               4 bytes
+  filename length                 2 bytes
+  extra field length              2 bytes
+
+  filename (variable size)
+  extra field (variable size)
+
+
+  B.  Data descriptor:
+
+  crc-32                          4 bytes
+  compressed size                 4 bytes
+  uncompressed size               4 bytes
+
+      This descriptor exists only if bit 3 of the general
+      purpose bit flag is set (see below).  It is byte aligned
+      and immediately follows the last byte of compressed data.
+      This descriptor is used only when it was not possible to
+      seek in the output zip file, e.g., when the output zip file
+      was standard output or a non seekable device.
+
+  C.  Central directory structure:
+
+      [file header] . . .  end of central dir record
+
+      File header:
+
+  central file header signature   4 bytes  (0x02014b50)
+  version made by                 2 bytes
+  version needed to extract       2 bytes
+  general purpose bit flag        2 bytes
+  compression method              2 bytes
+  last mod file time              2 bytes
+  last mod file date              2 bytes
+  crc-32                          4 bytes
+  compressed size                 4 bytes
+  uncompressed size               4 bytes
+  filename length                 2 bytes
+  extra field length              2 bytes
+  file comment length             2 bytes
+  disk number start               2 bytes
+  internal file attributes        2 bytes
+  external file attributes        4 bytes
+  relative offset of local header 4 bytes
+
+  filename (variable size)
+  extra field (variable size)
+  file comment (variable size)
+
+      End of central dir record:
+
+  end of central dir signature    4 bytes  (0x06054b50)
+  number of this disk             2 bytes
+  number of the disk with the
+  start of the central directory  2 bytes
+  total number of entries in
+  the central dir on this disk    2 bytes
+  total number of entries in
+  the central dir                 2 bytes
+  size of the central directory   4 bytes
+  offset of start of central
+  directory with respect to
+  the starting disk number        4 bytes
+  zipfile comment length          2 bytes
+  zipfile comment (variable size)
+
+
+  D.  Explanation of fields:
+
+      version made by (2 bytes)
+
+    The upper byte indicates the host system (OS) for the
+    file.  Software can use this information to determine
+    the line record format for text files etc.  The current
+    mappings are:
+
+    0 - MS-DOS and OS/2 (F.A.T. file systems)
+    1 - Amiga                     2 - VAX/VMS
+    3 - *nix                      4 - VM/CMS
+    5 - Atari ST                  6 - OS/2 H.P.F.S.
+    7 - Macintosh                 8 - Z-System
+    9 - CP/M                      10 thru 255 - unused
+
+    The lower byte indicates the version number of the
+    software used to encode the file.  The value/10
+    indicates the major version number, and the value
+    mod 10 is the minor version number.
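+
+    For illustration only (a sketch, not part of the original
+    APPNOTE), the field splits apart as follows in JavaScript:
+
+    var hostSystem = (versionMadeBy >> 8) & 0xff; // upper byte: host OS mapping
+    var encoder    = versionMadeBy & 0xff;        // lower byte: software version
+    var major      = Math.floor(encoder / 10);    // value / 10
+    var minor      = encoder % 10;                // value mod 10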
+
+      version needed to extract (2 bytes)
+
+    The minimum software version needed to extract the
+    file, mapped as above.
+
+      general purpose bit flag: (2 bytes)
+
+    bit 0: If set, indicates that the file is encrypted.
+
+    (For Method 6 - Imploding)
+    bit 1: If the compression method used was type 6,
+     Imploding, then this bit, if set, indicates
+     an 8K sliding dictionary was used.  If clear,
+     then a 4K sliding dictionary was used.
+    bit 2: If the compression method used was type 6,
+     Imploding, then this bit, if set, indicates
+     that 3 Shannon-Fano trees were used to encode the
+     sliding dictionary output.  If clear, then 2
+     Shannon-Fano trees were used.
+
+    (For Method 8 - Deflating)
+    bit 2  bit 1
+      0      0    Normal (-en) compression option was used.
+      0      1    Maximum (-ex) compression option was used.
+      1      0    Fast (-ef) compression option was used.
+      1      1    Super Fast (-es) compression option was used.
+
+    Note:  Bits 1 and 2 are undefined if the compression
+     method is any other.
+
+    (For method 8)
+    bit 3: If this bit is set, the fields crc-32, compressed size
+     and uncompressed size are set to zero in the local
+     header.  The correct values are put in the data descriptor
+     immediately following the compressed data.
+
+    The upper three bits are reserved and used internally
+    by the software when processing the zipfile.  The
+    remaining bits are unused.
+
+      compression method: (2 bytes)
+
+    (see accompanying documentation for algorithm
+    descriptions)
+
+    0 - The file is stored (no compression)
+    1 - The file is Shrunk
+    2 - The file is Reduced with compression factor 1
+    3 - The file is Reduced with compression factor 2
+    4 - The file is Reduced with compression factor 3
+    5 - The file is Reduced with compression factor 4
+    6 - The file is Imploded
+    7 - Reserved for Tokenizing compression algorithm
+    8 - The file is Deflated
+
+      date and time fields: (2 bytes each)
+
+    The date and time are encoded in standard MS-DOS format.
+    If input came from standard input, the date and time are
+    those at which compression was started for this data.
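+
+    As a sketch of the standard MS-DOS packing (this is the layout
+    implemented by util.dosDateTime in lib/util/index.js below):
+
+    //   bits 25-31: year - 1980     bits 21-24: month (1-12)
+    //   bits 16-20: day of month    bits 11-15: hours
+    //   bits  5-10: minutes         bits  0-4:  seconds / 2
+    var dosDateTime = ((year - 1980) << 25) | (month << 21) | (day << 16) |
+                      (hours << 11) | (minutes << 5) | (seconds >> 1);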
+
+      CRC-32: (4 bytes)
+
+    The CRC-32 algorithm was generously contributed by
+    David Schwaderer and can be found in his excellent
+    book "C Programmers Guide to NetBIOS" published by
+    Howard W. Sams & Co. Inc.  The 'magic number' for
+    the CRC is 0xdebb20e3.  The proper CRC pre and post
+    conditioning is used, meaning that the CRC register
+    is pre-conditioned with all ones (a starting value
+    of 0xffffffff) and the value is post-conditioned by
+    taking the one's complement of the CRC residual.
+    If bit 3 of the general purpose flag is set, this
+    field is set to zero in the local header and the correct
+    value is put in the data descriptor and in the central
+    directory.
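+
+    A minimal JavaScript sketch of the update step (0xedb88320 is
+    the bit-reversed form of the CRC-32 polynomial; the pre and
+    post conditioning described above is applied by the caller):
+
+    function crc32Update(crc, byte) { // crc starts at 0xffffffff
+      crc = crc ^ byte;
+      for (var i = 0; i < 8; i++) {
+        crc = (crc & 1) ? ((crc >>> 1) ^ 0xedb88320) : (crc >>> 1);
+      }
+      return crc >>> 0; // caller finishes with crc ^ 0xffffffff
+    }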
+
+      compressed size: (4 bytes)
+      uncompressed size: (4 bytes)
+
+    The size of the file compressed and uncompressed,
+    respectively.  If bit 3 of the general purpose bit flag
+    is set, these fields are set to zero in the local header
+    and the correct values are put in the data descriptor and
+    in the central directory.
+
+      filename length: (2 bytes)
+      extra field length: (2 bytes)
+      file comment length: (2 bytes)
+
+    The length of the filename, extra field, and comment
+    fields respectively.  The combined length of any
+    directory record and these three fields should not
+    generally exceed 65,535 bytes.  If input came from standard
+    input, the filename length is set to zero.
+
+
+      disk number start: (2 bytes)
+
+    The number of the disk on which this file begins.
+
+      internal file attributes: (2 bytes)
+
+    The lowest bit of this field indicates, if set, that
+    the file is apparently an ASCII or text file.  If not
+    set, that the file apparently contains binary data.
+    The remaining bits are unused in version 1.0.
+
+      external file attributes: (4 bytes)
+
+    The mapping of the external attributes is
+    host-system dependent (see 'version made by').  For
+    MS-DOS, the low order byte is the MS-DOS directory
+    attribute byte.  If input came from standard input, this
+    field is set to zero.
+
+      relative offset of local header: (4 bytes)
+
+    This is the offset from the start of the first disk on
+    which this file appears, to where the local header should
+    be found.
+
+      filename: (Variable)
+
+    The name of the file, with optional relative path.
+    The path stored should not contain a drive or
+    device letter, or a leading slash.  All slashes
+    should be forward slashes '/' as opposed to
+    backwards slashes '\' for compatibility with Amiga
+    and Unix file systems etc.  If input came from standard
+    input, there is no filename field.
+
+      extra field: (Variable)
+
+    This is for future expansion.  If additional information
+    needs to be stored in the future, it should be stored
+    here.  Earlier versions of the software can then safely
+    skip this file, and find the next file or header.  This
+    field will be 0 length in version 1.0.
+
+    In order to allow different programs and different types
+    of information to be stored in the 'extra' field in .ZIP
+    files, the following structure should be used for all
+    programs storing data in this field:
+
+    header1+data1 + header2+data2 . . .
+
+    Each header should consist of:
+
+      Header ID - 2 bytes
+      Data Size - 2 bytes
+
+    Note: all fields stored in Intel low-byte/high-byte order.
+
+    The Header ID field indicates the type of data that is in
+    the following data block.
+
+    Header ID's of 0 thru 31 are reserved for use by PKWARE.
+    The remaining ID's can be used by third party vendors for
+    proprietary usage.
+
+    The current Header ID mappings are:
+
+    0x0007        AV Info
+    0x0009        OS/2
+    0x000c        VAX/VMS
+
+    The Data Size field indicates the size of the following
+    data block. Programs can use this value to skip to the
+    next header block, passing over any data blocks that are
+    not of interest.
+
+    Note: As stated above, the size of the entire .ZIP file
+    header, including the filename, comment, and extra
+    field should not exceed 64K in size.
+
+    In case two different programs should appropriate the same
+    Header ID value, it is strongly recommended that each
+    program place a unique signature of at least two bytes in
+    size (and preferably 4 bytes or bigger) at the start of
+    each data area.  Every program should verify that its
+    unique signature is present, in addition to the Header ID
+    value being correct, before assuming that it is a block of
+    known type.
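+
+    A sketch of walking these blocks (buf is assumed to be a node
+    Buffer holding just the extra field; fields are little-endian
+    per the note above):
+
+    var offset = 0;
+    while (offset + 4 <= buf.length) {
+      var headerId = buf.readUInt16LE(offset);     // Header ID - 2 bytes
+      var dataSize = buf.readUInt16LE(offset + 2); // Data Size - 2 bytes
+      // data block: buf.slice(offset + 4, offset + 4 + dataSize)
+      offset += 4 + dataSize; // skip to the next header block
+    }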
+
+   -VAX/VMS Extra Field:
+
+    The following is the layout of the VAX/VMS attributes "extra"
+    block.  (Last Revision 12/17/91)
+
+    Note: all fields stored in Intel low-byte/high-byte order.
+
+    Value         Size            Description
+    -----         ----            -----------
+  (VMS)   0x000c        Short           Tag for this "extra" block type
+    TSize         Short           Size of the total "extra" block
+    CRC           Long            32-bit CRC for remainder of the block
+    Tag1          Short           VMS attribute tag value #1
+    Size1         Short           Size of attribute #1, in bytes
+    (var.)        Size1           Attribute #1 data
+    .
+    .
+    .
+    TagN          Short           VMS attribute tag value #N
+    SizeN         Short           Size of attribute #N, in bytes
+    (var.)        SizeN           Attribute #N data
+
+    Rules:
+
+    1. There will be one or more attributes present, which will
+       each be preceded by the above TagX & SizeX values.  These
+       values are identical to the ATR$C_XXXX and ATR$S_XXXX constants
+       which are defined in ATR.H under VMS C.  Neither of these values
+       will ever be zero.
+
+    2. No word alignment or padding is performed.
+
+    3. A well-behaved PKZIP/VMS program should never produce more than
+       one sub-block with the same TagX value.  Also, there will never
+       be more than one "extra" block of type 0x000c in a particular
+       directory record.
+
+      file comment: (Variable)
+
+    The comment for this file.
+
+      number of this disk: (2 bytes)
+
+    The number of this disk, which contains central
+    directory end record.
+
+      number of the disk with the start of the central directory: (2 bytes)
+
+    The number of the disk on which the central
+    directory starts.
+
+      total number of entries in the central dir on this disk: (2 bytes)
+
+    The number of central directory entries on this disk.
+
+      total number of entries in the central dir: (2 bytes)
+
+    The total number of files in the zipfile.
+
+
+      size of the central directory: (4 bytes)
+
+    The size (in bytes) of the entire central directory.
+
+      offset of start of central directory with respect to
+      the starting disk number:  (4 bytes)
+
+    Offset of the start of the central directory on the
+    disk on which the central directory starts.
+
+      zipfile comment length: (2 bytes)
+
+    The length of the comment for this zipfile.
+
+      zipfile comment: (Variable)
+
+    The comment for this zipfile.
+
+
+  D.  General notes:
+
+      1)  All fields unless otherwise noted are unsigned and stored
+    in Intel low-byte:high-byte, low-word:high-word order.
+
+      2)  String fields are not null terminated, since the
+    length is given explicitly.
+
+      3)  Local headers should not span disk boundaries.  Also, even
+    though the central directory can span disk boundaries, no
+    single record in the central directory should be split
+    across disks.
+
+      4)  The entries in the central directory may not necessarily
+    be in the same order that files appear in the zipfile.
+
+UnShrinking - Method 1
+----------------------
+
+Shrinking is a Dynamic Ziv-Lempel-Welch compression algorithm
+with partial clearing.  The initial code size is 9 bits, and
+the maximum code size is 13 bits.  Shrinking differs from
+conventional Dynamic Ziv-Lempel-Welch implementations in several
+respects:
+
+1)  The code size is controlled by the compressor, and is not
+    automatically increased when codes larger than the current
+    code size are created (but not necessarily used).  When
+    the decompressor encounters the code sequence 256
+    (decimal) followed by 1, it should increase the code size
+    read from the input stream to the next bit size.  No
+    blocking of the codes is performed, so the next code at
+    the increased size should be read from the input stream
+    immediately after where the previous code at the smaller
+    bit size was read.  Again, the decompressor should not
+    increase the code size used until the sequence 256,1 is
+    encountered.
+
+2)  When the table becomes full, total clearing is not
+    performed.  Rather, when the compressor emits the code
+    sequence 256,2 (decimal), the decompressor should clear
+    all leaf nodes from the Ziv-Lempel tree, and continue to
+    use the current code size.  The nodes that are cleared
+    from the Ziv-Lempel tree are then re-used, with the lowest
+    code value re-used first, and the highest code value
+    re-used last.  The compressor can emit the sequence 256,2
+    at any time.
+
+
+
+Expanding - Methods 2-5
+-----------------------
+
+The Reducing algorithm is actually a combination of two
+distinct algorithms.  The first algorithm compresses repeated
+byte sequences, and the second algorithm takes the compressed
+stream from the first algorithm and applies a probabilistic
+compression method.
+
+The probabilistic compression stores an array of 'follower
+sets' S(j), for j=0 to 255, corresponding to each possible
+ASCII character.  Each set contains between 0 and 32
+characters, to be denoted as S(j)[0],...,S(j)[m], where m<32.
+The sets are stored at the beginning of the data area for a
+Reduced file, in reverse order, with S(255) first, and S(0)
+last.
+
+The sets are encoded as { N(j), S(j)[0],...,S(j)[N(j)-1] },
+where N(j) is the size of set S(j).  N(j) can be 0, in which
+case the follower set for S(j) is empty.  Each N(j) value is
+encoded in 6 bits, followed by N(j) eight bit character values
+corresponding to S(j)[0] to S(j)[N(j)-1] respectively.  If
+N(j) is 0, then no values for S(j) are stored, and the value
+for N(j-1) immediately follows.
+
+Immediately after the follower sets is the compressed data
+stream.  The compressed data stream can be interpreted for the
+probabilistic decompression as follows:
+
+
+let Last-Character <- 0.
+loop until done
+    if the follower set S(Last-Character) is empty then
+  read 8 bits from the input stream, and copy this
+  value to the output stream.
+    otherwise if the follower set S(Last-Character) is non-empty then
+  read 1 bit from the input stream.
+  if this bit is not zero then
+      read 8 bits from the input stream, and copy this
+      value to the output stream.
+  otherwise if this bit is zero then
+      read B(N(Last-Character)) bits from the input
+      stream, and assign this value to I.
+      Copy the value of S(Last-Character)[I] to the
+      output stream.
+
+    assign the last value placed on the output stream to
+    Last-Character.
+end loop
+
+
+B(N(j)) is defined as the minimal number of bits required to
+encode the value N(j)-1.
+
+
+The decompressed stream from above can then be expanded to
+re-create the original file as follows:
+
+
+let State <- 0.
+
+loop until done
+    read 8 bits from the input stream into C.
+    case State of
+  0:  if C is not equal to DLE (144 decimal) then
+    copy C to the output stream.
+      otherwise if C is equal to DLE then
+    let State <- 1.
+
+  1:  if C is non-zero then
+    let V <- C.
+    let Len <- L(V)
+    let State <- F(Len).
+      otherwise if C is zero then
+    copy the value 144 (decimal) to the output stream.
+    let State <- 0
+
+  2:  let Len <- Len + C
+      let State <- 3.
+
+  3:  move backwards D(V,C) bytes in the output stream
+      (if this position is before the start of the output
+      stream, then assume that all the data before the
+      start of the output stream is filled with zeros).
+      copy Len+3 bytes from this position to the output stream.
+      let State <- 0.
+    end case
+end loop
+
+
+The functions F, L, and D are dependent on the 'compression
+factor', 1 through 4, and are defined as follows:
+
+For compression factor 1:
+    L(X) equals the lower 7 bits of X.
+    F(X) equals 2 if X equals 127 otherwise F(X) equals 3.
+    D(X,Y) equals the (upper 1 bit of X) * 256 + Y + 1.
+For compression factor 2:
+    L(X) equals the lower 6 bits of X.
+    F(X) equals 2 if X equals 63 otherwise F(X) equals 3.
+    D(X,Y) equals the (upper 2 bits of X) * 256 + Y + 1.
+For compression factor 3:
+    L(X) equals the lower 5 bits of X.
+    F(X) equals 2 if X equals 31 otherwise F(X) equals 3.
+    D(X,Y) equals the (upper 3 bits of X) * 256 + Y + 1.
+For compression factor 4:
+    L(X) equals the lower 4 bits of X.
+    F(X) equals 2 if X equals 15 otherwise F(X) equals 3.
+    D(X,Y) equals the (upper 4 bits of X) * 256 + Y + 1.
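+
+The four factors follow one pattern; as an illustrative sketch
+(factor is 1 through 4):
+
+    var mask = (1 << (8 - factor)) - 1;  // 127, 63, 31 or 15
+    function L(x) { return x & mask; }
+    function F(x) { return x === mask ? 2 : 3; }
+    function D(x, y) { return (x >> (8 - factor)) * 256 + y + 1; }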
+
+
+Imploding - Method 6
+--------------------
+
+The Imploding algorithm is actually a combination of two distinct
+algorithms.  The first algorithm compresses repeated byte
+sequences using a sliding dictionary.  The second algorithm is
+used to compress the encoding of the sliding dictionary output,
+using multiple Shannon-Fano trees.
+
+The Imploding algorithm can use a 4K or 8K sliding dictionary
+size. The dictionary size used can be determined by bit 1 in the
+general purpose flag word; a 0 bit indicates a 4K dictionary
+while a 1 bit indicates an 8K dictionary.
+
+The Shannon-Fano trees are stored at the start of the compressed
+file. The number of trees stored is defined by bit 2 in the
+general purpose flag word; a 0 bit indicates two trees stored, a
+1 bit indicates three trees are stored.  If 3 trees are stored,
+the first Shannon-Fano tree represents the encoding of the
+Literal characters, the second tree represents the encoding of
+the Length information, the third represents the encoding of the
+Distance information.  When 2 Shannon-Fano trees are stored, the
+Length tree is stored first, followed by the Distance tree.
+
+The Literal Shannon-Fano tree, if present, is used to represent
+the entire ASCII character set, and contains 256 values.  This
+tree is used to compress any data not compressed by the sliding
+dictionary algorithm.  When this tree is present, the Minimum
+Match Length for the sliding dictionary is 3.  If this tree is
+not present, the Minimum Match Length is 2.
+
+The Length Shannon-Fano tree is used to compress the Length part
+of the (length,distance) pairs from the sliding dictionary
+output.  The Length tree contains 64 values, ranging from the
+Minimum Match Length, to 63 plus the Minimum Match Length.
+
+The Distance Shannon-Fano tree is used to compress the Distance
+part of the (length,distance) pairs from the sliding dictionary
+output. The Distance tree contains 64 values, ranging from 0 to
+63, representing the upper 6 bits of the distance value.  The
+distance values themselves will be between 0 and the sliding
+dictionary size, either 4K or 8K.
+
+The Shannon-Fano trees themselves are stored in a compressed
+format. The first byte of the tree data represents the number of
+bytes of data representing the (compressed) Shannon-Fano tree
+minus 1.  The remaining bytes represent the Shannon-Fano tree
+data encoded as:
+
+    High 4 bits: Number of values at this bit length + 1. (1 - 16)
+    Low  4 bits: Bit Length needed to represent value + 1. (1 - 16)
+
+The Shannon-Fano codes can be constructed from the bit lengths
+using the following algorithm:
+
+1)  Sort the Bit Lengths in ascending order, while retaining the
+    order of the original lengths stored in the file.
+
+2)  Generate the Shannon-Fano trees:
+
+    Code <- 0
+    CodeIncrement <- 0
+    LastBitLength <- 0
+    i <- number of Shannon-Fano codes - 1   (either 255 or 63)
+
+    loop while i >= 0
+  Code = Code + CodeIncrement
+  if BitLength(i) <> LastBitLength then
+      LastBitLength=BitLength(i)
+      CodeIncrement = 1 shifted left (16 - LastBitLength)
+  ShannonCode(i) = Code
+  i <- i - 1
+    end loop
+
+
+3)  Reverse the order of all the bits in the above ShannonCode()
+    vector, so that the most significant bit becomes the least
+    significant bit.  For example, the value 0x1234 (hex) would
+    become 0x2C48 (hex).
+
+4)  Restore the order of Shannon-Fano codes as originally stored
+    within the file.
+
+Example:
+
+    This example will show the encoding of a Shannon-Fano tree
+    of size 8.  Notice that the actual Shannon-Fano trees used
+    for Imploding are either 64 or 256 entries in size.
+
+Example:   0x02, 0x42, 0x01, 0x13
+
+    The first byte indicates 3 values in this table.  Decoding the
+    bytes:
+      0x42 = 5 codes of 3 bits long
+      0x01 = 1 code  of 2 bits long
+      0x13 = 2 codes of 4 bits long
+
+    This would generate the original bit length array of:
+    (3, 3, 3, 3, 3, 2, 4, 4)
+
+    There are 8 codes in this table for the values 0 thru 7.  Using the
+    algorithm to obtain the Shannon-Fano codes produces:
+
+          Reversed     Order     Original
+Val  Sorted   Constructed Code      Value     Restored    Length
+---  ------   -----------------   --------    --------    ------
+0:     2      1100000000000000        11       101          3
+1:     3      1010000000000000       101       001          3
+2:     3      1000000000000000       001       110          3
+3:     3      0110000000000000       110       010          3
+4:     3      0100000000000000       010       100          3
+5:     3      0010000000000000       100        11          2
+6:     4      0001000000000000      1000      1000          4
+7:     4      0000000000000000      0000      0000          4
+
+
+The values in the Val, Order Restored and Original Length columns
+now represent the Shannon-Fano encoding tree that can be used for
+decoding the Shannon-Fano encoded data.  How to parse the
+variable length Shannon-Fano values from the data stream is beyond the
+scope of this document.  (See the references listed at the end of
+this document for more information.)  However, traditional decoding
+schemes used for Huffman variable length decoding, such as the
+Greenlaw algorithm, can be successfully applied.
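+
+A JavaScript sketch of steps 2 and 3 above (bitLengths is the
+sorted bit length array; the reversal keeps all 16 bits, as in
+the 0x1234 -> 0x2C48 example):
+
+function buildShannonCodes(bitLengths) {
+  var code = 0, codeIncrement = 0, lastBitLength = 0;
+  var codes = new Array(bitLengths.length);
+  for (var i = bitLengths.length - 1; i >= 0; i--) {
+    code += codeIncrement;
+    if (bitLengths[i] !== lastBitLength) {
+      lastBitLength = bitLengths[i];
+      codeIncrement = 1 << (16 - lastBitLength);
+    }
+    codes[i] = code;
+  }
+  return codes.map(function(c) { // reverse the 16 bits of each code
+    var reversed = 0;
+    for (var b = 0; b < 16; b++) {
+      reversed = (reversed << 1) | ((c >> b) & 1);
+    }
+    return reversed;
+  });
+}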
+
+The compressed data stream begins immediately after the
+compressed Shannon-Fano data.  The compressed data stream can be
+interpreted as follows:
+
+loop until done
+    read 1 bit from input stream.
+
+    if this bit is non-zero then       (encoded data is literal data)
+  if Literal Shannon-Fano tree is present
+      read and decode character using Literal Shannon-Fano tree.
+  otherwise
+      read 8 bits from input stream.
+  copy character to the output stream.
+    otherwise                   (encoded data is sliding dictionary match)
+  if 8K dictionary size
+      read 7 bits for offset Distance (lower 7 bits of offset).
+  otherwise
+      read 6 bits for offset Distance (lower 6 bits of offset).
+
+  using the Distance Shannon-Fano tree, read and decode the
+    upper 6 bits of the Distance value.
+
+  using the Length Shannon-Fano tree, read and decode
+    the Length value.
+
+  Length <- Length + Minimum Match Length
+
+  if Length = 63 + Minimum Match Length
+      read 8 bits from the input stream,
+      add this value to Length.
+
+  move backwards Distance+1 bytes in the output stream, and
+  copy Length characters from this position to the output
+  stream.  (if this position is before the start of the output
+  stream, then assume that all the data before the start of
+  the output stream is filled with zeros).
+end loop
+
+Tokenizing - Method 7
+--------------------
+
+This method is not used by PKZIP.
+
+Deflating - Method 8
+-----------------
+
+The Deflate algorithm is similar to the Implode algorithm using
+a sliding dictionary of up to 32K with secondary compression
+from Huffman/Shannon-Fano codes.
+
+The compressed data is stored in blocks with a header describing
+the block and the Huffman codes used in the data block.  The header
+format is as follows:
+
+   Bit 0: Last Block bit     This bit is set to 1 if this is the last
+           compressed block in the data.
+   Bits 1-2: Block type
+      00 (0) - Block is stored - All stored data is byte aligned.
+         Skip bits until next byte, then next word = block length,
+         followed by the one's complement of the block length word.
+         Remaining data in block is the stored data.
+
+      01 (1) - Use fixed Huffman codes for literal and distance codes.
+         Lit Code    Bits             Dist Code   Bits
+         ---------   ----             ---------   ----
+           0 - 143    8               0 - 31      5
+         144 - 255    9
+         256 - 279    7
+         280 - 287    8
+
+         Literal codes 286-287 and distance codes 30-31 are never
+         used but participate in the Huffman construction.
+
+      10 (2) - Dynamic Huffman codes.  (See expanding Huffman codes)
+
+      11 (3) - Reserved - Flag an "Error in compressed data" if seen.
+
+Expanding Huffman Codes
+-----------------------
+If the data block is stored with dynamic Huffman codes, the Huffman
+codes are sent in the following compressed format:
+
+   5 Bits: # of Literal codes sent - 256 (256 - 286)
+     All other codes are never sent.
+   5 Bits: # of Dist codes - 1           (1 - 32)
+   4 Bits: # of Bit Length codes - 3     (3 - 19)
+
+The Huffman codes are sent as bit lengths and the codes are built as
+described in the implode algorithm.  The bit lengths themselves are
+compressed with Huffman codes.  There are 19 bit length codes:
+
+   0 - 15: Represent bit lengths of 0 - 15
+       16: Copy the previous bit length 3 - 6 times.
+     The next 2 bits indicate repeat length (0 = 3, ... ,3 = 6)
+        Example:  Codes 8, 16 (+2 bits 11), 16 (+2 bits 10) will
+      expand to 12 bit lengths of 8 (1 + 6 + 5)
+       17: Repeat a bit length of 0 for 3 - 10 times. (3 bits of length)
+       18: Repeat a bit length of 0 for 11 - 138 times (7 bits of length)
+
+The lengths of the bit length codes are sent packed 3 bits per value
+(0 - 7) in the following order:
+
+   16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15
+
+The Huffman codes should be built as described in the Implode algorithm
+except codes are assigned starting at the shortest bit length, i.e. the
+shortest code should be all 0's rather than all 1's.  Also, codes with
+a bit length of zero do not participate in the tree construction.  The
+codes are then used to decode the bit lengths for the literal and distance
+tables.
+
+The bit lengths for the literal tables are sent first with the number
+of entries sent described by the 5 bits sent earlier.  There are up
+to 286 literal characters; the first 256 represent the respective 8
+bit character, code 256 represents the End-Of-Block code, the remaining
+29 codes represent copy lengths of 3 thru 258.  There are up to 30
+distance codes representing distances from 1 thru 32k as described
+below.
+
+           Length Codes
+           ------------
+      Extra             Extra              Extra              Extra
+ Code Bits Length  Code Bits Lengths  Code Bits Lengths  Code Bits Length(s)
+ ---- ---- ------  ---- ---- -------  ---- ---- -------  ---- ---- ---------
+  257   0     3     265   1   11,12    273   3   35-42    281   5  131-162
+  258   0     4     266   1   13,14    274   3   43-50    282   5  163-194
+  259   0     5     267   1   15,16    275   3   51-58    283   5  195-226
+  260   0     6     268   1   17,18    276   3   59-66    284   5  227-257
+  261   0     7     269   2   19-22    277   4   67-82    285   0    258
+  262   0     8     270   2   23-26    278   4   83-98
+  263   0     9     271   2   27-30    279   4   99-114
+  264   0    10     272   2   31-34    280   4  115-130
+
+          Distance Codes
+          --------------
+      Extra           Extra             Extra               Extra
+ Code Bits Dist  Code Bits  Dist   Code Bits Distance  Code Bits Distance
+ ---- ---- ----  ---- ---- ------  ---- ---- --------  ---- ---- --------
+   0   0    1      8   3   17-24    16    7  257-384    24   11  4097-6144
+   1   0    2      9   3   25-32    17    7  385-512    25   11  6145-8192
+   2   0    3     10   4   33-48    18    8  513-768    26   12  8193-12288
+   3   0    4     11   4   49-64    19    8  769-1024   27   12 12289-16384
+   4   1   5,6    12   5   65-96    20    9 1025-1536   28   13 16385-24576
+   5   1   7,8    13   5   97-128   21    9 1537-2048   29   13 24577-32768
+   6   2   9-12   14   6  129-192   22   10 2049-3072
+   7   2  13-16   15   6  193-256   23   10 3073-4096
+
+The compressed data stream begins immediately after the
+compressed header data.  The compressed data stream can be
+interpreted as follows:
+
+do
+   read header from input stream.
+
+   if stored block
+      skip bits until byte aligned
+      read count and one's complement of count
+      copy count bytes data block
+   otherwise
+      loop until end of block code sent
+   decode literal character from input stream
+   if literal < 256
+      copy character to the output stream
+   otherwise
+      if literal = end of block
+         break from loop
+      otherwise
+         decode distance from input stream
+
+         move backwards distance bytes in the output stream, and
+         copy length characters from this position to the output
+         stream.
+      end loop
+while not last block
+
+if data descriptor exists
+   skip bits until byte aligned
+   read crc and sizes
+endif
+
+Decryption
+----------
+
+The encryption used in PKZIP was generously supplied by Roger
+Schlafly.  PKWARE is grateful to Mr. Schlafly for his expert
+help and advice in the field of data encryption.
+
+PKZIP encrypts the compressed data stream.  Encrypted files must
+be decrypted before they can be extracted.
+
+Each encrypted file has an extra 12 bytes stored at the start of
+the data area defining the encryption header for that file.  The
+encryption header is originally set to random values, and then
+itself encrypted, using three 32-bit keys.  The key values are
+initialized using the supplied encryption password.  After each byte
+is encrypted, the keys are then updated using pseudo-random number
+generation techniques in combination with the same CRC-32 algorithm
+used in PKZIP and described elsewhere in this document.
+
+The following are the basic steps required to decrypt a file:
+
+1) Initialize the three 32-bit keys with the password.
+2) Read and decrypt the 12-byte encryption header, further
+   initializing the encryption keys.
+3) Read and decrypt the compressed data stream using the
+   encryption keys.
+
+
+Step 1 - Initializing the encryption keys
+-----------------------------------------
+
+Key(0) <- 305419896
+Key(1) <- 591751049
+Key(2) <- 878082192
+
+loop for i <- 0 to length(password)-1
+    update_keys(password(i))
+end loop
+
+
+Where update_keys() is defined as:
+
+
+update_keys(char):
+  Key(0) <- crc32(key(0),char)
+  Key(1) <- Key(1) + (Key(0) & 000000ffH)
+  Key(1) <- Key(1) * 134775813 + 1
+  Key(2) <- crc32(key(2),key(1) >> 24)
+end update_keys
+
+
+Where crc32(old_crc,char) is a routine that given a CRC value and a
+character, returns an updated CRC value after applying the CRC-32
+algorithm described elsewhere in this document.
+
+
+Step 2 - Decrypting the encryption header
+-----------------------------------------
+
+The purpose of this step is to further initialize the encryption
+keys, based on random data, to render a plaintext attack on the
+data ineffective.
+
+
+Read the 12-byte encryption header into Buffer, in locations
+Buffer(0) thru Buffer(11).
+
+loop for i <- 0 to 11
+    C <- buffer(i) ^ decrypt_byte()
+    update_keys(C)
+    buffer(i) <- C
+end loop
+
+
+Where decrypt_byte() is defined as:
+
+
+unsigned char decrypt_byte()
+    local unsigned short temp
+    temp <- Key(2) | 2
+    decrypt_byte <- (temp * (temp ^ 1)) >> 8
+end decrypt_byte
+
+
+After the header is decrypted, the last 1 or 2 bytes in Buffer
+should be the high-order word/byte of the CRC for the file being
+decrypted, stored in Intel low-byte/high-byte order.  Versions of
+PKZIP prior to 2.0 used a 2 byte CRC check; a 1 byte CRC check is
+used on versions after 2.0.  This can be used to test if the password
+supplied is correct or not.
+
+
+Step 3 - Decrypting the compressed data stream
+----------------------------------------------
+
+The compressed data stream can be decrypted as follows:
+
+
+loop until done
+    read a character into C
+    Temp <- C ^ decrypt_byte()
+    update_keys(Temp)
+    output Temp
+end loop
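+
+A self-contained JavaScript sketch of the routines above
+(Math.imul keeps the multiply within 32 bits; crc32 is the raw
+update from the CRC-32 section, without pre/post conditioning):
+
+var keys = [305419896, 591751049, 878082192];
+
+function crc32(crc, byte) {
+  crc = crc ^ byte;
+  for (var i = 0; i < 8; i++) {
+    crc = (crc & 1) ? ((crc >>> 1) ^ 0xedb88320) : (crc >>> 1);
+  }
+  return crc >>> 0;
+}
+
+function updateKeys(byte) {
+  keys[0] = crc32(keys[0], byte);
+  keys[1] = (keys[1] + (keys[0] & 0xff)) >>> 0;
+  keys[1] = (Math.imul(keys[1], 134775813) + 1) >>> 0;
+  keys[2] = crc32(keys[2], (keys[1] >>> 24) & 0xff);
+}
+
+function decryptByte() {
+  var temp = (keys[2] | 2) & 0xffff; // local unsigned short
+  return ((temp * (temp ^ 1)) >>> 8) & 0xff;
+}
+
+// step 1: feed each password byte to updateKeys(); steps 2 and 3:
+// for each byte read, C = byte ^ decryptByte(), then updateKeys(C).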
+
+
+In addition to the above mentioned contributors to PKZIP and PKUNZIP,
+I would like to extend special thanks to Robert Mahoney for suggesting
+the extension .ZIP for this software.
+
+
+References:
+
+    Fiala, Edward R., and Greene, Daniel H., "Data compression with
+       finite windows",  Communications of the ACM, Volume 32, Number 4,
+       April 1989, pages 490-505.
+
+    Held, Gilbert, "Data Compression, Techniques and Applications,
+        Hardware and Software Considerations",
+       John Wiley & Sons, 1987.
+
+    Huffman, D.A., "A method for the construction of minimum-redundancy
+       codes", Proceedings of the IRE, Volume 40, Number 9, September 1952,
+       pages 1098-1101.
+
+    Nelson, Mark, "LZW Data Compression", Dr. Dobbs Journal, Volume 14,
+       Number 10, October 1989, pages 29-37.
+
+    Nelson, Mark, "The Data Compression Book",  M&T Books, 1991.
+
+    Storer, James A., "Data Compression, Methods and Theory",
+       Computer Science Press, 1988
+
+    Welch, Terry, "A Technique for High-Performance Data Compression",
+       IEEE Computer, Volume 17, Number 6, June 1984, pages 8-19.
+
+    Ziv, J. and Lempel, A., "A universal algorithm for sequential data
+       compression", Communications of the ACM, Volume 30, Number 6,
+       June 1987, pages 520-540.
+
+    Ziv, J. and Lempel, A., "Compression of individual sequences via
+       variable-rate coding", IEEE Transactions on Information Theory,
+       Volume 24, Number 5, September 1978, pages 530-536.
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..1fa92d4
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,14 @@
+## Contributing
+
+#### Code Style Guide
+
+* code should be indented with 2 spaces
+* single quotes should be used where feasible
+* commas should be followed by a single space (function params, etc)
+* variable declaration should include `var`, [no multiple declarations](http://benalman.com/news/2012/05/multiple-var-statements-javascript/)
+
+#### Tests
+
+* tests should be added to the nodeunit configs in `test/`
+* tests can be run with `npm test`
+* see existing tests for guidance
\ No newline at end of file
diff --git a/LICENSE-MIT b/LICENSE-MIT
new file mode 100644
index 0000000..56420a6
--- /dev/null
+++ b/LICENSE-MIT
@@ -0,0 +1,22 @@
+Copyright (c) 2014 Chris Talkington, contributors.
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..202d731
--- /dev/null
+++ b/README.md
@@ -0,0 +1,120 @@
+# zip-stream v0.3.5 [![Build Status](https://travis-ci.org/ctalkington/node-zip-stream.svg?branch=master)](https://travis-ci.org/ctalkington/node-zip-stream)
+
+zip-stream is a streaming zip archive generator. It was built to be a successor to [zipstream](https://npmjs.org/package/zipstream). Dependencies are kept to a minimum by using many of node's built-in modules, including the zlib module for compression.
+
+[![NPM](https://nodei.co/npm/zip-stream.png)](https://nodei.co/npm/zip-stream/)
+
+### Install
+
+```bash
+npm install zip-stream --save
+```
+
+You can also use `npm install https://github.com/ctalkington/node-zip-stream/archive/master.tar.gz` to test upcoming versions.
+
+### Usage
+
+This module is meant to be wrapped internally by other modules and therefore lacks any queue management. This means you have to wait until the previous entry has been fully consumed before adding another. Nested callbacks should be used to add multiple entries. There are modules like [async](https://npmjs.org/package/async) that ease the so-called "callback hell".
+
+If you want a module that handles entry queueing and much more, you should check out [archiver](https://npmjs.org/package/archiver) which uses this module internally.
+
+```js
+var packer = require('zip-stream');
+var archive = new packer(); // OR new packer(options)
+
+archive.on('error', function(err) {
+  throw err;
+});
+
+// pipe archive where you want it (ie fs, http, etc)
+// listen to the destination's end, close, or finish event
+
+archive.entry('string contents', { name: 'string.txt' }, function(err, entry) {
+  if (err) throw err;
+  archive.entry(null, { name: 'directory/' }, function(err, entry) {
+    if (err) throw err;
+    archive.finalize();
+  });
+});
+```
+
+### Instance API
+
+#### entry(input, data, callback(err, data))
+
+Appends an input source (text string, buffer, or stream) to the instance. When the instance has received, processed, and emitted the input, the callback is fired.
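+
+For example, a stream source can be appended much like the string in
+the usage sample above (the filename here is illustrative):
+
+```js
+var fs = require('fs');
+
+archive.entry(fs.createReadStream('file.txt'), { name: 'file.txt' }, function(err, entry) {
+  if (err) throw err;
+  archive.finalize();
+});
+```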
+
+#### finalize()
+
+Finalizes the instance. You should listen to the destination stream's `end`/`close`/`finish` event to know when all output has been safely consumed.
+
+### Instance Options
+
+#### comment `string`
+
+Sets the zip comment.
+
+#### forceUTC `boolean`
+
+If true, forces the entry date to UTC. Helps with testing across timezones.
+
+#### store `boolean`
+
+If true, all entry contents will be archived without compression by default.
+
+#### zlib `object`
+
+Passed to node's [zlib](http://nodejs.org/api/zlib.html#zlib_options) module to control compression. Options may vary by node version.
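+
+For example, to pass a specific compression level through to zlib:
+
+```js
+var archive = new packer({ zlib: { level: 9 } });
+```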
+
+### Entry Data
+
+#### name `string` `required`
+
+Sets the entry name including internal path.
+
+#### type `string`
+
+Sets the entry type. Defaults to `file`, or to `directory` if the name ends with a trailing slash.
+
+#### date `string|Date`
+
+Sets the entry date. This can be any valid date string or Date instance. Defaults to the current local time.
+
+#### store `boolean`
+
+If true, entry contents will be archived without compression.
+
+#### comment `string`
+
+Sets the entry comment.
+
+#### mode `number`
+
+Sets the entry permissions. (experimental)
+
+## Debugging
+
+This library makes use of the [debug](https://npmjs.org/package/debug) module with a namespace of `zip-stream` which can be triggered by setting `DEBUG` in your environment like so:
+
+```shell
+# unix
+DEBUG=zip-stream:* node script
+
+# windows (powershell)
+$env:DEBUG="zip-stream:*"
+node script
+
+# windows (cmd)
+SET DEBUG=zip-stream:*
+node script
+```
+
+## Things of Interest
+
+- [Releases](https://github.com/ctalkington/node-zip-stream/releases)
+- [Contributing](https://github.com/ctalkington/node-zip-stream/blob/master/CONTRIBUTING.md)
+- [MIT License](https://github.com/ctalkington/node-zip-stream/blob/master/LICENSE-MIT)
+
+## Credits
+
+Concept inspired by Antoine van Wel's [zipstream](https://npmjs.org/package/zipstream) module, which is no longer being updated.
\ No newline at end of file
diff --git a/lib/headers.js b/lib/headers.js
new file mode 100644
index 0000000..c052c06
--- /dev/null
+++ b/lib/headers.js
@@ -0,0 +1,279 @@
+/**
+ * node-zip-stream
+ *
+ * Copyright (c) 2014 Chris Talkington, contributors.
+ * Licensed under the MIT license.
+ * https://github.com/ctalkington/node-zip-stream/blob/master/LICENSE-MIT
+ */
+var inherits = require('util').inherits;
+var util = require('./util');
+
+var debug = util.debug('zip-stream:headers');
+
+var DEFAULT_FILE_MODE = 0100644; // 644 -rw-r--r-- = S_IFREG | S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH
+var DEFAULT_DIR_MODE = 040755; // 755 drwxr-xr-x = S_IFDIR | S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH
+var EXT_FILE_ATTR_DIR = 010173200020; // 755 drwxr-xr-x = (((S_IFDIR | 0755) << 16) | S_DOS_D)
+var EXT_FILE_ATTR_FILE = 020151000040; // 644 -rw-r--r-- = (((S_IFREG | 0644) << 16) | S_DOS_A) >>> 0
+
+// Unix file types
+var S_IFIFO = 010000; // named pipe (fifo)
+var S_IFCHR = 020000; // character special
+var S_IFDIR = 040000; // directory
+var S_IFBLK = 060000; // block special
+var S_IFREG = 0100000; // regular
+var S_IFLNK = 0120000; // symbolic link
+var S_IFSOCK = 0140000; // socket
+
+var S_IRWXU = 0700; // RWX mask for owner
+var S_IRUSR = 0400; // R for owner
+var S_IWUSR = 0200; // W for owner
+var S_IXUSR = 0100; // X for owner
+
+var S_IRWXG = 070; // RWX mask for group
+var S_IRGRP = 040; // R for group
+var S_IWGRP = 020; // W for group
+var S_IXGRP = 010; // X for group
+
+var S_IRWXO = 07; // RWX mask for other
+var S_IROTH = 04; // R for other
+var S_IWOTH = 02; // W for other
+var S_IXOTH = 01; // X for other
+
+var S_ISVTX = 01000; // save swapped text even after use
+
+// setuid/setgid/sticky bits
+var S_ISUID = 04000; // set user id on execution
+var S_ISGID = 02000; // set group id on execution
+var S_ISTXT = 01000; // sticky bit
+
+// DOS file type flags
+var S_DOS_A = 040; // Archive
+var S_DOS_D = 020; // Directory
+var S_DOS_V = 010; // Volume
+var S_DOS_S = 04; // System
+var S_DOS_H = 02; // Hidden
+var S_DOS_R = 01; // Read Only
+
+function ZipHeader() {
+  this.name = 'zipHeader';
+  this.bufferSize = 0;
+  this.fields = [];
+}
+
+ZipHeader.prototype.toBuffer = function(data) {
+  var self = this;
+  var buf = new Buffer(self.bufferSize);
+  var offset = 0;
+  var val;
+  var valLength;
+  var fallback;
+
+  debug('%s:start', self.name);
+
+  data = self._normalize(data);
+
+  self.fields.forEach(function(field) {
+    fallback = (field.type === 'string') ? '' : 0;
+    val = data[field.name] || field.def || fallback;
+    valLength = (field.lenField && data[field.lenField] > 0) ? data[field.lenField] : field.len;
+
+    if (typeof buf['write' + field.type] === 'function') {
+      debug('%s:%s:%s:%d+%d', self.name, field.type, field.name, offset, field.len);
+      buf['write' + field.type](val, offset);
+    } else if (val.length > 0) {
+      debug('%s:%s:%d+%d', self.name, field.name, offset, val.length);
+      buf.write(val, offset);
+    }
+
+    offset += valLength;
+  });
+
+  debug('%s:finish:%d', self.name, offset);
+
+  return buf.slice(0, offset);
+};
+
+ZipHeader.prototype.toObject = function(buf) {
+  var self = this;
+  var data = {};
+  var offset = 0;
+  var valLength;
+
+  self.fields.forEach(function(field) {
+    valLength = (field.lenField && data[field.lenField] > 0) ? data[field.lenField] : field.len;
+
+    if (typeof buf['read' + field.type] === 'function') {
+      data[field.name] = buf['read' + field.type](offset);
+    } else if (valLength > 0) {
+      data[field.name] = buf.toString('utf8', offset, offset + valLength); // end is an offset, not a length
+    } else {
+      data[field.name] = null;
+    }
+
+    offset += valLength;
+  });
+
+  return data;
+};
+
+ZipHeader.prototype._generateExternalAttributes = function(mode, type) {
+  var isDir = type === 'directory';
+
+  var owner = (mode >> 6) & 07;
+  var group = (mode >> 3) & 07;
+  var other = mode & 07;
+
+  var attr = isDir ? S_IFDIR : S_IFREG;
+  attr |= ((owner & 07) << 6) | ((group & 07) << 3) | (other & 07);
+
+  return (attr << 16) | (isDir ? S_DOS_D : S_DOS_A);
+};
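+
+// For example, a plain file with mode 0644 produces, once _normalize
+// applies its `>>> 0`, the EXT_FILE_ATTR_FILE value defined above.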
+
+ZipHeader.prototype._normalize = function(data) {
+  // Don't always set mode as this is an experimental feature
+  // if (!data.mode) {
+  //   data.mode = DEFAULT_FILE_MODE;
+  // }
+
+  data.filenameLength = 0;
+  data.commentLength = 0;
+  data.extraFieldLength = 0;
+
+  if (data.name) {
+    if (Buffer.byteLength(data.name) !== data.name.length) {
+      data.flags |= (1 << 11);
+    }
+
+    data.filenameLength = Buffer.byteLength(data.name);
+  }
+
+  if (data.comment) {
+    if (Buffer.byteLength(data.comment) !== data.comment.length) {
+      data.flags |= (1 << 11);
+    }
+
+    data.commentLength = Buffer.byteLength(data.comment);
+  }
+
+  if (data.extraField) {
+    data.extraFieldLength = data.extraField.length;
+  }
+
+  if (data.mode) {
+    data.mode &= ~S_IFDIR;
+
+    if (data.type === 'file') {
+      data.mode |= S_IFREG;
+    }
+
+    data.externalFileAttributes = 0;
+    data.externalFileAttributes |= this._generateExternalAttributes(data.mode, data.type);
+    data.externalFileAttributes >>>= 0;
+  }
+
+  return data;
+};
+
+function ZipHeaderFile() {
+  ZipHeader.call(this);
+
+  this.name = 'file';
+  this.bufferSize = 1024;
+  this.fields = [
+    {name: 'signature', len: 4, type: 'UInt32LE', def: 0x04034b50},
+    {name: 'versionNeededToExtract', len: 2, type: 'UInt16LE', def: 20},
+    {name: 'flags', len: 2, type: 'UInt16LE'},
+    {name: 'compressionMethod', len: 2, type: 'UInt16LE'},
+    {name: 'lastModifiedDate', len: 4, type: 'UInt32LE'},
+    {name: 'crc32', len: 4, type: 'UInt32LE', def: 0},
+    {name: 'compressedSize', len: 4, type: 'UInt32LE'},
+    {name: 'uncompressedSize', len: 4, type: 'UInt32LE'},
+    {name: 'filenameLength', len: 2, type: 'UInt16LE'},
+    {name: 'extraFieldLength', len: 2, type: 'UInt16LE'},
+    {name: 'name', len: 0, lenField: 'filenameLength', type: 'string'},
+    {name: 'extraField', len: 0, lenField: 'extraFieldLength', type: 'string'}
+  ];
+}
+inherits(ZipHeaderFile, ZipHeader);
+
+function ZipHeaderFileDescriptor() {
+  ZipHeader.call(this);
+
+  this.name = 'fileDescriptor';
+  this.bufferSize = 16;
+  this.fields = [
+    {name: 'signature', len: 4, type: 'UInt32LE', def: 0x08074b50},
+    {name: 'crc32', len: 4, type: 'UInt32LE'},
+    {name: 'compressedSize', len: 4, type: 'UInt32LE'},
+    {name: 'uncompressedSize', len: 4, type: 'UInt32LE'}
+  ];
+}
+inherits(ZipHeaderFileDescriptor, ZipHeader);
+
+function ZipHeaderCentralDirectory() {
+  ZipHeader.call(this);
+
+  this.name = 'centralDirectory';
+  this.bufferSize = 1024;
+  this.fields = [
+    {name: 'signature', len: 4, type: 'UInt32LE', def: 0x02014b50},
+    {name: 'versionMadeBy', len: 2, type: 'UInt16LE', def: 20},
+    {name: 'versionNeededToExtract', len: 2, type: 'UInt16LE', def: 20},
+    {name: 'flags', len: 2, type: 'UInt16LE'},
+    {name: 'compressionMethod', len: 2, type: 'UInt16LE'},
+    {name: 'lastModifiedDate', len: 4, type: 'UInt32LE'},
+    {name: 'crc32', len: 4, type: 'UInt32LE'},
+    {name: 'compressedSize', len: 4, type: 'UInt32LE'},
+    {name: 'uncompressedSize', len: 4, type: 'UInt32LE'},
+    {name: 'filenameLength', len: 2, type: 'UInt16LE'},
+    {name: 'extraFieldLength', len: 2, type: 'UInt16LE'},
+    {name: 'commentLength', len: 2, type: 'UInt16LE'},
+    {name: 'diskNumberStart', len: 2, type: 'UInt16LE'},
+    {name: 'internalFileAttributes', len: 2, type: 'UInt16LE'},
+    {name: 'externalFileAttributes', len: 4, type: 'UInt32LE'},
+    {name: 'offset', len: 4, type: 'UInt32LE'},
+    {name: 'name', len: 0, lenField: 'filenameLength', type: 'string'},
+    {name: 'extraField', len: 0, lenField: 'extraFieldLength', type: 'string'},
+    {name: 'comment', len: 0, lenField: 'commentLength', type: 'string'}
+  ];
+}
+inherits(ZipHeaderCentralDirectory, ZipHeader);
+
+function ZipHeaderCentralFooter() {
+  ZipHeader.call(this);
+
+  this.name = 'centralFooter';
+  this.bufferSize = 512;
+  this.fields = [
+    {name: 'signature', len: 4, type: 'UInt32LE', def: 0x06054b50},
+    {name: 'diskNumber', len: 2, type: 'UInt16LE'},
+    {name: 'diskNumberStart', len: 2, type: 'UInt16LE'},
+    {name: 'directoryRecordsDisk', len: 2, type: 'UInt16LE'},
+    {name: 'directoryRecords', len: 2, type: 'UInt16LE'},
+    {name: 'centralDirectorySize', len: 4, type: 'UInt32LE'},
+    {name: 'centralDirectoryOffset', len: 4, type: 'UInt32LE'},
+    {name: 'commentLength', len: 2, type: 'UInt16LE'},
+    {name: 'comment', len: 0, lenField: 'commentLength', type: 'string'}
+  ];
+}
+inherits(ZipHeaderCentralFooter, ZipHeader);
+
+var headers = {
+  file: new ZipHeaderFile(),
+  fileDescriptor: new ZipHeaderFileDescriptor(),
+  centralDirectory: new ZipHeaderCentralDirectory(),
+  centralFooter: new ZipHeaderCentralFooter()
+};
+
+var encode = exports.encode = function(type, data) {
+  if (!headers[type] || typeof headers[type].toBuffer !== 'function') {
+    throw new Error('Unknown encode type');
+  }
+
+  return headers[type].toBuffer(data);
+};
+
+exports.file = ZipHeaderFile;
+exports.fileDescriptor = ZipHeaderFileDescriptor;
+exports.centralDirectory = ZipHeaderCentralDirectory;
+exports.centralFooter = ZipHeaderCentralFooter;
\ No newline at end of file
diff --git a/lib/util/index.js b/lib/util/index.js
new file mode 100644
index 0000000..07df06d
--- /dev/null
+++ b/lib/util/index.js
@@ -0,0 +1,105 @@
+/**
+ * node-zip-stream
+ *
+ * Copyright (c) 2014 Chris Talkington, contributors.
+ * Licensed under the MIT license.
+ * https://github.com/ctalkington/node-zip-stream/blob/master/LICENSE-MIT
+ */
+var fs = require('fs');
+var path = require('path');
+
+var Stream = require('stream').Stream;
+var PassThrough = require('readable-stream').PassThrough;
+
+var loDefaults = require('lodash.defaults');
+
+var util = module.exports = {};
+
+util.debug = require('debug');
+
+util.convertDateTimeDos = function(input) {
+  return new Date(
+    ((input >> 25) & 0x7f) + 1980,
+    ((input >> 21) & 0x0f) - 1,
+    (input >> 16) & 0x1f,
+    (input >> 11) & 0x1f,
+    (input >> 5) & 0x3f,
+    (input & 0x1f) << 1
+  );
+};
+
+util.dateify = function(dateish) {
+  dateish = dateish || new Date();
+
+  if (typeof dateish === 'string') {
+    dateish = new Date(dateish);
+  } else if (!(dateish instanceof Date)) {
+    dateish = new Date();
+  }
+
+  return dateish;
+};
+
+// this is slightly different from the lodash version
+util.defaults = function(object, source, guard) {
+  var args = arguments;
+  args[0] = args[0] || {};
+
+  return loDefaults.apply(null, args);
+};
+
+util.dosDateTime = function(d, utc) {
+  d = (d instanceof Date) ? d : util.dateify(d);
+  utc = utc || false;
+
+  var year = utc ? d.getUTCFullYear() : d.getFullYear();
+
+  if (year < 1980) {
+    return 2162688; // 1980-1-1 00:00:00
+  } else if (year >= 2044) {
+    return 2141175677; // 2043-12-31 23:59:58
+  }
+
+  var val = {
+    year: year,
+    month: utc ? d.getUTCMonth() : d.getMonth(),
+    date: utc ? d.getUTCDate() : d.getDate(),
+    hours: utc ? d.getUTCHours() : d.getHours(),
+    minutes: utc ? d.getUTCMinutes() : d.getMinutes(),
+    seconds: utc ? d.getUTCSeconds() : d.getSeconds()
+  };
+
+  return ((val.year-1980) << 25) | ((val.month+1) << 21) | (val.date << 16) |
+    (val.hours << 11) | (val.minutes << 5) | (val.seconds / 2);
+};
+
+util.isStream = function(source) {
+  return source instanceof Stream;
+};
+
+util.normalizeInputSource = function(source) {
+  if (source === null) {
+    return new Buffer(0);
+  } else if (typeof source === 'string') {
+    return new Buffer(source);
+  } else if (util.isStream(source) && !source._readableState) {
+    var normalized = new PassThrough();
+    source.pipe(normalized);
+
+    return normalized;
+  }
+
+  return source;
+};
+
+util.sanitizePath = function() {
+  var filepath = path.join.apply(path, arguments);
+  return filepath.replace(/\\/g, '/').replace(/:/g, '').replace(/^\/+/, '');
+};
+
+util.unixifyPath = function() {
+  var filepath = path.join.apply(path, arguments);
+  return filepath.replace(/\\/g, '/');
+};
\ No newline at end of file
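
The two date helpers above convert between JavaScript Dates and the packed
32-bit MS-DOS date/time used in zip headers: years counted from 1980,
two-second resolution, clamped to the 1980-2043 range. A quick sketch, with
expected values taken from test/util.js later in this import:

    var util = require('./lib/util');

    // Date -> DOS UInt32, using UTC fields since the second argument is true
    var dos = util.dosDateTime(new Date('Jan 03 2013 14:26:38 GMT'), true);
    // dos === 1109619539

    // DOS UInt32 -> Date; note the result is built from local-time fields,
    // hence the timezone adjustment done in the test helpers
    var date = util.convertDateTimeDos(dos);

    // sanitizePath() forward-slashes, strips drive colons and leading slashes
    util.sanitizePath('c:\\this\\path\\file.txt'); // => 'c/this/path/file.txt'
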
diff --git a/lib/zip-stream.js b/lib/zip-stream.js
new file mode 100644
index 0000000..e13d752
--- /dev/null
+++ b/lib/zip-stream.js
@@ -0,0 +1,307 @@
+/**
+ * node-zip-stream
+ *
+ * Copyright (c) 2014 Chris Talkington, contributors.
+ * Licensed under the MIT license.
+ * https://github.com/ctalkington/node-zip-stream/blob/master/LICENSE-MIT
+ */
+var inherits = require('util').inherits;
+var Transform = require('readable-stream').Transform;
+
+var crc32 = require('buffer-crc32');
+var ChecksumStream = require('crc32-stream');
+var DeflateCRC32Stream = require('deflate-crc32-stream');
+var headers = require('./headers');
+var util = require('./util');
+
+var debug = util.debug('zip-stream:instance');
+var debugEntry = util.debug('zip-stream:entry');
+
+var ZipStream = module.exports = function(options) {
+  if (!(this instanceof ZipStream)) {
+    return new ZipStream(options);
+  }
+
+  debug('init');
+
+  options = this.options = util.defaults(options, {
+    highWaterMark: 1024 * 1024,
+    comment: '',
+    forceUTC: false,
+    store: false
+  });
+
+  if (typeof options.zlib !== 'object') {
+    options.zlib = {};
+  }
+
+  if (typeof options.level === 'number' && options.level >= 0) {
+    options.zlib.level = options.level;
+    delete options.level;
+  } else if (typeof options.zlib.level !== 'number') {
+    options.zlib.level = 1;
+  }
+
+  if (options.zlib.level === 0) {
+    options.store = true;
+  }
+
+  Transform.call(this, options);
+
+  this.offset = 0;
+  this.entries = [];
+
+  this._finalize = false;
+  this._finalized = false;
+  this._processing = false;
+
+  this.once('end', function() {
+    debug('stats:' + this.entries.length + 'e:' + this.offset + 'b');
+    debug('end');
+  });
+};
+
+inherits(ZipStream, Transform);
+
+ZipStream.prototype._afterAppend = function(entry) {
+  debugEntry('%s:finish', entry.name);
+
+  this.entries.push(entry);
+  this._processing = false;
+
+  if (this._finalize) {
+    this.finalize();
+  }
+};
+
+ZipStream.prototype._appendBuffer = function(source, data, callback) {
+  var self = this;
+
+  data.offset = self.offset;
+
+  if (source.length === 0) {
+    data.store = true;
+    data.compressionMethod = 0;
+  }
+
+  if (data.store) {
+    data.uncompressedSize = source.length;
+    data.compressedSize = data.uncompressedSize;
+    data.crc32 = crc32.unsigned(source);
+  } else {
+    data.flags |= (1 << 3);
+  }
+
+  self._writeHeader('file', data);
+
+  if (data.store) {
+    self.write(source);
+    self._afterAppend(data);
+    callback(null, data);
+  } else {
+    var processStream = self._newProcessStream(data.store, function(err) {
+      if (err) {
+        return callback(err);
+      }
+
+      data.crc32 = processStream.digest();
+      data.uncompressedSize = processStream.size();
+      data.compressedSize = processStream.compressedSize || data.uncompressedSize;
+
+      self._writeHeader('fileDescriptor', data);
+      self._afterAppend(data);
+      callback(null, data);
+    });
+
+    processStream.end(source);
+  }
+};
+
+ZipStream.prototype._appendStream = function(source, data, callback) {
+  var self = this;
+
+  data.flags |= (1 << 3);
+  data.offset = self.offset;
+
+  self._writeHeader('file', data);
+
+  var processStream = self._newProcessStream(data.store, function(err) {
+    if (err) {
+      return callback(err);
+    }
+
+    data.crc32 = processStream.digest();
+    data.uncompressedSize = processStream.size();
+    data.compressedSize = processStream.size(true);
+
+    self._writeHeader('fileDescriptor', data);
+    self._afterAppend(data);
+    callback(null, data);
+  });
+
+  source.pipe(processStream);
+};
+
+ZipStream.prototype._emitErrorCallback = function(err, data) {
+  if (err) {
+    this.emit('error', err);
+  }
+};
+
+ZipStream.prototype._newProcessStream = function(store, callback) {
+  // named processStream to avoid shadowing the global `process`
+  var processStream;
+
+  if (store) {
+    processStream = new ChecksumStream();
+  } else {
+    processStream = new DeflateCRC32Stream(this.options.zlib);
+  }
+
+  if (typeof callback === 'function') {
+    processStream.once('error', callback);
+    processStream.once('end', callback);
+  }
+
+  processStream.pipe(this, { end: false });
+
+  return processStream;
+};
+
+ZipStream.prototype._normalizeFileData = function(data) {
+  data = util.defaults(data, {
+    type: 'file',
+    name: null,
+    date: null,
+    store: this.options.store,
+    comment: ''
+  });
+
+  var isDir = data.type === 'directory';
+
+  if (data.name) {
+    data.name = util.sanitizePath(data.name);
+
+    if (data.name.slice(-1) === '/') {
+      isDir = true;
+      data.type = 'directory';
+    } else if (isDir) {
+      data.name += '/';
+    }
+  }
+
+  if (isDir) {
+    data.store = true;
+  }
+
+  if (typeof data.lastModifiedDate !== 'number') {
+    data.lastModifiedDate = util.dosDateTime(data.date, this.options.forceUTC);
+  }
+
+  data.flags = 0;
+  data.compressionMethod = data.store ? 0 : 8;
+  data.uncompressedSize = 0;
+  data.compressedSize = 0;
+
+  return data;
+};
+
+ZipStream.prototype._transform = function(chunk, encoding, callback) {
+  callback(null, chunk);
+};
+
+ZipStream.prototype._writeCentralDirectory = function() {
+  var entries = this.entries;
+  var comment = this.options.comment;
+  var cdoffset = this.offset;
+  var cdsize = 0;
+
+  var centralDirectoryBuffer;
+  for (var i = 0; i < entries.length; i++) {
+    var entry = entries[i];
+
+    centralDirectoryBuffer = this._writeHeader('centralDirectory', entry);
+    cdsize += centralDirectoryBuffer.length;
+  }
+
+  var centralDirectoryFooterData = {
+    directoryRecordsDisk: entries.length,
+    directoryRecords: entries.length,
+    centralDirectorySize: cdsize,
+    centralDirectoryOffset: cdoffset,
+    comment: comment
+  };
+
+  this._writeHeader('centralFooter', centralDirectoryFooterData);
+};
+
+ZipStream.prototype._writeHeader = function(type, data) {
+  var encoded = headers.encode(type, data);
+  this.write(encoded);
+
+  return encoded;
+};
+
+ZipStream.prototype.entry = function(source, data, callback) {
+  if (typeof callback !== 'function') {
+    callback = this._emitErrorCallback.bind(this);
+  }
+
+  if (this._processing) {
+    callback(new Error('already processing an entry'));
+    return;
+  }
+
+  if (this._finalize || this._finalized) {
+    callback(new Error('entry after finalize()'));
+    return;
+  }
+
+  data = this._normalizeFileData(data);
+  debugEntry('%s:start', data.name);
+
+  if (data.type !== 'file' && data.type !== 'directory') {
+    callback(new Error(data.type + ' entries not currently supported'));
+    return;
+  }
+
+  if (typeof data.name !== 'string' || data.name.length === 0) {
+    callback(new Error('entry name must be a non-empty string value'));
+    return;
+  }
+
+  this._processing = true;
+  source = util.normalizeInputSource(source);
+
+  if (Buffer.isBuffer(source)) {
+    debugEntry('%s:source:buffer', data.name);
+    this._appendBuffer(source, data, callback);
+  } else if (util.isStream(source)) {
+    debugEntry('%s:source:stream', data.name);
+    this._appendStream(source, data, callback);
+  } else {
+    this._processing = false;
+    callback(new Error('input source must be valid Stream or Buffer instance'));
+    return;
+  }
+};
+
+ZipStream.prototype.finalize = function() {
+  if (this._processing) {
+    this._finalize = true;
+    return;
+  }
+
+  debug('finalize');
+  this._writeCentralDirectory();
+  this._finalized = true;
+  debug('finalized');
+  this.end();
+};
+
+ZipStream.prototype.write = function(chunk, cb) {
+  if (chunk) {
+    this.offset += chunk.length;
+  }
+
+  return Transform.prototype.write.call(this, chunk, cb);
+};
\ No newline at end of file
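
Entries are strictly serialized: entry() refuses to run while another entry
is still processing, so multiple entries are queued from the completion
callbacks. A minimal usage sketch following the pattern of test/pack.js
below; the paths are illustrative:

    var fs = require('fs');
    var ZipStream = require('./lib/zip-stream');

    var archive = new ZipStream({ forceUTC: true });
    archive.pipe(fs.createWriteStream('tmp/example.zip'));

    archive.entry('string contents', { name: 'string.txt' }, function(err) {
      if (err) throw err;
      // queue the next entry only after the previous one has finished
      archive.entry(fs.createReadStream('test/fixtures/test.txt'), { name: 'stream.txt' }, function(err) {
        if (err) throw err;
        // finalize() writes the central directory and ends the stream;
        // if called mid-entry it is deferred until that entry completes
        archive.finalize();
      });
    });
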
diff --git a/package.json b/package.json
new file mode 100644
index 0000000..0bd5b5c
--- /dev/null
+++ b/package.json
@@ -0,0 +1,54 @@
+{
+  "name": "zip-stream",
+  "version": "0.3.5",
+  "description": "a streaming zip archive generator.",
+  "homepage": "https://github.com/ctalkington/node-zip-stream",
+  "author": {
+    "name": "Chris Talkington",
+    "url": "http://christalkington.com/"
+  },
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/ctalkington/node-zip-stream.git"
+  },
+  "bugs": {
+    "url": "https://github.com/ctalkington/node-zip-stream/issues"
+  },
+  "licenses": [
+    {
+      "type": "MIT",
+      "url": "https://github.com/ctalkington/node-zip-stream/blob/master/LICENSE-MIT"
+    }
+  ],
+  "main": "lib/zip-stream.js",
+  "files": [
+    "lib",
+    "LICENSE-MIT"
+  ],
+  "engines": {
+    "node": ">= 0.8.0"
+  },
+  "scripts": {
+    "test": "mocha --reporter dot"
+  },
+  "dependencies": {
+    "buffer-crc32": "~0.2.1",
+    "crc32-stream": "~0.2.0",
+    "debug": "~1.0.2",
+    "deflate-crc32-stream": "~0.1.0",
+    "lodash.defaults": "~2.4.1",
+    "readable-stream": "~1.0.26"
+  },
+  "devDependencies": {
+    "chai": "~1.9.1",
+    "mocha": "~1.18.2",
+    "rimraf": "~2.2.8",
+    "mkdirp": "~0.5.0"
+  },
+  "keywords": [
+    "archive",
+    "stream",
+    "zip-stream",
+    "zip"
+  ]
+}
\ No newline at end of file
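
As published to npm the package is named zip-stream, with lib/zip-stream.js
as its entry point, so consumers would install and require it along these
lines:

    npm install zip-stream

    var ZipStream = require('zip-stream');
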
diff --git a/test/fixtures/directory/level0.txt b/test/fixtures/directory/level0.txt
new file mode 100644
index 0000000..a423c4a
--- /dev/null
+++ b/test/fixtures/directory/level0.txt
@@ -0,0 +1 @@
+level0
\ No newline at end of file
diff --git a/test/fixtures/directory/subdir/level1.txt b/test/fixtures/directory/subdir/level1.txt
new file mode 100644
index 0000000..8d938e2
--- /dev/null
+++ b/test/fixtures/directory/subdir/level1.txt
@@ -0,0 +1 @@
+level1
\ No newline at end of file
diff --git a/test/fixtures/directory/subdir/subsub/level2.txt b/test/fixtures/directory/subdir/subsub/level2.txt
new file mode 100644
index 0000000..00f8d1d
--- /dev/null
+++ b/test/fixtures/directory/subdir/subsub/level2.txt
@@ -0,0 +1 @@
+level2
\ No newline at end of file
diff --git a/test/fixtures/empty.txt b/test/fixtures/empty.txt
new file mode 100644
index 0000000..e69de29
diff --git a/test/fixtures/headers/zip-centralfooter.bin b/test/fixtures/headers/zip-centralfooter.bin
new file mode 100644
index 0000000..1746fc5
Binary files /dev/null and b/test/fixtures/headers/zip-centralfooter.bin differ
diff --git a/test/fixtures/headers/zip-centralheader.bin b/test/fixtures/headers/zip-centralheader.bin
new file mode 100644
index 0000000..5b22aa9
Binary files /dev/null and b/test/fixtures/headers/zip-centralheader.bin differ
diff --git a/test/fixtures/headers/zip-data.bin b/test/fixtures/headers/zip-data.bin
new file mode 100644
index 0000000..e068544
--- /dev/null
+++ b/test/fixtures/headers/zip-data.bin
@@ -0,0 +1 @@
+this is a text file
\ No newline at end of file
diff --git a/test/fixtures/headers/zip-file.bin b/test/fixtures/headers/zip-file.bin
new file mode 100644
index 0000000..31275e1
Binary files /dev/null and b/test/fixtures/headers/zip-file.bin differ
diff --git a/test/fixtures/headers/zip-filedescriptor.bin b/test/fixtures/headers/zip-filedescriptor.bin
new file mode 100644
index 0000000..0703558
Binary files /dev/null and b/test/fixtures/headers/zip-filedescriptor.bin differ
diff --git a/test/fixtures/image.png b/test/fixtures/image.png
new file mode 100644
index 0000000..c6955e2
Binary files /dev/null and b/test/fixtures/image.png differ
diff --git a/test/fixtures/test.txt b/test/fixtures/test.txt
new file mode 100644
index 0000000..e068544
--- /dev/null
+++ b/test/fixtures/test.txt
@@ -0,0 +1 @@
+this is a text file
\ No newline at end of file
diff --git a/test/headers.js b/test/headers.js
new file mode 100644
index 0000000..d212d1a
--- /dev/null
+++ b/test/headers.js
@@ -0,0 +1,118 @@
+/*global before,describe,it */
+var fs = require('fs');
+var assert = require('chai').assert;
+
+var helpers = require('./helpers');
+
+var headers = require('../lib/headers');
+
+var testDate = new Date('Jan 03 2013 14:26:38 GMT');
+var testDateEpoch = 1357223198;
+
+describe('headers', function() {
+
+  describe('zip', function() {
+    var fileFixture = fs.readFileSync('test/fixtures/headers/zip-file.bin');
+    var fileDescriptorFixture = fs.readFileSync('test/fixtures/headers/zip-filedescriptor.bin');
+    var centralDirectoryFixture = fs.readFileSync('test/fixtures/headers/zip-centralheader.bin');
+    var centralFooterFixture = fs.readFileSync('test/fixtures/headers/zip-centralfooter.bin');
+
+    describe('#encode(type, object)', function() {
+
+      describe('type->file', function() {
+        var actual = headers.encode('file', {
+          name: 'test.txt',
+          filenameLength: 8,
+          date: testDate,
+          comment: '',
+          store: true,
+          lastModifiedDate: 1109619539,
+          versionMadeBy: 20,
+          versionNeededToExtract: 20,
+          flags: 2056,
+          compressionMethod: 0,
+          uncompressedSize: 0,
+          compressedSize: 0,
+          offset: 0
+        });
+
+        it('should return an instance of Buffer', function() {
+          assert.instanceOf(actual, Buffer);
+        });
+
+        it('should match provided fixture', function() {
+          // fs.writeFileSync('test/fixtures/headers/zip-file.bin', actual);
+          assert.deepEqual(actual, fileFixture);
+        });
+      });
+
+      describe('type->fileDescriptor', function() {
+        var actual = headers.encode('fileDescriptor', {
+          crc32: 585446183,
+          uncompressedSize: 19,
+          compressedSize: 19
+        });
+
+        it('should return an instance of Buffer', function() {
+          assert.instanceOf(actual, Buffer);
+        });
+
+        it('should match provided fixture', function() {
+          // fs.writeFileSync('test/fixtures/headers/zip-filedescriptor.bin', actual);
+          assert.deepEqual(actual, fileDescriptorFixture);
+        });
+      });
+
+      describe('type->centralDirectory', function() {
+        var actual = headers.encode('centralDirectory', {
+          name: 'test.txt',
+          filenameLength: 8,
+          date: testDate,
+          store: true,
+          comment: '',
+          mode: 0644,
+          lastModifiedDate: 1109619539,
+          versionMadeBy: 20,
+          versionNeededToExtract: 20,
+          flags: 2056,
+          compressionMethod: 0,
+          uncompressedSize: 19,
+          compressedSize: 19,
+          offset: 0,
+          crc32: 585446183
+        });
+
+        it('should return an instance of Buffer', function() {
+          assert.instanceOf(actual, Buffer);
+        });
+
+        it('should match provided fixture', function() {
+          // fs.writeFileSync('test/fixtures/headers/zip-centralheader.bin', actual);
+          assert.deepEqual(actual, centralDirectoryFixture);
+        });
+      });
+
+      describe('type->centralFooter', function() {
+        var actual = headers.encode('centralFooter', {
+          directoryRecordsDisk: 1,
+          directoryRecords: 1,
+          centralDirectorySize: 56,
+          centralDirectoryOffset: 73,
+          comment: ''
+        });
+
+        it('should return an instance of Buffer', function() {
+          assert.instanceOf(actual, Buffer);
+        });
+
+        it('should match provided fixture', function() {
+          // fs.writeFileSync('test/fixtures/headers/zip-centralfooter.bin', actual);
+          assert.deepEqual(actual, centralFooterFixture);
+        });
+      });
+
+    });
+
+  });
+
+});
\ No newline at end of file
diff --git a/test/helpers/index.js b/test/helpers/index.js
new file mode 100644
index 0000000..86334e0
--- /dev/null
+++ b/test/helpers/index.js
@@ -0,0 +1,101 @@
+var crypto = require('crypto');
+var fs = require('fs');
+var inherits = require('util').inherits;
+
+var Stream = require('stream').Stream;
+var Readable = require('readable-stream').Readable;
+var Writable = require('readable-stream').Writable;
+
+function adjustDateByOffset(d, offset) {
+  d = (d instanceof Date) ? d : new Date();
+
+  // offset is in minutes, as returned by Date#getTimezoneOffset();
+  // subtraction covers both positive and negative offsets.
+  d.setMinutes(d.getMinutes() - offset);
+
+  return d;
+}
+
+module.exports.adjustDateByOffset = adjustDateByOffset;
+
+function binaryBuffer(n) {
+  var buffer = new Buffer(n);
+
+  for (var i = 0; i < n; i++) {
+    buffer.writeUInt8(i&255, i);
+  }
+
+  return buffer;
+}
+
+module.exports.binaryBuffer = binaryBuffer;
+
+function BinaryStream(size, options) {
+  Readable.call(this, options);
+
+  var buf = new Buffer(size);
+
+  for (var i = 0; i < size; i++) {
+    buf.writeUInt8(i&255, i);
+  }
+
+  this.push(buf);
+  this.push(null);
+}
+
+inherits(BinaryStream, Readable);
+
+BinaryStream.prototype._read = function(size) {};
+
+module.exports.BinaryStream = BinaryStream;
+
+function DeadEndStream(options) {
+  Writable.call(this, options);
+}
+
+inherits(DeadEndStream, Writable);
+
+DeadEndStream.prototype._write = function(chunk, encoding, callback) {
+  callback();
+};
+
+module.exports.DeadEndStream = DeadEndStream;
+
+function fileBuffer(filepath) {
+  return fs.readFileSync(filepath);
+}
+
+module.exports.fileBuffer = fileBuffer;
+
+function UnBufferedStream() {
+  this.readable = true;
+}
+
+inherits(UnBufferedStream, Stream);
+
+module.exports.UnBufferedStream = UnBufferedStream;
+
+function WriteHashStream(path, options) {
+  fs.WriteStream.call(this, path, options);
+
+  this.hash = crypto.createHash('sha1');
+  this.digest = null;
+
+  this.on('close', function() {
+    this.digest = this.hash.digest('hex');
+  });
+}
+
+inherits(WriteHashStream, fs.WriteStream);
+
+WriteHashStream.prototype.write = function(chunk) {
+  if (chunk) {
+    this.hash.update(chunk);
+  }
+
+  return fs.WriteStream.prototype.write.call(this, chunk);
+};
+
+module.exports.WriteHashStream = WriteHashStream;
\ No newline at end of file
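
WriteHashStream is the key helper for the pack tests that follow: it SHA-1
hashes every chunk on its way to disk, so each test can assert a single hex
digest instead of comparing raw archive bytes. The pattern, in sketch form
(archive stands in for a ZipStream instance):

    var testStream = new WriteHashStream('tmp/out.zip');

    testStream.on('close', function() {
      // testStream.digest now holds the hex SHA-1 of all bytes written
    });

    archive.pipe(testStream);
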
diff --git a/test/pack.js b/test/pack.js
new file mode 100644
index 0000000..beae378
--- /dev/null
+++ b/test/pack.js
@@ -0,0 +1,356 @@
+/*global before,describe,it */
+var fs = require('fs');
+
+var assert = require('chai').assert;
+var mkdir = require('mkdirp');
+
+var helpers = require('./helpers');
+var binaryBuffer = helpers.binaryBuffer;
+var fileBuffer = helpers.fileBuffer;
+var WriteHashStream = helpers.WriteHashStream;
+
+var Packer = require('../lib/zip-stream.js');
+
+var testBuffer = binaryBuffer(1024 * 16);
+
+var testDate = new Date('Jan 03 2013 14:26:38 GMT');
+var testDate2 = new Date('Feb 10 2013 10:24:42 GMT');
+
+var testDateOverflow = new Date('Jan 1 2044 00:00:00 GMT');
+var testDateUnderflow = new Date('Dec 30 1979 23:59:58 GMT');
+
+describe('pack', function() {
+  before(function() {
+    mkdir.sync('tmp');
+  });
+
+  describe('#entry', function() {
+
+    it('should append Buffer sources', function(done) {
+      var archive = new Packer({
+        forceUTC: true
+      });
+
+      var testStream = new WriteHashStream('tmp/buffer.zip');
+
+      testStream.on('close', function() {
+        assert.equal(testStream.digest, '6576fe7e1ef7aa22b51c1c18a837176602c1b3b6');
+        done();
+      });
+
+      archive.pipe(testStream);
+
+      archive.entry(testBuffer, { name: 'buffer.txt', date: testDate });
+      archive.finalize();
+    });
+
+    it('should append Stream sources', function(done) {
+      var archive = new Packer({
+        forceUTC: true
+      });
+
+      var testStream = new WriteHashStream('tmp/stream.zip');
+
+      testStream.on('close', function() {
+        assert.equal(testStream.digest, '696d847c779cb4ad77c52de4dcb5995fabe82053');
+        done();
+      });
+
+      archive.pipe(testStream);
+
+      archive.entry(fs.createReadStream('test/fixtures/test.txt'), { name: 'stream.txt', date: testDate });
+      archive.finalize();
+    });
+
+    it('should append multiple sources', function(done) {
+      var archive = new Packer({
+        forceUTC: true
+      });
+
+      var testStream = new WriteHashStream('tmp/multiple.zip');
+
+      testStream.on('close', function() {
+        assert.equal(testStream.digest, '696fec6b6267159b6d0cff2f59cdc0b9259f14a1');
+        done();
+      });
+
+      archive.pipe(testStream);
+
+      archive.entry('string', { name: 'string.txt', date: testDate }, function(err) {
+        if (err) throw err;
+        archive.entry(testBuffer, { name: 'buffer.txt', date: testDate2 }, function(err) {
+          if (err) throw err;
+          archive.entry(fs.createReadStream('test/fixtures/test.txt'), { name: 'stream.txt', date: testDate2 }, function(err) {
+            if (err) throw err;
+            archive.entry(fs.createReadStream('test/fixtures/test.txt'), { name: 'stream-store.txt', date: testDate, store: true }, function(err) {
+              if (err) throw err;
+              archive.finalize();
+            });
+          });
+        });
+      });
+    });
+
+    it('should support STORE for Buffer sources', function(done) {
+      var archive = new Packer({
+        forceUTC: true
+      });
+
+      var testStream = new WriteHashStream('tmp/buffer-store.zip');
+
+      testStream.on('close', function() {
+        assert.equal(testStream.digest, 'dc19b326088033922e488192defb8dc5cca6ffdd');
+        done();
+      });
+
+      archive.pipe(testStream);
+
+      archive.entry(testBuffer, { name: 'buffer.txt', date: testDate, store: true });
+      archive.finalize();
+    });
+
+    it('should support STORE for Stream sources', function(done) {
+      var archive = new Packer({
+        forceUTC: true
+      });
+
+      var testStream = new WriteHashStream('tmp/stream-store.zip');
+
+      testStream.on('close', function() {
+        assert.equal(testStream.digest, '0afeba5761199501ae58c3670713761b4d42bc3a');
+        done();
+      });
+
+      archive.pipe(testStream);
+
+      archive.entry(fs.createReadStream('test/fixtures/test.txt'), { name: 'stream.txt', date: testDate, store: true });
+      archive.finalize();
+    });
+
+    it('should support archive and file comments', function(done) {
+      var archive = new Packer({
+        comment: 'this is a zip comment',
+        forceUTC: true
+      });
+
+      var testStream = new WriteHashStream('tmp/comments.zip');
+
+      testStream.on('close', function() {
+        assert.equal(testStream.digest, '0ca2a710775e8645d8bb170f12ef5372abba4b77');
+        done();
+      });
+
+      archive.pipe(testStream);
+
+      archive.entry(testBuffer, { name: 'buffer.txt', date: testDate, comment: 'this is a file comment' });
+      archive.finalize();
+    });
+
+    it('should STORE files when compression level is zero', function(done) {
+      var archive = new Packer({
+        forceUTC: true,
+        level: 0
+      });
+
+      var testStream = new WriteHashStream('tmp/store-level0.zip');
+
+      testStream.on('close', function() {
+        assert.equal(testStream.digest, 'dc19b326088033922e488192defb8dc5cca6ffdd');
+        done();
+      });
+
+      archive.pipe(testStream);
+
+      archive.entry(testBuffer, { name: 'buffer.txt', date: testDate });
+      archive.finalize();
+    });
+
+    it('should properly handle utf8 encoded characters in file names and comments', function(done) {
+      var archive = new Packer({
+        forceUTC: true
+      });
+
+      var testStream = new WriteHashStream('tmp/accentedchars-filenames.zip');
+
+      testStream.on('close', function() {
+        assert.equal(testStream.digest, '554638f3269bd13d21657da6f1d30e8502405274');
+        done();
+      });
+
+      archive.pipe(testStream);
+
+      archive.entry(testBuffer, { name: 'àáâãäçèéêëìíîïñòóôõöùúûüýÿ.txt', date: testDate, comment: 'àáâãäçèéêëìíîïñòóôõöùúûüýÿ' }, function(err) {
+        if (err) throw err;
+        archive.entry(testBuffer, { name: 'ÀÁÂÃÄÇÈÉÊËÌÍÎÏÑÒÓÔÕÖÙÚÛÜÝ.txt', date: testDate2, comment: 'ÀÁÂÃÄÇÈÉÊËÌÍÎÏÑÒÓÔÕÖÙÚÛÜÝ' }, function(err) {
+          if (err) throw err;
+          archive.finalize();
+        });
+      });
+    });
+
+    it('should append zero length sources', function(done) {
+      var archive = new Packer({
+        forceUTC: true
+      });
+
+      var testStream = new WriteHashStream('tmp/zerolength.zip');
+
+      testStream.on('close', function() {
+        assert.equal(testStream.digest, '4ecd1e32661437f8e62492d6d0dcc845e8ff0fb6');
+        done();
+      });
+
+      archive.pipe(testStream);
+
+      archive.entry('', { name: 'string.txt', date: testDate }, function(err) {
+        if (err) throw err;
+        archive.entry(new Buffer(0), { name: 'buffer.txt', date: testDate }, function(err) {
+          if (err) throw err;
+          archive.entry(fs.createReadStream('test/fixtures/empty.txt'), { name: 'stream.txt', date: testDate }, function(err) {
+            if (err) throw err;
+            archive.finalize();
+          });
+        });
+      });
+    });
+
+    it('should support setting file mode (permissions)', function(done) {
+      var archive = new Packer({
+        forceUTC: true
+      });
+
+      var testStream = new WriteHashStream('tmp/filemode.zip');
+
+      testStream.on('close', function() {
+        assert.equal(testStream.digest, '133dee4946ae133a723a728b56775672729d6246');
+        done();
+      });
+
+      archive.pipe(testStream);
+
+      archive.entry(testBuffer, { name: 'buffer.txt', date: testDate, mode: 0644 });
+      archive.finalize();
+    });
+
+    it('should support creating an empty zip', function(done) {
+      var archive = new Packer({
+        forceUTC: true
+      });
+
+      var testStream = new WriteHashStream('tmp/empty.zip');
+
+      testStream.on('close', function() {
+        assert.equal(testStream.digest, 'b04f3ee8f5e43fa3b162981b50bb72fe1acabb33');
+        done();
+      });
+
+      archive.pipe(testStream);
+
+      archive.finalize();
+    });
+
+    it('should support compressing images for Buffer sources', function(done) {
+      var archive = new Packer({
+        forceUTC: true
+      });
+
+      var testStream = new WriteHashStream('tmp/buffer-image.zip');
+
+      testStream.on('close', function() {
+        assert.equal(testStream.digest, '318b485627b9abf7cbd411e43985fc8d7358d151');
+        done();
+      });
+
+      archive.pipe(testStream);
+
+      archive.entry(fileBuffer('test/fixtures/image.png'), { name: 'image.png', date: testDate });
+      archive.finalize();
+    });
+
+    it('should support compressing images for Stream sources', function(done) {
+      var archive = new Packer({
+        forceUTC: true
+      });
+
+      var testStream = new WriteHashStream('tmp/stream-image.zip');
+
+      testStream.on('close', function() {
+        assert.equal(testStream.digest, '318b485627b9abf7cbd411e43985fc8d7358d151');
+        done();
+      });
+
+      archive.pipe(testStream);
+
+      archive.entry(fs.createReadStream('test/fixtures/image.png'), { name: 'image.png', date: testDate });
+      archive.finalize();
+    });
+
+    it('should prevent UInt32 under/overflow of dates', function(done) {
+      var archive = new Packer({
+        forceUTC: true
+      });
+
+      var testStream = new WriteHashStream('tmp/date-boundaries.zip');
+
+      testStream.on('close', function() {
+        assert.equal(testStream.digest, '99e71f01a7ec48e8a67344c18065fb06fa08c051');
+        done();
+      });
+
+      archive.pipe(testStream);
+
+      archive.entry(testBuffer, { name: 'date-underflow.txt', date: testDateUnderflow }, function(err) {
+        if (err) throw err;
+        archive.entry(testBuffer, { name: 'date-overflow.txt', date: testDateOverflow }, function(err) {
+          if (err) throw err;
+          archive.finalize();
+        });
+      });
+    });
+
+    it('should handle data that exceeds its internal buffer size', function(done) {
+      var archive = new Packer({
+        highWaterMark: 1024 * 4,
+        forceUTC: true
+      });
+
+      var testStream = new WriteHashStream('tmp/buffer-overflow.zip');
+
+      testStream.on('close', function() {
+        assert.equal(testStream.digest, '2397e129ae3c9398cfe63ed92d007821ad418d0c');
+        done();
+      });
+
+      archive.pipe(testStream);
+
+      archive.entry(binaryBuffer(1024 * 512), { name: 'buffer-overflow.txt', date: testDate }, function(err) {
+        if (err) throw err;
+        archive.entry(binaryBuffer(1024 * 1024), { name: 'buffer-overflow-store.txt', date: testDate, store: true }, function(err) {
+          if (err) throw err;
+          archive.finalize();
+        });
+      });
+    });
+
+    it('should support directory entries', function(done) {
+      var archive = new Packer({
+        forceUTC: true
+      });
+
+      var testStream = new WriteHashStream('tmp/type-directory.zip');
+
+      testStream.on('close', function() {
+        assert.equal(testStream.digest, 'ce375e4f7db818cab9f9dba79cd89002459d9fe6');
+        done();
+      });
+
+      archive.pipe(testStream);
+
+      archive.entry(null, { name: 'directory/', date: testDate });
+
+      archive.finalize();
+    });
+
+  });
+
+});
\ No newline at end of file
diff --git a/test/util.js b/test/util.js
new file mode 100644
index 0000000..ef62a32
--- /dev/null
+++ b/test/util.js
@@ -0,0 +1,128 @@
+/*global before,describe,it */
+var fs = require('fs');
+var assert = require('chai').assert;
+
+var Stream = require('stream').Stream;
+var Readable = require('readable-stream').Readable;
+var Writable = require('readable-stream').Writable;
+var PassThrough = require('readable-stream').PassThrough;
+
+var helpers = require('./helpers');
+var BinaryStream = helpers.BinaryStream;
+var DeadEndStream = helpers.DeadEndStream;
+var UnBufferedStream = helpers.UnBufferedStream;
+
+var utils = require('../lib/util');
+
+var testDateString = 'Jan 03 2013 14:26:38 GMT';
+var testDate = new Date(testDateString);
+var testDateDosUTC = 1109619539;
+var testTimezoneOffset = testDate.getTimezoneOffset();
+
+var testDateOverflow = new Date('Jan 1 2044 00:00:00 GMT');
+var testDateOverflowDosUTC = 2141175677;
+
+var testDateUnderflow = new Date('Dec 30 1979 23:59:58 GMT');
+var testDateUnderflowDosUTC = 2162688;
+
+describe('utils', function() {
+
+  describe('convertDateTimeDos(input)', function() {
+    it('should convert DOS input into an instance of Date', function() {
+      var actual = helpers.adjustDateByOffset(utils.convertDateTimeDos(testDateDosUTC), testTimezoneOffset);
+
+      assert.deepEqual(actual, testDate);
+    });
+  });
+
+  describe('dateify(dateish)', function() {
+    it('should return an instance of Date', function() {
+      assert.instanceOf(utils.dateify(testDate), Date);
+      assert.instanceOf(utils.dateify(testDateString), Date);
+      assert.instanceOf(utils.dateify(null), Date);
+    });
+
+    it('should passthrough an instance of Date', function() {
+      assert.deepEqual(utils.dateify(testDate), testDate);
+    });
+
+    it('should convert dateish string to an instance of Date', function() {
+      assert.deepEqual(utils.dateify(testDateString), testDate);
+    });
+  });
+
+  describe('defaults(object, source, guard)', function() {
+    it('should default when object key is missing', function() {
+      var actual = utils.defaults({ value1: true }, {
+        value2: true
+      });
+
+      assert.deepEqual(actual, {
+        value1: true,
+        value2: true
+      });
+    });
+  });
+
+  describe('dosDateTime(date, utc)', function() {
+    it('should convert date into its DOS representation', function() {
+      assert.equal(utils.dosDateTime(testDate, true), testDateDosUTC);
+    });
+
+    it('should prevent UInt32 underflow', function () {
+      assert.equal(utils.dosDateTime(testDateUnderflow, true), testDateUnderflowDosUTC);
+    });
+
+    it('should prevent UInt32 overflow', function () {
+      assert.equal(utils.dosDateTime(testDateOverflow, true), testDateOverflowDosUTC);
+    });
+  });
+
+  describe('isStream(source)', function() {
+    it('should return false if source is not a stream', function() {
+      assert.notOk(utils.isStream('string'));
+      assert.notOk(utils.isStream(new Buffer(2)));
+    });
+
+    it('should return true if source is a stream', function() {
+      assert.ok(utils.isStream(new Stream()));
+
+      assert.ok(utils.isStream(new Readable()));
+      assert.ok(utils.isStream(new Writable()));
+      assert.ok(utils.isStream(new PassThrough()));
+
+      assert.ok(utils.isStream(new UnBufferedStream()));
+      assert.ok(utils.isStream(new DeadEndStream()));
+    });
+  });
+
+  describe('normalizeInputSource(source)', function() {
+    it('should normalize strings to an instanceOf Buffer', function() {
+      var normalized = utils.normalizeInputSource('some string');
+
+      assert.instanceOf(normalized, Buffer);
+    });
+
+    it('should normalize older unbuffered streams', function() {
+      var noBufferStream = new UnBufferedStream();
+      var normalized = utils.normalizeInputSource(noBufferStream);
+
+      assert.instanceOf(normalized, PassThrough);
+    });
+  });
+
+  describe('sanitizePath(filepath)', function() {
+    it('should sanitize filepath', function() {
+      assert.equal(utils.sanitizePath('\\this/path//file.txt'), 'this/path/file.txt');
+      assert.equal(utils.sanitizePath('/this/path/file.txt'), 'this/path/file.txt');
+      assert.equal(utils.sanitizePath('c:\\this\\path\\file.txt'), 'c/this/path/file.txt');
+    });
+  });
+
+  describe('unixifyPath(filepath)', function() {
+    it('should unixify filepath', function() {
+      assert.equal(utils.unixifyPath('this\\path\\file.txt'), 'this/path/file.txt');
+    });
+  });
+
+});
\ No newline at end of file

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-javascript/node-zip-stream.git


