| 'use strict' | ||
| // tar -c | ||
| const hlo = require('./high-level-opt.js') | ||
| const Pack = require('./pack.js') | ||
| const fs = require('fs') | ||
// tar -c: create an archive from a list of files/directories.
// Dispatches to one of four strategies based on the `file` and `sync`
// high-level options: returns undefined (sync-to-file), a Promise
// (async-to-file), or a Pack stream (no file).
const c = module.exports = (opt_, files, cb) => {
  // support the c(opt, cb) call shape
  if (typeof files === 'function')
    cb = files

  // support the c(files) call shape
  if (Array.isArray(opt_)) {
    files = opt_
    opt_ = {}
  }

  if (!files || !Array.isArray(files) || !files.length)
    throw new TypeError('no files or directories specified')

  const opt = hlo(opt_)

  if (opt.sync && typeof cb === 'function')
    throw new TypeError('callback not supported for sync tar functions')

  if (!opt.file && typeof cb === 'function')
    throw new TypeError('callback only supported with file option')

  if (opt.file)
    return opt.sync ? createFileSync(opt, files) : createFile(opt, files, cb)

  return opt.sync ? createSync(opt, files) : create(opt, files)
}
// Synchronously pack `files` into the archive at opt.file.
// The fd is closed on the pack stream's 'end' event; if anything
// throws before that, the finally block closes it (best-effort, so the
// original error is never masked).
const createFileSync = (opt, files) => {
  const pack = new Pack.Sync(opt)
  let completed = false
  let fd
  try {
    fd = fs.openSync(opt.file, 'w', opt.mode || 0o666)
    pack.on('data', chunk => fs.writeSync(fd, chunk, 0, chunk.length))
    pack.on('end', _ => fs.closeSync(fd))
    for (const file of files)
      pack.add(file)
    pack.end()
    completed = true
  } finally {
    if (!completed)
      try { fs.closeSync(fd) } catch (er) {}
  }
}
// Asynchronously pipe a Pack stream into a write stream on opt.file.
// Resolves when the file stream closes; rejects on a stream or pack
// error. If cb is supplied it is called for both outcomes.
const createFile = (opt, files, cb) => {
  const pack = new Pack(opt)
  const out = fs.createWriteStream(opt.file, { mode: opt.mode || 0o666 })
  pack.pipe(out)

  const promise = new Promise((resolve, reject) => {
    out.on('error', reject)
    out.on('close', resolve)
    pack.on('error', reject)
  })

  for (const file of files)
    pack.add(file)
  pack.end()

  return cb ? promise.then(cb, cb) : promise
}
// Sync, no file target: return the fully-ended Pack.Sync stream so the
// caller can read the archive bytes out of it.
const createSync = (opt, files) => {
  const pack = new Pack.Sync(opt)
  for (const file of files)
    pack.add(file)
  return pack.end()
}
// Async, no file target: return the Pack stream for the caller to
// consume as it is produced.
const create = (opt, files) => {
  const pack = new Pack(opt)
  for (const file of files)
    pack.add(file)
  return pack.end()
}
| 'use strict' | ||
| // turn tar(1) style args like `C` into the more verbose things like `cwd` | ||
// Mapping of tar(1)-style short/dashed option names to the verbose
// camelCase names used throughout this module. (None of these keys
// are integer-like, so Object.entries preserves insertion order.)
const argmap = new Map(Object.entries({
  C: 'cwd',
  f: 'file',
  z: 'gzip',
  P: 'preservePaths',
  U: 'unlink',
  'strip-components': 'strip',
  stripComponents: 'strip',
  'keep-newer': 'newer',
  keepNewer: 'newer',
  'keep-newer-files': 'newer',
  keepNewerFiles: 'newer',
  k: 'keep',
  'keep-existing': 'keep',
  keepExisting: 'keep',
  m: 'noMtime',
  'no-mtime': 'noMtime',
  p: 'preserveOwner',
  L: 'follow',
  h: 'follow'
}))
// Normalize a user-supplied options object: translate any tar(1)-style
// keys via argmap, passing unknown keys through untouched. Returns a
// fresh null-prototype object; a falsy input yields a plain empty {}.
const parse = module.exports = opt => {
  if (!opt)
    return {}
  const set = Object.create(null)
  for (const k of Object.keys(opt)) {
    const verbose = argmap.has(k) ? argmap.get(k) : k
    set[verbose] = opt[k]
  }
  return set
}
| 'use strict' | ||
| // Tar can encode large and negative numbers using a leading byte of | ||
| // 0xff for negative, and 0x80 for positive. The trailing byte in the | ||
| // section will always be 0x20, or in some implementations 0x00. | ||
| // this module encodes and decodes these things. | ||
// Encode `num` into `buf` using the base-256 extension: a lead byte of
// 0xff marks a negative (two's complement) value, 0x80 a positive one.
// The trailing byte is set to 0x20 per the historical convention.
const encode = exports.encode = (num, buf) => {
  buf[buf.length - 1] = 0x20
  const writer = num < 0 ? encodeNegative : encodePositive
  writer(num, buf)
  return buf
}
// Write a non-negative value in base-256, big-endian, into bytes
// 1..length-2 of buf. Byte 0 is the 0x80 positive marker; the last
// byte is the terminator, written by encode().
const encodePositive = (num, buf) => {
  buf[0] = 0x80
  let rest = num
  for (let i = buf.length - 2; i > 0; i--) {
    if (rest === 0)
      buf[i] = 0
    else {
      buf[i] = rest % 0x100
      rest = Math.floor(rest / 0x100)
    }
  }
}
// Write a negative value as two's complement in base-256, big-endian.
// Working up from the least significant byte: bytes below the lowest
// non-zero byte stay 0, that byte is two's-complemented, and every
// byte above it is ones-complemented.
const encodeNegative = (num, buf) => {
  buf[0] = 0xff
  let rest = Math.abs(num)
  let flipped = false
  for (let i = buf.length - 2; i > 0; i--) {
    let byte = 0
    if (rest !== 0) {
      byte = rest % 0x100
      rest = Math.floor(rest / 0x100)
    }
    if (flipped)
      buf[i] = onesComp(byte)
    else if (byte === 0)
      buf[i] = 0
    else {
      flipped = true
      buf[i] = twosComp(byte)
    }
  }
}
// Decode a base-256 encoded buffer: a 0x80 lead byte marks a positive
// value, anything else (0xff in practice) a two's-complement negative.
// The lead byte and the trailing terminator byte are excluded from the
// decoded value.
// (Removed the unused `post` local that read the terminator byte.)
const parse = exports.parse = (buf) => {
  const pre = buf[0]
  const body = buf.slice(1, buf.length - 1)
  return pre === 0x80 ? pos(body) : twos(body)
}
// Decode a two's-complement big-endian byte sequence into a negative
// JS number. Mirrors encodeNegative: scanning from the least
// significant end, zero bytes below the first non-zero byte pass
// through, that byte is two's-complemented, and higher bytes are
// ones-complemented.
const twos = (buf) => {
  const len = buf.length
  let sum = 0
  let flipped = false
  for (let i = len - 1; i >= 0; i--) {
    const byte = buf[i]
    let f
    if (flipped)
      f = onesComp(byte)
    else if (byte === 0)
      f = byte
    else {
      flipped = true
      f = twosComp(byte)
    }
    if (f !== 0)
      sum += f * Math.pow(256, len - i - 1)
  }
  return sum * -1
}
// Decode an unsigned big-endian byte sequence into a JS number.
const pos = (buf) => {
  const len = buf.length
  let sum = 0
  for (let i = len - 1; i >= 0; i--) {
    const byte = buf[i]
    if (byte !== 0)
      sum += byte * Math.pow(256, len - i - 1)
  }
  return sum
}
// Bitwise ones' complement of the low byte of `byte`.
const onesComp = byte => 0xff - (byte & 0xff)
// Two's complement (byte-wise negation) of the low byte; 0 wraps to 0.
const twosComp = byte => (0x100 - (byte & 0xff)) & 0xff
+129
| 'use strict' | ||
| // XXX: This shares a lot in common with extract.js | ||
| // maybe some DRY opportunity here? | ||
| // tar -t | ||
| const hlo = require('./high-level-opt.js') | ||
| const Parser = require('./parse.js') | ||
| const fs = require('fs') | ||
| const path = require('path') | ||
// tar -t: list the entries of an archive.
// Flexible call shapes: t(opt), t(files), t(cb), t(opt, files),
// t(opt, cb), t(opt, files, cb). Dispatches on the `file` and `sync`
// options like the other high-level commands.
const t = module.exports = (opt_, files, cb) => {
  if (typeof opt_ === 'function') {
    cb = opt_
    files = []
    opt_ = {}
  } else if (Array.isArray(opt_)) {
    files = opt_
    opt_ = {}
  }

  if (typeof files === 'function') {
    cb = files
    files = []
  }

  if (!files)
    files = []

  const opt = hlo(opt_)

  if (opt.sync && typeof cb === 'function')
    throw new TypeError('callback not supported for sync tar functions')

  if (!opt.file && typeof cb === 'function')
    throw new TypeError('callback only supported with file option')

  if (files.length)
    filesFilter(opt, files)

  // ensure every listed entry gets drained so the parse can advance
  onentryFunction(opt)

  return opt.file && opt.sync ? listFileSync(opt)
    : opt.file ? listFile(opt, cb)
    : list(opt)
}
// Wrap any user-supplied onentry handler so that every entry stream is
// resumed (drained); an unread entry would otherwise stall the parser.
const onentryFunction = opt => {
  const userOnentry = opt.onentry
  opt.onentry = userOnentry
    ? e => { userOnentry(e); e.resume() }
    : e => e.resume()
}
| // construct a filter that limits the file entries listed | ||
| // include child entries if a dir is included | ||
// Build (or augment) opt.filter so that only the requested paths --
// and anything beneath a requested directory -- are listed.
const filesFilter = (opt, files) => {
  // strip trailing slashes so requested dirs match their entries
  const map = new Map(files.map(f => [f.replace(/\/+$/, ''), true]))
  const userFilter = opt.filter

  // Walk up from `file` toward the root; a path matches when it, or
  // any ancestor, was requested. Results are memoized into the map.
  const mapHas = (file, r) => {
    const root = r || path.parse(file).root || '.'
    let ret
    if (file === root)
      ret = false
    else if (map.has(file))
      ret = map.get(file)
    else
      ret = mapHas(path.dirname(file), root)
    map.set(file, ret)
    return ret
  }

  opt.filter = userFilter
    ? (file, entry) => userFilter(file, entry) && mapHas(file.replace(/\/+$/, ''))
    : file => mapHas(file.replace(/\/+$/, ''))
}
// Synchronously read the archive at opt.file and feed it through the
// parser returned by list(). Small files are read in one shot; larger
// ones are streamed through a reusable buffer.
const listFileSync = opt => {
  const p = list(opt)
  const file = opt.file
  let fd
  try {
    const stat = fs.statSync(file)
    const readSize = opt.maxReadSize || 16 * 1024 * 1024
    if (stat.size < readSize)
      p.end(fs.readFileSync(file))
    else {
      let pos = 0
      const buf = Buffer.allocUnsafe(readSize)
      fd = fs.openSync(file, 'r')
      while (pos < stat.size) {
        const bytesRead = fs.readSync(fd, buf, 0, readSize, pos)
        pos += bytesRead
        // the sync parser consumes the chunk immediately, so reusing
        // `buf` on the next iteration is safe
        p.write(buf.slice(0, bytesRead))
      }
      p.end()
    }
  } finally {
    // BUGFIX: the fd was previously closed only on the error path,
    // leaking the descriptor whenever the large-file branch succeeded.
    // Close it unconditionally (best-effort, never masks an error).
    if (fd !== undefined)
      try { fs.closeSync(fd) } catch (er) {}
  }
}
// Async listing: stat the archive first; small files are read whole,
// larger ones are streamed into the parser with a large highWaterMark.
// Resolves on parser 'end', rejects on any fs/parser error. If cb is
// given it is invoked for both outcomes.
const listFile = (opt, cb) => {
  const parser = new Parser(opt)
  const readSize = opt.maxReadSize || 16 * 1024 * 1024
  const file = opt.file

  const p = new Promise((resolve, reject) => {
    parser.on('error', reject)
    parser.on('end', resolve)

    fs.stat(file, (er, stat) => {
      if (er)
        return reject(er)
      if (stat.size < readSize) {
        fs.readFile(file, (er, data) => {
          if (er)
            return reject(er)
          parser.end(data)
        })
      } else {
        const stream = fs.createReadStream(file, { highWaterMark: readSize })
        stream.on('error', reject)
        stream.pipe(parser)
      }
    })
  })

  return cb ? p.then(cb, cb) : p
}
// bare listing: return the Parser stream itself; the caller writes
// archive data in and listens for 'entry' events
const list = opt => new Parser(opt)
+118
| 'use strict' | ||
| // wrapper around mkdirp for tar's needs. | ||
| const mkdirp = require('mkdirp') | ||
| const fs = require('fs') | ||
| const path = require('path') | ||
// Error raised when extraction would traverse a symbolic link.
// `symlink` is the offending link; `path` is the target we refused to
// create through it.
class SymlinkError extends Error {
  constructor (symlink, path) {
    super('Cannot extract through symbolic link')
    this.path = path
    this.symlink = symlink
  }

  get name () {
    // BUGFIX: was misspelled 'SylinkError'
    return 'SymlinkError'
  }
}
// Create `dir` (and any missing parents under opt.cwd) for extraction.
// With opt.preserve, defer to mkdirp; otherwise walk one path segment
// at a time so symlinks in the way can be detected (and optionally
// unlinked). Successes are memoized in opt.cache. Calls cb(er?).
const mkdir = module.exports = (dir, opt, cb) => {
  // always grant the owner rwx bits so we can descend into the dir
  const mode = opt.mode | 0o0700
  const preserve = opt.preserve
  const unlink = opt.unlink
  const cache = opt.cache
  const cwd = opt.cwd

  const done = er => {
    if (!er)
      cache.set(dir, true)
    cb(er)
  }

  if (cache && cache.get(dir) === true || dir === cwd)
    return cb()

  if (preserve)
    return mkdirp(dir, mode, done)

  const parts = path.relative(cwd, dir).split(/\/|\\/)
  mkdir_(cwd, parts, mode, cache, unlink, done)
}
// Recursive worker: create the next path segment under `base`, then
// recurse with the remaining segments. Cached segments are skipped.
const mkdir_ = (base, parts, mode, cache, unlink, cb) => {
  if (!parts.length)
    return cb()

  const part = base + '/' + parts.shift()
  if (cache.get(part))
    return mkdir_(part, parts, mode, cache, unlink, cb)

  fs.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cb))
}
// Curried fs.mkdir callback: decide how to recover when mkdir(part)
// fails, then continue the walk with mkdir_ on success.
const onmkdir = (part, parts, mode, cache, unlink, cb) => er => {
  if (er) {
    // mkdir failed -- inspect whatever is already at `part`
    fs.lstat(part, (statEr, st) => {
      if (statEr)
        cb(statEr)
      else if (st.isDirectory())
        // already a directory: fine, keep walking
        mkdir_(part, parts, mode, cache, unlink, cb)
      else if (unlink)
        // clobber whatever is in the way, then retry the mkdir
        fs.unlink(part, er => {
          if (er)
            return cb(er)
          fs.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cb))
        })
      else if (st.isSymbolicLink())
        // refuse to extract through symlinks (unless unlink was set,
        // which is handled by the branch above)
        return cb(new SymlinkError(part, part + '/' + parts.join('/')))
      else
        // some other entry is in the way: report the original error
        cb(er)
    })
  } else
    mkdir_(part, parts, mode, cache, unlink, cb)
}
// Synchronous analog of mkdir(). Returns (not throws) a SymlinkError
// when extraction would pass through a symlink -- mirroring how the
// async version reports it via the callback -- and throws on other
// failures.
const mkdirSync = module.exports.sync = (dir, opt) => {
  // always grant the owner rwx bits so we can descend into the dir
  const mode = opt.mode | 0o0700
  const preserve = opt.preserve
  const unlink = opt.unlink
  const cache = opt.cache
  const cwd = opt.cwd

  if (cache && cache.get(dir) === true || dir === cwd)
    return

  if (preserve) {
    mkdirp.sync(dir, mode)
    cache.set(dir, true)
    return
  }

  const sub = path.relative(cwd, dir)
  const parts = sub.split(/\/|\\/)
  for (let p = parts.shift(), part = cwd;
       p && (part += '/' + p);
       p = parts.shift()) {
    if (cache.get(part))
      continue

    try {
      fs.mkdirSync(part, mode)
      cache.set(part, true)
    } catch (er) {
      const st = fs.lstatSync(part)
      if (st.isDirectory()) {
        cache.set(part, true)
        continue
      } else if (unlink) {
        fs.unlinkSync(part)
        fs.mkdirSync(part, mode)
        cache.set(part, true)
        continue
      } else if (st.isSymbolicLink())
        return new SymlinkError(part, part + '/' + parts.join('/'))
      else
        // BUGFIX: a non-directory, non-symlink entry is in the way and
        // we may not unlink it. Previously this fell through silently
        // (and, for the final segment, cached the path as a created
        // directory). Re-throw the original mkdir error instead,
        // matching the async version's cb(er) in the same case.
        throw er
    }
  }
  cache.set(dir, true)
}
+145
| 'use strict' | ||
| const Header = require('./header.js') | ||
| const path = require('path') | ||
// A pax extended header record: holds the attributes that do not fit
// (or that overflow) the classic ustar header fields, and knows how to
// encode itself as a 512-byte header block plus a key=value body.
class Pax {
  // obj: attribute bag (missing values normalized to null)
  // global: true for a 'g' GlobalExtendedHeader, false for per-file 'x'
  constructor (obj, global) {
    this.atime = obj.atime || null
    this.charset = obj.charset || null
    this.comment = obj.comment || null
    this.ctime = obj.ctime || null
    this.gid = obj.gid || null
    this.gname = obj.gname || null
    this.linkpath = obj.linkpath || null
    this.mtime = obj.mtime || null
    this.path = obj.path || null
    this.size = obj.size || null
    this.uid = obj.uid || null
    this.uname = obj.uname || null
    this.dev = obj.dev || null
    this.ino = obj.ino || null
    this.nlink = obj.nlink || null
    this.global = global || false
  }

  // Returns a Buffer holding a ustar header block that describes the
  // pax body, followed by the body itself, null-padded to a multiple
  // of 512 bytes -- or null when there is nothing to encode.
  encode () {
    const body = this.encodeBody()
    if (body === '')
      return null

    const bodyLen = Buffer.byteLength(body)
    // round up to 512 bytes
    // add 512 for header
    const bufLen = 512 * Math.ceil(1 + bodyLen / 512)
    const buf = Buffer.allocUnsafe(bufLen)

    // 0-fill the header section, it might not hit every field
    for (let i = 0; i < 512; i++) {
      buf[i] = 0
    }

    new Header({
      // XXX split the path
      // then the path should be PaxHeader + basename, but less than 99,
      // prepend with the dirname
      path: ('PaxHeader/' + path.basename(this.path)).slice(0, 99),
      // NOTE(review): this.mode is never set by the constructor, so
      // this falls back to 0o644 unless assigned externally -- confirm
      mode: this.mode || 0o644,
      uid: this.uid || null,
      gid: this.gid || null,
      size: bodyLen,
      mtime: this.mtime || null,
      type: this.global ? 'GlobalExtendedHeader' : 'ExtendedHeader',
      linkpath: '',
      uname: this.uname || '',
      gname: this.gname || '',
      devmaj: 0,
      devmin: 0,
      atime: this.atime || null,
      ctime: this.ctime || null
    }).encode(buf)

    buf.write(body, 512, bodyLen, 'utf8')

    // null pad after the body
    for (let i = bodyLen + 512; i < buf.length; i++) {
      buf[i] = 0
    }

    return buf
  }

  // Concatenate the "<len> <key>=<value>\n" records for every
  // non-null field.
  encodeBody () {
    return (
      this.encodeField('path') +
      this.encodeField('ctime') +
      this.encodeField('atime') +
      this.encodeField('dev') +
      this.encodeField('ino') +
      this.encodeField('nlink') +
      this.encodeField('charset') +
      this.encodeField('comment') +
      this.encodeField('gid') +
      this.encodeField('gname') +
      this.encodeField('linkpath') +
      this.encodeField('mtime') +
      this.encodeField('size') +
      this.encodeField('uid') +
      this.encodeField('uname')
    )
  }

  // Encode one field as "<len> <key>=<value>\n", where <len> counts
  // the whole record including its own decimal digits. Dates become
  // epoch seconds; dev/ino/nlink get the SCHILY. vendor prefix.
  encodeField (field) {
    if (this[field] === null || this[field] === undefined)
      return ''
    const v = this[field] instanceof Date ? this[field].getTime() / 1000
      : this[field]
    const s = ' ' +
      (field === 'dev' || field === 'ino' || field === 'nlink'
        ? 'SCHILY.' : '') +
      field + '=' + v + '\n'
    const byteLen = Buffer.byteLength(s)
    // the digits includes the length of the digits in ascii base-10
    // so if it's 9 characters, then adding 1 for the 9 makes it 10
    // which makes it 11 chars.
    let digits = Math.floor(Math.log(byteLen) / Math.log(10)) + 1
    if (byteLen + digits >= Math.pow(10, digits))
      digits += 1
    const len = digits + byteLen
    return len + s
  }
}
// Parse a pax body string into a Pax instance, layered over any
// pre-existing extended attributes `ex`; `g` marks the result global.
Pax.parse = (string, ex, g) => new Pax(merge(parseKV(string), ex), g)
// Overlay `a`'s keys onto `b` (mutating and returning b); with no `b`,
// return `a` unchanged.
const merge = (a, b) => {
  if (!b)
    return a
  for (const k of Object.keys(a))
    b[k] = a[k]
  return b
}
// Split a pax body into its newline-terminated records and fold each
// record into a null-prototype attribute object.
const parseKV = string => {
  const records = string.replace(/\n$/, '').split('\n')
  return records.reduce(parseKVLine, Object.create(null))
}
// Parse one "<len> <key>=<value>" record into `set` (the trailing \n,
// already stripped by parseKV, accounts for the +1 in the length
// check). Records with a mismatched length prefix are ignored.
// XXX Values with \n in them will fail this.
// Refactor to not be a naive line-by-line parse.
const parseKVLine = (set, line) => {
  const n = parseInt(line, 10)
  if (n !== Buffer.byteLength(line) + 1)
    return set

  const record = line.substr((n + ' ').length)
  const pieces = record.split('=')
  // strip the SCHILY. vendor prefix off dev/ino/nlink keys
  const k = pieces.shift().replace(/^SCHILY\.(dev|ino|nlink)/, '$1')
  if (!k)
    return set

  const v = pieces.join('=')
  // timestamps become Dates, pure digit strings become numbers,
  // everything else stays a string
  if (/^([A-Z]+\.)?([mac]|birth|creation)time$/.test(k))
    set[k] = new Date(v * 1000)
  else if (/^[0-9]+$/.test(v))
    set[k] = +v
  else
    set[k] = v
  return set
}
// the Pax class is this module's sole export
module.exports = Pax
| 'use strict' | ||
| const types = require('./types.js') | ||
| const MiniPass = require('minipass') | ||
| const SLURP = Symbol('slurp') | ||
| module.exports = class ReadEntry extends MiniPass { | ||
| constructor (header, ex, gex) { | ||
| super() | ||
| this.extended = ex | ||
| this.globalExtended = gex | ||
| this.header = header | ||
| this.blockRemain = 512 * Math.ceil(header.size / 512) | ||
| this.remain = header.size | ||
| this.type = header.type | ||
| this.meta = false | ||
| this.ignore = false | ||
| switch (this.type) { | ||
| case 'File': | ||
| case 'OldFile': | ||
| case 'Link': | ||
| case 'SymbolicLink': | ||
| case 'CharacterDevice': | ||
| case 'BlockDevice': | ||
| case 'Directory': | ||
| case 'FIFO': | ||
| case 'ContiguousFile': | ||
| case 'GNUDumpDir': | ||
| break | ||
| case 'NextFileHasLongLinkpath': | ||
| case 'NextFileHasLongPath': | ||
| case 'OldGnuLongPath': | ||
| case 'GlobalExtendedHeader': | ||
| case 'ExtendedHeader': | ||
| case 'OldExtendedHeader': | ||
| this.meta = true | ||
| break | ||
| // NOTE: gnutar and bsdtar treat unrecognized types as 'File' | ||
| // it may be worth doing the same, but with a warning. | ||
| default: | ||
| this.ignore = true | ||
| } | ||
| this.path = header.path | ||
| this.mode = header.mode | ||
| if (this.mode) | ||
| this.mode = this.mode & 0o7777 | ||
| this.uid = header.uid | ||
| this.gid = header.gid | ||
| this.uname = header.uname | ||
| this.gname = header.gname | ||
| this.size = header.size | ||
| this.mtime = header.mtime | ||
| this.atime = header.atime | ||
| this.ctime = header.ctime | ||
| this.linkpath = header.linkpath | ||
| this.uname = header.uname | ||
| this.gname = header.gname | ||
| if (ex) this[SLURP](ex) | ||
| if (gex) this[SLURP](gex, true) | ||
| } | ||
| write (data) { | ||
| const writeLen = data.length | ||
| if (writeLen > this.blockRemain) | ||
| throw new Error('writing more to entry than is appropriate') | ||
| const r = this.remain | ||
| const br = this.blockRemain | ||
| this.remain = Math.max(0, r - writeLen) | ||
| this.blockRemain = Math.max(0, br - writeLen) | ||
| if (this.ignore) | ||
| return true | ||
| if (r >= writeLen) | ||
| return super.write(data) | ||
| // r < writeLen | ||
| return super.write(data.slice(0, r)) | ||
| } | ||
| [SLURP] (ex, global) { | ||
| for (let k in ex) { | ||
| // we slurp in everything except for the path attribute in | ||
| // a global extended header, because that's weird. | ||
| if (ex[k] !== null && ex[k] !== undefined && | ||
| !(global && k === 'path')) | ||
| this[k] = ex[k] | ||
| } | ||
| } | ||
| } |
+179
| 'use strict' | ||
| // tar -r | ||
| const hlo = require('./high-level-opt.js') | ||
| const Pack = require('./pack.js') | ||
| const Parse = require('./parse.js') | ||
| const fs = require('fs') | ||
| // starting at the head of the file, read a Header | ||
| // If the checksum is invalid, that's our position to start writing | ||
| // If it is, jump forward by the specified size (round up to 512) | ||
| // and try again. | ||
| // Write the new Pack stream starting there. | ||
| const Header = require('./header.js') | ||
// tar -r: append entries to an existing (uncompressed) archive file.
const r = module.exports = (opt_, files, cb) => {
  const opt = hlo(opt_)

  if (!opt.file)
    throw new TypeError('file is required')

  // a gzipped archive cannot be appended to in place
  if (opt.gzip)
    throw new TypeError('cannot append to compressed archives')

  if (!files || !Array.isArray(files) || !files.length)
    throw new TypeError('no files or directories specified')

  return opt.sync ? replaceSync(opt, files) : replace(opt, files, cb)
}
// Synchronous append: scan the archive header-by-header to find where
// the valid entries end (the first invalid/truncated header, i.e. the
// start of the EOF blocks), then write the new Pack output from there.
const replaceSync = (opt, files) => {
  const p = new Pack.Sync(opt)

  let threw = true
  let fd
  try {
    try {
      fd = fs.openSync(opt.file, 'r+')
    } catch (er) {
      // no archive yet: create one
      if (er.code === 'ENOENT')
        fd = fs.openSync(opt.file, 'w+')
      else
        throw er
    }

    const st = fs.fstatSync(fd)
    const headBuf = Buffer.alloc(512)
    let position

    POSITION: for (position = 0; position < st.size; position += 512) {
      // accumulate a full 512-byte header (may take several reads)
      for (let bufPos = 0, bytes = 0; bufPos < 512; bufPos += bytes) {
        bytes = fs.readSync(
          fd, headBuf, bufPos, headBuf.length - bufPos, position + bufPos
        )

        // gzip magic at offset 0: this is not a plain tar archive
        if (position === 0 && headBuf[0] === 0x1f && headBuf[1] === 0x8b)
          throw new Error('cannot append to compressed archives')

        // hit EOF mid-header: truncated archive, append right here
        if (!bytes)
          break POSITION
      }

      let h = new Header(headBuf)
      // an invalid checksum marks the end of the real entries
      if (!h.cksumValid)
        break
      let entryBlockSize = 512 * Math.ceil(h.size / 512)
      // entry body would run past EOF: treat as end of archive
      if (position + entryBlockSize + 512 > st.size)
        break
      // the 512 for the header we just parsed will be added as well
      // also jump ahead all the blocks for the body
      position += entryBlockSize
      if (opt.mtimeCache)
        opt.mtimeCache.set(h.path, h.mtime)
    }

    // write the new entries starting at the discovered position
    p.on('data', c => {
      fs.writeSync(fd, c, 0, c.length, position)
      position += c.length
    })
    p.on('end', _ => fs.closeSync(fd))

    files.forEach(file => p.add(file))
    p.end()
    threw = false
  } finally {
    if (threw)
      try { fs.closeSync(fd) } catch (er) {}
  }
}
// Async append: locate the end of the valid entries (getPos), then
// open a write stream positioned there and pipe the new Pack output
// in. Returns a Promise, or invokes cb for both outcomes if given.
const replace = (opt, files, cb) => {
  const p = new Pack(opt)

  // Find the offset of the first invalid header (where the EOF blocks
  // begin); calls cb_(er) or cb_(null, position). On error the fd is
  // closed first, since the caller can no longer use it.
  const getPos = (fd, size, cb_) => {
    const cb = (er, pos) => {
      if (er)
        fs.close(fd, _ => cb_(er))
      else
        cb_(null, pos)
    }

    let position = 0
    if (size === 0)
      return cb(null, 0)

    let bufPos = 0
    const headBuf = Buffer.alloc(512)
    const onread = (er, bytes) => {
      if (er)
        return cb(er)
      bufPos += bytes
      // keep reading until a full 512-byte header is accumulated
      if (bufPos < 512 && bytes)
        return fs.read(
          fd, headBuf, bufPos, headBuf.length - bufPos,
          position + bufPos, onread
        )

      // gzip magic at the start: not a plain tar archive
      if (position === 0 && headBuf[0] === 0x1f && headBuf[1] === 0x8b)
        return cb(new Error('cannot append to compressed archives'))

      // truncated header
      if (bufPos < 512)
        return cb(null, position)

      const h = new Header(headBuf)
      // invalid checksum marks the end of the real entries
      if (!h.cksumValid)
        return cb(null, position)

      const entryBlockSize = 512 * Math.ceil(h.size / 512)
      // entry body would run past EOF: append here
      if (position + entryBlockSize + 512 > size)
        return cb(null, position)

      // skip past this entry's header and body blocks
      position += entryBlockSize + 512
      if (position >= size)
        return cb(null, position)

      if (opt.mtimeCache)
        opt.mtimeCache.set(h.path, h.mtime)
      bufPos = 0
      fs.read(fd, headBuf, 0, 512, position, onread)
    }
    fs.read(fd, headBuf, 0, 512, position, onread)
  }

  const promise = new Promise((resolve, reject) => {
    p.on('error', reject)
    const onopen = (er, fd) => {
      if (er) {
        // no archive yet: retry in create mode
        if (er.code === 'ENOENT')
          return fs.open(opt.file, 'w+', onopen)
        return reject(er)
      }
      fs.fstat(fd, (er, st) => {
        if (er)
          return reject(er)
        getPos(fd, st.size, (er, position) => {
          if (er)
            return reject(er)
          // reuse the already-open fd, starting at the append point
          const stream = fs.createWriteStream(opt.file, {
            fd: fd,
            flags: 'r+',
            start: position
          })
          p.pipe(stream)
          stream.on('error', reject)
          stream.on('close', resolve)
          files.forEach(file => p.add(file))
          p.end()
        })
      })
    }
    fs.open(opt.file, 'r+', onopen)
  })

  return cb ? promise.then(cb, cb) : promise
}
+44
| 'use strict' | ||
| // map types from key to human-friendly name | ||
// map types from key to human-friendly name
// (key = the single-character typeflag byte from the ustar header)
exports.name = new Map([
  ['0', 'File'],
  // same as File
  ['', 'OldFile'],
  ['1', 'Link'],
  ['2', 'SymbolicLink'],
  // Devices and FIFOs aren't fully supported
  // they are parsed, but skipped when unpacking
  ['3', 'CharacterDevice'],
  ['4', 'BlockDevice'],
  ['5', 'Directory'],
  ['6', 'FIFO'],
  // same as File
  ['7', 'ContiguousFile'],
  // pax headers
  ['g', 'GlobalExtendedHeader'],
  ['x', 'ExtendedHeader'],
  // vendor-specific stuff
  // skip
  ['A', 'SolarisACL'],
  // like 5, but with data, which should be skipped
  ['D', 'GNUDumpDir'],
  // metadata only, skip
  ['I', 'Inode'],
  // data = link path of next file
  ['K', 'NextFileHasLongLinkpath'],
  // data = path of next file
  ['L', 'NextFileHasLongPath'],
  // skip
  ['M', 'ContinuationFile'],
  // like L
  ['N', 'OldGnuLongPath'],
  // skip
  ['S', 'SparseFile'],
  // skip
  ['V', 'TapeVolumeHeader'],
  // like x
  ['X', 'OldExtendedHeader']
])

// map the other direction
// (the names are unique, so inverting the pairs is lossless)
exports.code = new Map(Array.from(exports.name).map(kv => [kv[1], kv[0]]))
+380
| 'use strict' | ||
| const assert = require('assert') | ||
| const EE = require('events').EventEmitter | ||
| const Parser = require('./parse.js') | ||
| const fs = require('fs') | ||
| const path = require('path') | ||
| const mkdir = require('./mkdir.js') | ||
| const mkdirSync = mkdir.sync | ||
// Private method/state keys for the Unpack class, kept as Symbols so
// they don't collide with (or show up as) public API.
const ONENTRY = Symbol('onEntry')
const CHECKFS = Symbol('checkFs')
const MAKEFS = Symbol('makeFs')
const FILE = Symbol('file')
const DIRECTORY = Symbol('directory')
const LINK = Symbol('link')
const SYMLINK = Symbol('symlink')
const HARDLINK = Symbol('hardlink')
const UNSUPPORTED = Symbol('unsupported')
// NOTE(review): UNKNOWN is not referenced in the visible portion of
// this file -- presumably used elsewhere; verify before removing
const UNKNOWN = Symbol('unknown')
const CHECKPATH = Symbol('checkPath')
const MKDIR = Symbol('mkdir')
const ONERROR = Symbol('onError')
// bookkeeping for the pending-operation counter that gates 'close'
const PENDING = Symbol('pending')
const PEND = Symbol('pend')
const UNPEND = Symbol('unpend')
const ENDED = Symbol('ended')
const MAYBECLOSE = Symbol('maybeClose')
const SKIP = Symbol('skip')
| class Unpack extends Parser { | ||
| constructor (opt) { | ||
| super(opt) | ||
| if (!opt) | ||
| opt = {} | ||
| this[PENDING] = 0 | ||
| this[ENDED] = false | ||
| this.on('end', _ => { | ||
| this[ENDED] = true | ||
| this[MAYBECLOSE]() | ||
| }) | ||
| this.dirCache = opt.dirCache || new Map() | ||
| if (opt.preserveOwner === undefined) | ||
| this.preserveOwner = process.getuid && process.getuid() === 0 | ||
| else | ||
| this.preserveOwner = !!opt.preserveOwner | ||
| // do not unpack over files that are newer than what's in the archive | ||
| this.newer = !!opt.newer | ||
| // do not unpack over ANY files | ||
| this.keep = !!opt.keep | ||
| // do not set mtime/atime of extracted entries | ||
| this.noMtime = !!opt.noMtime | ||
| // allow .., absolute path entries, and unpacking through symlinks | ||
| // without this, warn and skip .., relativize absolutes, and error | ||
| // on symlinks in extraction path | ||
| this.preservePaths = !!opt.preservePaths | ||
| // unlink files and links before writing. This breaks existing hard | ||
| // links, and removes symlink directories rather than erroring | ||
| this.unlink = !!opt.unlink | ||
| this.cwd = path.resolve(opt.cwd || process.cwd()) | ||
| this.strip = +opt.strip || 0 | ||
| this.umask = typeof opt.umask === 'number' ? opt.umask : process.umask() | ||
| // default mode for dirs created as parents | ||
| this.dmode = opt.dmode || (0o0777 & (~this.umask)) | ||
| this.fmode = opt.fmode || (0o0666 & (~this.umask)) | ||
| this.on('entry', entry => this[ONENTRY](entry)) | ||
| } | ||
| [MAYBECLOSE] () { | ||
| if (this[ENDED] && this[PENDING] === 0) | ||
| this.emit('close') | ||
| } | ||
| [CHECKPATH] (entry) { | ||
| if (this.strip) { | ||
| const parts = entry.path.split(/\/|\\/) | ||
| if (parts.length < this.strip) | ||
| return false | ||
| entry.path = parts.slice(this.strip).join('/') | ||
| } | ||
| if (!this.preservePaths) { | ||
| const p = entry.path | ||
| if (p.match(/(^|\/|\\)\.\.(\\|\/|$)/)) { | ||
| this.warn('path contains \'..\'', p) | ||
| return false | ||
| } | ||
| if (path.isAbsolute(p)) { | ||
| const parsed = path.parse(p) | ||
| this.warn('stripping ' + parsed.root + ' from absolute path', p) | ||
| entry.path = p.substr(parsed.root.length) | ||
| } | ||
| } | ||
| if (path.isAbsolute(entry.path)) | ||
| entry.absolute = entry.path | ||
| else | ||
| entry.absolute = path.resolve(this.cwd, entry.path) | ||
| return true | ||
| } | ||
| [ONENTRY] (entry) { | ||
| if (!this[CHECKPATH](entry)) | ||
| return entry.resume() | ||
| assert.equal(typeof entry.absolute, 'string') | ||
| switch (entry.type) { | ||
| case 'Directory': | ||
| case 'GNUDumpDir': | ||
| if (entry.mode) | ||
| entry.mode = entry.mode | 0o700 | ||
| case 'File': | ||
| case 'OldFile': | ||
| case 'ContiguousFile': | ||
| case 'Link': | ||
| case 'SymbolicLink': | ||
| return this[CHECKFS](entry) | ||
| case 'CharacterDevice': | ||
| case 'BlockDevice': | ||
| case 'FIFO': | ||
| return this[UNSUPPORTED](entry) | ||
| // this should be impossible | ||
| /* istanbul ignore next */ | ||
| default: | ||
| return this.emit('error', new Error('unknown type: ' + entry.type)) | ||
| } | ||
| } | ||
| [ONERROR] (er, entry) { | ||
| this.warn(er.message, er) | ||
| this[UNPEND]() | ||
| entry.resume() | ||
| } | ||
| [MKDIR] (dir, mode, cb) { | ||
| mkdir(dir, { | ||
| preserve: this.preservePaths, | ||
| unlink: this.unlink, | ||
| cache: this.dirCache, | ||
| cwd: this.cwd, | ||
| mode: mode | ||
| }, cb) | ||
| } | ||
| [FILE] (entry) { | ||
| const mode = entry.mode & 0o7777 || this.fmode | ||
| const stream = fs.createWriteStream(entry.absolute, { mode: mode }) | ||
| stream.on('error', er => this[ONERROR](er, entry)) | ||
| stream.on('close', _ => { | ||
| if (entry.mtime && !this.noMtime) | ||
| fs.utimes(entry.absolute, entry.atime || new Date(), entry.mtime, _ => _) | ||
| if (entry.uid && this.preserveOwner) | ||
| fs.chown( | ||
| entry.absolute, entry.uid, entry.gid || process.getgid(), _ => _ | ||
| ) | ||
| this[UNPEND]() | ||
| }) | ||
| entry.pipe(stream) | ||
| } | ||
| [DIRECTORY] (entry) { | ||
| const mode = entry.mode & 0o7777 || this.dmode | ||
| this[MKDIR](entry.absolute, mode, er => { | ||
| if (er) | ||
| return this[ONERROR](er, entry) | ||
| if (entry.mtime && !this.noMtime) | ||
| fs.utimes(entry.absolute, entry.atime || new Date(), entry.mtime, _ => _) | ||
| if (entry.uid && this.preserveOwner) | ||
| fs.chown( | ||
| entry.absolute, entry.uid, entry.gid || process.getgid(), _ => _ | ||
| ) | ||
| this[UNPEND]() | ||
| entry.resume() | ||
| }) | ||
| } | ||
| [UNSUPPORTED] (entry) { | ||
| this.warn('unsupported entry type: ' + entry.type, entry) | ||
| entry.resume() | ||
| } | ||
| [SYMLINK] (entry) { | ||
| this[LINK](entry, entry.linkpath, 'symlink') | ||
| } | ||
| [HARDLINK] (entry) { | ||
| this[LINK](entry, path.resolve(this.cwd, entry.linkpath), 'link') | ||
| } | ||
| [PEND] () { | ||
| this[PENDING]++ | ||
| } | ||
| [UNPEND] () { | ||
| this[PENDING]-- | ||
| this[MAYBECLOSE]() | ||
| } | ||
| [SKIP] (entry) { | ||
| this[UNPEND]() | ||
| entry.resume() | ||
| } | ||
  // check if a thing is there, and if so, try to clobber it
  [CHECKFS] (entry) {
    this[PEND]()
    // make sure the containing directory exists first
    this[MKDIR](path.dirname(entry.absolute), this.dmode, er => {
      if (er)
        return this[ONERROR](er, entry)
      fs.lstat(entry.absolute, (er, st) => {
        // existing file wins under `keep`, or under `newer` when it is
        // more recent than the archived entry
        if (st && (this.keep || this.newer && st.mtime > entry.mtime))
          this[SKIP](entry)
        // nothing in the way (er), or a plain file we will open with 'w'
        // anyway, so no unlink needed
        else if (er || (entry.type === 'File' && !this.unlink && st.isFile()))
          this[MAKEFS](null, entry)
        else if (st.isDirectory()) {
          if (entry.type === 'Directory') {
            // reuse the existing dir; chmod only when modes differ
            if (!entry.mode || (st.mode & 0o7777) === entry.mode)
              this[MAKEFS](null, entry)
            else
              fs.chmod(entry.absolute, entry.mode, er => this[MAKEFS](er, entry))
          } else
            // a dir is in the way of a non-dir entry: remove it
            fs.rmdir(entry.absolute, er => this[MAKEFS](er, entry))
        } else
          // some other filesystem object is in the way: unlink it
          fs.unlink(entry.absolute, er => this[MAKEFS](er, entry))
      })
    })
  }
  // Dispatch an entry to the creator for its type, after [CHECKFS]
  // has cleared the target path.
  [MAKEFS] (er, entry) {
    if (er)
      return this[ONERROR](er, entry)
    switch (entry.type) {
      case 'File':
      case 'OldFile':
      case 'ContiguousFile':
        return this[FILE](entry)
      case 'Link':
        return this[HARDLINK](entry)
      case 'SymbolicLink':
        return this[SYMLINK](entry)
      case 'Directory':
      case 'GNUDumpDir':
        return this[DIRECTORY](entry)
      // should be impossible
      /* istanbul ignore next */
      default:
        return this.emit('error', new Error('unknown type: ' + entry.type))
    }
  }
  // Create a hard or symbolic link; `link` is the fs method name
  // ('link' or 'symlink').
  [LINK] (entry, linkpath, link) {
    // XXX: get the type ('file' or 'dir') for windows
    fs[link](linkpath, entry.absolute, er => {
      if (er)
        return this[ONERROR](er, entry)
      this[UNPEND]()
      entry.resume()
    })
  }
| } | ||
// Synchronous variant of Unpack: same clobber/create algorithm, using
// the blocking *Sync fs calls. The useless pass-through constructor was
// removed (a subclass with no constructor inherits the parent's).
class UnpackSync extends Unpack {
  // Clear the target path for an entry, then dispatch to [MAKEFS].
  [CHECKFS] (entry) {
    const er = this[MKDIR](path.dirname(entry.absolute), this.dmode)
    if (er)
      return this[ONERROR](er, entry)
    try {
      const st = fs.lstatSync(entry.absolute)
      // existing file wins under `keep`, or under `newer` when more recent
      // NOTE(review): [SKIP] calls [UNPEND] but the sync path never called
      // [PEND] — confirm the pending counter stays balanced
      if (this.keep || this.newer && st.mtime > entry.mtime)
        return this[SKIP](entry)
      else if (entry.type === 'File' && !this.unlink && st.isFile())
        return this[MAKEFS](null, entry)
      else {
        try {
          if (st.isDirectory()) {
            if (entry.type === 'Directory') {
              // reuse the dir; chmod only when modes differ
              if (entry.mode && (st.mode & 0o7777) !== entry.mode)
                fs.chmodSync(entry.absolute, entry.mode)
            } else
              fs.rmdirSync(entry.absolute)
          } else
            fs.unlinkSync(entry.absolute)
          return this[MAKEFS](null, entry)
        } catch (er) {
          return this[ONERROR](er, entry)
        }
      }
    } catch (er) {
      // lstat failed: nothing in the way, proceed to create
      return this[MAKEFS](null, entry)
    }
  }
  // Write a file entry's body to disk, then restore times/ownership
  // (best-effort) before closing the fd.
  [FILE] (entry) {
    const mode = entry.mode & 0o7777 || this.fmode
    try {
      const fd = fs.openSync(entry.absolute, 'w', mode)
      entry.on('data', buf => fs.writeSync(fd, buf, 0, buf.length, null))
      entry.on('end', _ => {
        if (entry.mtime && !this.noMtime) {
          try {
            fs.futimesSync(fd, entry.atime || new Date(), entry.mtime)
          } catch (er) {}
        }
        // NOTE(review): uid 0 fails this truthiness check — confirm intended
        if (entry.uid && this.preserveOwner) {
          try {
            fs.fchownSync(fd, entry.uid, entry.gid || process.getgid())
          } catch (er) {}
        }
        try { fs.closeSync(fd) } catch (er) { this[ONERROR](er, entry) }
      })
    } catch (er) { this[ONERROR](er, entry) }
  }
  // Create a directory entry, then restore times/ownership (best-effort).
  [DIRECTORY] (entry) {
    const mode = entry.mode & 0o7777 || this.dmode
    const er = this[MKDIR](entry.absolute, mode)
    if (er)
      return this[ONERROR](er, entry)
    if (entry.mtime && !this.noMtime) {
      try {
        fs.utimesSync(entry.absolute, entry.atime || new Date(), entry.mtime)
      } catch (er) {}
    }
    if (entry.uid && this.preserveOwner) {
      try {
        fs.chownSync(entry.absolute, entry.uid, entry.gid || process.getgid())
      } catch (er) {}
    }
    entry.resume()
  }
  // Sync mkdir wrapper: returns the error instead of throwing, to match
  // the callback-style contract the callers expect.
  [MKDIR] (dir, mode) {
    try {
      return mkdir.sync(dir, {
        preserve: this.preservePaths,
        unlink: this.unlink,
        cache: this.dirCache,
        cwd: this.cwd,
        mode: mode
      })
    } catch (er) {
      return er
    }
  }
  // Create a hard or symbolic link; `link` is the fs method name base.
  [LINK] (entry, linkpath, link) {
    try {
      fs[link + 'Sync'](linkpath, entry.absolute)
      entry.resume()
    } catch (er) {
      return this[ONERROR](er, entry)
    }
  }
}
// expose the synchronous class as a property of the async one
Unpack.Sync = UnpackSync
module.exports = Unpack
| 'use strict' | ||
| // tar -u | ||
| const hlo = require('./high-level-opt.js') | ||
| const r = require('./replace.js') | ||
| // just call tar.r with the filter and mtimeCache | ||
// tar -u: append entries that are newer than what the archive already
// holds. Validates options, installs the mtime-based filter, and then
// delegates the actual work to tar.r (replace).
const u = module.exports = (opt_, files, cb) => {
  const opt = hlo(opt_)
  if (!opt.file)
    throw new TypeError('file is required')
  if (opt.gzip)
    throw new TypeError('cannot append to compressed archives')
  const haveFiles = Array.isArray(files) && files.length > 0
  if (!haveFiles)
    throw new TypeError('no files or directories specified')
  mtimeFilter(opt)
  return r(opt, files, cb)
}
// Wrap opt.filter so that entries whose cached mtime is newer than their
// own are excluded. Ensures opt.mtimeCache (a Map of path -> mtime) exists.
const mtimeFilter = opt => {
  const userFilter = opt.filter
  if (!opt.mtimeCache)
    opt.mtimeCache = new Map()
  // passes when no cached mtime is strictly greater than the entry's own
  const isNewEnough = (path, stat) => !(opt.mtimeCache.get(path) > stat.mtime)
  opt.filter = userFilter
    ? (path, stat) => userFilter(path, stat) && isNewEnough(path, stat)
    : isNewEnough
}
| 'use strict' | ||
| const MiniPass = require('minipass') | ||
| const Pax = require('./pax.js') | ||
| const Header = require('./header.js') | ||
| const fs = require('fs') | ||
| const path = require('path') | ||
| const types = require('./types.js') | ||
// default chunk size for reading file bodies (16 MiB)
const maxReadSize = 16 * 1024 * 1024
// symbol keys for private-ish methods, kept off the public surface
const PROCESS = Symbol('process')
const FILE = Symbol('file')
const DIRECTORY = Symbol('directory')
const SYMLINK = Symbol('symlink')
const HARDLINK = Symbol('hardlink')
const HEADER = Symbol('header')
const READ = Symbol('read')
const LSTAT = Symbol('lstat')
const ONLSTAT = Symbol('onlstat')
const ONREAD = Symbol('onread')
const ONREADLINK = Symbol('onreadlink')
const OPENFILE = Symbol('openfile')
const ONOPENFILE = Symbol('onopenfile')
const CLOSE = Symbol('close')
// Stream out the tar header (and, for regular files, the padded body)
// for a single filesystem entry at path `p`, relative to opt.cwd.
class WriteEntry extends MiniPass {
  constructor (p, opt) {
    opt = opt || {}
    super(opt)
    if (typeof p !== 'string')
      throw new TypeError('path is required')
    this.path = p
    // suppress atime, ctime, uid, gid, uname, gname
    this.portable = !!opt.portable
    // until node has builtin pwnam functions, this'll have to do
    this.myuid = process.getuid && process.getuid()
    this.myuser = process.env.USER || ''
    this.maxReadSize = opt.maxReadSize || maxReadSize
    this.linkCache = opt.linkCache || new Map()
    this.statCache = opt.statCache || new Map()
    this.preservePaths = !!opt.preservePaths
    this.cwd = opt.cwd || process.cwd()
    this.strict = !!opt.strict
    if (typeof opt.onwarn === 'function')
      this.on('warn', opt.onwarn)
    // absolute paths are stored relative by stripping the root,
    // unless preservePaths was requested
    if (path.isAbsolute(p) && !this.preservePaths) {
      const parsed = path.parse(p)
      this.warn('stripping ' + parsed.root + ' from absolute path', p)
      this.path = p.substr(parsed.root.length)
    }
    this.win32 = !!opt.win32 || process.platform === 'win32'
    if (this.win32) {
      this.path = this.path.replace(/\\/g, '/')
      p = p.replace(/\\/g, '/')
    }
    this.absolute = opt.absolute || path.resolve(this.cwd, p)
    if (this.path === '')
      this.path = './'
    // reuse a cached stat if we have one, otherwise lstat now
    if (this.statCache.has(this.absolute))
      this[ONLSTAT](this.statCache.get(this.absolute))
    else
      this[LSTAT]()
  }
  // emit 'warn' in lax mode, or an Error (with .data) in strict mode
  warn (msg, data) {
    if (!this.strict)
      return this.emit('warn', msg, data)
    const er = new Error(msg)
    er.data = data
    this.emit('error', er)
  }
  [LSTAT] () {
    fs.lstat(this.absolute, (er, stat) => {
      if (er)
        return this.emit('error', er)
      this[ONLSTAT](stat)
    })
  }
  // cache the stat, derive the entry type, and start processing
  [ONLSTAT] (stat) {
    this.statCache.set(this.absolute, stat)
    this.stat = stat
    // only regular files carry a body
    if (!stat.isFile())
      stat.size = 0
    this.type = getType(stat)
    this.emit('stat', stat)
    this[PROCESS]()
  }
  [PROCESS] () {
    switch (this.type) {
      case 'File': return this[FILE]()
      case 'Directory': return this[DIRECTORY]()
      case 'SymbolicLink': return this[SYMLINK]()
      // unsupported types are ignored.
      default: return this.end()
    }
  }
  // Build and write the 512-byte ustar header; if it can't faithfully
  // encode the data, emit a pax extended header record first.
  [HEADER] () {
    this.header = new Header({
      path: this.path,
      linkpath: this.linkpath,
      // only the permissions and setuid/setgid/sticky bitflags
      // not the higher-order bits that specify file type
      mode: this.stat.mode & 0o7777,
      uid: this.portable ? null : this.stat.uid,
      gid: this.portable ? null : this.stat.gid,
      size: this.stat.size,
      mtime: this.stat.mtime,
      type: this.type,
      uname: this.portable ? null :
        this.stat.uid === this.myuid ? this.myuser : '',
      atime: this.portable ? null : this.stat.atime,
      ctime: this.portable ? null : this.stat.ctime
    })
    if (this.header.encode())
      this.write(new Pax({
        atime: this.portable ? null : this.header.atime,
        ctime: this.portable ? null : this.header.ctime,
        gid: this.portable ? null : this.header.gid,
        mtime: this.header.mtime,
        path: this.path,
        linkpath: this.linkpath,
        // NOTE(review): this.size is never assigned on this class —
        // likely meant this.stat.size; confirm against Pax consumers
        size: this.size,
        uid: this.portable ? null : this.header.uid,
        uname: this.portable ? null : this.header.uname,
        dev: this.portable ? null : this.stat.dev,
        ino: this.portable ? null : this.stat.ino,
        nlink: this.portable ? null : this.stat.nlink
      }).encode())
    this.write(this.header.block)
  }
  [DIRECTORY] () {
    // directories are stored with a trailing slash and no body
    if (this.path.substr(-1) !== '/')
      this.path += '/'
    this.stat.size = 0
    this[HEADER]()
    this.end()
  }
  [SYMLINK] () {
    fs.readlink(this.absolute, (er, linkpath) => {
      if (er)
        return this.emit('error', er)
      this[ONREADLINK](linkpath)
    })
  }
  [ONREADLINK] (linkpath) {
    this.linkpath = linkpath
    this[HEADER]()
    this.end()
  }
  // second and later sightings of a multiply-linked file are stored
  // as hardlink entries pointing at the first
  [HARDLINK] (linkpath) {
    this.type = 'Link'
    this.linkpath = path.relative(this.cwd, linkpath)
    this.stat.size = 0
    this[HEADER]()
    this.end()
  }
  [FILE] () {
    if (this.stat.nlink > 1) {
      const linkKey = this.stat.dev + ':' + this.stat.ino
      if (this.linkCache.has(linkKey)) {
        const linkpath = this.linkCache.get(linkKey)
        // only link to targets inside the cwd
        if (linkpath.indexOf(this.cwd) === 0)
          return this[HARDLINK](linkpath)
      }
      this.linkCache.set(linkKey, this.absolute)
    }
    this[HEADER]()
    if (this.stat.size === 0)
      return this.end()
    this[OPENFILE]()
  }
  [OPENFILE] () {
    fs.open(this.absolute, 'r', (er, fd) => {
      if (er)
        return this.emit('error', er)
      this[ONOPENFILE](fd)
    })
  }
  [ONOPENFILE] (fd) {
    // body is written in 512-byte blocks, reading at most maxReadSize
    // at a time
    const blockLen = 512 * Math.ceil(this.stat.size / 512)
    const bufLen = Math.min(blockLen, this.maxReadSize)
    const buf = Buffer.allocUnsafe(bufLen)
    this[READ](fd, buf, 0, buf.length, 0, this.stat.size, blockLen)
  }
  [READ] (fd, buf, offset, length, pos, remain, blockRemain) {
    fs.read(fd, buf, offset, length, pos, (er, bytesRead) => {
      if (er)
        return this[CLOSE](fd, _ => this.emit('error', er))
      this[ONREAD](fd, buf, offset, length, pos, remain, blockRemain, bytesRead)
    })
  }
  [CLOSE] (fd, cb) {
    fs.close(fd, cb)
  }
  [ONREAD] (fd, buf, offset, length, pos, remain, blockRemain, bytesRead) {
    if (bytesRead <= 0 && remain > 0) {
      const er = new Error('unexpected EOF')
      er.path = this.absolute
      er.syscall = 'read'
      er.code = 'EOF'
      this.emit('error', er)
      // BUGFIX: stop here. Previously control fell through with
      // bytesRead === 0, leaving remain/pos unchanged and re-issuing the
      // identical read forever on a truncated file (and leaking the fd).
      this[CLOSE](fd, _ => _)
      return
    }
    // null out the rest of the buffer, if we could fit the block padding
    if (bytesRead === remain) {
      for (let i = bytesRead; i < length && bytesRead < blockRemain; i++) {
        buf[i + offset] = 0
        bytesRead++
        remain++
      }
    }
    const writeBuf = offset === 0 && bytesRead === buf.length ?
      buf : buf.slice(offset, offset + bytesRead)
    remain -= bytesRead
    blockRemain -= bytesRead
    pos += bytesRead
    offset += bytesRead
    this.write(writeBuf)
    if (!remain) {
      // body done: emit the remaining zero padding and close up
      if (blockRemain)
        this.write(Buffer.alloc(blockRemain))
      this.end()
      this[CLOSE](fd, _ => _)
      return
    }
    // if the buffer was handed to write() in full, we must not reuse it;
    // allocate a fresh one
    if (offset >= length) {
      buf = Buffer.allocUnsafe(length)
      offset = 0
    }
    length = buf.length - offset
    this[READ](fd, buf, offset, length, pos, remain, blockRemain)
  }
}
// Synchronous variant: overrides the async fs calls with their *Sync
// counterparts. The redundant pass-through constructor was removed
// (a subclass without a constructor inherits the parent's).
class WriteEntrySync extends WriteEntry {
  [LSTAT] () {
    this[ONLSTAT](fs.lstatSync(this.absolute))
  }
  [SYMLINK] () {
    this[ONREADLINK](fs.readlinkSync(this.absolute))
  }
  [OPENFILE] () {
    this[ONOPENFILE](fs.openSync(this.absolute, 'r'))
  }
  [READ] (fd, buf, offset, length, pos, remain, blockRemain) {
    let threw = true
    try {
      const bytesRead = fs.readSync(fd, buf, offset, length, pos)
      this[ONREAD](fd, buf, offset, length, pos, remain, blockRemain, bytesRead)
      threw = false
    } finally {
      // if ONREAD threw, don't leak the fd; swallow close errors so the
      // original exception propagates
      if (threw)
        try { this[CLOSE](fd) } catch (er) {}
    }
  }
  // BUGFIX: honor the callback the async base class always passes, so
  // completion notifications aren't silently dropped in the sync variant
  [CLOSE] (fd, cb) {
    fs.closeSync(fd)
    if (cb)
      cb()
  }
}
// expose the synchronous class as a property of the async one
WriteEntry.Sync = WriteEntrySync
// Map an fs.Stats object to the tar entry type name we can write;
// anything that isn't a file, directory, or symlink is 'Unsupported'.
const getType = stat => {
  if (stat.isFile())
    return 'File'
  if (stat.isDirectory())
    return 'Directory'
  if (stat.isSymbolicLink())
    return 'SymbolicLink'
  return 'Unsupported'
}
// export the async class; the sync one is available as WriteEntry.Sync
module.exports = WriteEntry
+105
-74
@@ -1,94 +0,125 @@ | ||
| // give it a tarball and a path, and it'll dump the contents | ||
| 'use strict' | ||
| module.exports = Extract | ||
| // tar -x | ||
| const hlo = require('./high-level-opt.js') | ||
| const Unpack = require('./unpack.js') | ||
| const fs = require('fs') | ||
| const path = require('path') | ||
| var tar = require("../tar.js") | ||
| , fstream = require("fstream") | ||
| , inherits = require("inherits") | ||
| , path = require("path") | ||
| const x = module.exports = (opt_, files, cb) => { | ||
| if (typeof opt_ === 'function') | ||
| cb = opt_, files = [], opt_ = {} | ||
| else if (Array.isArray(opt_)) | ||
| files = opt_, opt_ = {} | ||
| function Extract (opts) { | ||
| if (!(this instanceof Extract)) return new Extract(opts) | ||
| tar.Parse.apply(this) | ||
| if (typeof files === 'function') | ||
| cb = files, files = [] | ||
| if (typeof opts !== "object") { | ||
| opts = { path: opts } | ||
| } | ||
| if (!files) | ||
| files = [] | ||
| // better to drop in cwd? seems more standard. | ||
| opts.path = opts.path || path.resolve("node-tar-extract") | ||
| opts.type = "Directory" | ||
| opts.Directory = true | ||
| const opt = hlo(opt_) | ||
| // similar to --strip or --strip-components | ||
| opts.strip = +opts.strip | ||
| if (!opts.strip || opts.strip <= 0) opts.strip = 0 | ||
| if (opt.sync && typeof cb === 'function') | ||
| throw new TypeError('callback not supported for sync tar functions') | ||
| this._fst = fstream.Writer(opts) | ||
| if (!opt.file && typeof cb === 'function') | ||
| throw new TypeError('callback only supported with file option') | ||
| this.pause() | ||
| var me = this | ||
| if (files.length) | ||
| filesFilter(opt, files) | ||
| // Hardlinks in tarballs are relative to the root | ||
| // of the tarball. So, they need to be resolved against | ||
| // the target directory in order to be created properly. | ||
| me.on("entry", function (entry) { | ||
| // if there's a "strip" argument, then strip off that many | ||
| // path components. | ||
| if (opts.strip) { | ||
| var p = entry.path.split("/").slice(opts.strip).join("/") | ||
| entry.path = entry.props.path = p | ||
| if (entry.linkpath) { | ||
| var lp = entry.linkpath.split("/").slice(opts.strip).join("/") | ||
| entry.linkpath = entry.props.linkpath = lp | ||
| } | ||
| } | ||
| if (entry.type === "Link") { | ||
| entry.linkpath = entry.props.linkpath = | ||
| path.join(opts.path, path.join("/", entry.props.linkpath)) | ||
| } | ||
| return opt.file && opt.sync ? extractFileSync(opt) | ||
| : opt.file ? extractFile(opt, cb) | ||
| : opt.sync ? extractSync(opt) | ||
| : extract(opt) | ||
| } | ||
| if (entry.type === "SymbolicLink") { | ||
| var dn = path.dirname(entry.path) || "" | ||
| var linkpath = entry.props.linkpath | ||
| var target = path.resolve(opts.path, dn, linkpath) | ||
| if (target.indexOf(opts.path) !== 0) { | ||
| linkpath = path.join(opts.path, path.join("/", linkpath)) | ||
| // construct a filter that limits the file entries listed | ||
| // include child entries if a dir is included | ||
| const filesFilter = (opt, files) => { | ||
| const map = new Map(files.map(f => [f.replace(/\/+$/, ''), true])) | ||
| const filter = opt.filter | ||
| const mapHas = (file, r) => { | ||
| const root = r || path.parse(file).root || '.' | ||
| const ret = file === root ? false | ||
| : map.has(file) ? map.get(file) | ||
| : mapHas(path.dirname(file), root) | ||
| map.set(file, ret) | ||
| return ret | ||
| } | ||
| opt.filter = filter | ||
| ? (file, entry) => filter(file, entry) && mapHas(file.replace(/\/+$/, '')) | ||
| : file => mapHas(file.replace(/\/+$/, '')) | ||
| } | ||
| const extractFileSync = opt => { | ||
| const u = new Unpack.Sync(opt) | ||
| const file = opt.file | ||
| let threw = true | ||
| let fd | ||
| try { | ||
| const stat = fs.statSync(file) | ||
| const readSize = opt.maxReadSize || 16*1024*1024 | ||
| if (stat.size < readSize) | ||
| u.end(fs.readFileSync(file)) | ||
| else { | ||
| let pos = 0 | ||
| const buf = Buffer.allocUnsafe(readSize) | ||
| fd = fs.openSync(file, 'r') | ||
| while (pos < stat.size) { | ||
| let bytesRead = fs.readSync(fd, buf, 0, readSize, pos) | ||
| pos += bytesRead | ||
| u.write(buf.slice(0, bytesRead)) | ||
| } | ||
| entry.linkpath = entry.props.linkpath = linkpath | ||
| u.end() | ||
| fs.closeSync(fd) | ||
| } | ||
| }) | ||
| threw = false | ||
| } finally { | ||
| if (threw && fd) | ||
| try { fs.closeSync(fd) } catch (er) {} | ||
| } | ||
| } | ||
| this._fst.on("ready", function () { | ||
| me.pipe(me._fst, { end: false }) | ||
| me.resume() | ||
| }) | ||
| const extractFile = (opt, cb) => { | ||
| const u = new Unpack(opt) | ||
| const readSize = opt.maxReadSize || 16*1024*1024 | ||
| this._fst.on('error', function(err) { | ||
| me.emit('error', err) | ||
| }) | ||
| const file = opt.file | ||
| const p = new Promise((resolve, reject) => { | ||
| u.on('error', reject) | ||
| u.on('close', resolve) | ||
| this._fst.on('drain', function() { | ||
| me.emit('drain') | ||
| fs.stat(file, (er, stat) => { | ||
| if (er) | ||
| reject(er) | ||
| else if (stat.size < readSize) | ||
| fs.readFile(file, (er, data) => { | ||
| if (er) | ||
| return reject(er) | ||
| u.end(data) | ||
| }) | ||
| else { | ||
| const stream = fs.createReadStream(file, { | ||
| highWaterMark: readSize | ||
| }) | ||
| stream.on('error', reject) | ||
| stream.pipe(u) | ||
| } | ||
| }) | ||
| }) | ||
| return cb ? p.then(cb, cb) : p | ||
| } | ||
| // this._fst.on("end", function () { | ||
| // console.error("\nEEEE Extract End", me._fst.path) | ||
| // }) | ||
| this._fst.on("close", function () { | ||
| // console.error("\nEEEE Extract End", me._fst.path) | ||
| me.emit("finish") | ||
| me.emit("end") | ||
| me.emit("close") | ||
| }) | ||
| const extractSync = opt => { | ||
| return new Unpack.Sync(opt) | ||
| } | ||
| inherits(Extract, tar.Parse) | ||
| Extract.prototype._streamEnd = function () { | ||
| var me = this | ||
| if (!me._ended || me._entry) me.error("unexpected eof") | ||
| me._fst.end() | ||
| // my .end() is coming later. | ||
| const extract = opt => { | ||
| return new Unpack(opt) | ||
| } |
+216
-329
@@ -0,385 +1,272 @@ | ||
| 'use strict' | ||
| // parse a 512-byte header block to a data object, or vice-versa | ||
| // If the data won't fit nicely in a simple header, then generate | ||
| // the appropriate extended header file, and return that. | ||
| // encode returns `true` if a pax extended header is needed, because | ||
| // the data could not be faithfully encoded in a simple header. | ||
| // (Also, check header.needPax to see if it needs a pax header.) | ||
| module.exports = TarHeader | ||
| const types = require('./types.js') | ||
| const pathModule = require('path') | ||
| const large = require('./large-numbers.js') | ||
| var tar = require("../tar.js") | ||
| , fields = tar.fields | ||
| , fieldOffs = tar.fieldOffs | ||
| , fieldEnds = tar.fieldEnds | ||
| , fieldSize = tar.fieldSize | ||
| , numeric = tar.numeric | ||
| , assert = require("assert").ok | ||
| , space = " ".charCodeAt(0) | ||
| , slash = "/".charCodeAt(0) | ||
| , bslash = process.platform === "win32" ? "\\".charCodeAt(0) : null | ||
| const TYPE = Symbol('type') | ||
| function TarHeader (block) { | ||
| if (!(this instanceof TarHeader)) return new TarHeader(block) | ||
| if (block) this.decode(block) | ||
| } | ||
| class Header { | ||
| constructor (data, off) { | ||
| this.cksumValid = false | ||
| this.needPax = false | ||
| this.nullBlock = false | ||
| TarHeader.prototype = | ||
| { decode : decode | ||
| , encode: encode | ||
| , calcSum: calcSum | ||
| , checkSum: checkSum | ||
| } | ||
| this.block = null | ||
| this.path = null | ||
| this.mode = null | ||
| this.uid = null | ||
| this.gid = null | ||
| this.size = null | ||
| this.mtime = null | ||
| this.cksum = null | ||
| this[TYPE] = '0' | ||
| this.linkpath = null | ||
| this.uname = null | ||
| this.gname = null | ||
| this.devmaj = 0 | ||
| this.devmin = 0 | ||
| this.atime = null | ||
| this.ctime = null | ||
| TarHeader.parseNumeric = parseNumeric | ||
| TarHeader.encode = encode | ||
| TarHeader.decode = decode | ||
| // note that this will only do the normal ustar header, not any kind | ||
| // of extended posix header file. If something doesn't fit comfortably, | ||
| // then it will set obj.needExtended = true, and set the block to | ||
| // the closest approximation. | ||
| function encode (obj) { | ||
| if (!obj && !(this instanceof TarHeader)) throw new Error( | ||
| "encode must be called on a TarHeader, or supplied an object") | ||
| obj = obj || this | ||
| var block = obj.block = new Buffer(512) | ||
| // if the object has a "prefix", then that's actually an extension of | ||
| // the path field. | ||
| if (obj.prefix) { | ||
| // console.error("%% header encoding, got a prefix", obj.prefix) | ||
| obj.path = obj.prefix + "/" + obj.path | ||
| // console.error("%% header encoding, prefixed path", obj.path) | ||
| obj.prefix = "" | ||
| if (Buffer.isBuffer(data)) { | ||
| this.decode(data, off || 0) | ||
| } else if (data) | ||
| this.set(data) | ||
| } | ||
| obj.needExtended = false | ||
| decode (buf, off) { | ||
| if (!off) | ||
| off = 0 | ||
| if (obj.mode) { | ||
| if (typeof obj.mode === "string") obj.mode = parseInt(obj.mode, 8) | ||
| obj.mode = obj.mode & 0777 | ||
| } | ||
| if (!buf || !(buf.length >= off + 512)) | ||
| throw new Error('need 512 bytes for header') | ||
| for (var f = 0; fields[f] !== null; f ++) { | ||
| var field = fields[f] | ||
| , off = fieldOffs[f] | ||
| , end = fieldEnds[f] | ||
| , ret | ||
| this.path = decString(buf, off, 100) | ||
| this.mode = decNumber(buf, off + 100, 8) | ||
| this.uid = decNumber(buf, off + 108, 8) | ||
| this.gid = decNumber(buf, off + 116, 8) | ||
| this.size = decNumber(buf, off + 124, 12) | ||
| this.mtime = decDate(buf, off + 136, 12) | ||
| this.cksum = decNumber(buf, off + 148, 12) | ||
| switch (field) { | ||
| case "cksum": | ||
| // special, done below, after all the others | ||
| break | ||
| // old tar versions marked dirs as a file with a trailing / | ||
| this[TYPE] = decString(buf, off + 156, 1) | ||
| if (this[TYPE] === '') | ||
| this[TYPE] = '0' | ||
| if (this[TYPE] === '0' && this.path.substr(-1) === '/') | ||
| this[TYPE] = '5' | ||
| case "prefix": | ||
| // special, this is an extension of the "path" field. | ||
| // console.error("%% header encoding, skip prefix later") | ||
| break | ||
| // tar implementations sometimes incorrectly put the stat(dir).size | ||
| // as the size in the tarball, even though Directory entries are | ||
| // not able to have any body at all. In the very rare chance that | ||
| // it actually DOES have a body, we weren't going to do anything with | ||
| // it anyway, and it'll just be a warning about an invalid header. | ||
| if (this[TYPE] === '5') | ||
| this.size = 0 | ||
| case "type": | ||
| // convert from long name to a single char. | ||
| var type = obj.type || "0" | ||
| if (type.length > 1) { | ||
| type = tar.types[obj.type] | ||
| if (!type) type = "0" | ||
| } | ||
| writeText(block, off, end, type) | ||
| break | ||
| this.linkpath = decString(buf, off + 157, 100) | ||
| if (buf.slice(off + 257, off + 265).toString() === 'ustar\u000000') { | ||
| this.uname = decString(buf, off + 265, 32) | ||
| this.gname = decString(buf, off + 297, 32) | ||
| this.devmaj = decNumber(buf, off + 329, 8) | ||
| this.devmin = decNumber(buf, off + 337, 8) | ||
| if (buf[off + 475] !== 0) { | ||
| // definitely a prefix, definitely >130 chars. | ||
| const prefix = decString(buf, off + 345, 155) | ||
| this.path = prefix + '/' + this.path | ||
| } else { | ||
| const prefix = decString(buf, off + 345, 130) | ||
| if (prefix) | ||
| this.path = prefix + '/' + this.path | ||
| this.atime = decDate(buf, off + 476, 12) | ||
| this.ctime = decDate(buf, off + 488, 12) | ||
| } | ||
| } | ||
| case "path": | ||
| // uses the "prefix" field if > 100 bytes, but <= 255 | ||
| var pathLen = Buffer.byteLength(obj.path) | ||
| , pathFSize = fieldSize[fields.path] | ||
| , prefFSize = fieldSize[fields.prefix] | ||
| // paths between 100 and 255 should use the prefix field. | ||
| // longer than 255 | ||
| if (pathLen > pathFSize && | ||
| pathLen <= pathFSize + prefFSize) { | ||
| // need to find a slash somewhere in the middle so that | ||
| // path and prefix both fit in their respective fields | ||
| var searchStart = pathLen - 1 - pathFSize | ||
| , searchEnd = prefFSize | ||
| , found = false | ||
| , pathBuf = new Buffer(obj.path) | ||
| for ( var s = searchStart | ||
| ; (s <= searchEnd) | ||
| ; s ++ ) { | ||
| if (pathBuf[s] === slash || pathBuf[s] === bslash) { | ||
| found = s | ||
| break | ||
| } | ||
| } | ||
| if (found !== false) { | ||
| prefix = pathBuf.slice(0, found).toString("utf8") | ||
| path = pathBuf.slice(found + 1).toString("utf8") | ||
| ret = writeText(block, off, end, path) | ||
| off = fieldOffs[fields.prefix] | ||
| end = fieldEnds[fields.prefix] | ||
| // console.error("%% header writing prefix", off, end, prefix) | ||
| ret = writeText(block, off, end, prefix) || ret | ||
| break | ||
| } | ||
| } | ||
| // paths less than 100 chars don't need a prefix | ||
| // and paths longer than 255 need an extended header and will fail | ||
| // on old implementations no matter what we do here. | ||
| // Null out the prefix, and fallthrough to default. | ||
| // console.error("%% header writing no prefix") | ||
| var poff = fieldOffs[fields.prefix] | ||
| , pend = fieldEnds[fields.prefix] | ||
| writeText(block, poff, pend, "") | ||
| // fallthrough | ||
| // all other fields are numeric or text | ||
| default: | ||
| ret = numeric[field] | ||
| ? writeNumeric(block, off, end, obj[field]) | ||
| : writeText(block, off, end, obj[field] || "") | ||
| break | ||
| let sum = 8 * 0x20 | ||
| for (let i = off; i < off + 148; i++) { | ||
| sum += buf[i] | ||
| } | ||
| obj.needExtended = obj.needExtended || ret | ||
| for (let i = off + 156; i < off + 512; i++) { | ||
| sum += buf[i] | ||
| } | ||
| this.cksumValid = sum === this.cksum | ||
| if (this.cksum === null && sum === 8 * 0x20) | ||
| this.nullBlock = true | ||
| } | ||
| var off = fieldOffs[fields.cksum] | ||
| , end = fieldEnds[fields.cksum] | ||
| encode (buf, off) { | ||
| if (!buf) { | ||
| buf = this.block = Buffer.alloc(512) | ||
| off = 0 | ||
| } | ||
| writeNumeric(block, off, end, calcSum.call(this, block)) | ||
| if (!off) | ||
| off = 0 | ||
| return block | ||
| } | ||
| if (!(buf.length >= off + 512)) | ||
| throw new Error('need 512 bytes for header') | ||
| // if it's a negative number, or greater than will fit, | ||
| // then use write256. | ||
| var MAXNUM = { 12: 077777777777 | ||
| , 11: 07777777777 | ||
| , 8 : 07777777 | ||
| , 7 : 0777777 } | ||
| function writeNumeric (block, off, end, num) { | ||
| var writeLen = end - off | ||
| , maxNum = MAXNUM[writeLen] || 0 | ||
| const prefixSize = this.ctime || this.atime ? 130 : 155 | ||
| const split = splitPrefix(this.path || '', prefixSize) | ||
| const path = split[0] | ||
| const prefix = split[1] | ||
| this.needPax = split[2] | ||
| num = num || 0 | ||
| // console.error(" numeric", num) | ||
| this.needPax = encString(buf, off, 100, path) || this.needPax | ||
| this.needPax = encNumber(buf, off + 100, 8, this.mode) || this.needPax | ||
| this.needPax = encNumber(buf, off + 108, 8, this.uid) || this.needPax | ||
| this.needPax = encNumber(buf, off + 116, 8, this.gid) || this.needPax | ||
| this.needPax = encNumber(buf, off + 124, 12, this.size) || this.needPax | ||
| this.needPax = encDate(buf, off + 136, 12, this.mtime) || this.needPax | ||
| buf[off + 156] = this[TYPE].charCodeAt(0) | ||
| this.needPax = encString(buf, off + 157, 100, this.linkpath) || this.needPax | ||
| buf.write('ustar\u000000', off + 257, 8) | ||
| this.needPax = encString(buf, off + 265, 32, this.uname) || this.needPax | ||
| this.needPax = encString(buf, off + 297, 32, this.gname) || this.needPax | ||
| this.needPax = encNumber(buf, off + 329, 8, this.devmaj) || this.needPax | ||
| this.needPax = encNumber(buf, off + 337, 8, this.devmin) || this.needPax | ||
| this.needPax = encString(buf, off + 345, prefixSize, prefix) || this.needPax | ||
| if (buf[off + 475] !== 0) | ||
| this.needPax = encString(buf, off + 345, 155, prefix) || this.needPax | ||
| else { | ||
| this.needPax = encString(buf, off + 345, 130, prefix) || this.needPax | ||
| this.needPax = encDate(buf, off + 476, 12, this.atime) || this.needPax | ||
| this.needPax = encDate(buf, off + 488, 12, this.ctime) || this.needPax | ||
| } | ||
| if (num instanceof Date || | ||
| Object.prototype.toString.call(num) === "[object Date]") { | ||
| num = num.getTime() / 1000 | ||
| } | ||
| let sum = 8 * 0x20 | ||
| for (let i = off; i < off + 148; i++) { | ||
| sum += buf[i] | ||
| } | ||
| for (let i = off + 156; i < off + 512; i++) { | ||
| sum += buf[i] | ||
| } | ||
| this.cksum = sum | ||
| encNumber(buf, off + 148, 8, this.cksum) | ||
| this.cksumValid = true | ||
| if (num > maxNum || num < 0) { | ||
| write256(block, off, end, num) | ||
| // need an extended header if negative or too big. | ||
| return true | ||
| return this.needPax | ||
| } | ||
| // god, tar is so annoying | ||
| // if the string is small enough, you should put a space | ||
| // between the octal string and the \0, but if it doesn't | ||
| // fit, then don't. | ||
| var numStr = Math.floor(num).toString(8) | ||
| if (num < MAXNUM[writeLen - 1]) numStr += " " | ||
| // pad with "0" chars | ||
| if (numStr.length < writeLen) { | ||
| numStr = (new Array(writeLen - numStr.length).join("0")) + numStr | ||
| set (data) { | ||
| for (let i in data) { | ||
| if (data[i] !== null && data[i] !== undefined) | ||
| this[i] = data[i] | ||
| } | ||
| } | ||
| if (numStr.length !== writeLen - 1) { | ||
| throw new Error("invalid length: " + JSON.stringify(numStr) + "\n" + | ||
| "expected: "+writeLen) | ||
| get type () { | ||
| return types.name.get(this[TYPE]) || this[TYPE] | ||
| } | ||
| block.write(numStr, off, writeLen, "utf8") | ||
| block[end - 1] = 0 | ||
| } | ||
| function write256 (block, off, end, num) { | ||
| var buf = block.slice(off, end) | ||
| var positive = num >= 0 | ||
| buf[0] = positive ? 0x80 : 0xFF | ||
| // get the number as a base-256 tuple | ||
| if (!positive) num *= -1 | ||
| var tuple = [] | ||
| do { | ||
| var n = num % 256 | ||
| tuple.push(n) | ||
| num = (num - n) / 256 | ||
| } while (num) | ||
| var bytes = tuple.length | ||
| var fill = buf.length - bytes | ||
| for (var i = 1; i < fill; i ++) { | ||
| buf[i] = positive ? 0 : 0xFF | ||
| get typeKey () { | ||
| return this[TYPE] | ||
| } | ||
| // tuple is a base256 number, with [0] as the *least* significant byte | ||
| // if it's negative, then we need to flip all the bits once we hit the | ||
| // first non-zero bit. The 2's-complement is (0x100 - n), and the 1's- | ||
| // complement is (0xFF - n). | ||
| var zero = true | ||
| for (i = bytes; i > 0; i --) { | ||
| var byte = tuple[bytes - i] | ||
| if (positive) buf[fill + i] = byte | ||
| else if (zero && byte === 0) buf[fill + i] = 0 | ||
| else if (zero) { | ||
| zero = false | ||
| buf[fill + i] = 0x100 - byte | ||
| } else buf[fill + i] = 0xFF - byte | ||
| set type (type) { | ||
| if (types.code.has(type)) | ||
| this[TYPE] = types.code.get(type) | ||
| else | ||
| this[TYPE] = type | ||
| } | ||
| } | ||
| function writeText (block, off, end, str) { | ||
| // strings are written as utf8, then padded with \0 | ||
| var strLen = Buffer.byteLength(str) | ||
| , writeLen = Math.min(strLen, end - off) | ||
| // non-ascii fields need extended headers | ||
| // long fields get truncated | ||
| , needExtended = strLen !== str.length || strLen > writeLen | ||
| const splitPrefix = (p, prefixSize) => { | ||
| const pathSize = 100 | ||
| let pp = p | ||
| let prefix = '' | ||
| let ret | ||
| const root = pathModule.parse(p).root || '.' | ||
| // write the string, and null-pad | ||
| if (writeLen > 0) block.write(str, off, writeLen, "utf8") | ||
| for (var i = off + writeLen; i < end; i ++) block[i] = 0 | ||
| if (Buffer.byteLength(pp) < pathSize) | ||
| ret = [pp, prefix, false] | ||
| else { | ||
| // first set prefix to the dir, and path to the base | ||
| prefix = pathModule.dirname(pp) | ||
| pp = pathModule.basename(pp) | ||
| return needExtended | ||
| } | ||
| do { | ||
| // both fit! | ||
| if (Buffer.byteLength(pp) <= pathSize && | ||
| Buffer.byteLength(prefix) <= prefixSize) | ||
| ret = [pp, prefix, false] | ||
| function calcSum (block) { | ||
| block = block || this.block | ||
| assert(Buffer.isBuffer(block) && block.length === 512) | ||
| // prefix fits in prefix, but path doesn't fit in path | ||
| else if (Buffer.byteLength(pp) > pathSize && | ||
| Buffer.byteLength(prefix) <= prefixSize) | ||
| ret = [pp.substr(0, pathSize - 1), prefix, true] | ||
| if (!block) throw new Error("Need block to checksum") | ||
| else { | ||
| // make path take a bit from prefix | ||
| pp = pathModule.join(pathModule.basename(prefix), pp) | ||
| prefix = pathModule.dirname(prefix) | ||
| } | ||
| } while (prefix !== root && !ret) | ||
| // now figure out what it would be if the cksum was " " | ||
| var sum = 0 | ||
| , start = fieldOffs[fields.cksum] | ||
| , end = fieldEnds[fields.cksum] | ||
| for (var i = 0; i < fieldOffs[fields.cksum]; i ++) { | ||
| sum += block[i] | ||
| // at this point, found no resolution, just truncate | ||
| if (!ret) | ||
| ret = [p.substr(0, pathSize - 1), '', true] | ||
| } | ||
| for (var i = start; i < end; i ++) { | ||
| sum += space | ||
| } | ||
| for (var i = end; i < 512; i ++) { | ||
| sum += block[i] | ||
| } | ||
| return sum | ||
| return ret | ||
| } | ||
| const decString = (buf, off, size) => | ||
| buf.slice(off, off + size).toString('utf8').replace(/\0.*/, '') | ||
| function checkSum (block) { | ||
| var sum = calcSum.call(this, block) | ||
| block = block || this.block | ||
| const decDate = (buf, off, size) => | ||
| numToDate(decNumber(buf, off, size)) | ||
| var cksum = block.slice(fieldOffs[fields.cksum], fieldEnds[fields.cksum]) | ||
| cksum = parseNumeric(cksum) | ||
| const numToDate = num => num === null ? null : new Date(num * 1000) | ||
| return cksum === sum | ||
| } | ||
| const decNumber = (buf, off, size) => | ||
| buf[off] & 0x80 ? large.parse(buf.slice(off, off + size)) | ||
| : decSmallNumber(buf, off, size) | ||
| function decode (block) { | ||
| block = block || this.block | ||
| assert(Buffer.isBuffer(block) && block.length === 512) | ||
| const nanNull = value => isNaN(value) ? null : value | ||
| this.block = block | ||
| this.cksumValid = this.checkSum() | ||
| const decSmallNumber = (buf, off, size) => | ||
| nanNull(parseInt( | ||
| buf.slice(off, off + size) | ||
| .toString('utf8').replace(/\0.*$/, '').trim(), 8)) | ||
| var prefix = null | ||
| // slice off each field. | ||
| for (var f = 0; fields[f] !== null; f ++) { | ||
| var field = fields[f] | ||
| , val = block.slice(fieldOffs[f], fieldEnds[f]) | ||
| switch (field) { | ||
| case "ustar": | ||
| // if not ustar, then everything after that is just padding. | ||
| if (val.toString() !== "ustar\0") { | ||
| this.ustar = false | ||
| return | ||
| } else { | ||
| // console.error("ustar:", val, val.toString()) | ||
| this.ustar = val.toString() | ||
| } | ||
| break | ||
| // prefix is special, since it might signal the xstar header | ||
| case "prefix": | ||
| var atime = parseNumeric(val.slice(131, 131 + 12)) | ||
| , ctime = parseNumeric(val.slice(131 + 12, 131 + 12 + 12)) | ||
| if ((val[130] === 0 || val[130] === space) && | ||
| typeof atime === "number" && | ||
| typeof ctime === "number" && | ||
| val[131 + 12] === space && | ||
| val[131 + 12 + 12] === space) { | ||
| this.atime = atime | ||
| this.ctime = ctime | ||
| val = val.slice(0, 130) | ||
| } | ||
| prefix = val.toString("utf8").replace(/\0+$/, "") | ||
| // console.error("%% header reading prefix", prefix) | ||
| break | ||
| // all other fields are null-padding text | ||
| // or a number. | ||
| default: | ||
| if (numeric[field]) { | ||
| this[field] = parseNumeric(val) | ||
| } else { | ||
| this[field] = val.toString("utf8").replace(/\0+$/, "") | ||
| } | ||
| break | ||
| } | ||
| } | ||
| // if we got a prefix, then prepend it to the path. | ||
| if (prefix) { | ||
| this.path = prefix + "/" + this.path | ||
| // console.error("%% header got a prefix", this.path) | ||
| } | ||
| // the maximum encodable as a null-terminated octal, by field size | ||
| const MAXNUM = { | ||
| 12: 0o77777777777, | ||
| 8 : 0o7777777 | ||
| } | ||
| function parse256 (buf) { | ||
| // first byte MUST be either 80 or FF | ||
| // 80 for positive, FF for 2's comp | ||
| var positive | ||
| if (buf[0] === 0x80) positive = true | ||
| else if (buf[0] === 0xFF) positive = false | ||
| else return null | ||
| const encNumber = (buf, off, size, number) => | ||
| number === null ? false : | ||
| number > MAXNUM[size] || number < 0 | ||
| ? (large.encode(number, buf.slice(off, off + size)), true) | ||
| : (encSmallNumber(buf, off, size, number), false) | ||
| // build up a base-256 tuple from the least sig to the highest | ||
| var zero = false | ||
| , tuple = [] | ||
| for (var i = buf.length - 1; i > 0; i --) { | ||
| var byte = buf[i] | ||
| if (positive) tuple.push(byte) | ||
| else if (zero && byte === 0) tuple.push(0) | ||
| else if (zero) { | ||
| zero = false | ||
| tuple.push(0x100 - byte) | ||
| } else tuple.push(0xFF - byte) | ||
| } | ||
| const encSmallNumber = (buf, off, size, number) => | ||
| buf.write(octalString(number, size), off, size, 'ascii') | ||
| for (var sum = 0, i = 0, l = tuple.length; i < l; i ++) { | ||
| sum += tuple[i] * Math.pow(256, i) | ||
| } | ||
| const octalString = (number, size) => | ||
| padOctal(Math.floor(number).toString(8), size) | ||
| return positive ? sum : -1 * sum | ||
| } | ||
| const padOctal = (string, size) => | ||
| (string.length === size - 1 ? string | ||
| : new Array(size - string.length - 1).join('0') + string + ' ') + '\0' | ||
| function parseNumeric (f) { | ||
| if (f[0] & 0x80) return parse256(f) | ||
| const encDate = (buf, off, size, date) => | ||
| date === null ? false : | ||
| encNumber(buf, off, size, date.getTime() / 1000) | ||
| var str = f.toString("utf8").split("\0")[0].trim() | ||
| , res = parseInt(str, 8) | ||
| // enough to fill the longest string we've got | ||
| const NULLS = new Array(156).join('\0') | ||
| // pad with nulls, return true if it's longer or non-ascii | ||
| const encString = (buf, off, size, string) => | ||
| string === null ? false : | ||
| (buf.write(string + NULLS, off, size, 'utf8'), | ||
| string.length !== Buffer.byteLength(string) || string.length > size) | ||
| return isNaN(res) ? null : res | ||
| } | ||
| module.exports = Header |
+302
-182
@@ -1,236 +0,356 @@ | ||
| // pipe in an fstream, and it'll make a tarball. | ||
| // key-value pair argument is global extended header props. | ||
| 'use strict' | ||
| module.exports = Pack | ||
| // A readable tar stream creator | ||
| // Technically, this is a transform stream that you write paths into, | ||
| // and tar format comes out of. | ||
| // The `add()` method is like `write()` but returns this, | ||
| // and end() return `this` as well, so you can | ||
| // do `new Pack(opt).add('files').add('dir').end().pipe(output) | ||
| // You could also do something like: | ||
| // streamOfPaths().pipe(new Pack()).pipe(new fs.WriteStream('out.tar')) | ||
| var EntryWriter = require("./entry-writer.js") | ||
| , Stream = require("stream").Stream | ||
| , path = require("path") | ||
| , inherits = require("inherits") | ||
| , GlobalHeaderWriter = require("./global-header-writer.js") | ||
| , collect = require("fstream").collect | ||
| , eof = new Buffer(512) | ||
| class PackJob { | ||
| constructor (path, absolute) { | ||
| this.path = path || './' | ||
| this.absolute = absolute | ||
| this.realpath = null | ||
| this.stat = null | ||
| this.readdir = null | ||
| this.pending = false | ||
| this.ignore = false | ||
| this.piped = false | ||
| } | ||
| } | ||
| for (var i = 0; i < 512; i ++) eof[i] = 0 | ||
| const MiniPass = require('minipass') | ||
| const zlib = require('minizlib') | ||
| const WriteEntry = require('./write-entry.js') | ||
| const WriteEntrySync = WriteEntry.Sync | ||
| const Yallist = require('yallist') | ||
| const EOF = Buffer.alloc(1024) | ||
| const ONSTAT = Symbol('onStat') | ||
| const ENDED = Symbol('ended') | ||
| const QUEUE = Symbol('queue') | ||
| const CURRENT = Symbol('current') | ||
| const PROCESS = Symbol('process') | ||
| const PROCESSING = Symbol('processing') | ||
| const PROCESSJOB = Symbol('processJob') | ||
| const JOBS = Symbol('jobs') | ||
| const JOBDONE = Symbol('jobDone') | ||
| const ADDENTRY = Symbol('addEntry') | ||
| const STAT = Symbol('stat') | ||
| const READDIR = Symbol('readdir') | ||
| const ONREADDIR = Symbol('onreaddir') | ||
| const PIPE = Symbol('pipe') | ||
| const ENTRY = Symbol('entry') | ||
| const WRITEENTRYCLASS = Symbol('writeEntryClass') | ||
| const WRITE = Symbol('write') | ||
| const ONDRAIN = Symbol('ondrain') | ||
| inherits(Pack, Stream) | ||
| const fs = require('fs') | ||
| const path = require('path') | ||
| function Pack (props) { | ||
| // console.error("-- p ctor") | ||
| var me = this | ||
| if (!(me instanceof Pack)) return new Pack(props) | ||
| class Pack extends MiniPass { | ||
| constructor (opt) { | ||
| super(opt) | ||
| opt = opt || Object.create(null) | ||
| this.opt = opt | ||
| this.cwd = opt.cwd || process.cwd() | ||
| this.maxReadSize = opt.maxReadSize | ||
| this.preservePaths = !!opt.preservePaths | ||
| this.strict = !!opt.strict | ||
| this.prefix = (opt.prefix || '').replace(/(\\|\/)+$/, '') | ||
| this.linkCache = opt.linkCache || new Map() | ||
| this.statCache = opt.statCache || new Map() | ||
| this.readdirCache = opt.readdirCache || new Map() | ||
| this[WRITEENTRYCLASS] = WriteEntry | ||
| if (typeof opt.onwarn === 'function') | ||
| this.on('warn', opt.onwarn) | ||
| if (props) me._noProprietary = props.noProprietary | ||
| else me._noProprietary = false | ||
| this.zip = null | ||
| if (opt.gzip) { | ||
| if (typeof opt.gzip !== 'object') | ||
| opt.gzip = {} | ||
| this.zip = new zlib.Gzip(opt.gzip) | ||
| this.zip.on('data', chunk => super.write(chunk)) | ||
| this.zip.on('end', _ => super.end()) | ||
| this.zip.on('drain', _ => { | ||
| this[ONDRAIN]() | ||
| }) | ||
| } else | ||
| this.on('drain', this[ONDRAIN]) | ||
| me._global = props | ||
| this.portable = !!opt.portable | ||
| this.noDirRecurse = !!opt.noDirRecurse | ||
| this.follow = !!opt.follow | ||
| me.readable = true | ||
| me.writable = true | ||
| me._buffer = [] | ||
| // console.error("-- -- set current to null in ctor") | ||
| me._currentEntry = null | ||
| me._processing = false | ||
| this.filter = typeof opt.filter === 'function' ? opt.filter : _ => true | ||
| me._pipeRoot = null | ||
| me.on("pipe", function (src) { | ||
| if (src.root === me._pipeRoot) return | ||
| me._pipeRoot = src | ||
| src.on("end", function () { | ||
| me._pipeRoot = null | ||
| }) | ||
| me.add(src) | ||
| }) | ||
| } | ||
| this[QUEUE] = new Yallist | ||
| this[JOBS] = 0 | ||
| this.jobs = +opt.jobs || 4 | ||
| this[PROCESSING] = false | ||
| this[ENDED] = false | ||
| } | ||
| Pack.prototype.addGlobal = function (props) { | ||
| // console.error("-- p addGlobal") | ||
| if (this._didGlobal) return | ||
| this._didGlobal = true | ||
| [WRITE] (chunk) { | ||
| return super.write(chunk) | ||
| } | ||
| var me = this | ||
| GlobalHeaderWriter(props) | ||
| .on("data", function (c) { | ||
| me.emit("data", c) | ||
| }) | ||
| .end() | ||
| } | ||
| add (path) { | ||
| this.write(path) | ||
| return this | ||
| } | ||
| Pack.prototype.add = function (stream) { | ||
| if (this._global && !this._didGlobal) this.addGlobal(this._global) | ||
| end (path) { | ||
| if (path) | ||
| this.write(path) | ||
| this[ENDED] = true | ||
| this[PROCESS]() | ||
| return this | ||
| } | ||
| if (this._ended) return this.emit("error", new Error("add after end")) | ||
| write (path) { | ||
| if (this[ENDED]) | ||
| throw new Error('write after end') | ||
| collect(stream) | ||
| this._buffer.push(stream) | ||
| this._process() | ||
| this._needDrain = this._buffer.length > 0 | ||
| return !this._needDrain | ||
| } | ||
| this[ADDENTRY](path) | ||
| return this.flowing | ||
| } | ||
| Pack.prototype.pause = function () { | ||
| this._paused = true | ||
| if (this._currentEntry) this._currentEntry.pause() | ||
| this.emit("pause") | ||
| } | ||
| [ADDENTRY] (p) { | ||
| const absolute = path.resolve(this.cwd, p) | ||
| if (this.prefix) | ||
| p = this.prefix + '/' + p | ||
| this[QUEUE].push(new PackJob(p, absolute)) | ||
| this[PROCESS]() | ||
| } | ||
| Pack.prototype.resume = function () { | ||
| this._paused = false | ||
| if (this._currentEntry) this._currentEntry.resume() | ||
| this.emit("resume") | ||
| this._process() | ||
| } | ||
| [STAT] (job) { | ||
| job.pending = true | ||
| this[JOBS] += 1 | ||
| const stat = this.follow ? 'stat' : 'lstat' | ||
| fs[stat](job.absolute, (er, stat) => { | ||
| job.pending = false | ||
| this[JOBS] -= 1 | ||
| if (er) | ||
| return this.emit('error', er) | ||
| this[ONSTAT](job, stat) | ||
| }) | ||
| } | ||
| Pack.prototype.end = function () { | ||
| this._ended = true | ||
| this._buffer.push(eof) | ||
| this._process() | ||
| } | ||
| [ONSTAT] (job, stat) { | ||
| this.statCache.set(job.absolute, stat) | ||
| job.stat = stat | ||
| Pack.prototype._process = function () { | ||
| var me = this | ||
| if (me._paused || me._processing) { | ||
| return | ||
| } | ||
| // now we have the stat, we can filter it. | ||
| if (!this.filter(job.path, stat)) | ||
| job.ignore = true | ||
| var entry = me._buffer.shift() | ||
| if (!entry) { | ||
| if (me._needDrain) { | ||
| me.emit("drain") | ||
| } | ||
| return | ||
| this[PROCESS]() | ||
| } | ||
| if (entry.ready === false) { | ||
| // console.error("-- entry is not ready", entry) | ||
| me._buffer.unshift(entry) | ||
| entry.on("ready", function () { | ||
| // console.error("-- -- ready!", entry) | ||
| me._process() | ||
| [READDIR] (job) { | ||
| job.pending = true | ||
| this[JOBS] += 1 | ||
| fs.readdir(job.absolute, (er, entries) => { | ||
| job.pending = false | ||
| this[JOBS] -= 1 | ||
| if (er) | ||
| return this.emit('error', er) | ||
| this[ONREADDIR](job, entries) | ||
| }) | ||
| return | ||
| } | ||
| me._processing = true | ||
| [ONREADDIR] (job, entries) { | ||
| this.readdirCache.set(job.absolute, entries) | ||
| job.readdir = entries | ||
| this[PROCESS]() | ||
| } | ||
| if (entry === eof) { | ||
| // need 2 ending null blocks. | ||
| me.emit("data", eof) | ||
| me.emit("data", eof) | ||
| me.emit("end") | ||
| me.emit("close") | ||
| return | ||
| [PROCESS] () { | ||
| if (this[PROCESSING]) | ||
| return | ||
| this[PROCESSING] = true | ||
| for (let w = this[QUEUE].head; | ||
| w !== null && this[JOBS] < this.jobs; | ||
| w = w.next) { | ||
| this[PROCESSJOB](w.value) | ||
| } | ||
| this[PROCESSING] = false | ||
| if (this[ENDED] && !this[QUEUE].length && this[JOBS] === 0) { | ||
| if (this.zip) | ||
| this.zip.end(EOF) | ||
| else { | ||
| super.write(EOF) | ||
| super.end() | ||
| } | ||
| } | ||
| } | ||
| // Change the path to be relative to the root dir that was | ||
| // added to the tarball. | ||
| // | ||
| // XXX This should be more like how -C works, so you can | ||
| // explicitly set a root dir, and also explicitly set a pathname | ||
| // in the tarball to use. That way we can skip a lot of extra | ||
| // work when resolving symlinks for bundled dependencies in npm. | ||
| get [CURRENT] () { | ||
| return this[QUEUE] && this[QUEUE].head && this[QUEUE].head.value | ||
| } | ||
| var root = path.dirname((entry.root || entry).path); | ||
| if (me._global && me._global.fromBase && entry.root && entry.root.path) { | ||
| // user set 'fromBase: true' indicating tar root should be directory itself | ||
| root = entry.root.path; | ||
| [JOBDONE] (job) { | ||
| this[QUEUE].shift() | ||
| this[JOBS] -= 1 | ||
| this[PROCESS]() | ||
| } | ||
| var wprops = {} | ||
| [PROCESSJOB] (job) { | ||
| if (job.pending) | ||
| return | ||
| Object.keys(entry.props || {}).forEach(function (k) { | ||
| wprops[k] = entry.props[k] | ||
| }) | ||
| if (!job.stat) { | ||
| if (this.statCache.has(job.absolute)) | ||
| this[ONSTAT](job, this.statCache.get(job.absolute)) | ||
| else | ||
| this[STAT](job) | ||
| } | ||
| if (!job.stat) | ||
| return | ||
| if (me._noProprietary) wprops.noProprietary = true | ||
| // filtered out! | ||
| if (job.ignore) { | ||
| if (job === this[CURRENT]) | ||
| this[QUEUE].shift() | ||
| return | ||
| } | ||
| wprops.path = path.relative(root, entry.path || '') | ||
| if (!this.noDirRecurse && job.stat.isDirectory() && !job.readdir) { | ||
| if (this.readdirCache.has(job.absolute)) | ||
| this[ONREADDIR](job, this.readdirCache.get(job.absolute)) | ||
| else | ||
| this[READDIR](job) | ||
| if (!job.readdir) | ||
| return | ||
| } | ||
| // actually not a matter of opinion or taste. | ||
| if (process.platform === "win32") { | ||
| wprops.path = wprops.path.replace(/\\/g, "/") | ||
| if (!job.entry) { | ||
| job.entry = this[ENTRY](job) | ||
| if (!job.entry) { | ||
| job.ignore = true | ||
| return | ||
| } | ||
| } | ||
| if (job === this[CURRENT] && !job.piped) | ||
| this[PIPE](job) | ||
| } | ||
| if (!wprops.type) | ||
| wprops.type = 'Directory' | ||
| warn (msg, data) { | ||
| return this.emit('warn', msg, data) | ||
| } | ||
| switch (wprops.type) { | ||
| // sockets not supported | ||
| case "Socket": | ||
| return | ||
| [ENTRY] (job) { | ||
| this[JOBS] += 1 | ||
| try { | ||
| return new this[WRITEENTRYCLASS](job.path, { | ||
| onwarn: (msg, data) => { | ||
| this.warn(msg, data) | ||
| }, | ||
| cwd: this.cwd, | ||
| absolute: job.absolute, | ||
| preservePaths: this.preservePaths, | ||
| maxReadSize: this.maxReadSize, | ||
| strict: this.strict, | ||
| portable: this.portable, | ||
| linkCache: this.linkCache, | ||
| statCache: this.statCache | ||
| }).on('end', _ => { | ||
| this[JOBDONE](job) | ||
| }) | ||
| } catch (er) { | ||
| this.emit('error', er) | ||
| } | ||
| } | ||
| case "Directory": | ||
| wprops.path += "/" | ||
| wprops.size = 0 | ||
| break | ||
| [ONDRAIN] () { | ||
| if (this[CURRENT] && this[CURRENT].entry) | ||
| this[CURRENT].entry.resume() | ||
| } | ||
| case "Link": | ||
| var lp = path.resolve(path.dirname(entry.path), entry.linkpath) | ||
| wprops.linkpath = path.relative(root, lp) || "." | ||
| wprops.size = 0 | ||
| break | ||
| // like .pipe() but using super, because our write() is special | ||
| [PIPE] (job) { | ||
| job.piped = true | ||
| case "SymbolicLink": | ||
| var lp = path.resolve(path.dirname(entry.path), entry.linkpath) | ||
| wprops.linkpath = path.relative(path.dirname(entry.path), lp) || "." | ||
| wprops.size = 0 | ||
| break | ||
| } | ||
| if (job.readdir) | ||
| job.readdir.forEach(entry => { | ||
| const base = job.path === './' ? '' : job.path.replace(/\/*$/, '/') | ||
| this[ADDENTRY](base + entry) | ||
| }) | ||
| // console.error("-- new writer", wprops) | ||
| // if (!wprops.type) { | ||
| // // console.error("-- no type?", entry.constructor.name, entry) | ||
| // } | ||
| const source = job.entry | ||
| const zip = this.zip | ||
| // console.error("-- -- set current to new writer", wprops.path) | ||
| var writer = me._currentEntry = EntryWriter(wprops) | ||
| if (zip) | ||
| source.on('data', chunk => { | ||
| if (!zip.write(chunk)) | ||
| source.pause() | ||
| }) | ||
| else | ||
| source.on('data', chunk => { | ||
| if (!super.write(chunk)) | ||
| source.pause() | ||
| }) | ||
| } | ||
| writer.parent = me | ||
| resume () { | ||
| if (this.zip) | ||
| this.zip.resume() | ||
| return super.resume() | ||
| } | ||
| // writer.on("end", function () { | ||
| // // console.error("-- -- writer end", writer.path) | ||
| // }) | ||
| pause () { | ||
| if (this.zip) | ||
| this.zip.pause() | ||
| return super.pause() | ||
| } | ||
| } | ||
| writer.on("data", function (c) { | ||
| me.emit("data", c) | ||
| }) | ||
| class PackSync extends Pack { | ||
| constructor (opt) { | ||
| super(opt) | ||
| this[WRITEENTRYCLASS] = WriteEntrySync | ||
| } | ||
| writer.on("header", function () { | ||
| Buffer.prototype.toJSON = function () { | ||
| return this.toString().split(/\0/).join(".") | ||
| } | ||
| // console.error("-- -- writer header %j", writer.props) | ||
| if (writer.props.size === 0) nextEntry() | ||
| }) | ||
| writer.on("close", nextEntry) | ||
| // pause/resume are no-ops in sync streams. | ||
| pause () {} | ||
| resume () {} | ||
| var ended = false | ||
| function nextEntry () { | ||
| if (ended) return | ||
| ended = true | ||
| [STAT] (job) { | ||
| const stat = this.follow ? 'statSync' : 'lstatSync' | ||
| this[ONSTAT](job, fs[stat](job.absolute)) | ||
| } | ||
| // console.error("-- -- writer close", writer.path) | ||
| // console.error("-- -- set current to null", wprops.path) | ||
| me._currentEntry = null | ||
| me._processing = false | ||
| me._process() | ||
| [READDIR] (job, stat) { | ||
| this[ONREADDIR](job, fs.readdirSync(job.absolute)) | ||
| } | ||
| writer.on("error", function (er) { | ||
| // console.error("-- -- writer error", writer.path) | ||
| me.emit("error", er) | ||
| }) | ||
| // gotta get it all in this tick | ||
| [PIPE] (job) { | ||
| const source = job.entry | ||
| const zip = this.zip | ||
| // if it's the root, then there's no need to add its entries, | ||
| // or data, since they'll be added directly. | ||
| if (entry === me._pipeRoot) { | ||
| // console.error("-- is the root, don't auto-add") | ||
| writer.add = null | ||
| if (job.readdir) | ||
| job.readdir.forEach(entry => { | ||
| this[ADDENTRY](job.path + '/' + entry) | ||
| }) | ||
| if (zip) | ||
| source.on('data', chunk => { | ||
| zip.write(chunk) | ||
| }) | ||
| else | ||
| source.on('data', chunk => { | ||
| super[WRITE](chunk) | ||
| }) | ||
| } | ||
| entry.pipe(writer) | ||
| } | ||
| Pack.prototype.destroy = function () {} | ||
| Pack.prototype.write = function () {} | ||
| Pack.Sync = PackSync | ||
| module.exports = Pack |
+360
-223
@@ -0,275 +1,412 @@ | ||
| 'use strict' | ||
| // A writable stream. | ||
| // It emits "entry" events, which provide a readable stream that has | ||
| // header info attached. | ||
| // this[BUFFER] is the remainder of a chunk if we're waiting for | ||
| // the full 512 bytes of a header to come in. We will Buffer.concat() | ||
| // it to the next write(), which is a mem copy, but a small one. | ||
| // | ||
| // this[QUEUE] is a Yallist of entries that haven't been emitted | ||
| // yet this can only get filled up if the user keeps write()ing after | ||
| // a write() returns false, or does a write() with more than one entry | ||
| // | ||
| // We don't buffer chunks, we always parse them and either create an | ||
| // entry, or push it into the active entry. The ReadEntry class knows | ||
| // to throw data away if .ignore=true | ||
| // | ||
| // Shift entry off the buffer when it emits 'end', and emit 'entry' for | ||
| // the next one in the list. | ||
| // | ||
| // At any time, we're pushing body chunks into the entry at WRITEENTRY, | ||
| // and waiting for 'end' on the entry at READENTRY | ||
| // | ||
| // ignored entries get .resume() called on them straight away | ||
| module.exports = Parse.create = Parse | ||
| const path = require('path') | ||
| const Header = require('./header.js') | ||
| const EE = require('events') | ||
| const Yallist = require('yallist') | ||
| const maxMetaEntrySize = 1024 * 1024 | ||
| const Entry = require('./read-entry.js') | ||
| const Pax = require('./pax.js') | ||
| const zlib = require('minizlib') | ||
| var stream = require("stream") | ||
| , Stream = stream.Stream | ||
| , BlockStream = require("block-stream") | ||
| , tar = require("../tar.js") | ||
| , TarHeader = require("./header.js") | ||
| , Entry = require("./entry.js") | ||
| , BufferEntry = require("./buffer-entry.js") | ||
| , ExtendedHeader = require("./extended-header.js") | ||
| , assert = require("assert").ok | ||
| , inherits = require("inherits") | ||
| , fstream = require("fstream") | ||
| const gzipHeader = new Buffer([0x1f, 0x8b]) | ||
| const STATE = Symbol('state') | ||
| const WRITEENTRY = Symbol('writeEntry') | ||
| const READENTRY = Symbol('readEntry') | ||
| const NEXTENTRY = Symbol('nextEntry') | ||
| const PROCESSENTRY = Symbol('processEntry') | ||
| const EX = Symbol('extendedHeader') | ||
| const GEX = Symbol('globalExtendedHeader') | ||
| const META = Symbol('meta') | ||
| const EMITMETA = Symbol('emitMeta') | ||
| const BUFFER = Symbol('buffer') | ||
| const QUEUE = Symbol('queue') | ||
| const ENDED = Symbol('ended') | ||
| const EMITTEDEND = Symbol('emittedEnd') | ||
| const EMIT = Symbol('emit') | ||
| const UNZIP = Symbol('unzip') | ||
| const CONSUMECHUNK = Symbol('consumeChunk') | ||
| const CONSUMECHUNKSUB = Symbol('consumeChunkSub') | ||
| const CONSUMEBODY = Symbol('consumeBody') | ||
| const CONSUMEMETA = Symbol('consumeMeta') | ||
| const CONSUMEHEADER = Symbol('consumeHeader') | ||
| const CONSUMING = Symbol('consuming') | ||
| const BUFFERCONCAT = Symbol('bufferConcat') | ||
| const MAYBEEND = Symbol('maybeEnd') | ||
| const WRITING = Symbol('writing') | ||
| const ABORTED = Symbol('aborted') | ||
| // reading a tar is a lot like reading a directory | ||
| // However, we're actually not going to run the ctor, | ||
| // since it does a stat and various other stuff. | ||
| // This inheritance gives us the pause/resume/pipe | ||
| // behavior that is desired. | ||
| inherits(Parse, fstream.Reader) | ||
| function noop () { return true } | ||
| function Parse () { | ||
| var me = this | ||
| if (!(me instanceof Parse)) return new Parse() | ||
| module.exports = class Parser extends EE { | ||
| constructor (opt) { | ||
| const start = process.hrtime() | ||
| opt = opt || {} | ||
| super(opt) | ||
| // doesn't apply fstream.Reader ctor? | ||
| // no, becasue we don't want to stat/etc, we just | ||
| // want to get the entry/add logic from .pipe() | ||
| Stream.apply(me) | ||
| this.strict = !!opt.strict | ||
| this.maxMetaEntrySize = opt.maxMetaEntrySize || maxMetaEntrySize | ||
| this.filter = typeof opt.filter === 'function' ? opt.filter : noop | ||
| me.writable = true | ||
| me.readable = true | ||
| me._stream = new BlockStream(512) | ||
| me.position = 0 | ||
| me._ended = false | ||
| this[QUEUE] = new Yallist() | ||
| this[BUFFER] = null | ||
| this[READENTRY] = null | ||
| this[WRITEENTRY] = null | ||
| this[STATE] = 'begin' | ||
| this[META] = '' | ||
| this[EX] = null | ||
| this[GEX] = null | ||
| this[ENDED] = false | ||
| this[UNZIP] = null | ||
| this[ABORTED] = false | ||
| if (typeof opt.onwarn === 'function') | ||
| this.on('warn', opt.onwarn) | ||
| if (typeof opt.onentry === 'function') | ||
| this.on('entry', opt.onentry) | ||
| } | ||
| me._stream.on("error", function (e) { | ||
| me.emit("error", e) | ||
| }) | ||
| [CONSUMEHEADER] (chunk, position) { | ||
| const header = new Header(chunk, position) | ||
| me._stream.on("data", function (c) { | ||
| me._process(c) | ||
| }) | ||
| if (header.nullBlock) | ||
| this[EMIT]('nullBlock') | ||
| else if (!header.cksumValid) | ||
| this.warn('invalid entry', header) | ||
| else if (!header.path) | ||
| this.warn('invalid: path is required', header) | ||
| else { | ||
| const type = header.type | ||
| if (/^(Symbolic)?Link$/.test(type) && !header.linkpath) | ||
| this.warn('invalid: linkpath required', header) | ||
| else if (!/^(Symbolic)?Link$/.test(type) && header.linkpath) | ||
| this.warn('invalid: linkpath forbidden', header) | ||
| else { | ||
| const entry = this[WRITEENTRY] = new Entry(header, this[EX], this[GEX]) | ||
| me._stream.on("end", function () { | ||
| me._streamEnd() | ||
| }) | ||
| if (entry.meta) { | ||
| if (entry.size > this.maxMetaEntrySize) { | ||
| entry.ignore = true | ||
| this[EMIT]('ignoredEntry', entry) | ||
| this[STATE] = 'ignore' | ||
| } else if (entry.size > 0) { | ||
| this[META] = '' | ||
| entry.on('data', c => this[META] += c) | ||
| this[STATE] = 'meta' | ||
| } | ||
| } else { | ||
| me._stream.on("drain", function () { | ||
| me.emit("drain") | ||
| }) | ||
| } | ||
| this[EX] = null | ||
| entry.ignore = entry.ignore || !this.filter(entry.path, entry) | ||
| if (entry.ignore) { | ||
| this[EMIT]('ignoredEntry', entry) | ||
| this[STATE] = entry.remain ? 'ignore' : 'begin' | ||
| } else { | ||
| if (entry.remain) | ||
| this[STATE] = 'body' | ||
| else { | ||
| this[STATE] = 'begin' | ||
| entry.end() | ||
| } | ||
| // overridden in Extract class, since it needs to | ||
| // wait for its DirWriter part to finish before | ||
| // emitting "end" | ||
| Parse.prototype._streamEnd = function () { | ||
| var me = this | ||
| if (!me._ended || me._entry) me.error("unexpected eof") | ||
| me.emit("end") | ||
| } | ||
| if (!this[READENTRY]) { | ||
| this[QUEUE].push(entry) | ||
| this[NEXTENTRY]() | ||
| } else | ||
| this[QUEUE].push(entry) | ||
| } | ||
| } | ||
| } | ||
| } | ||
| } | ||
| // a tar reader is actually a filter, not just a readable stream. | ||
| // So, you should pipe a tarball stream into it, and it needs these | ||
| // write/end methods to do that. | ||
| Parse.prototype.write = function (c) { | ||
| if (this._ended) { | ||
| // gnutar puts a LOT of nulls at the end. | ||
| // you can keep writing these things forever. | ||
| // Just ignore them. | ||
| for (var i = 0, l = c.length; i > l; i ++) { | ||
| if (c[i] !== 0) return this.error("write() after end()") | ||
| [PROCESSENTRY] (entry) { | ||
| let go = true | ||
| if (!entry) { | ||
| this[READENTRY] = null | ||
| go = false | ||
| } else if (Array.isArray(entry)) | ||
| this.emit.apply(this, entry) | ||
| else { | ||
| this[READENTRY] = entry | ||
| this.emit('entry', entry) | ||
| if (!entry.emittedEnd) { | ||
| entry.on('end', _ => this[NEXTENTRY]()) | ||
| go = false | ||
| } | ||
| } | ||
| return | ||
| return go | ||
| } | ||
| return this._stream.write(c) | ||
| } | ||
| Parse.prototype.end = function (c) { | ||
| this._ended = true | ||
| return this._stream.end(c) | ||
| } | ||
| [NEXTENTRY] () { | ||
| do {} while (this[PROCESSENTRY](this[QUEUE].shift())) | ||
| // don't need to do anything, since we're just | ||
| // proxying the data up from the _stream. | ||
| // Just need to override the parent's "Not Implemented" | ||
| // error-thrower. | ||
| Parse.prototype._read = function () {} | ||
| if (!this[QUEUE].length) { | ||
| // At this point, there's nothing in the queue, but we may have an | ||
| // entry which is being consumed (readEntry). | ||
| // If we don't, then we definitely can handle more data. | ||
| // If we do, and either it's flowing, or it has never had any data | ||
| // written to it, then it needs more. | ||
| // The only other possibility is that it has returned false from a | ||
| // write() call, so we wait for the next drain to continue. | ||
| const re = this[READENTRY] | ||
| const drainNow = !re || re.flowing || re.size === re.remain | ||
| if (drainNow) { | ||
| if (!this[WRITING]) | ||
| this.emit('drain') | ||
| } else | ||
| re.once('drain', _ => this.emit('drain')) | ||
| } | ||
| } | ||
| Parse.prototype._process = function (c) { | ||
| assert(c && c.length === 512, "block size should be 512") | ||
| [CONSUMEBODY] (chunk, position) { | ||
| // write up to but no more than writeEntry.blockRemain | ||
| const entry = this[WRITEENTRY] | ||
| const br = entry.blockRemain | ||
| const c = (br >= chunk.length && position === 0) ? chunk | ||
| : chunk.slice(position, position + br) | ||
| // one of three cases. | ||
| // 1. A new header | ||
| // 2. A part of a file/extended header | ||
| // 3. One of two or more EOF null blocks | ||
| entry.write(c) | ||
| if (this._entry) { | ||
| var entry = this._entry | ||
| if(!entry._abort) entry.write(c) | ||
| else { | ||
| entry._remaining -= c.length | ||
| if(entry._remaining < 0) entry._remaining = 0 | ||
| } | ||
| if (entry._remaining === 0) { | ||
| if (!entry.blockRemain) { | ||
| this[STATE] = 'begin' | ||
| this[WRITEENTRY] = null | ||
| entry.end() | ||
| this._entry = null | ||
| } | ||
| } else { | ||
| // either zeroes or a header | ||
| var zero = true | ||
| for (var i = 0; i < 512 && zero; i ++) { | ||
| zero = c[i] === 0 | ||
| } | ||
| // eof is *at least* 2 blocks of nulls, and then the end of the | ||
| // file. you can put blocks of nulls between entries anywhere, | ||
| // so appending one tarball to another is technically valid. | ||
| // ending without the eof null blocks is not allowed, however. | ||
| if (zero) { | ||
| if (this._eofStarted) | ||
| this._ended = true | ||
| this._eofStarted = true | ||
| } else { | ||
| this._eofStarted = false | ||
| this._startEntry(c) | ||
| } | ||
| return c.length | ||
| } | ||
| this.position += 512 | ||
| } | ||
| [CONSUMEMETA] (chunk, position) { | ||
| const entry = this[WRITEENTRY] | ||
| const ret = this[CONSUMEBODY](chunk, position) | ||
| // take a header chunk, start the right kind of entry. | ||
| Parse.prototype._startEntry = function (c) { | ||
| var header = new TarHeader(c) | ||
| , self = this | ||
| , entry | ||
| , ev | ||
| , EntryType | ||
| , onend | ||
| , meta = false | ||
| // if we finished, then the entry is reset | ||
| if (!this[WRITEENTRY]) | ||
| this[EMITMETA](entry) | ||
| if (null === header.size || !header.cksumValid) { | ||
| var e = new Error("invalid tar file") | ||
| e.header = header | ||
| e.tar_file_offset = this.position | ||
| e.tar_block = this.position / 512 | ||
| return this.emit("error", e) | ||
| return ret | ||
| } | ||
| switch (tar.types[header.type]) { | ||
| case "File": | ||
| case "OldFile": | ||
| case "Link": | ||
| case "SymbolicLink": | ||
| case "CharacterDevice": | ||
| case "BlockDevice": | ||
| case "Directory": | ||
| case "FIFO": | ||
| case "ContiguousFile": | ||
| case "GNUDumpDir": | ||
| // start a file. | ||
| // pass in any extended headers | ||
| // These ones consumers are typically most interested in. | ||
| EntryType = Entry | ||
| ev = "entry" | ||
| break | ||
| [EMIT] (ev, data, extra) { | ||
| if (!this[QUEUE].length && !this[READENTRY]) | ||
| this.emit(ev, data, extra) | ||
| else | ||
| this[QUEUE].push([ev, data, extra]) | ||
| } | ||
| case "GlobalExtendedHeader": | ||
| // extended headers that apply to the rest of the tarball | ||
| EntryType = ExtendedHeader | ||
| onend = function () { | ||
| self._global = self._global || {} | ||
| Object.keys(entry.fields).forEach(function (k) { | ||
| self._global[k] = entry.fields[k] | ||
| [EMITMETA] (entry) { | ||
| this[EMIT]('meta', this[META]) | ||
| switch (entry.type) { | ||
| case 'ExtendedHeader': | ||
| case 'OldExtendedHeader': | ||
| this[EX] = Pax.parse(this[META], this[EX], false) | ||
| break | ||
| case 'GlobalExtendedHeader': | ||
| this[GEX] = Pax.parse(this[META], this[GEX], true) | ||
| break | ||
| case 'NextFileHasLongPath': | ||
| case 'OldGnuLongPath': | ||
| this[EX] = this[EX] || Object.create(null) | ||
| this[EX].path = this[META] | ||
| break | ||
| case 'NextFileHasLongLinkpath': | ||
| this[EX] = this[EX] || Object.create(null) | ||
| this[EX].linkpath = this[META] | ||
| break | ||
| /* istanbul ignore next */ | ||
| default: throw new Error('unknown meta: ' + entry.type) | ||
| } | ||
| } | ||
| abort (msg, error) { | ||
| this[ABORTED] = true | ||
| this.warn(msg, error) | ||
| this.emit('abort') | ||
| } | ||
| write (chunk) { | ||
| if (this[ABORTED]) | ||
| return | ||
| // first write, might be gzipped | ||
| if (this[UNZIP] === null && chunk) { | ||
| if (this[BUFFER]) { | ||
| chunk = Buffer.concat([this[BUFFER], chunk]) | ||
| this[BUFFER] = null | ||
| } | ||
| if (chunk.length < gzipHeader.length) { | ||
| this[BUFFER] = chunk | ||
| return true | ||
| } | ||
| for (let i = 0; this[UNZIP] === null && i < gzipHeader.length; i++) { | ||
| if (chunk[i] !== gzipHeader[i]) | ||
| this[UNZIP] = false | ||
| } | ||
| if (this[UNZIP] === null) { | ||
| const ended = this[ENDED] | ||
| this[ENDED] = false | ||
| this[UNZIP] = new zlib.Unzip() | ||
| this[UNZIP].on('data', chunk => this[CONSUMECHUNK](chunk)) | ||
| this[UNZIP].on('error', er => | ||
| this.abort('zlib error: ' + er.message, er)) | ||
| this[UNZIP].on('end', _ => { | ||
| this[ENDED] = true | ||
| this[CONSUMECHUNK]() | ||
| }) | ||
| return ended ? this[UNZIP].end(chunk) : this[UNZIP].write(chunk) | ||
| } | ||
| ev = "globalExtendedHeader" | ||
| meta = true | ||
| break | ||
| } | ||
| case "ExtendedHeader": | ||
| case "OldExtendedHeader": | ||
| // extended headers that apply to the next entry | ||
| EntryType = ExtendedHeader | ||
| onend = function () { | ||
| self._extended = entry.fields | ||
| } | ||
| ev = "extendedHeader" | ||
| meta = true | ||
| break | ||
| this[WRITING] = true | ||
| if (this[UNZIP]) | ||
| this[UNZIP].write(chunk) | ||
| else | ||
| this[CONSUMECHUNK](chunk) | ||
| this[WRITING] = false | ||
| case "NextFileHasLongLinkpath": | ||
| // set linkpath=<contents> in extended header | ||
| EntryType = BufferEntry | ||
| onend = function () { | ||
| self._extended = self._extended || {} | ||
| self._extended.linkpath = entry.body | ||
| } | ||
| ev = "longLinkpath" | ||
| meta = true | ||
| break | ||
| // return false if there's a queue, or if the current entry isn't flowing | ||
| const ret = | ||
| this[QUEUE].length ? false : | ||
| this[READENTRY] ? this[READENTRY].flowing : | ||
| true | ||
| case "NextFileHasLongPath": | ||
| case "OldGnuLongPath": | ||
| // set path=<contents> in file-extended header | ||
| EntryType = BufferEntry | ||
| onend = function () { | ||
| self._extended = self._extended || {} | ||
| self._extended.path = entry.body | ||
| } | ||
| ev = "longPath" | ||
| meta = true | ||
| break | ||
| // if we have no queue, then that means a clogged READENTRY | ||
| if (!ret && !this[QUEUE].length) | ||
| this[READENTRY].once('drain', _ => this.emit('drain')) | ||
| default: | ||
| // all the rest we skip, but still set the _entry | ||
| // member, so that we can skip over their data appropriately. | ||
| // emit an event to say that this is an ignored entry type? | ||
| EntryType = Entry | ||
| ev = "ignoredEntry" | ||
| break | ||
| return ret | ||
| } | ||
| var global, extended | ||
| if (meta) { | ||
| global = extended = null | ||
| } else { | ||
| var global = this._global | ||
| var extended = this._extended | ||
| [BUFFERCONCAT] (c) { | ||
| if (c && !this[ABORTED]) | ||
| this[BUFFER] = this[BUFFER] ? Buffer.concat([this[BUFFER], c]) : c | ||
| } | ||
| // extendedHeader only applies to one entry, so once we start | ||
| // an entry, it's over. | ||
| this._extended = null | ||
| [MAYBEEND] () { | ||
| if (this[ENDED] && !this[EMITTEDEND] && !this[ABORTED]) { | ||
| this[EMITTEDEND] = true | ||
| const entry = this[WRITEENTRY] | ||
| if (entry && entry.blockRemain) { | ||
| const have = this[BUFFER] ? this[BUFFER].length : 0 | ||
| this.warn('Truncated input (needed ' + entry.blockRemain + | ||
| ' more bytes, only ' + have + ' available)', entry) | ||
| if (this[BUFFER]) | ||
| entry.write(this[BUFFER]) | ||
| entry.end() | ||
| } | ||
| this[EMIT]('end') | ||
| } | ||
| } | ||
| entry = new EntryType(header, extended, global) | ||
| entry.meta = meta | ||
| // only proxy data events of normal files. | ||
| if (!meta) { | ||
| entry.on("data", function (c) { | ||
| me.emit("data", c) | ||
| }) | ||
| [CONSUMECHUNK] (chunk) { | ||
| if (this[CONSUMING]) { | ||
| this[BUFFERCONCAT](chunk) | ||
| } else if (!chunk && !this[BUFFER]) { | ||
| this[MAYBEEND]() | ||
| } else { | ||
| this[CONSUMING] = true | ||
| if (this[BUFFER]) { | ||
| this[BUFFERCONCAT](chunk) | ||
| const c = this[BUFFER] | ||
| this[BUFFER] = null | ||
| this[CONSUMECHUNKSUB](c) | ||
| } else { | ||
| this[CONSUMECHUNKSUB](chunk) | ||
| } | ||
| while (this[BUFFER] && this[BUFFER].length >= 512 && !this[ABORTED]) { | ||
| const c = this[BUFFER] | ||
| this[BUFFER] = null | ||
| this[CONSUMECHUNKSUB](c) | ||
| } | ||
| this[CONSUMING] = false | ||
| } | ||
| if (!this[BUFFER] || this[ENDED]) | ||
| this[MAYBEEND]() | ||
| } | ||
| if (onend) entry.on("end", onend) | ||
| [CONSUMECHUNKSUB] (chunk) { | ||
| // we know that we are in CONSUMING mode, so anything written goes into | ||
| // the buffer. Advance the position and put any remainder in the buffer. | ||
| let position = 0 | ||
| let length = chunk.length | ||
| while (position + 512 <= length && !this[ABORTED]) { | ||
| switch (this[STATE]) { | ||
| case 'begin': | ||
| this[CONSUMEHEADER](chunk, position) | ||
| position += 512 | ||
| break | ||
| this._entry = entry | ||
| var me = this | ||
| case 'ignore': | ||
| case 'body': | ||
| position += this[CONSUMEBODY](chunk, position) | ||
| break | ||
| entry.on("pause", function () { | ||
| me.pause() | ||
| }) | ||
| case 'meta': | ||
| position += this[CONSUMEMETA](chunk, position) | ||
| break | ||
| entry.on("resume", function () { | ||
| me.resume() | ||
| }) | ||
| /* istanbul ignore next */ | ||
| default: | ||
| throw new Error('invalid state: ' + this[STATE]) | ||
| } | ||
| } | ||
| if (this.listeners("*").length) { | ||
| this.emit("*", ev, entry) | ||
| if (position < length) { | ||
| if (this[BUFFER]) | ||
| this[BUFFER] = Buffer.concat([chunk.slice(position), this[BUFFER]]) | ||
| else | ||
| this[BUFFER] = chunk.slice(position) | ||
| } | ||
| } | ||
| this.emit(ev, entry) | ||
| warn (msg, data) { | ||
| if (!this.strict) | ||
| this.emit('warn', msg, data) | ||
| else if (data instanceof Error) | ||
| this.emit('error', data) | ||
| else { | ||
| const er = new Error(msg) | ||
| er.data = data | ||
| this[EMIT]('error', er) | ||
| } | ||
| } | ||
| // Zero-byte entry. End immediately. | ||
| if (entry.props.size === 0) { | ||
| entry.end() | ||
| this._entry = null | ||
| end (chunk) { | ||
| if (!this[ABORTED]) { | ||
| if (this[UNZIP]) | ||
| this[UNZIP].end(chunk) | ||
| else { | ||
| this[ENDED] = true | ||
| this.write(chunk) | ||
| } | ||
| } | ||
| } | ||
| } |
+3
-0
| The ISC License | ||
| Copyright (c) Isaac Z. Schlueter and Contributors | ||
| Permission to use, copy, modify, and/or distribute this software for any | ||
| purpose with or without fee is hereby granted, provided that the above | ||
| copyright notice and this permission notice appear in all copies. | ||
| THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
@@ -7,0 +10,0 @@ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
+23
-10
@@ -5,23 +5,36 @@ { | ||
| "description": "tar for node", | ||
| "version": "2.2.1", | ||
| "version": "3.0.0", | ||
| "repository": { | ||
| "type": "git", | ||
| "url": "git://github.com/isaacs/node-tar.git" | ||
| "url": "https://github.com/npm/node-tar.git" | ||
| }, | ||
| "main": "tar.js", | ||
| "scripts": { | ||
| "test": "tap test/*.js" | ||
| "test": "tap test/*.js --100 -J --coverage-report=text", | ||
| "preversion": "npm test", | ||
| "postversion": "npm publish", | ||
| "postpublish": "git push origin --all; git push origin --tags", | ||
| "genparse": "node scripts/generate-parse-fixtures.js", | ||
| "bench": "for i in benchmarks/*/*.js; do echo $i; for j in {1..5}; do node $i || break; done; done" | ||
| }, | ||
| "dependencies": { | ||
| "block-stream": "*", | ||
| "fstream": "^1.0.2", | ||
| "inherits": "2" | ||
| "minipass": "^2.0.1", | ||
| "minizlib": "^1.0.3", | ||
| "mkdirp": "^0.5.0", | ||
| "yallist": "^3.0.2" | ||
| }, | ||
| "devDependencies": { | ||
| "graceful-fs": "^4.1.2", | ||
| "chmodr": "^1.0.2", | ||
| "events-to-array": "^1.1.2", | ||
| "mutate-fs": "^1.1.0", | ||
| "rimraf": "1.x", | ||
| "tap": "0.x", | ||
| "mkdirp": "^0.5.0" | ||
| "tap": "^10.3.1", | ||
| "tar-fs": "^1.15.2", | ||
| "tar-stream": "^1.5.2" | ||
| }, | ||
| "license": "ISC" | ||
| "license": "ISC", | ||
| "files": [ | ||
| "tar.js", | ||
| "lib/" | ||
| ] | ||
| } |
+781
-30
| # node-tar | ||
| Tar for Node.js. | ||
| [Fast](./benchmarks) and full-featured Tar for Node.js | ||
| [](https://nodei.co/npm/tar/) | ||
| The API is designed to mimic the behavior of `tar(1)` on unix systems. | ||
| If you are familiar with how tar works, most of this will hopefully be | ||
| straightforward for you. If not, then hopefully this module can teach | ||
| you useful unix skills that may come in handy someday :) | ||
| ## API | ||
| ## Background | ||
| See `examples/` for usage examples. | ||
| A "tar file" or "tarball" is an archive of file system entries | ||
| (directories, files, links, etc.) The name comes from "tape archive". | ||
| If you run `man tar` on almost any Unix command line, you'll learn | ||
| quite a bit about what it can do, and its history. | ||
| ### var tar = require('tar') | ||
| Tar has 5 main top-level commands: | ||
| Returns an object with `.Pack`, `.Extract` and `.Parse` methods. | ||
| * `c` Create an archive | ||
| * `r` Replace entries within an archive | ||
| * `u` Update entries within an archive (ie, replace if they're newer) | ||
| * `t` List out the contents of an archive | ||
| * `x` Extract an archive to disk | ||
| ### tar.Pack([properties]) | ||
| The other flags and options modify how this top level function works. | ||
| Returns a through stream. Use | ||
| [fstream](https://npmjs.org/package/fstream) to write files into the | ||
| pack stream and you will receive tar archive data from the pack | ||
| stream. | ||
| ## High-Level API | ||
| This only works with directories, it does not work with individual files. | ||
| These 5 functions are the high-level API. All of them have a | ||
| single-character name (for unix nerds familiar with `tar(1)`) as well | ||
| as a long name (for everyone else). | ||
| The optional `properties` object are used to set properties in the tar | ||
| 'Global Extended Header'. If the `fromBase` property is set to true, | ||
| the tar will contain files relative to the path passed, and not with | ||
| the path included. | ||
| All the high-level functions take the following arguments, all three | ||
| of which are optional and may be omitted. | ||
| ### tar.Extract([options]) | ||
| 1. `options` - An optional object specifying various options | ||
| 2. `paths` - An array of paths to add or extract | ||
| 3. `callback` - Called when the command is completed, if async. (If | ||
| sync or no file specified, providing a callback throws a | ||
| `TypeError`.) | ||
| Returns a through stream. Write tar data to the stream and the files | ||
| in the tarball will be extracted onto the filesystem. | ||
| If the command is sync (ie, if `options.sync=true`), then the | ||
| callback is not allowed, since the action will be completed immediately. | ||
| `options` can be: | ||
| If a `file` argument is specified, and the command is async, then a | ||
| `Promise` is returned. In this case, if async, a callback may be | ||
| provided which is called when the command is completed. | ||
| If a `file` option is not specified, then a stream is returned. For | ||
| `create`, this is a readable stream of the generated archive. For | ||
| `list` and `extract` this is a writable stream that an archive should | ||
| be written into. If a file is not specified, then a callback is not | ||
| allowed, because you're already getting a stream to work with. | ||
| `replace` and `update` only work on existing archives, and so require | ||
| a `file` argument. | ||
| Sync commands without a file argument return a stream that acts on its | ||
| input immediately in the same tick. For readable streams, this means | ||
| that all of the data is immediately available by calling | ||
| `stream.read()`. For writable streams, it will be acted upon as soon | ||
| as it is provided, but this can be at any time. | ||
| ### Warnings | ||
| Some things cause tar to emit a warning, but should usually not cause | ||
| the entire operation to fail. There are three ways to handle | ||
| warnings: | ||
| 1. **Ignore them** (default) Invalid entries won't be put in the | ||
| archive, and invalid entries won't be unpacked. This is usually | ||
| fine, but can hide failures that you might care about. | ||
| 2. **Notice them** Add an `onwarn` function to the options, or listen | ||
| to the `'warn'` event on any tar stream. The function will get | ||
| called as `onwarn(message, data)`. Handle as appropriate. | ||
| 3. **Explode them.** Set `strict: true` in the options object, and | ||
| `warn` messages will be emitted as `'error'` events instead. If | ||
| there's no `error` handler, this causes the program to crash. If | ||
| used with a promise-returning/callback-taking method, then it'll | ||
| send the error to the promise/callback. | ||
| ### Examples | ||
| The API mimics the `tar(1)` command line functionality, with aliases | ||
| for more human-readable option and function names. The goal is that | ||
| if you know how to use `tar(1)` in Unix, then you know how to use | ||
| `require('tar')` in JavaScript. | ||
| To replicate `tar czf my-tarball.tgz files and folders`, you'd do: | ||
| ```js | ||
| { | ||
| path: '/path/to/extract/tar/into', | ||
| strip: 0, // how many path segments to strip from the root when extracting | ||
| } | ||
| tar.c( | ||
| { | ||
| gzip: <true|gzip options>, | ||
| file: 'my-tarball.tgz' | ||
| }, | ||
| ['some', 'files', 'and', 'folders'] | ||
| ).then(_ => { .. tarball has been created .. }) | ||
| ``` | ||
| `options` also get passed to the `fstream.Writer` instance that `tar` | ||
| uses internally. | ||
| To replicate `tar cz files and folders > my-tarball.tgz`, you'd do: | ||
| ### tar.Parse() | ||
| ```js | ||
| tar.c( // or tar.create | ||
| { | ||
| gzip: <true|gzip options> | ||
| }, | ||
| ['some', 'files', 'and', 'folders'] | ||
| ).pipe(fs.createWriteStream('my-tarball.tgz') | ||
| ``` | ||
| Returns a writable stream. Write tar data to it and it will emit | ||
| `entry` events for each entry parsed from the tarball. This is used by | ||
| `tar.Extract`. | ||
| To replicate `tar xf my-tarball.tgz` you'd do: | ||
| ```js | ||
| tar.x( // or tar.extract( | ||
| { | ||
| file: 'my-tarball.tgz' | ||
| } | ||
| ).then(_=> { .. tarball has been dumped in cwd .. }) | ||
| ``` | ||
| To replicate `cat my-tarball.tgz | tar x -C some-dir --strip=1`: | ||
| ```js | ||
| fs.createReadStream('my-tarball.tgz').pipe( | ||
| tar.x({ | ||
| strip: 1, | ||
| C: 'some-dir' // alias for cwd:'some-dir', also ok | ||
| }) | ||
| ) | ||
| ``` | ||
| To replicate `tar tf my-tarball.tgz`, do this: | ||
| ```js | ||
| tar.t({ | ||
| file: 'my-tarball.tgz', | ||
| onentry: entry => { .. do whatever with it .. } | ||
| }) | ||
| ``` | ||
| To replicate `cat my-tarball.tgz | tar t` do: | ||
| ```js | ||
| fs.createReadStream('my-tarball.tgz') | ||
| .pipe(tar.t()) | ||
| .on('entry', entry => { .. do whatever with it .. }) | ||
| ``` | ||
| To do anything synchronous, add `sync: true` to the options. Note | ||
| that sync functions don't take a callback and don't return a promise. | ||
| When the function returns, it's already done. Sync methods without a | ||
| file argument return a sync stream, which flushes immediately. But, | ||
| of course, it still won't be done until you `.end()` it. | ||
| To filter entries, add `filter: <function>` to the options. | ||
| Tar-creating methods call the filter with `filter(path, stat)`. | ||
| Tar-reading methods (including extraction) call the filter with | ||
| `filter(path, entry)`. The filter is called in the `this`-context of | ||
| the `Pack` or `Unpack` stream object. | ||
| The arguments list to `tar t` and `tar x` specify a list of filenames | ||
| to extract or list, so they're equivalent to a filter that tests if | ||
| the file is in the list. | ||
| For those who _aren't_ fans of tar's single-character command names: | ||
| ``` | ||
| tar.c === tar.create | ||
| tar.r === tar.replace (appends to archive, file is required) | ||
| tar.u === tar.update (appends if newer, file is required) | ||
| tar.x === tar.extract | ||
| tar.t === tar.list | ||
| ``` | ||
| Keep reading for all the command descriptions and options, as well as | ||
| the low-level API that they are built on. | ||
| ### tar.c(options, fileList, callback) [alias: tar.create] | ||
| Create a tarball archive. | ||
| The `fileList` is an array of paths to add to the tarball. Adding a | ||
| directory also adds its children recursively. | ||
| The following options are supported: | ||
| - `file` Write the tarball archive to the specified filename. If this | ||
| is specified, then the callback will be fired when the file has been | ||
| written, and a promise will be returned that resolves when the file | ||
| is written. If a filename is not specified, then a Readable Stream | ||
| will be returned which will emit the file data. [Alias: `f`] | ||
| - `sync` Act synchronously. If this is set, then any provided file | ||
| will be fully written after the call to `tar.c`. If this is set, | ||
| and a file is not provided, then the resulting stream will already | ||
| have the data ready to `read` or `emit('data')` as soon as you | ||
| request it. | ||
| - `onwarn` A function that will get called with `(message, data)` for | ||
| any warnings encountered. | ||
| - `strict` Treat warnings as crash-worthy errors. Default false. | ||
| - `cwd` The current working directory for creating the archive. | ||
| Defaults to `process.cwd()`. [Alias: `C`] | ||
| - `prefix` A path portion to prefix onto the entries in the archive. | ||
| - `gzip` Set to any truthy value to create a gzipped archive, or an | ||
| object with settings for `zlib.Gzip()` [Alias: `z`] | ||
| - `filter` A function that gets called with `(path, stat)` for each | ||
| entry being added. Return `true` to add the entry to the archive, | ||
| or `false` to omit it. | ||
| - `portable` Omit metadata that is system-specific: `ctime`, `atime`, | ||
| `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note | ||
| that `mtime` is still included, because this is necessary other | ||
| time-based operations. | ||
| - `preservePaths` Allow absolute paths and paths containing `..`. By | ||
| default, `/` is stripped from absolute paths, `..` paths are not | ||
| added to the archive. [Alias: `P`] | ||
| - `mode` The mode to set on the created file archive | ||
| - `noDirRecurse` Do not recursively archive the contents of | ||
| directories. [Alias: `n`] | ||
| - `follow` Set to true to pack the targets of symbolic links. Without | ||
| this option, symbolic links are archived as such. [Alias: `L`, `h`] | ||
| The following options are mostly internal, but can be modified in some | ||
| advanced use cases, such as re-using caches between runs. | ||
| - `linkCache` A Map object containing the device and inode value for | ||
| any file whose nlink is > 1, to identify hard links. | ||
| - `statCache` A Map object that caches calls `lstat`. | ||
| - `readdirCache` A Map object that caches calls to `readdir`. | ||
| - `jobs` A number specifying how many concurrent jobs to run. | ||
| Defaults to 4. | ||
| - `maxReadSize` The maximum buffer size for `fs.read()` operations. | ||
| Defaults to 1 MB. | ||
| ### tar.x(options, fileList, callback) [alias: tar.extract] | ||
| Extract a tarball archive. | ||
| The `fileList` is an array of paths to extract from the tarball. If | ||
| no paths are provided, then all the entries are extracted. | ||
| If the archive is gzipped, then tar will detect this and unzip it. | ||
| Note that all directories that are created will be forced to be | ||
| writable, readable, and listable by their owner, to avoid cases where | ||
| a directory prevents extraction of child entries by virtue of its | ||
| mode. | ||
| The following options are supported: | ||
| - `cwd` Extract files relative to the specified directory. Defaults | ||
| to `process.cwd()`. [Alias: `C`] | ||
| - `file` The archive file to extract. If not specified, then a | ||
| Writable stream is returned where the archive data should be | ||
| written. [Alias: `f`] | ||
| - `sync` Create files and directories synchronously. | ||
| - `strict` Treat warnings as crash-worthy errors. Default false. | ||
| - `filter` A function that gets called with `(path, entry)` for each | ||
| entry being unpacked. Return `true` to unpack the entry from the | ||
| archive, or `false` to skip it. | ||
| - `newer` Set to true to keep the existing file on disk if it's newer | ||
| than the file in the archive. [Alias: `keep-newer`, | ||
| `keep-newer-files`] | ||
| - `keep` Do not overwrite existing files. In particular, if a file | ||
| appears more than once in an archive, later copies will not | ||
| overwrite earlier copies. [Alias: `k`, `keep-existing`] | ||
| - `preservePaths` Allow absolute paths, paths containing `..`, and | ||
| extracting through symbolic links. By default, `/` is stripped from | ||
| absolute paths, `..` paths are not extracted, and any file whose | ||
| location would be modified by a symbolic link is not extracted. | ||
| [Alias: `P`] | ||
| - `unlink` Unlink files before creating them. Without this option, | ||
| tar overwrites existing files, which preserves existing hardlinks. | ||
| With this option, existing hardlinks will be broken, as will any | ||
| symlink that would affect the location of an extracted file. [Alias: | ||
| `U`] | ||
| - `strip` Remove the specified number of leading path elements. | ||
| Pathnames with fewer elements will be silently skipped. Note that | ||
| the pathname is edited after applying the filter, but before | ||
| security checks. [Alias: `strip-components`, `stripComponents`] | ||
| - `onwarn` A function that will get called with `(message, data)` for | ||
| any warnings encountered. | ||
| - `preserveOwner` If true, tar will set the `uid` and `gid` of | ||
| extracted entries to the `uid` and `gid` fields in the archive. | ||
| This defaults to true when run as root, and false otherwise. If | ||
| false, then files and directories will be set with the owner and | ||
| group of the user running the process. This is similar to `-p` in | ||
| `tar(1)`, but ACLs and other system-specific data is never unpacked | ||
| in this implementation, and modes are set by default already. | ||
| [Alias: `p`] | ||
| The following options are mostly internal, but can be modified in some | ||
| advanced use cases, such as re-using caches between runs. | ||
| - `maxReadSize` The maximum buffer size for `fs.read()` operations. | ||
| Defaults to 16 MB. | ||
| - `umask` Filter the modes of entries like `process.umask()`. | ||
| - `dmode` Default mode for directories | ||
| - `fmode` Default mode for files | ||
| - `dirCache` A Map object of which directories exist. | ||
| - `maxMetaEntrySize` The maximum size of meta entries that is | ||
| supported. Defaults to 1 MB. | ||
| ### tar.t(options, fileList, callback) [alias: tar.list] | ||
| List the contents of a tarball archive. | ||
| The `fileList` is an array of paths to list from the tarball. If | ||
| no paths are provided, then all the entries are listed. | ||
| If the archive is gzipped, then tar will detect this and unzip it. | ||
| Returns an event emitter that emits `entry` events with | ||
| `tar.ReadEntry` objects. However, they don't emit `'data'` or `'end'` | ||
| events. (If you want to get actual readable entries, use the | ||
| `tar.Parse` class instead.) | ||
| The following options are supported: | ||
| - `cwd` Extract files relative to the specified directory. Defaults | ||
| to `process.cwd()`. [Alias: `C`] | ||
| - `file` The archive file to list. If not specified, then a | ||
| Writable stream is returned where the archive data should be | ||
| written. [Alias: `f`] | ||
| - `sync` Read the specified file synchronously. (This has no effect | ||
| when a file option isn't specified, because entries are emitted as | ||
| fast as they are parsed from the stream anyway.) | ||
| - `strict` Treat warnings as crash-worthy errors. Default false. | ||
| - `filter` A function that gets called with `(path, entry)` for each | ||
| entry being listed. Return `true` to emit the entry from the | ||
| archive, or `false` to skip it. | ||
| - `onentry` A function that gets called with `(entry)` for each entry | ||
| that passes the filter. This is important for when both `file` and | ||
| `sync` are set, because it will be called synchronously. | ||
| - `maxReadSize` The maximum buffer size for `fs.read()` operations. | ||
| Defaults to 16 MB. | ||
| ### tar.u(options, fileList, callback) [alias: tar.update] | ||
| Add files to an archive if they are newer than the entry already in | ||
| the tarball archive. | ||
| The `fileList` is an array of paths to add to the tarball. Adding a | ||
| directory also adds its children recursively. | ||
| The following options are supported: | ||
| - `file` Required. Write the tarball archive to the specified | ||
| filename. [Alias: `f`] | ||
| - `sync` Act synchronously. If this is set, then any provided file | ||
| will be fully written after the call to `tar.c`. | ||
| - `onwarn` A function that will get called with `(message, data)` for | ||
| any warnings encountered. | ||
| - `strict` Treat warnings as crash-worthy errors. Default false. | ||
| - `cwd` The current working directory for adding entries to the | ||
| archive. Defaults to `process.cwd()`. [Alias: `C`] | ||
| - `prefix` A path portion to prefix onto the entries in the archive. | ||
| - `gzip` Set to any truthy value to create a gzipped archive, or an | ||
| object with settings for `zlib.Gzip()` [Alias: `z`] | ||
| - `filter` A function that gets called with `(path, stat)` for each | ||
| entry being added. Return `true` to add the entry to the archive, | ||
| or `false` to omit it. | ||
| - `portable` Omit metadata that is system-specific: `ctime`, `atime`, | ||
| `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note | ||
| that `mtime` is still included, because this is necessary for other | ||
| time-based operations. | ||
| - `preservePaths` Allow absolute paths and paths containing `..`. By | ||
| default, `/` is stripped from absolute paths, `..` paths are not | ||
| added to the archive. [Alias: `P`] | ||
| - `maxReadSize` The maximum buffer size for `fs.read()` operations. | ||
| Defaults to 16 MB. | ||
| - `noDirRecurse` Do not recursively archive the contents of | ||
| directories. [Alias: `n`] | ||
| - `follow` Set to true to pack the targets of symbolic links. Without | ||
| this option, symbolic links are archived as such. [Alias: `L`, `h`] | ||
| ### tar.r(options, fileList, callback) [alias: tar.replace] | ||
| Add files to an existing archive. Because later entries override | ||
| earlier entries, this effectively replaces any existing entries. | ||
| The `fileList` is an array of paths to add to the tarball. Adding a | ||
| directory also adds its children recursively. | ||
| The following options are supported: | ||
| - `file` Required. Write the tarball archive to the specified | ||
| filename. [Alias: `f`] | ||
| - `sync` Act synchronously. If this is set, then any provided file | ||
| will be fully written after the call to `tar.r`. | ||
| - `onwarn` A function that will get called with `(message, data)` for | ||
| any warnings encountered. | ||
| - `strict` Treat warnings as crash-worthy errors. Default false. | ||
| - `cwd` The current working directory for adding entries to the | ||
| archive. Defaults to `process.cwd()`. [Alias: `C`] | ||
| - `prefix` A path portion to prefix onto the entries in the archive. | ||
| - `gzip` Set to any truthy value to create a gzipped archive, or an | ||
| object with settings for `zlib.Gzip()` [Alias: `z`] | ||
| - `filter` A function that gets called with `(path, stat)` for each | ||
| entry being added. Return `true` to add the entry to the archive, | ||
| or `false` to omit it. | ||
| - `portable` Omit metadata that is system-specific: `ctime`, `atime`, | ||
| `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note | ||
| that `mtime` is still included, because this is necessary for other | ||
| time-based operations. | ||
| - `preservePaths` Allow absolute paths and paths containing `..`. By | ||
| default, `/` is stripped from absolute paths, `..` paths are not | ||
| added to the archive. [Alias: `P`] | ||
| - `maxReadSize` The maximum buffer size for `fs.read()` operations. | ||
| Defaults to 16 MB. | ||
| - `noDirRecurse` Do not recursively archive the contents of | ||
| directories. [Alias: `n`] | ||
| - `follow` Set to true to pack the targets of symbolic links. Without | ||
| this option, symbolic links are archived as such. [Alias: `L`, `h`] | ||
| ## Low-Level API | ||
| ### class tar.Pack | ||
| A readable tar stream. | ||
| Has all the standard readable stream interface stuff. `'data'` and | ||
| `'end'` events, `read()` method, `pause()` and `resume()`, etc. | ||
| #### constructor(options) | ||
| The following options are supported: | ||
| - `onwarn` A function that will get called with `(message, data)` for | ||
| any warnings encountered. | ||
| - `strict` Treat warnings as crash-worthy errors. Default false. | ||
| - `cwd` The current working directory for creating the archive. | ||
| Defaults to `process.cwd()`. | ||
| - `prefix` A path portion to prefix onto the entries in the archive. | ||
| - `gzip` Set to any truthy value to create a gzipped archive, or an | ||
| object with settings for `zlib.Gzip()` | ||
| - `filter` A function that gets called with `(path, stat)` for each | ||
| entry being added. Return `true` to add the entry to the archive, | ||
| or `false` to omit it. | ||
| - `portable` Omit metadata that is system-specific: `ctime`, `atime`, | ||
| `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note | ||
| that `mtime` is still included, because this is necessary for other | ||
| time-based operations. | ||
| - `preservePaths` Allow absolute paths and paths containing `..`. By | ||
| default, `/` is stripped from absolute paths, `..` paths are not | ||
| added to the archive. | ||
| - `linkCache` A Map object containing the device and inode value for | ||
| any file whose nlink is > 1, to identify hard links. | ||
| - `statCache` A Map object that caches calls to `lstat`. | ||
| - `readdirCache` A Map object that caches calls to `readdir`. | ||
| - `jobs` A number specifying how many concurrent jobs to run. | ||
| Defaults to 4. | ||
| - `maxReadSize` The maximum buffer size for `fs.read()` operations. | ||
| Defaults to 16 MB. | ||
| - `noDirRecurse` Do not recursively archive the contents of | ||
| directories. | ||
| - `follow` Set to true to pack the targets of symbolic links. Without | ||
| this option, symbolic links are archived as such. | ||
| #### add(path) | ||
| Adds an entry to the archive. Returns the Pack stream. | ||
| #### write(path) | ||
| Adds an entry to the archive. Returns true if flushed. | ||
| #### end() | ||
| Finishes the archive. | ||
| ### class tar.Pack.Sync | ||
| Synchronous version of `tar.Pack`. | ||
| ### class tar.Unpack | ||
| A writable stream that unpacks a tar archive onto the file system. | ||
| All the normal writable stream stuff is supported. `write()` and | ||
| `end()` methods, `'drain'` events, etc. | ||
| Note that all directories that are created will be forced to be | ||
| writable, readable, and listable by their owner, to avoid cases where | ||
| a directory prevents extraction of child entries by virtue of its | ||
| mode. | ||
| `'close'` is emitted when it's done writing stuff to the file system. | ||
| #### constructor(options) | ||
| - `cwd` Extract files relative to the specified directory. Defaults | ||
| to `process.cwd()`. | ||
| - `filter` A function that gets called with `(path, entry)` for each | ||
| entry being unpacked. Return `true` to unpack the entry from the | ||
| archive, or `false` to skip it. | ||
| - `newer` Set to true to keep the existing file on disk if it's newer | ||
| than the file in the archive. | ||
| - `keep` Do not overwrite existing files. In particular, if a file | ||
| appears more than once in an archive, later copies will not | ||
| overwrite earlier copies. | ||
| - `preservePaths` Allow absolute paths, paths containing `..`, and | ||
| extracting through symbolic links. By default, `/` is stripped from | ||
| absolute paths, `..` paths are not extracted, and any file whose | ||
| location would be modified by a symbolic link is not extracted. | ||
| - `unlink` Unlink files before creating them. Without this option, | ||
| tar overwrites existing files, which preserves existing hardlinks. | ||
| With this option, existing hardlinks will be broken, as will any | ||
| symlink that would affect the location of an extracted file. | ||
| - `strip` Remove the specified number of leading path elements. | ||
| Pathnames with fewer elements will be silently skipped. Note that | ||
| the pathname is edited after applying the filter, but before | ||
| security checks. | ||
| - `onwarn` A function that will get called with `(message, data)` for | ||
| any warnings encountered. | ||
| - `umask` Filter the modes of entries like `process.umask()`. | ||
| - `dmode` Default mode for directories | ||
| - `fmode` Default mode for files | ||
| - `dirCache` A Map object of which directories exist. | ||
| - `maxMetaEntrySize` The maximum size of meta entries that is | ||
| supported. Defaults to 1 MB. | ||
| - `preserveOwner` If true, tar will set the `uid` and `gid` of | ||
| extracted entries to the `uid` and `gid` fields in the archive. | ||
| This defaults to true when run as root, and false otherwise. If | ||
| false, then files and directories will be set with the owner and | ||
| group of the user running the process. This is similar to `-p` in | ||
| `tar(1)`, but ACLs and other system-specific data is never unpacked | ||
| in this implementation, and modes are set by default already. | ||
| ### class tar.Unpack.Sync | ||
| Synchronous version of `tar.Unpack`. | ||
| ### class tar.Parse | ||
| A writable stream that parses a tar archive stream. All the standard | ||
| writable stream stuff is supported. | ||
| If the archive is gzipped, then tar will detect this and unzip it. | ||
| Emits `'entry'` events with `tar.ReadEntry` objects, which are | ||
| themselves readable streams that you can pipe wherever. | ||
| Each `entry` will not emit until the one before it is flushed through, | ||
| so make sure to either consume the data (with `on('data', ...)` or | ||
| `.pipe(...)`) or throw it away with `.resume()` to keep the stream | ||
| flowing. | ||
| #### constructor(options) | ||
| Returns an event emitter that emits `entry` events with | ||
| `tar.ReadEntry` objects. | ||
| The following options are supported: | ||
| - `cwd` Extract files relative to the specified directory. Defaults | ||
| to `process.cwd()`. | ||
| - `strict` Treat warnings as crash-worthy errors. Default false. | ||
| - `filter` A function that gets called with `(path, entry)` for each | ||
| entry being listed. Return `true` to emit the entry from the | ||
| archive, or `false` to skip it. | ||
| - `onentry` A function that gets called with `(entry)` for each entry | ||
| that passes the filter. | ||
| #### abort(message, error) | ||
| Stop all parsing activities. This is called when there are zlib | ||
| errors. It also emits a warning with the message and error provided. | ||
| ### class tar.ReadEntry extends [MiniPass](http://npm.im/minipass) | ||
| A representation of an entry that is being read out of a tar archive. | ||
| It has the following fields: | ||
| - `extended` The extended metadata object provided to the constructor. | ||
| - `globalExtended` The global extended metadata object provided to the | ||
| constructor. | ||
| - `remain` The number of bytes remaining to be written into the | ||
| stream. | ||
| - `blockRemain` The number of 512-byte blocks remaining to be written | ||
| into the stream. | ||
| - `ignore` Whether this entry should be ignored. | ||
| - `meta` True if this represents metadata about the next entry, false | ||
| if it represents a filesystem object. | ||
| - All the fields from the header, extended header, and global extended | ||
| header are added to the ReadEntry object. So it has `path`, `type`, | ||
| `size`, `mode`, and so on. | ||
| #### constructor(header, extended, globalExtended) | ||
| Create a new ReadEntry object with the specified header, extended | ||
| header, and global extended header values. | ||
| ### class tar.WriteEntry extends [MiniPass](http://npm.im/minipass) | ||
| A representation of an entry that is being written from the file | ||
| system into a tar archive. | ||
| Emits data for the Header, and for the Pax Extended Header if one is | ||
| required, as well as any body data. | ||
| Creating a WriteEntry for a directory does not also create | ||
| WriteEntry objects for all of the directory contents. | ||
| It has the following fields: | ||
| - `path` The path field that will be written to the archive. By | ||
| default, this is also the path from the cwd to the file system | ||
| object. | ||
| - `portable` Omit metadata that is system-specific: `ctime`, `atime`, | ||
| `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note | ||
| that `mtime` is still included, because this is necessary for other | ||
| time-based operations. | ||
| - `myuid` If supported, the uid of the user running the current | ||
| process. | ||
| - `myuser` The `env.USER` string if set, or `''`. Set as the entry | ||
| `uname` field if the file's `uid` matches `this.myuid`. | ||
| - `maxReadSize` The maximum buffer size for `fs.read()` operations. | ||
| Defaults to 1 MB. | ||
| - `linkCache` A Map object containing the device and inode value for | ||
| any file whose nlink is > 1, to identify hard links. | ||
| - `statCache` A Map object that caches calls to `lstat`. | ||
| - `preservePaths` Allow absolute paths and paths containing `..`. By | ||
| default, `/` is stripped from absolute paths, `..` paths are not | ||
| added to the archive. | ||
| - `cwd` The current working directory for creating the archive. | ||
| Defaults to `process.cwd()`. | ||
| - `absolute` The absolute path to the entry on the filesystem. By | ||
| default, this is `path.resolve(this.cwd, this.path)`, but it can be | ||
| overridden explicitly. | ||
| - `strict` Treat warnings as crash-worthy errors. Default false. | ||
| - `win32` True if on a windows platform. Causes behavior where paths | ||
| replace `\` with `/`. | ||
| #### constructor(path, options) | ||
| `path` is the path of the entry as it is written in the archive. | ||
| The following options are supported: | ||
| - `portable` Omit metadata that is system-specific: `ctime`, `atime`, | ||
| `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note | ||
| that `mtime` is still included, because this is necessary for other | ||
| time-based operations. | ||
| - `maxReadSize` The maximum buffer size for `fs.read()` operations. | ||
| Defaults to 1 MB. | ||
| - `linkCache` A Map object containing the device and inode value for | ||
| any file whose nlink is > 1, to identify hard links. | ||
| - `statCache` A Map object that caches calls to `lstat`. | ||
| - `preservePaths` Allow absolute paths and paths containing `..`. By | ||
| default, `/` is stripped from absolute paths, `..` paths are not | ||
| added to the archive. | ||
| - `cwd` The current working directory for creating the archive. | ||
| Defaults to `process.cwd()`. | ||
| - `absolute` The absolute path to the entry on the filesystem. By | ||
| default, this is `path.resolve(this.cwd, this.path)`, but it can be | ||
| overridden explicitly. | ||
| - `strict` Treat warnings as crash-worthy errors. Default false. | ||
| - `win32` True if on a windows platform. Causes behavior where paths | ||
| replace `\` with `/`. | ||
| - `onwarn` A function that will get called with `(message, data)` for | ||
| any warnings encountered. | ||
| #### warn(message, data) | ||
| If strict, emit an error with the provided message. | ||
| Otherwise, emit a `'warn'` event with the provided message and data. | ||
| ### class tar.WriteEntry.Sync | ||
| Synchronous version of tar.WriteEntry | ||
| ### class tar.Header | ||
| A class for reading and writing header blocks. | ||
| It has the following fields: | ||
| - `nullBlock` True if decoding a block which is entirely composed of | ||
| `0x00` null bytes. (Useful because tar files are terminated by | ||
| at least 2 null blocks.) | ||
| - `cksumValid` True if the checksum in the header is valid, false | ||
| otherwise. | ||
| - `needPax` True if the values, as encoded, will require a Pax | ||
| extended header. | ||
| - `path` The path of the entry. | ||
| - `mode` The 4 lowest-order octal digits of the file mode. That is, | ||
| read/write/execute permissions for world, group, and owner, and the | ||
| setuid, setgid, and sticky bits. | ||
| - `uid` Numeric user id of the file owner | ||
| - `gid` Numeric group id of the file owner | ||
| - `size` Size of the file in bytes | ||
| - `mtime` Modified time of the file | ||
| - `cksum` The checksum of the header. This is generated by adding all | ||
| the bytes of the header block, treating the checksum field itself as | ||
| all ascii space characters (that is, `0x20`). | ||
| - `type` The human-readable name of the type of entry this represents, | ||
| or the alphanumeric key if unknown. | ||
| - `typeKey` The alphanumeric key for the type of entry this header | ||
| represents. | ||
| - `linkpath` The target of Link and SymbolicLink entries. | ||
| - `uname` Human-readable user name of the file owner | ||
| - `gname` Human-readable group name of the file owner | ||
| - `devmaj` The major portion of the device number. Always `0` for | ||
| files, directories, and links. | ||
| - `devmin` The minor portion of the device number. Always `0` for | ||
| files, directories, and links. | ||
| - `atime` File access time. | ||
| - `ctime` File change time. | ||
| #### constructor(data, [offset=0]) | ||
| `data` is optional. It is either a Buffer that should be interpreted | ||
| as a tar Header starting at the specified offset and continuing for | ||
| 512 bytes, or a data object of keys and values to set on the header | ||
| object, and eventually encode as a tar Header. | ||
| #### decode(block, offset) | ||
| Decode the provided buffer starting at the specified offset. | ||
| Buffer length must be greater than 512 bytes. | ||
| #### set(data) | ||
| Set the fields in the data object. | ||
| #### encode(buffer, offset) | ||
| Encode the header fields into the buffer at the specified offset. | ||
| Returns `this.needPax` to indicate whether a Pax Extended Header is | ||
| required to properly encode the specified data. | ||
| ### class tar.Pax | ||
| An object representing a set of key-value pairs in a Pax extended | ||
| header entry. | ||
| It has the following fields. Where the same name is used, they have | ||
| the same semantics as the tar.Header field of the same name. | ||
| - `global` True if this represents a global extended header, or false | ||
| if it is for a single entry. | ||
| - `atime` | ||
| - `charset` | ||
| - `comment` | ||
| - `ctime` | ||
| - `gid` | ||
| - `gname` | ||
| - `linkpath` | ||
| - `mtime` | ||
| - `path` | ||
| - `size` | ||
| - `uid` | ||
| - `uname` | ||
| - `dev` | ||
| - `ino` | ||
| - `nlink` | ||
| #### constructor(object, global) | ||
| Set the fields set in the object. `global` is a boolean that defaults | ||
| to false. | ||
| #### encode() | ||
| Return a Buffer containing the header and body for the Pax extended | ||
| header entry, or `null` if there is nothing to encode. | ||
| #### encodeBody() | ||
| Return a string representing the body of the pax extended header | ||
| entry. | ||
| #### encodeField(fieldName) | ||
| Return a string representing the key/value encoding for the specified | ||
| fieldName, or `''` if the field is unset. | ||
| ### tar.Pax.parse(string, extended, global) | ||
| Return a new Pax object created by parsing the contents of the string | ||
| provided. | ||
| If the `extended` object is set, then also add the fields from that | ||
| object. (This is necessary because multiple metadata entries can | ||
| occur in sequence.) | ||
| ### tar.types | ||
| A translation table for the `type` field in tar headers. | ||
| #### tar.types.name.get(code) | ||
| Get the human-readable name for a given alphanumeric code. | ||
| #### tar.types.code.get(name) | ||
| Get the alphanumeric code for a given human-readable name. |
+16
-171
@@ -1,173 +0,18 @@ | ||
| // field paths that every tar file must have. | ||
| // header is padded to 512 bytes. | ||
| var f = 0 | ||
| , fields = {} | ||
| , path = fields.path = f++ | ||
| , mode = fields.mode = f++ | ||
| , uid = fields.uid = f++ | ||
| , gid = fields.gid = f++ | ||
| , size = fields.size = f++ | ||
| , mtime = fields.mtime = f++ | ||
| , cksum = fields.cksum = f++ | ||
| , type = fields.type = f++ | ||
| , linkpath = fields.linkpath = f++ | ||
| , headerSize = 512 | ||
| , blockSize = 512 | ||
| , fieldSize = [] | ||
| 'use strict' | ||
| fieldSize[path] = 100 | ||
| fieldSize[mode] = 8 | ||
| fieldSize[uid] = 8 | ||
| fieldSize[gid] = 8 | ||
| fieldSize[size] = 12 | ||
| fieldSize[mtime] = 12 | ||
| fieldSize[cksum] = 8 | ||
| fieldSize[type] = 1 | ||
| fieldSize[linkpath] = 100 | ||
| // high-level commands | ||
| exports.c = exports.create = require('./lib/create.js') | ||
| exports.r = exports.replace = require('./lib/replace.js') | ||
| exports.t = exports.list = require('./lib/list.js') | ||
| exports.u = exports.update = require('./lib/update.js') | ||
| exports.x = exports.extract = require('./lib/extract.js') | ||
| // "ustar\0" may introduce another bunch of headers. | ||
| // these are optional, and will be nulled out if not present. | ||
| var ustar = fields.ustar = f++ | ||
| , ustarver = fields.ustarver = f++ | ||
| , uname = fields.uname = f++ | ||
| , gname = fields.gname = f++ | ||
| , devmaj = fields.devmaj = f++ | ||
| , devmin = fields.devmin = f++ | ||
| , prefix = fields.prefix = f++ | ||
| , fill = fields.fill = f++ | ||
| // terminate fields. | ||
| fields[f] = null | ||
| fieldSize[ustar] = 6 | ||
| fieldSize[ustarver] = 2 | ||
| fieldSize[uname] = 32 | ||
| fieldSize[gname] = 32 | ||
| fieldSize[devmaj] = 8 | ||
| fieldSize[devmin] = 8 | ||
| fieldSize[prefix] = 155 | ||
| fieldSize[fill] = 12 | ||
| // nb: prefix field may in fact be 130 bytes of prefix, | ||
| // a null char, 12 bytes for atime, 12 bytes for ctime. | ||
| // | ||
| // To recognize this format: | ||
| // 1. prefix[130] === ' ' or '\0' | ||
| // 2. atime and ctime are octal numeric values | ||
| // 3. atime and ctime have ' ' in their last byte | ||
| var fieldEnds = {} | ||
| , fieldOffs = {} | ||
| , fe = 0 | ||
| for (var i = 0; i < f; i ++) { | ||
| fieldOffs[i] = fe | ||
| fieldEnds[i] = (fe += fieldSize[i]) | ||
| } | ||
| // build a translation table of field paths. | ||
| Object.keys(fields).forEach(function (f) { | ||
| if (fields[f] !== null) fields[fields[f]] = f | ||
| }) | ||
| // different values of the 'type' field | ||
| // paths match the values of Stats.isX() functions, where appropriate | ||
| var types = | ||
| { 0: "File" | ||
| , "\0": "OldFile" // like 0 | ||
| , "": "OldFile" | ||
| , 1: "Link" | ||
| , 2: "SymbolicLink" | ||
| , 3: "CharacterDevice" | ||
| , 4: "BlockDevice" | ||
| , 5: "Directory" | ||
| , 6: "FIFO" | ||
| , 7: "ContiguousFile" // like 0 | ||
| // posix headers | ||
| , g: "GlobalExtendedHeader" // k=v for the rest of the archive | ||
| , x: "ExtendedHeader" // k=v for the next file | ||
| // vendor-specific stuff | ||
| , A: "SolarisACL" // skip | ||
| , D: "GNUDumpDir" // like 5, but with data, which should be skipped | ||
| , I: "Inode" // metadata only, skip | ||
| , K: "NextFileHasLongLinkpath" // data = link path of next file | ||
| , L: "NextFileHasLongPath" // data = path of next file | ||
| , M: "ContinuationFile" // skip | ||
| , N: "OldGnuLongPath" // like L | ||
| , S: "SparseFile" // skip | ||
| , V: "TapeVolumeHeader" // skip | ||
| , X: "OldExtendedHeader" // like x | ||
| } | ||
| Object.keys(types).forEach(function (t) { | ||
| types[types[t]] = types[types[t]] || t | ||
| }) | ||
| // values for the mode field | ||
| var modes = | ||
| { suid: 04000 // set uid on extraction | ||
| , sgid: 02000 // set gid on extraction | ||
| , svtx: 01000 // set restricted deletion flag on dirs on extraction | ||
| , uread: 0400 | ||
| , uwrite: 0200 | ||
| , uexec: 0100 | ||
| , gread: 040 | ||
| , gwrite: 020 | ||
| , gexec: 010 | ||
| , oread: 4 | ||
| , owrite: 2 | ||
| , oexec: 1 | ||
| , all: 07777 | ||
| } | ||
| var numeric = | ||
| { mode: true | ||
| , uid: true | ||
| , gid: true | ||
| , size: true | ||
| , mtime: true | ||
| , devmaj: true | ||
| , devmin: true | ||
| , cksum: true | ||
| , atime: true | ||
| , ctime: true | ||
| , dev: true | ||
| , ino: true | ||
| , nlink: true | ||
| } | ||
| Object.keys(modes).forEach(function (t) { | ||
| modes[modes[t]] = modes[modes[t]] || t | ||
| }) | ||
| var knownExtended = | ||
| { atime: true | ||
| , charset: true | ||
| , comment: true | ||
| , ctime: true | ||
| , gid: true | ||
| , gname: true | ||
| , linkpath: true | ||
| , mtime: true | ||
| , path: true | ||
| , realtime: true | ||
| , security: true | ||
| , size: true | ||
| , uid: true | ||
| , uname: true } | ||
| exports.fields = fields | ||
| exports.fieldSize = fieldSize | ||
| exports.fieldOffs = fieldOffs | ||
| exports.fieldEnds = fieldEnds | ||
| exports.types = types | ||
| exports.modes = modes | ||
| exports.numeric = numeric | ||
| exports.headerSize = headerSize | ||
| exports.blockSize = blockSize | ||
| exports.knownExtended = knownExtended | ||
| exports.Pack = require("./lib/pack.js") | ||
| exports.Parse = require("./lib/parse.js") | ||
| exports.Extract = require("./lib/extract.js") | ||
| // classes | ||
| exports.Pack = require('./lib/pack.js') | ||
| exports.Unpack = require('./lib/unpack.js') | ||
| exports.Parse = require('./lib/parse.js') | ||
| exports.ReadEntry = require('./lib/read-entry.js') | ||
| exports.WriteEntry = require('./lib/write-entry.js') | ||
| exports.Header = require('./lib/header.js') | ||
| exports.Pax = require('./lib/pax.js') | ||
| exports.types = require('./lib/types.js') |
Sorry, the diff of this file is not supported yet
| language: node_js | ||
| node_js: | ||
| - 0.10 | ||
| - 0.11 |
| var tar = require("../tar.js") | ||
| , fs = require("fs") | ||
| function onError(err) { | ||
| console.error('An error occurred:', err) | ||
| } | ||
| function onEnd() { | ||
| console.log('Extracted!') | ||
| } | ||
| var extractor = tar.Extract({path: __dirname + "/extract"}) | ||
| .on('error', onError) | ||
| .on('end', onEnd); | ||
| fs.createReadStream(__dirname + "/../test/fixtures/c.tar") | ||
| .on('error', onError) | ||
| .pipe(extractor); |
| var tar = require("../tar.js") | ||
| , fstream = require("fstream") | ||
| , fs = require("fs") | ||
| var dirDest = fs.createWriteStream('dir.tar') | ||
| function onError(err) { | ||
| console.error('An error occurred:', err) | ||
| } | ||
| function onEnd() { | ||
| console.log('Packed!') | ||
| } | ||
| var packer = tar.Pack({ noProprietary: true }) | ||
| .on('error', onError) | ||
| .on('end', onEnd); | ||
| // This must be a "directory" | ||
| fstream.Reader({ path: __dirname, type: "Directory" }) | ||
| .on('error', onError) | ||
| .pipe(packer) | ||
| .pipe(dirDest) |
| var tar = require("../tar.js") | ||
| , fs = require("fs") | ||
| fs.createReadStream(__dirname + "/../test/fixtures/c.tar") | ||
| .pipe(tar.Parse()) | ||
| .on("extendedHeader", function (e) { | ||
| console.error("extended pax header", e.props) | ||
| e.on("end", function () { | ||
| console.error("extended pax fields:", e.fields) | ||
| }) | ||
| }) | ||
| .on("ignoredEntry", function (e) { | ||
| console.error("ignoredEntry?!?", e.props) | ||
| }) | ||
| .on("longLinkpath", function (e) { | ||
| console.error("longLinkpath entry", e.props) | ||
| e.on("end", function () { | ||
| console.error("value=%j", e.body.toString()) | ||
| }) | ||
| }) | ||
| .on("longPath", function (e) { | ||
| console.error("longPath entry", e.props) | ||
| e.on("end", function () { | ||
| console.error("value=%j", e.body.toString()) | ||
| }) | ||
| }) | ||
| .on("entry", function (e) { | ||
| console.error("entry", e.props) | ||
| e.on("data", function (c) { | ||
| console.error(" >>>" + c.toString().replace(/\n/g, "\\n")) | ||
| }) | ||
| e.on("end", function () { | ||
| console.error(" <<<EOF") | ||
| }) | ||
| }) | ||
| // just like the Entry class, but it buffers the contents | ||
| // | ||
| // XXX It would be good to set a maximum BufferEntry filesize, | ||
| // since it eats up memory. In normal operation, | ||
| // these are only for long filenames or link names, which are | ||
| // rarely very big. | ||
| module.exports = BufferEntry | ||
| var inherits = require("inherits") | ||
| , Entry = require("./entry.js") | ||
| function BufferEntry () { | ||
| Entry.apply(this, arguments) | ||
| this._buffer = new Buffer(this.props.size) | ||
| this._offset = 0 | ||
| this.body = "" | ||
| this.on("end", function () { | ||
| this.body = this._buffer.toString().slice(0, -1) | ||
| }) | ||
| } | ||
| inherits(BufferEntry, Entry) | ||
| // collect the bytes as they come in. | ||
| BufferEntry.prototype.write = function (c) { | ||
| c.copy(this._buffer, this._offset) | ||
| this._offset += c.length | ||
| Entry.prototype.write.call(this, c) | ||
| } |
| module.exports = EntryWriter | ||
| var tar = require("../tar.js") | ||
| , TarHeader = require("./header.js") | ||
| , Entry = require("./entry.js") | ||
| , inherits = require("inherits") | ||
| , BlockStream = require("block-stream") | ||
| , ExtendedHeaderWriter | ||
| , Stream = require("stream").Stream | ||
| , EOF = {} | ||
| inherits(EntryWriter, Stream) | ||
| function EntryWriter (props) { | ||
| var me = this | ||
| if (!(me instanceof EntryWriter)) { | ||
| return new EntryWriter(props) | ||
| } | ||
| Stream.apply(this) | ||
| me.writable = true | ||
| me.readable = true | ||
| me._stream = new BlockStream(512) | ||
| me._stream.on("data", function (c) { | ||
| me.emit("data", c) | ||
| }) | ||
| me._stream.on("drain", function () { | ||
| me.emit("drain") | ||
| }) | ||
| me._stream.on("end", function () { | ||
| me.emit("end") | ||
| me.emit("close") | ||
| }) | ||
| me.props = props | ||
| if (props.type === "Directory") { | ||
| props.size = 0 | ||
| } | ||
| props.ustar = "ustar\0" | ||
| props.ustarver = "00" | ||
| me.path = props.path | ||
| me._buffer = [] | ||
| me._didHeader = false | ||
| me._meta = false | ||
| me.on("pipe", function () { | ||
| me._process() | ||
| }) | ||
| } | ||
| EntryWriter.prototype.write = function (c) { | ||
| // console.error(".. ew write") | ||
| if (this._ended) return this.emit("error", new Error("write after end")) | ||
| this._buffer.push(c) | ||
| this._process() | ||
| this._needDrain = this._buffer.length > 0 | ||
| return !this._needDrain | ||
| } | ||
| EntryWriter.prototype.end = function (c) { | ||
| // console.error(".. ew end") | ||
| if (c) this._buffer.push(c) | ||
| this._buffer.push(EOF) | ||
| this._ended = true | ||
| this._process() | ||
| this._needDrain = this._buffer.length > 0 | ||
| } | ||
| EntryWriter.prototype.pause = function () { | ||
| // console.error(".. ew pause") | ||
| this._paused = true | ||
| this.emit("pause") | ||
| } | ||
| EntryWriter.prototype.resume = function () { | ||
| // console.error(".. ew resume") | ||
| this._paused = false | ||
| this.emit("resume") | ||
| this._process() | ||
| } | ||
| EntryWriter.prototype.add = function (entry) { | ||
| // console.error(".. ew add") | ||
| if (!this.parent) return this.emit("error", new Error("no parent")) | ||
| // make sure that the _header and such is emitted, and clear out | ||
| // the _currentEntry link on the parent. | ||
| if (!this._ended) this.end() | ||
| return this.parent.add(entry) | ||
| } | ||
| EntryWriter.prototype._header = function () { | ||
| // console.error(".. ew header") | ||
| if (this._didHeader) return | ||
| this._didHeader = true | ||
| var headerBlock = TarHeader.encode(this.props) | ||
| if (this.props.needExtended && !this._meta) { | ||
| var me = this | ||
| ExtendedHeaderWriter = ExtendedHeaderWriter || | ||
| require("./extended-header-writer.js") | ||
| ExtendedHeaderWriter(this.props) | ||
| .on("data", function (c) { | ||
| me.emit("data", c) | ||
| }) | ||
| .on("error", function (er) { | ||
| me.emit("error", er) | ||
| }) | ||
| .end() | ||
| } | ||
| // console.error(".. .. ew headerBlock emitting") | ||
| this.emit("data", headerBlock) | ||
| this.emit("header") | ||
| } | ||
// Flush buffered chunks to the underlying stream, making sure the
// header goes out first.
EntryWriter.prototype._process = function () {
  if (!this._didHeader && !this._meta) {
    this._header()
  }

  if (this._paused || this._processing) {
    // Re-entrancy / back-pressure guard; _process is invoked again
    // on resume.
    return
  }

  this._processing = true

  var buf = this._buffer
  for (var i = 0; i < buf.length; i ++) {
    var c = buf[i]

    // The EOF sentinel marks end-of-entry; anything else is data.
    if (c === EOF) this._stream.end()
    else this._stream.write(c)

    if (this._paused) {
      // Paused mid-emission: stash whatever wasn't written yet and
      // request a drain when we pick back up.
      this._processing = false
      if (i < buf.length) {
        this._needDrain = true
        this._buffer = buf.slice(i + 1)
      }
      return
    }
  }

  // Everything flushed; let the producer push more.
  this._buffer.length = 0
  this._processing = false
  this.emit("drain")
}

// Nothing to tear down; entries are driven entirely by their parent.
EntryWriter.prototype.destroy = function () {}
-220
| // A passthrough read/write stream that sets its properties | ||
| // based on a header, extendedHeader, and globalHeader | ||
| // | ||
| // Can be either a file system object of some sort, or | ||
| // a pax/ustar metadata entry. | ||
| module.exports = Entry | ||
| var TarHeader = require("./header.js") | ||
| , tar = require("../tar") | ||
| , assert = require("assert").ok | ||
| , Stream = require("stream").Stream | ||
| , inherits = require("inherits") | ||
| , fstream = require("fstream").Abstract | ||
// Entry: a passthrough stream representing one file (or metadata
// record) inside a tarball being parsed.  Props are derived from the
// ustar header, the per-entry extended header, and the global header.
function Entry (header, extended, global) {
  Stream.call(this)
  this.readable = true
  this.writable = true

  this._needDrain = false
  this._paused = false
  this._reading = false
  this._ending = false
  this._ended = false
  this._remaining = 0    // body bytes left, per the size field
  this._abort = false    // parser may skip write() calls when set
  this._queue = []       // buffered chunks awaiting emission
  this._index = 0        // index of the next chunk to emit
  this._queueLen = 0

  // Bound so it can be attached directly as an event handler.
  this._read = this._read.bind(this)

  this.props = {}
  this._header = header
  this._extended = extended || {}

  // globals can change throughout the course of
  // a file parse operation.  Freeze it at its current state.
  this._global = {}
  var me = this
  Object.keys(global || {}).forEach(function (g) {
    me._global[g] = global[g]
  })

  this._setProps()
}

inherits(Entry, Stream)
// Accept a chunk of this entry's body from the parser.  Returns
// false when the producer should wait for a "drain" event.
Entry.prototype.write = function (c) {
  if (this._ending) this.error("write() after end()", null, true)
  if (this._remaining === 0) {
    this.error("invalid bytes past eof")
  }

  // often we'll get a bunch of \0 at the end of the last write,
  // since chunks will always be 512 bytes when reading a tarball.
  if (c.length > this._remaining) {
    c = c.slice(0, this._remaining)
  }
  this._remaining -= c.length

  // put it on the stack.
  var ql = this._queueLen
  this._queue.push(c)
  this._queueLen ++

  this._read()

  // either paused, or buffered
  if (this._paused || ql > 0) {
    this._needDrain = true
    return false
  }

  return true
}
// Signal that this entry's body is complete.  An optional trailing
// chunk is written first; the actual "end" event is emitted from
// _read() once the queue has fully drained.
Entry.prototype.end = function (c) {
  if (c) {
    this.write(c)
  }
  this._ending = true
  this._read()
}
// Stop emitting "data" until resume() is called.
Entry.prototype.pause = function () {
  this._paused = true
  this.emit("pause")
}

// Resume emission.  Returns true when more than one buffered chunk
// is still waiting (classic-streams "keep writing" hint).
Entry.prototype.resume = function () {
  this.emit("resume")
  this._paused = false
  this._read()
  return this._queueLen - this._index > 1
}
| // This is bound to the instance | ||
// Drain buffered chunks as "data" events.  This is bound to the
// instance (see the constructor).
Entry.prototype._read = function () {
  if (this._paused || this._reading || this._ended) return

  // set this flag so that event handlers don't inadvertently
  // get multiple _read() calls running.
  this._reading = true

  // have any data to emit?
  while (this._index < this._queueLen && !this._paused) {
    var chunk = this._queue[this._index ++]
    this.emit("data", chunk)
  }

  // check if we're drained
  if (this._index >= this._queueLen) {
    this._queue.length = this._queueLen = this._index = 0
    if (this._needDrain) {
      this._needDrain = false
      this.emit("drain")
    }
    if (this._ending) {
      this._ended = true
      this.emit("end")
    }
  }

  // if the queue gets too big, then pluck off whatever we can.
  // this should be fairly rare.
  // NOTE(review): _maxQueueLen is never assigned anywhere in this
  // file, so the comparison below is always false and this pruning
  // never runs -- confirm whether a numeric default was intended.
  var mql = this._maxQueueLen
  if (this._queueLen > mql && this._index > 0) {
    mql = Math.min(this._index, mql)
    this._index -= mql
    this._queueLen -= mql
    this._queue = this._queue.slice(mql)
  }

  this._reading = false
}
// Derive this entry's effective props: ustar header values,
// overridden by the frozen global pax header, overridden by the
// entry's own extended header.
Entry.prototype._setProps = function () {
  // props = extended->global->header->{}
  var header = this._header
    , extended = this._extended
    , global = this._global
    , props = this.props

  // first get the values from the normal header.
  // NOTE(review): assumes tar.fields maps consecutive numeric
  // indices to field names, terminated by a null entry.
  var fields = tar.fields
  for (var f = 0; fields[f] !== null; f ++) {
    var field = fields[f]
      , val = header[field]
    if (typeof val !== "undefined") props[field] = val
  }

  // next, the global header for this file.
  // numeric values, etc, will have already been parsed.
  ;[global, extended].forEach(function (p) {
    Object.keys(p).forEach(function (f) {
      if (typeof p[f] !== "undefined") props[f] = p[f]
    })
  })

  // no nulls allowed in path or linkpath
  ;["path", "linkpath"].forEach(function (p) {
    if (props.hasOwnProperty(p)) {
      props[p] = props[p].split("\0")[0]
    }
  })

  // set date fields to be a proper date (headers store epoch seconds)
  ;["mtime", "ctime", "atime"].forEach(function (p) {
    if (props.hasOwnProperty(p)) {
      props[p] = new Date(props[p] * 1000)
    }
  })

  // set the type so that we know what kind of file to create
  var type
  switch (tar.types[props.type]) {
    case "OldFile":
    case "ContiguousFile":
      type = "File"
      break

    case "GNUDumpDir":
      type = "Directory"
      break

    case undefined:
      type = "Unknown"
      break

    case "Link":
    case "SymbolicLink":
    case "CharacterDevice":
    case "BlockDevice":
    case "Directory":
    case "FIFO":
    default:
      type = tar.types[props.type]
  }

  this.type = type
  this.path = props.path
  this.size = props.size

  // size is special, since it signals when the file needs to end.
  this._remaining = props.size
}
// the parser may not call write if _abort is true.
// useful for skipping data from some files quickly.
Entry.prototype.abort = function () {
  this._abort = true
}

// Delegate warn/error helpers to fstream's shared implementations.
Entry.prototype.warn = fstream.warn
Entry.prototype.error = fstream.error
| module.exports = ExtendedHeaderWriter | ||
| var inherits = require("inherits") | ||
| , EntryWriter = require("./entry-writer.js") | ||
| inherits(ExtendedHeaderWriter, EntryWriter) | ||
| var tar = require("../tar.js") | ||
| , path = require("path") | ||
| , TarHeader = require("./header.js") | ||
| // props is the props of the thing we need to write an | ||
| // extended header for. | ||
| // Don't be shy with it. Just encode everything. | ||
// props is the props of the thing we need to write an
// extended header for.
// Don't be shy with it.  Just encode everything.
function ExtendedHeaderWriter (props) {
  var me = this
  if (!(me instanceof ExtendedHeaderWriter)) {
    return new ExtendedHeaderWriter(props)
  }

  // Keep the original props around; they become the pax body.
  me.fields = props

  // Synthesize the ustar header for the "x" (extended header) entry
  // itself; its path is a PaxHeader/ sibling of the real entry.
  var p =
    { path : ("PaxHeader" + path.join("/", props.path || ""))
             .replace(/\\/g, "/").substr(0, 100)
    , mode : props.mode || 0666
    , uid : props.uid || 0
    , gid : props.gid || 0
    , size : 0 // will be set later
    , mtime : props.mtime || Date.now() / 1000
    , type : "x"
    , linkpath : ""
    , ustar : "ustar\0"
    , ustarver : "00"
    , uname : props.uname || ""
    , gname : props.gname || ""
    , devmaj : props.devmaj || 0
    , devmin : props.devmin || 0
    }

  EntryWriter.call(me, p)
  me.props = p
  me._meta = true  // metadata entry: EntryWriter skips the body path
}
// Encode the pax records, then write header + body to the stream.
ExtendedHeaderWriter.prototype.end = function () {
  var me = this
  if (me._ended) return
  me._ended = true

  // Populates me.body and me.props.size.
  me._encodeFields()

  if (me.props.size === 0) {
    // nothing to write!
    me._ready = true
    me._stream.end()
    return
  }

  // Header goes first, now that the size is known.
  me._stream.write(TarHeader.encode(me.props))
  me.body.forEach(function (l) {
    me._stream.write(l)
  })

  me._ready = true
  this._stream.end()
}
// Build this.body (one buffer pair per pax record) and accumulate
// the total body length into props.size.
ExtendedHeaderWriter.prototype._encodeFields = function () {
  this.body = []

  // Fold a ustar prefix back into the path; pax paths aren't split.
  if (this.fields.prefix) {
    this.fields.path = this.fields.prefix + "/" + this.fields.path
    this.fields.prefix = ""
  }

  encodeFields(this.fields, "", this.body, this.fields.noProprietary)

  var me = this
  this.body.forEach(function (l) {
    me.props.size += l.length
  })
}
// Serialize every field into "%d %s=%s\n" pax records, recursing
// into nested objects with a dotted key prefix.  Records are
// appended to `body`, which is also returned.
function encodeFields (fields, prefix, body, nop) {
  Object.keys(fields).forEach(function (key) {
    var value = fields[key]
    if (prefix) key = prefix + "." + key

    // already including NODETAR.type, don't need File=true also
    if (key === fields.type && value === true) return

    switch (key) {
      // Skip anything the plain ustar header already handles fine,
      // or that is only meaningful inside nodetar itself.
      case "mode":
      case "cksum":
      case "ustar":
      case "ustarver":
      case "prefix":
      case "basename":
      case "dirname":
      case "needExtended":
      case "block":
      case "filter":
        return

      case "rdev":
        if (value === 0) return
        break

      // These travel under the SCHILY.* namespace.
      case "nlink":
      case "dev":
      case "ino":
        key = "SCHILY." + key
        break

      default:
        break
    }

    var isPlainObject = value && typeof value === "object" &&
                        !Buffer.isBuffer(value)
    if (isPlainObject) {
      encodeFields(value, key, body, nop)
    } else if (value !== null && value !== undefined) {
      body.push.apply(body, encodeField(key, value, nop))
    }
  })

  return body
}
// Encode one pax record: "%d %s=%s\n", where %d is the total record
// length in bytes (including the length digits themselves and the
// trailing \n).  Returns [lengthBuffer, recordBuffer], or [] when
// the key is proprietary and noProprietary (nop) is set.
function encodeField (k, v, nop) {
  // lowercase keys must be valid, otherwise prefix with
  // "NODETAR."
  if (k.charAt(0) === k.charAt(0).toLowerCase()) {
    var m = k.split(".")[0]
    if (!tar.knownExtended[m]) k = "NODETAR." + k
  }

  // no proprietary
  if (nop && k.charAt(0) !== k.charAt(0).toLowerCase()) {
    return []
  }

  // BUG FIX: previously read "typeof val"/"val.toString(10)", but no
  // "val" exists in this scope ("typeof" made it a silent no-op and
  // string concatenation below papered over it).  The parameter is v.
  if (typeof v === "number") v = v.toString(10)

  var s = new Buffer(" " + k + "=" + v + "\n")
    , digits = Math.floor(Math.log(s.length) / Math.log(10)) + 1

  // if adding that many digits will make it go over that length,
  // then add one to it. For example, if the string is:
  // " foo=bar\n"
  // then that's 9 characters. With the "9", that bumps the length
  // up to 10. However, this is invalid:
  // "10 foo=bar\n"
  // but, since that's actually 11 characters, since 10 adds another
  // character to the length, and the length includes the number
  // itself. In that case, just bump it up again.
  if (s.length + digits >= Math.pow(10, digits)) digits += 1

  var len = digits + s.length
  var lenBuf = new Buffer("" + len)

  // Sanity check: the decimal length must occupy exactly the digit
  // count we budgeted for.
  if (lenBuf.length + s.length !== len) {
    throw new Error("Bad length calculation\n"+
      "len="+len+"\n"+
      "lenBuf="+JSON.stringify(lenBuf.toString())+"\n"+
      "lenBuf.length="+lenBuf.length+"\n"+
      "digits="+digits+"\n"+
      "s="+JSON.stringify(s.toString())+"\n"+
      "s.length="+s.length)
  }

  return [lenBuf, s]
}
| // An Entry consisting of: | ||
| // | ||
| // "%d %s=%s\n", <length>, <keyword>, <value> | ||
| // | ||
| // The length is a decimal number, and includes itself and the \n | ||
| // \0 does not terminate anything. Only the length terminates the string. | ||
| // Numeric values are decimal strings. | ||
| module.exports = ExtendedHeader | ||
| var Entry = require("./entry.js") | ||
| , inherits = require("inherits") | ||
| , tar = require("../tar.js") | ||
| , numeric = tar.numeric | ||
| , keyTrans = { "SCHILY.dev": "dev" | ||
| , "SCHILY.ino": "ino" | ||
| , "SCHILY.nlink": "nlink" } | ||
// ExtendedHeader is an Entry whose body is a series of
// "%d %s=%s\n" pax records; _parse accumulates them into
// this.fields as the body streams through.
function ExtendedHeader () {
  Entry.apply(this, arguments)
  this.on("data", this._parse)
  this.fields = {}

  // byte offsets: overall, and within the current record
  this._position = 0
  this._fieldPos = 0

  this._state = SIZE
  this._sizeBuf = []
  this._keyBuf = []
  this._valBuf = []
  this._size = -1
  this._key = ""
}

inherits(ExtendedHeader, Entry)
ExtendedHeader.prototype._parse = parse
// Parser states for the pax-record state machine, exposed on
// ExtendedHeader.states for introspection.
var s = 0
  , states = ExtendedHeader.states = {}
  , SIZE = states.SIZE = s++
  , KEY = states.KEY = s++
  , VAL = states.VAL = s++
  , ERR = states.ERR = s++

// Also map each numeric value onto itself, and mark one-past-the-end
// as null.
Object.keys(states).forEach(function (s) {
  states[states[s]] = states[s]
})
states[s] = null

// char code values for comparison
// BUG FIX: "a" was declared twice in this chain; the duplicate
// declaration has been removed (no behavior change).
var _0 = "0".charCodeAt(0)
  , _9 = "9".charCodeAt(0)
  , point = ".".charCodeAt(0)
  , a = "a".charCodeAt(0)
  , Z = "Z".charCodeAt(0)
  , z = "z".charCodeAt(0)
  , space = " ".charCodeAt(0)
  , eq = "=".charCodeAt(0)
  , cr = "\n".charCodeAt(0)
// Consume a buffer of pax body bytes, advancing the
// SIZE -> KEY -> VAL state machine one byte at a time and recording
// each completed "%d %s=%s\n" record into this.fields.
function parse (c) {
  if (this._state === ERR) return

  for ( var i = 0, l = c.length
      ; i < l
      ; this._position++, this._fieldPos++, i++) {
    var b = c[i]

    // A record may never run past its declared length.
    if (this._size >= 0 && this._fieldPos > this._size) {
      error(this, "field exceeds length="+this._size)
      return
    }

    switch (this._state) {
      case ERR: return

      case SIZE:
        // Decimal digits up to the first space give the record length.
        if (b === space) {
          this._state = KEY
          this._size = parseInt(new Buffer(this._sizeBuf).toString(), 10)
          this._sizeBuf.length = 0
          continue
        }
        if (b < _0 || b > _9) {
          error(this, "expected [" + _0 + ".." + _9 + "], got " + b)
          return
        }
        this._sizeBuf.push(b)
        continue

      case KEY:
        // can be any char except =, not > size.
        if (b === eq) {
          this._state = VAL
          this._key = new Buffer(this._keyBuf).toString()
          // Translate SCHILY.* aliases to their plain names.
          if (keyTrans[this._key]) this._key = keyTrans[this._key]
          this._keyBuf.length = 0
          continue
        }
        this._keyBuf.push(b)
        continue

      case VAL:
        // field must end with \n exactly at the declared length
        if (this._fieldPos === this._size - 1) {
          if (b !== cr) {
            error(this, "expected \\n at end of field")
            return
          }
          var val = new Buffer(this._valBuf).toString()
          if (numeric[this._key]) {
            val = parseFloat(val)
          }
          this.fields[this._key] = val

          // Reset for the next record; -1 because the loop increment
          // brings _fieldPos back to 0.
          this._valBuf.length = 0
          this._state = SIZE
          this._size = -1
          this._fieldPos = -1
          continue
        }
        this._valBuf.push(b)
        continue
    }
  }
}
// Report a malformed pax header via me.error() (with stream/field
// offsets appended) and move the parser into its terminal state.
function error (me, msg) {
  msg = "invalid header: " + msg
    + "\nposition=" + me._position
    + "\nfield position=" + me._fieldPos

  me.error(msg)
  // BUG FIX: previously assigned me.state, but parse() checks
  // me._state, so the parser never actually stopped after an error.
  me._state = ERR
}
| module.exports = GlobalHeaderWriter | ||
| var ExtendedHeaderWriter = require("./extended-header-writer.js") | ||
| , inherits = require("inherits") | ||
| inherits(GlobalHeaderWriter, ExtendedHeaderWriter) | ||
// A GlobalHeaderWriter is an ExtendedHeaderWriter whose entry type
// is "g", so its records apply to all subsequent archive entries.
function GlobalHeaderWriter (props) {
  if (!(this instanceof GlobalHeaderWriter)) {
    return new GlobalHeaderWriter(props)
  }
  ExtendedHeaderWriter.call(this, props)
  this.props.type = "g"
}
| // the fixtures have some weird stuff that is painful | ||
| // to include directly in the repo for various reasons. | ||
| // | ||
| // So, unpack the fixtures with the system tar first. | ||
| // | ||
| // This means, of course, that it'll only work if you | ||
| // already have a tar implementation, and some of them | ||
| // will not properly unpack the fixtures anyway. | ||
| // | ||
| // But, since usually those tests will fail on Windows | ||
| // and other systems with less capable filesystems anyway, | ||
| // at least this way we don't cause inconveniences by | ||
| // merely cloning the repo or installing the package. | ||
| var tap = require("tap") | ||
| , child_process = require("child_process") | ||
| , rimraf = require("rimraf") | ||
| , test = tap.test | ||
| , path = require("path") | ||
// Each setup step is its own tap test so failures report separately.
test("clean fixtures", function (t) {
  rimraf(path.resolve(__dirname, "fixtures"), function (er) {
    t.ifError(er, "rimraf ./fixtures/")
    t.end()
  })
})

test("clean tmp", function (t) {
  rimraf(path.resolve(__dirname, "tmp"), function (er) {
    t.ifError(er, "rimraf ./tmp/")
    t.end()
  })
})

test("extract fixtures", function (t) {
  // Unpack with the system tar; see the caveats in the header
  // comment at the top of this file.
  var c = child_process.spawn("tar"
         ,["xzvf", "fixtures.tgz"]
         ,{ cwd: __dirname })

  c.stdout.on("data", errwrite)
  c.stderr.on("data", errwrite)
  function errwrite (chunk) {
    process.stderr.write(chunk)
  }

  c.on("exit", function (code) {
    t.equal(code, 0, "extract fixtures should exit with 0")
    if (code) {
      t.comment("Note, all tests from here on out will fail because of this.")
    }
    t.end()
  })
})
Sorry, the diff of this file is not supported yet
| // Set the umask, so that it works the same everywhere. | ||
| process.umask(parseInt('22', 8)) | ||
| var fs = require('fs') | ||
| var path = require('path') | ||
| var fstream = require('fstream') | ||
| var test = require('tap').test | ||
| var tar = require('../tar.js') | ||
| var file = path.resolve(__dirname, 'dir-normalization.tar') | ||
| var target = path.resolve(__dirname, 'tmp/dir-normalization-test') | ||
| var ee = 0 | ||
| var expectEntries = [ | ||
| { path: 'fixtures/', | ||
| mode: '755', | ||
| type: '5', | ||
| linkpath: '' | ||
| }, | ||
| { path: 'fixtures/a/', | ||
| mode: '755', | ||
| type: '5', | ||
| linkpath: '' | ||
| }, | ||
| { path: 'fixtures/the-chumbler', | ||
| mode: '755', | ||
| type: '2', | ||
| linkpath: path.resolve(target, 'a/b/c/d/the-chumbler'), | ||
| }, | ||
| { path: 'fixtures/a/b/', | ||
| mode: '755', | ||
| type: '5', | ||
| linkpath: '' | ||
| }, | ||
| { path: 'fixtures/a/x', | ||
| mode: '644', | ||
| type: '0', | ||
| linkpath: '' | ||
| }, | ||
| { path: 'fixtures/a/b/c/', | ||
| mode: '755', | ||
| type: '5', | ||
| linkpath: '' | ||
| }, | ||
| { path: 'fixtures/a/b/c/y', | ||
| mode: '755', | ||
| type: '2', | ||
| linkpath: '../../x', | ||
| } | ||
| ] | ||
| var ef = 0 | ||
| var expectFiles = [ | ||
| { path: '', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 0, | ||
| linkpath: undefined | ||
| }, | ||
| { path: '/fixtures', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 1, | ||
| linkpath: undefined | ||
| }, | ||
| { path: '/fixtures/a', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 2, | ||
| linkpath: undefined | ||
| }, | ||
| { path: '/fixtures/a/b', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 3, | ||
| linkpath: undefined | ||
| }, | ||
| { path: '/fixtures/a/b/c', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 4, | ||
| linkpath: undefined | ||
| }, | ||
| { path: '/fixtures/a/b/c/y', | ||
| mode: '120755', | ||
| type: 'SymbolicLink', | ||
| depth: 5, | ||
| linkpath: '../../x' | ||
| }, | ||
| { path: '/fixtures/a/x', | ||
| mode: '100644', | ||
| type: 'File', | ||
| depth: 3, | ||
| linkpath: undefined | ||
| }, | ||
| { path: '/fixtures/the-chumbler', | ||
| mode: '120755', | ||
| type: 'SymbolicLink', | ||
| depth: 2, | ||
| linkpath: path.resolve(target, 'a/b/c/d/the-chumbler') | ||
| } | ||
| ] | ||
test('preclean', function (t) {
  require('rimraf').sync(path.join(__dirname, '/tmp/dir-normalization-test'))
  t.pass('cleaned!')
  t.end()
})

test('extract test', function (t) {
  var extract = tar.Extract(target)
  var inp = fs.createReadStream(file)

  inp.pipe(extract)

  extract.on('end', function () {
    t.equal(ee, expectEntries.length, 'should see ' + expectEntries.length + ' entries')

    // should get no more entries after end
    extract.removeAllListeners('entry')
    extract.on('entry', function (e) {
      t.fail('Should not get entries after end!')
    })

    next()
  })

  // Phase one: compare each parsed tar entry against expectEntries.
  extract.on('entry', function (entry) {
    // Mask out the umask bits so expectations are host-independent.
    var mode = entry.props.mode & (~parseInt('22', 8))
    var found = {
      path: entry.path,
      mode: mode.toString(8),
      type: entry.props.type,
      linkpath: entry.props.linkpath,
    }
    var wanted = expectEntries[ee++]
    t.equivalent(found, wanted, 'tar entry ' + ee + ' ' + (wanted && wanted.path))
  })

  // Phase two: walk what was actually unpacked on disk and compare
  // against expectFiles.
  function next () {
    var r = fstream.Reader({
      path: target,
      type: 'Directory',
      sort: 'alpha'
    })

    r.on('ready', function () {
      foundEntry(r)
    })

    r.on('end', finish)

    function foundEntry (entry) {
      var p = entry.path.substr(target.length)
      var mode = entry.props.mode & (~parseInt('22', 8))
      var found = {
        path: p,
        mode: mode.toString(8),
        type: entry.props.type,
        depth: entry.props.depth,
        linkpath: entry.props.linkpath
      }
      var wanted = expectFiles[ef++]
      t.equivalent(found, wanted, 'unpacked file ' + ef + ' ' + (wanted && wanted.path))

      entry.on('entry', foundEntry)
    }

    function finish () {
      t.equal(ef, expectFiles.length, 'should have ' + ef + ' items')
      t.end()
    }
  }
})
Sorry, the diff of this file is not supported yet
| var fs = require('fs') | ||
| var path = require('path') | ||
| var zlib = require('zlib') | ||
| var tap = require('tap') | ||
| var tar = require('../tar.js') | ||
| var file = path.join(__dirname, 'cb-never-called-1.0.1.tgz') | ||
| var target = path.join(__dirname, 'tmp/extract-test') | ||
tap.test('preclean', function (t) {
  require('rimraf').sync(__dirname + '/tmp/extract-test')
  t.pass('cleaned!')
  t.end()
})

tap.test('extract test', function (t) {
  var extract = tar.Extract(target)
  var inp = fs.createReadStream(file)

  inp.pipe(zlib.createGunzip()).pipe(extract)

  // The fixture tarball is truncated, so the only acceptable outcome
  // is an "unexpected eof" error; reaching "end" would mean the
  // failure was silently swallowed.
  extract.on('error', function (er) {
    t.equal(er.message, 'unexpected eof', 'error noticed')
    t.end()
  })

  extract.on('end', function () {
    t.fail('shouldn\'t reach this point due to errors')
    t.end()
  })
})
| // Set the umask, so that it works the same everywhere. | ||
| process.umask(parseInt('22', 8)) | ||
| var tap = require("tap") | ||
| , tar = require("../tar.js") | ||
| , fs = require("fs") | ||
| , gfs = require("graceful-fs") | ||
| , path = require("path") | ||
| , file = path.resolve(__dirname, "fixtures/dir.tar") | ||
| , target = path.resolve(__dirname, "tmp/extract-test") | ||
| , index = 0 | ||
| , fstream = require("fstream") | ||
| , rimraf = require("rimraf") | ||
| , mkdirp = require("mkdirp") | ||
| , ee = 0 | ||
| , expectEntries = [ | ||
| { | ||
| "path" : "dir/", | ||
| "mode" : "750", | ||
| "type" : "5", | ||
| "depth" : undefined, | ||
| "size" : 0, | ||
| "linkpath" : "", | ||
| "nlink" : undefined, | ||
| "dev" : undefined, | ||
| "ino" : undefined | ||
| }, | ||
| { | ||
| "path" : "dir/sub/", | ||
| "mode" : "750", | ||
| "type" : "5", | ||
| "depth" : undefined, | ||
| "size" : 0, | ||
| "linkpath" : "", | ||
| "nlink" : undefined, | ||
| "dev" : undefined, | ||
| "ino" : undefined | ||
| } ] | ||
// Wrap fs[method] so each call is delayed by t1 ms before dispatch
// and its callback by a further t2 ms after completion -- widening
// race windows for the extract test.  Returns null when the method
// doesn't exist on the given fs object.
function slow (fs, method, t1, t2) {
  var original = fs[method]
  if (!original) return null

  fs[method] = function () {
    var args = Array.prototype.slice.call(arguments)
    console.error("slow", method, args[0])
    var cb = args.pop()

    setTimeout(function () {
      var delayedCb = function (er, data) {
        setTimeout(function () {
          cb(er, data)
        }, t2)
      }
      original.apply(fs, args.concat(delayedCb))
    }, t1)
  }
}
// Make sure we get the graceful-fs that fstream is using.
var gfs2
try {
  gfs2 = require("fstream/node_modules/graceful-fs")
} catch (er) {}

// Delay the metadata-setting calls on every fs flavor in play so
// that races between entry writes and finalization get a chance to
// manifest.
var slowMethods = ["chown", "chmod", "utimes", "lutimes"]
slowMethods.forEach(function (method) {
  var t1 = 500
  var t2 = 0
  slow(fs, method, t1, t2)
  slow(gfs, method, t1, t2)
  if (gfs2) {
    slow(gfs2, method, t1, t2)
  }
})
| // The extract class basically just pipes the input | ||
| // to a Reader, and then to a fstream.DirWriter | ||
| // So, this is as much a test of fstream.Reader and fstream.Writer | ||
| // as it is of tar.Extract, but it sort of makes sense. | ||
// Reset the extraction target to a clean, empty directory.
tap.test("preclean", function (t) {
  rimraf.sync(target)
  // BUG FIX: this line previously read "/mkdirp.sync(target)" -- the
  // stray leading "/" started an unterminated regex literal and made
  // this file a syntax error.
  mkdirp.sync(target)
  t.pass("cleaned!")
  t.end()
})
tap.test("extract test", function (t) {
  var extract = tar.Extract(target)
  var inp = fs.createReadStream(file)

  // give it a weird buffer size to try to break in odd places
  inp.bufferSize = 1234

  inp.pipe(extract)

  extract.on("end", function () {
    // Remove the target while the slowed-down chown/chmod/utimes
    // calls (see slow() above) may still be pending.
    rimraf.sync(target)

    t.equal(ee, expectEntries.length, "should see "+ee+" entries")

    // should get no more entries after end
    extract.removeAllListeners("entry")
    extract.on("entry", function (e) {
      t.fail("Should not get entries after end!")
    })

    t.end()
  })

  // Compare each parsed entry against the expectEntries fixtures.
  extract.on("entry", function (entry) {
    var found =
      { path: entry.path
      , mode: entry.props.mode.toString(8)
      , type: entry.props.type
      , depth: entry.props.depth
      , size: entry.props.size
      , linkpath: entry.props.linkpath
      , nlink: entry.props.nlink
      , dev: entry.props.dev
      , ino: entry.props.ino
      }

    var wanted = expectEntries[ee ++]
    t.equivalent(found, wanted, "tar entry " + ee + " " + wanted.path)
  })
})
-367
| // Set the umask, so that it works the same everywhere. | ||
| process.umask(parseInt('22', 8)) | ||
| var tap = require("tap") | ||
| , tar = require("../tar.js") | ||
| , fs = require("fs") | ||
| , path = require("path") | ||
| , file = path.resolve(__dirname, "fixtures/c.tar") | ||
| , target = path.resolve(__dirname, "tmp/extract-test") | ||
| , index = 0 | ||
| , fstream = require("fstream") | ||
| , ee = 0 | ||
| , expectEntries = | ||
| [ { path: 'c.txt', | ||
| mode: '644', | ||
| type: '0', | ||
| depth: undefined, | ||
| size: 513, | ||
| linkpath: '', | ||
| nlink: undefined, | ||
| dev: undefined, | ||
| ino: undefined }, | ||
| { path: 'cc.txt', | ||
| mode: '644', | ||
| type: '0', | ||
| depth: undefined, | ||
| size: 513, | ||
| linkpath: '', | ||
| nlink: undefined, | ||
| dev: undefined, | ||
| ino: undefined }, | ||
| { path: 'r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-/p/a/t/h/cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| mode: '644', | ||
| type: '0', | ||
| depth: undefined, | ||
| size: 100, | ||
| linkpath: '', | ||
| nlink: undefined, | ||
| dev: undefined, | ||
| ino: undefined }, | ||
| { path: 'Ω.txt', | ||
| mode: '644', | ||
| type: '0', | ||
| depth: undefined, | ||
| size: 2, | ||
| linkpath: '', | ||
| nlink: undefined, | ||
| dev: undefined, | ||
| ino: undefined }, | ||
| { path: 'Ω.txt', | ||
| mode: '644', | ||
| type: '0', | ||
| depth: undefined, | ||
| size: 2, | ||
| linkpath: '', | ||
| nlink: 1, | ||
| dev: 234881026, | ||
| ino: 51693379 }, | ||
| { path: '200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| mode: '644', | ||
| type: '0', | ||
| depth: undefined, | ||
| size: 200, | ||
| linkpath: '', | ||
| nlink: 1, | ||
| dev: 234881026, | ||
| ino: 51681874 }, | ||
| { path: '200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| mode: '644', | ||
| type: '0', | ||
| depth: undefined, | ||
| size: 201, | ||
| linkpath: '', | ||
| nlink: undefined, | ||
| dev: undefined, | ||
| ino: undefined }, | ||
| { path: '200LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL', | ||
| mode: '777', | ||
| type: '2', | ||
| depth: undefined, | ||
| size: 0, | ||
| linkpath: '200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| nlink: undefined, | ||
| dev: undefined, | ||
| ino: undefined }, | ||
| { path: '200-hard', | ||
| mode: '644', | ||
| type: '0', | ||
| depth: undefined, | ||
| size: 200, | ||
| linkpath: '', | ||
| nlink: 2, | ||
| dev: 234881026, | ||
| ino: 51681874 }, | ||
| { path: '200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| mode: '644', | ||
| type: '1', | ||
| depth: undefined, | ||
| size: 0, | ||
| linkpath: path.resolve(target, '200-hard'), | ||
| nlink: 2, | ||
| dev: 234881026, | ||
| ino: 51681874 } ] | ||
| , ef = 0 | ||
| , expectFiles = | ||
| [ { path: '', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 0, | ||
| linkpath: undefined }, | ||
| { path: '/200-hard', | ||
| mode: '100644', | ||
| type: 'File', | ||
| depth: 1, | ||
| size: 200, | ||
| linkpath: undefined, | ||
| nlink: 2 }, | ||
| { path: '/200LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL', | ||
| mode: '120777', | ||
| type: 'SymbolicLink', | ||
| depth: 1, | ||
| size: 200, | ||
| linkpath: '200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| nlink: 1 }, | ||
| { path: '/200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| mode: '100644', | ||
| type: 'Link', | ||
| depth: 1, | ||
| size: 200, | ||
| linkpath: path.join(target, '200-hard'), | ||
| nlink: 2 }, | ||
| { path: '/c.txt', | ||
| mode: '100644', | ||
| type: 'File', | ||
| depth: 1, | ||
| size: 513, | ||
| linkpath: undefined, | ||
| nlink: 1 }, | ||
| { path: '/cc.txt', | ||
| mode: '100644', | ||
| type: 'File', | ||
| depth: 1, | ||
| size: 513, | ||
| linkpath: undefined, | ||
| nlink: 1 }, | ||
| { path: '/r', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 1, | ||
| linkpath: undefined }, | ||
| { path: '/r/e', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 2, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 3, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 4, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l/l', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 5, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l/l/y', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 6, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l/l/y/-', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 7, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l/l/y/-/d', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 8, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l/l/y/-/d/e', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 9, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l/l/y/-/d/e/e', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 10, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l/l/y/-/d/e/e/p', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 11, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l/l/y/-/d/e/e/p/-', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 12, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l/l/y/-/d/e/e/p/-/f', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 13, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l/l/y/-/d/e/e/p/-/f/o', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 14, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 15, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 16, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 17, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 18, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 19, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-/p', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 20, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-/p/a', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 21, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-/p/a/t', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 22, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-/p/a/t/h', | ||
| mode: '40755', | ||
| type: 'Directory', | ||
| depth: 23, | ||
| linkpath: undefined }, | ||
| { path: '/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-/p/a/t/h/cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| mode: '100644', | ||
| type: 'File', | ||
| depth: 24, | ||
| size: 100, | ||
| linkpath: undefined, | ||
| nlink: 1 }, | ||
| { path: '/Ω.txt', | ||
| mode: '100644', | ||
| type: 'File', | ||
| depth: 1, | ||
| size: 2, | ||
| linkpath: undefined, | ||
| nlink: 1 } ] | ||
| // The extract class basically just pipes the input | ||
| // to a Reader, and then to a fstream.DirWriter | ||
| // So, this is as much a test of fstream.Reader and fstream.Writer | ||
| // as it is of tar.Extract, but it sort of makes sense. | ||
| tap.test("preclean", function (t) { | ||
| require("rimraf").sync(__dirname + "/tmp/extract-test") | ||
| t.pass("cleaned!") | ||
| t.end() | ||
| }) | ||
| tap.test("extract test", function (t) { | ||
| var extract = tar.Extract(target) | ||
| var inp = fs.createReadStream(file) | ||
| // give it a weird buffer size to try to break in odd places | ||
| inp.bufferSize = 1234 | ||
| inp.pipe(extract) | ||
| extract.on("end", function () { | ||
| t.equal(ee, expectEntries.length, "should see "+ee+" entries") | ||
| // should get no more entries after end | ||
| extract.removeAllListeners("entry") | ||
| extract.on("entry", function (e) { | ||
| t.fail("Should not get entries after end!") | ||
| }) | ||
| next() | ||
| }) | ||
| extract.on("entry", function (entry) { | ||
| var found = | ||
| { path: entry.path | ||
| , mode: entry.props.mode.toString(8) | ||
| , type: entry.props.type | ||
| , depth: entry.props.depth | ||
| , size: entry.props.size | ||
| , linkpath: entry.props.linkpath | ||
| , nlink: entry.props.nlink | ||
| , dev: entry.props.dev | ||
| , ino: entry.props.ino | ||
| } | ||
| var wanted = expectEntries[ee ++] | ||
| t.equivalent(found, wanted, "tar entry " + ee + " " + wanted.path) | ||
| }) | ||
| function next () { | ||
| var r = fstream.Reader({ path: target | ||
| , type: "Directory" | ||
| // this is just to encourage consistency | ||
| , sort: "alpha" }) | ||
| r.on("ready", function () { | ||
| foundEntry(r) | ||
| }) | ||
| r.on("end", finish) | ||
| function foundEntry (entry) { | ||
| var p = entry.path.substr(target.length) | ||
| var found = | ||
| { path: p | ||
| , mode: entry.props.mode.toString(8) | ||
| , type: entry.props.type | ||
| , depth: entry.props.depth | ||
| , size: entry.props.size | ||
| , linkpath: entry.props.linkpath | ||
| , nlink: entry.props.nlink | ||
| } | ||
| var wanted = expectFiles[ef ++] | ||
| t.has(found, wanted, "unpacked file " + ef + " " + wanted.path) | ||
| entry.on("entry", foundEntry) | ||
| } | ||
| function finish () { | ||
| t.equal(ef, expectFiles.length, "should have "+ef+" items") | ||
| t.end() | ||
| } | ||
| } | ||
| }) |
Sorry, the diff of this file is not supported yet
-183
| var tap = require("tap") | ||
| var TarHeader = require("../lib/header.js") | ||
| var tar = require("../tar.js") | ||
| var fs = require("fs") | ||
| var headers = | ||
| { "a.txt file header": | ||
| [ "612e747874000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030303036343420003035373736312000303030303234200030303030303030303430312031313635313336303333332030313234353100203000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000757374617200303069736161637300000000000000000000000000000000000000000000000000007374616666000000000000000000000000000000000000000000000000000000303030303030200030303030303020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" | ||
| , { cksumValid: true | ||
| , path: 'a.txt' | ||
| , mode: 420 | ||
| , uid: 24561 | ||
| , gid: 20 | ||
| , size: 257 | ||
| , mtime: 1319493851 | ||
| , cksum: 5417 | ||
| , type: '0' | ||
| , linkpath: '' | ||
| , ustar: 'ustar\0' | ||
| , ustarver: '00' | ||
| , uname: 'isaacs' | ||
| , gname: 'staff' | ||
| , devmaj: 0 | ||
| , devmin: 0 | ||
| , fill: '' } | ||
| ] | ||
| , "omega pax": // the extended header from omega tar. | ||
| [ "5061784865616465722fcea92e74787400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030303036343420003035373736312000303030303234200030303030303030303137302031313534333731303631312030313530353100207800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000757374617200303069736161637300000000000000000000000000000000000000000000000000007374616666000000000000000000000000000000000000000000000000000000303030303030200030303030303020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" | ||
| , { cksumValid: true | ||
| , path: 'PaxHeader/Ω.txt' | ||
| , mode: 420 | ||
| , uid: 24561 | ||
| , gid: 20 | ||
| , size: 120 | ||
| , mtime: 1301254537 | ||
| , cksum: 6697 | ||
| , type: 'x' | ||
| , linkpath: '' | ||
| , ustar: 'ustar\0' | ||
| , ustarver: '00' | ||
| , uname: 'isaacs' | ||
| , gname: 'staff' | ||
| , devmaj: 0 | ||
| , devmin: 0 | ||
| , fill: '' } ] | ||
| , "omega file header": | ||
| [ "cea92e7478740000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030303036343420003035373736312000303030303234200030303030303030303030322031313534333731303631312030313330373200203000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000757374617200303069736161637300000000000000000000000000000000000000000000000000007374616666000000000000000000000000000000000000000000000000000000303030303030200030303030303020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" | ||
| , { cksumValid: true | ||
| , path: 'Ω.txt' | ||
| , mode: 420 | ||
| , uid: 24561 | ||
| , gid: 20 | ||
| , size: 2 | ||
| , mtime: 1301254537 | ||
| , cksum: 5690 | ||
| , type: '0' | ||
| , linkpath: '' | ||
| , ustar: 'ustar\0' | ||
| , ustarver: '00' | ||
| , uname: 'isaacs' | ||
| , gname: 'staff' | ||
| , devmaj: 0 | ||
| , devmin: 0 | ||
| , fill: '' } ] | ||
| , "foo.js file header": | ||
| [ "666f6f2e6a730000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030303036343420003035373736312000303030303234200030303030303030303030342031313534333637303734312030313236313700203000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000757374617200303069736161637300000000000000000000000000000000000000000000000000007374616666000000000000000000000000000000000000000000000000000000303030303030200030303030303020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" | ||
| , { cksumValid: true | ||
| , path: 'foo.js' | ||
| , mode: 420 | ||
| , uid: 24561 | ||
| , gid: 20 | ||
| , size: 4 | ||
| , mtime: 1301246433 | ||
| , cksum: 5519 | ||
| , type: '0' | ||
| , linkpath: '' | ||
| , ustar: 'ustar\0' | ||
| , ustarver: '00' | ||
| , uname: 'isaacs' | ||
| , gname: 'staff' | ||
| , devmaj: 0 | ||
| , devmin: 0 | ||
| , fill: '' } | ||
| ] | ||
| , "b.txt file header": | ||
| [ "622e747874000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030303036343420003035373736312000303030303234200030303030303030313030302031313635313336303637372030313234363100203000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000757374617200303069736161637300000000000000000000000000000000000000000000000000007374616666000000000000000000000000000000000000000000000000000000303030303030200030303030303020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" | ||
| , { cksumValid: true | ||
| , path: 'b.txt' | ||
| , mode: 420 | ||
| , uid: 24561 | ||
| , gid: 20 | ||
| , size: 512 | ||
| , mtime: 1319494079 | ||
| , cksum: 5425 | ||
| , type: '0' | ||
| , linkpath: '' | ||
| , ustar: 'ustar\0' | ||
| , ustarver: '00' | ||
| , uname: 'isaacs' | ||
| , gname: 'staff' | ||
| , devmaj: 0 | ||
| , devmin: 0 | ||
| , fill: '' } | ||
| ] | ||
| , "deep nested file": | ||
| [ "636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363636363633030303634342000303537373631200030303030323420003030303030303030313434203131363532313531353333203034333331340020300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000075737461720030306973616163730000000000000000000000000000000000000000000000000000737461666600000000000000000000000000000000000000000000000000000030303030303020003030303030302000722f652f612f6c2f6c2f792f2d2f642f652f652f702f2d2f662f6f2f6c2f642f652f722f2d2f702f612f742f680000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" | ||
| , { cksumValid: true, | ||
| path: 'r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-/p/a/t/h/cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc' | ||
| , mode: 420 | ||
| , uid: 24561 | ||
| , gid: 20 | ||
| , size: 100 | ||
| , mtime: 1319687003 | ||
| , cksum: 18124 | ||
| , type: '0' | ||
| , linkpath: '' | ||
| , ustar: 'ustar\0' | ||
| , ustarver: '00' | ||
| , uname: 'isaacs' | ||
| , gname: 'staff' | ||
| , devmaj: 0 | ||
| , devmin: 0 | ||
| , fill: '' } | ||
| ] | ||
| } | ||
| tap.test("parsing", function (t) { | ||
| Object.keys(headers).forEach(function (name) { | ||
| var h = headers[name] | ||
| , header = new Buffer(h[0], "hex") | ||
| , expect = h[1] | ||
| , parsed = new TarHeader(header) | ||
| // console.error(parsed) | ||
| t.has(parsed, expect, "parse " + name) | ||
| }) | ||
| t.end() | ||
| }) | ||
| tap.test("encoding", function (t) { | ||
| Object.keys(headers).forEach(function (name) { | ||
| var h = headers[name] | ||
| , expect = new Buffer(h[0], "hex") | ||
| , encoded = TarHeader.encode(h[1]) | ||
| // might have slightly different bytes, since the standard | ||
| // isn't very strict, but should have the same semantics | ||
| // checkSum will be different, but cksumValid will be true | ||
| var th = new TarHeader(encoded) | ||
| delete h[1].block | ||
| delete h[1].needExtended | ||
| delete h[1].cksum | ||
| t.has(th, h[1], "fields "+name) | ||
| }) | ||
| t.end() | ||
| }) | ||
| // test these manually. they're a bit rare to find in the wild | ||
| tap.test("parseNumeric tests", function (t) { | ||
| var parseNumeric = TarHeader.parseNumeric | ||
| , numbers = | ||
| { "303737373737373700": 2097151 | ||
| , "30373737373737373737373700": 8589934591 | ||
| , "303030303036343400": 420 | ||
| , "800000ffffffffffff": 281474976710655 | ||
| , "ffffff000000000001": -281474976710654 | ||
| , "ffffff000000000000": -281474976710655 | ||
| , "800000000000200000": 2097152 | ||
| , "8000000000001544c5": 1393861 | ||
| , "ffffffffffff1544c5": -15383354 } | ||
| Object.keys(numbers).forEach(function (n) { | ||
| var b = new Buffer(n, "hex") | ||
| t.equal(parseNumeric(b), numbers[n], n + " === " + numbers[n]) | ||
| }) | ||
| t.end() | ||
| }) |
| // This is exactly like test/pack.js, except that it's excluding | ||
| // any proprietary headers. | ||
| // | ||
| // This loses some information about the filesystem, but creates | ||
| // tarballs that are supported by more versions of tar, especially | ||
| // old non-spec-compliant copies of gnutar. | ||
| // the symlink file is excluded from git, because it makes | ||
| // windows freak the hell out. | ||
| var fs = require("fs") | ||
| , path = require("path") | ||
| , symlink = path.resolve(__dirname, "fixtures/symlink") | ||
| try { fs.unlinkSync(symlink) } catch (e) {} | ||
| fs.symlinkSync("./hardlink-1", symlink) | ||
| process.on("exit", function () { | ||
| fs.unlinkSync(symlink) | ||
| }) | ||
| var tap = require("tap") | ||
| , tar = require("../tar.js") | ||
| , pkg = require("../package.json") | ||
| , Pack = tar.Pack | ||
| , fstream = require("fstream") | ||
| , Reader = fstream.Reader | ||
| , Writer = fstream.Writer | ||
| , input = path.resolve(__dirname, "fixtures/") | ||
| , target = path.resolve(__dirname, "tmp/pack.tar") | ||
| , uid = process.getuid ? process.getuid() : 0 | ||
| , gid = process.getgid ? process.getgid() : 0 | ||
| , entries = | ||
| // the global header and root fixtures/ dir are going to get | ||
| // a different date each time, so omit that bit. | ||
| // Also, dev/ino values differ across machines, so that's not | ||
| // included. | ||
| [ [ 'entry', | ||
| { path: 'fixtures/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'extendedHeader', | ||
| { path: 'PaxHeader/fixtures/200cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| type: 'x', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' }, | ||
| { path: 'fixtures/200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 200 } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 200, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/a.txt', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 257, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/b.txt', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 512, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/c.txt', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 513, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/cc.txt', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 513, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/dir/', | ||
| mode: 488, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/dir/sub/', | ||
| mode: 488, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/foo.js', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 4, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/hardlink-1', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 200, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/hardlink-2', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '1', | ||
| linkpath: 'fixtures/hardlink-1', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/omega.txt', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 2, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/packtest/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/packtest/omega.txt', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 2, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/packtest/star.4.html', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 54081, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'extendedHeader', | ||
| { path: 'PaxHeader/fixtures/packtest/Ω.txt', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| type: 'x', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' }, | ||
| { path: 'fixtures/packtest/Ω.txt', | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 2 } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/packtest/Ω.txt', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 2, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-/p/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-/p/a/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-/p/a/t/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-/p/a/t/h/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-/p/a/t/h/cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 100, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/symlink', | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '2', | ||
| linkpath: 'hardlink-1', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'extendedHeader', | ||
| { path: 'PaxHeader/fixtures/Ω.txt', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| type: 'x', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' }, | ||
| { path: "fixtures/Ω.txt" | ||
| , uid: uid | ||
| , gid: gid | ||
| , size: 2 } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/Ω.txt', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 2, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| ] | ||
// first, make sure that the hardlinks are actually hardlinks, or this
// won't work. Git has a way of replacing them with a copy.
// (re-link hardlink-2 to hardlink-1 so the pack step sees a real
// type-'1' hard link entry; unlink first in case a stale copy exists.)
var hard1 = path.resolve(__dirname, "fixtures/hardlink-1")
  , hard2 = path.resolve(__dirname, "fixtures/hardlink-2")
  , fs = require("fs")
try { fs.unlinkSync(hard2) } catch (e) {}
fs.linkSync(hard1, hard2)

// Run the same round-trip twice: once emitting the NODETAR global
// extended header, once without it.
tap.test("with global header", { timeout: 10000 }, function (t) {
  runTest(t, true)
})

tap.test("without global header", { timeout: 10000 }, function (t) {
  runTest(t, false)
})
// Case-insensitive comparator for fstream's sort option; falls back to a
// case-sensitive comparison when the lowercased forms are equal, so the
// ordering is total and deterministic across platforms.
function alphasort (a, b) {
  if (a === b) return 0
  var la = a.toLowerCase()
  var lb = b.toLowerCase()
  if (la !== lb) return la > lb ? 1 : -1
  return a > b ? 1 : -1
}
// Pack the fixtures/ tree, feed the resulting tar byte stream back through
// tar.Parse, and verify each parsed header against the file-level `entries`
// expectation table, in order.
// doGH: when true, pass package.json as Pack props so a 'g' (global
// extended header) record is emitted at the front of the archive.
function runTest (t, doGH) {
  // Read fixtures/, skipping the pre-built .tar/.hex archive fixtures and
  // sorting case-insensitively so entry order is stable across filesystems.
  var reader = Reader({ path: input
                      , filter: function () {
                          return !this.path.match(/\.(tar|hex)$/)
                        }
                      , sort: alphasort
                      })

  var props = doGH ? pkg : {}
  // suppress NODETAR.* proprietary pax fields in this variant of the test
  props.noProprietary = true
  var pack = Pack(props)
  var writer = Writer(target)

  // global header should be skipped regardless, since it has no content.
  var entry = 0

  t.ok(reader, "reader ok")
  t.ok(pack, "pack ok")
  t.ok(writer, "writer ok")

  // tee the packed bytes: to disk via writer, and into the parser below
  pack.pipe(writer)

  var parse = tar.Parse()
  t.ok(parse, "parser should be ok")

  pack.on("data", function (c) {
    // console.error("PACK DATA")
    // tar output must come in whole 512-byte blocks
    if (c.length !== 512) {
      // this one is too noisy, only assert if it'll be relevant
      t.equal(c.length, 512, "parser should emit data in 512byte blocks")
    }
    parse.write(c)
  })

  pack.on("end", function () {
    // console.error("PACK END")
    t.pass("parser ends")
    parse.end()
  })

  pack.on("error", function (er) {
    t.fail("pack error", er)
  })

  parse.on("error", function (er) {
    t.fail("parse error", er)
  })

  writer.on("error", function (er) {
    t.fail("writer error", er)
  })

  reader.on("error", function (er) {
    t.fail("reader error", er)
  })

  // "*" fires for every parse event type (entry, extendedHeader, ...);
  // compare each against the next row of the expectation table.
  parse.on("*", function (ev, e) {
    var wanted = entries[entry++]
    if (!wanted) {
      t.fail("unexpected event: "+ev)
      return
    }
    t.equal(ev, wanted[0], "event type should be "+wanted[0])

    // on mismatch, dump diagnostics and bail hard once the entry ends
    if (ev !== wanted[0] || e.path !== wanted[1].path) {
      console.error("wanted", wanted)
      console.error([ev, e.props])
      e.on("end", function () {
        console.error(e.fields)
        throw "break"
      })
    }
    t.has(e.props, wanted[1], "properties "+wanted[1].path)

    // wanted[2], when present, holds expected pax/extended-header fields,
    // which are only complete once the entry has fully streamed.
    if (wanted[2]) {
      e.on("end", function () {
        if (!e.fields) {
          t.ok(e.fields, "should get fields")
        } else {
          t.has(e.fields, wanted[2], "should get expected fields")
        }
      })
    }
  })

  // kick off the pipeline
  reader.pipe(pack)

  writer.on("close", function () {
    t.equal(entry, entries.length, "should get all expected entries")
    t.pass("it finished")
    t.end()
  })
}
-952
// the symlink file is excluded from git, because it makes
// windows freak the hell out.
// Create it fresh for each run (unlinking any leftover from a previous
// run) and remove it again on exit so the working tree stays clean.
var fs = require("fs")
  , path = require("path")
  , symlink = path.resolve(__dirname, "fixtures/symlink")
try { fs.unlinkSync(symlink) } catch (e) {}
fs.symlinkSync("./hardlink-1", symlink)
process.on("exit", function () {
  fs.unlinkSync(symlink)
})
| var tap = require("tap") | ||
| , tar = require("../tar.js") | ||
| , pkg = require("../package.json") | ||
| , Pack = tar.Pack | ||
| , fstream = require("fstream") | ||
| , Reader = fstream.Reader | ||
| , Writer = fstream.Writer | ||
| , input = path.resolve(__dirname, "fixtures/") | ||
| , target = path.resolve(__dirname, "tmp/pack.tar") | ||
| , uid = process.getuid ? process.getuid() : 0 | ||
| , gid = process.getgid ? process.getgid() : 0 | ||
| , entries = | ||
| // the global header and root fixtures/ dir are going to get | ||
| // a different date each time, so omit that bit. | ||
| // Also, dev/ino values differ across machines, so that's not | ||
| // included. | ||
| [ [ 'globalExtendedHeader', | ||
| { path: 'PaxHeader/', | ||
| mode: 438, | ||
| uid: 0, | ||
| gid: 0, | ||
| type: 'g', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' }, | ||
| { "NODETAR.author": pkg.author, | ||
| "NODETAR.name": pkg.name, | ||
| "NODETAR.description": pkg.description, | ||
| "NODETAR.version": pkg.version, | ||
| "NODETAR.repository.type": pkg.repository.type, | ||
| "NODETAR.repository.url": pkg.repository.url, | ||
| "NODETAR.main": pkg.main, | ||
| "NODETAR.scripts.test": pkg.scripts.test } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'extendedHeader', | ||
| { path: 'PaxHeader/fixtures/200cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| type: 'x', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' }, | ||
| { path: 'fixtures/200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| 'NODETAR.depth': '1', | ||
| 'NODETAR.type': 'File', | ||
| nlink: 1, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 200, | ||
| 'NODETAR.blksize': '4096', | ||
| 'NODETAR.blocks': '8' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 200, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '', | ||
| 'NODETAR.depth': '1', | ||
| 'NODETAR.type': 'File', | ||
| nlink: 1, | ||
| 'NODETAR.blksize': '4096', | ||
| 'NODETAR.blocks': '8' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/a.txt', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 257, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/b.txt', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 512, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/c.txt', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 513, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/cc.txt', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 513, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/dir/', | ||
| mode: 488, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/dir/sub/', | ||
| mode: 488, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/foo.js', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 4, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/hardlink-1', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 200, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/hardlink-2', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '1', | ||
| linkpath: 'fixtures/hardlink-1', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/omega.txt', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 2, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/packtest/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/packtest/omega.txt', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 2, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/packtest/star.4.html', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 54081, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'extendedHeader', | ||
| { path: 'PaxHeader/fixtures/packtest/Ω.txt', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| type: 'x', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' }, | ||
| { path: 'fixtures/packtest/Ω.txt', | ||
| 'NODETAR.depth': '2', | ||
| 'NODETAR.type': 'File', | ||
| nlink: 1, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 2, | ||
| 'NODETAR.blksize': '4096', | ||
| 'NODETAR.blocks': '8' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/packtest/Ω.txt', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 2, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '', | ||
| 'NODETAR.depth': '2', | ||
| 'NODETAR.type': 'File', | ||
| nlink: 1, | ||
| 'NODETAR.blksize': '4096', | ||
| 'NODETAR.blocks': '8' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-/p/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-/p/a/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-/p/a/t/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-/p/a/t/h/', | ||
| mode: 493, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '5', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-/p/a/t/h/cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 100, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/symlink', | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 0, | ||
| type: '2', | ||
| linkpath: 'hardlink-1', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' } ] | ||
| , [ 'extendedHeader', | ||
| { path: 'PaxHeader/fixtures/Ω.txt', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| type: 'x', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' }, | ||
| { path: "fixtures/Ω.txt" | ||
| , "NODETAR.depth": "1" | ||
| , "NODETAR.type": "File" | ||
| , nlink: 1 | ||
| , uid: uid | ||
| , gid: gid | ||
| , size: 2 | ||
| , "NODETAR.blksize": "4096" | ||
| , "NODETAR.blocks": "8" } ] | ||
| , [ 'entry', | ||
| { path: 'fixtures/Ω.txt', | ||
| mode: 420, | ||
| uid: uid, | ||
| gid: gid, | ||
| size: 2, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\u0000', | ||
| ustarver: '00', | ||
| uname: '', | ||
| gname: '', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '', | ||
| 'NODETAR.depth': '1', | ||
| 'NODETAR.type': 'File', | ||
| nlink: 1, | ||
| 'NODETAR.blksize': '4096', | ||
| 'NODETAR.blocks': '8' } ] | ||
| ] | ||
// first, make sure that the hardlinks are actually hardlinks, or this
// won't work. Git has a way of replacing them with a copy.
// (re-link hardlink-2 to hardlink-1 so the pack step sees a real
// type-'1' hard link entry; unlink first in case a stale copy exists.)
var hard1 = path.resolve(__dirname, "fixtures/hardlink-1")
  , hard2 = path.resolve(__dirname, "fixtures/hardlink-2")
  , fs = require("fs")
try { fs.unlinkSync(hard2) } catch (e) {}
fs.linkSync(hard1, hard2)

// Three variants: with the NODETAR global header, without it, and with
// fromBase (paths stripped of the fixtures/ base directory).
tap.test("with global header", { timeout: 10000 }, function (t) {
  runTest(t, true)
})

tap.test("without global header", { timeout: 10000 }, function (t) {
  runTest(t, false)
})

tap.test("with from base", { timeout: 10000 }, function (t) {
  runTest(t, true, true)
})
// Case-insensitive comparator for fstream's sort option; falls back to a
// case-sensitive comparison when the lowercased forms are equal, so the
// ordering is total and deterministic across platforms.
function alphasort (a, b) {
  if (a === b) return 0
  var la = a.toLowerCase()
  var lb = b.toLowerCase()
  if (la !== lb) return la > lb ? 1 : -1
  return a > b ? 1 : -1
}
// Pack the fixtures/ tree, feed the resulting tar byte stream back through
// tar.Parse, and verify each parsed header against the file-level `entries`
// expectation table, in order.
// doGH: when true, pass package.json as Pack props so a 'g' (global
// extended header) record is emitted at the front of the archive.
// doFromBase: when true, set Pack's fromBase option so archived paths are
// relative to the fixtures/ base; the expectation rows are munged in place
// below to match.
function runTest (t, doGH, doFromBase) {
  // Read fixtures/, skipping the pre-built .tar/.hex archive fixtures and
  // sorting case-insensitively so entry order is stable across filesystems.
  var reader = Reader({ path: input
                      , filter: function () {
                          return !this.path.match(/\.(tar|hex)$/)
                        }
                      , sort: alphasort
                      })

  // NOTE(review): when doGH is true, props aliases the shared pkg object,
  // so setting fromBase here mutates pkg for the rest of the process.
  var props = doGH ? pkg : {}
  if(doFromBase) props.fromBase = true;

  var pack = Pack(props)
  var writer = Writer(target)

  // skip the global header if we're not doing that.
  var entry = doGH ? 0 : 1

  t.ok(reader, "reader ok")
  t.ok(pack, "pack ok")
  t.ok(writer, "writer ok")

  // tee the packed bytes: to disk via writer, and into the parser below
  pack.pipe(writer)

  var parse = tar.Parse()
  t.ok(parse, "parser should be ok")

  pack.on("data", function (c) {
    // console.error("PACK DATA")
    // tar output must come in whole 512-byte blocks
    if (c.length !== 512) {
      // this one is too noisy, only assert if it'll be relevant
      t.equal(c.length, 512, "parser should emit data in 512byte blocks")
    }
    parse.write(c)
  })

  pack.on("end", function () {
    // console.error("PACK END")
    t.pass("parser ends")
    parse.end()
  })

  pack.on("error", function (er) {
    t.fail("pack error", er)
  })

  parse.on("error", function (er) {
    t.fail("parse error", er)
  })

  writer.on("error", function (er) {
    t.fail("writer error", er)
  })

  reader.on("error", function (er) {
    t.fail("reader error", er)
  })

  // "*" fires for every parse event type (entry, extendedHeader, ...);
  // compare each against the next row of the expectation table.
  parse.on("*", function (ev, e) {
    var wanted = entries[entry++]
    if (!wanted) {
      t.fail("unexpected event: "+ev)
      return
    }
    t.equal(ev, wanted[0], "event type should be "+wanted[0])

    if(doFromBase) {
      // Rewrite expected paths in place to account for the stripped base.
      // NOTE(review): indexOf() truthiness means this first munge fires only
      // when 'fixtures/' is NOT at position 0 (indexOf 0 is falsy, -1 is
      // truthy) — possibly intended `=== 0`; confirm against a real run.
      if(wanted[1].path.indexOf('fixtures/') && wanted[1].path.length == 100)
        wanted[1].path = wanted[1].path.replace('fixtures/', '') + 'ccccccccc'

      if(wanted[1]) wanted[1].path = wanted[1].path.replace('fixtures/', '').replace('//', '/')
      if(wanted[1].path == '') wanted[1].path = '/'
      if(wanted[2] && wanted[2].path) wanted[2].path = wanted[2].path.replace('fixtures', '').replace(/^\//, '')

      // hard link targets also carried the fixtures/ prefix
      wanted[1].linkpath = wanted[1].linkpath.replace('fixtures/', '')
    }

    // on mismatch, dump diagnostics and bail hard once the entry ends
    if (ev !== wanted[0] || e.path !== wanted[1].path) {
      console.error("wanted", wanted)
      console.error([ev, e.props])
      e.on("end", function () {
        console.error(e.fields)
        throw "break"
      })
    }
    t.has(e.props, wanted[1], "properties "+wanted[1].path)

    // wanted[2], when present, holds expected pax/extended-header fields,
    // which are only complete once the entry has fully streamed.
    if (wanted[2]) {
      e.on("end", function () {
        if (!e.fields) {
          t.ok(e.fields, "should get fields")
        } else {
          t.has(e.fields, wanted[2], "should get expected fields")
        }
      })
    }
  })

  // kick off the pipeline
  reader.pipe(pack)

  writer.on("close", function () {
    t.equal(entry, entries.length, "should get all expected entries")
    t.pass("it finished")
    t.end()
  })
}
// Verify that entry.abort() discards the aborted entry's content while the
// rest of the archive still streams through. c.txt in the fixture is 513
// bytes, so the bytes actually received should fall short of the summed
// entry sizes by exactly 513.
var tap = require("tap")
var tar = require("../tar.js")
var fs = require("fs")
var path = require("path")
var file = path.resolve(__dirname, "fixtures/c.tar")

tap.test("parser test", function (t) {
  var parser = tar.Parse()
  var expectedBytes = 0 // sum of every entry's declared size
  var receivedBytes = 0 // bytes actually emitted as entry data

  parser.on("end", function () {
    t.equals(expectedBytes-513,receivedBytes,'should have discarded only c.txt')
    t.end()
  })

  var piped = fs.createReadStream(file).pipe(parser)
  piped.on('entry', function (entry) {
    // abort c.txt before consuming it; its content should be dropped
    if(entry.path === 'c.txt') entry.abort()
    expectedBytes += entry.size;
    entry.on('data', function (data) {
      receivedBytes += data.length
    })
  })
})
-359
| var tap = require("tap") | ||
| , tar = require("../tar.js") | ||
| , fs = require("fs") | ||
| , path = require("path") | ||
| , file = path.resolve(__dirname, "fixtures/c.tar") | ||
| , index = 0 | ||
| , expect = | ||
| [ [ 'entry', | ||
| { path: 'c.txt', | ||
| mode: 420, | ||
| uid: 24561, | ||
| gid: 20, | ||
| size: 513, | ||
| mtime: new Date('Wed, 26 Oct 2011 01:10:58 GMT'), | ||
| cksum: 5422, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\0', | ||
| ustarver: '00', | ||
| uname: 'isaacs', | ||
| gname: 'staff', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' }, | ||
| undefined ], | ||
| [ 'entry', | ||
| { path: 'cc.txt', | ||
| mode: 420, | ||
| uid: 24561, | ||
| gid: 20, | ||
| size: 513, | ||
| mtime: new Date('Wed, 26 Oct 2011 01:11:02 GMT'), | ||
| cksum: 5525, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\0', | ||
| ustarver: '00', | ||
| uname: 'isaacs', | ||
| gname: 'staff', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' }, | ||
| undefined ], | ||
| [ 'entry', | ||
| { path: 'r/e/a/l/l/y/-/d/e/e/p/-/f/o/l/d/e/r/-/p/a/t/h/cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| mode: 420, | ||
| uid: 24561, | ||
| gid: 20, | ||
| size: 100, | ||
| mtime: new Date('Thu, 27 Oct 2011 03:43:23 GMT'), | ||
| cksum: 18124, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\0', | ||
| ustarver: '00', | ||
| uname: 'isaacs', | ||
| gname: 'staff', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' }, | ||
| undefined ], | ||
| [ 'entry', | ||
| { path: 'Ω.txt', | ||
| mode: 420, | ||
| uid: 24561, | ||
| gid: 20, | ||
| size: 2, | ||
| mtime: new Date('Thu, 27 Oct 2011 17:51:49 GMT'), | ||
| cksum: 5695, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\0', | ||
| ustarver: '00', | ||
| uname: 'isaacs', | ||
| gname: 'staff', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' }, | ||
| undefined ], | ||
| [ 'extendedHeader', | ||
| { path: 'PaxHeader/Ω.txt', | ||
| mode: 420, | ||
| uid: 24561, | ||
| gid: 20, | ||
| size: 120, | ||
| mtime: new Date('Thu, 27 Oct 2011 17:51:49 GMT'), | ||
| cksum: 6702, | ||
| type: 'x', | ||
| linkpath: '', | ||
| ustar: 'ustar\0', | ||
| ustarver: '00', | ||
| uname: 'isaacs', | ||
| gname: 'staff', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' }, | ||
| { path: 'Ω.txt', | ||
| ctime: 1319737909, | ||
| atime: 1319739061, | ||
| dev: 234881026, | ||
| ino: 51693379, | ||
| nlink: 1 } ], | ||
| [ 'entry', | ||
| { path: 'Ω.txt', | ||
| mode: 420, | ||
| uid: 24561, | ||
| gid: 20, | ||
| size: 2, | ||
| mtime: new Date('Thu, 27 Oct 2011 17:51:49 GMT'), | ||
| cksum: 5695, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\0', | ||
| ustarver: '00', | ||
| uname: 'isaacs', | ||
| gname: 'staff', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '', | ||
| ctime: new Date('Thu, 27 Oct 2011 17:51:49 GMT'), | ||
| atime: new Date('Thu, 27 Oct 2011 18:11:01 GMT'), | ||
| dev: 234881026, | ||
| ino: 51693379, | ||
| nlink: 1 }, | ||
| undefined ], | ||
| [ 'extendedHeader', | ||
| { path: 'PaxHeader/200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| mode: 420, | ||
| uid: 24561, | ||
| gid: 20, | ||
| size: 353, | ||
| mtime: new Date('Thu, 27 Oct 2011 03:41:08 GMT'), | ||
| cksum: 14488, | ||
| type: 'x', | ||
| linkpath: '', | ||
| ustar: 'ustar\0', | ||
| ustarver: '00', | ||
| uname: 'isaacs', | ||
| gname: 'staff', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' }, | ||
| { path: '200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| ctime: 1319686868, | ||
| atime: 1319741254, | ||
| 'LIBARCHIVE.creationtime': '1319686852', | ||
| dev: 234881026, | ||
| ino: 51681874, | ||
| nlink: 1 } ], | ||
| [ 'entry', | ||
| { path: '200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| mode: 420, | ||
| uid: 24561, | ||
| gid: 20, | ||
| size: 200, | ||
| mtime: new Date('Thu, 27 Oct 2011 03:41:08 GMT'), | ||
| cksum: 14570, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\0', | ||
| ustarver: '00', | ||
| uname: 'isaacs', | ||
| gname: 'staff', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '', | ||
| ctime: new Date('Thu, 27 Oct 2011 03:41:08 GMT'), | ||
| atime: new Date('Thu, 27 Oct 2011 18:47:34 GMT'), | ||
| 'LIBARCHIVE.creationtime': '1319686852', | ||
| dev: 234881026, | ||
| ino: 51681874, | ||
| nlink: 1 }, | ||
| undefined ], | ||
| [ 'longPath', | ||
| { path: '././@LongLink', | ||
| mode: 0, | ||
| uid: 0, | ||
| gid: 0, | ||
| size: 201, | ||
| mtime: new Date('Thu, 01 Jan 1970 00:00:00 GMT'), | ||
| cksum: 4976, | ||
| type: 'L', | ||
| linkpath: '', | ||
| ustar: false }, | ||
| '200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc' ], | ||
| [ 'entry', | ||
| { path: '200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| mode: 420, | ||
| uid: 1000, | ||
| gid: 1000, | ||
| size: 201, | ||
| mtime: new Date('Thu, 27 Oct 2011 22:21:50 GMT'), | ||
| cksum: 14086, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: false }, | ||
| undefined ], | ||
| [ 'longLinkpath', | ||
| { path: '././@LongLink', | ||
| mode: 0, | ||
| uid: 0, | ||
| gid: 0, | ||
| size: 201, | ||
| mtime: new Date('Thu, 01 Jan 1970 00:00:00 GMT'), | ||
| cksum: 4975, | ||
| type: 'K', | ||
| linkpath: '', | ||
| ustar: false }, | ||
| '200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc' ], | ||
| [ 'longPath', | ||
| { path: '././@LongLink', | ||
| mode: 0, | ||
| uid: 0, | ||
| gid: 0, | ||
| size: 201, | ||
| mtime: new Date('Thu, 01 Jan 1970 00:00:00 GMT'), | ||
| cksum: 4976, | ||
| type: 'L', | ||
| linkpath: '', | ||
| ustar: false }, | ||
| '200LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL' ], | ||
| [ 'entry', | ||
| { path: '200LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL', | ||
| mode: 511, | ||
| uid: 1000, | ||
| gid: 1000, | ||
| size: 0, | ||
| mtime: new Date('Fri, 28 Oct 2011 23:05:17 GMT'), | ||
| cksum: 21603, | ||
| type: '2', | ||
| linkpath: '200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| ustar: false }, | ||
| undefined ], | ||
| [ 'extendedHeader', | ||
| { path: 'PaxHeader/200-hard', | ||
| mode: 420, | ||
| uid: 24561, | ||
| gid: 20, | ||
| size: 143, | ||
| mtime: new Date('Thu, 27 Oct 2011 03:41:08 GMT'), | ||
| cksum: 6533, | ||
| type: 'x', | ||
| linkpath: '', | ||
| ustar: 'ustar\0', | ||
| ustarver: '00', | ||
| uname: 'isaacs', | ||
| gname: 'staff', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' }, | ||
| { ctime: 1320617144, | ||
| atime: 1320617232, | ||
| 'LIBARCHIVE.creationtime': '1319686852', | ||
| dev: 234881026, | ||
| ino: 51681874, | ||
| nlink: 2 } ], | ||
| [ 'entry', | ||
| { path: '200-hard', | ||
| mode: 420, | ||
| uid: 24561, | ||
| gid: 20, | ||
| size: 200, | ||
| mtime: new Date('Thu, 27 Oct 2011 03:41:08 GMT'), | ||
| cksum: 5526, | ||
| type: '0', | ||
| linkpath: '', | ||
| ustar: 'ustar\0', | ||
| ustarver: '00', | ||
| uname: 'isaacs', | ||
| gname: 'staff', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '', | ||
| ctime: new Date('Sun, 06 Nov 2011 22:05:44 GMT'), | ||
| atime: new Date('Sun, 06 Nov 2011 22:07:12 GMT'), | ||
| 'LIBARCHIVE.creationtime': '1319686852', | ||
| dev: 234881026, | ||
| ino: 51681874, | ||
| nlink: 2 }, | ||
| undefined ], | ||
| [ 'extendedHeader', | ||
| { path: 'PaxHeader/200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| mode: 420, | ||
| uid: 24561, | ||
| gid: 20, | ||
| size: 353, | ||
| mtime: new Date('Thu, 27 Oct 2011 03:41:08 GMT'), | ||
| cksum: 14488, | ||
| type: 'x', | ||
| linkpath: '', | ||
| ustar: 'ustar\0', | ||
| ustarver: '00', | ||
| uname: 'isaacs', | ||
| gname: 'staff', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '' }, | ||
| { path: '200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| ctime: 1320617144, | ||
| atime: 1320617406, | ||
| 'LIBARCHIVE.creationtime': '1319686852', | ||
| dev: 234881026, | ||
| ino: 51681874, | ||
| nlink: 2 } ], | ||
| [ 'entry', | ||
| { path: '200ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', | ||
| mode: 420, | ||
| uid: 24561, | ||
| gid: 20, | ||
| size: 0, | ||
| mtime: new Date('Thu, 27 Oct 2011 03:41:08 GMT'), | ||
| cksum: 15173, | ||
| type: '1', | ||
| linkpath: '200-hard', | ||
| ustar: 'ustar\0', | ||
| ustarver: '00', | ||
| uname: 'isaacs', | ||
| gname: 'staff', | ||
| devmaj: 0, | ||
| devmin: 0, | ||
| fill: '', | ||
| ctime: new Date('Sun, 06 Nov 2011 22:05:44 GMT'), | ||
| atime: new Date('Sun, 06 Nov 2011 22:10:06 GMT'), | ||
| 'LIBARCHIVE.creationtime': '1319686852', | ||
| dev: 234881026, | ||
| ino: 51681874, | ||
| nlink: 2 }, | ||
| undefined ] ] | ||
// Feed the fixture tarball through the parser and verify that every
// emitted event matches the expected [event, props, body] tuples in
// the `expect` table above. `index` walks through `expect` as events
// arrive; the "end" handler confirms nothing was missed.
tap.test("parser test", function (t) {
  const parser = tar.Parse()

  // When the parse stream finishes, every expected event must have
  // been consumed — otherwise some events never fired.
  parser.on("end", function () {
    t.equal(index, expect.length, "saw all expected events")
    t.end()
  })

  fs.createReadStream(file)
    .pipe(parser)
    .on("*", function (ev, entry) {
      const wanted = expect[index]
      if (!wanted) {
        return t.fail(`Unexpected event: ${ev}`)
      }
      const result = [ev, entry.props]
      // Compare only once the entry has fully streamed, so that
      // entry.fields / entry.body are populated.
      entry.on("end", function () {
        result.push(entry.fields || entry.body)
        t.equal(ev, wanted[0], `${index} event type`)
        t.equivalent(entry.props, wanted[1], `${wanted[1].path} entry properties`)
        if (wanted[2]) {
          t.equivalent(result[2], wanted[2], "metadata values")
        }
        index++
      })
    })
})
// clean up the fixtures
// Removes the fixtures/ and tmp/ directories created by the earlier
// extraction tests so the next run starts from a clean slate.
// NOTE: `var` (not `const`) is kept so re-declaring `tap`/`path` is
// harmless if they are already in scope.
var tap = require("tap")
var rimraf = require("rimraf")
var path = require("path")
var test = tap.test

test("clean fixtures", function (t) {
  rimraf(path.resolve(__dirname, "fixtures"), function (er) {
    t.ifError(er, "rimraf ./fixtures/")
    t.end()
  })
})

test("clean tmp", function (t) {
  rimraf(path.resolve(__dirname, "tmp"), function (er) {
    t.ifError(er, "rimraf ./tmp/")
    t.end()
  })
})
Major refactor
Supply chain risk: Package has recently undergone a major refactor. It may be unstable or indicate significant internal changes. Use caution when updating to versions that include significant changes.
Found 1 instance in 1 package
Environment variable access
Supply chain risk: Package accesses environment variables, which may be a sign of credential stuffing or data theft.
Found 1 instance in 1 package
Filesystem access
Supply chain risk: Accesses the file system, and could potentially read sensitive data.
Found 1 instance in 1 package
Long strings
Supply chain risk: Contains long string literals, which may be a sign of obfuscated or packed code.
Found 1 instance in 1 package
Wildcard dependency
Quality: Package has a dependency with a floating version range. This can cause issues if the dependency publishes a new major version.
Found 1 instance in 1 package
New author
Supply chain risk: A new npm collaborator published a version of the package for the first time. New collaborators are usually benign additions to a project, but do indicate a change to the security surface area of a package.
Found 1 instance in 1 package
Shell access
Supply chain risk: This module accesses the system shell. Accessing the system shell increases the risk of executing arbitrary code.
Found 1 instance in 1 package
Filesystem access
Supply chain risk: Accesses the file system, and could potentially read sensitive data.
Found 3 instances in 1 package
Long strings
Supply chain risk: Contains long string literals, which may be a sign of obfuscated or packed code.
Found 1 instance in 1 package
URL strings
Supply chain risk: Package contains fragments of external URLs or IP addresses, which the package may be accessing at runtime.
Found 1 instance in 1 package
0
-100%802
1472.55%11
-67.65%1
-50%107234
-34.88%4
33.33%7
75%20
-39.39%2422
-47.59%+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
+ Added
- Removed
- Removed
- Removed
- Removed
- Removed
- Removed
- Removed
- Removed
- Removed
- Removed
- Removed
- Removed
- Removed
- Removed
- Removed
- Removed
- Removed
- Removed