'use strict'
const Buffer = require('./buffer.js')

// tar -r
const hlo = require('./high-level-opt.js')
const Pack = require('./pack.js')
const Parse = require('./parse.js')
const fs = require('fs')
const fsm = require('fs-minipass')
const t = require('./list.js')
const path = require('path')

// Starting at the head of the file, read a Header.
// If the checksum is invalid, that's our position to start writing.
// If it is valid, jump forward by the entry's size (rounded up to a
// multiple of 512) and try again.
// Write the new Pack stream starting there.

const Header = require('./header.js')

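// Entry point: validate the options and file list, then dispatch to the
// sync or async implementation. Appending requires an uncompressed,
// seekable archive, so gzip is rejected up front.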
const r = module.exports = (opt_, files, cb) => {
  const opt = hlo(opt_)

  if (!opt.file)
    throw new TypeError('file is required')

  if (opt.gzip)
    throw new TypeError('cannot append to compressed archives')

  if (!files || !Array.isArray(files) || !files.length)
    throw new TypeError('no files or directories specified')

  files = Array.from(files)

  return opt.sync ? replaceSync(opt, files)
    : replace(opt, files, cb)
}

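// replaceSync: open the archive (creating it if absent), scan forward to
// the end of the last complete entry, then write the new entries there.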
const replaceSync = (opt, files) => {
  const p = new Pack.Sync(opt)

  let threw = true
  let fd
  let position

  try {
    try {
      fd = fs.openSync(opt.file, 'r+')
    } catch (er) {
      if (er.code === 'ENOENT')
        fd = fs.openSync(opt.file, 'w+')
      else
        throw er
    }

    const st = fs.fstatSync(fd)
    const headBuf = Buffer.alloc(512)

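    // Walk the archive in 512-byte steps: read a header block, and if it
    // parses with a valid checksum, skip past the entry's body blocks.
    // The first position without a valid header is where writing begins.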
    POSITION: for (position = 0; position < st.size; position += 512) {
      for (let bufPos = 0, bytes = 0; bufPos < 512; bufPos += bytes) {
        bytes = fs.readSync(
          fd, headBuf, bufPos, headBuf.length - bufPos, position + bufPos
        )

        if (position === 0 && headBuf[0] === 0x1f && headBuf[1] === 0x8b)
          throw new Error('cannot append to compressed archives')

        if (!bytes)
          break POSITION
      }

      const h = new Header(headBuf)
      if (!h.cksumValid)
        break
      const entryBlockSize = 512 * Math.ceil(h.size / 512)
      if (position + entryBlockSize + 512 > st.size)
        break
      // the loop increment covers the 512 bytes of the header we just
      // parsed; jump ahead over all the blocks of the entry body as well
      position += entryBlockSize
      if (opt.mtimeCache)
        opt.mtimeCache.set(h.path, h.mtime)
    }
    threw = false

    streamSync(opt, p, position, fd, files)
  } finally {
    if (threw)
      try { fs.closeSync(fd) } catch (er) {}
  }
}

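// streamSync: reuse the already-open fd in a synchronous write stream
// that starts at the computed offset, and pipe the Pack output into it.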
const streamSync = (opt, p, position, fd, files) => {
  const stream = new fsm.WriteStreamSync(opt.file, {
    fd: fd,
    start: position
  })
  p.pipe(stream)
  addFilesSync(p, files)
}

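// replace: the async implementation. Open the file, locate the append
// position with getPos, then pipe a Pack stream into a write stream
// positioned at that offset.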
const replace = (opt, files, cb) => {
  files = Array.from(files)
  const p = new Pack(opt)

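  // getPos: async scan for the offset just past the last complete entry.
  // On any error, the fd is closed before the error is reported.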
  const getPos = (fd, size, cb_) => {
    const cb = (er, pos) => {
      if (er)
        fs.close(fd, _ => cb_(er))
      else
        cb_(null, pos)
    }

    let position = 0
    if (size === 0)
      return cb(null, 0)

    let bufPos = 0
    const headBuf = Buffer.alloc(512)
    const onread = (er, bytes) => {
      if (er)
        return cb(er)
      bufPos += bytes
      if (bufPos < 512 && bytes)
        return fs.read(
          fd, headBuf, bufPos, headBuf.length - bufPos,
          position + bufPos, onread
        )

      if (position === 0 && headBuf[0] === 0x1f && headBuf[1] === 0x8b)
        return cb(new Error('cannot append to compressed archives'))

      // truncated header
      if (bufPos < 512)
        return cb(null, position)

      const h = new Header(headBuf)
      if (!h.cksumValid)
        return cb(null, position)

      const entryBlockSize = 512 * Math.ceil(h.size / 512)
      if (position + entryBlockSize + 512 > size)
        return cb(null, position)

      position += entryBlockSize + 512
      if (position >= size)
        return cb(null, position)

      if (opt.mtimeCache)
        opt.mtimeCache.set(h.path, h.mtime)
      bufPos = 0
      fs.read(fd, headBuf, 0, 512, position, onread)
    }
    fs.read(fd, headBuf, 0, 512, position, onread)
  }

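  // Resolve once the write stream closes. The archive is opened 'r+'
  // first; 'w+' (create) is used only when the file doesn't exist yet.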
  const promise = new Promise((resolve, reject) => {
    p.on('error', reject)
    let flag = 'r+'
    const onopen = (er, fd) => {
      if (er && er.code === 'ENOENT' && flag === 'r+') {
        flag = 'w+'
        return fs.open(opt.file, flag, onopen)
      }

      if (er)
        return reject(er)

      fs.fstat(fd, (er, st) => {
        if (er)
          return fs.close(fd, () => reject(er))

        getPos(fd, st.size, (er, position) => {
          if (er)
            return reject(er)
          const stream = new fsm.WriteStream(opt.file, {
            fd: fd,
            start: position
          })
          p.pipe(stream)
          stream.on('error', reject)
          stream.on('close', resolve)
          addFilesAsync(p, files)
        })
      })
    }
    fs.open(opt.file, flag, onopen)
  })

  return cb ? promise.then(cb, cb) : promise
}

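// Add each file to the pack. A leading '@' means "append the entries of
// another tar archive": its entries are re-added via the list module
// rather than read from the filesystem.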
const addFilesSync = (p, files) => {
  files.forEach(file => {
    if (file.charAt(0) === '@')
      t({
        file: path.resolve(p.cwd, file.substr(1)),
        sync: true,
        noResume: true,
        onentry: entry => p.add(entry)
      })
    else
      p.add(file)
  })
  p.end()
}

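// Async variant: files are consumed in order, and an '@' tar file pauses
// the loop until all of its entries have been added, preserving sequence.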
const addFilesAsync = (p, files) => {
  while (files.length) {
    const file = files.shift()
    if (file.charAt(0) === '@')
      return t({
        file: path.resolve(p.cwd, file.substr(1)),
        noResume: true,
        onentry: entry => p.add(entry)
      }).then(_ => addFilesAsync(p, files))
    else
      p.add(file)
  }
  p.end()
}
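
// Usage sketch (illustrative only, not part of the module; assumes an
// 'archive.tar' and a 'file1.txt' exist, and the standard high-level
// options object documented for this API):
//
//   const r = require('./replace.js')
//
//   // async: invokes the callback when done, or returns a promise
//   r({ file: 'archive.tar' }, ['file1.txt'], er => {
//     if (er) throw er
//   })
//
//   // sync: blocks until the new entries have been appended
//   r({ file: 'archive.tar', sync: true }, ['file1.txt'])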