// Submit a mix of short and long jobs to the threadpool.
// Report total job throughput.
// If we partition the long job, overall job throughput goes up significantly.
// However, this comes at the cost of the long job throughput.
//
// Short jobs: small zip jobs.
// Long jobs: fs.readFile on a large file.

'use strict';

const path = require('path');
const common = require('../common.js');
const filename = path.resolve(__dirname,
                              `.removeme-benchmark-garbage-${process.pid}`);
const fs = require('fs');
const zlib = require('zlib');
const assert = require('assert');

const bench = common.createBenchmark(main, {
  dur: [5],
  len: [1024, 16 * 1024 * 1024],
  concurrent: [1, 10]
});

function main({ len, dur, concurrent }) {
  try { fs.unlinkSync(filename); } catch {}
  let data = Buffer.alloc(len, 'x');
  fs.writeFileSync(filename, data);
  data = null;

  const zipData = Buffer.alloc(1024, 'a');

  let reads = 0;
  let zips = 0;
  let benchEnded = false;
  bench.start();
  setTimeout(() => {
    const totalOps = reads + zips;
    benchEnded = true;
    bench.end(totalOps);
    try { fs.unlinkSync(filename); } catch {}
  }, dur * 1000);

  function read() {
    fs.readFile(filename, afterRead);
  }

  function afterRead(er, data) {
    if (er) {
      if (er.code === 'ENOENT') {
        // Only OK if unlinked by the timer from main.
        assert.ok(benchEnded);
        return;
      }
      throw er;
    }

    if (data.length !== len)
      throw new Error('wrong number of bytes returned');

    reads++;
    if (!benchEnded)
      read();
  }

  function zip() {
    zlib.deflate(zipData, afterZip);
  }

  function afterZip(er, data) {
    if (er)
      throw er;

    zips++;
    if (!benchEnded)
      zip();
  }

  // Start reads
  while (concurrent-- > 0) read();

  // Start a competing zip
  zip();
}