How do I dynamically generate Mocha tests in a describe()'s before() block?


Question


I am creating a Mocha test suite for a command-line utility that our nwjs app calls, which takes input files and produces an output JSON file. There are thousands of combinations of input files, and the tests (it()s) I want to generate depend on the contents of the JSON output from the command-line utility.

Mocha seems to require that all of the it()s be created up front, but that would mean running all of the scripts up front and capturing their JSON output before any test executes. I was hoping to do something like this:

'use strict';
const path = require('path');
const glob = require('glob');
const expect = require('sharedjs/chai-wrapper').expect;
const utils = require('sharedjs/utils');

describe('Generated Tests:', function() {
  let testNum = 0;
  let globOpts = { nodir: true };
  let type1files = glob.sync(path.join(filetype1_dir, '*'), globOpts);
  let type2files = glob.sync(path.join(filetype2_dir, '*'), globOpts);
  for (let i = 0; i < type1files.length; i++) {
    for (let j = 0; j < type2files.length; j++) {
      testNum++;
      let testName = utils.mkTestName(testNum, i, j);

      describe(testName, function() {
        let run;
        before(function() {
          run = utils.runCommand(type1files[i], type2files[j]);
          // run = { status: result.status, command: result.args.join(' '), output: fse.readJsonSync(outfile) }
          if (run.status !== 0) {
            throw new Error(run.status+'='+run.command);
          }
        });

        // This is the problem: this loop executes while describe() is
        // being evaluated, before before() has set `run`.
        for (let key in run.output.analysis) {
          it(key+'=0', function() {
            expect(run.output.analysis[key].value).to.be.equal('0', key+'=0');
          });
        }
      });
    }
  }
});

I'll be making thousands of command-line calls here. I don't want to make them all up front, cache the output files (or worse, hold all of the JSON objects in memory), and only then start running the tests.

I know that I could create one high-level "validate json" test and just do a bunch of expect() calls inside it, but there are two problems with that. First, the checks wouldn't show up as independently named tests when they fail, and second, the first failing expect() fails the whole test, so I lose visibility into other errors further down the JSON.
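
For contrast, a minimal sketch of that rejected approach (mine, not part of the original question; it reuses the loop variables and helpers from the snippet above):

// one coarse test per file pair instead of one test per analysis key
it('validate json for ' + testName, function() {
  let run = utils.runCommand(type1files[i], type2files[j]);
  for (let key in run.output.analysis) {
    // the first failing expect() throws and aborts this test body, so
    // errors in later keys are never surfaced, and the report shows a
    // single failure instead of one per bad key
    expect(run.output.analysis[key].value).to.be.equal('0', key+'=0');
  }
});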

Ideas?

-- UPDATED WITH SAMPLE JSON OUTPUT FROM utils.runCommand() --

{
    data1: { ... },
    data2: { ... },
    analysis: {
        dynamicKey1: <analysisObj>,
        dynamicKey...: <analysisObj>,
        dynamicKeyN: <analysisObj>
    }
}

The keys in analysis depend on the type of data that is entered, and there are a large number of possibilities. The names of the dynamic keys can change from run to run. From a testing perspective, I am not interested in the name of a key, only that its analysisObj is conformant. For example, if I pass identical data1 and data2 to utils.runCommand(), then the portion of each analysisObj that represents the delta between the two should be zero across the board.

I don't get the analysisObjs until after I run the script, and if I'm running 100,000 tests, I don't want to pre-run everything or pre-load all of that output into memory or onto the filesystem.
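
To make that invariant concrete, here is a sketch of the check for a single run (my illustration; the file name is hypothetical):

// identical inputs should produce an all-zero delta in every analysisObj
let run = utils.runCommand('sample.dat', 'sample.dat'); // same file twice
for (let key in run.output.analysis) {
  expect(run.output.analysis[key].value).to.be.equal('0', key+'=0');
}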


Answer 1:


I want to thank @JoshLee for pointing me down some helpful research paths.

After looking at the mocha code, focusing mainly on:

  • mocha/lib/mocha.js
  • mocha/lib/suite.js
  • mocha/lib/test.js
  • mocha/lib/interfaces/bdd.js

I learned that:

  1. The describe() call returns a Suite object.
  2. The Suite object contains the tests to be run (suite.tests).
  3. When the suite's before() hook runs, none of those tests has started executing yet.
  4. I can add as many tests as I want inside before() using suite.addTest(), and they will all get run (see the probe sketch after this list).
  5. Best of all, my utils.runCommand() is run only at the start of each test suite, and the suites run sequentially. (Previously, my added tests would all run after every initial describe block had executed once.)
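
A minimal probe (my sketch, not part of the original answer) demonstrating points 1 through 4 inside an ordinary spec file:

'use strict';
const Test = require('mocha/lib/test');

// describe() returns the Suite instance (point 1)
const suite = describe('probe suite', function() {
  before(function() {
    // the suite's tests (point 2) haven't started executing yet
    // (point 3), so a test appended here is still picked up and
    // run like any other (point 4)
    suite.addTest(new Test('added from before()', function() {}));
  });

  // at least one up-front test so mocha doesn't skip the empty suite
  it('registered up front', function() {});
});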

The output is as expected and the results reflect the proper number of tests. I've used this to auto-generate a little over 50,000 tests, spread unevenly across 1,980 test suites, with mochawesome as the reporter, and it worked great.
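
For reference, one way to wire up that reporter (my sketch; the original answer doesn't show its configuration, and this assumes mochawesome is installed as a dev dependency and Mocha 6 or later for config-file support):

// .mocharc.js
'use strict';
module.exports = {
  reporter: 'mochawesome',
};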

There are five steps required to pull this off, described in the updated code snippet below.


'use strict';
const path = require('path');
const glob = require('glob');
const expect = require('sharedjs/chai-wrapper').expect;
const utils = require('sharedjs/utils');

// Step 1: Pull in Test class directly from mocha
const Test = require('mocha/lib/test');

// Step 2: Simulates it() from mocha/lib/interfaces/bdd.js.
//   I skip the isPending() check that bdd.js performs. I don't know
//   whether skipping it is always safe, but I didn't need it for my
//   case to work.
function addTest(suite, title, fn) {
  let test = new Test(title, fn);
  test.file = __filename;
  suite.addTest(test);
  return test;
}

let testNum = 0;
let globOpts = { nodir: true };
let type1files = glob.sync(path.join(filetype1_dir, '*'), globOpts);
let type2files = glob.sync(path.join(filetype2_dir, '*'), globOpts);
for (let i = 0; i < type1files.length; i++) {
  for (let j = 0; j < type2files.length; j++) {
    testNum++;
    let testName = utils.mkTestName(testNum, i, j);

    // Step 3: Save the suite object so that we can add tests to it.
    let suite = describe(testName, function() {
      let run;
      before(function() {
        run = utils.runCommand(type1files[i], type2files[j]);
        // run = { status: result.status, command: result.args.join(' '),
        //         output: fse.readJsonSync(outfile) }
        if (run.status !== 0) {
          throw new Error(run.status+'='+run.command);
        }

        for (let key in run.output.analysis) {
          // Step 4: Dynamically add tests 
          //   suite is defined at this point since before() is always
          //   run after describe() returns.
          addTest(suite, key+'=0', function() {
            expect(run.output.analysis[key].value).to.be.equal('0', key+'=0');
          });
        }            
      });
      // Step 5: Add a dummy test inside the describe() block so the
      //   suite isn't empty; mocha skips the hooks of a suite that
      //   contains no tests, so without this the before() above would
      //   never run. Can be it() for a pass result or it.skip() for
      //   pending.
      it('Placeholder for ' + testName, function () {
        expect(true).to.be.true;
      });
    });
  }
}


Source: https://stackoverflow.com/questions/53200246/how-do-i-dynamically-generate-mocha-tests-in-a-describes-before-block
