/* ---------------------------------------------------------------------------
A test launcher. Runs tests for all languages and all exchanges, in
parallel, with humanized error reporting.
Usage: node run-tests [--js] [--ts] [--php] [--php-async] [--python] [--python-async] [--csharp] [--ws] [exchange] [method()|symbol]
--------------------------------------------------------------------------- */
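/* Illustrative invocations (the exchange, symbol and method names are examples only),
put together from the argument parsing further down in this file:
node run-tests --js --python binance          test one exchange in two languages
node run-tests binance BTC/USDT               restrict the tests to a single symbol
node run-tests binance fetchTicker()          restrict the tests to a single method
node run-tests --js --ws binance              run the WebSocket variant of the tests
node run-tests 10                             a bare number sets maxConcurrency
--------------------------------------------------------------------------- */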
import fs from 'fs'
import ansi from 'ansicolor'
import log from 'ololog'
import ps from 'child_process'
ansi.nice
// --------------------------------------------------------------------------- //
process.on ('uncaughtException', e => { log.bright.red.error (e); process.exit (1) })
process.on ('unhandledRejection', e => { log.bright.red.error (e); process.exit (1) })
// --------------------------------------------------------------------------- //
const [,, ...args] = process.argv
const langKeys = {
'--ts': false, // run TypeScript tests only
'--js': false, // run JavaScript tests only
'--php': false, // run PHP tests only
'--python': false, // run Python 3 tests only
'--python-async': false, // run Python 3 async tests only
'--csharp': false, // run C# tests only
'--php-async': false, // run PHP async tests only
}
const debugKeys = {
'--warnings': false,
'--info': false,
}
const exchangeSpecificFlags = {
'--ws': false,
'--sandbox': false,
'--useProxy': false,
'--verbose': false,
'--private': false,
'--privateOnly': false,
'--request': false,
'--response': false,
}
let exchanges = []
let symbol = 'all'
let method = undefined
let maxConcurrency = 5 // Number.MAX_VALUE // no limit
for (const arg of args) {
if (arg in exchangeSpecificFlags) { exchangeSpecificFlags[arg] = true }
else if (arg.startsWith ('--')) {
if (arg in langKeys) {
langKeys[arg] = true
} else if (arg in debugKeys) {
debugKeys[arg] = true
} else {
log.bright.red ('\nUnknown option', arg.white, '\n');
}
}
else if (arg.includes ('()')) { method = arg }
else if (arg.includes ('/')) { symbol = arg }
else if (Number.isFinite (Number (arg))) { maxConcurrency = Number (arg) }
else { exchanges.push (arg) }
}
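// For illustration (a hypothetical command line, not produced by this script):
// `node run-tests --js --ws binance fetchOHLCV() 8` ends up with
// langKeys['--js'] = true, exchangeSpecificFlags['--ws'] = true,
// exchanges = ['binance'], method = 'fetchOHLCV()', maxConcurrency = 8,
// and symbol left at its default of 'all'.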
const wsFlag = exchangeSpecificFlags['--ws'] ? 'WS' : '';
// for REST tests, some exchanges might need to wait 200+ seconds
// for WS tests, watchOHLCV might need 60 seconds per update (so, spot & swap ~ 120 sec)
const timeoutSeconds = wsFlag ? 120 : 250;
// --------------------------------------------------------------------------- //
const exchangeOptions = []
for (const key of Object.keys (exchangeSpecificFlags)) {
if (exchangeSpecificFlags[key]) {
exchangeOptions.push (key)
}
}
// --------------------------------------------------------------------------- //
const content = fs.readFileSync ('./skip-tests.json', 'utf8');
const skipSettings = JSON.parse (content);
if (!exchanges.length) {
if (!fs.existsSync ('./exchanges.json')) {
log.bright.red ('\n\tNo', 'exchanges.json'.white, 'found, please run', 'npm run build'.white, 'to generate it!\n')
process.exit (1)
}
let exchangesFile = fs.readFileSync('./exchanges.json');
exchangesFile = JSON.parse(exchangesFile)
exchanges = wsFlag ? exchangesFile.ws : exchangesFile.ids
}
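// For reference, the shapes assumed above and in the skip logic below (the field names
// come from this file, the values are illustrative only):
// exchanges.json  ~ { "ids": [ "binance", ... ], "ws": [ "binance", ... ] }
// skip-tests.json ~ { "someexchange": { "skip": true, "skipWs": true, "until": "2024-01-01",
//                                       "skipCSharp": true, "skipPhpAsync": true } }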
// --------------------------------------------------------------------------- //
const sleep = s => new Promise (resolve => setTimeout (resolve, s*1000))
const timeout = (s, promise) => Promise.race ([ promise, sleep (s).then (() => {
throw new Error ('RUNTEST_TIMED_OUT');
}) ])
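// e.g. timeout (5, doSomething ()) settles with doSomething ()'s result if it finishes
// within ~5 seconds, otherwise it throws Error ('RUNTEST_TIMED_OUT') (an illustrative
// note on the helper above, not extra behaviour)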
// --------------------------------------------------------------------------- //
const exec = (bin, ...args) => {
/* A custom version of child_process.exec that interleaves stdout and
stderr into a single combined buffer, so that we can show the same output
as if it were running in a terminal, while also keeping a separate copy
of stderr for warning detection. */
let output = ''
let stderr = ''
const generateResultFromOutput = (output, stderr, code) => {
// keep this commented-out code for a while (just in case): it strips VSCode debugger noise from the output, which otherwise causes false positive warnings during debugging, see https://github.com/nodejs/node/issues/34799
// const removeDebuger = (str) => str.replace ('Debugger attached.','').replace('Waiting for the debugger to disconnect...', '').replace(/\(node:\d+\) ExperimentalWarning: Custom ESM Loaders is an experimental feature and might change at any time\n\(Use `node --trace-warnings ...` to show where the warning was created\)\n/, '');
// stderr = removeDebuger(stderr), output = removeDebuger(output);
output = ansi.strip (output.trim ())
// detect error
const hasFailed = (
// exception caught in "test -> testMethod"
output.indexOf('[TEST_FAILURE]') > -1 ||
// 1) thrown from JS assert module
output.indexOf('AssertionError:') > -1 ||
// 2) thrown from PYTHON (i.e. [AssertionError], [KeyError], [ValueError], etc)
output.match(/\[\w+Error\]/) ||
// 3) thrown from PHP assert hook
output.indexOf('[ASSERT_ERROR]') > -1 ||
// 4) thrown from PHP async library
output.indexOf('Fatal error:') > -1
);
// ### Infos ###
const infos = []
// check output for pattern like `[INFO:TESTING] xyz message`
if (output.length) {
const infoRegex = /\[INFO(|:([\w_-]+))\].+$(?!\n)*/gm
let matchInfo;
while ((matchInfo = infoRegex.exec (output))) {
infos.push (matchInfo[0])
}
}
// ### Warnings ###
const warnings = []
// check stderr for pattern like `[TEST_WARNING] whatever`
if (stderr.length) {
const warningRegex = /\[TEST_WARNING\].+$(?!\n)*/gmi
let matchWarnings;
while ((matchWarnings = warningRegex.exec (stderr))) {
warnings.push (matchWarnings[0])
}
}
// anything else written to stderr is surfaced as a warning too
if (stderr.length > 0) {
warnings.push (stderr)
}
return {
failed: hasFailed || code !== 0,
output,
warnings,
infos,
}
}
return timeout (timeoutSeconds, new Promise (resolver => {
const psSpawn = ps.spawn (bin, args)
psSpawn.stdout.on ('data', data => { output += data.toString () })
psSpawn.stderr.on ('data', data => { output += data.toString (); stderr += data.toString ().trim (); })
psSpawn.on ('exit', code => {
const result = generateResultFromOutput (output, stderr, code)
return resolver (result) ;
})
})).catch (e => {
const isTimeout = e.message === 'RUNTEST_TIMED_OUT';
if (isTimeout) {
stderr += '\n' + 'RUNTEST_TIMED_OUT: ';
const result = generateResultFromOutput (output, stderr, 0);
return result;
}
return {
failed: true,
output: e.message,
warnings: [],
infos: [],
}
} );
};
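// The resolved value always has the shape { failed, output, warnings, infos }, whether the
// child process exited normally, a test assertion failed, or the run timed out (see the
// catch branch above).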
// ------------------------------------------------------------------------ //
// const execWithRetry = () => {
// // Sometimes execution (on a remote CI server) just fails for no
// // apparent reason, leaving an empty stdout/stderr behind. I suspect
// // it's related to out-of-memory errors. So in that case we will re-try
// // until it eventually finalizes.
// }
// ------------------------------------------------------------------------ //
let numExchangesTested = 0
// Tests of different languages for the same exchange should be run sequentially to prevent the interleaving nonces problem. //
const sequentialMap = async (input, fn) => {
const result = []
for (const item of input) { result.push (await fn (item)) }
return result
}
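// e.g. await sequentialMap (['a', 'b'], async x => x.toUpperCase ()) -> ['A', 'B'], with the
// second callback only starting after the first one has resolved (illustrative values, the
// launcher itself only uses this for the per-language tests below)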
// ------------------------------------------------------------------------ //
const percentsDone = () => ((numExchangesTested / exchanges.length) * 100).toFixed (0) + '%';
const testExchange = async (exchange) => {
// no need to test alias classes
if (exchange.alias) {
numExchangesTested++;
log.bright (('[' + percentsDone() + ']').dim, 'Tested', exchange.cyan, wsFlag, '[Skipped alias]'.yellow)
return [];
}
if (
skipSettings[exchange] &&
(
(skipSettings[exchange].skip && !wsFlag)
||
(skipSettings[exchange].skipWs && wsFlag)
)
) {
if (!('until' in skipSettings[exchange]) || new Date(skipSettings[exchange].until) > new Date()) {
numExchangesTested++;
const reason = ('until' in skipSettings[exchange]) ? ' till ' + skipSettings[exchange].until : '';
log.bright (('[' + percentsDone() + ']').dim, 'Tested', exchange.cyan, wsFlag, ('[Skipped]' + reason).yellow)
return [];
}
}
// Run tests for all/selected languages (in parallel) //
let args = [exchange];
if (symbol !== undefined && symbol !== 'all') {
args.push(symbol);
}
if (method !== undefined) {
args.push(method);
}
args = args.concat(exchangeOptions)
// forward the --info flag to the per-language test scripts (ts/py/php) too
if (debugKeys['--info']) {
args.push ('--info')
}
let allTests = [
{ key: '--js', language: 'JavaScript', exec: ['node', 'js/src/test/tests.init.js', ...args] },
{ key: '--python-async', language: 'Python Async', exec: ['python3', 'python/ccxt/test/tests_init.py', ...args] },
{ key: '--php-async', language: 'PHP Async', exec: ['php', '-f', 'php/test/tests_init.php', ...args] },
{ key: '--csharp', language: 'C#', exec: ['dotnet', 'run', '--project', 'cs/tests/tests.csproj', ...args] },
{ key: '--ts', language: 'TypeScript', exec: ['node', '--import', 'tsx', 'ts/src/test/tests.init.ts', ...args] },
{ key: '--python', language: 'Python', exec: ['python3', 'python/ccxt/test/tests_init.py', '--sync', ...args] },
{ key: '--php', language: 'PHP', exec: ['php', '-f', 'php/test/tests_init.php', '--', '--sync', ...args] },
];
// select tests based on cli arguments
let selectedTests = [];
const langsAreProvided = (Object.values (langKeys).filter (x => x===true)).length > 0;
if (langsAreProvided) {
selectedTests = allTests.filter (t => langKeys[t.key]);
} else {
selectedTests = allTests.filter (t => t.key !== '--ts'); // exclude TypeScript when running all tests without specific languages
}
// remove skipped tests
if (skipSettings[exchange]) {
if (skipSettings[exchange].skipCSharp) selectedTests = selectedTests.filter (t => t.key !== '--csharp');
if (skipSettings[exchange].skipPhpAsync) selectedTests = selectedTests.filter (t => t.key !== '--php-async');
}
// if it's WS tests, then remove sync versions (php & python) from queue
if (wsFlag) {
selectedTests = selectedTests.filter (t => t.key !== '--python' && t.key !== '--php');
}
const completeTests = await sequentialMap (selectedTests, async test => Object.assign (test, await exec (...test.exec)));
const failed = completeTests.find (test => test.failed);
const hasWarnings = completeTests.find (test => test.warnings.length);
const warnings = completeTests.reduce (
(total, { warnings }) => {
return warnings.length ? total.concat (['\n\n']).concat (warnings) : total
}, []
);
const infos = completeTests.reduce (
(total, { infos }) => {
return infos.length ? total.concat (['\n\n']).concat (infos) : total
}, []
);
// Print interactive log output
let logMessage = '';
if (failed) {
logMessage = 'FAIL'.red;
} else if (hasWarnings) {
logMessage = ('WARN: ' + (warnings.length ? warnings.join (' ') : '')).yellow;
} else {
logMessage = 'OK'.green;
}
numExchangesTested++;
log.bright (('[' + percentsDone() + ']').dim, 'Tested', exchange.cyan, wsFlag, logMessage)
// independently of the success result, show infos
// (these infos are shown as soon as each exchange test finishes, they do not wait for 100% of all tests to be finished)
const displayInfos = true; // set to false to temporarily disable this in run-tests, because the infos are still output to the console by the individual language runners
if (displayInfos) {
if (debugKeys['--info'] && infos.length) {
// show info if enabled
log.indent (1).bright ((
'\n|-------------- INFO --------------|\n' +
infos.join('\n') +
'\n|----------------------------------|\n'
).blue);
}
}
// Return collected data to main loop
return {
exchange,
failed,
hasWarnings,
explain () {
for (let { language, failed, output, warnings, infos } of completeTests) {
const fullSkip = output.indexOf('[SKIPPED]') >= 0;
if (fullSkip)
continue;
// if failed, then show full output (includes warnings)
if (failed) {
log.bright ('\nFAILED'.bgBrightRed.white, exchange.red, '(' + language + ' ' + wsFlag + '):\n')
log.indent (1) ('\n', output)
}
// if not failed, but there are warnings, then show them
else if (warnings.length) {
log.bright ('\nWARN'.yellow.inverse, exchange.yellow, '(' + language + ' ' + wsFlag + '):\n')
log.indent (1) ('\n', warnings.join ('\n'))
}
}
}
}
}
// ------------------------------------------------------------------------ //
function TaskPool (maxConcurrency) {
const pending = []
, queue = []
let numActive = 0
return {
pending,
run (task) {
if (numActive >= maxConcurrency) { // queue task
return new Promise (resolve => queue.push (() => this.run (task).then (resolve)))
} else { // execute task
let p = task ().then (x => {
numActive--
return (queue.length && (numActive < maxConcurrency))
? queue.shift () ().then (() => x)
: x
})
numActive++
pending.push (p)
return p
}
}
}
}
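// A hypothetical usage sketch (the only real caller is testAllExchanges below):
// const pool = TaskPool (2)
// pool.run (() => testExchange ('binance'))  // starts immediately
// pool.run (() => testExchange ('kraken'))   // starts immediately
// pool.run (() => testExchange ('bitfinex')) // queued until one of the first two finishes
// await Promise.all (pool.pending)           // resolves only after queued tasks have drained,
//                                            // because each queued task is chained onto the
//                                            // pending promise that dequeues it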
// ------------------------------------------------------------------------ //
async function testAllExchanges () {
const taskPool = TaskPool (maxConcurrency)
const results = []
for (const exchange of exchanges) {
taskPool.run (() => testExchange (exchange).then (x => results.push (x)))
}
await Promise.all (taskPool.pending)
return results
}
// ------------------------------------------------------------------------ //
(async function () {
// show output like `Testing { exchanges: ["binance"], symbol: "all", debugKeys: { '--warnings': false, '--info': true }, langKeys: { '--ts': false, '--js': false, '--php': false, '--python': false, '--python-async': false, '--php-async': false }, exchangeSpecificFlags: { '--ws': true, '--sandbox': false, '--verbose': false, '--private': false, '--privateOnly': false }, maxConcurrency: 100 }`
log.bright.magenta.noPretty (
'Testing'.white,
Object.assign ({ exchanges, method, symbol, debugKeys, langKeys, exchangeSpecificFlags }, maxConcurrency >= Number.MAX_VALUE ? {} : { maxConcurrency })
)
const tested = await testAllExchanges ()
, warnings = tested.filter (t => !t.failed && t.hasWarnings)
, failed = tested.filter (t => t.failed)
, succeeded = tested.filter (t => !t.failed && !t.hasWarnings)
log.newline ()
warnings.forEach (t => t.explain ())
failed.forEach (t => t.explain ())
log.newline ()
if (failed.length) { log.noPretty.bright.red ('FAIL'.bgBrightRed.white, failed.map (t => t.exchange)) }
if (warnings.length) { log.noPretty.bright.yellow ('WARN'.inverse, warnings.map (t => t.exchange)) }
log.newline ()
log.bright ('All done,', [failed.length && (failed.length + ' failed') .red,
succeeded.length && (succeeded.length + ' succeeded').green,
warnings.length && (warnings.length + ' warnings') .yellow].filter (s => s).join (', '))
if (failed.length) {
await sleep (10) // to fight TravisCI log truncation issue, see https://github.com/travis-ci/travis-ci/issues/8189
process.exit (1)
} else {
process.exit (0)
}
}) ();
// ------------------------------------------------------------------------ //