New command line options for jats

-t:<test path> to specify a different test file or directory
-e to enumerate failed tests
-f to run skipped tests
--timeout:<timeout> to specify a custom timeout
skipped tests that were forced to run and succeeded will display an info message
when only a single test file is run, more info is displayed to stdout by default
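
For example, a typical invocation combining the new flags might look like this
(the test path and timeout value here are only placeholders):
    ./jats -e -f -t:tests/japl --timeout:5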
Productive2 2021-02-20 22:23:57 +01:00
parent 3f0ae9bc1b
commit 9964f2b92c
6 changed files with 87 additions and 17 deletions

View File

@@ -54,6 +54,7 @@ when isMainModule:
var targetFiles: seq[string]
var verbose = true
var quitVal = QuitValue.Success
var testDir = "japl"
proc evalKey(key: string) =
## Modifies the globals that define what JATS does based on the
@@ -69,6 +70,10 @@ when isMainModule:
verbose = false
elif key == "stdout":
debugActions.add(DebugAction.Stdout)
elif key == "f" or key == "force":
force = true
elif key == "e" or key == "enumerate":
enumerate = true
else:
echo &"Unknown flag: {key}"
action = Action.Help
@@ -88,6 +93,24 @@ when isMainModule:
echo "Can't parse non-integer option passed to -j/--jobs."
action = Action.Help
quitVal = QuitValue.ArgParseErr
elif key == "t" or key == "test" or key == "tests":
testDir = val
elif key == "timeout":
try:
var timeoutSeconds = parseFloat(val)
# a round is 100 ms, so let's not get close to that
if timeoutSeconds < 0.3:
timeoutSeconds = 0.3
# I don't want anything not nicely convertible to int,
# so how about cutting it off at 10 hours. Open an issue
# if that's not enough... or just tweak it you lunatic
if timeoutSeconds > 36000.0:
timeoutSeconds = 36000.0
timeout = (timeoutSeconds * 10).int
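# for instance, --timeout:2.5 becomes timeout = 25 cycles
# (2.5 s at testWait = 100 ms per cycle)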
except ValueError:
echo "Can't parse invalid timeout value " & val
action = Action.Help
quitVal = QuitValue.ArgParseErr
else:
echo &"Unknown option: {key}"
action = Action.Help
@@ -119,15 +142,18 @@ when isMainModule:
echo """
JATS - Just Another Test Suite
Usage:
jats
Runs the tests
Flags:
Usage: ./jats <flags>
Debug output flags:
-i (or --interactive) displays all debug info
-o:<filename> (or --output:<filename>) saves debug info to a file
-s (or --silent) will disable all output (except --stdout)
--stdout will put all debug info to stdout
-e (or --enumerate) will list all tests that fail, crash or get killed
Test behavior flags:
-j:<parallel test count> (or --jobs:<parallel test count>) to specify the number of tests to run in parallel
-t:<test file or dir> (or --test:<path> or --tests:<path>) to specify where tests are
-f (or --force) will run skipped tests
Miscellaneous flags:
-h (or --help) displays this help message
-v (or --version) displays the version number of JATS
"""
@@ -167,18 +193,24 @@ Flags:
# the second half of the test suite defined in ~japl/tests/japl
# Find ~japl/tests/japl and the test runner JATR
var jatr = "jatr"
var testDir = "japl"
if not fileExists(jatr):
if fileExists("tests" / jatr):
log(LogLevel.Debug,
&"Must be in root: prepending \"tests\" to paths")
&"Must be in root: prepending \"tests\" to jatr path")
jatr = "tests" / jatr
testDir = "tests" / testDir
else:
# only those two dirs are realistically useful for now,
echo "The tests directory couldn't be found."
echo "The test runner was not found."
quit int(QuitValue.JatrNotFound)
if not dirExists(testDir) and not fileExists(testDir):
if dirExists("tests" / testDir) or fileExists("tests" / testDir):
log(LogLevel.Debug, "Prepending \"tests\" to test path")
testDir = "tests" / testDir
else:
echo "The test dir/file was not found."
quit int(QuitValue.JatrNotFound)
# set the global var which specifies the path to the test runner
testRunner = jatr
log(LogLevel.Info, &"Running JAPL tests.")

View File

@@ -35,20 +35,24 @@ type LogLevel* {.pure.} = enum
## All the different possible log levels
Debug, # always written to file only (large outputs, such as the entire output of the failing test or stacktrace)
Info, # important information about the progress of the test suite
Enumeration, # output for the enumerate option, printed in the default (white) color
Error, # failing tests (printed with yellow)
Fatal # halts the entire suite (e.g. test parsing errors); always printed with red
# log config: which log levels to show, show in silent mode and save to the
# detailed debug logs
const echoedLogs = {LogLevel.Info, LogLevel.Error, LogLevel.Fatal}
const echoedLogsSilent = {LogLevel.Fatal} # will be echoed even if test suite is silent
const savedLogs = {LogLevel.Debug, LogLevel.Info, LogLevel.Error, LogLevel.Fatal}
const echoedLogs = {LogLevel.Info, LogLevel.Error, LogLevel.Fatal,
LogLevel.Enumeration}
const echoedLogsSilent = {LogLevel.Fatal, LogLevel.Enumeration} # will be echoed even if test suite is silent
const savedLogs = {LogLevel.Debug, LogLevel.Info, LogLevel.Error,
LogLevel.Fatal, LogLevel.Enumeration}
# aesthetic config:
# progress bar length
const progbarLength = 25
# log level colors
const logColors = [LogLevel.Debug: fgDefault, LogLevel.Info: fgGreen,
LogLevel.Enumeration: fgDefault,
LogLevel.Error: fgYellow, LogLevel.Fatal: fgRed]
# global vars for the proc log

View File

@@ -14,6 +14,7 @@
import testobject
import logutils
import testconfig
import os
import strutils
@@ -136,7 +137,9 @@ proc buildTest(lines: seq[string], i: var int, name: string, path: string): Test
fatal &"Invalid mode {parsed.mode} when inside a block (currently in mode {mode}) at line {i} in {path}."
else: # still if modal, but not inside
if parsed.mode == "skip":
result.skip()
result.m_skipped = true
if not force:
result.skip()
elif parsed.mode == "end":
# end of test
return result
@@ -175,6 +178,15 @@ proc buildTestFile(path: string): seq[Test] =
proc buildTests*(testDir: string): seq[Test] =
## Builds all tests within the directory testDir;
## if testDir is a file, only that one file is built
if not dirExists(testDir):
if fileExists(testDir):
result &= buildTestFile(testDir)
for test in result:
test.important = true
else:
fatal "test dir/file doesn't exist"
for candidateObj in walkDir(testDir):
let candidate = candidateObj.path
if dirExists(candidate):

View File

@@ -16,8 +16,11 @@ const jatsVersion* = "(dev)"
var maxAliveTests* = 16 # number of tests that can run in parallel
const testWait* = 100 # number of milliseconds per cycle
const timeout* = 50 # number of cycles after which a test is killed for timeout
var timeout* = 50 # number of cycles after which a test is killed for timeout
var testRunner* = "jatr"
var force*: bool = false # whether skipped tests are executed anyway
var enumerate*: bool = false # if true, all failed/crashed and killed tests
# are enumerated to stdout
const outputIgnore* = [ "^DEBUG.*$" ]

View File

@@ -43,18 +43,31 @@ proc printResults*(tests: seq[Test]): bool =
crash = 0
killed = 0
for test in tests:
log(LogLevel.Debug, &"Test {test.name}@{test.path} result: {test.result}")
var level = LogLevel.Debug
var detailLevel = LogLevel.Debug
if test.important:
level = LogLevel.Info
detailLevel = LogLevel.Info
if (test.result in {TestResult.Crash, TestResult.Mismatch, TestResult.Killed} and enumerate):
level = LogLevel.Enumeration
log(level, &"Test {test.name}@{test.path} result: {test.result}")
case test.result:
of TestResult.Skip:
inc skipped
of TestResult.Mismatch:
inc fail
log(LogLevel.Debug, &"[{test.name}@{test.path}\nstdout:\n{test.output}\nstderr:\n{test.error}\nexpected stdout:\n{test.expectedOutput}\nexpected stderr:\n{test.expectedError}\n]")
log(LogLevel.Debug, &"\nMismatch pos for stdout: {test.mismatchPos}\nMismatch pos for stderr: {test.errorMismatchPos}")
log(detailLevel, &"[{test.name}@{test.path}\nstdout:\n{test.output}\nstderr:\n{test.error}\nexpected stdout:\n{test.expectedOutput}\nexpected stderr:\n{test.expectedError}\n]")
log(detailLevel, &"\nMismatch pos for stdout: {test.mismatchPos}\nMismatch pos for stderr: {test.errorMismatchPos}")
of TestResult.Crash:
inc crash
log(LogLevel.Debug, &"{test.name}@{test.path} \ncrash:\n{test.error}")
log(detailLevel, &"{test.name}@{test.path} \ncrash:\n{test.error}")
of TestResult.Success:
if test.m_skipped:
log(LogLevel.Info, &"Test {test.name}@{test.path} succeeded, despite being marked to be skipped.")
inc success
of TestResult.Killed:
inc killed

View File

@@ -40,6 +40,10 @@ type
source*: string
path*: string
name*: string
important*: bool # if set to true, the stdout/stderr and extra debug info
# will be printed when the test finishes
m_skipped*: bool # metadata, whether the skipped mode is in the file
# NOT WHETHER THE TEST IS ACTUALLY SKIPPED
# generated after building
expectedOutput*: seq[ExpectedLine]
expectedError*: seq[ExpectedLine]
@@ -122,6 +126,8 @@ proc newTest*(name: string, path: string): Test =
result.name = name
result.mismatchPos = -1
result.errorMismatchPos = -1
result.important = false
result.m_skipped = false
proc skip*(test: Test) =
test.result = TestResult.Skip