peon-rewrite/src/util/testing.nim

# Copyright 2024 Mattia Giambirtone & All Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Peon's own custom test suite. Because it's much better to spend a month rolling your
## own solution rather than spending 2 hours learning testament. Yeah, I suffer from NIH
## syndrome, so?
import std/strformat
import std/strutils
import std/sequtils
import frontend/parsing/lexer
import frontend/parsing/parser
import util/symbols

type
    TestStatus* = enum
        ## Test status enumeration
        Init, Running, Success,
        Failed, Crashed,
        TimedOut, Skipped

    TestKind* = enum
        ## Test type enumeration
        Tokenizer, Parser, TypeChecker,
        Runtime

    TestRunner = proc (suite: TestSuite, test: Test)
        ## The signature of a test's internal runner function

    # Represents a test outcome. The exc field contains
    # the exception raised during the test, if any. The
    # error field indicates whether the test errored out
    # or not. If exc is non-nil and error is false, this
    # means the error was expected behavior (see the
    # illustrative sketch after this type section)
    TestOutcome = tuple[error: bool, exc: ref Exception, line: int, location: tuple[start, stop: int]]

    Test* {.inheritable.} = ref object
        ## A generic test object
        skip*: bool              # Skip running this test if true
        name*: string            # Test name. Only useful for displaying purposes
        case kind*: TestKind:    # Test kind (tokenizer, parser, compiler, etc.)
            of Tokenizer:
                lexer: Lexer          # The lexer instance used to run the test
                tokens: seq[Token]    # The expected token stream
            of Parser:
                tree: ParseTree       # The expected parse tree
            else:
                discard
        source*: string          # The source input of the test. Usually peon code
        status*: TestStatus      # The test's current state
        case expected*: TestStatus:    # The test's expected final state after run()
            of Failed:
                line: int                            # The line where the failure is expected
                message: string                      # The expected error message
                location: tuple[start, stop: int]    # The expected error location
            else:
                discard
        outcome*: TestOutcome    # The test's outcome
        runnerFunc: TestRunner   # The test's internal runner function
        reason*: string          # A human readable reason why the test failed

    TestSuite* = ref object
        ## A suite of tests
        tests*: seq[Test]
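

# For illustration only (an assumption, not part of the original module): an
# outcome whose exc field is non-nil while error is false describes an error
# that was expected behavior. The message and positions below are hypothetical:
#
#   var expectedError: TestOutcome
#   expectedError.error = false                              # the error was expected
#   expectedError.exc = LexingError(msg: "unexpected EOF")   # hypothetical message
#   expectedError.line = 1
#   expectedError.location = (0, 5)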


proc `$`*(self: tuple[start, stop: int]): string =
    if self == (-1, -1):
        result = "none"
    else:
        result = &"(start={self.start}, stop={self.stop})"


proc `$`*(self: TestOutcome): string =
    result &= &"Outcome(error={self.error}"
    if not self.exc.isNil():
        var name = ($self.exc.name).split(":")[0]
        result &= &", exc=(name='{name}', msg='{self.exc.msg}')"
    if self.line != -1:
        result &= &", line={self.line}"
    if self.location != (-1, -1):
        result &= &", location={self.location}"
    result &= ")"


proc setup(self: Test) =
    case self.kind:
        of Tokenizer:
            self.lexer = newLexer()
            self.lexer.fillSymbolTable()
        else:
            discard # TODO


proc tokenizeSucceedsRunner(suite: TestSuite, test: Test) =
    ## Runs a tokenization test that is expected to succeed
    ## and checks that it returns the tokens we expect
    test.setup()
    try:
        let tokens = test.lexer.lex(test.source, test.name)
        if tokens.len() != test.tokens.len():
            test.status = Failed
            test.reason = &"Number of provided tokens ({test.tokens.len()}) does not match number of returned tokens ({tokens.len()})"
            return
        var i = 0
        for (provided, expected) in zip(tokens, test.tokens):
            if provided.kind != expected.kind:
                test.status = Failed
                test.reason = &"Token type mismatch at #{i}: expected {expected.kind}, got {provided.kind}\n Expected: {expected}\n Got: {provided}"
                return
            if provided.lexeme != expected.lexeme:
                test.status = Failed
                test.reason = &"Token lexeme mismatch at #{i}: expected '{expected.lexeme}', got '{provided.lexeme}'\n Expected: {expected}\n Got: {provided}"
                return
            if provided.line != expected.line:
                test.status = Failed
                test.reason = &"Token line mismatch at #{i}: expected {expected.line}, got {provided.line}\n Expected: {expected}\n Got: {provided}"
                return
            if provided.pos != expected.pos:
                test.status = Failed
                test.reason = &"Token position mismatch at #{i}: expected {expected.pos}, got {provided.pos}\n Expected: {expected}\n Got: {provided}"
                return
            if provided.relPos != expected.relPos:
                test.status = Failed
                test.reason = &"Token relative position mismatch at #{i}: expected {expected.relPos}, got {provided.relPos}\n Expected: {expected}\n Got: {provided}"
                return
            if provided.spaces != expected.spaces:
                test.status = Failed
                test.reason = &"Token spacing mismatch at #{i}: expected {expected.spaces}, got {provided.spaces}\n Expected: {expected}\n Got: {provided}"
                return
            inc(i)
    except LexingError:
        var exc = LexingError(getCurrentException())
        test.outcome.location = exc.pos
        test.outcome.line = exc.line
        test.status = Failed
        test.outcome.error = true
        test.outcome.exc = getCurrentException()
        test.reason = "Tokenization failed"
        return
    except CatchableError:
        test.status = Crashed
        test.outcome.error = true
        test.outcome.exc = getCurrentException()
        return
    test.status = Success


proc tokenizeFailsRunner(suite: TestSuite, test: Test) =
    ## Runs a tokenization test that is expected to fail
    ## and checks that it does so in the way we expect
    test.setup()
    try:
        discard test.lexer.lex(test.source, test.name)
    except LexingError:
        var exc = LexingError(getCurrentException())
        test.outcome.location = exc.pos
        test.outcome.line = exc.line
        if exc.pos == test.location and exc.line == test.line and exc.msg == test.message:
            test.status = Success
        else:
            if exc.pos != test.location or exc.line != test.line:
                test.reason = &"Expecting failure at {test.line}:({test.location.start}, {test.location.stop}), failed at {exc.line}:({exc.pos.start}, {exc.pos.stop})"
            else:
                # The location matches, so the message must be wrong
                test.reason = &"Expecting error message to be '{test.message}', got '{exc.msg}'"
            test.status = Failed
        test.outcome.error = true
        test.outcome.exc = getCurrentException()
        return
    except CatchableError:
        test.status = Crashed
        test.outcome.error = true
        test.outcome.exc = getCurrentException()
        return
    # No error was raised at all, so the test did not fail as expected
    test.status = Failed


proc newTestSuite*: TestSuite =
    ## Creates a new test suite
    new(result)


proc addTest*(self: TestSuite, test: Test) =
    ## Adds a test to the test suite
    self.tests.add(test)


proc addTests*(self: TestSuite, tests: openarray[Test]) =
    ## Adds the given tests to the test suite
    for test in tests:
        self.addTest(test)


proc removeTest*(self: TestSuite, test: Test) =
    ## Removes the given test from the test suite
    self.tests.delete(self.tests.find(test))


proc removeTests*(self: TestSuite, tests: openarray[Test]) =
    ## Removes the given tests from the test suite
    for test in tests:
        self.removeTest(test)


proc testTokenizeSucceeds*(name, source: string, tokens: seq[Token], skip = false): Test =
    ## Creates a new tokenizer test that is expected to succeed.
    ## Each token returned by the tokenizer is matched against
    ## the given list of tokens: the test only succeeds
    ## if no discrepancies are found
    result = Test(expected: Success, kind: Tokenizer)
    result.outcome.line = -1
    result.outcome.location = (-1, -1)
    result.name = name
    result.status = Init
    result.source = source
    result.skip = skip
    result.runnerFunc = tokenizeSucceedsRunner
    result.tokens = tokens


proc testTokenizeFails*(name, source: string, message: string, line: int, location: tuple[start, stop: int], skip = false): Test =
    ## Creates a new tokenizer test that is expected to fail with the
    ## given error message and at the given location
    result = Test(expected: Failed, kind: Tokenizer)
    result.name = name
    result.status = Init
    result.source = source
    result.skip = skip
    result.runnerFunc = tokenizeFailsRunner
    result.message = message
    result.location = location
    result.line = line
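

# Illustrative sketch (not part of the original module) of how the two
# constructors above are meant to be used. The error message, line and
# location passed to testTokenizeFails are hypothetical: the real values
# depend on what the lexer in frontend/parsing/lexer actually reports.
#
#   let failure = testTokenizeFails("unterminated string", "\"hello",
#                                   message = "unexpected EOF while parsing string literal",
#                                   line = 1, location = (0, 5))
#   let success = testTokenizeSucceeds("empty input", "", tokens = @[])
#   # tokens = @[] is also an assumption: a real lexer will typically still
#   # emit at least an end-of-file token that would have to be listed here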


proc run*(self: TestSuite, verbose: bool = false) =
    ## Runs the test suite to completion,
    ## sequentially
    if verbose:
        echo "Starting test suite"
    var
        ran = 0
        failed = 0
        crashed = 0
        successful = 0
        skipped = 0
    for test in self.tests:
        if test.skip:
            test.status = Skipped
            inc(skipped)
            continue
        if verbose:
            stdout.write(&"Running '{test.name}' ({ran + 1}/{self.tests.len()})\r")
        test.runnerFunc(self, test)
        case test.status:
            of Success:
                inc(successful)
            of Failed:
                inc(failed)
            of Crashed:
                inc(crashed)
            else:
                discard
        inc(ran)
    if verbose:
        echo &"Ran {ran} tests ({skipped} skipped, {successful} successful, {failed} failed, {crashed} crashed)"


proc successful*(self: TestSuite): bool =
    ## Returns whether the test suite completed
    ## successfully or not. If called before run(),
    ## this function returns false. Skipped tests
    ## do not affect the outcome of this function
    for test in self.tests:
        if test.status in [Skipped, Success]:
            continue
        return false
    return true
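

# A minimal usage sketch (an assumption about intended use, not original code):
# build a suite, run it and turn the result into an exit code, e.g. from a
# hypothetical tests/main.nim entry point.
#
#   var suite = newTestSuite()
#   suite.addTests(@[failure, success])   # the tests built in the sketch above
#   suite.run(verbose = true)
#   if not suite.successful():
#       quit(1)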


proc getExpectedException(self: Test): ref Exception =
    ## Gets the exception that we expect to be
    ## raised by the test. Could be nil if we
    ## expect no errors
    if self.expected == Success:
        return nil
    case self.kind:
        of Tokenizer:
            return LexingError(msg: self.message, line: self.line, file: self.name, lexer: self.lexer, pos: self.location)
        else:
            discard # TODO


proc getExpectedOutcome*(self: Test): TestOutcome =
    ## Gets the expected outcome of a test
    doAssert self.expected in [Success, Failed], "expected outcome is neither Success nor Failed: wtf?"
    case self.kind:
        of Tokenizer:
            if self.expected == Success:
                return (false, self.getExpectedException(), -1, (-1, -1))
            else:
                return (false, self.getExpectedException(), self.line, self.location)
        else:
            discard
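

# Illustrative sketch (an assumption, not original code): after run(), a
# reporter can compare each test's actual outcome with the expected one and
# rely on the `$` overloads defined above for printing.
#
#   for test in suite.tests:
#       if test.status in [Failed, Crashed]:
#           echo &"[{test.name}] {test.reason}"
#           echo &"    expected outcome: {test.getExpectedOutcome()}"
#           echo &"    actual outcome:   {test.outcome}"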