Hopefully fixed dependency resolution. Added file locking on logs. Improved README, fixed some minor issues

This commit is contained in:
Nocturn9x 2021-12-09 17:09:00 +01:00
parent 342fd15ffb
commit fd35755da5
5 changed files with 236 additions and 119 deletions

View File

@ -76,10 +76,21 @@ stdout = /var/log/sshd # Path of the stdout log for the service
stdin = /dev/null # Path of the stdin fd for the service stdin = /dev/null # Path of the stdin fd for the service
``` ```
A dependency name can either be the name of a unit file (without the `.conf` extension), or one of the following placeholders: __Note__: Unsupervised services cannot be restarted, as NimD has no control over them once they're spawned.
A dependency name can either be the name of a unit file (case sensitive, but without the `.conf` extension), or one of the following placeholders:
- `net` -> Stands for network connection. Services like NetworkManager and dhcpcd should be set as providers for this - `net` -> Stands for network connection. Services like NetworkManager and dhcpcd should be set as providers for this
- `fs` -> If you mount your disks using a oneshot service (recommended for the best experience), your service should provide this - `fs` -> If you mount your disks using a oneshot service (recommended for the best experience), your service should provide this
- `ssh` -> The service provides some sort of SSH functionality
- `ftp` -> The service provides an FTP server
- `http` -> The service is an HTTP webserver
Note that NimD resolves placeholders before service names: this means that if you have a service named `ssh.conf`, using `ssh` as
a dependency will __not__ set that service as a dependency and will __not__ override the default behavior unless said unit file also has
`provides=ssh` in it. Also note that multiple providers for the same service raise a warning by default and cause NimD to let the alphabet decide
which dependency is started (i.e. they are sorted lexicographically by their filename, without the extension, and the first is picked), but this
behavior can be changed (e.g. raising an error instead)
## Configuring NimD ## Configuring NimD
@ -94,12 +105,14 @@ logFile = /var/log/nimd # Path to log file
[Filesystem] [Filesystem]
autoMount = true # Automatically parses /etc/fstab and mounts disks autoMount = true # Automatically parses /etc/fstab and mounts disks
fstabPath = /etc/fstab # Path to your system's fstab (defaults to /etc/fstab) autoUnmount = true # Automatically parses /proc/mounts and unmounts everything on shutdown
createDirs = /path/to/dir1, /path/to/dir2 # Creates these directories on boot. Empty to disable fstabPath = /etc/fstab # Path to your system's fstab (defaults to /etc/fstab)
createSymlinks = /path/to/dir1, /path/to/dir2 # Creates these symlinks on boot. Empty to disable createDirs = /path/to/dir1, /path/to/dir2 # Creates these directories on boot. Empty to disable
createSymlinks = /path/to/symlink:/path/to/dest, ... # Creates these symlinks on boot. Empty to disable
[Misc] [Misc]
controlSocket = /var/run/nimd.sock # Path to the Unix domain socket to create for IPC controlSocket = /var/run/nimd.sock # Path to the Unix domain socket to create for IPC
onDependencyConflict = skip # Other option: warn, error
``` ```

View File

@ -349,13 +349,14 @@ proc createDirectories*(logger: Logger) =
## in their config. Note that the entire path ## in their config. Note that the entire path
## of the directory is created if it does not ## of the directory is created if it does not
## exist yet ## exist yet
var hasChmod = false var hasChmod = true
try: try:
if findExe("chmod").isEmptyOrWhitespace(): if findExe("chmod").isEmptyOrWhitespace():
logger.warning("Could not find chmod binary, directory permissions will default to OS configuration") logger.warning("Could not find chmod binary, directory permissions will default to OS configuration")
hasChmod = true hasChmod = false
except: except:
logger.error(&"Failed to search for chmod binary: {getCurrentExceptionMsg()}") logger.error(&"Failed to search for chmod binary, directory permissions will default to OS configuration: {getCurrentExceptionMsg()}")
hasChmod = false
for dir in directories: for dir in directories:
try: try:
if exists(dir.path): if exists(dir.path):
@ -374,6 +375,6 @@ proc createDirectories*(logger: Logger) =
if hasChmod: if hasChmod:
logger.debug(&"Setting permissions to {dir.permissions} for {dir.path}") logger.debug(&"Setting permissions to {dir.permissions} for {dir.path}")
if (let code = execShellCmd(&"chmod -R {dir.permissions} {dir.path}"); code) != 0: if (let code = execShellCmd(&"chmod -R {dir.permissions} {dir.path}"); code) != 0:
logger.warning(&"Command 'chmod -R {dir.permissions}' exited non-zero status code {code}") logger.warning(&"Command 'chmod -R {dir.permissions} {dir.path}' exited non-zero status code {code}")
except: except:
logger.error(&"Failed to create directory at {dir.path}: {getCurrentExceptionMsg()}") logger.error(&"Failed to create directory at {dir.path}: {getCurrentExceptionMsg()}")

View File

@ -30,7 +30,9 @@ import ../util/logging
type type
RunLevel* = enum RunLevel* = enum
## Enum of possible runlevels ## Enum of possible runlevels
Boot, Default, Shutdown Boot = 0,
Default,
Shutdown
ServiceKind* = enum ServiceKind* = enum
## Enumerates all service ## Enumerates all service
## types ## types
@ -38,6 +40,14 @@ type
RestartKind* = enum RestartKind* = enum
## Enum of possible restart modes ## Enum of possible restart modes
Always, OnFailure, Never Always, OnFailure, Never
DependencyKind* = enum
## Enum of possible dependencies
Network, Filesystem,
Ssh, Ftp, Http, Other
Dependency* = ref object
## A dependency
kind*: DependencyKind
provider*: Service
Service* = ref object of RootObj Service* = ref object of RootObj
## A service object ## A service object
name: string name: string
@ -49,17 +59,28 @@ type
supervised: bool supervised: bool
restart: RestartKind restart: RestartKind
restartDelay: int restartDelay: int
depends*: seq[Service] depends: seq[Dependency]
provides*: seq[Service] provides: seq[Dependency]
## These two fields are
## used by the dependency
## resolver
isMarked: bool
isResolved: bool
proc newDependency*(kind: DependencyKind, provider: Service): Dependency =
## Creates a new dependency object
result = Dependency(kind: kind, provider: provider)
proc newService*(name, description: string, kind: ServiceKind, workDir: string, runlevel: RunLevel, exec: string, supervised: bool, restart: RestartKind, proc newService*(name, description: string, kind: ServiceKind, workDir: string, runlevel: RunLevel, exec: string, supervised: bool, restart: RestartKind,
restartDelay: int, depends, provides: seq[Service]): Service = restartDelay: int, depends, provides: seq[Dependency]): Service =
## Creates a new service object ## Creates a new service object
result = Service(name: name, description: description, kind: kind, workDir: workDir, runLevel: runLevel, result = Service(name: name, description: description, kind: kind, workDir: workDir, runLevel: runLevel,
exec: exec, supervised: supervised, restart: restart, restartDelay: restartDelay, exec: exec, supervised: supervised, restart: restart, restartDelay: restartDelay,
depends: depends, provides: provides) depends: depends, provides: provides, isMarked: false, isResolved: false)
result.provides.add(result) result.provides.add(newDependency(Other, result))
proc extend[T](self: var seq[T], other: seq[T]) = proc extend[T](self: var seq[T], other: seq[T]) =
@ -72,34 +93,83 @@ var services: seq[Service] = @[]
var processIDs: TableRef[int, Service] = newTable[int, Service]() var processIDs: TableRef[int, Service] = newTable[int, Service]()
proc resolveDependencies(logger: Logger, node: Service, resolved, unresolved: var seq[Service]) = proc resolve(logger: Logger, node: Service): seq[Service] =
## Resolves dependencies and modifies the resolved ## Returns a sorted list of services according
## parameter in place to a list that satisfies the ## to their dependency and provider requirements.
## dependency tree. This is basically traversing ## This function recursively iterates over the
## a directed cyclic graph, although note that cycles ## list of services, treating it as a DAG
## in our graph are errors and cause the dependants and ## (Directed Acyclic Graph) and builds a topologically
## the providers to be skipped and an error to be logged ## sorted list such that a service appears in it only
## after all of its dependencies and only
# Note: It turns out this is an NP-hard problem (see https://stackoverflow.com/a/28102139/12159081), ## before all of its dependents.
# so hopefully this doesn't blow up. No wonder runit doesn't do any dependency resolution, lol. ## This function also automatically handles
# The algorithm comes from https://www.electricmonk.nl/log/2008/08/07/dependency-resolving-algorithm/ ## detached subgraphs, which can occur if
# and has been extended to support the dependent-provider paradigm ## one or more dependencies have common
var ok = true ## dependencies/dependents between each other,
unresolved.add(node) ## but not with the rest of the graph. Nodes
for dependency in node.depends: ## that have no dependencies nor provide any
if dependency notin resolved: ## service may be located anywhere in the list,
if dependency in unresolved: ## as that does not invalidate the invariants
logger.error(&"Could not resolve dependencies for '{node.name}' -> '{dependency.name}': cyclic dependency detected") ## described above. The algorithm comes from
## https://www.electricmonk.nl/log/2008/08/07/dependency-resolving-algorithm/
## and has been extended to support the dependent-provider paradigm.
## Note that it is not an error for a service in a given runlevel to depend
## on services in other runlevels: when that occurs a warning is raised and
## the service in the lower runlevel is promoted to the higher one (runlevels start from 0),
## which means adding a module in a given runlevel implicitly adds all of its dependencies
## to said runlevel as well, regardless of what was specified in their unit file
if node.isResolved:
logger.debug(&"Dependency '{node.name}' has already been satisfied, skipping it")
return @[]
var ok: bool = true
result = @[]
node.isMarked = true
for service in node.provides:
if service.provider == node:
continue # Services implicitly provide themselves
if node.runlevel < service.provider.runlevel:
logger.warning(&"Service '{node.name}' in runlevel {node.runlevel} depends on '{service.provider.name}' in runlevel {service.provider.runlevel}, loading dependency regardless")
if not service.provider.isResolved:
if service.provider.isMarked:
logger.warning(&"Cyclic dependency from '{node.name}' to '{service.provider.name}' detected while building dependency graph: skipping both")
ok = false ok = false
continue break
resolveDependencies(logger, dependency, resolved, unresolved) service.provider.isMarked = true
for dependency in node.provides: result.extend(resolve(logger, service.provider))
if dependency == node: for service in node.depends:
continue if service.provider == node:
resolveDependencies(logger, dependency, resolved, unresolved) logger.warning(&"Cyclic dependency from '{node.name}' to itself detected while building dependency graph: skipping it")
ok = false
break
if node.runlevel > service.provider.runlevel:
logger.warning(&"Service '{node.name}' in runlevel {node.runlevel} depends on '{service.provider.name}' in runlevel {service.provider.runlevel}, loading both")
if not service.provider.isResolved:
if service.provider.isMarked:
logger.warning(&"Cyclic dependency from '{node.name}' to '{service.provider.name}' detected while building dependency graph: skipping both")
ok = false
break
service.provider.isMarked = true
result.extend(resolve(logger, service.provider))
if ok: if ok:
resolved.add(node) result.add(node)
unresolved.del(unresolved.find(node)) node.isResolved = true
node.isMarked = false
proc resolveDependencies(logger: Logger, services: seq[Service], level: RunLevel): seq[Service] =
## Iteratively calls resolve() until all services
## have been processed
result = @[]
var node: Service
var i = 1
var s: seq[Service] = @[]
for service in services:
if service.runlevel == level:
s.add(service)
while i <= len(s):
node = s[^i]
result.extend(resolve(logger, node))
inc(i)
proc isManagedProcess*(pid: int): bool = proc isManagedProcess*(pid: int): bool =
@ -239,47 +309,49 @@ proc startService(logger: Logger, service: Service) =
proc startServices*(logger: Logger, level: RunLevel, workers: int = 1) = proc startServices*(logger: Logger, level: RunLevel, workers: int = 1) =
## Starts the registered services in the ## Starts the registered services in the
## given runlevel ## given runlevel
var resolved: seq[Service] = @[] if len(services) == 0:
var unresolved: seq[Service] = @[] return
resolveDependencies(logger, services[0], resolved, unresolved) var dependencies = resolveDependencies(logger, services, level)
if workers > cpuinfo.countProcessors(): if workers > cpuinfo.countProcessors():
logger.warning(&"The configured number of workers ({workers}) is greater than the number of CPU cores ({cpuinfo.countProcessors()}), performance may degrade") logger.warning(&"The configured number of workers ({workers}) is greater than the number of CPU cores ({cpuinfo.countProcessors()}), performance may degrade")
var workerCount: int = 0
var status: cint var status: cint
var pid: int = posix.fork() var pid: int = posix.fork()
var pids: seq[int] = @[]
if pid == -1: if pid == -1:
logger.error(&"Error, cannot fork: {posix.strerror(posix.errno)}") logger.error(&"Error, cannot fork: {posix.strerror(posix.errno)}")
elif pid == 0: elif pid == 0:
var service: Service
logger.debug("Started service spawner process") logger.debug("Started service spawner process")
var servicesCopy: seq[Service] = @[] while dependencies.len() > 0:
for service in services: for _ in 0..<workers:
if service.runlevel == level: if len(dependencies) == 0:
servicesCopy.add(service) break
while servicesCopy.len() > 0: service = dependencies[0]
if workerCount == workers: dependencies.del(0)
logger.debug(&"Worker queue full, waiting for some worker to exit...") pid = posix.fork()
logger.trace(&"Calling waitpid() on {pid}") if pid == -1:
var returnCode = waitPid(cint(pid), status, WUNTRACED) logger.error(&"An error occurred while forking to spawn services, trying again: {posix.strerror(posix.errno)}")
logger.trace(&"Call to waitpid() set status to {status} and returned {returnCode}") elif pid == 0:
dec(workerCount) logger.trace(&"New child has been spawned")
pid = posix.fork() if not service.supervised or service.kind == Oneshot:
if pid == -1: logger.info(&"""Starting {(if service.kind != Oneshot: "unsupervised" else: "oneshot")} service '{service.name}'""")
logger.error(&"An error occurred while forking to spawn services, trying again: {posix.strerror(posix.errno)}") else:
elif pid == 0: logger.info(&"Starting supervised service '{service.name}'")
logger.trace(&"New child has been spawned") startService(logger, service)
if not servicesCopy[0].supervised or servicesCopy[0].kind == Oneshot:
logger.info(&"""Starting {(if servicesCopy[0].kind != Oneshot: "unsupervised" else: "oneshot")} service '{servicesCopy[0].name}'""")
else: else:
logger.info(&"Starting supervised service '{servicesCopy[0].name}'") pids.add(pid)
startService(logger, servicesCopy[0]) if service.supervised:
elif servicesCopy.len() > 0: addManagedProcess(pid, service)
workerCount += 1 if len(pids) == workers:
if servicesCopy[0].supervised: logger.debug(&"""Worker queue full, waiting for some worker{(if workers > 1: "s" else: "")} to exit...""")
addManagedProcess(pid, servicesCopy[0]) for i, pid in pids:
servicesCopy.del(0) logger.trace(&"Calling waitpid() on {pid}")
var returnCode = waitPid(cint(pid), status, WUNTRACED)
logger.trace(&"Call to waitpid() on {pid} set status to {status} and returned {returnCode}")
pids = @[]
quit(0) quit(0)
else: else:
logger.debug(&"Waiting for completion of service spawning in runlevel {($level).toLowerAscii()}") logger.debug(&"Waiting for completion of service spawning in runlevel {($level).toLowerAscii()}")
logger.trace(&"Calling waitpid() on {pid}") logger.trace(&"Calling waitpid() on {pid}")
var returnCode = waitPid(cint(pid), status, WUNTRACED) var returnCode = waitPid(cint(pid), status, WUNTRACED)
logger.trace(&"Call to waitpid() set status to {status} and returned {returnCode}") logger.trace(&"Call to waitpid() on {pid} set status to {status} and returned {returnCode}")

View File

@ -36,11 +36,10 @@ proc addStuff =
# Tests here. Check logging output (debug) to see if # Tests here. Check logging output (debug) to see if
# they work as intended # they work as intended
addSymlink(newSymlink(dest="/dev/std/err", source="/")) # Should say link already exists and points to /proc/self/fd/2 addSymlink(newSymlink(dest="/dev/std/err", source="/")) # Should say link already exists and points to /proc/self/fd/2
addSymlink(newSymlink(dest="/dev/std/in", source="/does/not/exist")) # Shuld say destination does not exist addSymlink(newSymlink(dest="/dev/std/in", source="/does/not/exist")) # Should say destination does not exist
addSymlink(newSymlink(dest="/dev/std/in", source="/proc/self/fd/0")) # Should say link already exists addSymlink(newSymlink(dest="/dev/std/in", source="/proc/self/fd/0")) # Should say link already exists
addDirectory(newDirectory("test", 777)) # Should create a directory addDirectory(newDirectory("test", 777)) # Should create a directory
addDirectory(newDirectory("/dev/disk", 123)) # Should say directory already exists addDirectory(newDirectory("/dev/disk", 123)) # Should say directory already exists
addDirectory(newDirectory("/dev/test/owo", 000)) # Should say path does not exist
# Shutdown handler to unmount disks # Shutdown handler to unmount disks
addShutdownHandler(newShutdownHandler(unmountAllDisks)) addShutdownHandler(newShutdownHandler(unmountAllDisks))
# Adds test services # Adds test services
@ -51,25 +50,25 @@ proc addStuff =
var errorer = newService(name="errorer", description="la mamma di gavd", var errorer = newService(name="errorer", description="la mamma di gavd",
exec="/bin/false", supervised=true, restart=OnFailure, exec="/bin/false", supervised=true, restart=OnFailure,
restartDelay=5, runlevel=Boot, workDir="/", kind=Simple, restartDelay=5, runlevel=Boot, workDir="/", kind=Simple,
depends=(@[echoer]), provides=(@[])) depends=(@[newDependency(Other, echoer)]), provides=(@[]))
var test = newService(name="broken", description="", exec="/bin/echo owo", var test = newService(name="broken", description="", exec="/bin/echo owo",
runlevel=Boot, kind=Oneshot, workDir=getCurrentDir(), runlevel=Boot, kind=Oneshot, workDir=getCurrentDir(),
supervised=false, restart=Never, restartDelay=0, supervised=false, restart=Never, restartDelay=0,
depends=(@[echoer]), provides=(@[])) depends=(@[newDependency(Other, echoer)]), provides=(@[]))
var exiter = newService(name="exiter", description="la mamma di licenziat", var exiter = newService(name="exiter", description="la mamma di licenziat",
exec="/bin/true", supervised=true, restart=Always, exec="/bin/true", supervised=true, restart=Always,
restartDelay=5, runlevel=Boot, workDir="/", kind=Simple, restartDelay=5, runlevel=Boot, workDir="/", kind=Simple,
depends=(@[errorer]), provides=(@[])) depends=(@[newDependency(Other, errorer)]), provides=(@[]))
addService(errorer) addService(errorer)
addService(echoer) addService(echoer)
addService(exiter) addService(exiter)
addService(test) addService(test)
echoer.depends.add(test)
proc main(logger: Logger, mountDisks: bool = true, fstab: string = "/etc/fstab") = proc main(logger: Logger, mountDisks: bool = true, fstab: string = "/etc/fstab", setHostname: bool = true, workerCount: int = 1) =
## NimD's entry point and setup ## NimD's entry point and setup
## function ## function
setStdIoUnbuffered() # Colors and output synchronization don't work otherwise
logger.debug("Starting NimD: A minimal, self-contained, dependency-based Linux init system written in Nim") logger.debug("Starting NimD: A minimal, self-contained, dependency-based Linux init system written in Nim")
logger.info(&"NimD version {NimdVersion.major}.{NimdVersion.minor}.{NimdVersion.patch} is starting up!") logger.info(&"NimD version {NimdVersion.major}.{NimdVersion.minor}.{NimdVersion.patch} is starting up!")
logger.trace("Calling getCurrentProcessId()") logger.trace("Calling getCurrentProcessId()")
@ -110,12 +109,15 @@ proc main(logger: Logger, mountDisks: bool = true, fstab: string = "/etc/fstab")
except: except:
logger.fatal(&"A fatal error has occurred while preparing filesystem, booting cannot continue. Error -> {getCurrentExceptionMsg()}") logger.fatal(&"A fatal error has occurred while preparing filesystem, booting cannot continue. Error -> {getCurrentExceptionMsg()}")
nimDExit(logger, 131, emerg=false) nimDExit(logger, 131, emerg=false)
logger.info("Setting hostname") if setHostname:
logger.debug(&"Hostname was set to '{setHostname(logger)}'") logger.info("Setting hostname")
logger.debug(&"Hostname was set to '{misc.setHostname(logger)}'")
else:
logger.info("Skipping setting hostname")
logger.debug("Entering critical fork() section: blocking signals") logger.debug("Entering critical fork() section: blocking signals")
blockSignals(logger) # They are later unblocked in mainLoop blockSignals(logger) # They are later unblocked in mainLoop
logger.info("Processing boot runlevel") logger.info("Processing boot runlevel")
startServices(logger, workers=1, level=Boot) startServices(logger, workers=workerCount, level=Boot)
logger.debug("Starting main loop") logger.debug("Starting main loop")
mainLoop(logger) mainLoop(logger)

View File

@ -40,7 +40,7 @@ type
var defaultLevel = LogLevel.Info var defaultLevel = LogLevel.Info
var logFile = "/var/log/nimd" var logFile = "/var/log/nimd"
var logToFile: bool = false var logToFileOnly: bool = false
proc log(self: Logger, level: LogLevel = defaultLevel, message: string) # Forward declaration proc log(self: Logger, level: LogLevel = defaultLevel, message: string) # Forward declaration
@ -79,52 +79,71 @@ proc log(self: Logger, level: LogLevel = defaultLevel, message: string) =
# Do NOT touch the alignment offsets or your console output and logs will look like trash # Do NOT touch the alignment offsets or your console output and logs will look like trash
proc lockFile(logger: Logger, handle: File) =
## Locks the given file across the whole system for writing using fcntl()
if fcntl(handle.getFileHandle(), F_WRLCK) == -1:
stderr.writeLine(&"Error while locking handle (code {posix.errno}, {posix.strerror(posix.errno)}): output may be mangled")
proc unlockFile(logger: Logger, handle: File) =
## Unlocks the given file across the whole system for writing using fcntl()
if fcntl(handle.getFileHandle(), F_UNLCK) == -1:
stderr.writeLine(&"Error while locking stderr (code {posix.errno}, {posix.strerror(posix.errno)}): output may be mangled")
proc logTraceStderr(self: LogHandler, logger: Logger, message: string) = proc logTraceStderr(self: LogHandler, logger: Logger, message: string) =
logger.lockFile(stderr)
setForegroundColor(fgMagenta) setForegroundColor(fgMagenta)
stderr.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} TRACE {"-":>3} ({posix.getpid():03})] {message}""") stderr.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} TRACE {"-":>3} ({posix.getpid():03})] {message}""")
stderr.flushFile()
setForegroundColor(fgDefault) setForegroundColor(fgDefault)
logger.unlockFile(stderr)
proc logDebugStderr(self: LogHandler, logger: Logger, message: string) = proc logDebugStderr(self: LogHandler, logger: Logger, message: string) =
logger.lockFile(stderr)
setForegroundColor(fgCyan) setForegroundColor(fgCyan)
stderr.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} DEBUG {"-":>3} ({posix.getpid():03})] {message}""") stderr.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} DEBUG {"-":>3} ({posix.getpid():03})] {message}""")
stderr.flushFile()
setForegroundColor(fgDefault) setForegroundColor(fgDefault)
logger.unlockFile(stderr)
proc logInfoStderr(self: LogHandler, logger: Logger, message: string) = proc logInfoStderr(self: LogHandler, logger: Logger, message: string) =
logger.lockFile(stderr)
setForegroundColor(fgGreen) setForegroundColor(fgGreen)
stderr.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} INFO {"-":>4} ({posix.getpid():03})] {message}""") stderr.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} INFO {"-":>4} ({posix.getpid():03})] {message}""")
stderr.flushFile()
setForegroundColor(fgDefault) setForegroundColor(fgDefault)
logger.unlockFile(stderr)
proc logWarningStderr(self: LogHandler, logger: Logger, message: string) = proc logWarningStderr(self: LogHandler, logger: Logger, message: string) =
logger.lockFile(stderr)
setForegroundColor(fgYellow) setForegroundColor(fgYellow)
stderr.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} WARNING {"-":>1} ({posix.getpid():03})] {message}""") stderr.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} WARNING {"-":>1} ({posix.getpid():03})] {message}""")
stderr.flushFile()
setForegroundColor(fgDefault) setForegroundColor(fgDefault)
logger.unlockFile(stderr)
proc logErrorStderr(self: LogHandler, logger: Logger, message: string) = proc logErrorStderr(self: LogHandler, logger: Logger, message: string) =
logger.lockFile(stderr)
setForegroundColor(fgRed) setForegroundColor(fgRed)
stderr.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} ERROR {"-":>3} ({posix.getpid():03})] {message}""") stderr.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} ERROR {"-":>3} ({posix.getpid():03})] {message}""")
stderr.flushFile()
setForegroundColor(fgDefault) setForegroundColor(fgDefault)
logger.unlockFile(stderr)
proc logCriticalStderr(self: LogHandler, logger: Logger, message: string) = proc logCriticalStderr(self: LogHandler, logger: Logger, message: string) =
logger.lockFile(stderr)
setForegroundColor(fgYellow) setForegroundColor(fgYellow)
setBackgroundColor(bgRed) setBackgroundColor(bgRed)
stderr.write(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<4} {"-":>1} CRITICAL {"-":>2} ({posix.getpid():03})]""") stderr.write(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<4} {"-":>1} CRITICAL {"-":>2} ({posix.getpid():03})]""")
setBackgroundColor(bgDefault) setBackgroundColor(bgDefault)
stderr.writeLine(&""" {message}""") stderr.writeLine(&""" {message}""")
setForegroundColor(fgDefault) setForegroundColor(fgDefault)
stderr.flushFile() logger.unlockFile(stderr)
proc logFatalStderr(self: LogHandler, logger: Logger, message: string) = proc logFatalStderr(self: LogHandler, logger: Logger, message: string) =
logger.lockFile(stderr)
setForegroundColor(fgBlack) setForegroundColor(fgBlack)
setBackgroundColor(bgRed) setBackgroundColor(bgRed)
stderr.write(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<5} {"-":>1} {"":>1} FATAL {"-":>3} ({posix.getpid():03})]""") stderr.write(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<5} {"-":>1} {"":>1} FATAL {"-":>3} ({posix.getpid():03})]""")
@ -132,51 +151,64 @@ proc logFatalStderr(self: LogHandler, logger: Logger, message: string) =
setBackgroundColor(bgDefault) setBackgroundColor(bgDefault)
stderr.writeline(&""" {message}""") stderr.writeline(&""" {message}""")
setForegroundColor(fgDefault) setForegroundColor(fgDefault)
stderr.flushFile() logger.unlockFile(stderr)
proc logTraceFile(self: LogHandler, logger: Logger, message: string) = proc logTraceFile(self: LogHandler, logger: Logger, message: string) =
StreamHandler(self).file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} TRACE {"-":>3} ({posix.getpid():03})] {message}""") var self = StreamHandler(self)
StreamHandler(self).file.flushFile() logger.lockFile(self.file)
self.file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} TRACE {"-":>3} ({posix.getpid():03})] {message}""")
logger.unlockFile(self.file)
proc logDebugFile(self: LogHandler, logger: Logger, message: string) = proc logDebugFile(self: LogHandler, logger: Logger, message: string) =
StreamHandler(self).file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} DEBUG {"-":>3} ({posix.getpid():03})] {message}""") var self = StreamHandler(self)
StreamHandler(self).file.flushFile() logger.lockFile(self.file)
self.file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} DEBUG {"-":>3} ({posix.getpid():03})] {message}""")
logger.unlockFile(self.file)
proc logInfoFile(self: LogHandler, logger: Logger, message: string) = proc logInfoFile(self: LogHandler, logger: Logger, message: string) =
StreamHandler(self).file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} INFO {"-":>4} ({posix.getpid():03})] {message}""") var self = StreamHandler(self)
StreamHandler(self).file.flushFile() logger.lockFile(self.file)
self.file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} INFO {"-":>4} ({posix.getpid():03})] {message}""")
logger.unlockFile(self.file)
proc logWarningFile(self: LogHandler, logger: Logger, message: string) = proc logWarningFile(self: LogHandler, logger: Logger, message: string) =
StreamHandler(self).file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} WARNING {"-":>1} ({posix.getpid():03})] {message}""") var self = StreamHandler(self)
StreamHandler(self).file.flushFile() logger.lockFile(self.file)
self.file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} WARNING {"-":>1} ({posix.getpid():03})] {message}""")
logger.unlockFile(self.file)
proc logErrorFile(self: LogHandler, logger: Logger, message: string) = proc logErrorFile(self: LogHandler, logger: Logger, message: string) =
StreamHandler(self).file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} ERROR {"-":>3} ({posix.getpid():03})] {message}""") var self = StreamHandler(self)
StreamHandler(self).file.flushFile() logger.lockFile(self.file)
self.file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} ERROR {"-":>3} ({posix.getpid():03})] {message}""")
logger.unlockFile(self.file)
proc logCriticalFile(self: LogHandler, logger: Logger, message: string) = proc logCriticalFile(self: LogHandler, logger: Logger, message: string) =
StreamHandler(self).file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<4} {"-":>1} CRITICAL {"-":>2} ({posix.getpid():03})] {message}""") var self = StreamHandler(self)
StreamHandler(self).file.flushFile() logger.lockFile(self.file)
self.file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<4} {"-":>1} CRITICAL {"-":>2} ({posix.getpid():03})] {message}""")
logger.unlockFile(self.file)
proc logFatalFile(self: LogHandler, logger: Logger, message: string) = proc logFatalFile(self: LogHandler, logger: Logger, message: string) =
StreamHandler(self).file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<5} {"-":>1} {"":>1} FATAL {"-":>3} ({posix.getpid():03})] {message}""") var self = StreamHandler(self)
StreamHandler(self).file.flushFile() logger.lockFile(self.file)
self.file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<5} {"-":>1} {"":>1} FATAL {"-":>3} ({posix.getpid():03})] {message}""")
logger.unlockFile(self.file)
proc switchToFile*(self: Logger) = proc switchToFile*(self: Logger) =
## Switches logging to file and ## Switches logging to file and
## changes the behavior of getDefaultLogger ## changes the behavior of getDefaultLogger
## accordingly ## accordingly
if logToFile: if logToFileOnly:
return return
logToFile = true
self.handlers = @[] # Don't you love it when you can just let the GC manage memory for you? self.handlers = @[] # Don't you love it when you can just let the GC manage memory for you?
self.addHandler(createStreamHandler(logTraceFile, LogLevel.Trace, logFile)) self.addHandler(createStreamHandler(logTraceFile, LogLevel.Trace, logFile))
self.addHandler(createStreamHandler(logDebugFile, LogLevel.Debug, logFile)) self.addHandler(createStreamHandler(logDebugFile, LogLevel.Debug, logFile))
@ -191,9 +223,8 @@ proc switchToConsole*(self: Logger) =
## Switches logging to the console and ## Switches logging to the console and
## changes the behavior of getDefaultLogger ## changes the behavior of getDefaultLogger
## accordingly ## accordingly
if not logToFile: if not logToFileOnly:
return return
logToFile = false
self.handlers = @[] self.handlers = @[]
self.addHandler(createHandler(logTraceStderr, LogLevel.Trace)) self.addHandler(createHandler(logTraceStderr, LogLevel.Trace))
self.addHandler(createHandler(logDebugStderr, LogLevel.Debug)) self.addHandler(createHandler(logDebugStderr, LogLevel.Debug))
@ -211,8 +242,7 @@ proc getDefaultLogger*(): Logger =
## standard error with some basic info like the ## standard error with some basic info like the
## current date and time and the log level ## current date and time and the log level
result = newLogger() result = newLogger()
if not logToFile: if not logToFileOnly:
setStdIoUnbuffered() # Colors don't work otherwise!
result.addHandler(createHandler(logTraceStderr, LogLevel.Trace)) result.addHandler(createHandler(logTraceStderr, LogLevel.Trace))
result.addHandler(createHandler(logDebugStderr, LogLevel.Debug)) result.addHandler(createHandler(logDebugStderr, LogLevel.Debug))
result.addHandler(createHandler(logInfoStderr, LogLevel.Info)) result.addHandler(createHandler(logInfoStderr, LogLevel.Info))
@ -220,11 +250,10 @@ proc getDefaultLogger*(): Logger =
result.addHandler(createHandler(logErrorStderr, LogLevel.Error)) result.addHandler(createHandler(logErrorStderr, LogLevel.Error))
result.addHandler(createHandler(logCriticalStderr, LogLevel.Critical)) result.addHandler(createHandler(logCriticalStderr, LogLevel.Critical))
result.addHandler(createHandler(logFatalStderr, LogLevel.Fatal)) result.addHandler(createHandler(logFatalStderr, LogLevel.Fatal))
else: result.addHandler(createStreamHandler(logTraceFile, LogLevel.Trace, logFile))
result.addHandler(createStreamHandler(logTraceFile, LogLevel.Trace, logFile)) result.addHandler(createStreamHandler(logDebugFile, LogLevel.Debug, logFile))
result.addHandler(createStreamHandler(logDebugFile, LogLevel.Debug, logFile)) result.addHandler(createStreamHandler(logInfoFile, LogLevel.Info, logFile))
result.addHandler(createStreamHandler(logInfoFile, LogLevel.Info, logFile)) result.addHandler(createStreamHandler(logWarningFile, LogLevel.Warning, logFile))
result.addHandler(createStreamHandler(logWarningFile, LogLevel.Warning, logFile)) result.addHandler(createStreamHandler(logErrorFile, LogLevel.Error, logFile))
result.addHandler(createStreamHandler(logErrorFile, LogLevel.Error, logFile)) result.addHandler(createStreamHandler(logCriticalFile, LogLevel.Critical, logFile))
result.addHandler(createStreamHandler(logCriticalFile, LogLevel.Critical, logFile)) result.addHandler(createStreamHandler(logFatalFile, LogLevel.Fatal, logFile))
result.addHandler(createStreamHandler(logFatalFile, LogLevel.Fatal, logFile))