Hopefully fixed dependency resolution. Added file locking on logs. Improved README, fixed some minor issues

This commit is contained in:
Nocturn9x 2021-12-09 17:09:00 +01:00
parent 342fd15ffb
commit fd35755da5
5 changed files with 236 additions and 119 deletions

View File

@ -76,10 +76,21 @@ stdout = /var/log/sshd # Path of the stdout log for the service
stdin = /dev/null # Path of the stdin fd for the service
```
A dependency name can either be the name of a unit file (without the `.conf` extension), or one of the following placeholders:
__Note__: Unsupervised services cannot be restarted, as NimD has no control over them once they're spawned.
A dependency name can either be the name of a unit file (case-sensitive, but without the `.conf` extension), or one of the following placeholders:
- `net` -> Stands for network connection. Services like NetworkManager and dhcpcd should be set as providers for this
- `fs` -> If you mount your disks using a oneshot service (recommended for the best experience), your service should provide this
- `ssh` -> The service provides some sort of SSH functionality
- `ftp` -> The service provides an FTP server
- `http` -> The service is an HTTP webserver
Note that NimD resolves placeholders before service names: this means that if you have a service named `ssh.conf`, using `ssh` as
a dependency will __not__ set that service as a dependency and will __not__ override the default behavior unless said unit file also has
`provides=ssh` in it. Also note that multiple providers for the same service raise a warning by default and cause NimD to let the alphabet decide
which dependency is started (i.e. they are sorted lexicographically by their filename, without the extension, and the first is picked), but this
behavior can be changed (e.g. raising an error instead)
## Configuring NimD
@ -94,12 +105,14 @@ logFile = /var/log/nimd # Path to log file
[Filesystem]
autoMount = true # Automatically parses /etc/fstab and mounts disks
fstabPath = /etc/fstab # Path to your system's fstab (defaults to /etc/fstab)
createDirs = /path/to/dir1, /path/to/dir2 # Creates these directories on boot. Empty to disable
createSymlinks = /path/to/dir1, /path/to/dir2 # Creates these symlinks on boot. Empty to disable
autoMount = true # Automatically parses /etc/fstab and mounts disks
autoUnmount = true # Automatically parses /proc/mounts and unmounts everything on shutdown
fstabPath = /etc/fstab # Path to your system's fstab (defaults to /etc/fstab)
createDirs = /path/to/dir1, /path/to/dir2 # Creates these directories on boot. Empty to disable
createSymlinks = /path/to/symlink:/path/to/dest, ... # Creates these symlinks on boot. Empty to disable
[Misc]
controlSocket = /var/run/nimd.sock # Path to the Unix domain socket to create for IPC
controlSocket = /var/run/nimd.sock # Path to the Unix domain socket to create for IPC
onDependencyConflict = skip # Other option: warn, error
```

View File

@ -349,13 +349,14 @@ proc createDirectories*(logger: Logger) =
## in their config. Note that the entire path
## of the directory is created if it does not
## exist yet
var hasChmod = false
var hasChmod = true
try:
if findExe("chmod").isEmptyOrWhitespace():
logger.warning("Could not find chmod binary, directory permissions will default to OS configuration")
hasChmod = true
hasChmod = false
except:
logger.error(&"Failed to search for chmod binary: {getCurrentExceptionMsg()}")
logger.error(&"Failed to search for chmod binary, directory permissions will default to OS configuration: {getCurrentExceptionMsg()}")
hasChmod = false
for dir in directories:
try:
if exists(dir.path):
@ -374,6 +375,6 @@ proc createDirectories*(logger: Logger) =
if hasChmod:
logger.debug(&"Setting permissions to {dir.permissions} for {dir.path}")
if (let code = execShellCmd(&"chmod -R {dir.permissions} {dir.path}"); code) != 0:
logger.warning(&"Command 'chmod -R {dir.permissions}' exited non-zero status code {code}")
logger.warning(&"Command 'chmod -R {dir.permissions} {dir.path}' exited non-zero status code {code}")
except:
logger.error(&"Failed to create directory at {dir.path}: {getCurrentExceptionMsg()}")

View File

@ -30,7 +30,9 @@ import ../util/logging
type
RunLevel* = enum
## Enum of possible runlevels
Boot, Default, Shutdown
Boot = 0,
Default,
Shutdown
ServiceKind* = enum
## Enumerates all service
## types
@ -38,6 +40,14 @@ type
RestartKind* = enum
## Enum of possible restart modes
Always, OnFailure, Never
DependencyKind* = enum
## Enum of possible dependencies
Network, Filesystem,
Ssh, Ftp, Http, Other
Dependency* = ref object
## A dependency
kind*: DependencyKind
provider*: Service
Service* = ref object of RootObj
## A service object
name: string
@ -49,17 +59,28 @@ type
supervised: bool
restart: RestartKind
restartDelay: int
depends*: seq[Service]
provides*: seq[Service]
depends: seq[Dependency]
provides: seq[Dependency]
## These two fields are
## used by the dependency
## resolver
isMarked: bool
isResolved: bool
proc newDependency*(kind: DependencyKind, provider: Service): Dependency =
## Creates a new dependency object
result = Dependency(kind: kind, provider: provider)
proc newService*(name, description: string, kind: ServiceKind, workDir: string, runlevel: RunLevel, exec: string, supervised: bool, restart: RestartKind,
restartDelay: int, depends, provides: seq[Service]): Service =
restartDelay: int, depends, provides: seq[Dependency]): Service =
## Creates a new service object
result = Service(name: name, description: description, kind: kind, workDir: workDir, runLevel: runLevel,
exec: exec, supervised: supervised, restart: restart, restartDelay: restartDelay,
depends: depends, provides: provides)
result.provides.add(result)
depends: depends, provides: provides, isMarked: false, isResolved: false)
result.provides.add(newDependency(Other, result))
proc extend[T](self: var seq[T], other: seq[T]) =
@ -72,34 +93,83 @@ var services: seq[Service] = @[]
var processIDs: TableRef[int, Service] = newTable[int, Service]()
proc resolveDependencies(logger: Logger, node: Service, resolved, unresolved: var seq[Service]) =
## Resolves dependencies and modifies the resolved
## parameter in place to a list that satisfies the
## dependency tree. This is basically traversing
## a directed cyclic graph, although note that cycles
## in our graph are errors and cause the dependants and
## the providers to be skipped and an error to be logged
# Note: It turns out this is an NP-hard problem (see https://stackoverflow.com/a/28102139/12159081),
# so hopefully this doesn't blow up. No wonder runit doesn't do any dependency resolution, lol.
# The algorithm comes from https://www.electricmonk.nl/log/2008/08/07/dependency-resolving-algorithm/
# and has been extended to support the dependent-provider paradigm
var ok = true
unresolved.add(node)
for dependency in node.depends:
if dependency notin resolved:
if dependency in unresolved:
logger.error(&"Could not resolve dependencies for '{node.name}' -> '{dependency.name}': cyclic dependency detected")
proc resolve(logger: Logger, node: Service): seq[Service] =
## Returns a sorted list of services according
## to their dependency and provider requirements.
## This function recursively iterates over the
## list of services, treating it as a DAG
## (Directed Acyclic Graph) and builds a topologically
## sorted list such that a service appears in it only
## after all of its dependencies and only
## before all of its dependents.
## This function also automatically handles
## detached subgraphs, which can occur if
## one or more dependencies have common
## dependencies/dependents between each other,
## but not with the rest of the graph. Nodes
## that have no dependencies nor provide any
## service may be located anywhere in the list,
## as that does not invalidate the invariants
## described above. The algorithm comes from
## https://www.electricmonk.nl/log/2008/08/07/dependency-resolving-algorithm/
## and has been extended to support the dependent-provider paradigm.
## Note that it is not an error for a service in a given runlevel to depend
## on services in other runlevels: when that occurs a warning is raised and
## the service in the lower runlevel is promoted to the higher one (runlevels start from 0),
## which means adding a service to a given runlevel implicitly adds all of its dependencies
## to said runlevel as well, regardless of what was specified in their unit file
if node.isResolved:
logger.debug(&"Dependency '{node.name}' has already been satisfied, skipping it")
return @[]
var ok: bool = true
result = @[]
node.isMarked = true
for service in node.provides:
if service.provider == node:
continue # Services implicitly provide themselves
if node.runlevel < service.provider.runlevel:
logger.warning(&"Service '{node.name}' in runlevel {node.runlevel} depends on '{service.provider.name}' in runlevel {service.provider.runlevel}, loading dependency regardless")
if not service.provider.isResolved:
if service.provider.isMarked:
logger.warning(&"Cyclic dependency from '{node.name}' to '{service.provider.name}' detected while building dependency graph: skipping both")
ok = false
continue
resolveDependencies(logger, dependency, resolved, unresolved)
for dependency in node.provides:
if dependency == node:
continue
resolveDependencies(logger, dependency, resolved, unresolved)
break
service.provider.isMarked = true
result.extend(resolve(logger, service.provider))
for service in node.depends:
if service.provider == node:
logger.warning(&"Cyclic dependency from '{node.name}' to itself detected while building dependency graph: skipping it")
ok = false
break
if node.runlevel > service.provider.runlevel:
logger.warning(&"Service '{node.name}' in runlevel {node.runlevel} depends on '{service.provider.name}' in runlevel {service.provider.runlevel}, loading both")
if not service.provider.isResolved:
if service.provider.isMarked:
logger.warning(&"Cyclic dependency from '{node.name}' to '{service.provider.name}' detected while building dependency graph: skipping both")
ok = false
break
service.provider.isMarked = true
result.extend(resolve(logger, service.provider))
if ok:
resolved.add(node)
unresolved.del(unresolved.find(node))
result.add(node)
node.isResolved = true
node.isMarked = false
proc resolveDependencies(logger: Logger, services: seq[Service], level: RunLevel): seq[Service] =
## Iteratively calls resolve() until all services
## have been processed
result = @[]
var node: Service
var i = 1
var s: seq[Service] = @[]
for service in services:
if service.runlevel == level:
s.add(service)
while i <= len(s):
node = s[^i]
result.extend(resolve(logger, node))
inc(i)
proc isManagedProcess*(pid: int): bool =
@ -239,47 +309,49 @@ proc startService(logger: Logger, service: Service) =
proc startServices*(logger: Logger, level: RunLevel, workers: int = 1) =
## Starts the registered services in the
## given runlevel
var resolved: seq[Service] = @[]
var unresolved: seq[Service] = @[]
resolveDependencies(logger, services[0], resolved, unresolved)
if len(services) == 0:
return
var dependencies = resolveDependencies(logger, services, level)
if workers > cpuinfo.countProcessors():
logger.warning(&"The configured number of workers ({workers}) is greater than the number of CPU cores ({cpuinfo.countProcessors()}), performance may degrade")
var workerCount: int = 0
var status: cint
var pid: int = posix.fork()
var pids: seq[int] = @[]
if pid == -1:
logger.error(&"Error, cannot fork: {posix.strerror(posix.errno)}")
elif pid == 0:
var service: Service
logger.debug("Started service spawner process")
var servicesCopy: seq[Service] = @[]
for service in services:
if service.runlevel == level:
servicesCopy.add(service)
while servicesCopy.len() > 0:
if workerCount == workers:
logger.debug(&"Worker queue full, waiting for some worker to exit...")
logger.trace(&"Calling waitpid() on {pid}")
var returnCode = waitPid(cint(pid), status, WUNTRACED)
logger.trace(&"Call to waitpid() set status to {status} and returned {returnCode}")
dec(workerCount)
pid = posix.fork()
if pid == -1:
logger.error(&"An error occurred while forking to spawn services, trying again: {posix.strerror(posix.errno)}")
elif pid == 0:
logger.trace(&"New child has been spawned")
if not servicesCopy[0].supervised or servicesCopy[0].kind == Oneshot:
logger.info(&"""Starting {(if servicesCopy[0].kind != Oneshot: "unsupervised" else: "oneshot")} service '{servicesCopy[0].name}'""")
while dependencies.len() > 0:
for _ in 0..<workers:
if len(dependencies) == 0:
break
service = dependencies[0]
dependencies.del(0)
pid = posix.fork()
if pid == -1:
logger.error(&"An error occurred while forking to spawn services, trying again: {posix.strerror(posix.errno)}")
elif pid == 0:
logger.trace(&"New child has been spawned")
if not service.supervised or service.kind == Oneshot:
logger.info(&"""Starting {(if service.kind != Oneshot: "unsupervised" else: "oneshot")} service '{service.name}'""")
else:
logger.info(&"Starting supervised service '{service.name}'")
startService(logger, service)
else:
logger.info(&"Starting supervised service '{servicesCopy[0].name}'")
startService(logger, servicesCopy[0])
elif servicesCopy.len() > 0:
workerCount += 1
if servicesCopy[0].supervised:
addManagedProcess(pid, servicesCopy[0])
servicesCopy.del(0)
pids.add(pid)
if service.supervised:
addManagedProcess(pid, service)
if len(pids) == workers:
logger.debug(&"""Worker queue full, waiting for some worker{(if workers > 1: "s" else: "")} to exit...""")
for i, pid in pids:
logger.trace(&"Calling waitpid() on {pid}")
var returnCode = waitPid(cint(pid), status, WUNTRACED)
logger.trace(&"Call to waitpid() on {pid} set status to {status} and returned {returnCode}")
pids = @[]
quit(0)
else:
logger.debug(&"Waiting for completion of service spawning in runlevel {($level).toLowerAscii()}")
logger.trace(&"Calling waitpid() on {pid}")
var returnCode = waitPid(cint(pid), status, WUNTRACED)
logger.trace(&"Call to waitpid() set status to {status} and returned {returnCode}")
logger.trace(&"Call to waitpid() on {pid} set status to {status} and returned {returnCode}")

View File

@ -36,11 +36,10 @@ proc addStuff =
# Tests here. Check logging output (debug) to see if
# they work as intended
addSymlink(newSymlink(dest="/dev/std/err", source="/")) # Should say link already exists and points to /proc/self/fd/2
addSymlink(newSymlink(dest="/dev/std/in", source="/does/not/exist")) # Shuld say destination does not exist
addSymlink(newSymlink(dest="/dev/std/in", source="/does/not/exist")) # Should say destination does not exist
addSymlink(newSymlink(dest="/dev/std/in", source="/proc/self/fd/0")) # Should say link already exists
addDirectory(newDirectory("test", 777)) # Should create a directory
addDirectory(newDirectory("/dev/disk", 123)) # Should say directory already exists
addDirectory(newDirectory("/dev/test/owo", 000)) # Should say path does not exist
# Shutdown handler to unmount disks
addShutdownHandler(newShutdownHandler(unmountAllDisks))
# Adds test services
@ -51,25 +50,25 @@ proc addStuff =
var errorer = newService(name="errorer", description="la mamma di gavd",
exec="/bin/false", supervised=true, restart=OnFailure,
restartDelay=5, runlevel=Boot, workDir="/", kind=Simple,
depends=(@[echoer]), provides=(@[]))
depends=(@[newDependency(Other, echoer)]), provides=(@[]))
var test = newService(name="broken", description="", exec="/bin/echo owo",
runlevel=Boot, kind=Oneshot, workDir=getCurrentDir(),
supervised=false, restart=Never, restartDelay=0,
depends=(@[echoer]), provides=(@[]))
depends=(@[newDependency(Other, echoer)]), provides=(@[]))
var exiter = newService(name="exiter", description="la mamma di licenziat",
exec="/bin/true", supervised=true, restart=Always,
restartDelay=5, runlevel=Boot, workDir="/", kind=Simple,
depends=(@[errorer]), provides=(@[]))
depends=(@[newDependency(Other, errorer)]), provides=(@[]))
addService(errorer)
addService(echoer)
addService(exiter)
addService(test)
echoer.depends.add(test)
proc main(logger: Logger, mountDisks: bool = true, fstab: string = "/etc/fstab") =
proc main(logger: Logger, mountDisks: bool = true, fstab: string = "/etc/fstab", setHostname: bool = true, workerCount: int = 1) =
## NimD's entry point and setup
## function
setStdIoUnbuffered() # Colors and output synchronization don't work otherwise
logger.debug("Starting NimD: A minimal, self-contained, dependency-based Linux init system written in Nim")
logger.info(&"NimD version {NimdVersion.major}.{NimdVersion.minor}.{NimdVersion.patch} is starting up!")
logger.trace("Calling getCurrentProcessId()")
@ -110,12 +109,15 @@ proc main(logger: Logger, mountDisks: bool = true, fstab: string = "/etc/fstab")
except:
logger.fatal(&"A fatal error has occurred while preparing filesystem, booting cannot continue. Error -> {getCurrentExceptionMsg()}")
nimDExit(logger, 131, emerg=false)
logger.info("Setting hostname")
logger.debug(&"Hostname was set to '{setHostname(logger)}'")
if setHostname:
logger.info("Setting hostname")
logger.debug(&"Hostname was set to '{misc.setHostname(logger)}'")
else:
logger.info("Skipping setting hostname")
logger.debug("Entering critical fork() section: blocking signals")
blockSignals(logger) # They are later unblocked in mainLoop
logger.info("Processing boot runlevel")
startServices(logger, workers=1, level=Boot)
startServices(logger, workers=workerCount, level=Boot)
logger.debug("Starting main loop")
mainLoop(logger)

View File

@ -40,7 +40,7 @@ type
var defaultLevel = LogLevel.Info
var logFile = "/var/log/nimd"
var logToFile: bool = false
var logToFileOnly: bool = false
proc log(self: Logger, level: LogLevel = defaultLevel, message: string) # Forward declaration
@ -79,52 +79,71 @@ proc log(self: Logger, level: LogLevel = defaultLevel, message: string) =
# Do NOT touch the alignment offsets or your console output and logs will look like trash
proc lockFile(logger: Logger, handle: File) =
## Locks the given file across the whole system for writing using fcntl()
if fcntl(handle.getFileHandle(), F_WRLCK) == -1:
stderr.writeLine(&"Error while locking handle (code {posix.errno}, {posix.strerror(posix.errno)}): output may be mangled")
proc unlockFile(logger: Logger, handle: File) =
## Unlocks the given file across the whole system for writing using fcntl()
if fcntl(handle.getFileHandle(), F_UNLCK) == -1:
stderr.writeLine(&"Error while locking stderr (code {posix.errno}, {posix.strerror(posix.errno)}): output may be mangled")
proc logTraceStderr(self: LogHandler, logger: Logger, message: string) =
logger.lockFile(stderr)
setForegroundColor(fgMagenta)
stderr.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} TRACE {"-":>3} ({posix.getpid():03})] {message}""")
stderr.flushFile()
setForegroundColor(fgDefault)
logger.unlockFile(stderr)
proc logDebugStderr(self: LogHandler, logger: Logger, message: string) =
logger.lockFile(stderr)
setForegroundColor(fgCyan)
stderr.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} DEBUG {"-":>3} ({posix.getpid():03})] {message}""")
stderr.flushFile()
setForegroundColor(fgDefault)
logger.unlockFile(stderr)
proc logInfoStderr(self: LogHandler, logger: Logger, message: string) =
logger.lockFile(stderr)
setForegroundColor(fgGreen)
stderr.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} INFO {"-":>4} ({posix.getpid():03})] {message}""")
stderr.flushFile()
setForegroundColor(fgDefault)
logger.unlockFile(stderr)
proc logWarningStderr(self: LogHandler, logger: Logger, message: string) =
logger.lockFile(stderr)
setForegroundColor(fgYellow)
stderr.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} WARNING {"-":>1} ({posix.getpid():03})] {message}""")
stderr.flushFile()
setForegroundColor(fgDefault)
logger.unlockFile(stderr)
proc logErrorStderr(self: LogHandler, logger: Logger, message: string) =
logger.lockFile(stderr)
setForegroundColor(fgRed)
stderr.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} ERROR {"-":>3} ({posix.getpid():03})] {message}""")
stderr.flushFile()
setForegroundColor(fgDefault)
logger.unlockFile(stderr)
proc logCriticalStderr(self: LogHandler, logger: Logger, message: string) =
logger.lockFile(stderr)
setForegroundColor(fgYellow)
setBackgroundColor(bgRed)
stderr.write(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<4} {"-":>1} CRITICAL {"-":>2} ({posix.getpid():03})]""")
setBackgroundColor(bgDefault)
stderr.writeLine(&""" {message}""")
setForegroundColor(fgDefault)
stderr.flushFile()
logger.unlockFile(stderr)
proc logFatalStderr(self: LogHandler, logger: Logger, message: string) =
logger.lockFile(stderr)
setForegroundColor(fgBlack)
setBackgroundColor(bgRed)
stderr.write(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<5} {"-":>1} {"":>1} FATAL {"-":>3} ({posix.getpid():03})]""")
@ -132,51 +151,64 @@ proc logFatalStderr(self: LogHandler, logger: Logger, message: string) =
setBackgroundColor(bgDefault)
stderr.writeline(&""" {message}""")
setForegroundColor(fgDefault)
stderr.flushFile()
logger.unlockFile(stderr)
proc logTraceFile(self: LogHandler, logger: Logger, message: string) =
StreamHandler(self).file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} TRACE {"-":>3} ({posix.getpid():03})] {message}""")
StreamHandler(self).file.flushFile()
var self = StreamHandler(self)
logger.lockFile(self.file)
self.file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} TRACE {"-":>3} ({posix.getpid():03})] {message}""")
logger.unlockFile(self.file)
proc logDebugFile(self: LogHandler, logger: Logger, message: string) =
StreamHandler(self).file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} DEBUG {"-":>3} ({posix.getpid():03})] {message}""")
StreamHandler(self).file.flushFile()
var self = StreamHandler(self)
logger.lockFile(self.file)
self.file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} DEBUG {"-":>3} ({posix.getpid():03})] {message}""")
logger.unlockFile(self.file)
proc logInfoFile(self: LogHandler, logger: Logger, message: string) =
StreamHandler(self).file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} INFO {"-":>4} ({posix.getpid():03})] {message}""")
StreamHandler(self).file.flushFile()
var self = StreamHandler(self)
logger.lockFile(self.file)
self.file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} INFO {"-":>4} ({posix.getpid():03})] {message}""")
logger.unlockFile(self.file)
proc logWarningFile(self: LogHandler, logger: Logger, message: string) =
StreamHandler(self).file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} WARNING {"-":>1} ({posix.getpid():03})] {message}""")
StreamHandler(self).file.flushFile()
var self = StreamHandler(self)
logger.lockFile(self.file)
self.file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} WARNING {"-":>1} ({posix.getpid():03})] {message}""")
logger.unlockFile(self.file)
proc logErrorFile(self: LogHandler, logger: Logger, message: string) =
StreamHandler(self).file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} ERROR {"-":>3} ({posix.getpid():03})] {message}""")
StreamHandler(self).file.flushFile()
var self = StreamHandler(self)
logger.lockFile(self.file)
self.file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<10} {"-":>1} {"":>1} ERROR {"-":>3} ({posix.getpid():03})] {message}""")
logger.unlockFile(self.file)
proc logCriticalFile(self: LogHandler, logger: Logger, message: string) =
StreamHandler(self).file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<4} {"-":>1} CRITICAL {"-":>2} ({posix.getpid():03})] {message}""")
StreamHandler(self).file.flushFile()
var self = StreamHandler(self)
logger.lockFile(self.file)
self.file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<4} {"-":>1} CRITICAL {"-":>2} ({posix.getpid():03})] {message}""")
logger.unlockFile(self.file)
proc logFatalFile(self: LogHandler, logger: Logger, message: string) =
StreamHandler(self).file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<5} {"-":>1} {"":>1} FATAL {"-":>3} ({posix.getpid():03})] {message}""")
StreamHandler(self).file.flushFile()
var self = StreamHandler(self)
logger.lockFile(self.file)
self.file.writeLine(&"""[{fromUnix(getTime().toUnixFloat().int).format("d/M/yyyy HH:mm:ss"):<5} {"-":>1} {"":>1} FATAL {"-":>3} ({posix.getpid():03})] {message}""")
logger.unlockFile(self.file)
proc switchToFile*(self: Logger) =
## Switches logging to file and
## changes the behavior of getDefaultLogger
## accordingly
if logToFile:
if logToFileOnly:
return
logToFile = true
self.handlers = @[] # Don't you love it when you can just let the GC manage memory for you?
self.addHandler(createStreamHandler(logTraceFile, LogLevel.Trace, logFile))
self.addHandler(createStreamHandler(logDebugFile, LogLevel.Debug, logFile))
@ -191,9 +223,8 @@ proc switchToConsole*(self: Logger) =
## Switches logging to the console and
## changes the behavior of getDefaultLogger
## accordingly
if not logToFile:
if not logToFileOnly:
return
logToFile = false
self.handlers = @[]
self.addHandler(createHandler(logTraceStderr, LogLevel.Trace))
self.addHandler(createHandler(logDebugStderr, LogLevel.Debug))
@ -211,8 +242,7 @@ proc getDefaultLogger*(): Logger =
## standard error with some basic info like the
## current date and time and the log level
result = newLogger()
if not logToFile:
setStdIoUnbuffered() # Colors don't work otherwise!
if not logToFileOnly:
result.addHandler(createHandler(logTraceStderr, LogLevel.Trace))
result.addHandler(createHandler(logDebugStderr, LogLevel.Debug))
result.addHandler(createHandler(logInfoStderr, LogLevel.Info))
@ -220,11 +250,10 @@ proc getDefaultLogger*(): Logger =
result.addHandler(createHandler(logErrorStderr, LogLevel.Error))
result.addHandler(createHandler(logCriticalStderr, LogLevel.Critical))
result.addHandler(createHandler(logFatalStderr, LogLevel.Fatal))
else:
result.addHandler(createStreamHandler(logTraceFile, LogLevel.Trace, logFile))
result.addHandler(createStreamHandler(logDebugFile, LogLevel.Debug, logFile))
result.addHandler(createStreamHandler(logInfoFile, LogLevel.Info, logFile))
result.addHandler(createStreamHandler(logWarningFile, LogLevel.Warning, logFile))
result.addHandler(createStreamHandler(logErrorFile, LogLevel.Error, logFile))
result.addHandler(createStreamHandler(logCriticalFile, LogLevel.Critical, logFile))
result.addHandler(createStreamHandler(logFatalFile, LogLevel.Fatal, logFile))
result.addHandler(createStreamHandler(logTraceFile, LogLevel.Trace, logFile))
result.addHandler(createStreamHandler(logDebugFile, LogLevel.Debug, logFile))
result.addHandler(createStreamHandler(logInfoFile, LogLevel.Info, logFile))
result.addHandler(createStreamHandler(logWarningFile, LogLevel.Warning, logFile))
result.addHandler(createStreamHandler(logErrorFile, LogLevel.Error, logFile))
result.addHandler(createStreamHandler(logCriticalFile, LogLevel.Critical, logFile))
result.addHandler(createStreamHandler(logFatalFile, LogLevel.Fatal, logFile))