restructured logger from budgit

This commit is contained in:
juancwu 2026-04-25 20:35:03 +00:00
commit b2fd12b1c8
11 changed files with 1227 additions and 1 deletions

116
README.md
View file

@ -1,3 +1,119 @@
# splinter
Simple and minimal logger in Go for my projects.
Fans out structured records to one or more streams. Ships with a console
stream (JSON or pretty text) and a file stream with size- and time-based
rotation, optional gzip compression, and backup pruning.
## Install
```sh
go get git.juancwu.dev/juancwu/splinter
```
Requires Go 1.26 or newer.
## Quick start
The package exposes a default logger that writes JSON to stderr at
`LevelInfo`:
```go
package main
import "git.juancwu.dev/juancwu/splinter"
func main() {
splinter.Info("server started", "port", 8080)
splinter.Error("request failed", "err", "timeout")
}
```
## Custom logger with file output and rotation
```go
package main
import (
"time"
"git.juancwu.dev/juancwu/splinter"
)
func main() {
logger := splinter.New(
splinter.WithStream(splinter.NewConsoleStream(
splinter.ConsolePretty, splinter.LevelDebug,
)),
splinter.WithStream(splinter.MustFileStream("logs/app.log", splinter.FileStreamConfig{
Level: splinter.LevelInfo,
Format: splinter.FileJSON,
MaxSizeMB: 100, // rotate at 100 MB
MaxAge: 24 * time.Hour, // and at least once per day
MaxBackups: 7, // keep the 7 most recent files
Compress: true, // gzip rotated backups
})),
splinter.WithAttrs(map[string]any{"service": "api"}),
)
defer logger.Close()
splinter.SetDefault(logger)
splinter.Info("ready")
}
```
## Child loggers
`Logger.With` returns a child that inherits streams and merges additional
attributes onto every record:
```go
req := logger.With(map[string]any{"request_id": id})
req.Info("handled", "status", 200)
```
## Custom streams
Implement the `Stream` interface to send logs anywhere — a database, a
message queue, an HTTP endpoint, an in-memory buffer for tests:
```go
type Stream interface {
Name() string
Write(ctx context.Context, rec splinter.Record) error
Enabled(level splinter.Level) bool
Close() error
}
```
Add it with `splinter.WithStream(myStream)` at construction time.
## Configuration reference
### `ConsoleStream`
| Constructor | Behaviour |
| ----------------------------------------- | -------------------------------------- |
| `NewConsoleStream(ConsoleJSON, level)` | One JSON object per line on stderr. |
| `NewConsoleStream(ConsolePretty, level)` | `key=value` text on stderr. |
| `ConsoleWriter(w)` option | Override the destination writer. |
### `FileStream`
| Field | Default | Notes |
| ------------ | ----------- | -------------------------------------------------------------- |
| `Level` | `LevelInfo` | Minimum level written to the file. |
| `Format` | `FileJSON` | `FileJSON` or `FileText`. |
| `MaxSizeMB` | `100` | Rotation threshold in MB. `0` disables size-based rotation. |
| `MaxAge` | `0` | Rotation threshold by age. `0` disables time-based rotation. |
| `MaxBackups` | `5` | Number of rotated files to retain (raw + `.gz` combined). |
| `Compress` | `false` | gzip rotated files asynchronously after rename. |
When both `MaxSizeMB` and `MaxAge` are zero, `MaxSizeMB` defaults to `100`
to prevent unbounded growth.
## License
MIT — see [LICENSE](LICENSE).

75
console_stream.go Normal file
View file

@ -0,0 +1,75 @@
package splinter
import (
"context"
"io"
"log/slog"
"os"
)
// ConsoleFormat selects the output style for ConsoleStream.
// ConsoleJSON is the zero value and therefore the default.
type ConsoleFormat int

const (
	// ConsoleJSON writes structured JSON lines.
	ConsoleJSON ConsoleFormat = iota
	// ConsolePretty writes human-readable text (slog.TextHandler).
	ConsolePretty
)
// ConsoleStream writes log records to an io.Writer (default os.Stderr) using
// the standard library's slog handlers.
type ConsoleStream struct {
	logger *slog.Logger // wraps the handler chosen in NewConsoleStream
	level  Level        // minimum level reported by Enabled
}

// ConsoleOption configures a ConsoleStream.
type ConsoleOption func(*consoleConfig)

// consoleConfig collects option values before the stream is built.
type consoleConfig struct {
	writer io.Writer // destination; defaults to os.Stderr
}

// ConsoleWriter overrides the output destination (default: os.Stderr).
// Mainly useful for capturing output in tests.
func ConsoleWriter(w io.Writer) ConsoleOption {
	return func(c *consoleConfig) { c.writer = w }
}
// NewConsoleStream creates a console stream writing to os.Stderr unless a
// ConsoleWriter option overrides the destination. ConsolePretty selects
// slog's text handler; any other format falls back to the JSON handler.
// level is the minimum level this stream reports as enabled.
func NewConsoleStream(format ConsoleFormat, level Level, opts ...ConsoleOption) *ConsoleStream {
	cfg := consoleConfig{writer: os.Stderr}
	for _, apply := range opts {
		apply(&cfg)
	}

	handlerOpts := &slog.HandlerOptions{Level: level}
	var handler slog.Handler
	if format == ConsolePretty {
		handler = slog.NewTextHandler(cfg.writer, handlerOpts)
	} else {
		handler = slog.NewJSONHandler(cfg.writer, handlerOpts)
	}

	return &ConsoleStream{
		logger: slog.New(handler),
		level:  level,
	}
}
// Name implements Stream.
func (s *ConsoleStream) Name() string { return "console" }

// Write implements Stream. The record is converted to a slog.Record using
// rec.Time (previously the stream re-stamped records with the current time
// via slog.Logger.Log, diverging from FileStream.Write) and handed directly
// to the handler. Handler errors are now propagated to the caller so
// Logger's onError callback can report them. Records below the stream's
// level are dropped, matching the old handler-level filtering.
func (s *ConsoleStream) Write(ctx context.Context, rec Record) error {
	if rec.Level < s.level {
		return nil
	}
	// PC is 0: source-location capture is not meaningful after fan-out.
	sr := slog.NewRecord(rec.Time, rec.Level, rec.Message, 0)
	for k, v := range rec.Attrs {
		sr.Add(k, v)
	}
	return s.logger.Handler().Handle(ctx, sr)
}

// Enabled implements Stream.
func (s *ConsoleStream) Enabled(level Level) bool { return level >= s.level }

// Close implements Stream. No-op for console output.
func (s *ConsoleStream) Close() error { return nil }

52
console_stream_test.go Normal file
View file

@ -0,0 +1,52 @@
package splinter
import (
"bytes"
"encoding/json"
"testing"
)
// TestConsoleStream_JSON verifies that a JSON console stream emits a JSON
// object carrying both the message and inline key/value attributes.
func TestConsoleStream_JSON(t *testing.T) {
	var out bytes.Buffer
	log := New(WithStream(NewConsoleStream(ConsoleJSON, LevelDebug, ConsoleWriter(&out))))
	log.Info("hello", "key", "value")

	var got map[string]any
	if err := json.Unmarshal(out.Bytes(), &got); err != nil {
		t.Fatalf("parse JSON: %v\nbody: %s", err, out.String())
	}
	if got["msg"] != "hello" {
		t.Errorf("expected msg=hello, got %v", got["msg"])
	}
	if got["key"] != "value" {
		t.Errorf("expected key=value, got %v", got["key"])
	}
}
// TestConsoleStream_Pretty verifies the text-handler path: output is
// key=value text, not JSON, and inline attributes appear verbatim.
func TestConsoleStream_Pretty(t *testing.T) {
	var buf bytes.Buffer
	logger := New(WithStream(NewConsoleStream(ConsolePretty, LevelDebug, ConsoleWriter(&buf))))
	logger.Info("pretty test", "foo", "bar")
	if buf.Len() == 0 {
		t.Fatal("expected text output, got nothing")
	}
	// Crude JSON sniff: the text handler never emits braces for flat attrs.
	if bytes.Contains(buf.Bytes(), []byte("{")) {
		t.Errorf("expected text format, got JSON-like output: %s", buf.String())
	}
	if !bytes.Contains(buf.Bytes(), []byte("foo=bar")) {
		t.Errorf("expected foo=bar in output: %s", buf.String())
	}
}

// TestConsoleStream_LevelFilter verifies that records below the stream's
// level are dropped: Info is filtered at LevelError, Error passes, so
// exactly one line reaches the buffer.
func TestConsoleStream_LevelFilter(t *testing.T) {
	var buf bytes.Buffer
	logger := New(WithStream(NewConsoleStream(ConsoleJSON, LevelError, ConsoleWriter(&buf))))
	logger.Info("skipped")
	logger.Error("kept")
	lines := bytes.Split(bytes.TrimSpace(buf.Bytes()), []byte("\n"))
	if len(lines) != 1 {
		t.Fatalf("expected 1 line, got %d: %s", len(lines), buf.String())
	}
}

113
file_rotation.go Normal file
View file

@ -0,0 +1,113 @@
package splinter
import (
"compress/gzip"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"time"
)
// rotationTimestampFormat is the UTC timestamp embedded in backup file
// names. It is fixed-width with nanosecond precision, so lexicographic
// order of backup names equals chronological order (pruneBackups relies on
// this).
const rotationTimestampFormat = "20060102T150405.000000000Z"

// shouldRotate reports whether the current file has hit a rotation trigger:
// the byte counter reached the MaxSizeMB budget, or the file has been open
// for at least MaxAge. A non-positive threshold disables that trigger.
// Caller must hold s.mu.
func (s *FileStream) shouldRotate() bool {
	if s.cfg.MaxSizeMB > 0 && s.counter.n >= int64(s.cfg.MaxSizeMB)*1024*1024 {
		return true
	}
	if s.cfg.MaxAge > 0 && time.Since(s.openedAt) >= s.cfg.MaxAge {
		return true
	}
	return false
}
// rotate closes the current file, renames it to a timestamped backup, opens
// a fresh file, and kicks off async compression + pruning. Caller holds s.mu.
//
// Previously, a failed os.Rename returned with s.file already closed, so
// every later Write hit a closed handle and the stream was dead forever.
// Now the file is always reopened; on rename failure the size counter is
// restored from disk (we reopened the old, un-rotated file) and the rename
// error is still reported to the caller.
func (s *FileStream) rotate() error {
	if err := s.file.Close(); err != nil {
		return err
	}

	ts := time.Now().UTC().Format(rotationTimestampFormat)
	ext := filepath.Ext(s.path)
	base := s.path[:len(s.path)-len(ext)]
	backupPath := fmt.Sprintf("%s.%s%s", base, ts, ext)
	renameErr := os.Rename(s.path, backupPath)

	f, err := os.OpenFile(s.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
	if err != nil {
		return err
	}
	var size int64
	if renameErr != nil {
		// Rename failed, so we just reopened the old file: seed the counter
		// with its real size instead of zero so size rotation stays honest.
		if info, statErr := f.Stat(); statErr == nil {
			size = info.Size()
		}
	}
	s.attach(f, size)
	s.openedAt = time.Now()

	if renameErr != nil {
		return renameErr
	}
	go s.compressAndPrune(backupPath, base, ext)
	return nil
}
// compressAndPrune runs in the background after rotation: gzips the new
// backup (if configured) and deletes any stale backups beyond MaxBackups.
// Errors are swallowed since this is best-effort housekeeping — a failed
// compression simply leaves the raw backup in place for a later prune.
// Note: nothing waits for this goroutine; Close does not join it.
func (s *FileStream) compressAndPrune(backupPath, base, ext string) {
	if s.cfg.Compress {
		_ = gzipFile(backupPath)
	}
	s.pruneBackups(base, ext)
}
// gzipFile compresses src to src+".gz", then removes src on success.
func gzipFile(src string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.OpenFile(src+".gz", os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
if err != nil {
return err
}
gz := gzip.NewWriter(out)
if _, err := io.Copy(gz, in); err != nil {
gz.Close()
out.Close()
os.Remove(src + ".gz")
return err
}
if err := gz.Close(); err != nil {
out.Close()
os.Remove(src + ".gz")
return err
}
if err := out.Close(); err != nil {
os.Remove(src + ".gz")
return err
}
return os.Remove(src)
}
// pruneBackups removes the oldest rotated files when the total exceeds
// MaxBackups. Raw and gzipped backups count toward the same budget.
func (s *FileStream) pruneBackups(base, ext string) {
	backups, _ := filepath.Glob(fmt.Sprintf("%s.*%s", base, ext))
	compressed, _ := filepath.Glob(fmt.Sprintf("%s.*%s.gz", base, ext))
	backups = append(backups, compressed...)

	excess := len(backups) - s.cfg.MaxBackups
	if excess <= 0 {
		return
	}

	// Timestamps are fixed-width UTC, so lexicographic order is
	// chronological: the oldest names sort first. A trailing ".gz" sorts
	// after the raw form, so any transient duplicate during compression is
	// handled naturally on the next rotation.
	sort.Strings(backups)
	for _, stale := range backups[:excess] {
		_ = os.Remove(stale)
	}
}

166
file_stream.go Normal file
View file

@ -0,0 +1,166 @@
package splinter
import (
"context"
"fmt"
"io"
"log/slog"
"os"
"path/filepath"
"sync"
"time"
)
// FileFormat selects the on-disk encoding for FileStream.
// FileJSON is the zero value and therefore the default.
type FileFormat int

const (
	// FileJSON writes one JSON object per line (slog.JSONHandler).
	FileJSON FileFormat = iota
	// FileText writes a human-readable text line (slog.TextHandler).
	FileText
)
// FileStreamConfig controls FileStream behaviour. The zero value is usable:
// defaults() fills in safe settings when the stream is constructed.
type FileStreamConfig struct {
	// Level is the minimum level to write. Default: LevelInfo (the zero value).
	Level Level
	// Format is the on-disk encoding. Default: FileJSON.
	Format FileFormat
	// MaxSizeMB is the file-size rotation threshold in megabytes.
	// Zero disables size-based rotation. When both MaxSizeMB and MaxAge are
	// zero, MaxSizeMB defaults to 100 so logs cannot grow unbounded.
	MaxSizeMB int
	// MaxAge is the age-based rotation threshold. Zero disables it.
	// Rotation fires lazily on the first Write after the boundary elapses.
	MaxAge time.Duration
	// MaxBackups is the number of rotated files to retain. Default: 5.
	// Raw and gzipped backups count toward the same budget.
	MaxBackups int
	// Compress gzips rotated files asynchronously after rotation.
	Compress bool
}
// defaults fills in unset fields with safe values. The 100 MB size fallback
// now also applies when the caller passed negative (nonsensical) thresholds
// — shouldRotate treats non-positive values as disabled, so the old
// `== 0` check let e.g. MaxSizeMB = -1 with MaxAge = 0 grow the log without
// bound, violating the documented guarantee. MaxBackups falls back to 5.
func (c *FileStreamConfig) defaults() {
	if c.MaxSizeMB <= 0 && c.MaxAge <= 0 {
		c.MaxSizeMB = 100
	}
	if c.MaxBackups <= 0 {
		c.MaxBackups = 5
	}
}
// FileStream writes log records to a file with size and/or time-based
// rotation. Safe for concurrent use: mu guards the file handle, byte
// counter, and handler, all of which are swapped together during rotation.
type FileStream struct {
	mu       sync.Mutex
	file     *os.File        // current log file
	counter  *countingWriter // wraps file; tracks bytes for size rotation
	handler  slog.Handler    // encodes records, writing through counter
	path     string          // configured log file path
	cfg      FileStreamConfig
	openedAt time.Time // when the current file was opened (age rotation)
}
// NewFileStream opens (or creates) the log file at path and returns a ready
// stream. Parent directories are created as needed; an existing file is
// appended to, with its current size seeding the rotation counter.
func NewFileStream(path string, cfg FileStreamConfig) (*FileStream, error) {
	cfg.defaults()

	if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
		return nil, fmt.Errorf("splinter: create dir: %w", err)
	}

	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
	if err != nil {
		return nil, fmt.Errorf("splinter: open %s: %w", path, err)
	}

	info, err := f.Stat()
	if err != nil {
		f.Close()
		return nil, fmt.Errorf("splinter: stat %s: %w", path, err)
	}

	stream := &FileStream{
		path:     path,
		cfg:      cfg,
		openedAt: time.Now(),
	}
	stream.attach(f, info.Size())
	return stream, nil
}
// MustFileStream is like NewFileStream but panics on error. Convenient for
// application startup where a missing log file is fatal.
func MustFileStream(path string, cfg FileStreamConfig) *FileStream {
	stream, err := NewFileStream(path, cfg)
	if err != nil {
		panic(err)
	}
	return stream
}
// attach wires the file, counting writer, and slog handler together. Caller
// holds s.mu (or is in NewFileStream where no concurrency exists yet).
func (s *FileStream) attach(f *os.File, initialSize int64) {
	s.file = f
	s.counter = &countingWriter{w: f, n: initialSize}

	opts := &slog.HandlerOptions{Level: s.cfg.Level}
	if s.cfg.Format == FileText {
		s.handler = slog.NewTextHandler(s.counter, opts)
		return
	}
	s.handler = slog.NewJSONHandler(s.counter, opts)
}
// Name implements Stream. Includes the path so multiple file streams can be
// told apart in error reports.
func (s *FileStream) Name() string { return "file:" + s.path }

// Enabled implements Stream.
func (s *FileStream) Enabled(level Level) bool { return level >= s.cfg.Level }

// Write implements Stream. The record is converted to a slog.Record
// preserving rec.Time (PC is 0; source location is not captured), appended
// under the mutex, and then the file is rotated if a size/age threshold was
// crossed. Rotation runs after the write, so a single record may push the
// file slightly past MaxSizeMB.
func (s *FileStream) Write(ctx context.Context, rec Record) error {
	sr := slog.NewRecord(rec.Time, rec.Level, rec.Message, 0)
	for k, v := range rec.Attrs {
		sr.Add(k, v)
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	if err := s.handler.Handle(ctx, sr); err != nil {
		return fmt.Errorf("splinter: file write: %w", err)
	}
	if s.shouldRotate() {
		if err := s.rotate(); err != nil {
			return fmt.Errorf("splinter: rotate: %w", err)
		}
	}
	return nil
}

// Close implements Stream. Closes the underlying file (writes are unbuffered,
// so there is nothing to flush). Background compressAndPrune goroutines are
// not waited for — they are best-effort housekeeping.
func (s *FileStream) Close() error {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.file.Close()
}
// countingWriter wraps an io.Writer and tracks cumulative bytes written so
// FileStream can make accurate size-based rotation decisions.
type countingWriter struct {
w io.Writer
n int64
}
func (c *countingWriter) Write(p []byte) (int, error) {
n, err := c.w.Write(p)
c.n += int64(n)
return n, err
}

252
file_stream_test.go Normal file
View file

@ -0,0 +1,252 @@
package splinter
import (
"bytes"
"compress/gzip"
"encoding/json"
"io"
"os"
"path/filepath"
"strings"
"testing"
"time"
)
// TestFileStream_WritesJSON: a JSON file stream produces a parseable JSON
// line containing the message and attributes.
func TestFileStream_WritesJSON(t *testing.T) {
	path := filepath.Join(t.TempDir(), "test.log")
	fs, err := NewFileStream(path, FileStreamConfig{Level: LevelDebug, Format: FileJSON})
	if err != nil {
		t.Fatalf("NewFileStream: %v", err)
	}
	logger := New(WithStream(fs))
	logger.Info("file test", "key", "val")
	if err := fs.Close(); err != nil {
		t.Fatalf("close: %v", err)
	}
	data, err := os.ReadFile(path)
	if err != nil {
		t.Fatalf("read: %v", err)
	}
	var entry map[string]any
	if err := json.Unmarshal(bytes.TrimSpace(data), &entry); err != nil {
		t.Fatalf("parse: %v\nbody: %s", err, data)
	}
	if entry["msg"] != "file test" {
		t.Errorf("expected msg=file test, got %v", entry["msg"])
	}
	if entry["key"] != "val" {
		t.Errorf("expected key=val, got %v", entry["key"])
	}
}

// TestFileStream_WritesText: FileText produces key=value text, not JSON.
func TestFileStream_WritesText(t *testing.T) {
	path := filepath.Join(t.TempDir(), "test.log")
	fs, err := NewFileStream(path, FileStreamConfig{Level: LevelDebug, Format: FileText})
	if err != nil {
		t.Fatalf("NewFileStream: %v", err)
	}
	logger := New(WithStream(fs))
	logger.Info("plain", "key", "val")
	if err := fs.Close(); err != nil {
		t.Fatalf("close: %v", err)
	}
	data, err := os.ReadFile(path)
	if err != nil {
		t.Fatalf("read: %v", err)
	}
	// Crude JSON sniff: text output for flat attrs contains no braces.
	if bytes.Contains(data, []byte("{")) {
		t.Errorf("expected text format, got JSON-like output: %s", data)
	}
	if !bytes.Contains(data, []byte("key=val")) {
		t.Errorf("expected key=val in output: %s", data)
	}
}

// TestFileStream_LevelFilter: records below the configured level never
// reach the file, so only the Error line is written.
func TestFileStream_LevelFilter(t *testing.T) {
	path := filepath.Join(t.TempDir(), "test.log")
	fs, err := NewFileStream(path, FileStreamConfig{Level: LevelError})
	if err != nil {
		t.Fatalf("NewFileStream: %v", err)
	}
	logger := New(WithStream(fs))
	logger.Info("skip")
	logger.Error("keep")
	if err := fs.Close(); err != nil {
		t.Fatalf("close: %v", err)
	}
	data, err := os.ReadFile(path)
	if err != nil {
		t.Fatalf("read: %v", err)
	}
	lines := bytes.Split(bytes.TrimSpace(data), []byte("\n"))
	if len(lines) != 1 {
		t.Errorf("expected 1 line, got %d", len(lines))
	}
}
// TestFileStream_RotatesOnSize: a write pushing the file past MaxSizeMB
// (1 MB here) triggers rotation, leaving one timestamped backup plus a
// fresh active file.
func TestFileStream_RotatesOnSize(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "app.log")
	fs, err := NewFileStream(path, FileStreamConfig{
		Level:      LevelDebug,
		MaxSizeMB:  1,
		MaxBackups: 5,
	})
	if err != nil {
		t.Fatalf("NewFileStream: %v", err)
	}
	defer fs.Close()
	// A single attribute just over 1 MB guarantees one write crosses the
	// size threshold.
	big := strings.Repeat("x", 1024*1024+1)
	logger := New(WithStream(fs))
	logger.Info("trigger", "blob", big)
	backups, err := filepath.Glob(filepath.Join(dir, "app.*.log"))
	if err != nil {
		t.Fatalf("glob: %v", err)
	}
	if len(backups) != 1 {
		t.Fatalf("expected 1 backup after size rotation, got %d (%v)", len(backups), backups)
	}
	if _, err := os.Stat(path); err != nil {
		t.Errorf("expected fresh app.log to exist: %v", err)
	}
}

// TestFileStream_RotatesOnAge: once MaxAge elapses, the next write rotates
// the file (rotation is lazy — it happens on Write, not on a timer).
func TestFileStream_RotatesOnAge(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "app.log")
	fs, err := NewFileStream(path, FileStreamConfig{
		Level:      LevelDebug,
		MaxAge:     30 * time.Millisecond,
		MaxBackups: 5,
	})
	if err != nil {
		t.Fatalf("NewFileStream: %v", err)
	}
	defer fs.Close()
	logger := New(WithStream(fs))
	logger.Info("first")
	time.Sleep(60 * time.Millisecond)
	logger.Info("second")
	backups, err := filepath.Glob(filepath.Join(dir, "app.*.log"))
	if err != nil {
		t.Fatalf("glob: %v", err)
	}
	if len(backups) != 1 {
		t.Fatalf("expected 1 backup after age rotation, got %d (%v)", len(backups), backups)
	}
}

// TestFileStream_CompressOnRotation: with Compress set, the rotated backup
// is gzipped in the background (the raw copy disappears) and the archive
// still contains the pre-rotation record.
func TestFileStream_CompressOnRotation(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "app.log")
	fs, err := NewFileStream(path, FileStreamConfig{
		Level:      LevelDebug,
		MaxAge:     20 * time.Millisecond,
		MaxBackups: 5,
		Compress:   true,
	})
	if err != nil {
		t.Fatalf("NewFileStream: %v", err)
	}
	defer fs.Close()
	logger := New(WithStream(fs))
	logger.Info("first", "k", "v")
	time.Sleep(40 * time.Millisecond)
	logger.Info("second", "k", "v")
	// Compression runs in a goroutine, so poll rather than assert directly.
	gzPaths := waitForFiles(t, filepath.Join(dir, "app.*.log.gz"), 1, 2*time.Second)
	rawPaths, _ := filepath.Glob(filepath.Join(dir, "app.*.log"))
	if len(rawPaths) != 0 {
		t.Errorf("expected raw .log backups to be removed after gzip, got %v", rawPaths)
	}
	gzFile, err := os.Open(gzPaths[0])
	if err != nil {
		t.Fatalf("open gz: %v", err)
	}
	defer gzFile.Close()
	gr, err := gzip.NewReader(gzFile)
	if err != nil {
		t.Fatalf("gzip reader: %v", err)
	}
	body, err := io.ReadAll(gr)
	if err != nil {
		t.Fatalf("read gz: %v", err)
	}
	if !bytes.Contains(body, []byte(`"first"`)) {
		t.Errorf("expected gzipped backup to contain first message, got %s", body)
	}
}
// TestFileStream_PruneRespectsMaxBackups: repeated age-based rotations never
// leave more than MaxBackups rotated files once the async prune goroutines
// settle.
func TestFileStream_PruneRespectsMaxBackups(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "app.log")
	fs, err := NewFileStream(path, FileStreamConfig{
		Level:      LevelDebug,
		MaxAge:     5 * time.Millisecond,
		MaxBackups: 2,
	})
	if err != nil {
		t.Fatalf("NewFileStream: %v", err)
	}
	defer fs.Close()
	logger := New(WithStream(fs))
	for i := 0; i < 5; i++ {
		logger.Info("rotate")
		// Sleep longer than MaxAge so the next write triggers rotation, plus
		// enough nanoseconds to guarantee a unique timestamp suffix.
		time.Sleep(15 * time.Millisecond)
	}
	// Final write that flushes the last rotation through to disk.
	logger.Info("final")
	// Wait for prune goroutines to settle.
	deadline := time.Now().Add(2 * time.Second)
	for time.Now().Before(deadline) {
		backups, _ := filepath.Glob(filepath.Join(dir, "app.*.log"))
		if len(backups) <= 2 {
			break
		}
		time.Sleep(10 * time.Millisecond)
	}
	backups, _ := filepath.Glob(filepath.Join(dir, "app.*.log"))
	if len(backups) > 2 {
		t.Errorf("expected at most 2 backups, got %d (%v)", len(backups), backups)
	}
}
// waitForFiles polls the glob pattern until it matches at least `want`
// entries or the deadline expires. Returns the matched paths.
func waitForFiles(t *testing.T, pattern string, want int, timeout time.Duration) []string {
t.Helper()
deadline := time.Now().Add(timeout)
for time.Now().Before(deadline) {
matches, err := filepath.Glob(pattern)
if err != nil {
t.Fatalf("glob %s: %v", pattern, err)
}
if len(matches) >= want {
return matches
}
time.Sleep(10 * time.Millisecond)
}
matches, _ := filepath.Glob(pattern)
t.Fatalf("timed out waiting for %d files matching %s (got %d: %v)", want, pattern, len(matches), matches)
return nil
}

3
go.mod Normal file
View file

@ -0,0 +1,3 @@
module git.juancwu.dev/juancwu/splinter
go 1.26.2

124
logger.go Normal file
View file

@ -0,0 +1,124 @@
package splinter
import (
"context"
"fmt"
"maps"
"os"
"time"
)
// Logger fans out log records to one or more Streams. Streams are fixed at
// construction time; child loggers from With share the same streams.
// Construct with New — the zero value has no streams and no error handler.
type Logger struct {
	streams   []Stream       // fan-out targets, fixed after New
	baseAttrs map[string]any // merged into every record's Attrs
	onError   func(stream string, rec Record, err error)
}

// Option configures a Logger.
type Option func(*Logger)

// WithStream adds a Stream to the logger. May be supplied multiple times;
// records are delivered to streams in the order they were added.
func WithStream(s Stream) Option {
	return func(l *Logger) { l.streams = append(l.streams, s) }
}

// WithAttrs pre-populates attributes on every record produced by this
// logger. Later WithAttrs options overwrite earlier values for the same key.
func WithAttrs(attrs map[string]any) Option {
	return func(l *Logger) { maps.Copy(l.baseAttrs, attrs) }
}

// WithErrorHandler sets a callback invoked when a stream's Write fails.
// The default handler prints to stderr.
func WithErrorHandler(fn func(stream string, rec Record, err error)) Option {
	return func(l *Logger) { l.onError = fn }
}
// New creates a Logger. Options are applied in order; if none of them adds
// a stream, a JSON ConsoleStream at LevelInfo writing to stderr is used so
// records are never silently dropped.
func New(opts ...Option) *Logger {
	logger := &Logger{
		baseAttrs: map[string]any{},
		onError:   defaultOnError,
	}
	for _, apply := range opts {
		apply(logger)
	}
	if len(logger.streams) == 0 {
		logger.streams = []Stream{NewConsoleStream(ConsoleJSON, LevelInfo)}
	}
	return logger
}
// With returns a child Logger with additional attributes merged onto the
// base (the child's keys win on collision). The child shares streams and
// the error handler with the parent.
func (l *Logger) With(attrs map[string]any) *Logger {
	combined := make(map[string]any, len(l.baseAttrs)+len(attrs))
	for k, v := range l.baseAttrs {
		combined[k] = v
	}
	for k, v := range attrs {
		combined[k] = v
	}
	return &Logger{
		streams:   l.streams,
		baseAttrs: combined,
		onError:   l.onError,
	}
}
// Debug logs at LevelDebug. args are alternating key/value pairs.
func (l *Logger) Debug(msg string, args ...any) { l.log(LevelDebug, msg, args...) }

// Info logs at LevelInfo. args are alternating key/value pairs.
func (l *Logger) Info(msg string, args ...any) { l.log(LevelInfo, msg, args...) }

// Warn logs at LevelWarn. args are alternating key/value pairs.
func (l *Logger) Warn(msg string, args ...any) { l.log(LevelWarn, msg, args...) }

// Error logs at LevelError. args are alternating key/value pairs.
func (l *Logger) Error(msg string, args ...any) { l.log(LevelError, msg, args...) }
// Close shuts down all streams. Every stream's Close is attempted even when
// an earlier one fails; the first error encountered is returned.
func (l *Logger) Close() error {
	var firstErr error
	for _, stream := range l.streams {
		err := stream.Close()
		if err == nil || firstErr != nil {
			continue
		}
		firstErr = err
	}
	return firstErr
}
// log assembles a Record stamped with the current time and fans it out to
// every stream that reports the level enabled. Stream write failures are
// routed to the onError callback rather than propagated to the caller.
func (l *Logger) log(level Level, msg string, args ...any) {
	ctx := context.Background()
	rec := Record{
		Time:    time.Now(),
		Level:   level,
		Message: msg,
		Attrs:   l.buildAttrs(args),
	}
	for _, stream := range l.streams {
		if !stream.Enabled(level) {
			continue
		}
		if err := stream.Write(ctx, rec); err != nil {
			l.onError(stream.Name(), rec, err)
		}
	}
}
// buildAttrs merges the logger's base attributes with variadic key/value
// pairs. Keys must be strings — a pair whose key is not a string is skipped
// — and a dangling key with no value at the end of args is ignored.
func (l *Logger) buildAttrs(args []any) map[string]any {
	attrs := make(map[string]any, len(l.baseAttrs)+len(args)/2)
	for k, v := range l.baseAttrs {
		attrs[k] = v
	}
	// Walk value positions (odd indices); args[i-1] is the candidate key.
	for i := 1; i < len(args); i += 2 {
		if key, ok := args[i-1].(string); ok {
			attrs[key] = args[i]
		}
	}
	return attrs
}
// defaultOnError is the fallback write-failure handler: it reports the
// failing stream's name and the error on stderr. The record itself is
// dropped.
func defaultOnError(stream string, _ Record, err error) {
	fmt.Fprintf(os.Stderr, "splinter: stream %q write failed: %v\n", stream, err)
}

219
logger_test.go Normal file
View file

@ -0,0 +1,219 @@
package splinter
import (
"bytes"
"context"
"encoding/json"
"errors"
"sync"
"testing"
)
// memoryStream is a test double that captures every Record it receives.
// A mutex guards the slice so tests may log from multiple goroutines.
type memoryStream struct {
	mu      sync.Mutex
	records []Record
	level   Level // minimum level reported by Enabled
}

// newMemoryStream returns a memoryStream accepting records at or above level.
func newMemoryStream(level Level) *memoryStream {
	return &memoryStream{level: level}
}

// Name implements Stream.
func (m *memoryStream) Name() string { return "memory" }

// Write implements Stream by appending the record to the in-memory slice.
func (m *memoryStream) Write(_ context.Context, rec Record) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.records = append(m.records, rec)
	return nil
}

// Enabled implements Stream.
func (m *memoryStream) Enabled(level Level) bool { return level >= m.level }

// Close implements Stream. Nothing to release.
func (m *memoryStream) Close() error { return nil }

// len returns the number of captured records.
func (m *memoryStream) len() int {
	m.mu.Lock()
	defer m.mu.Unlock()
	return len(m.records)
}

// last returns the most recently captured record. Panics if nothing was
// captured — tests only call it after logging something.
func (m *memoryStream) last() Record {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.records[len(m.records)-1]
}

// failingStream is a test double whose Write always errors, used to
// exercise the logger's error-handler callback.
type failingStream struct{}

func (f *failingStream) Name() string                            { return "failing" }
func (f *failingStream) Write(_ context.Context, _ Record) error { return errors.New("always fails") }
func (f *failingStream) Enabled(_ Level) bool                    { return true }
func (f *failingStream) Close() error                            { return nil }
// TestLogger_LevelFiltering: only records at or above the stream's level
// (Warn here) reach it.
func TestLogger_LevelFiltering(t *testing.T) {
	mem := newMemoryStream(LevelWarn)
	logger := New(WithStream(mem))
	logger.Debug("d")
	logger.Info("i")
	logger.Warn("w")
	logger.Error("e")
	if mem.len() != 2 {
		t.Fatalf("expected 2 records (Warn+Error), got %d", mem.len())
	}
}

// TestLogger_BaseAttrs: attributes supplied via WithAttrs appear on every
// record.
func TestLogger_BaseAttrs(t *testing.T) {
	mem := newMemoryStream(LevelDebug)
	logger := New(WithStream(mem), WithAttrs(map[string]any{"service": "api"}))
	logger.Info("request")
	if mem.last().Attrs["service"] != "api" {
		t.Errorf("expected service=api, got %v", mem.last().Attrs["service"])
	}
}

// TestLogger_With: a child logger carries its extra attributes and still
// writes to the parent's streams.
func TestLogger_With(t *testing.T) {
	mem := newMemoryStream(LevelDebug)
	logger := New(WithStream(mem))
	child := logger.With(map[string]any{"request_id": "abc"})
	child.Info("handled")
	if mem.last().Attrs["request_id"] != "abc" {
		t.Errorf("expected request_id=abc, got %v", mem.last().Attrs["request_id"])
	}
}

// TestLogger_InlineArgsOverrideBase: per-call key/value args win over base
// attributes with the same key.
func TestLogger_InlineArgsOverrideBase(t *testing.T) {
	mem := newMemoryStream(LevelDebug)
	logger := New(WithStream(mem), WithAttrs(map[string]any{"env": "prod"}))
	logger.Info("override", "env", "staging")
	if mem.last().Attrs["env"] != "staging" {
		t.Errorf("expected staging, got %v", mem.last().Attrs["env"])
	}
}

// TestLogger_FanOut: one log call reaches every registered stream.
func TestLogger_FanOut(t *testing.T) {
	a := newMemoryStream(LevelDebug)
	b := newMemoryStream(LevelDebug)
	logger := New(WithStream(a), WithStream(b))
	logger.Info("fanout")
	if a.len() != 1 || b.len() != 1 {
		t.Errorf("expected both streams to receive 1 record, got %d and %d", a.len(), b.len())
	}
}
// TestLogger_ErrorHandler: a failing stream's error is routed to the
// WithErrorHandler callback along with the stream's name.
func TestLogger_ErrorHandler(t *testing.T) {
	var captured string
	logger := New(
		WithStream(&failingStream{}),
		WithErrorHandler(func(stream string, _ Record, err error) {
			captured = stream + ": " + err.Error()
		}),
	)
	logger.Info("trigger")
	if captured != "failing: always fails" {
		t.Errorf("expected handler to fire, got %q", captured)
	}
}

// TestLogger_DefaultsToConsoleStream: New() with no options falls back to a
// single console stream.
func TestLogger_DefaultsToConsoleStream(t *testing.T) {
	logger := New()
	if len(logger.streams) != 1 {
		t.Fatalf("expected 1 default stream, got %d", len(logger.streams))
	}
	if _, ok := logger.streams[0].(*ConsoleStream); !ok {
		t.Errorf("expected default to be *ConsoleStream, got %T", logger.streams[0])
	}
}

// TestPackageFuncs_RouteThroughDefault: the package-level helpers all hit
// the logger installed via SetDefault.
func TestPackageFuncs_RouteThroughDefault(t *testing.T) {
	mem := newMemoryStream(LevelDebug)
	prev := SetDefault(New(WithStream(mem)))
	defer SetDefault(prev)
	Debug("d")
	Info("i")
	Warn("w")
	Error("e")
	if mem.len() != 4 {
		t.Fatalf("expected 4 records via package funcs, got %d", mem.len())
	}
}

// TestSetDefault_ReturnsPrevious: SetDefault swaps the default logger and
// hands back the old one; subsequent package calls hit only the new logger.
func TestSetDefault_ReturnsPrevious(t *testing.T) {
	memA := newMemoryStream(LevelDebug)
	memB := newMemoryStream(LevelDebug)
	loggerA := New(WithStream(memA))
	loggerB := New(WithStream(memB))
	original := SetDefault(loggerA)
	t.Cleanup(func() { SetDefault(original) })
	prev := SetDefault(loggerB)
	if prev != loggerA {
		t.Errorf("expected SetDefault to return loggerA, got %p", prev)
	}
	Info("via B")
	if memA.len() != 0 {
		t.Errorf("expected memA to be empty after swap, got %d", memA.len())
	}
	if memB.len() != 1 {
		t.Errorf("expected memB to have 1 record, got %d", memB.len())
	}
}
// TestLevelFromString: known names parse case-insensitively; unknown input
// falls back to LevelInfo.
func TestLevelFromString(t *testing.T) {
	tests := []struct {
		in   string
		want Level
	}{
		{"debug", LevelDebug},
		{"DEBUG", LevelDebug},
		{"info", LevelInfo},
		{"warn", LevelWarn},
		{"error", LevelError},
		{"bogus", LevelInfo},
	}
	for _, tt := range tests {
		if got := LevelFromString(tt.in); got != tt.want {
			t.Errorf("LevelFromString(%q) = %v, want %v", tt.in, got, tt.want)
		}
	}
}

// TestRecord_LevelLabel: each standard level maps to its upper-case label.
func TestRecord_LevelLabel(t *testing.T) {
	tests := []struct {
		level Level
		want  string
	}{
		{LevelDebug, "DEBUG"},
		{LevelInfo, "INFO"},
		{LevelWarn, "WARN"},
		{LevelError, "ERROR"},
	}
	for _, tt := range tests {
		r := Record{Level: tt.level}
		if got := r.LevelLabel(); got != tt.want {
			t.Errorf("LevelLabel(%v) = %q, want %q", tt.level, got, tt.want)
		}
	}
}

// Sanity: serialising a Record through encoding/json works for callers who
// build their own streams from scratch.
func TestRecord_JSONShape(t *testing.T) {
	r := Record{Message: "hi", Attrs: map[string]any{"k": 1}}
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(r); err != nil {
		t.Fatalf("encode: %v", err)
	}
	if !bytes.Contains(buf.Bytes(), []byte(`"Message":"hi"`)) {
		t.Errorf("missing Message in JSON: %s", buf.String())
	}
}

41
splinter.go Normal file
View file

@ -0,0 +1,41 @@
// Package splinter is a small, opinionated logger that fans out structured
// records to one or more streams (console, file, custom). The default
// package-level logger writes JSON to stderr at LevelInfo; replace it with
// SetDefault to wire in your own configuration.
package splinter
import "sync"
var (
	// defaultMu guards def: the package-level helpers may run concurrently
	// with SetDefault.
	defaultMu sync.RWMutex
	// def is the process-wide logger — JSON to stderr at LevelInfo until
	// replaced via SetDefault.
	def = New(WithStream(NewConsoleStream(ConsoleJSON, LevelInfo)))
)
// Default returns the package-level Logger.
func Default() *Logger {
	defaultMu.RLock()
	current := def
	defaultMu.RUnlock()
	return current
}

// SetDefault replaces the package-level Logger. The previous Logger is
// returned so the caller can Close it if they own its streams.
func SetDefault(l *Logger) *Logger {
	defaultMu.Lock()
	defer defaultMu.Unlock()
	prev := def
	def = l
	return prev
}
// Debug logs at LevelDebug on the default Logger. args are alternating
// key/value pairs.
func Debug(msg string, args ...any) { Default().Debug(msg, args...) }

// Info logs at LevelInfo on the default Logger.
func Info(msg string, args ...any) { Default().Info(msg, args...) }

// Warn logs at LevelWarn on the default Logger.
func Warn(msg string, args ...any) { Default().Warn(msg, args...) }

// Error logs at LevelError on the default Logger.
func Error(msg string, args ...any) { Default().Error(msg, args...) }

65
stream.go Normal file
View file

@ -0,0 +1,65 @@
package splinter
import (
	"context"
	"log/slog"
	"strings"
	"time"
)
// Level mirrors slog.Level so callers don't need to import slog directly.
type Level = slog.Level

const (
	LevelDebug Level = slog.LevelDebug
	LevelInfo  Level = slog.LevelInfo
	LevelWarn  Level = slog.LevelWarn
	LevelError Level = slog.LevelError
)

// LevelFromString parses a level name (case-insensitive). Accepted:
// "debug", "info", "warn"/"warning", "error". Unknown input returns LevelInfo.
func LevelFromString(s string) Level {
	// slog.Level.UnmarshalText recognises only DEBUG/INFO/WARN/ERROR, so the
	// documented "warning" alias must be mapped explicitly — previously it
	// fell through the error path and silently became LevelInfo.
	if strings.EqualFold(s, "warning") {
		return LevelWarn
	}
	var l slog.Level
	if err := l.UnmarshalText([]byte(s)); err != nil {
		return LevelInfo
	}
	return l
}
// Record is a self-contained log entry passed to every Stream.
type Record struct {
	Time    time.Time      // when the record was created
	Level   Level          // severity
	Message string         // human-readable message
	Attrs   map[string]any // merged base + per-call attributes
}

// LevelLabel returns a human-friendly label for the record's level:
// everything below Info is "DEBUG", below Warn "INFO", below Error "WARN",
// and anything else "ERROR".
func (r Record) LevelLabel() string {
	if r.Level < LevelInfo {
		return "DEBUG"
	}
	if r.Level < LevelWarn {
		return "INFO"
	}
	if r.Level < LevelError {
		return "WARN"
	}
	return "ERROR"
}
// Stream is the abstraction for a log output destination. Implementations
// are registered on a Logger via WithStream; ConsoleStream and FileStream
// are the built-ins.
type Stream interface {
	// Name returns a short identifier used in error reporting (e.g. "console").
	Name() string
	// Write handles a single record. Implementations must be safe for
	// concurrent use.
	Write(ctx context.Context, rec Record) error
	// Enabled reports whether the stream cares about the given level.
	// Logger.log consults this before calling Write.
	Enabled(level Level) bool
	// Close flushes and releases any resources. Called once by Logger.Close.
	Close() error
}