restructured logger from budigt
This commit is contained in:
parent
358ee6acc0
commit
b2fd12b1c8
11 changed files with 1227 additions and 1 deletions
166
file_stream.go
Normal file
166
file_stream.go
Normal file
|
|
@ -0,0 +1,166 @@
|
|||
package splinter
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// FileFormat selects the on-disk encoding for FileStream.
|
||||
type FileFormat int
|
||||
|
||||
const (
|
||||
// FileJSON writes one JSON object per line (slog.JSONHandler).
|
||||
FileJSON FileFormat = iota
|
||||
// FileText writes a human-readable text line (slog.TextHandler).
|
||||
FileText
|
||||
)
|
||||
|
||||
// FileStreamConfig controls FileStream behaviour.
|
||||
type FileStreamConfig struct {
|
||||
// Level is the minimum level to write. Default: LevelInfo.
|
||||
Level Level
|
||||
|
||||
// Format is the on-disk encoding. Default: FileJSON.
|
||||
Format FileFormat
|
||||
|
||||
// MaxSizeMB is the file-size rotation threshold in megabytes.
|
||||
// Zero disables size-based rotation. When both MaxSizeMB and MaxAge are
|
||||
// zero, MaxSizeMB defaults to 100 so logs cannot grow unbounded.
|
||||
MaxSizeMB int
|
||||
|
||||
// MaxAge is the age-based rotation threshold. Zero disables it.
|
||||
// Rotation fires lazily on the first Write after the boundary elapses.
|
||||
MaxAge time.Duration
|
||||
|
||||
// MaxBackups is the number of rotated files to retain. Default: 5.
|
||||
MaxBackups int
|
||||
|
||||
// Compress gzips rotated files asynchronously after rotation.
|
||||
Compress bool
|
||||
}
|
||||
|
||||
func (c *FileStreamConfig) defaults() {
|
||||
if c.MaxSizeMB == 0 && c.MaxAge == 0 {
|
||||
c.MaxSizeMB = 100
|
||||
}
|
||||
if c.MaxBackups <= 0 {
|
||||
c.MaxBackups = 5
|
||||
}
|
||||
}
|
||||
|
||||
// FileStream writes log records to a file with size and/or time-based
|
||||
// rotation. Safe for concurrent use.
|
||||
type FileStream struct {
|
||||
mu sync.Mutex
|
||||
file *os.File
|
||||
counter *countingWriter
|
||||
handler slog.Handler
|
||||
path string
|
||||
cfg FileStreamConfig
|
||||
openedAt time.Time
|
||||
}
|
||||
|
||||
// NewFileStream opens (or creates) the log file and returns a ready stream.
|
||||
func NewFileStream(path string, cfg FileStreamConfig) (*FileStream, error) {
|
||||
cfg.defaults()
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
|
||||
return nil, fmt.Errorf("splinter: create dir: %w", err)
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("splinter: open %s: %w", path, err)
|
||||
}
|
||||
info, err := f.Stat()
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, fmt.Errorf("splinter: stat %s: %w", path, err)
|
||||
}
|
||||
|
||||
s := &FileStream{
|
||||
path: path,
|
||||
cfg: cfg,
|
||||
openedAt: time.Now(),
|
||||
}
|
||||
s.attach(f, info.Size())
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// MustFileStream is like NewFileStream but panics on error. Convenient for
|
||||
// application startup where a missing log file is fatal.
|
||||
func MustFileStream(path string, cfg FileStreamConfig) *FileStream {
|
||||
s, err := NewFileStream(path, cfg)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// attach wires up the file, counting writer, and slog handler. Caller holds
|
||||
// s.mu (or is in NewFileStream where no concurrency exists yet).
|
||||
func (s *FileStream) attach(f *os.File, initialSize int64) {
|
||||
s.file = f
|
||||
s.counter = &countingWriter{w: f, n: initialSize}
|
||||
|
||||
opts := &slog.HandlerOptions{Level: s.cfg.Level}
|
||||
switch s.cfg.Format {
|
||||
case FileText:
|
||||
s.handler = slog.NewTextHandler(s.counter, opts)
|
||||
default:
|
||||
s.handler = slog.NewJSONHandler(s.counter, opts)
|
||||
}
|
||||
}
|
||||
|
||||
// Name implements Stream.
|
||||
func (s *FileStream) Name() string { return "file:" + s.path }
|
||||
|
||||
// Enabled implements Stream.
|
||||
func (s *FileStream) Enabled(level Level) bool { return level >= s.cfg.Level }
|
||||
|
||||
// Write implements Stream.
|
||||
func (s *FileStream) Write(ctx context.Context, rec Record) error {
|
||||
sr := slog.NewRecord(rec.Time, rec.Level, rec.Message, 0)
|
||||
for k, v := range rec.Attrs {
|
||||
sr.Add(k, v)
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
if err := s.handler.Handle(ctx, sr); err != nil {
|
||||
return fmt.Errorf("splinter: file write: %w", err)
|
||||
}
|
||||
if s.shouldRotate() {
|
||||
if err := s.rotate(); err != nil {
|
||||
return fmt.Errorf("splinter: rotate: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close implements Stream. Flushes and closes the underlying file.
|
||||
func (s *FileStream) Close() error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.file.Close()
|
||||
}
|
||||
|
||||
// countingWriter wraps an io.Writer and tracks cumulative bytes written so
|
||||
// FileStream can make accurate size-based rotation decisions.
|
||||
type countingWriter struct {
|
||||
w io.Writer
|
||||
n int64
|
||||
}
|
||||
|
||||
func (c *countingWriter) Write(p []byte) (int, error) {
|
||||
n, err := c.w.Write(p)
|
||||
c.n += int64(n)
|
||||
return n, err
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue