Restructured logger from budigt
parent 358ee6acc0
commit b2fd12b1c8
11 changed files with 1227 additions and 1 deletion
file_stream_test.go (new file, 252 additions)
@@ -0,0 +1,252 @@
package splinter

import (
    "bytes"
    "compress/gzip"
    "encoding/json"
    "io"
    "os"
    "path/filepath"
    "strings"
    "testing"
    "time"
)
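
// TestFileStream_WritesJSON checks that a FileJSON stream writes one JSON
// object per entry, carrying both the message and the key/value attributes.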
func TestFileStream_WritesJSON(t *testing.T) {
    path := filepath.Join(t.TempDir(), "test.log")
    fs, err := NewFileStream(path, FileStreamConfig{Level: LevelDebug, Format: FileJSON})
    if err != nil {
        t.Fatalf("NewFileStream: %v", err)
    }

    logger := New(WithStream(fs))
    logger.Info("file test", "key", "val")
    if err := fs.Close(); err != nil {
        t.Fatalf("close: %v", err)
    }

    data, err := os.ReadFile(path)
    if err != nil {
        t.Fatalf("read: %v", err)
    }
    var entry map[string]any
    if err := json.Unmarshal(bytes.TrimSpace(data), &entry); err != nil {
        t.Fatalf("parse: %v\nbody: %s", err, data)
    }
    if entry["msg"] != "file test" {
        t.Errorf("expected msg=file test, got %v", entry["msg"])
    }
    if entry["key"] != "val" {
        t.Errorf("expected key=val, got %v", entry["key"])
    }
}
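
// TestFileStream_WritesText checks that a FileText stream produces plain
// key=val output rather than JSON.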
func TestFileStream_WritesText(t *testing.T) {
    path := filepath.Join(t.TempDir(), "test.log")
    fs, err := NewFileStream(path, FileStreamConfig{Level: LevelDebug, Format: FileText})
    if err != nil {
        t.Fatalf("NewFileStream: %v", err)
    }

    logger := New(WithStream(fs))
    logger.Info("plain", "key", "val")
    if err := fs.Close(); err != nil {
        t.Fatalf("close: %v", err)
    }

    data, err := os.ReadFile(path)
    if err != nil {
        t.Fatalf("read: %v", err)
    }
    if bytes.Contains(data, []byte("{")) {
        t.Errorf("expected text format, got JSON-like output: %s", data)
    }
    if !bytes.Contains(data, []byte("key=val")) {
        t.Errorf("expected key=val in output: %s", data)
    }
}
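
// TestFileStream_LevelFilter checks that entries below the stream's
// configured level never reach the file.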
func TestFileStream_LevelFilter(t *testing.T) {
    path := filepath.Join(t.TempDir(), "test.log")
    fs, err := NewFileStream(path, FileStreamConfig{Level: LevelError})
    if err != nil {
        t.Fatalf("NewFileStream: %v", err)
    }

    logger := New(WithStream(fs))
    logger.Info("skip")
    logger.Error("keep")
    if err := fs.Close(); err != nil {
        t.Fatalf("close: %v", err)
    }

    data, err := os.ReadFile(path)
    if err != nil {
        t.Fatalf("read: %v", err)
    }
    // Guard against a false pass on an empty file: an empty input also splits
    // into one (empty) line, so verify the error entry actually landed.
    if !bytes.Contains(data, []byte("keep")) {
        t.Errorf("expected error entry to be written, got: %s", data)
    }
    lines := bytes.Split(bytes.TrimSpace(data), []byte("\n"))
    if len(lines) != 1 {
        t.Errorf("expected 1 line, got %d", len(lines))
    }
}
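
// TestFileStream_RotatesOnSize writes a single record larger than MaxSizeMB
// and expects one timestamped backup plus a fresh app.log.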
func TestFileStream_RotatesOnSize(t *testing.T) {
    dir := t.TempDir()
    path := filepath.Join(dir, "app.log")
    fs, err := NewFileStream(path, FileStreamConfig{
        Level:      LevelDebug,
        MaxSizeMB:  1,
        MaxBackups: 5,
    })
    if err != nil {
        t.Fatalf("NewFileStream: %v", err)
    }
    defer fs.Close()

    big := strings.Repeat("x", 1024*1024+1)
    logger := New(WithStream(fs))
    logger.Info("trigger", "blob", big)

    backups, err := filepath.Glob(filepath.Join(dir, "app.*.log"))
    if err != nil {
        t.Fatalf("glob: %v", err)
    }
    if len(backups) != 1 {
        t.Fatalf("expected 1 backup after size rotation, got %d (%v)", len(backups), backups)
    }
    if _, err := os.Stat(path); err != nil {
        t.Errorf("expected fresh app.log to exist: %v", err)
    }
}
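
// TestFileStream_RotatesOnAge lets the open segment outlive MaxAge so the
// next write triggers rotation, leaving exactly one backup.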
func TestFileStream_RotatesOnAge(t *testing.T) {
    dir := t.TempDir()
    path := filepath.Join(dir, "app.log")
    fs, err := NewFileStream(path, FileStreamConfig{
        Level:      LevelDebug,
        MaxAge:     30 * time.Millisecond,
        MaxBackups: 5,
    })
    if err != nil {
        t.Fatalf("NewFileStream: %v", err)
    }
    defer fs.Close()

    logger := New(WithStream(fs))
    logger.Info("first")

    time.Sleep(60 * time.Millisecond)
    logger.Info("second")

    backups, err := filepath.Glob(filepath.Join(dir, "app.*.log"))
    if err != nil {
        t.Fatalf("glob: %v", err)
    }
    if len(backups) != 1 {
        t.Fatalf("expected 1 backup after age rotation, got %d (%v)", len(backups), backups)
    }
}
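
// TestFileStream_CompressOnRotation checks that with Compress enabled the
// rotated segment ends up gzipped (compression runs asynchronously, hence
// the polling via waitForFiles), the raw backup is removed, and the
// compressed body still contains the original entry.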
func TestFileStream_CompressOnRotation(t *testing.T) {
    dir := t.TempDir()
    path := filepath.Join(dir, "app.log")
    fs, err := NewFileStream(path, FileStreamConfig{
        Level:      LevelDebug,
        MaxAge:     20 * time.Millisecond,
        MaxBackups: 5,
        Compress:   true,
    })
    if err != nil {
        t.Fatalf("NewFileStream: %v", err)
    }
    defer fs.Close()

    logger := New(WithStream(fs))
    logger.Info("first", "k", "v")

    time.Sleep(40 * time.Millisecond)
    logger.Info("second", "k", "v")

    gzPaths := waitForFiles(t, filepath.Join(dir, "app.*.log.gz"), 1, 2*time.Second)

    rawPaths, _ := filepath.Glob(filepath.Join(dir, "app.*.log"))
    if len(rawPaths) != 0 {
        t.Errorf("expected raw .log backups to be removed after gzip, got %v", rawPaths)
    }

    gzFile, err := os.Open(gzPaths[0])
    if err != nil {
        t.Fatalf("open gz: %v", err)
    }
    defer gzFile.Close()
    gr, err := gzip.NewReader(gzFile)
    if err != nil {
        t.Fatalf("gzip reader: %v", err)
    }
    body, err := io.ReadAll(gr)
    if err != nil {
        t.Fatalf("read gz: %v", err)
    }
    if !bytes.Contains(body, []byte(`"first"`)) {
        t.Errorf("expected gzipped backup to contain first message, got %s", body)
    }
}
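
// TestFileStream_PruneRespectsMaxBackups forces repeated age-based rotations
// and checks that pruning caps the retained backups at MaxBackups.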
func TestFileStream_PruneRespectsMaxBackups(t *testing.T) {
    dir := t.TempDir()
    path := filepath.Join(dir, "app.log")
    fs, err := NewFileStream(path, FileStreamConfig{
        Level:      LevelDebug,
        MaxAge:     5 * time.Millisecond,
        MaxBackups: 2,
    })
    if err != nil {
        t.Fatalf("NewFileStream: %v", err)
    }
    defer fs.Close()

    logger := New(WithStream(fs))
    for i := 0; i < 5; i++ {
        logger.Info("rotate")
        // Sleep longer than MaxAge so the next write triggers rotation, plus
        // enough nanoseconds to guarantee a unique timestamp suffix.
        time.Sleep(15 * time.Millisecond)
    }
    // Final write that flushes the last rotation through to disk.
    logger.Info("final")

    // Wait for prune goroutines to settle.
    deadline := time.Now().Add(2 * time.Second)
    for time.Now().Before(deadline) {
        backups, _ := filepath.Glob(filepath.Join(dir, "app.*.log"))
        if len(backups) <= 2 {
            break
        }
        time.Sleep(10 * time.Millisecond)
    }

    backups, _ := filepath.Glob(filepath.Join(dir, "app.*.log"))
    if len(backups) > 2 {
        t.Errorf("expected at most 2 backups, got %d (%v)", len(backups), backups)
    }
}
// waitForFiles polls the glob pattern until it matches at least `want`
// entries or the deadline expires. Returns the matched paths.
func waitForFiles(t *testing.T, pattern string, want int, timeout time.Duration) []string {
    t.Helper()
    deadline := time.Now().Add(timeout)
    for time.Now().Before(deadline) {
        matches, err := filepath.Glob(pattern)
        if err != nil {
            t.Fatalf("glob %s: %v", pattern, err)
        }
        if len(matches) >= want {
            return matches
        }
        time.Sleep(10 * time.Millisecond)
    }
    matches, _ := filepath.Glob(pattern)
    t.Fatalf("timed out waiting for %d files matching %s (got %d: %v)", want, pattern, len(matches), matches)
    return nil
}