Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 19 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,25 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [v1.2.0] - 2026-02-15

### Added

- Stack - classic LIFO stack in several implementations:
- StaticStack - stack with fixed capacity
- DynamicStack - stack with unlimited capacity that grows dynamically
- SyncStack - provides thread-safe wrappers for the methods of Stack interface

### Changed

- Logger:
- Added dependency injection for FileLogger (the FileLoggerConfig.SerializerProducer property): FileLogger now relies on the Serializer interface to encode logs.
This change removes the hard dependency on the JSON-lines log format.

### Removed

- All third-party dependencies

## [1.1.0] - 2026-02-08

### Added
Expand Down
13 changes: 13 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,19 @@ queue.Push("item2")
item, ok := queue.Pop()
```

#### Stack

Classic LIFO stack in several implementations.
```go
stack := structs.NewStaticStack[string](10)

stack.Push("example 1")
stack.Push("example 2")
stack.Pop() // "example 2", true
stack.Pop() // "example 1", true
stack.Pop() // "", false
```

#### WorkerPool
Concurrent task processing with configurable worker count and graceful shutdown.

Expand Down
8 changes: 1 addition & 7 deletions SECURITY.md
Original file line number Diff line number Diff line change
Expand Up @@ -48,13 +48,7 @@ This library contains high-performance concurrent data structures. Be aware of:

### Dependencies

Current dependencies are minimal and regularly updated:

```
github.com/json-iterator/go v1.1.12
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421
github.com/modern-go/reflect2 v1.0.2
```
There are no third-party dependencies for this library.

### No User Data Collection

Expand Down
7 changes: 0 additions & 7 deletions go.mod
Original file line number Diff line number Diff line change
@@ -1,10 +1,3 @@
module github.com/abaxoth0/Ain

go 1.23.0

require github.com/json-iterator/go v1.1.12

require (
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
)
15 changes: 0 additions & 15 deletions go.sum
Original file line number Diff line number Diff line change
@@ -1,15 +0,0 @@
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
28 changes: 12 additions & 16 deletions logger/file-logger.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@ import (
"time"

"github.com/abaxoth0/Ain/structs"
jsoniter "github.com/json-iterator/go"
)

const (
Expand All @@ -30,9 +29,10 @@ type FileLoggerConfig struct {
// File permissions for log files
FilePerm os.FileMode //Default: 0644
// Amount of goroutines in fallback WorkerPool (which is used only when main ring buffer is overflowed).
FallbackWorkers int // Default: 5
FallbackBatchSize int // Default: 500
StopTimeout time.Duration // Default: 10 sec; To disable set to < 0
FallbackWorkers int // Default: 5
FallbackBatchSize int // Default: 500
StopTimeout time.Duration // Default: 10 sec; To disable set to < 0
SerializerProducer func() Serializer // Default: func () Serializer { return NewJSONSerializer() }

*LoggerConfig
}
Expand All @@ -57,6 +57,9 @@ func (c *FileLoggerConfig) fillEmptySettings() {
if c.StopTimeout < 0 {
c.StopTimeout = time.Duration((1 << 63) - 1)
}
if c.SerializerProducer == nil {
c.SerializerProducer = func() Serializer { return NewJSONSerializer() }
}
}

// Implements concurrent file-based logging with forwarding capabilities.
Expand Down Expand Up @@ -91,7 +94,7 @@ func NewFileLogger(config *FileLoggerConfig) (*FileLogger, error) {
forwardings: []Logger{},
streamPool: sync.Pool{
New: func() any {
return jsoniter.NewStream(jsoniter.ConfigFastest, nil, 1024)
return config.SerializerProducer()
},
},
config: config,
Expand Down Expand Up @@ -219,29 +222,22 @@ func (l *FileLogger) Stop(strict bool) error {
}

func (l *FileLogger) handler(entry *LogEntry) {
stream := l.streamPool.Get().(*jsoniter.Stream)
stream := l.streamPool.Get().(Serializer)
defer l.streamPool.Put(stream)

stream.Reset(nil)
stream.Error = nil
stream.Reset()

stream.WriteVal(entry)
if stream.Error != nil {
if err := stream.WriteVal(entry); err != nil {
// TODO:
// Need to somehow handle failed logs commits, cuz currently they are just loss.
// (Push to fallback? Retry queue/buffer?)
return
}

if stream.Buffered() > 0 {
// Add newline to ensure each log entry is on its own line
stream.WriteRaw("\n")
}

// NOTE:
// Logger from built-in "log" package uses mutexes and atomic operations
// under the hood, so it's already thread safe.
l.logger.Writer().Write(stream.Buffer())
l.logger.Writer().Write(append(stream.Buffer(), '\n'))
}

func (l *FileLogger) log(entry *LogEntry) {
Expand Down
119 changes: 119 additions & 0 deletions logger/logger_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -699,6 +699,125 @@ func TestHandleCritical(t *testing.T) {
})
}

// mockSerializer is a Serializer test double: it records which methods were
// invoked and emits a fixed JSON payload regardless of input.
type mockSerializer struct {
	resetCalled bool
	writeCalled bool
	data        []byte
}

// Reset marks the call and empties the recorded payload, keeping capacity.
func (m *mockSerializer) Reset() {
	m.resetCalled = true
	m.data = m.data[:0]
}

// WriteVal marks the call and appends a canned JSON object, ignoring v.
func (m *mockSerializer) WriteVal(v any) error {
	m.writeCalled = true
	m.data = append(m.data, `{"test":"data"}`...)
	return nil
}

// Buffer returns whatever WriteVal has accumulated since the last Reset.
func (m *mockSerializer) Buffer() []byte {
	return m.data
}

// TestFileLoggerSerializerDI verifies the Serializer dependency-injection
// point on FileLoggerConfig (SerializerProducer): a custom producer is
// invoked, a nil producer falls back to the default serializer, and the
// injected serializer actually receives Reset/WriteVal calls.
func TestFileLoggerSerializerDI(t *testing.T) {
	t.Run("custom serializer producer is used", func(t *testing.T) {
		tmpDir := t.TempDir()

		// The producer flips this flag when the logger pulls a serializer
		// from its pool for the first time.
		producerCalled := false
		logger, err := NewFileLogger(&FileLoggerConfig{
			Path: tmpDir,
			SerializerProducer: func() Serializer {
				producerCalled = true
				return &mockSerializer{}
			},
		})
		if err != nil {
			t.Fatalf("Expected no error, got %v", err)
		}

		logger.Init()
		if err := logger.Start(); err != nil {
			t.Fatalf("Expected no error starting logger, got %v", err)
		}

		entry := NewLogEntry(InfoLogLevel, "test_source", "test message", "", nil)
		logger.Log(&entry)

		// NOTE(review): the 100ms sleep gives the async handler time to
		// drain the entry before Stop — timing-based, may flake under load.
		time.Sleep(100 * time.Millisecond)
		if err := logger.Stop(true); err != nil {
			t.Errorf("Failed to stop logger: %v\n", err)
		}

		if !producerCalled {
			t.Error("Expected serializer producer to be called")
		}
	})

	t.Run("default serializer when producer is nil", func(t *testing.T) {
		tmpDir := t.TempDir()

		// No SerializerProducer set: construction must still succeed and the
		// logger must run end-to-end on the default serializer.
		logger, err := NewFileLogger(&FileLoggerConfig{
			Path: tmpDir,
		})
		if err != nil {
			t.Fatalf("Expected no error, got %v", err)
		}

		if logger == nil {
			t.Fatal("Expected logger to be non-nil with nil producer")
		}

		logger.Init()
		if err := logger.Start(); err != nil {
			t.Fatalf("Expected no error starting logger, got %v", err)
		}

		entry := NewLogEntry(InfoLogLevel, "test_source", "test message", "", nil)
		logger.Log(&entry)

		// See NOTE(review) above: timing-based flush window before Stop.
		time.Sleep(100 * time.Millisecond)
		if err := logger.Stop(true); err != nil {
			t.Errorf("Failed to stop logger: %v\n", err)
		}
	})

	t.Run("serializer receives reset and write calls", func(t *testing.T) {
		tmpDir := t.TempDir()

		// A single shared mock instance is returned for every pool Get, so
		// its recorded flags reflect use by the logger's handler.
		serializer := &mockSerializer{}
		logger, err := NewFileLogger(&FileLoggerConfig{
			Path: tmpDir,
			SerializerProducer: func() Serializer {
				return serializer
			},
		})
		if err != nil {
			t.Fatalf("Expected no error, got %v", err)
		}

		logger.Init()
		if err := logger.Start(); err != nil {
			t.Fatalf("Expected no error starting logger, got %v", err)
		}

		entry := NewLogEntry(InfoLogLevel, "test_source", "test message", "", nil)
		logger.Log(&entry)

		// See NOTE(review) above: timing-based flush window before Stop.
		time.Sleep(100 * time.Millisecond)
		if err := logger.Stop(true); err != nil {
			t.Errorf("Failed to stop logger: %v\n", err)
		}

		if !serializer.resetCalled {
			t.Error("Expected serializer Reset() to be called")
		}
		if !serializer.writeCalled {
			t.Error("Expected serializer WriteVal() to be called")
		}
	})
}

func TestConcurrentLogging(t *testing.T) {
t.Run("concurrent source logging", func(t *testing.T) {
mock := &mockLogger{}
Expand Down
51 changes: 51 additions & 0 deletions logger/serializer.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
package logger

import (
"bytes"
"encoding/json"
)

// Serializer encodes log entries into a reusable internal byte buffer.
//
// NOTE(review): implementations do not need to be internally thread-safe.
// Instances are handed out via a sync.Pool, which gives each goroutine
// exclusive use of an instance between Get and Put; the default
// JSONSerializer relies on this and is not safe for simultaneous use by
// multiple goroutines. Confirm against the pool usage in FileLogger.
type Serializer interface {
	// Clears the serializer's internal buffer, preparing it for a new entry.
	Reset()
	// Serializes the given value to the internal buffer, returning any
	// encoding error.
	WriteVal(v any) error
	// Returns the serialized data. The returned slice aliases the internal
	// buffer and is only valid until the next Reset or WriteVal.
	Buffer() []byte
}

// JSONSerializer is the default Serializer implementation, built on the
// standard library's encoding/json package. A single bytes.Buffer backs
// every call; callers Reset it between entries so allocations are amortized
// over the serializer's lifetime (e.g. when pooled via sync.Pool).
type JSONSerializer struct {
	buf *bytes.Buffer
	enc *json.Encoder
}

// NewJSONSerializer constructs a JSONSerializer whose encoder writes
// directly into its internal buffer.
func NewJSONSerializer() *JSONSerializer {
	var b bytes.Buffer
	return &JSONSerializer{
		buf: &b,
		enc: json.NewEncoder(&b),
	}
}

// Reset discards previously serialized data, keeping buffer capacity.
func (s *JSONSerializer) Reset() {
	s.buf.Reset()
}

// WriteVal encodes v as JSON into the internal buffer and reports any
// encoding error.
func (s *JSONSerializer) WriteVal(v any) error {
	err := s.enc.Encode(v)
	if err != nil {
		return err
	}
	// json.Encoder.Encode always terminates its output with '\n'; drop it
	// so the caller controls line framing.
	s.buf.Truncate(s.buf.Len() - 1)
	return nil
}

// Buffer returns the bytes serialized so far. The slice aliases the
// internal buffer and is invalidated by the next Reset or WriteVal.
func (s *JSONSerializer) Buffer() []byte {
	return s.buf.Bytes()
}
Loading
Loading