diff --git a/server/lib/events/event.go b/server/lib/events/event.go new file mode 100644 index 00000000..eb9c5674 --- /dev/null +++ b/server/lib/events/event.go @@ -0,0 +1,68 @@ +package events + +import ( + "encoding/json" + "strings" +) + +// maxS2RecordBytes is the S2 record size limit (SCHEMA-04). +const maxS2RecordBytes = 1_000_000 + +// EventCategory maps event type prefixes to log file names. +type EventCategory string + +const ( + CategoryCDP EventCategory = "cdp" + CategoryConsole EventCategory = "console" + CategoryNetwork EventCategory = "network" + CategoryLiveview EventCategory = "liveview" + CategoryCaptcha EventCategory = "captcha" +) + +// BrowserEvent is the canonical event structure for the browser capture pipeline. +type BrowserEvent struct { + CaptureSessionID string `json:"capture_session_id"` + Seq uint64 `json:"seq"` + Ts int64 `json:"ts"` + Type string `json:"type"` + TargetID string `json:"target_id,omitempty"` + CDPSessionID string `json:"cdp_session_id,omitempty"` + FrameID string `json:"frame_id,omitempty"` + ParentFrameID string `json:"parent_frame_id,omitempty"` + URL string `json:"url,omitempty"` + Data json.RawMessage `json:"data,omitempty"` + Truncated bool `json:"truncated,omitempty"` +} + +// CategoryFor returns the log category for a given event type. +// Event types follow the pattern "<prefix>_<detail>", e.g. "console_log", +// "network_request", "cdp_navigation". Types not matching a known prefix +// fall through to CategoryCDP as a safe default. +func CategoryFor(eventType string) EventCategory { + prefix, _, _ := strings.Cut(eventType, "_") + switch prefix { + case "console": + return CategoryConsole + case "network": + return CategoryNetwork + case "liveview": + return CategoryLiveview + case "captcha": + return CategoryCaptcha + default: + return CategoryCDP + } +} + +// truncateIfNeeded returns a copy of ev with Data replaced with json.RawMessage("null") +// and Truncated set to true if the marshaled size exceeds maxS2RecordBytes.
+// Per RESEARCH pitfall 3: never attempt byte-slice truncation of the Data field. +func truncateIfNeeded(ev BrowserEvent) BrowserEvent { + candidate, err := json.Marshal(ev) + if err != nil || len(candidate) <= maxS2RecordBytes { + return ev + } + ev.Data = json.RawMessage("null") + ev.Truncated = true + return ev +} diff --git a/server/lib/events/events_test.go b/server/lib/events/events_test.go new file mode 100644 index 00000000..30cd5528 --- /dev/null +++ b/server/lib/events/events_test.go @@ -0,0 +1,474 @@ +package events + +import ( + "bytes" + "context" + "encoding/json" + "os" + "path/filepath" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestBrowserEvent: construct BrowserEvent with all SCHEMA-01 fields; marshal to JSON; +// assert all snake_case keys present. +func TestBrowserEvent(t *testing.T) { + ev := BrowserEvent{ + CaptureSessionID: "test-session-id", + Seq: 1, + Ts: 1234567890000, + Type: "console_log", + TargetID: "target-1", + CDPSessionID: "cdp-session-1", + FrameID: "frame-1", + ParentFrameID: "parent-frame-1", + URL: "https://example.com", + Data: json.RawMessage(`{"message":"hello"}`), + Truncated: false, + } + + b, err := json.Marshal(ev) + require.NoError(t, err) + + s := string(b) + assert.Contains(t, s, `"capture_session_id"`) + assert.Contains(t, s, `"seq"`) + assert.Contains(t, s, `"ts"`) + assert.Contains(t, s, `"type"`) + assert.Contains(t, s, `"target_id"`) + assert.Contains(t, s, `"cdp_session_id"`) + assert.Contains(t, s, `"frame_id"`) + assert.Contains(t, s, `"parent_frame_id"`) + assert.Contains(t, s, `"url"`) + assert.Contains(t, s, `"data"`) +} + +// TestBrowserEventData: embed a pre-serialized JSON object in Data field; marshal outer event; +// assert Data appears verbatim (no double-encoding). 
+func TestBrowserEventData(t *testing.T) { + rawData := json.RawMessage(`{"key":"value","num":42}`) + ev := BrowserEvent{ + CaptureSessionID: "test-session", + Seq: 1, + Ts: 1000, + Type: "cdp_event", + Data: rawData, + } + + b, err := json.Marshal(ev) + require.NoError(t, err) + + s := string(b) + // Data must appear verbatim — no double-encoding (should not be escaped string) + assert.Contains(t, s, `"data":{"key":"value","num":42}`) + assert.NotContains(t, s, `"data":"{`) // would indicate double-encoding +} + +// TestCategoryFor: table-driven; assert prefix routing is correct. +func TestCategoryFor(t *testing.T) { + cases := []struct { + eventType string + expected EventCategory + }{ + {"console_log", CategoryConsole}, + {"network_request", CategoryNetwork}, + {"liveview_click", CategoryLiveview}, + {"captcha_solve", CategoryCaptcha}, + {"cdp_nav", CategoryCDP}, + {"unknown_type", CategoryCDP}, + } + + for _, tc := range cases { + t.Run(tc.eventType, func(t *testing.T) { + got := CategoryFor(tc.eventType) + assert.Equal(t, tc.expected, got) + }) + } +} + +// TestRingBuffer: publish 3 events; reader reads all 3 in order. +func TestRingBuffer(t *testing.T) { + rb := NewRingBuffer(10) + reader := rb.NewReader() + + events := []BrowserEvent{ + {Seq: 1, Type: "cdp_event_1"}, + {Seq: 2, Type: "cdp_event_2"}, + {Seq: 3, Type: "cdp_event_3"}, + } + + for _, ev := range events { + rb.Publish(ev) + } + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + for i, expected := range events { + got, err := reader.Read(ctx) + require.NoError(t, err, "reading event %d", i) + assert.Equal(t, expected.Type, got.Type) + } +} + +// TestRingBufferOverflow: ring capacity 2; publish 3 events with no reader; +// assert write returns immediately (no block); reader receives events_dropped then newest events. 
+func TestRingBufferOverflow(t *testing.T) { + rb := NewRingBuffer(2) + + // Publish 3 events with no reader — must not block + done := make(chan struct{}) + go func() { + rb.Publish(BrowserEvent{Seq: 1, Type: "cdp_event_1"}) + rb.Publish(BrowserEvent{Seq: 2, Type: "cdp_event_2"}) + rb.Publish(BrowserEvent{Seq: 3, Type: "cdp_event_3"}) + close(done) + }() + + select { + case <-done: + // good — did not block + case <-time.After(500 * time.Millisecond): + t.Fatal("Publish blocked with no readers") + } + + // Create reader after overflow; should get events_dropped then available events + reader := rb.NewReader() + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + first, err := reader.Read(ctx) + require.NoError(t, err) + assert.Equal(t, "events_dropped", first.Type) +} + +// TestEventsDropped: ring capacity 2; reader gets notify channel; publish 3 events; +// reader reads; assert first result is events_dropped BrowserEvent. +func TestEventsDropped(t *testing.T) { + rb := NewRingBuffer(2) + reader := rb.NewReader() + + // Publish 3 events, overflowing the ring (capacity 2) + rb.Publish(BrowserEvent{Seq: 1, Type: "cdp_event_1"}) + rb.Publish(BrowserEvent{Seq: 2, Type: "cdp_event_2"}) + rb.Publish(BrowserEvent{Seq: 3, Type: "cdp_event_3"}) + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + first, err := reader.Read(ctx) + require.NoError(t, err) + assert.Equal(t, "events_dropped", first.Type) + + // Data must be valid JSON with a "dropped" count + require.NotNil(t, first.Data) + assert.True(t, json.Valid(first.Data)) + assert.Contains(t, string(first.Data), `"dropped"`) +} + +// TestConcurrentReaders: 3 readers subscribe before publish; publish 5 events; +// each reader independently reads all 5; no reader affects another. 
+func TestConcurrentReaders(t *testing.T) { + rb := NewRingBuffer(20) + + numReaders := 3 + numEvents := 5 + + readers := make([]*Reader, numReaders) + for i := range readers { + readers[i] = rb.NewReader() + } + + // Publish events after readers are created + for i := 0; i < numEvents; i++ { + rb.Publish(BrowserEvent{Seq: uint64(i + 1), Type: "cdp_event"}) + } + + var wg sync.WaitGroup + results := make([][]BrowserEvent, numReaders) + + for i, r := range readers { + wg.Add(1) + go func(idx int, reader *Reader) { + defer wg.Done() + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + var evs []BrowserEvent + for j := 0; j < numEvents; j++ { + ev, err := reader.Read(ctx) + require.NoError(t, err) + evs = append(evs, ev) + } + results[idx] = evs + }(i, r) + } + + wg.Wait() + + // Each reader must have received all 5 events + for i, evs := range results { + assert.Len(t, evs, numEvents, "reader %d", i) + for j, ev := range evs { + assert.Equal(t, uint64(j+1), ev.Seq, "reader %d event %d", i, j) + } + } +} + +// TestFileWriter: per-category JSONL appender tests. 
+func TestFileWriter(t *testing.T) { + t.Run("writes_to_correct_file", func(t *testing.T) { + dir := t.TempDir() + fw := NewFileWriter(dir) + defer fw.Close() + + ev := BrowserEvent{ + CaptureSessionID: "sess-1", + Seq: 1, + Ts: 1000, + Type: "console_log", + Data: json.RawMessage(`{"message":"hello"}`), + } + require.NoError(t, fw.Write(ev)) + + data, err := os.ReadFile(filepath.Join(dir, "console.log")) + require.NoError(t, err) + + lines := strings.Split(strings.TrimRight(string(data), "\n"), "\n") + require.Len(t, lines, 1) + assert.True(t, json.Valid([]byte(lines[0]))) + assert.Contains(t, lines[0], `"capture_session_id"`) + assert.Contains(t, lines[0], `"console_log"`) + }) + + t.Run("category_routing", func(t *testing.T) { + dir := t.TempDir() + fw := NewFileWriter(dir) + defer fw.Close() + + typeToFile := map[string]string{ + "console_log": "console.log", + "network_request": "network.log", + "liveview_click": "liveview.log", + "captcha_solve": "captcha.log", + "cdp_navigation": "cdp.log", + } + + for typ := range typeToFile { + require.NoError(t, fw.Write(BrowserEvent{Type: typ, Seq: 1, Ts: 1})) + } + + for typ, file := range typeToFile { + data, err := os.ReadFile(filepath.Join(dir, file)) + require.NoError(t, err, "missing file for type %s", typ) + assert.True(t, json.Valid(bytes.TrimRight(data, "\n"))) + } + }) + + t.Run("concurrent_writes", func(t *testing.T) { + dir := t.TempDir() + fw := NewFileWriter(dir) + defer fw.Close() + + const goroutines = 10 + const eventsPerGoroutine = 100 + + var wg sync.WaitGroup + for i := 0; i < goroutines; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + for j := 0; j < eventsPerGoroutine; j++ { + ev := BrowserEvent{ + Seq: uint64(i*eventsPerGoroutine + j), + Type: "console_log", + Ts: 1, + } + require.NoError(t, fw.Write(ev)) + } + }(i) + } + wg.Wait() + + data, err := os.ReadFile(filepath.Join(dir, "console.log")) + require.NoError(t, err) + + lines := strings.Split(strings.TrimRight(string(data), "\n"), 
"\n") + assert.Len(t, lines, goroutines*eventsPerGoroutine) + for _, line := range lines { + assert.True(t, json.Valid([]byte(line)), "invalid JSON line: %s", line) + } + }) + + t.Run("lazy_open", func(t *testing.T) { + dir := t.TempDir() + fw := NewFileWriter(dir) + defer fw.Close() + + // No writes yet — directory should be empty. + entries, err := os.ReadDir(dir) + require.NoError(t, err) + assert.Empty(t, entries, "files opened before first Write") + + require.NoError(t, fw.Write(BrowserEvent{Type: "console_log", Seq: 1, Ts: 1})) + + entries, err = os.ReadDir(dir) + require.NoError(t, err) + assert.Len(t, entries, 1, "expected exactly one file after first Write") + assert.Equal(t, "console.log", entries[0].Name()) + }) +} + +// TestPipeline: Pipeline glue type tests. +func TestPipeline(t *testing.T) { + newPipeline := func(t *testing.T) (*Pipeline, string) { + t.Helper() + dir := t.TempDir() + rb := NewRingBuffer(100) + fw := NewFileWriter(dir) + p := NewPipeline(rb, fw) + t.Cleanup(func() { p.Close() }) + return p, dir + } + + t.Run("publish_increments_seq", func(t *testing.T) { + p, _ := newPipeline(t) + reader := p.NewReader() + + for i := 0; i < 3; i++ { + p.Publish(BrowserEvent{Type: "cdp_event", Ts: 1}) + } + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + for want := uint64(1); want <= 3; want++ { + ev, err := reader.Read(ctx) + require.NoError(t, err) + assert.Equal(t, want, ev.Seq, "expected seq %d got %d", want, ev.Seq) + } + }) + + t.Run("publish_sets_ts", func(t *testing.T) { + p, _ := newPipeline(t) + reader := p.NewReader() + + before := time.Now().UnixMilli() + p.Publish(BrowserEvent{Type: "cdp_event"}) // Ts == 0 + after := time.Now().UnixMilli() + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + ev, err := reader.Read(ctx) + require.NoError(t, err) + assert.GreaterOrEqual(t, ev.Ts, before) + assert.LessOrEqual(t, ev.Ts, after) + }) + + 
t.Run("publish_writes_file", func(t *testing.T) { + p, dir := newPipeline(t) + + p.Publish(BrowserEvent{Type: "console_log", Ts: 1}) + + data, err := os.ReadFile(filepath.Join(dir, "console.log")) + require.NoError(t, err) + + lines := strings.Split(strings.TrimRight(string(data), "\n"), "\n") + require.Len(t, lines, 1) + assert.True(t, json.Valid([]byte(lines[0]))) + assert.Contains(t, lines[0], `"console_log"`) + }) + + t.Run("publish_writes_ring", func(t *testing.T) { + p, _ := newPipeline(t) + + // Subscribe reader BEFORE publish. + reader := p.NewReader() + p.Publish(BrowserEvent{Type: "cdp_event", Ts: 1}) + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + ev, err := reader.Read(ctx) + require.NoError(t, err) + assert.Equal(t, "cdp_event", ev.Type) + }) + + t.Run("start_sets_capture_session_id", func(t *testing.T) { + p, _ := newPipeline(t) + p.Start("test-uuid") + + reader := p.NewReader() + p.Publish(BrowserEvent{Type: "cdp_event", Ts: 1}) + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + ev, err := reader.Read(ctx) + require.NoError(t, err) + assert.Equal(t, "test-uuid", ev.CaptureSessionID) + }) + + t.Run("truncation_applied", func(t *testing.T) { + p, dir := newPipeline(t) + reader := p.NewReader() + + largeData := strings.Repeat("x", 1_100_000) + rawData, err := json.Marshal(map[string]string{"payload": largeData}) + require.NoError(t, err) + + p.Publish(BrowserEvent{ + Type: "cdp_event", + Ts: 1, + Data: json.RawMessage(rawData), + }) + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + // Ring buffer event must have Truncated==true. + ev, err := reader.Read(ctx) + require.NoError(t, err) + assert.True(t, ev.Truncated) + + // File must contain valid JSON with truncated==true. 
+ data, err := os.ReadFile(filepath.Join(dir, "cdp.log")) + require.NoError(t, err) + lines := strings.Split(strings.TrimRight(string(data), "\n"), "\n") + require.Len(t, lines, 1) + assert.True(t, json.Valid([]byte(lines[0]))) + assert.Contains(t, lines[0], `"truncated":true`) + }) +} + +// TestTruncation: construct event with Data = 1.1MB JSON bytes; call truncateIfNeeded; +// assert Truncated==true and json.Valid(result.Data)==true and len(marshal(result)) <= 1_000_000. +func TestTruncation(t *testing.T) { + // Build a Data field that is ~1.1MB + largeData := strings.Repeat("x", 1_100_000) + rawData, err := json.Marshal(map[string]string{"payload": largeData}) + require.NoError(t, err) + + ev := BrowserEvent{ + CaptureSessionID: "test-session", + Seq: 1, + Ts: 1000, + Type: "cdp_event", + Data: json.RawMessage(rawData), + } + + result := truncateIfNeeded(ev) + + assert.True(t, result.Truncated) + assert.True(t, json.Valid(result.Data)) + + marshaled, err := json.Marshal(result) + require.NoError(t, err) + assert.LessOrEqual(t, len(marshaled), 1_000_000) +} diff --git a/server/lib/events/filewriter.go b/server/lib/events/filewriter.go new file mode 100644 index 00000000..4b40d204 --- /dev/null +++ b/server/lib/events/filewriter.go @@ -0,0 +1,78 @@ +package events + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "path/filepath" + "sync" +) + +// FileWriter is a per-category JSONL appender. It opens each log file lazily on +// first write (O_APPEND|O_CREATE|O_WRONLY) and serialises all concurrent writes, +// across every category, with a single shared mutex. +type FileWriter struct { + mu sync.Mutex + files map[EventCategory]*os.File + dir string +} + +// NewFileWriter returns a FileWriter that writes to dir. +// No files are opened until the first Write call.
+func NewFileWriter(dir string) *FileWriter { + return &FileWriter{dir: dir, files: make(map[EventCategory]*os.File)} +} + +// Write serialises ev to JSON and appends it as a single JSONL line to the +// per-category log file. The mutex is held for the entire open+marshal+write +// sequence to prevent TOCTOU races and to guarantee whole-line atomicity for +// events larger than PIPE_BUF. +func (fw *FileWriter) Write(ev BrowserEvent) error { + cat := CategoryFor(ev.Type) + + fw.mu.Lock() + defer fw.mu.Unlock() + + // Lazy open. + f, ok := fw.files[cat] + if !ok { + path := filepath.Join(fw.dir, string(cat)+".log") + var err error + f, err = os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) + if err != nil { + return fmt.Errorf("filewriter: open %s: %w", path, err) + } + fw.files[cat] = f + } + + data, err := json.Marshal(ev) + if err != nil { + return fmt.Errorf("filewriter: marshal: %w", err) + } + + var buf bytes.Buffer + buf.Write(data) + buf.WriteByte('\n') + + if _, err := f.Write(buf.Bytes()); err != nil { + return fmt.Errorf("filewriter: write: %w", err) + } + + return nil +} + +// Close closes all open log file descriptors. The first encountered error is +// returned; subsequent files are still closed. +func (fw *FileWriter) Close() error { + fw.mu.Lock() + defer fw.mu.Unlock() + + var firstErr error + for _, f := range fw.files { + if err := f.Close(); err != nil && firstErr == nil { + firstErr = err + } + } + return firstErr +} diff --git a/server/lib/events/pipeline.go b/server/lib/events/pipeline.go new file mode 100644 index 00000000..11661150 --- /dev/null +++ b/server/lib/events/pipeline.go @@ -0,0 +1,67 @@ +package events + +import ( + "sync/atomic" + "time" +) + +// Pipeline glues a RingBuffer and a FileWriter into a single write path. 
+// A single call to Publish stamps the event with a monotonic sequence number, +// applies truncation, appends it (best-effort) to the per-category log file, and +// then makes it available to ring buffer readers. +type Pipeline struct { + ring *RingBuffer + files *FileWriter + seq atomic.Uint64 + captureSessionID atomic.Value // stores string +} + +// NewPipeline returns a Pipeline backed by the supplied ring and file writer. +func NewPipeline(ring *RingBuffer, files *FileWriter) *Pipeline { + p := &Pipeline{ring: ring, files: files} + p.captureSessionID.Store("") + return p +} + +// Start sets the capture session ID that will be stamped on every subsequent +// published event. It may be called at any time; the change is immediately +// visible to concurrent Publish calls. +func (p *Pipeline) Start(captureSessionID string) { + p.captureSessionID.Store(captureSessionID) +} + +// Publish stamps, truncates, files, and broadcasts a single event. +// +// Ordering: +// 1. Stamp CaptureSessionID, Seq, Ts (Ts only if caller left it zero) +// 2. Apply truncateIfNeeded (SCHEMA-04) — must happen before both sinks +// 3. Write to FileWriter (best-effort: errors dropped, no fsync) +// 4. Publish to RingBuffer (in-memory fan-out) +// +// Errors from FileWriter.Write are silently dropped; the ring buffer always +// receives the event even if the file write fails. +func (p *Pipeline) Publish(ev BrowserEvent) { + ev.CaptureSessionID = p.captureSessionID.Load().(string) + ev.Seq = p.seq.Add(1) // starts at 1 + if ev.Ts == 0 { + ev.Ts = time.Now().UnixMilli() + } + ev = truncateIfNeeded(ev) + + // File write first — best-effort append before in-memory fan-out. + _ = p.files.Write(ev) + + // Ring buffer last — readers see the event after the file is written. + p.ring.Publish(ev) +} + +// NewReader returns a Reader positioned at the start of the ring buffer.
+func (p *Pipeline) NewReader() *Reader { + return p.ring.NewReader() +} + +// Close closes the underlying FileWriter, flushing and releasing all open +// file descriptors. +func (p *Pipeline) Close() error { + return p.files.Close() +} diff --git a/server/lib/events/ringbuffer.go b/server/lib/events/ringbuffer.go new file mode 100644 index 00000000..3911912e --- /dev/null +++ b/server/lib/events/ringbuffer.go @@ -0,0 +1,110 @@ +package events + +import ( + "context" + "encoding/json" + "fmt" + "sync" +) + +// RingBuffer is a fixed-capacity circular buffer with closed-channel broadcast fan-out. +// Writers never block regardless of reader count or speed. +// Readers track their position by seq value (not ring index) and receive an +// events_dropped synthetic BrowserEvent when they fall behind the oldest retained event. +type RingBuffer struct { + mu sync.RWMutex + buf []BrowserEvent + head int // next write position (mod cap) + count int // items currently stored (0..cap) + written uint64 // total ever published (monotonic) + notify chan struct{} +} + +// NewRingBuffer creates a new RingBuffer with the given capacity. +func NewRingBuffer(capacity int) *RingBuffer { + return &RingBuffer{ + buf: make([]BrowserEvent, capacity), + notify: make(chan struct{}), + } +} + +// Publish adds an event to the ring buffer, evicting the oldest entry on overflow. +// Closes the current notify channel (waking all waiting readers) and replaces it +// with a new one — outside the lock to avoid blocking under contention. +func (rb *RingBuffer) Publish(ev BrowserEvent) { + rb.mu.Lock() + rb.buf[rb.head] = ev + rb.head = (rb.head + 1) % len(rb.buf) + if rb.count < len(rb.buf) { + rb.count++ + } + rb.written++ + old := rb.notify + rb.notify = make(chan struct{}) + rb.mu.Unlock() + close(old) // outside lock to avoid blocking under contention +} + +// oldestSeq returns the seq of the oldest event still in the ring. +// Must be called under at least a read lock. 
+func (rb *RingBuffer) oldestSeq() uint64 { + if rb.written <= uint64(len(rb.buf)) { + return 0 + } + return rb.written - uint64(len(rb.buf)) +} + +// NewReader returns a Reader positioned at seq 0. +// If the ring has already published events, the reader will receive an +// events_dropped BrowserEvent on the first Read call if it has fallen behind +// the oldest retained event. +func (rb *RingBuffer) NewReader() *Reader { + return &Reader{rb: rb, nextSeq: 0} +} + +// Reader tracks an independent read position in a RingBuffer. +type Reader struct { + rb *RingBuffer + nextSeq uint64 +} + +// Read blocks until the next event is available or ctx is cancelled. +// Returns (event, nil) for a normal event. +// Returns (events_dropped BrowserEvent, nil) if the reader has fallen behind +// the ring's oldest retained event — the dropped count is in Data as valid JSON. +func (r *Reader) Read(ctx context.Context) (BrowserEvent, error) { + for { + r.rb.mu.RLock() + notify := r.rb.notify + oldest := r.rb.oldestSeq() + written := r.rb.written + + // Reader fell behind — synthesize events_dropped before advancing. + if r.nextSeq < oldest { + dropped := oldest - r.nextSeq + r.nextSeq = oldest + r.rb.mu.RUnlock() + data := json.RawMessage(fmt.Sprintf(`{"dropped":%d}`, dropped)) + return BrowserEvent{Type: "events_dropped", Data: data}, nil + } + + // Event is available — read it. + if r.nextSeq < written { + idx := int(r.nextSeq % uint64(len(r.rb.buf))) + ev := r.rb.buf[idx] + r.nextSeq++ + r.rb.mu.RUnlock() + return ev, nil + } + + // No event yet — wait for notification. + r.rb.mu.RUnlock() + + select { + case <-ctx.Done(): + return BrowserEvent{}, ctx.Err() + case <-notify: + // new event available; loop to read it + } + } +}