Accumulate incoming messages in a buffered channel
Instead of using a deque, store incoming messages in a native buffered channel (if buffering is enabled). In addition, modify the batching algorithm so that the enforced delay between consecutive `addMessages` invocations is applied after all pending messages have been processed; it acts as a "cooldown" rather than a "warmup". This avoids the need for more complex timing logic to dispatch batches, removes the latency previously added when messages arrived infrequently, and natively blocks the goroutine until messages are received. Because the message processing loop always performs a blocking read first, it is just as appropriate for low-throughput environments as for high-throughput ones.

The default value of batchSize has been changed to 10, with a zero cooldown. This means that when messages arrive faster than they can be inserted into SQLite, they are automatically batched in groups of up to 10.
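The new loop itself is not shown in this summary, so here is a minimal sketch of the approach it describes. This is not the actual ntfy implementation; the names (`message`, `processMessages`, `addMessagesBatch`) and the channel capacity are assumptions for illustration. The key points it demonstrates: block on the first read, greedily drain up to `batchSize` pending messages, write the batch, then apply the cooldown.

```go
package main

import (
	"fmt"
	"time"
)

type message struct{ body string }

// addMessagesBatch stands in for the batched SQLite insert.
func addMessagesBatch(batch []*message) {
	fmt.Printf("inserting %d message(s)\n", len(batch))
}

// processMessages blocks until at least one message arrives, then greedily
// collects up to batchSize pending messages before handing the batch off.
// The cooldown is applied after the batch is written, so an idle queue adds
// no latency to the next incoming message.
func processMessages(ch <-chan *message, batchSize int, cooldown time.Duration) {
	for m := range ch { // blocking read: waits natively for the first message
		batch := []*message{m}
	drain:
		for len(batch) < batchSize {
			select {
			case next, ok := <-ch:
				if !ok {
					break drain // channel closed; flush the remainder
				}
				batch = append(batch, next)
			default:
				break drain // nothing else pending; ship what we have
			}
		}
		addMessagesBatch(batch)
		if cooldown > 0 {
			time.Sleep(cooldown) // "cooldown", not "warmup"
		}
	}
}

func main() {
	ch := make(chan *message, 16) // the buffered channel replaces the deque
	go processMessages(ch, 10, 0) // new defaults: batchSize 10, zero cooldown
	for i := 0; i < 25; i++ {
		ch <- &message{body: fmt.Sprintf("msg %d", i)}
	}
	time.Sleep(100 * time.Millisecond) // let the consumer finish (demo only)
}
```

With a fast producer, as here, the 25 messages arrive in batches of up to 10; with a slow producer, each message is processed immediately after the blocking read.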
This commit is contained in:
parent 6f170b1ad7
commit 09e8fb81b5

7 changed files with 140 additions and 168 deletions
util/batching_queue.go (deleted)
@@ -1,86 +0,0 @@

```go
package util

import (
	"sync"
	"time"
)

// BatchingQueue is a queue that creates batches of the enqueued elements based on a
// max batch size and a batch timeout.
//
// Example:
//
//	q := NewBatchingQueue[int](2, 500 * time.Millisecond)
//	go func() {
//		for batch := range q.Dequeue() {
//			fmt.Println(batch)
//		}
//	}()
//	q.Enqueue(1)
//	q.Enqueue(2)
//	q.Enqueue(3)
//	time.Sleep(time.Second)
//
// This example will emit batch [1, 2] immediately (because the batch size is 2), and
// a batch [3] after 500ms.
type BatchingQueue[T any] struct {
	batchSize int
	timeout   time.Duration
	in        []T
	out       chan []T
	mu        sync.Mutex
}

// NewBatchingQueue creates a new BatchingQueue
func NewBatchingQueue[T any](batchSize int, timeout time.Duration) *BatchingQueue[T] {
	q := &BatchingQueue[T]{
		batchSize: batchSize,
		timeout:   timeout,
		in:        make([]T, 0),
		out:       make(chan []T),
	}
	go q.timeoutTicker()
	return q
}

// Enqueue enqueues an element to the queue. If the configured batch size is reached,
// the batch will be emitted immediately.
func (q *BatchingQueue[T]) Enqueue(element T) {
	q.mu.Lock()
	q.in = append(q.in, element)
	var elements []T
	if len(q.in) == q.batchSize {
		elements = q.dequeueAll()
	}
	q.mu.Unlock()
	if len(elements) > 0 {
		q.out <- elements
	}
}

// Dequeue returns a channel emitting batches of elements
func (q *BatchingQueue[T]) Dequeue() <-chan []T {
	return q.out
}

func (q *BatchingQueue[T]) dequeueAll() []T {
	elements := make([]T, len(q.in))
	copy(elements, q.in)
	q.in = q.in[:0]
	return elements
}

func (q *BatchingQueue[T]) timeoutTicker() {
	if q.timeout == 0 {
		return
	}
	ticker := time.NewTicker(q.timeout)
	for range ticker.C {
		q.mu.Lock()
		elements := q.dequeueAll()
		q.mu.Unlock()
		if len(elements) > 0 {
			q.out <- elements
		}
	}
}
```
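For contrast, a small sketch of the latency the removed queue's timeout introduces; it uses the `BatchingQueue` API deleted above, so it only compiles against the pre-commit tree. A lone element never fills the batch and is only flushed when the ticker next fires, so an infrequent message can wait up to the full timeout even when the system is otherwise idle — the latency the commit message refers to.

```go
package main

import (
	"fmt"
	"time"

	"heckel.io/ntfy/util"
)

func main() {
	// Batch size 25, timeout 500ms: a single element is only emitted by the
	// timeout ticker, roughly 500ms after the queue was created.
	q := util.NewBatchingQueue[int](25, 500*time.Millisecond)
	start := time.Now()
	q.Enqueue(1)
	batch := <-q.Dequeue() // blocks until the first tick flushes [1]
	fmt.Printf("got %v after %s\n", batch, time.Since(start).Round(10*time.Millisecond))
}
```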
util/batching_queue_test.go (deleted)
@@ -1,58 +0,0 @@

```go
package util_test

import (
	"github.com/stretchr/testify/require"
	"heckel.io/ntfy/util"
	"math/rand"
	"sync"
	"testing"
	"time"
)

func TestBatchingQueue_InfTimeout(t *testing.T) {
	q := util.NewBatchingQueue[int](25, 1*time.Hour)
	batches, total := make([][]int, 0), 0
	var mu sync.Mutex
	go func() {
		for batch := range q.Dequeue() {
			mu.Lock()
			batches = append(batches, batch)
			total += len(batch)
			mu.Unlock()
		}
	}()
	for i := 0; i < 101; i++ {
		go q.Enqueue(i)
	}
	time.Sleep(time.Second)
	mu.Lock()
	require.Equal(t, 100, total) // One is missing, stuck in the last batch!
	require.Equal(t, 4, len(batches))
	mu.Unlock()
}

func TestBatchingQueue_WithTimeout(t *testing.T) {
	q := util.NewBatchingQueue[int](25, 100*time.Millisecond)
	batches, total := make([][]int, 0), 0
	var mu sync.Mutex
	go func() {
		for batch := range q.Dequeue() {
			mu.Lock()
			batches = append(batches, batch)
			total += len(batch)
			mu.Unlock()
		}
	}()
	for i := 0; i < 101; i++ {
		go func(i int) {
			time.Sleep(time.Duration(rand.Intn(700)) * time.Millisecond)
			q.Enqueue(i)
		}(i)
	}
	time.Sleep(time.Second)
	mu.Lock()
	require.Equal(t, 101, total)
	require.True(t, len(batches) > 4) // 101/25
	require.True(t, len(batches) < 21)
	mu.Unlock()
}
```