Making RateLimiter and FixedLimiter, so they can both work with LimitWriter
parent f6b9ebb693
commit c76e55a1c8
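As a quick orientation (annotation added here, not part of the commit): the change introduces a util.Limiter interface with a single Allow(n int64) error method, so the existing fixed-cap limiter (now FixedLimiter) and the new time-based RateLimiter can both be handed to LimitWriter. A minimal sketch of the idea, mirroring the new TestLimitWriter_WriteTwoDifferentLimiters test and assuming the heckel.io/ntfy/util import path:

package main

import (
    "bytes"
    "fmt"
    "time"

    "heckel.io/ntfy/util"
)

func main() {
    var buf bytes.Buffer
    fixed := util.NewFixedLimiter(32)                      // hard cap: 32 bytes total
    rated := util.NewBytesLimiter(8, 200*time.Millisecond) // ~8 bytes per 200ms, replenished over time
    w := util.NewLimitWriter(&buf, fixed, rated)           // both satisfy util.Limiter

    _, err := w.Write(make([]byte, 8)) // fits both limiters
    fmt.Println(err)                   // <nil>
    _, err = w.Write(make([]byte, 4))  // rate bucket is empty until it refills
    fmt.Println(err == util.ErrLimitReached) // true
}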
@@ -40,7 +40,7 @@ func newFileCache(dir string, totalSizeLimit int64, fileSizeLimit int64) (*fileC
     }, nil
 }
 
-func (c *fileCache) Write(id string, in io.Reader, limiters ...*util.Limiter) (int64, error) {
+func (c *fileCache) Write(id string, in io.Reader, limiters ...util.Limiter) (int64, error) {
     if !fileIDRegex.MatchString(id) {
         return 0, errInvalidFileID
     }
@@ -53,7 +53,7 @@ func (c *fileCache) Write(id string, in io.Reader, limiters ...*util.Limiter) (i
         return 0, err
     }
     defer f.Close()
-    limiters = append(limiters, util.NewLimiter(c.Remaining()), util.NewLimiter(c.fileSizeLimit))
+    limiters = append(limiters, util.NewFixedLimiter(c.Remaining()), util.NewFixedLimiter(c.fileSizeLimit))
     limitWriter := util.NewLimitWriter(f, limiters...)
     size, err := io.Copy(limitWriter, in)
     if err != nil {
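Since the limiters parameter is now the interface type, call sites can stack their own limiters on top of the cache's total-size and per-file limits. A hypothetical call-site sketch (writeAttachment and the daily bandwidth allowance are made up for illustration; assumes it sits in the same package as fileCache, with io, time and util already imported):

// Hypothetical sketch, not part of this commit.
func writeAttachment(cache *fileCache, id string, body io.Reader, remainingVisitorBytes int64) (int64, error) {
    perVisitor := util.NewFixedLimiter(remainingVisitorBytes)      // fixed per-visitor byte budget
    bandwidth := util.NewBytesLimiter(250*1024*1024, 24*time.Hour) // floating ~250 MB/day allowance (made up)
    // Write appends the cache's own total-size and per-file limiters internally,
    // and the failed-limiter test below shows that no file is left behind on ErrLimitReached.
    return cache.Write(id, body, perVisitor, bandwidth)
}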
@@ -16,7 +16,7 @@ var (
 
 func TestFileCache_Write_Success(t *testing.T) {
     dir, c := newTestFileCache(t)
-    size, err := c.Write("abc", strings.NewReader("normal file"), util.NewLimiter(999))
+    size, err := c.Write("abc", strings.NewReader("normal file"), util.NewFixedLimiter(999))
     require.Nil(t, err)
     require.Equal(t, int64(11), size)
     require.Equal(t, "normal file", readFile(t, dir+"/abc"))
@@ -64,7 +64,7 @@ func TestFileCache_Write_FailedFileSizeLimit(t *testing.T) {
 
 func TestFileCache_Write_FailedAdditionalLimiter(t *testing.T) {
     dir, c := newTestFileCache(t)
-    _, err := c.Write("abc", bytes.NewReader(make([]byte, 1001)), util.NewLimiter(1000))
+    _, err := c.Write("abc", bytes.NewReader(make([]byte, 1001)), util.NewFixedLimiter(1000))
     require.Equal(t, util.ErrLimitReached, err)
     require.NoFileExists(t, dir+"/abc")
 }
@@ -648,7 +648,7 @@ func (s *Server) handleBodyAsAttachment(r *http.Request, v *visitor, m *message,
     if m.Message == "" {
         m.Message = fmt.Sprintf(defaultAttachmentMessage, m.Attachment.Name)
     }
-    m.Attachment.Size, err = s.fileCache.Write(m.ID, body, util.NewLimiter(remainingVisitorAttachmentSize))
+    m.Attachment.Size, err = s.fileCache.Write(m.ID, body, util.NewFixedLimiter(remainingVisitorAttachmentSize))
     if err == util.ErrLimitReached {
         return errHTTPBadRequestAttachmentTooLarge
     } else if err != nil {
@@ -909,13 +909,6 @@ func toMessage(t *testing.T, s string) *message {
     return &m
 }
 
-func tempFile(t *testing.T, length int) (filename string, content string) {
-    filename = filepath.Join(t.TempDir(), util.RandomString(10))
-    content = util.RandomString(length)
-    require.Nil(t, os.WriteFile(filename, []byte(content), 0600))
-    return
-}
-
 func toHTTPError(t *testing.T, s string) *errHTTP {
     var e errHTTP
     require.Nil(t, json.NewDecoder(strings.NewReader(s)).Decode(&e))
@@ -24,7 +24,7 @@ type visitor struct {
     config        *Config
     ip            string
     requests      *rate.Limiter
-    subscriptions *util.Limiter
+    subscriptions util.Limiter
     emails        *rate.Limiter
     seen          time.Time
     mu            sync.Mutex
@@ -35,7 +35,7 @@ func newVisitor(conf *Config, ip string) *visitor {
         config:        conf,
         ip:            ip,
         requests:      rate.NewLimiter(rate.Every(conf.VisitorRequestLimitReplenish), conf.VisitorRequestLimitBurst),
-        subscriptions: util.NewLimiter(int64(conf.VisitorSubscriptionLimit)),
+        subscriptions: util.NewFixedLimiter(int64(conf.VisitorSubscriptionLimit)),
         emails:        rate.NewLimiter(rate.Every(conf.VisitorEmailLimitReplenish), conf.VisitorEmailLimitBurst),
         seen:          time.Now(),
     }
@@ -62,7 +62,7 @@ func (v *visitor) EmailAllowed() error {
 func (v *visitor) SubscriptionAllowed() error {
     v.mu.Lock()
     defer v.mu.Unlock()
-    if err := v.subscriptions.Add(1); err != nil {
+    if err := v.subscriptions.Allow(1); err != nil {
         return errVisitorLimitReached
     }
     return nil
@@ -71,7 +71,7 @@ func (v *visitor) SubscriptionAllowed() error {
 func (v *visitor) RemoveSubscription() {
     v.mu.Lock()
     defer v.mu.Unlock()
-    v.subscriptions.Sub(1)
+    v.subscriptions.Allow(-1)
 }
 
 func (v *visitor) Keepalive() {
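The subscription cap works because FixedLimiter.Allow also accepts negative values, which turns it into a bounded counter: Allow(1) on subscribe, Allow(-1) on unsubscribe. A standalone sketch of that pattern (illustrative only, assuming the heckel.io/ntfy/util import path):

package main

import (
    "fmt"

    "heckel.io/ntfy/util"
)

func main() {
    subscriptions := util.NewFixedLimiter(3) // at most 3 concurrent subscriptions
    for i := 0; i < 3; i++ {
        if err := subscriptions.Allow(1); err != nil {
            fmt.Println("unexpected:", err)
        }
    }
    fmt.Println(subscriptions.Allow(1) == util.ErrLimitReached) // true: limit reached
    subscriptions.Allow(-1)                                     // a subscriber goes away
    fmt.Println(subscriptions.Allow(1) == nil)                  // true: there is room again
}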
@@ -2,31 +2,39 @@ package util
 
 import (
     "errors"
+    "golang.org/x/time/rate"
     "io"
     "sync"
+    "time"
 )
 
 // ErrLimitReached is the error returned by the Limiter and LimitWriter when the predefined limit has been reached
 var ErrLimitReached = errors.New("limit reached")
 
-// Limiter is a helper that allows adding values up to a well-defined limit. Once the limit is reached
-// ErrLimitReached will be returned. Limiter may be used by multiple goroutines.
-type Limiter struct {
+// Limiter is an interface that implements a rate limiting mechanism, e.g. based on time or a fixed value
+type Limiter interface {
+    // Allow adds n to the limiters internal value, or returns ErrLimitReached if the limit has been reached
+    Allow(n int64) error
+}
+
+// FixedLimiter is a helper that allows adding values up to a well-defined limit. Once the limit is reached
+// ErrLimitReached will be returned. FixedLimiter may be used by multiple goroutines.
+type FixedLimiter struct {
     value int64
     limit int64
     mu    sync.Mutex
 }
 
-// NewLimiter creates a new Limiter
-func NewLimiter(limit int64) *Limiter {
-    return &Limiter{
+// NewFixedLimiter creates a new Limiter
+func NewFixedLimiter(limit int64) *FixedLimiter {
+    return &FixedLimiter{
         limit: limit,
     }
 }
 
-// Add adds n to the limiters internal value, but only if the limit has not been reached. If the limit was
+// Allow adds n to the limiters internal value, but only if the limit has not been reached. If the limit was
 // exceeded after adding n, ErrLimitReached is returned.
-func (l *Limiter) Add(n int64) error {
+func (l *FixedLimiter) Allow(n int64) error {
     l.mu.Lock()
     defer l.mu.Unlock()
     if l.value+n > l.limit {
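Anything with an Allow(n int64) error method now satisfies util.Limiter and can be passed to LimitWriter alongside the built-in limiters. As an illustration only (not part of the commit), a custom limiter that never rejects and merely counts bytes:

package main

import (
    "bytes"
    "fmt"
    "sync/atomic"

    "heckel.io/ntfy/util" // import path assumed
)

// countingLimiter is a made-up Limiter implementation: it never returns
// ErrLimitReached, it only tracks how many bytes have passed through.
type countingLimiter struct {
    total int64
}

func (c *countingLimiter) Allow(n int64) error {
    atomic.AddInt64(&c.total, n)
    return nil
}

func main() {
    var buf bytes.Buffer
    counter := &countingLimiter{}
    w := util.NewLimitWriter(&buf, util.NewFixedLimiter(1024), counter)
    w.Write([]byte("hello"))
    fmt.Println(atomic.LoadInt64(&counter.total)) // 5
}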
@@ -36,29 +44,34 @@ func (l *Limiter) Add(n int64) error {
     return nil
 }
 
-// Sub subtracts a value from the limiters internal value
-func (l *Limiter) Sub(n int64) {
-    l.Add(-n)
+// RateLimiter is a Limiter that wraps a rate.Limiter, allowing a floating time-based limit.
+type RateLimiter struct {
+    limiter *rate.Limiter
 }
 
-// Set sets the value of the limiter to n. This function ignores the limit. It is meant to set the value
-// based on reality.
-func (l *Limiter) Set(n int64) {
-    l.mu.Lock()
-    l.value = n
-    l.mu.Unlock()
+// NewRateLimiter creates a new RateLimiter
+func NewRateLimiter(r rate.Limit, b int) *RateLimiter {
+    return &RateLimiter{
+        limiter: rate.NewLimiter(r, b),
+    }
 }
 
-// Value returns the internal value of the limiter
-func (l *Limiter) Value() int64 {
-    l.mu.Lock()
-    defer l.mu.Unlock()
-    return l.value
+// NewBytesLimiter creates a RateLimiter that is meant to be used for a bytes-per-interval limit,
+// e.g. 250 MB per day. And example of the underlying idea can be found here: https://go.dev/play/p/0ljgzIZQ6dJ
+func NewBytesLimiter(bytes int, interval time.Duration) *RateLimiter {
+    return NewRateLimiter(rate.Limit(bytes)*rate.Every(interval), bytes)
 }
 
-// Limit returns the defined limit
-func (l *Limiter) Limit() int64 {
-    return l.limit
+// Allow adds n to the limiters internal value, but only if the limit has not been reached. If the limit was
+// exceeded after adding n, ErrLimitReached is returned.
+func (l *RateLimiter) Allow(n int64) error {
+    if n <= 0 {
+        return nil // No-op. Can't take back bytes you're written!
+    }
+    if !l.limiter.AllowN(time.Now(), int(n)) {
+        return ErrLimitReached
+    }
+    return nil
 }
 
 // LimitWriter implements an io.Writer that will pass through all Write calls to the underlying
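The arithmetic in NewBytesLimiter: rate.Every(interval) is one token per interval, so rate.Limit(bytes) * rate.Every(interval) comes out to bytes/interval tokens (bytes) per second, with a burst of bytes. For the 250 MB per 24h case used in the tests that is roughly 3034 bytes per second, or about 303 bytes per 100ms, which matches the comment in TestBytesLimiter_Add_Wait. A small sanity-check sketch (illustrative only):

package main

import (
    "fmt"
    "time"

    "golang.org/x/time/rate"
)

func main() {
    bytes := 250 * 1024 * 1024 // 250 MB
    interval := 24 * time.Hour

    // Same expression as NewBytesLimiter: bytes replenished per second
    limit := rate.Limit(bytes) * rate.Every(interval)
    fmt.Printf("%.0f bytes/s, ~%.0f bytes per 100ms, burst %d\n", float64(limit), float64(limit)/10, bytes)
    // prints: 3034 bytes/s, ~303 bytes per 100ms, burst 262144000
}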
@@ -67,12 +80,12 @@ func (l *Limiter) Limit() int64 {
 type LimitWriter struct {
     w        io.Writer
     written  int64
-    limiters []*Limiter
+    limiters []Limiter
     mu       sync.Mutex
 }
 
 // NewLimitWriter creates a new LimitWriter
-func NewLimitWriter(w io.Writer, limiters ...*Limiter) *LimitWriter {
+func NewLimitWriter(w io.Writer, limiters ...Limiter) *LimitWriter {
     return &LimitWriter{
         w:        w,
         limiters: limiters,
@@ -84,9 +97,9 @@ func (w *LimitWriter) Write(p []byte) (n int, err error) {
     w.mu.Lock()
     defer w.mu.Unlock()
     for i := 0; i < len(w.limiters); i++ {
-        if err := w.limiters[i].Add(int64(len(p))); err != nil {
+        if err := w.limiters[i].Allow(int64(len(p))); err != nil {
             for j := i - 1; j >= 0; j-- {
-                w.limiters[j].Sub(int64(len(p)))
+                w.limiters[j].Allow(-int64(len(p))) // Revert limiters limits if allowed
             }
             return 0, ErrLimitReached
         }
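One detail of the revert loop above: when a later limiter rejects the write, the earlier limiters are rolled back by calling Allow with a negative count. That restores a FixedLimiter's internal value, while RateLimiter deliberately ignores n <= 0, since tokens already taken from the underlying rate.Limiter cannot be returned. A small sketch showing why the rollback matters (illustrative only, import path assumed):

package main

import (
    "bytes"
    "fmt"

    "heckel.io/ntfy/util"
)

func main() {
    var buf bytes.Buffer
    outer := util.NewFixedLimiter(12) // would be stuck at 10/12 without the rollback
    inner := util.NewFixedLimiter(5)  // rejects anything larger than 5 bytes
    w := util.NewLimitWriter(&buf, outer, inner)

    _, err := w.Write(make([]byte, 10))      // inner rejects; outer is rolled back via Allow(-10)
    fmt.Println(err == util.ErrLimitReached) // true
    fmt.Println(buf.Len())                   // 0: nothing reached the underlying writer

    _, err = w.Write(make([]byte, 5)) // succeeds only because outer was reverted to 0
    fmt.Println(err, buf.Len())       // <nil> 5
}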
@@ -2,34 +2,51 @@ package util
 
 import (
     "bytes"
+    "github.com/stretchr/testify/require"
     "testing"
+    "time"
 )
 
-func TestLimiter_Add(t *testing.T) {
-    l := NewLimiter(10)
-    if err := l.Add(5); err != nil {
+func TestFixedLimiter_Add(t *testing.T) {
+    l := NewFixedLimiter(10)
+    if err := l.Allow(5); err != nil {
         t.Fatal(err)
     }
-    if err := l.Add(5); err != nil {
+    if err := l.Allow(5); err != nil {
         t.Fatal(err)
     }
-    if err := l.Add(5); err != ErrLimitReached {
+    if err := l.Allow(5); err != ErrLimitReached {
         t.Fatalf("expected ErrLimitReached, got %#v", err)
     }
 }
 
-func TestLimiter_AddSet(t *testing.T) {
-    l := NewLimiter(10)
-    l.Add(5)
-    if l.Value() != 5 {
-        t.Fatalf("expected value to be %d, got %d", 5, l.Value())
+func TestFixedLimiter_AddSub(t *testing.T) {
+    l := NewFixedLimiter(10)
+    l.Allow(5)
+    if l.value != 5 {
+        t.Fatalf("expected value to be %d, got %d", 5, l.value)
     }
-    l.Set(7)
-    if l.Value() != 7 {
-        t.Fatalf("expected value to be %d, got %d", 7, l.Value())
+    l.Allow(-2)
+    if l.value != 3 {
+        t.Fatalf("expected value to be %d, got %d", 7, l.value)
     }
 }
 
+func TestBytesLimiter_Add_Simple(t *testing.T) {
+    l := NewBytesLimiter(250*1024*1024, 24*time.Hour) // 250 MB per 24h
+    require.Nil(t, l.Allow(100*1024*1024))
+    require.Nil(t, l.Allow(100*1024*1024))
+    require.Equal(t, ErrLimitReached, l.Allow(300*1024*1024))
+}
+
+func TestBytesLimiter_Add_Wait(t *testing.T) {
+    l := NewBytesLimiter(250*1024*1024, 24*time.Hour) // 250 MB per 24h (~ 303 bytes per 100ms)
+    require.Nil(t, l.Allow(250*1024*1024))
+    require.Equal(t, ErrLimitReached, l.Allow(400))
+    time.Sleep(200 * time.Millisecond)
+    require.Nil(t, l.Allow(400))
+}
+
 func TestLimitWriter_WriteNoLimiter(t *testing.T) {
     var buf bytes.Buffer
     lw := NewLimitWriter(&buf)
@@ -46,7 +63,7 @@ func TestLimitWriter_WriteNoLimiter(t *testing.T) {
 
 func TestLimitWriter_WriteOneLimiter(t *testing.T) {
     var buf bytes.Buffer
-    l := NewLimiter(10)
+    l := NewFixedLimiter(10)
     lw := NewLimitWriter(&buf, l)
     if _, err := lw.Write(make([]byte, 10)); err != nil {
         t.Fatal(err)
@@ -57,15 +74,15 @@ func TestLimitWriter_WriteOneLimiter(t *testing.T) {
     if buf.Len() != 10 {
         t.Fatalf("expected buffer length to be %d, got %d", 10, buf.Len())
     }
-    if l.Value() != 10 {
-        t.Fatalf("expected limiter value to be %d, got %d", 10, l.Value())
+    if l.value != 10 {
+        t.Fatalf("expected limiter value to be %d, got %d", 10, l.value)
     }
 }
 
 func TestLimitWriter_WriteTwoLimiters(t *testing.T) {
     var buf bytes.Buffer
-    l1 := NewLimiter(11)
-    l2 := NewLimiter(9)
+    l1 := NewFixedLimiter(11)
+    l2 := NewFixedLimiter(9)
     lw := NewLimitWriter(&buf, l1, l2)
     if _, err := lw.Write(make([]byte, 8)); err != nil {
         t.Fatal(err)
@@ -76,10 +93,47 @@ func TestLimitWriter_WriteTwoLimiters(t *testing.T) {
     if buf.Len() != 8 {
         t.Fatalf("expected buffer length to be %d, got %d", 8, buf.Len())
     }
-    if l1.Value() != 8 {
-        t.Fatalf("expected limiter 1 value to be %d, got %d", 8, l1.Value())
+    if l1.value != 8 {
+        t.Fatalf("expected limiter 1 value to be %d, got %d", 8, l1.value)
     }
-    if l2.Value() != 8 {
-        t.Fatalf("expected limiter 2 value to be %d, got %d", 8, l2.Value())
+    if l2.value != 8 {
+        t.Fatalf("expected limiter 2 value to be %d, got %d", 8, l2.value)
     }
 }
+
+func TestLimitWriter_WriteTwoDifferentLimiters(t *testing.T) {
+    var buf bytes.Buffer
+    l1 := NewFixedLimiter(32)
+    l2 := NewBytesLimiter(8, 200*time.Millisecond)
+    lw := NewLimitWriter(&buf, l1, l2)
+    _, err := lw.Write(make([]byte, 8))
+    require.Nil(t, err)
+    _, err = lw.Write(make([]byte, 4))
+    require.Equal(t, ErrLimitReached, err)
+}
+
+func TestLimitWriter_WriteTwoDifferentLimiters_Wait(t *testing.T) {
+    var buf bytes.Buffer
+    l1 := NewFixedLimiter(32)
+    l2 := NewBytesLimiter(8, 200*time.Millisecond)
+    lw := NewLimitWriter(&buf, l1, l2)
+    _, err := lw.Write(make([]byte, 8))
+    require.Nil(t, err)
+    time.Sleep(250 * time.Millisecond)
+    _, err = lw.Write(make([]byte, 8))
+    require.Nil(t, err)
+    _, err = lw.Write(make([]byte, 4))
+    require.Equal(t, ErrLimitReached, err)
+}
+
+func TestLimitWriter_WriteTwoDifferentLimiters_Wait_FixedLimiterFail(t *testing.T) {
+    var buf bytes.Buffer
+    l1 := NewFixedLimiter(11) // <<< This fails below
+    l2 := NewBytesLimiter(8, 200*time.Millisecond)
+    lw := NewLimitWriter(&buf, l1, l2)
+    _, err := lw.Write(make([]byte, 8))
+    require.Nil(t, err)
+    time.Sleep(250 * time.Millisecond)
+    _, err = lw.Write(make([]byte, 8)) // <<< FixedLimiter fails
+    require.Equal(t, ErrLimitReached, err)
+}