Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ Most browsers open a single connection for a download. Surge opens multiple (up
- **Multiple Mirrors:** Download from multiple sources simultaneously. Surge distributes workers across all available mirrors and automatically handles failover.
- **Sequential Download:** Option to download files in strict order (Streaming Mode). Ideal for media files that you want to preview while downloading.
- **Daemon Architecture:** Surge runs a single background "engine." You can open 10 different terminal tabs and queue downloads; they all funnel into one efficient manager.
- **Rate Limiting:** Control your bandwidth usage with a global cap across all downloads or a per-download limit.
- **Beautiful TUI:** Built with Bubble Tea & Lipgloss, it looks good while it works.

For a deep dive into how we make downloads faster (like work stealing and slow worker handling), check out our **[Optimization Guide](docs/OPTIMIZATIONS.md)**.
Expand Down
7 changes: 7 additions & 0 deletions cmd/root.go
Original file line number Diff line number Diff line change
Expand Up @@ -223,6 +223,13 @@ func ensureGlobalLocalServiceAndLifecycle() error {
Cancel: GlobalPool.Cancel,
UpdateURL: GlobalPool.UpdateURL,
PublishEvent: localService.Publish,
UpdateActiveRates: func(rate int64) {
for _, cfg := range GlobalPool.GetAll() {
if cfg.State != nil && cfg.State.Limiter != nil {
cfg.State.Limiter.SetRate(rate)
}
}
},
})

localService.SetLifecycleHooks(core.LifecycleHooks{
Expand Down
2 changes: 2 additions & 0 deletions docs/SETTINGS.md
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,8 @@ Surge follows OS conventions for storing its files. Below is a breakdown of ever
| `sequential_download` | bool | Download file pieces in strict order (Streaming Mode). Useful for previewing media but may be slower. | `false` |
| `min_chunk_size` | int64 | Minimum size of a download chunk in bytes (e.g., `2097152` for 2MB). | `2MB` |
| `worker_buffer_size` | int | I/O buffer size per worker in bytes (e.g., `524288` for 512KB). | `512KB` |
| `global_rate_limit` | int64 | Maximum total combined download speed in KB/s. `0` for unlimited. | `0` |
| `per_task_rate_limit` | int64 | Maximum speed for each individual download in KB/s. `0` for unlimited. | `0` |

### Performance Settings

Expand Down
1 change: 1 addition & 0 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ require (
github.com/spf13/cobra v1.10.2
github.com/stretchr/testify v1.11.1
github.com/vfaronov/httpheader v0.1.0
golang.org/x/time v0.15.0
modernc.org/sqlite v1.48.1
)

Expand Down
2 changes: 2 additions & 0 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,8 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=
golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
golang.org/x/time v0.15.0 h1:bbrp8t3bGUeFOx08pvsMYRTCVSMk89u4tKbNOZbp88U=
golang.org/x/time v0.15.0/go.mod h1:Y4YMaQmXwGQZoFaVFk4YpCt4FLQMYKZe9oeV/f4MSno=
golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k=
golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
Expand Down
8 changes: 8 additions & 0 deletions internal/config/settings.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,8 @@ type NetworkSettings struct {
SequentialDownload bool `json:"sequential_download" ui_label:"Sequential Download" ui_desc:"Download pieces in order (Streaming Mode). May be slower."`
MinChunkSize int64 `json:"min_chunk_size" ui_label:"Min Chunk Size" ui_desc:"Minimum download chunk size in MB (e.g., 2)."`
WorkerBufferSize int `json:"worker_buffer_size" ui_label:"Worker Buffer Size" ui_desc:"I/O buffer size per worker in KB (e.g., 512)."`
GlobalRateLimit int64 `json:"global_rate_limit" ui_label:"Global Rate Limit (KB/s)" ui_desc:"Maximum total combined download speed in KB/s. 0 for unlimited."`
PerTaskRateLimit int64 `json:"per_task_rate_limit" ui_label:"Per-Task Rate Limit (KB/s)" ui_desc:"Maximum speed for each individual download in KB/s. 0 for unlimited."`
}

// PerformanceSettings contains performance tuning parameters.
Expand Down Expand Up @@ -218,6 +220,8 @@ func DefaultSettings() *Settings {
SequentialDownload: false,
MinChunkSize: 2 * MB,
WorkerBufferSize: 512 * KB,
GlobalRateLimit: 0,
PerTaskRateLimit: 0,
},
Performance: PerformanceSettings{
MaxTaskRetries: 3,
Expand Down Expand Up @@ -301,6 +305,8 @@ type RuntimeConfig struct {
SequentialDownload bool
MinChunkSize int64
WorkerBufferSize int
GlobalRateLimit int64
PerTaskRateLimit int64
MaxTaskRetries int
SlowWorkerThreshold float64
SlowWorkerGracePeriod time.Duration
Expand All @@ -319,6 +325,8 @@ func (s *Settings) ToRuntimeConfig() *RuntimeConfig {
SequentialDownload: s.Network.SequentialDownload,
MinChunkSize: s.Network.MinChunkSize,
WorkerBufferSize: s.Network.WorkerBufferSize,
GlobalRateLimit: s.Network.GlobalRateLimit,
PerTaskRateLimit: s.Network.PerTaskRateLimit,
MaxTaskRetries: s.Performance.MaxTaskRetries,
SlowWorkerThreshold: s.Performance.SlowWorkerThreshold,
SlowWorkerGracePeriod: s.Performance.SlowWorkerGracePeriod,
Expand Down
14 changes: 14 additions & 0 deletions internal/core/local_service.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,19 @@ func (s *LocalDownloadService) ReloadSettings() error {
s.settingsMu.Lock()
s.settings = settings
s.settingsMu.Unlock()

// Update global rate limiter
utils.GlobalRateLimiter.SetRate(settings.Network.GlobalRateLimit * 1024)

// Update active per-task limiters
if s.Pool != nil {
rate := settings.Network.PerTaskRateLimit * 1024
for _, cfg := range s.Pool.GetAll() {
if cfg.State != nil && cfg.State.Limiter != nil {
cfg.State.Limiter.SetRate(rate)
}
}
}
return nil
}

Expand Down Expand Up @@ -499,6 +512,7 @@ func (s *LocalDownloadService) add(url string, path string, filename string, mir

state := types.NewProgressState(id, 0)
state.DestPath = filepath.Join(outPath, filename) // Best guess until download starts
state.Limiter = utils.NewTokenBucket(settings.Network.PerTaskRateLimit * 1024)

cfg := types.DownloadConfig{
URL: url,
Expand Down
99 changes: 99 additions & 0 deletions internal/engine/concurrent/ratelimit_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
package concurrent

import (
"context"
"os"
"path/filepath"
"testing"
"time"

"github.com/SurgeDM/Surge/internal/engine/types"
"github.com/SurgeDM/Surge/internal/testutil"
"github.com/SurgeDM/Surge/internal/utils"
)

// TestConcurrentDownloader_GlobalRateLimit verifies that the shared global
// token bucket throttles a multi-connection download: with a 64KB/s cap on a
// 128KB file, the first 64KB drains the burst instantly and the remainder
// must take roughly one second.
func TestConcurrentDownloader_GlobalRateLimit(t *testing.T) {
	tmpDir, cleanup, err := testutil.TempDir("surge-ratelimit-conc")
	if err != nil {
		t.Fatalf("TempDir: %v", err)
	}
	defer cleanup()

	fileSize := int64(128 * 1024) // 128KB
	server := testutil.NewMockServerT(t,
		testutil.WithFileSize(fileSize),
		testutil.WithRangeSupport(true),
	)
	defer server.Close()

	destPath := filepath.Join(tmpDir, "ratelimit_conc.bin")
	state := types.NewProgressState("ratelimit-conc", fileSize)
	runtime := &types.RuntimeConfig{
		MaxConnectionsPerHost: 2,
		MinChunkSize:          32 * 1024,
	}

	downloader := NewConcurrentDownloader("ratelimit-id", nil, state, runtime)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Apply global rate limit of 64KB/s.
	// First 64KB is instant (burst), next 64KB takes ~1s.
	utils.GlobalRateLimiter.SetRate(64 * 1024)
	// Restore "unlimited" so the package-level limiter does not leak into
	// other tests.
	defer utils.GlobalRateLimiter.SetRate(0)

	// Pre-create the .surge sidecar file.
	// NOTE(review): assumes the downloader expects this marker to exist —
	// confirm against the engine's resume logic.
	f, err := os.Create(destPath + ".surge")
	if err != nil {
		t.Fatalf("create .surge sidecar: %v", err)
	}
	if err := f.Close(); err != nil {
		t.Fatalf("close .surge sidecar: %v", err)
	}

	start := time.Now()
	if err := downloader.Download(ctx, server.URL(), nil, nil, destPath, fileSize); err != nil {
		t.Fatalf("Download failed: %v", err)
	}

	elapsed := time.Since(start)
	if elapsed < 800*time.Millisecond {
		t.Errorf("Download completed too fast (%v), global rate limit not applied", elapsed)
	}
}

// TestConcurrentDownloader_PerTaskRateLimit verifies that a per-task token
// bucket attached to the progress state throttles the download: with a
// 64KB/s limiter on a 128KB file, the transfer should take close to one
// second after the initial burst.
func TestConcurrentDownloader_PerTaskRateLimit(t *testing.T) {
	tmpDir, cleanup, err := testutil.TempDir("surge-ratelimit-conc-task")
	if err != nil {
		t.Fatalf("TempDir: %v", err)
	}
	defer cleanup()

	fileSize := int64(128 * 1024) // 128KB
	server := testutil.NewMockServerT(t,
		testutil.WithFileSize(fileSize),
		testutil.WithRangeSupport(true),
	)
	defer server.Close()

	destPath := filepath.Join(tmpDir, "ratelimit_conc_task.bin")
	state := types.NewProgressState("ratelimit-conc-task", fileSize)
	// Apply per-task rate limit of 64KB/s.
	state.Limiter = utils.NewTokenBucket(64 * 1024)

	runtime := &types.RuntimeConfig{
		MaxConnectionsPerHost: 2,
		MinChunkSize:          32 * 1024,
	}

	downloader := NewConcurrentDownloader("ratelimit-task-id", nil, state, runtime)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Pre-create the .surge sidecar file.
	// NOTE(review): assumes the downloader expects this marker to exist —
	// confirm against the engine's resume logic.
	f, err := os.Create(destPath + ".surge")
	if err != nil {
		t.Fatalf("create .surge sidecar: %v", err)
	}
	if err := f.Close(); err != nil {
		t.Fatalf("close .surge sidecar: %v", err)
	}

	start := time.Now()
	if err := downloader.Download(ctx, server.URL(), nil, nil, destPath, fileSize); err != nil {
		t.Fatalf("Download failed: %v", err)
	}

	elapsed := time.Since(start)
	if elapsed < 800*time.Millisecond {
		t.Errorf("Download completed too fast (%v), per task rate limit not applied", elapsed)
	}
}
13 changes: 13 additions & 0 deletions internal/engine/concurrent/worker.go
Original file line number Diff line number Diff line change
Expand Up @@ -283,6 +283,19 @@ func (d *ConcurrentDownloader) downloadTask(ctx context.Context, rawurl string,
for readSoFar < int(readSize) {
n, err := resp.Body.Read(buf[readSoFar:readSize])
if n > 0 {
// Apply per-task rate limit
if d.State != nil && d.State.Limiter != nil {
if limiterErr := d.State.Limiter.WaitN(ctx, n); limiterErr != nil {
readErr = limiterErr
break
}
}
// Apply global rate limit
if limiterErr := utils.GlobalRateLimiter.WaitN(ctx, n); limiterErr != nil {
readErr = limiterErr
break
}

readSoFar += n
// CONTINUOUS HEALTH KEEPALIVE:
// Update LastActivity directly off the TCP socket instead of waiting for the buffer
Expand Down
25 changes: 18 additions & 7 deletions internal/engine/single/downloader.go
Original file line number Diff line number Diff line change
Expand Up @@ -188,16 +188,13 @@ func (d *SingleDownloader) Download(ctx context.Context, rawurl, destPath string

start := time.Now()
var written int64

bufPtr := bufPool.Get().(*[]byte)
buf := *bufPtr
defer bufPool.Put(bufPtr)

if d.State == nil {
written, err = io.CopyBuffer(outFile, resp.Body, buf)
} else {
progressReader := newProgressReader(resp.Body, d.State, types.WorkerBatchSize, types.WorkerBatchInterval)
written, err = io.CopyBuffer(outFile, progressReader, buf)
progressReader := newProgressReader(ctx, resp.Body, d.State, types.WorkerBatchSize, types.WorkerBatchInterval)
written, err = io.CopyBuffer(outFile, progressReader, buf)
if d.State != nil {
progressReader.Flush()
}
if err != nil {
Expand Down Expand Up @@ -237,6 +234,7 @@ func (d *SingleDownloader) Download(ctx context.Context, rawurl, destPath string
}

type progressReader struct {
ctx context.Context
reader io.Reader
state *types.ProgressState
batchSize int64
Expand All @@ -247,11 +245,12 @@ type progressReader struct {
readChecks uint8
}

func newProgressReader(reader io.Reader, state *types.ProgressState, batchSize int64, batchInterval time.Duration) *progressReader {
func newProgressReader(ctx context.Context, reader io.Reader, state *types.ProgressState, batchSize int64, batchInterval time.Duration) *progressReader {
if batchSize <= 0 {
batchSize = types.WorkerBatchSize
}
return &progressReader{
ctx: ctx,
reader: reader,
state: state,
batchSize: batchSize,
Expand All @@ -262,6 +261,18 @@ func newProgressReader(reader io.Reader, state *types.ProgressState, batchSize i

func (w *progressReader) Read(p []byte) (int, error) {
n, err := w.reader.Read(p)

if n > 0 {
if w.state != nil && w.state.Limiter != nil {
if limiterErr := w.state.Limiter.WaitN(w.ctx, n); limiterErr != nil {
return n, limiterErr
}
}
if limiterErr := utils.GlobalRateLimiter.WaitN(w.ctx, n); limiterErr != nil {
return n, limiterErr
}
}

if n <= 0 || w.state == nil {
return n, err
}
Expand Down
92 changes: 92 additions & 0 deletions internal/engine/single/ratelimit_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
package single

import (
"context"
"os"
"path/filepath"
"testing"
"time"

"github.com/SurgeDM/Surge/internal/engine/types"
"github.com/SurgeDM/Surge/internal/testutil"
"github.com/SurgeDM/Surge/internal/utils"
)

// TestSingleDownloader_GlobalRateLimit verifies the single-connection path
// honors the shared global token bucket: with a 64KB/s cap on a 128KB file
// (no range support, so one stream), the download must take close to one
// second after the initial burst.
func TestSingleDownloader_GlobalRateLimit(t *testing.T) {
	tmpDir, cleanup, err := testutil.TempDir("surge-ratelimit-single")
	if err != nil {
		t.Fatalf("TempDir: %v", err)
	}
	defer cleanup()

	fileSize := int64(128 * 1024) // 128KB
	server := testutil.NewMockServerT(t,
		testutil.WithFileSize(fileSize),
		testutil.WithRangeSupport(false),
	)
	defer server.Close()

	destPath := filepath.Join(tmpDir, "ratelimit_single.bin")
	state := types.NewProgressState("ratelimit-single", fileSize)
	runtime := &types.RuntimeConfig{}

	downloader := NewSingleDownloader("ratelimit-id", nil, state, runtime)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Apply global rate limit of 64KB/s; restore "unlimited" afterwards so
	// the package-level limiter does not leak into other tests.
	utils.GlobalRateLimiter.SetRate(64 * 1024)
	defer utils.GlobalRateLimiter.SetRate(0)

	// Pre-create the .surge sidecar file.
	// NOTE(review): assumes the downloader expects this marker to exist —
	// confirm against the engine's resume logic.
	f, err := os.Create(destPath + ".surge")
	if err != nil {
		t.Fatalf("create .surge sidecar: %v", err)
	}
	if err := f.Close(); err != nil {
		t.Fatalf("close .surge sidecar: %v", err)
	}

	start := time.Now()
	if err := downloader.Download(ctx, server.URL(), destPath, fileSize, "ratelimit.bin"); err != nil {
		t.Fatalf("Download failed: %v", err)
	}

	elapsed := time.Since(start)
	if elapsed < 800*time.Millisecond {
		t.Errorf("Download completed too fast (%v), global rate limit not applied", elapsed)
	}
}

// TestSingleDownloader_PerTaskRateLimit verifies the single-connection path
// honors a per-task token bucket on the progress state: a 64KB/s limiter on
// a 128KB file should keep the transfer above ~800ms.
func TestSingleDownloader_PerTaskRateLimit(t *testing.T) {
	tmpDir, cleanup, err := testutil.TempDir("surge-ratelimit-single-task")
	if err != nil {
		t.Fatalf("TempDir: %v", err)
	}
	defer cleanup()

	fileSize := int64(128 * 1024) // 128KB
	server := testutil.NewMockServerT(t,
		testutil.WithFileSize(fileSize),
		testutil.WithRangeSupport(false),
	)
	defer server.Close()

	destPath := filepath.Join(tmpDir, "ratelimit_single_task.bin")
	state := types.NewProgressState("ratelimit-single-task", fileSize)
	// Apply per-task rate limit of 64KB/s.
	state.Limiter = utils.NewTokenBucket(64 * 1024)

	runtime := &types.RuntimeConfig{}

	downloader := NewSingleDownloader("ratelimit-task-id", nil, state, runtime)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Pre-create the .surge sidecar file.
	// NOTE(review): assumes the downloader expects this marker to exist —
	// confirm against the engine's resume logic.
	f, err := os.Create(destPath + ".surge")
	if err != nil {
		t.Fatalf("create .surge sidecar: %v", err)
	}
	if err := f.Close(); err != nil {
		t.Fatalf("close .surge sidecar: %v", err)
	}

	start := time.Now()
	if err := downloader.Download(ctx, server.URL(), destPath, fileSize, "ratelimit.bin"); err != nil {
		t.Fatalf("Download failed: %v", err)
	}

	elapsed := time.Since(start)
	if elapsed < 800*time.Millisecond {
		t.Errorf("Download completed too fast (%v), per-task rate limit not applied", elapsed)
	}
}
2 changes: 2 additions & 0 deletions internal/engine/types/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,8 @@ type RuntimeConfig struct {
MinChunkSize int64

WorkerBufferSize int
GlobalRateLimit int64
PerTaskRateLimit int64
MaxTaskRetries int
SlowWorkerThreshold float64
SlowWorkerGracePeriod time.Duration
Expand Down
Loading
Loading