refactor: add separation of concerns in fetcher and platform

Alexander Navarro 2024-11-30 18:14:04 -03:00
parent 96af51ee68
commit 4094f71a7d
5 changed files with 227 additions and 145 deletions


@@ -61,12 +61,12 @@ type Worker[T, S any] struct {
// - rate_limit: default is 10 requests per second
// - timeout: if 0, timeout is disabled
type WorkConfig struct {
-	tasks_processed   sync.WaitGroup   // Wait group to synchronize task completion.
-	amount_of_workers uint8            // Number of workers to spawn.
-	max_retries       uint8            // Maximum number of retries for a task before being cancelled.
-	base_retry_time   time.Duration    // Base factor to wait for before retrying a task.
-	rate_limit        <-chan time.Time // Ticker to limit the number of requests. It is recommended to pass the result of calling NewRateLimiter().
-	timeout           time.Duration    // Maximum execution time allowed for a task before being cancelled.
+	TasksProcessed  sync.WaitGroup   // Wait group to synchronize task completion.
+	AmountOfWorkers uint8            // Number of workers to spawn.
+	MaxRetries      uint8            // Maximum number of retries for a task before being cancelled.
+	BaseRetryTime   time.Duration    // Base factor to wait for before retrying a task.
+	RateLimit       <-chan time.Time // Ticker to limit the number of requests. It is recommended to pass the result of calling NewRateLimiter().
+	Timeout         time.Duration    // Maximum execution time allowed for a task before being cancelled.
}
// Group the channels used for task processing for easy access between functions.
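With the fields exported, a caller outside the package can now populate the config directly. A minimal sketch, assuming the package is imported as `pool` (an assumed name); fields left at their zero value fall back to the documented defaults:

```go
// Hypothetical caller-side setup; "pool" is an assumed import name.
config := pool.WorkConfig{
	AmountOfWorkers: 8,                                    // default is 5 when left at 0
	MaxRetries:      3,                                    // 0 disables retries
	BaseRetryTime:   500 * time.Millisecond,               // doubled on every retry
	RateLimit:       pool.NewRateLimiter(10, time.Second), // 10 requests per second
	Timeout:         30 * time.Second,                     // 0 disables the per-task timeout
}
```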
@@ -142,9 +142,9 @@ func handleFailedWorkUnit[T, S any](
config *WorkConfig,
) bool {
// If retries == 0, retries are disabled, return immediately
-	if config.max_retries == 0 || config.max_retries <= workUnit.attempts {
+	if config.MaxRetries == 0 || config.MaxRetries <= workUnit.attempts {
channels.tasks_failed <- workUnit.err
-		config.tasks_processed.Done()
+		config.TasksProcessed.Done()
return false
}
@@ -152,7 +152,7 @@ func handleFailedWorkUnit[T, S any](
workUnit.err = nil
if workUnit.timeout == 0 {
-		workUnit.timeout = config.base_retry_time
+		workUnit.timeout = config.BaseRetryTime
} else {
workUnit.timeout *= 2
}
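This branch implements plain exponential backoff: the first retry waits BaseRetryTime, and every subsequent retry doubles the previous wait. A self-contained sketch of the resulting schedule:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	base := 1 * time.Second // stands in for config.BaseRetryTime
	wait := time.Duration(0)
	for attempt := 1; attempt <= 4; attempt++ {
		if wait == 0 {
			wait = base // first retry waits the base time
		} else {
			wait *= 2 // every later retry doubles the previous wait
		}
		fmt.Printf("retry %d after %v\n", attempt, wait) // 1s, 2s, 4s, 8s
	}
}
```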
@@ -200,7 +200,7 @@ func listenForWorkResults[T, S any](
// Send message to user
channels.tasks_done <- workUnit.result
-			config.tasks_processed.Done()
+			config.TasksProcessed.Done()
case <-ctx.Done():
return
}
@@ -237,7 +237,7 @@ func workUnitDispatcher[T, S any](
attempts: 0,
}
channels.units_dispatcher <- workUnit
-			config.tasks_processed.Add(1)
+			config.TasksProcessed.Add(1)
case <-ctx.Done():
fmt.Println("context done")
@@ -258,7 +258,7 @@ func stopProcessingWork[T, S any](
channels *Channels[T, S],
config *WorkConfig,
) {
-	config.tasks_processed.Wait()
+	config.TasksProcessed.Wait()
close(channels.units_receiver)
close(channels.units_dispatcher)
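The exported WaitGroup keeps the usual pipeline accounting: Add(1) when a unit is dispatched, Done() when it completes or exhausts its retries, and Wait() before the channels are closed so no worker can send on a closed channel. The same pattern in miniature:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var tasksProcessed sync.WaitGroup // mirrors config.TasksProcessed
	results := make(chan int, 3)

	for i := 0; i < 3; i++ {
		tasksProcessed.Add(1) // dispatch: one pending unit
		go func(n int) {
			defer tasksProcessed.Done() // completion (or final failure)
			results <- n * n
		}(i)
	}

	tasksProcessed.Wait() // all units accounted for...
	close(results)        // ...so closing is now safe
	for r := range results {
		fmt.Println(r)
	}
}
```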
@@ -289,20 +289,20 @@ func asyncTaskRunner[T, S any](
work Work[T, S],
) (<-chan S, <-chan error, <-chan struct{}) {
// Set default values for WorkConfig if not provided
-	if config.amount_of_workers == 0 {
-		config.amount_of_workers = 5
+	if config.AmountOfWorkers == 0 {
+		config.AmountOfWorkers = 5
}
-	if config.base_retry_time == 0 {
-		config.base_retry_time = 1 * time.Second
+	if config.BaseRetryTime == 0 {
+		config.BaseRetryTime = 1 * time.Second
}
-	if config.rate_limit == nil {
-		config.rate_limit = NewRateLimiter(10, time.Second)
+	if config.RateLimit == nil {
+		config.RateLimit = NewRateLimiter(10, time.Second)
}
// Ensure a clean wait group is used
-	config.tasks_processed = sync.WaitGroup{}
+	config.TasksProcessed = sync.WaitGroup{}
-	channel_size := config.amount_of_workers * 3
+	channel_size := config.AmountOfWorkers * 3
done, finish := context.WithCancel(ctx)
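NewRateLimiter itself is not part of this diff; given the RateLimit field's `<-chan time.Time` type and the documented "10 requests per second" default, a plausible (assumed) implementation would simply wrap a ticker:

```go
// Not from this diff: an assumed NewRateLimiter that spaces out
// "count" events per "period", matching the <-chan time.Time field type.
func NewRateLimiter(count int, period time.Duration) <-chan time.Time {
	return time.Tick(period / time.Duration(count)) // e.g. 10 per second -> one tick every 100ms
}
```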
@@ -315,13 +315,13 @@ func asyncTaskRunner[T, S any](
}
// create pool of workers
-	for i := range config.amount_of_workers {
+	for i := range config.AmountOfWorkers {
worker := &Worker[T, S]{
id: uint8(i),
receptor: channels.units_dispatcher,
transmitter: channels.units_receiver,
-			rate_limit:  config.rate_limit,
-			timeout:     config.timeout,
+			rate_limit:  config.RateLimit,
+			timeout:     config.Timeout,
work: work,
}
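For context, a caller would drain the three channels asyncTaskRunner returns. A hedged sketch: the argument order and the roles of the channels (results, per-task errors, completion signal) are inferred from the signature shown above, not confirmed by this diff:

```go
// Illustrative only: argument order and channel roles are assumptions.
results, errs, done := asyncTaskRunner(ctx, config, work)
for {
	select {
	case r := <-results:
		fmt.Println("result:", r) // one successfully processed task
	case err := <-errs:
		fmt.Println("task failed:", err) // a task that exhausted its retries
	case <-done:
		return // all dispatched work has been accounted for
	}
}
```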