refactor: add separation of concerns in fetcher and platform

This commit is contained in:
Alexander Navarro 2024-11-30 18:14:04 -03:00
parent 96af51ee68
commit 4094f71a7d
5 changed files with 227 additions and 145 deletions

View file

@ -4,113 +4,41 @@ import (
"context"
"fmt"
"slices"
"time"
)
// Utility struct to represent a collection of nodes, it's a [Node] itself so all
// the node's functionality is available.
// Platform represents a collection of nodes. It embeds a Node, so all the
// node's functionality is available.
type Platform struct {
Node // Underlaying node info
Node // Underlying node info
Collections []*Collection // Child nodes
}
// FetchCollectionResponse is a type alias for FetchResponse carrying a slice
// of Collection pointers, as produced by a collection-fetching Work function.
type FetchCollectionResponse = FetchResponse[[]*Collection]
// FetchCollections fetches collections using the provided fetcher and pagination settings.
// It updates the platform's collections and creates relationships between the platform and the collections.
//
// Parameters:
// - ctx: The context to control cancellation.
// - fetcher: The fetcher function to execute the work.
// - startPagination: The initial pagination settings.
// - poolConfig: The configuration for the worker pool.
//
// Returns:
// - error: The error if any occurred.
func (platform *Platform) FetchCollections(
ctx context.Context,
fetcher Work[Pagination, FetchCollectionResponse],
start_pagination Pagination,
startPagination Pagination,
poolConfig *WorkConfig,
) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
config := &WorkConfig{
amount_of_workers: 5,
max_retries: 2,
base_retry_time: time.Second,
rate_limit: NewRateLimiter(5, time.Minute),
timeout: time.Second * 2,
}
tasks := make(chan Pagination)
results, errors, done := asyncTaskRunner(
ctx,
tasks,
config,
fetcher,
)
var current_page uint64 = 0
if start_pagination.Pages == 0 {
// do the first fetch
tasks <- start_pagination
select {
case response, ok := <-results:
if !ok {
break
}
platform.Collections = slices.Concat(platform.Collections, response.Response)
pages, err := calculatePages(&response.Pagination, start_pagination.Offset)
if err != nil {
return err
}
start_pagination.Pages = pages
start_pagination.Total = response.Pagination.Total
current_page++
case error, ok := <-errors:
if !ok {
return fmt.Errorf("Could not do first fetch to calculate pages: %v\n", error)
}
case <-ctx.Done():
break
case <-done:
break
}
}
page_offset, err := getPageByOffset(&start_pagination)
values, err := fetchWithPagination(ctx, poolConfig, fetcher, startPagination)
if err != nil {
return err
}
current_page += page_offset
fmt.Printf("Total pages: %v, Current page: %v\n", start_pagination.Pages, current_page)
for current_page <= start_pagination.Pages {
page := start_pagination
page.Offset = current_page * page.Limit
tasks <- page
current_page++
}
close(tasks)
loop:
for {
select {
case response, ok := <-results:
if !ok {
continue
}
platform.Collections = slices.Concat(platform.Collections, response.Response)
case error, ok := <-errors:
if !ok {
continue
}
fmt.Printf("There was an error: %v\n", error)
case <-ctx.Done():
break loop
case <-done:
break loop
}
}
platform.Collections = slices.Concat(platform.Collections, values)
fmt.Printf("Collections: %v\n", len(platform.Collections))