fix: update readwise example to work without duplicating nodes

This is a workaround for the fact that Readwise uses cursor-based pagination
instead of offset pagination; proper handling will be added later.
This commit is contained in:
Alexander Navarro 2024-12-02 09:34:12 -03:00
parent 92c9814e2a
commit 8f424d45f7
8 changed files with 42 additions and 9 deletions

View file

@@ -22,6 +22,7 @@ type ReadwiseCursor struct {
}
type ReadwiseApiResponse struct {
Detail string `json:"detail"`
Count uint64 `json:"count"`
NextPageCursor string `json:"nextPageCursor"`
Results []ReadwiseDocument `json:"results"`
@@ -70,6 +71,7 @@ func getReadwiseDocuments(
var documents []*synchronizator.Node
params := url.Values{}
params.Add("withHtmlContent", "true")
if cursor.Cursor != "" {
params.Add("pageCursor", cursor.Cursor)
}
@@ -99,6 +101,14 @@ func getReadwiseDocuments(
return payload, err
}
if resp.StatusCode > 201 {
return payload, fmt.Errorf(
"Request failed with status %v: %v",
resp.StatusCode,
data.Detail,
)
}
var rawData RawReadwiseApiResponse
err = json.Unmarshal(body, &rawData)
if err != nil {
@@ -164,14 +174,15 @@ func main() {
}
pagination := synchronizator.StartPagination
pagination.Pages = 0
pagination.Pages = 1
pagination.Offset = 100
pagination.Total = 100
pagination.Limit = 100
pool_config := &synchronizator.WorkConfig{
AmountOfWorkers: 5,
MaxRetries: 1,
BaseRetryTime: time.Second * 2,
RateLimit: synchronizator.NewRateLimiter(20, time.Minute),
MaxRetries: 2,
BaseRetryTime: time.Second * 30,
RateLimit: synchronizator.NewRateLimiter(10, time.Minute),
Timeout: time.Second * 2,
}