mirror of
https://github.com/geode-sdk/geode.git
synced 2024-11-26 01:18:01 -05:00
Split update checks into multiple batches (#1066)
* skip check with empty mod list
* initial batching implementation
* remove an unused header
* use vector insert instead of copy
This commit is contained in:
parent
05c88ea606
commit
f6ed9c8f70
2 changed files with 85 additions and 14 deletions
|
@ -789,24 +789,14 @@ ServerRequest<std::optional<ServerModUpdate>> server::checkUpdates(Mod const* mo
|
|||
);
|
||||
}
|
||||
|
||||
ServerRequest<std::vector<ServerModUpdate>> server::checkAllUpdates(bool useCache) {
|
||||
if (useCache) {
|
||||
return getCache<checkAllUpdates>().get();
|
||||
}
|
||||
|
||||
auto modIDs = ranges::map<std::vector<std::string>>(
|
||||
Loader::get()->getAllMods(),
|
||||
[](auto mod) { return mod->getID(); }
|
||||
);
|
||||
|
||||
ServerRequest<std::vector<ServerModUpdate>> server::batchedCheckUpdates(std::vector<std::string> const& batch) {
|
||||
auto req = web::WebRequest();
|
||||
req.userAgent(getServerUserAgent());
|
||||
req.param("platform", GEODE_PLATFORM_SHORT_IDENTIFIER);
|
||||
req.param("gd", GEODE_GD_VERSION_STR);
|
||||
req.param("geode", Loader::get()->getVersion().toNonVString());
|
||||
if (modIDs.size()) {
|
||||
req.param("ids", ranges::join(modIDs, ";"));
|
||||
}
|
||||
|
||||
req.param("ids", ranges::join(batch, ";"));
|
||||
return req.get(formatServerURL("/mods/updates")).map(
|
||||
[](web::WebResponse* response) -> Result<std::vector<ServerModUpdate>, ServerError> {
|
||||
if (response->ok()) {
|
||||
|
@ -830,6 +820,79 @@ ServerRequest<std::vector<ServerModUpdate>> server::checkAllUpdates(bool useCach
|
|||
);
|
||||
}
|
||||
|
||||
// Sends the batched update requests one at a time, chaining each request's
// completion into the next, so only one large request is ever in flight.
//
// `resolve` completes the outer ServerRequest once every batch has been
// fetched (or as soon as one batch fails). `batches` is consumed from the
// back, one batch per call — note this means batches are processed in
// reverse order of construction. `accum` collects the updates from every
// batch and is handed to `resolve` at the end.
void server::queueBatches(
    ServerRequest<std::vector<ServerModUpdate>>::PostResult const resolve,
    std::shared_ptr<std::vector<std::vector<std::string>>> const batches,
    std::shared_ptr<std::vector<ServerModUpdate>> accum
) {
    // we have to do the copy here, or else our values die
    batchedCheckUpdates(batches->back()).listen([resolve, batches, accum](auto result) {
        if (result->ok()) {
            auto serverValues = result->unwrap();

            accum->reserve(accum->size() + serverValues.size());
            // serverValues is a local we own — move its elements into accum
            // instead of deep-copying every ServerModUpdate
            accum->insert(
                accum->end(),
                std::make_move_iterator(serverValues.begin()),
                std::make_move_iterator(serverValues.end())
            );

            if (batches->size() > 1) {
                batches->pop_back();
                queueBatches(resolve, batches, accum);
            }
            else {
                // last batch done — this is the terminal use of accum,
                // so hand the vector over without copying it
                resolve(Ok(std::move(*accum)));
            }
        }
        else {
            // a single failed batch fails the whole update check
            resolve(*result);
        }
    });
}
|
||||
|
||||
// Checks for updates for every installed mod. Requests are split into
// batches of at most `maxMods` IDs and chained one after another via
// queueBatches, so huge mod lists don't produce one oversized request.
ServerRequest<std::vector<ServerModUpdate>> server::checkAllUpdates(bool useCache) {
    if (useCache) {
        return getCache<checkAllUpdates>().get();
    }

    auto modIDs = ranges::map<std::vector<std::string>>(
        Loader::get()->getAllMods(),
        [](auto mod) { return mod->getID(); }
    );

    // if there's no mods, the request would just be empty anyways
    if (modIDs.empty()) {
        // you would think it could infer like literally anything
        return ServerRequest<std::vector<ServerModUpdate>>::immediate(
            Ok<std::vector<ServerModUpdate>>({})
        );
    }

    auto modCount = modIDs.size();
    // cap on how many mod IDs go into a single request; exceeding it
    // affects roughly 0.03% of users
    constexpr std::size_t maxMods = 200u;

    if (modCount <= maxMods) {
        // no tricks needed — everything fits in one request
        return batchedCheckUpdates(modIDs);
    }

    // even out the mod count, so a request with 230 mods sends two 115 mod
    // requests. ceiling division also means an exact multiple (e.g. 400
    // mods) yields 2 full batches instead of 3 uneven ones
    auto batchCount = (modCount + maxMods - 1) / maxMods;
    auto maxBatchSize = (modCount + batchCount - 1) / batchCount;

    // only allocate the batch list once we know we actually need batching
    auto modBatches = std::make_shared<std::vector<std::vector<std::string>>>();
    modBatches->reserve(batchCount);
    for (std::size_t i = 0u; i < modCount; i += maxBatchSize) {
        auto end = std::min(modCount, i + maxBatchSize);
        modBatches->emplace_back(modIDs.begin() + i, modIDs.begin() + end);
    }

    // chain requests to avoid doing too many large requests at once
    return ServerRequest<std::vector<ServerModUpdate>>::runWithCallback(
        [modBatches](auto finish, auto progress, auto hasBeenCancelled) {
            auto accum = std::make_shared<std::vector<ServerModUpdate>>();
            queueBatches(finish, modBatches, accum);
        },
        "Mod Update Check"
    );
}
|
||||
|
||||
void server::clearServerCaches(bool clearGlobalCaches) {
|
||||
getCache<&getMods>().clear();
|
||||
getCache<&getMod>().clear();
|
||||
|
|
|
@ -151,6 +151,14 @@ namespace server {
|
|||
ServerRequest<std::unordered_set<std::string>> getTags(bool useCache = true);
|
||||
|
||||
ServerRequest<std::optional<ServerModUpdate>> checkUpdates(Mod const* mod);
|
||||
|
||||
// Checks for updates for a single batch of mod IDs in one server request.
ServerRequest<std::vector<ServerModUpdate>> batchedCheckUpdates(std::vector<std::string> const& batch);
|
||||
// Chains batched update requests one after another, consuming `batches`
// from the back; results accumulate into `accum` and are delivered through
// `finish` once the last batch completes (or on the first failed batch).
void queueBatches(
|
||||
ServerRequest<std::vector<ServerModUpdate>>::PostResult const finish,
|
||||
std::shared_ptr<std::vector<std::vector<std::string>>> const batches,
|
||||
std::shared_ptr<std::vector<ServerModUpdate>> const accum
|
||||
);
|
||||
|
||||
ServerRequest<std::vector<ServerModUpdate>> checkAllUpdates(bool useCache = true);
|
||||
|
||||
void clearServerCaches(bool clearGlobalCaches = false);
|
||||
|
|
Loading…
Reference in a new issue