diff --git a/loader/src/server/Server.cpp b/loader/src/server/Server.cpp
index de0fca5d..5fd6990e 100644
--- a/loader/src/server/Server.cpp
+++ b/loader/src/server/Server.cpp
@@ -789,24 +789,14 @@ ServerRequest<std::optional<ServerModUpdate>> server::checkUpdates(Mod const* mo
     );
 }
 
-ServerRequest<std::vector<ServerModUpdate>> server::checkAllUpdates(bool useCache) {
-    if (useCache) {
-        return getCache<checkAllUpdates>().get();
-    }
-
-    auto modIDs = ranges::map<std::vector<std::string>>(
-        Loader::get()->getAllMods(),
-        [](auto mod) { return mod->getID(); }
-    );
-
+ServerRequest<std::vector<ServerModUpdate>> server::batchedCheckUpdates(std::vector<std::string> const& batch) {
     auto req = web::WebRequest();
     req.userAgent(getServerUserAgent());
     req.param("platform", GEODE_PLATFORM_SHORT_IDENTIFIER);
     req.param("gd", GEODE_GD_VERSION_STR);
     req.param("geode", Loader::get()->getVersion().toNonVString());
-    if (modIDs.size()) {
-        req.param("ids", ranges::join(modIDs, ";"));
-    }
+
+    req.param("ids", ranges::join(batch, ";"));
     return req.get(formatServerURL("/mods/updates")).map(
         [](web::WebResponse* response) -> Result<std::vector<ServerModUpdate>, ServerError> {
             if (response->ok()) {
@@ -830,6 +820,79 @@ ServerRequest<std::vector<ServerModUpdate>> server::checkAllUpdates(bool useCach
     );
 }
 
+void server::queueBatches(
+    ServerRequest<std::vector<ServerModUpdate>>::PostResult const resolve,
+    std::shared_ptr<std::vector<std::vector<std::string>>> const batches,
+    std::shared_ptr<std::vector<ServerModUpdate>> accum
+) {
+    // we have to do the copy here, or else our values die
+    batchedCheckUpdates(batches->back()).listen([resolve, batches, accum](auto result) {
+        if (result->ok()) {
+            auto serverValues = result->unwrap();
+
+            accum->reserve(accum->size() + serverValues.size());
+            accum->insert(accum->end(), serverValues.begin(), serverValues.end());
+
+            if (batches->size() > 1) {
+                batches->pop_back();
+                queueBatches(resolve, batches, accum);
+            }
+            else {
+                resolve(Ok(*accum));
+            }
+        }
+        else {
+            resolve(*result);
+        }
+    });
+}
+
+ServerRequest<std::vector<ServerModUpdate>> server::checkAllUpdates(bool useCache) {
+    if (useCache) {
+        return getCache<checkAllUpdates>().get();
+    }
+
+    auto modIDs = ranges::map<std::vector<std::string>>(
+        Loader::get()->getAllMods(),
+        [](auto mod) { return mod->getID(); }
+    );
+
+    // if there's no mods, the request would just be empty anyways
+    if (modIDs.empty()) {
+        // you would think it could infer like literally anything
+        return ServerRequest<std::vector<ServerModUpdate>>::immediate(
+            Ok<std::vector<ServerModUpdate>>({})
+        );
+    }
+
+    auto modBatches = std::make_shared<std::vector<std::vector<std::string>>>();
+    auto modCount = modIDs.size();
+    std::size_t maxMods = 200u; // this affects 0.03% of users
+
+    if (modCount <= maxMods) {
+        // no tricks needed
+        return batchedCheckUpdates(modIDs);
+    }
+
+    // even out the mod count, so a request with 230 mods sends two 115 mod requests
+    auto batchCount = modCount / maxMods + 1;
+    auto maxBatchSize = modCount / batchCount + 1;
+
+    for (std::size_t i = 0u; i < modCount; i += maxBatchSize) {
+        auto end = std::min(modCount, i + maxBatchSize);
+        modBatches->emplace_back(modIDs.begin() + i, modIDs.begin() + end);
+    }
+
+    // chain requests to avoid doing too many large requests at once
+    return ServerRequest<std::vector<ServerModUpdate>>::runWithCallback(
+        [modBatches](auto finish, auto progress, auto hasBeenCancelled) {
+            auto accum = std::make_shared<std::vector<ServerModUpdate>>();
+            queueBatches(finish, modBatches, accum);
+        },
+        "Mod Update Check"
+    );
+}
+
 void server::clearServerCaches(bool clearGlobalCaches) {
     getCache<&getMods>().clear();
     getCache<&getMod>().clear();
diff --git a/loader/src/server/Server.hpp b/loader/src/server/Server.hpp
index 908d4eab..34b8f336 100644
--- a/loader/src/server/Server.hpp
+++ b/loader/src/server/Server.hpp
@@ -151,7 +151,15 @@ namespace server {
     ServerRequest<std::vector<ServerTag>> getTags(bool useCache = true);
 
     ServerRequest<std::optional<ServerModUpdate>> checkUpdates(Mod const* mod);
+
+    ServerRequest<std::vector<ServerModUpdate>> batchedCheckUpdates(std::vector<std::string> const& batch);
+    void queueBatches(
+        ServerRequest<std::vector<ServerModUpdate>>::PostResult const finish,
+        std::shared_ptr<std::vector<std::vector<std::string>>> const batches,
+        std::shared_ptr<std::vector<ServerModUpdate>> const accum
+    );
+
     ServerRequest<std::vector<ServerModUpdate>> checkAllUpdates(bool useCache = true);
-    
+
     void clearServerCaches(bool clearGlobalCaches = false);
 }
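Note: for reference, here is a minimal standalone sketch (not part of the patch) of the batch-splitting arithmetic used in the new checkAllUpdates. With a cap of 200 IDs per request, 230 installed mods are split into two batches of 116 and 114 rather than 200 and 30. The function name splitIntoBatches and the example values are illustrative only.

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <vector>

    // Split ids into evenly sized batches, mirroring the arithmetic in the patch.
    std::vector<std::vector<std::string>> splitIntoBatches(
        std::vector<std::string> const& ids,
        std::size_t maxPerBatch
    ) {
        std::vector<std::vector<std::string>> batches;
        auto count = ids.size();
        if (count <= maxPerBatch) {
            batches.push_back(ids);
            return batches;
        }
        // spread the ids evenly instead of sending one full batch and a tiny remainder
        auto batchCount = count / maxPerBatch + 1;
        auto batchSize = count / batchCount + 1;
        for (std::size_t i = 0; i < count; i += batchSize) {
            auto end = std::min(count, i + batchSize);
            batches.emplace_back(ids.begin() + i, ids.begin() + end);
        }
        return batches;
    }

    int main() {
        std::vector<std::string> ids(230, "example.mod");
        for (auto const& batch : splitIntoBatches(ids, 200)) {
            std::cout << batch.size() << '\n'; // prints 116, then 114
        }
    }

The patch then sends these batches sequentially (queueBatches chains one request per batch and accumulates the results) rather than firing them all at once.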