Mirror of https://github.com/geode-sdk/geode.git
change cache design to be shared

commit e9c17a7c3d (parent b21fa5bc8a)
3 changed files with 111 additions and 60 deletions
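In short: instead of each call site declaring its own `static inline` cache member, every endpoint now gets exactly one process-wide cache, reached through `ServerResultCache<F>::shared()` and keyed by the endpoint function itself. A minimal sketch of the resulting call pattern, using only names that appear in the hunks below:

    // Any call site can reach the same cache for a given endpoint,
    // because the endpoint function pointer is the template argument:
    auto& cache = server::ServerResultCache<&server::getModLogo>::shared();

    // First call fetches over the network and stores the result; later
    // calls with the same query resolve immediately from the cache.
    m_listener.setFilter(cache.get(id).listen());

    // Leaving the mods UI drops every cached entry at once:
    cache.invalidateAll();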
loader/src
@@ -99,76 +99,128 @@ namespace server {
     ServerPromise<ByteVector> getModLogo(std::string const& id);
 
     // Caching for server endpoints
-    namespace impl_cache {
-        template <class R, class Q>
-        struct ExtractServerReqParams {
-            using Result = R;
-            using Query = Q;
-            ExtractServerReqParams(ServerPromise<R>(*)(Q const&)) {}
-        };
-
-        template <auto F>
-        class ServerResultCache final {
-        public:
-            using Extract = decltype(ExtractServerReqParams(F));
-            using Result = Extract::Result;
-            using Query = Extract::Query;
-            using Cached = std::variant<ServerPromise<Result>, Result>;
-
-        private:
-            std::mutex m_cachedMutex;
-            std::map<Query, Result> m_cached;
-
-            ServerPromise<Result> fetch(Query const& query) {
-                return ServerPromise<Result>([this, query = Query(query)](auto resolve, auto reject, auto progress, auto cancelled) {
-                    F(Query(query))
-                        .then([this, resolve, query = std::move(query)](auto res) {
-                            std::unique_lock lock(m_cachedMutex);
-                            m_cached[std::move(query)] = res;
-                            lock.unlock();
-
-                            resolve(res);
-                        })
-                        .expect([reject](auto err) {
-                            reject(err);
-                        })
-                        .progress([progress](auto prog) {
-                            progress(prog);
-                        })
-                        .link(cancelled);
-                });
-            }
-
-        public:
-            ServerPromise<Result> get(Query const& query) {
-                std::unique_lock lock(m_cachedMutex);
-                // Return cached value if there is one and the query matches
-                if (m_cached.contains(query)) {
-                    auto cached = m_cached.at(query);
-                    lock.unlock();
-                    return ServerPromise<Result>([cached = std::move(cached)](auto resolve, auto) {
-                        resolve(std::move(cached));
-                    });
-                }
-                lock.unlock();
-                return this->fetch(std::move(query));
-            }
-
-            ServerPromise<Result> refetch(Query const& query) {
-                // Clear cache for this query only
-                std::unique_lock lock(m_cachedMutex);
-                m_cached.erase(query);
-                lock.unlock();
-
-                // Fetch new value
-                return this->fetch(std::move(query));
-            }
-
-            // Clear all caches
-            void invalidateAll() {
-                std::unique_lock _(m_cachedMutex);
-                m_cached.clear();
-            }
-        };
-    }
+    namespace detail {
+        template <class R, class Q>
+        struct ExtractServerReqParams {
+            using Result = R;
+            using Query = Q;
+            ExtractServerReqParams(ServerPromise<R>(*)(Q const&)) {}
+        };
+    }
+
+    template <auto F>
+    class ServerResultCache final {
+    public:
+        using Extract = decltype(detail::ExtractServerReqParams(F));
+        using Result = Extract::Result;
+        using Query = Extract::Query;
+        using Cached = std::variant<ServerPromise<Result>, Result>;
+
+    private:
+        class Cache final {
+            std::mutex m_mutex;
+            // I know this looks like a goofy choice over just
+            // `std::unordered_map`, but hear me out:
+            //
+            // This needs preserved insertion order (so shrinking the cache
+            // to match size limits doesn't just have to erase random
+            // elements)
+            //
+            // If this used a map for values and another vector for storing
+            // insertion order, it would have a pretty big memory footprint
+            // (two copies of Query, one for order, one for map + two heap
+            // allocations on top of that)
+            //
+            // In addition, it would be a bad idea to have a cache of 1000s
+            // of items in any case (since that would likely take up a ton
+            // of memory, which we want to avoid since it's likely many
+            // crashes with the old index were due to too much memory
+            // usage)
+            //
+            // Linear searching a vector of at most a couple dozen items is
+            // lightning-fast (🚀), and besides the main performance benefit
+            // comes from the lack of a web request - not how many extra
+            // milliseconds we can squeeze out of a map access
+            std::vector<std::pair<Query, Result>> m_values;
+
+        public:
+            std::optional<Result> get(Query const& query) {
+                std::unique_lock _(m_mutex);
+                // mfw no std::optional::map
+                if (auto found = ranges::find(m_values, [&query](auto const& q) { return q.first == query; })) {
+                    return found->second;
+                }
+                return std::nullopt;
+            }
+
+            void add(Query&& query, Result&& result) {
+                std::unique_lock _(m_mutex);
+                m_values.emplace_back(std::make_pair(std::move(query), std::move(result)));
+            }
+
+            void remove(Query const& query) {
+                std::unique_lock _(m_mutex);
+                ranges::remove(m_values, [&query](auto const& q) { return q.first == query; });
+            }
+
+            void shrink(size_t size) {
+                std::unique_lock _(m_mutex);
+                size_t removeCount = size < m_values.size() ? m_values.size() - size : 0;
+                m_values.erase(m_values.begin(), m_values.begin() + removeCount);
+            }
+
+            void clear() {
+                std::unique_lock _(m_mutex);
+                m_values.clear();
+            }
+        };
+
+        Cache m_cache;
+        // todo: loader setting to customize this
+        static constexpr size_t CACHE_SIZE_LIMIT = 20;
+
+        ServerPromise<Result> fetch(Query const& query) {
+            return ServerPromise<Result>([this, query = Query(query)](auto resolve, auto reject, auto progress, auto cancelled) {
+                F(Query(query))
+                    .then([this, resolve, query = std::move(query)](auto res) {
+                        m_cache.add(Query(query), Result(res));
+                        resolve(res);
+                    })
+                    .expect([reject](auto err) {
+                        reject(err);
+                    })
+                    .progress([progress](auto prog) {
+                        progress(prog);
+                    })
+                    .link(cancelled);
+            });
+        }
+
+    public:
+        static ServerResultCache<F>& shared() {
+            static auto inst = ServerResultCache<F>();
+            return inst;
+        }
+
+        ServerPromise<Result> get(Query const& query) {
+            // Return cached value if there is one and the query matches
+            if (auto cached = m_cache.get(query)) {
+                return ServerPromise<Result>([cached = std::move(cached).value()](auto resolve, auto) {
+                    resolve(std::move(cached));
+                });
+            }
+            // Shrink to fit size limit + 1 new item
+            m_cache.shrink(CACHE_SIZE_LIMIT - 1);
+            return this->fetch(std::move(query));
+        }
+
+        ServerPromise<Result> refetch(Query const& query) {
+            // Clear cache for this query only
+            m_cache.remove(query);
+
+            // Fetch new value
+            return this->fetch(std::move(query));
+        }
+
+        // Clear all caches
+        void invalidateAll() {
+            m_cache.clear();
+        }
+    };
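The eviction policy deserves a note: because `m_values` preserves insertion order, `shrink()` simply erases from the front, so the cache behaves as a bounded FIFO where the oldest entries are dropped first. A standalone sketch of that same arithmetic (toy types, not Geode code; the real `Cache` stores `Query`/`Result` pairs under a mutex):

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    // Toy stand-in for the Cache above: insertion-ordered vector, FIFO eviction.
    struct ToyCache {
        std::vector<std::pair<std::string, std::string>> values;

        void add(std::string query, std::string result) {
            values.emplace_back(std::move(query), std::move(result));
        }

        // Same arithmetic as Cache::shrink: drop the oldest entries from
        // the front until at most `size` remain.
        void shrink(size_t size) {
            size_t removeCount = size < values.size() ? values.size() - size : 0;
            values.erase(values.begin(), values.begin() + removeCount);
        }
    };

    int main() {
        ToyCache cache;
        for (int i = 0; i < 5; i++) {
            cache.add("query" + std::to_string(i), "result" + std::to_string(i));
        }
        // Mirrors ServerResultCache::get on a cache miss: shrink to
        // (limit - 1) so adding the new result never exceeds the limit
        // (the limit is 3 in this toy, 20 in the real code).
        cache.shrink(3 - 1);
        cache.add("query5", "result5");
        for (auto const& [q, r] : cache.values) {
            std::cout << q << " -> " << r << "\n"; // query3, query4, query5
        }
    }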
@@ -70,8 +70,6 @@ protected:
     std::string m_modID;
     CCNode* m_sprite = nullptr;
     EventListener<PromiseEventFilter<ByteVector, server::ServerError>> m_listener;
-    // todo: use shared cache once impl'd
-    static inline server::impl_cache::ServerResultCache<&server::getModLogo> s_cache = {};
 
     bool init(std::string const& id, bool fetch) {
         if (!CCNode::init())
@@ -95,7 +93,7 @@ protected:
         this->setSprite(CCSprite::create("loadingCircle.png"));
         static_cast<CCSprite*>(m_sprite)->setBlendFunc({ GL_ONE, GL_ONE });
         m_sprite->runAction(CCRepeatForever::create(CCRotateBy::create(1.f, 360.f)));
-        m_listener.setFilter(s_cache.get(id).listen());
+        m_listener.setFilter(server::ServerResultCache<&server::getModLogo>::shared().get(id).listen());
     }
 
     return true;
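The `shared()` accessor this call site now goes through is a function-local static (a Meyers singleton), so each template instantiation (that is, each endpoint function) lazily gets exactly one cache instance with thread-safe initialization. A reduced sketch of that pattern under toy names (not Geode code):

    #include <iostream>

    // One distinct instance exists per function-pointer key, because each
    // template instantiation owns its own function-local static.
    template <auto F>
    class PerEndpointSingleton {
    public:
        static PerEndpointSingleton& shared() {
            // Constructed once, on first use; since C++11 this
            // initialization is thread-safe even under races.
            static PerEndpointSingleton inst;
            return inst;
        }
        int hits = 0;
    };

    int endpointA() { return 1; }
    int endpointB() { return 2; }

    int main() {
        PerEndpointSingleton<&endpointA>::shared().hits++;
        PerEndpointSingleton<&endpointA>::shared().hits++;
        PerEndpointSingleton<&endpointB>::shared().hits++;
        // Same key -> same instance; different key -> different instance.
        std::cout << PerEndpointSingleton<&endpointA>::shared().hits << "\n"; // 2
        std::cout << PerEndpointSingleton<&endpointB>::shared().hits << "\n"; // 1
    }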
@@ -264,6 +264,7 @@ void ModsLayer::onRefreshList(CCObject*) {
 
 void ModsLayer::onBack(CCObject*) {
     CCDirector::get()->replaceScene(CCTransitionFade::create(.5f, MenuLayer::scene(false)));
+    server::ServerResultCache<&server::getModLogo>::shared().invalidateAll();
 }
 
 void ModsLayer::onGoToPage(CCObject*) {