The advantage of this approach is that the uncached function stays simple, while the cache still reaches deep into the implementation: every inner recursive call goes through the cached wrapper, not just the top-level call. The demo implementation only supports a single key, but it could easily be extended to a tuple (a sketch follows the listing).
#include <cstdint>
#include <iostream>
#include <unordered_map>

// CE: evaluate an expression, then print the source line, the expression
// text, and its result.
#define CE(...)                                                       \
  {                                                                   \
    const auto&& Result_ = (__VA_ARGS__);                             \
    std::cout << __LINE__ << "]" << #__VA_ARGS__ << " = " << Result_  \
              << "\n";                                                \
  }

int main(int argc, char* argv[]) {
  // Open-recursive fib: recursion goes through the impl parameter, so a
  // wrapper can intercept every recursive call.
  auto fib = [](auto&& impl, std::uint64_t n) {
    if (n < 2) return n;
    return impl(impl, n - 2) + impl(impl, n - 1);
  };

  // Wrap an open-recursive function with a lookup in *cache_ptr.
  auto add_cache = [](auto&& impl_do, auto cache_ptr) {
    return [cache_ptr, impl_do](auto& impl, const auto& key)
               -> decltype(impl_do(impl_do, key)) {
      auto [it, is_new] = cache_ptr->try_emplace(key);
      // The recursive call below may rehash the map and invalidate `it`;
      // references to elements stay valid across rehashing, so hold one.
      auto& value = it->second;
      if (is_new) {
        value = impl_do(impl, key);
        std::cerr << "cached key=" << key << " value=" << value << "\n";
      }
      return value;
    };
  };

  std::unordered_map<std::uint64_t, std::uint64_t> cache;
  auto fib_cache = add_cache(fib, &cache);
  CE(fib_cache(fib_cache, 40));
  CE(fib_cache(fib_cache, 50));

  // fix ties the knot, so callers no longer pass the function to itself.
  auto fix = [](auto func) {
    return [func](const auto&... args) {
      return func(func, std::forward<decltype(args)>(args)...);
    };
  };
  CE(fix(fib_cache)(60));
  CE(fix(fib_cache)(80));
  return 0;
}
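As a sketch of the tuple extension: add_cache itself is key-type agnostic, so a multi-argument function mostly just needs a map that can hash a std::tuple key. The TupleHash functor and the binom example below are illustrative additions, not part of the original demo; the hash combiner is a common boost::hash_combine-style mix, not a standard-library facility. The logging line is dropped here because std::tuple has no operator<<.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <tuple>
#include <unordered_map>

// Hypothetical helper: hash a std::tuple by mixing its element hashes.
struct TupleHash {
  template <typename... Ts>
  std::size_t operator()(const std::tuple<Ts...>& t) const {
    std::size_t seed = 0;
    std::apply(
        [&seed](const Ts&... elems) {
          ((seed ^= std::hash<Ts>{}(elems) + 0x9e3779b97f4a7c15ull +
                    (seed << 6) + (seed >> 2)),
           ...);
        },
        t);
    return seed;
  }
};

int main() {
  // add_cache as in the listing above, minus the logging line.
  auto add_cache = [](auto&& impl_do, auto cache_ptr) {
    return [cache_ptr, impl_do](auto& impl, const auto& key)
               -> decltype(impl_do(impl_do, key)) {
      auto [it, is_new] = cache_ptr->try_emplace(key);
      auto& value = it->second;
      if (is_new) value = impl_do(impl, key);
      return value;
    };
  };

  // An open-recursive function whose single "key" is a tuple of both
  // arguments: binomial coefficients via Pascal's rule.
  auto binom = [](auto&& impl,
                  const std::tuple<std::uint64_t, std::uint64_t>& nk)
      -> std::uint64_t {
    auto [n, k] = nk;
    if (k == 0 || k == n) return 1;
    return impl(impl, std::tuple{n - 1, k - 1}) +
           impl(impl, std::tuple{n - 1, k});
  };

  std::unordered_map<std::tuple<std::uint64_t, std::uint64_t>, std::uint64_t,
                     TupleHash>
      binom_cache;
  auto binom_cached = add_cache(binom, &binom_cache);

  const auto key = std::tuple<std::uint64_t, std::uint64_t>{40, 20};
  std::cout << binom_cached(binom_cached, key) << "\n";  // 137846528820
  return 0;
}

An alternative to passing TupleHash explicitly is to specialize std::hash for the key type, which lets the map default its hasher.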