mirror of
https://github.com/pretix/pretix.git
synced 2026-05-04 15:04:03 +00:00
Generalized EventRelatedCache to ObjectRelatedCache
This commit is contained in:
@@ -2,29 +2,14 @@ import hashlib
|
||||
import time
|
||||
|
||||
from django.core.cache import caches
|
||||
|
||||
from pretix.base.models import Event
|
||||
from django.db.models import Model
|
||||
|
||||
|
||||
class EventRelatedCache:
|
||||
"""
|
||||
This object behaves exactly like the cache implementations by Django
|
||||
but with one important difference: It stores all keys related to a
|
||||
certain event, so you pass an event when creating this object and if
|
||||
you store data in this cache, it is only stored for this event. The
|
||||
main purpose of this is to be able to flush all cached data related
|
||||
to this event at once.
|
||||
class NamespacedCache:
|
||||
|
||||
The EventRelatedCache instance itself is stateless, all state is
|
||||
stored in the cache backend, so you can instantiate this class as many
|
||||
times as you want.
|
||||
"""
|
||||
|
||||
def __init__(self, prefixkey, cache: str='default'):
    """
    Bind this namespaced cache to a key prefix.

    :param prefixkey: Cache key that identifies this namespace; it is also
                      the key under which the namespace's version prefix
                      is stored in the backend.
    :param cache: Alias of the Django cache backend to use.
    """
    self.prefixkey = prefixkey
    self.cache = caches[cache]
|
||||
|
||||
def _prefix_key(self, original_key: str) -> str:
|
||||
# Race conditions can happen here, but should be very very rare.
|
||||
@@ -36,15 +21,14 @@ class EventRelatedCache:
|
||||
if prefix is None:
|
||||
prefix = int(time.time())
|
||||
self.cache.set(self.prefixkey, prefix)
|
||||
key = 'event:%s:%d:%s' % (self.event.pk, prefix, original_key)
|
||||
key = '%s:%d:%s' % (self.prefixkey, prefix, original_key)
|
||||
if len(key) > 200: # Hash long keys, as memcached has a length limit
|
||||
# TODO: Use a more efficient, non-cryptographic hash algorithm
|
||||
key = hashlib.sha256(key.encode("UTF-8")).hexdigest()
|
||||
return key
|
||||
|
||||
@staticmethod
|
||||
def _strip_prefix(key: str) -> str:
|
||||
return key.split(":", 3)[-1] if 'event:' in key else key
|
||||
def _strip_prefix(self, key: str) -> str:
|
||||
return key.split(":", 2 + self.prefixkey.count(":"))[-1]
|
||||
|
||||
def clear(self):
|
||||
try:
|
||||
@@ -86,3 +70,22 @@ class EventRelatedCache:
|
||||
|
||||
def close(self):  # NOQA
    """No-op; kept so this object matches Django's cache backend API."""
    return None
|
||||
|
||||
|
||||
class ObjectRelatedCache(NamespacedCache):
    """
    A cache namespace scoped to a single model instance.

    Behaves exactly like Django's cache backends, except that every key is
    stored under a namespace derived from the given object, so all cached
    data belonging to that object can be flushed in one operation.

    Instances hold no state of their own beyond the namespace key — all
    real state lives in the cache backend — so it is safe to construct
    this class as often as needed.
    """

    def __init__(self, obj, cache: str='default'):
        # Only model instances have a stable (object_name, pk) identity
        # to derive the namespace from.
        assert isinstance(obj, Model)
        namespace = '%s:%s' % (obj._meta.object_name, obj.pk)
        super().__init__(namespace, cache)
|
||||
|
||||
Reference in New Issue
Block a user