Commit 781924a0 authored by echel0n

Refactored TVShow class episode cache, reverted using dogpile caching for fifo cache.

parent 670cb977
......@@ -44,7 +44,6 @@ from tornado.ioloop import IOLoop
import sickrage
from sickrage.core.announcements import Announcements
from sickrage.core.api import API
from sickrage.core.caches import tv_episodes_cache, MutexLock, configure_regions
from sickrage.core.caches.name_cache import NameCache
from sickrage.core.caches.quicksearch_cache import QuicksearchCache
from sickrage.core.common import SD, SKIPPED, WANTED
......@@ -236,10 +235,10 @@ class Core(object):
if success:
self.main_db = MainDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
self.cache_db = CacheDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
configure_regions(self.cache_dir, replace_existing_backend=True)
# configure_regions(self.cache_dir, replace_existing_backend=True)
shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)
else:
configure_regions(self.cache_dir)
# else:
# configure_regions(self.cache_dir)
# migrate old database file names to new ones
if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
......
import os
from dogpile.cache import make_region
from dogpile.cache.backends.file import AbstractFileLock
from dogpile.util import ReadWriteMutex
class MutexLock(AbstractFileLock):
    """Thread-based read/write lock for dogpile's file-backed cache regions.

    Adapts :class:`dogpile.util.ReadWriteMutex` to the
    :class:`AbstractFileLock` interface that the DBM backend expects as its
    ``lock_factory``.
    """

    def __init__(self, filename):
        """Create the lock.

        :param filename: required by the ``AbstractFileLock`` interface but
            unused here — locking is purely in-process.
        """
        self.mutex = ReadWriteMutex()

    def acquire_read_lock(self, wait):
        """Acquire the shared (read) side of the mutex."""
        acquired = self.mutex.acquire_read_lock(wait)
        # When blocking (wait is truthy) the acquire always succeeds, so
        # report success; otherwise propagate the non-blocking result.
        return wait or acquired

    def acquire_write_lock(self, wait):
        """Acquire the exclusive (write) side of the mutex."""
        acquired = self.mutex.acquire_write_lock(wait)
        return wait or acquired

    def release_read_lock(self):
        """Release the shared (read) side of the mutex."""
        return self.mutex.release_read_lock()

    def release_write_lock(self):
        """Release the exclusive (write) side of the mutex."""
        return self.mutex.release_write_lock()
def configure_regions(cache_dir, replace_existing_backend=False):
    """Attach a file-based (DBM) backend to the module's cache regions.

    :param cache_dir: directory in which the backing ``tv_episodes.dbm``
        file is created.
    :param replace_existing_backend: pass ``True`` to re-configure a region
        that already has a backend (used after a restore replaces the
        cache directory) instead of raising.
    """
    # MutexLock (defined above) provides in-process read/write locking for
    # the DBM file; dogpile's default file lock is not used.
    tv_episodes_cache.configure('dogpile.cache.dbm', replace_existing_backend=replace_existing_backend,
                                arguments={'filename': os.path.join(cache_dir, 'tv_episodes.dbm'), 'lock_factory': MutexLock})
tv_episodes_cache = make_region()
# import os
#
# from dogpile.cache import make_region
# from dogpile.cache.backends.file import AbstractFileLock
# from dogpile.util import ReadWriteMutex
#
#
# class MutexLock(AbstractFileLock):
# """:class:`MutexLock` is a thread-based rw lock based on :class:`dogpile.core.ReadWriteMutex`."""
#
# def __init__(self, filename):
# """Constructor.
# :param filename:
# """
# self.mutex = ReadWriteMutex()
#
# def acquire_read_lock(self, wait):
# """Default acquire_read_lock."""
# ret = self.mutex.acquire_read_lock(wait)
# return wait or ret
#
# def acquire_write_lock(self, wait):
# """Default acquire_write_lock."""
# ret = self.mutex.acquire_write_lock(wait)
# return wait or ret
#
# def release_read_lock(self):
# """Default release_read_lock."""
# return self.mutex.release_read_lock()
#
# def release_write_lock(self):
# """Default release_write_lock."""
# return self.mutex.release_write_lock()
#
#
# def configure_regions(cache_dir, replace_existing_backend=False):
# tv_episodes_cache.configure('dogpile.cache.dbm', replace_existing_backend=replace_existing_backend,
# arguments={'filename': os.path.join(cache_dir, 'tv_episodes.dbm'), 'lock_factory': MutexLock})
#
#
# tv_episodes_cache = make_region()
......@@ -30,7 +30,6 @@ from mutagen.mp4 import MP4, MP4StreamInfoError
from sqlalchemy import orm
import sickrage
from sickrage.core import tv_episodes_cache
from sickrage.core.common import NAMING_EXTEND, NAMING_LIMITED_EXTEND, NAMING_LIMITED_EXTEND_E_PREFIXED, NAMING_DUPLICATE, NAMING_SEPARATED_REPEAT
from sickrage.core.common import Quality, SKIPPED, UNKNOWN, UNAIRED, statusStrings
from sickrage.core.databases.main import MainDB
......@@ -67,10 +66,6 @@ class TVEpisode(object):
query = session.query(MainDB.TVEpisode).filter_by(showid=showid, indexer=indexer, season=season, episode=episode).one()
self._data_local = query.as_dict()
episodes = self.show.episodes
episodes.append(self)
tv_episodes_cache.set(str(self.showid), episodes)
self.populate_episode(season, episode)
# self.checkForMetaFiles()
......@@ -290,15 +285,6 @@ class TVEpisode(object):
session.commit()
try:
episodes = self.show.episodes
index = next((i for i, x in enumerate(episodes) if
x.showid == self.showid and x.indexer == self.indexer and x.season == self.season and x.episode == self.episode))
episodes[index] = self
tv_episodes_cache.set(str(self.showid), episodes)
except StopIteration:
pass
def delete(self):
with sickrage.app.main_db.session() as session:
session.query(MainDB.TVEpisode).filter_by(showid=self.showid,
......@@ -307,9 +293,6 @@ class TVEpisode(object):
episode=self.episode).delete()
session.commit()
tv_episodes_cache.set(str(self.showid), [x for x in self.show.episodes if
x.showid != self.showid and x.indexer != self.indexer and x.season != self.season and x.episode != self.episode])
def refresh_subtitles(self):
"""Look for subtitles files and refresh the subtitles property"""
subtitles, save_subtitles = Subtitles().refresh_subtitles(self.showid, self.season, self.episode)
......@@ -680,7 +663,9 @@ class TVEpisode(object):
except OSError as e:
sickrage.app.log.warning('Unable to delete episode file %s: %s / %s' % (self.location, repr(e), str(e)))
# delete myself from the database and show episode cache
# delete myself from show episode cache
# delete myself from the database
sickrage.app.log.debug("Deleting %s S%02dE%02d from the DB" % (self.show.name, self.season or 0, self.episode or 0))
self.delete()
......
......@@ -28,12 +28,10 @@ import stat
import traceback
import send2trash
from dogpile.cache.api import NO_VALUE
from sqlalchemy import orm
from unidecode import unidecode
import sickrage
from sickrage.core import tv_episodes_cache
from sickrage.core.api import APIError
from sickrage.core.blackandwhitelist import BlackAndWhiteList
from sickrage.core.caches.image_cache import ImageCache
......@@ -50,6 +48,8 @@ from sickrage.indexers.exceptions import indexer_attributenotfound
class TVShow(object):
def __init__(self, indexer_id, indexer, lang='en', location=''):
self._episodes = []
with sickrage.app.main_db.session() as session:
try:
query = session.query(MainDB.TVShow).filter_by(indexer_id=indexer_id, indexer=indexer).one()
......@@ -345,12 +345,11 @@ class TVShow(object):
@property
def episodes(self):
if tv_episodes_cache.get(str(self.indexer_id)) == NO_VALUE:
if not self._episodes:
with sickrage.app.main_db.session() as session:
query = session.query(MainDB.TVShow).filter_by(indexer_id=self.indexer_id, indexer=self.indexer).one()
tv_episodes_cache.set(str(self.indexer_id),
[TVEpisode(showid=self.indexer_id, indexer=self.indexer, season=x.season, episode=x.episode) for x in query.episodes])
return tv_episodes_cache.get(str(self.indexer_id))
self._episodes = [TVEpisode(showid=self.indexer_id, indexer=self.indexer, season=x.season, episode=x.episode) for x in query.episodes]
return self._episodes
@property
def imdb_info(self):
......@@ -535,7 +534,7 @@ class TVShow(object):
session.commit()
def flush_episodes(self):
tv_episodes_cache.set(str(self.indexer_id), [])
self._episodes.clear()
def load_from_indexer(self, cache=True, tvapi=None):
if self.indexer is not INDEXER_TVRAGE:
......@@ -640,19 +639,13 @@ class TVShow(object):
season = query.season
episode = query.episode
episodes = self.episodes
try:
index = next((i for i, x in enumerate(episodes) if
x.showid == self.indexer_id and x.indexer == self.indexer and x.season == season and x.episode == episode))
tv_episode = episodes[index]
except StopIteration:
for tv_episode in self._episodes:
if tv_episode.season == season and tv_episode.episode == episode:
return tv_episode
else:
tv_episode = TVEpisode(showid=self.indexer_id, indexer=self.indexer, season=season, episode=episode)
episodes.append(tv_episode)
tv_episodes_cache.set(str(self.indexer_id), episodes)
return tv_episode
self._episodes.append(tv_episode)
return tv_episode
except orm.exc.MultipleResultsFound:
if absolute_number is not None:
sickrage.app.log.debug("Multiple entries for absolute number: " + str(absolute_number) + " in show: " + self.name + " found ")
......@@ -884,7 +877,8 @@ class TVShow(object):
try:
episode_obj = self.get_episode(season, episode)
except EpisodeNotFoundException:
episode_obj = TVEpisode(showid=self.indexer_id, indexer=self.indexer, season=season, episode=episode, location=filename)
sickrage.app.log.warning("{}: Unable to figure out what this file is, skipping {}".format(self.indexer_id, filename))
continue
# if there is a new file associated with this ep then re-check the quality
if episode_obj.location and os.path.normpath(episode_obj.location) != os.path.normpath(filename):
......@@ -927,8 +921,7 @@ class TVShow(object):
# if it was snatched and now exists then set the status correctly
if old_status == SNATCHED and old_quality <= new_quality:
sickrage.app.log.debug(
"STATUS: this ep used to be snatched with quality " + Quality.qualityStrings[
old_quality] +
"STATUS: this ep used to be snatched with quality " + Quality.qualityStrings[old_quality] +
" but a file exists with quality " + Quality.qualityStrings[new_quality] +
" so I'm setting the status to DOWNLOADED")
new_status = DOWNLOADED
......@@ -936,8 +929,7 @@ class TVShow(object):
# if it was snatched proper and we found a higher quality one then allow the status change
elif old_status == SNATCHED_PROPER and old_quality < new_quality:
sickrage.app.log.debug(
"STATUS: this ep used to be snatched proper with quality " + Quality.qualityStrings[
old_quality] +
"STATUS: this ep used to be snatched proper with quality " + Quality.qualityStrings[old_quality] +
" but a file exists with quality " + Quality.qualityStrings[new_quality] +
" so I'm setting the status to DOWNLOADED")
new_status = DOWNLOADED
......@@ -948,8 +940,7 @@ class TVShow(object):
if new_status is not None:
sickrage.app.log.debug(
"STATUS: we have an associated file, so setting the status from " + str(
episode_obj.status) + " to DOWNLOADED/" + str(
Quality.status_from_name(filename, anime=self.is_anime)))
episode_obj.status) + " to DOWNLOADED/" + str(Quality.status_from_name(filename, anime=self.is_anime)))
episode_obj.status = Quality.composite_status(new_status, new_quality)
# save episode data to database
......
......@@ -155,8 +155,8 @@ class BaseHandler(RequestHandler, ABC):
return self.mako_lookup.get_template('/errors/500.mako').render_unicode(**template_kwargs)
def render(self, template_name, **kwargs):
return self.run_task(lambda: self.write(self.render_string(template_name, **kwargs)))
async def render(self, template_name, **kwargs):
return self.finish(await self.run_task(lambda: self.render_string(template_name, **kwargs)))
# return self.finish(self.render_string(template_name, **kwargs))
# self.write(self.render_string(template_name, **kwargs))
......
......@@ -780,8 +780,7 @@ class DisplayShowHandler(BaseHandler, ABC):
return await self._genericMessage(_("Error"), _("Show not in show list"))
episode_objects = sorted(show_obj.episodes, key=lambda x: (x.season, x.episode), reverse=True)
season_results = list({x.season for x in episode_objects})
season_results = set()
submenu.append({
'title': _('Edit'),
......@@ -891,6 +890,8 @@ class DisplayShowHandler(BaseHandler, ABC):
}
for episode_object in episode_objects:
season_results.add(episode_object.season)
cur_ep_cat = show_obj.get_overview(int(episode_object.status or -1))
if episode_object.airdate > datetime.date.min:
......@@ -954,7 +955,7 @@ class DisplayShowHandler(BaseHandler, ABC):
show_message=show_message,
show=show_obj,
episode_objects=episode_objects,
seasonResults=season_results,
seasonResults=list(season_results),
sortedShowLists=sorted_show_lists,
bwl=bwl,
epCounts=ep_counts,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment