diff --git a/Dockerfile b/Dockerfile index 3aa93b0..8f545a1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,17 +1,13 @@ -# build the tube archivist image from default python slim image +# multi stage to build tube archivist +# first stage to build python wheel, copy into final image -FROM python:3.10.4-slim-bullseye + +# First stage to build python wheel +FROM python:3.10.4-slim-bullseye AS builder ARG TARGETPLATFORM -ARG INSTALL_DEBUG -ENV PYTHONUNBUFFERED 1 - -# install distro packages needed -RUN apt-get clean && apt-get -y update && apt-get -y install --no-install-recommends \ - build-essential \ - nginx \ - atomicparsley \ - curl && rm -rf /var/lib/apt/lists/* +RUN apt-get update +RUN apt-get install -y --no-install-recommends build-essential gcc curl # get newest patched ffmpeg and ffprobe builds for amd64 fall back to repo ffmpeg for arm64 RUN if [ "$TARGETPLATFORM" = "linux/amd64" ] ; then \ @@ -27,11 +23,35 @@ RUN if [ "$TARGETPLATFORM" = "linux/amd64" ] ; then \ apt-get -y update && apt-get -y install --no-install-recommends ffmpeg && rm -rf /var/lib/apt/lists/* \ ; fi +# install requirements +COPY ./tubearchivist/requirements.txt /requirements.txt +RUN pip install --user -r requirements.txt + +# build final image +FROM python:3.10.4-slim-bullseye as tubearchivist + +ARG TARGETPLATFORM +ARG INSTALL_DEBUG + +ENV PYTHONUNBUFFERED 1 + +# copy build requirements +COPY --from=builder /root/.local /root/.local +COPY --from=builder /usr/bin/ffmpeg /usr/bin/ffmpeg +COPY --from=builder /usr/bin/ffprobe /usr/bin/ffprobe +ENV PATH=/root/.local/bin:$PATH + +# install distro packages needed +RUN apt-get clean && apt-get -y update && apt-get -y install --no-install-recommends \ + nginx \ + atomicparsley \ + curl && rm -rf /var/lib/apt/lists/* + # install debug tools for testing environment RUN if [ "$INSTALL_DEBUG" ] ; then \ apt-get -y update && apt-get -y install --no-install-recommends \ vim htop bmon net-tools iputils-ping procps \ - && pip install --no-cache-dir 
ipython --src /usr/local/src \ + && pip install --user ipython \ ; fi # make folders @@ -39,10 +59,6 @@ RUN mkdir /cache RUN mkdir /youtube RUN mkdir /app -# install python dependencies -COPY ./tubearchivist/requirements.txt /requirements.txt -RUN pip install --no-cache-dir -r requirements.txt --src /usr/local/src - # copy config files COPY docker_assets/nginx.conf /etc/nginx/sites-available/default diff --git a/docs/Home.md b/docs/Home.md index df1eda9..a1b5098 100644 --- a/docs/Home.md +++ b/docs/Home.md @@ -9,6 +9,7 @@ Table of contents: * [Settings](Settings): All the configuration options * [Video](Video): All details of a single video and playlist navigation. * [Users](Users): User management admin interface +* [Installation](Installation): WIP - detailed installation instructions for various platforms. ## Getting Started 1. [Subscribe](Channels#channels-overview) to some of your favourite YouTube channels. diff --git a/docs/Installation.md b/docs/Installation.md new file mode 100644 index 0000000..e15d784 --- /dev/null +++ b/docs/Installation.md @@ -0,0 +1,56 @@ +# Detailed Installation Instructions for Various Platforms + +## Unraid + +Tube Archivist, and all of its dependencies are located in the [community applications](https://forums.unraid.net/topic/38582-plug-in-community-applications/) store. The three containers you will need are as follows: + +- **TubeArchivist-RedisJSON**: This container acts as a cache and temporary link between the application and the file system. Used to store and display messages and configuration variables. +- **TubeArchivist-ES**: ElasticSearch stores video metadata and makes everything searchable. Also keeps track of the download queue. +- **TubeArchivist**: Once your YouTube video collection grows, it becomes hard to search and find a specific video. 
That's where Tube Archivist comes in: By indexing your video collection with metadata from YouTube, you can organize, search and enjoy your archived YouTube videos without hassle offline through a convenient web interface. + +### Step 1: Install `TubeArchivist-RedisJSON` + +![enter image description here](https://i.imgur.com/ycAqFRU.png) +This is the easiest container to set up of the three, just make sure that you do not have any port conflicts, and that your `/data` is mounted to the correct path. The other containers will map to the same directory. + +If you need to install `TubeArchivist-RedisJSON` on a different port, you'll have to follow [these steps](https://github.com/bbilly1/tubearchivist#redis-on-a-custom-port) later on when installing the `TubeArchivist` container. + + +### Step 2: Install `TubeArchivist-ES` +![enter image description here](https://i.imgur.com/o6tsTdt.png) +ElasticSearch is also pretty easy to set up. Again, make sure you have no port conflicts, make sure that you mapped `/usr/share/elasticsearch/data` to the same directory as `RedisJSON`, and make sure to change the default password to something more secure. + +There are three additional settings in the "show more settings" area, but leave those as they are. + + +### Step 3: Install `TubeArchivist` + +![enter image description here](https://i.imgur.com/dwSCfgO.png) +It's finally time to set up TubeArchivist! + + - `Port:` Again, make sure that you have no port conflicts on 8000. + + - `Youtube Media Path:` is where you'll download all of your videos to. + Make sure that this is an empty directory to not cause confusion when + starting the application. If you have existing videos that you'd like + to import into Tube Archivist, please check out the [settings + wiki.](https://github.com/bbilly1/tubearchivist/wiki/Settings#manual-media-files-import) + + +- `Appdata:` This should be the same base path as the other two containers. + + - `TA Username:` This will be your username for TubeArchivist. 
+ + - `TA Password:` This will be your password for TubeArchivist. + + - `Redis:` This will be JUST the IP address of your redis container. + + - `ElasticSearch Password:` This is the password you defined in the `TubeArchivist-ES` container. + - `ElasticSearch:` This seems to cause some confusion, but it's a pretty simple step, just replace the IP and Port to match your `TubeArchivist-ES` container. + + (example: if your IP is 192.168.1.15, the value should be http://192.168.1.15:9200) + + - `Time Zone:` This is an important step for your scheduler, to find your timezone, use a site like [TimeZoneConverter](http://www.timezoneconverter.com/cgi-bin/findzone.tzc) + +### From there, you should be able to start up your containers and you're good to go! +If you're still having trouble, join us on [Discord](https://discord.gg/AFwz8nE7BK) and come to the #unraid channel. diff --git a/docs/Settings.md b/docs/Settings.md index 8a66fdf..488b184 100644 --- a/docs/Settings.md +++ b/docs/Settings.md @@ -34,7 +34,7 @@ Additional settings passed to yt-dlp. ## Integrations All third party integrations of TubeArchivist will **always** be *opt in*. -- **API**: Your access token for the Tube Archivist API. +- **API**: Your access token for the Tube Archivist API. - **returnyoutubedislike.com**: This will get return dislikes and average ratings for each video by integrating with the API from [returnyoutubedislike.com](https://www.returnyoutubedislike.com/). - **Cast**: Enabling the cast integration in the settings page will load an additional JS library from **Google**. * Requirements @@ -55,7 +55,7 @@ Examples: - **0 15 \***: Run task every day at 15:00 in the afternoon. - **30 8 \*/2**: Run task every second day of the week (Sun, Tue, Thu, Sat) at 08:30 in the morning. - **0 \*/3,8-17 \***: Execute every hour divisible by 3, and every hour during office hours (8 in the morning - 5 in the afternoon). -- **0 8,16 \***: Execute every day at 8 in the morning and at 4 in the afternoon. 
+- **0 8,16 \***: Execute every day at 8 in the morning and at 4 in the afternoon. - **auto**: Sensible default. - **0**: (zero), deactivate that task. @@ -71,9 +71,9 @@ That's the equivalent task as run from the downloads page looking through your c Start downloading all videos currently in the download queue. ## Refresh Metadata -Rescan videos, channels and playlists on youtube and update metadata periodically. This will also deactivate an item and exclude it from future refreshes if the link on YouTube is no longer available. This task is meant to be run once per day, set your schedule accordingly. +Rescan videos, channels and playlists on youtube and update metadata periodically. This will also deactivate an item and exclude it from future refreshes if the link on YouTube is no longer available. This task is meant to be run once per day, set your schedule accordingly. -The field **Refresh older than x days** takes a number where TubeArchivist will consider an item as *outdated*. This value is used to calculate how many items need to be refreshed today based on the total indexed. This will spread out the requests to YouTube. Sensible value here is **90** days. +The field **Refresh older than x days** takes a number where TubeArchivist will consider an item as *outdated*. This value is used to calculate how many items need to be refreshed today based on the total indexed. This will spread out the requests to YouTube. Sensible value here is **90** days. ## Thumbnail check This will check if all expected thumbnails are there and will delete any artwork without matching video. @@ -130,5 +130,6 @@ This function will go through all your media files and looks at the whole index - Should the filename not match with the indexed media url, this will rename the video files correctly and update the index with the new link. - When you delete media files from the filesystem outside of the Tube Archivist interface, this will delete leftover metadata from the index. 
- When you have media files that are not indexed yet, this will grab the metadata from YouTube like it was a newly downloaded video. This can be useful when restoring from an older backup file with missing metadata but already downloaded mediafiles. NOTE: This only works if the media files are named in the same convention as Tube Archivist does, particularly the YouTube ID needs to be at the same index in the filename, alternatively see above for *Manual Media Files Import*. +- This will also check all of your thumbnails and download any that are missing. BE AWARE: There is no undo. diff --git a/tubearchivist/api/views.py b/tubearchivist/api/views.py index d938b2f..4ad216e 100644 --- a/tubearchivist/api/views.py +++ b/tubearchivist/api/views.py @@ -86,7 +86,8 @@ class VideoApiView(ApiBaseView): # pylint: disable=unused-argument """get request""" self.get_document(video_id) - self.process_keys() + if self.response.get("data"): + self.process_keys() return Response(self.response, status=self.status_code) diff --git a/tubearchivist/home/apps.py b/tubearchivist/home/apps.py index 46a940c..1053bc6 100644 --- a/tubearchivist/home/apps.py +++ b/tubearchivist/home/apps.py @@ -97,9 +97,8 @@ class StartupCheck: if invalid: print( - "minial required elasticsearch version: " - + f"{self.MIN_MAJOR}.{self.MIN_MINOR}, " - + "please update to recommended version." 
+ "required elasticsearch version: " + + f"{self.MIN_MAJOR}.{self.MIN_MINOR}" ) sys.exit(1) diff --git a/tubearchivist/home/src/download/yt_dlp_handler.py b/tubearchivist/home/src/download/yt_dlp_handler.py index d764011..693d0e1 100644 --- a/tubearchivist/home/src/download/yt_dlp_handler.py +++ b/tubearchivist/home/src/download/yt_dlp_handler.py @@ -177,7 +177,9 @@ class VideoDownloader: except yt_dlp.utils.DownloadError: print("failed to download " + youtube_id) continue - vid_dict = index_new_video(youtube_id) + vid_dict = index_new_video( + youtube_id, video_overwrites=self.video_overwrites + ) self.channels.add(vid_dict["channel"]["channel_id"]) self.move_to_archive(vid_dict) self._delete_from_pending(youtube_id) diff --git a/tubearchivist/home/src/es/index_mapping.json b/tubearchivist/home/src/es/index_mapping.json index 0fddc11..3abf2c1 100644 --- a/tubearchivist/home/src/es/index_mapping.json +++ b/tubearchivist/home/src/es/index_mapping.json @@ -50,6 +50,9 @@ }, "index_playlists": { "type": "boolean" + }, + "integrate_sponsorblock": { + "type" : "boolean" } } } @@ -130,6 +133,9 @@ }, "index_playlists": { "type": "boolean" + }, + "integrate_sponsorblock": { + "type" : "boolean" } } } @@ -212,6 +218,19 @@ "index": false } } + }, + "sponsorblock": { + "properties": { + "last_refresh": { + "type": "date" + }, + "has_unlocked": { + "type": "boolean" + }, + "is_enabled": { + "type": "boolean" + } + } } }, "expected_set": { diff --git a/tubearchivist/home/src/frontend/forms.py b/tubearchivist/home/src/frontend/forms.py index 53ef284..337a805 100644 --- a/tubearchivist/home/src/frontend/forms.py +++ b/tubearchivist/home/src/frontend/forms.py @@ -198,8 +198,17 @@ class ChannelOverwriteForm(forms.Form): ("1", "Enable playlist index"), ] + SP_CHOICES = [ + ("", "-- change sponsorblock integrations"), + ("0", "disable sponsorblock integration"), + ("1", "enable sponsorblock integration"), + ] + download_format = forms.CharField(label=False, required=False) 
autodelete_days = forms.IntegerField(label=False, required=False) index_playlists = forms.ChoiceField( widget=forms.Select, choices=PLAYLIST_INDEX, required=False ) + integrate_sponsorblock = forms.ChoiceField( + widget=forms.Select, choices=SP_CHOICES, required=False + ) diff --git a/tubearchivist/home/src/index/channel.py b/tubearchivist/home/src/index/channel.py index 953078d..75824d8 100644 --- a/tubearchivist/home/src/index/channel.py +++ b/tubearchivist/home/src/index/channel.py @@ -340,7 +340,12 @@ class YoutubeChannel(YouTubeItem): def set_overwrites(self, overwrites): """set per channel overwrites""" - valid_keys = ["download_format", "autodelete_days", "index_playlists"] + valid_keys = [ + "download_format", + "autodelete_days", + "index_playlists", + "integrate_sponsorblock", + ] to_write = self.json_data.get("channel_overwrites", {}) for key, value in overwrites.items(): diff --git a/tubearchivist/home/src/index/video.py b/tubearchivist/home/src/index/video.py index 811059e..a5d8a42 100644 --- a/tubearchivist/home/src/index/video.py +++ b/tubearchivist/home/src/index/video.py @@ -292,6 +292,7 @@ class SponsorBlock: def __init__(self, user_id=False): self.user_id = user_id self.user_agent = f"{settings.TA_UPSTREAM} {settings.TA_VERSION}" + self.last_refresh = int(datetime.now().strftime("%s")) def get_sb_id(self): """get sponsorblock userid or generate if needed""" @@ -315,9 +316,35 @@ class SponsorBlock: response = requests.get(url, headers=headers) if not response.ok: print(f"{youtube_id}: sponsorblock failed: {response.text}") - return False + sponsor_dict = { + "last_refresh": self.last_refresh, + "is_enabled": True, + "segments": [], + } + else: + all_segments = response.json() + sponsor_dict = self._get_sponsor_dict(all_segments) - return response.json() + return sponsor_dict + + def _get_sponsor_dict(self, all_segments): + """format and process response""" + has_unlocked = False + cleaned_segments = [] + for segment in all_segments: + if not 
segment["locked"]: + has_unlocked = True + del segment["userID"] + del segment["description"] + cleaned_segments.append(segment) + + sponsor_dict = { + "last_refresh": self.last_refresh, + "has_unlocked": has_unlocked, + "is_enabled": True, + "segments": cleaned_segments, + } + return sponsor_dict def post_timestamps(self, youtube_id, start_time, end_time): """post timestamps to api""" @@ -358,9 +385,10 @@ class YoutubeVideo(YouTubeItem, YoutubeSubtitle): index_name = "ta_video" yt_base = "https://www.youtube.com/watch?v=" - def __init__(self, youtube_id): + def __init__(self, youtube_id, video_overwrites=False): super().__init__(youtube_id) self.channel_id = False + self.video_overwrites = video_overwrites self.es_path = f"{self.index_name}/_doc/{youtube_id}" def build_json(self): @@ -377,11 +405,24 @@ class YoutubeVideo(YouTubeItem, YoutubeSubtitle): if self.config["downloads"]["integrate_ryd"]: self._get_ryd_stats() - if self.config["downloads"]["integrate_sponsorblock"]: + if self._check_get_sb(): self._get_sponsorblock() return + def _check_get_sb(self): + """check if need to run sponsor block""" + if self.config["downloads"]["integrate_sponsorblock"]: + return True + try: + single_overwrite = self.video_overwrites[self.youtube_id] + _ = single_overwrite["integrate_sponsorblock"] + return True + except KeyError: + return False + + return False + def _process_youtube_meta(self): """extract relevant fields from youtube""" # extract @@ -498,7 +539,10 @@ class YoutubeVideo(YouTubeItem, YoutubeSubtitle): for media_url in to_del: file_path = os.path.join(video_base, media_url) - os.remove(file_path) + try: + os.remove(file_path) + except FileNotFoundError: + print(f"{self.youtube_id}: failed {media_url}, continue.") self.del_in_es() self.delete_subtitles() @@ -544,9 +588,9 @@ class YoutubeVideo(YouTubeItem, YoutubeSubtitle): _, _ = ElasticWrap(path).post(data=data) -def index_new_video(youtube_id): +def index_new_video(youtube_id, video_overwrites=False): 
"""combined classes to create new video in index""" - video = YoutubeVideo(youtube_id) + video = YoutubeVideo(youtube_id, video_overwrites=video_overwrites) video.build_json() if not video.json_data: raise ValueError("failed to get metadata for " + youtube_id) diff --git a/tubearchivist/home/src/ta/config.py b/tubearchivist/home/src/ta/config.py index c89d62c..4b98c4a 100644 --- a/tubearchivist/home/src/ta/config.py +++ b/tubearchivist/home/src/ta/config.py @@ -220,18 +220,33 @@ class ScheduleBuilder: raise ValueError("invalid input") to_write = dict(zip(keys, values)) - all_hours = [int(i) for i in re.split(r"\D+", to_write["hour"])] - if max(all_hours) > 23: - print("hour can't be greater than 23") - raise ValueError("invalid input") - try: - int(to_write["minute"]) - except ValueError as error: - print("too frequent: only number in minutes are supported") - raise ValueError("invalid input") from error + self._validate_cron(to_write) return to_write + @staticmethod + def _validate_cron(to_write): + """validate all fields, raise value error for impossible schedule""" + all_hours = list(re.split(r"\D+", to_write["hour"])) + for hour in all_hours: + if hour.isdigit() and int(hour) > 23: + print("hour can not be greater than 23") + raise ValueError("invalid input") + + all_days = list(re.split(r"\D+", to_write["day_of_week"])) + for day in all_days: + if day.isdigit() and int(day) > 6: + print("day can not be greater than 6") + raise ValueError("invalid input") + + if not to_write["minute"].isdigit(): + print("too frequent: only number in minutes are supported") + raise ValueError("invalid input") + + if int(to_write["minute"]) > 59: + print("minutes can not be greater than 59") + raise ValueError("invalid input") + def build_schedule(self): """build schedule dict as expected by app.conf.beat_schedule""" schedule_dict = {} diff --git a/tubearchivist/home/templates/home/channel_id.html b/tubearchivist/home/templates/home/channel_id.html index 80b090b..a30b4f5 100644 
--- a/tubearchivist/home/templates/home/channel_id.html +++ b/tubearchivist/home/templates/home/channel_id.html @@ -89,7 +89,15 @@ {% endif %}

{{ channel_overwrite_form.index_playlists }}
-
+
+

Enable SponsorBlock: + {% if channel_info.channel_overwrites.integrate_sponsorblock %} + {{ channel_info.channel_overwrites.integrate_sponsorblock }} + {% else %} + False + {% endif %}

+ {{ channel_overwrite_form.integrate_sponsorblock }}
+
diff --git a/tubearchivist/home/templates/home/settings.html b/tubearchivist/home/templates/home/settings.html index e4d9534..4fba379 100644 --- a/tubearchivist/home/templates/home/settings.html +++ b/tubearchivist/home/templates/home/settings.html @@ -124,12 +124,12 @@
-

Integrate with returnyoutubedislike.com to get dislikes and average ratings back: {{ config.downloads.integrate_ryd }}

+

Integrate with returnyoutubedislike.com to get dislikes and average ratings back: {{ config.downloads.integrate_ryd }}

Before activating that, make sure you have a scraping sleep interval of at least 3 secs set to avoid ratelimiting issues.
{{ app_form.downloads_integrate_ryd }}
-

Integrate with SponsorBlock to get sponsored timestamps: {{ config.downloads.integrate_sponsorblock }}

+

Integrate with SponsorBlock to get sponsored timestamps: {{ config.downloads.integrate_sponsorblock }}

Before activating that, make sure you have a scraping sleep interval of at least 3 secs set to avoid ratelimiting issues.
{{ app_form.downloads_integrate_sponsorblock }}
diff --git a/tubearchivist/home/templates/home/video.html b/tubearchivist/home/templates/home/video.html index 324d293..d1ff3a7 100644 --- a/tubearchivist/home/templates/home/video.html +++ b/tubearchivist/home/templates/home/video.html @@ -3,6 +3,36 @@ {% load static %} {% load humanize %}
+
+
+ {% if video.channel.channel_overwrites.integrate_sponsorblock %} + {% if video.channel.channel_overwrites.integrate_sponsorblock == True %} + {% if not video.sponsorblock %} +

This video doesn't have any sponsor segments added. To add a segment go to this video on YouTube and add a segment using the SponsorBlock extension.

+ {% endif %} + {% if video.sponsorblock %} + {% for segment in video.sponsorblock %} + {% if segment.locked != 1 %} +

This video has unlocked sponsor segments. Go to this video on YouTube and vote on the segments using the SponsorBlock extension.

+ {{ break }} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% elif config.downloads.integrate_sponsorblock %} + {% if not video.sponsorblock %} +

This video doesn't have any sponsor segments added. To add a segment go to this video on YouTube and add a segment using the SponsorBlock extension.

+ {% endif %} + {% if video.sponsorblock %} + {% for segment in video.sponsorblock %} + {% if segment.locked != 1 %} +

This video has unlocked sponsor segments. Go to this video on YouTube and vote on the segments using the SponsorBlock extension.

+ {{ break }} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} +
{% if cast %} @@ -114,6 +144,7 @@
diff --git a/tubearchivist/home/views.py b/tubearchivist/home/views.py index e7bd143..92359b8 100644 --- a/tubearchivist/home/views.py +++ b/tubearchivist/home/views.py @@ -111,7 +111,7 @@ class ArchivistViewConfig(View): """build default context for every view""" self.user_id = user_id self.user_conf = RedisArchivist() - self.default_conf = AppConfig().config + self.default_conf = AppConfig(self.user_id).config self.context = { "colors": self.default_conf["application"]["colors"], @@ -670,7 +670,7 @@ class VideoView(View): def get(self, request, video_id): """get single video""" - colors, cast = self.read_config(user_id=request.user.id) + config_handler = AppConfig(request.user.id) path = f"ta_video/_doc/{video_id}" look_up = SearchHandler(path, config=False) video_hit = look_up.get_data() @@ -692,9 +692,10 @@ class VideoView(View): "video": video_data, "playlist_nav": playlist_nav, "title": video_title, - "colors": colors, - "cast": cast, + "colors": config_handler.colors, + "cast": config_handler.config["application"]["enable_cast"], "version": settings.TA_VERSION, + "config": config_handler.config, } return render(request, "home/video.html", context) @@ -711,14 +712,6 @@ class VideoView(View): return all_navs - @staticmethod - def read_config(user_id): - """read config file""" - config_handler = AppConfig(user_id) - cast = config_handler.config["application"]["enable_cast"] - colors = config_handler.colors - return colors, cast - @staticmethod def star_creator(rating): """convert rating float to stars""" diff --git a/tubearchivist/requirements.txt b/tubearchivist/requirements.txt index af75bb3..108590a 100644 --- a/tubearchivist/requirements.txt +++ b/tubearchivist/requirements.txt @@ -1,6 +1,6 @@ -beautifulsoup4==4.10.0 +beautifulsoup4==4.11.1 celery==5.2.6 -Django==4.0.3 +Django==4.0.4 django-cors-headers==3.11.0 djangorestframework==3.13.1 Pillow==9.1.0 @@ -9,4 +9,4 @@ requests==2.27.1 ryd-client==0.0.3 uWSGI==2.0.20 whitenoise==6.0.0 -yt_dlp==2022.3.8.2 
+yt_dlp==2022.4.8 diff --git a/tubearchivist/static/css/style.css b/tubearchivist/static/css/style.css index 0ddf32c..d92187a 100644 --- a/tubearchivist/static/css/style.css +++ b/tubearchivist/static/css/style.css @@ -62,6 +62,13 @@ h3 { color: var(--accent-font-light); } +h4 { + font-size: 0.7em; + margin-bottom: 7px; + font-family: Sen-Regular, sans-serif; + color: var(--accent-font-light); +} + p, i, li { font-family: Sen-Regular, sans-serif; margin-bottom: 10px; @@ -355,6 +362,18 @@ button:hover { height: 100vh; } +.notifications { + text-align: center; + width: 80%; + margin: auto; +} + +.sponsorblock { + text-align: center; + width: 80%; + margin: auto; +} + .video-player video, .video-main video { max-height: 80vh; diff --git a/tubearchivist/static/script.js b/tubearchivist/static/script.js index bc9696d..ee95b6f 100644 --- a/tubearchivist/static/script.js +++ b/tubearchivist/static/script.js @@ -327,9 +327,33 @@ function cancelDelete() { } // player +var sponsorBlock = []; function createPlayer(button) { var videoId = button.getAttribute('data-id'); var videoData = getVideoData(videoId); + + var sponsorBlockElements = ''; + if (videoData.config.downloads.integrate_sponsorblock && (typeof(videoData.data.channel.channel_overwrites) == "undefined" || typeof(videoData.data.channel.channel_overwrites.integrate_sponsorblock) == "undefined" || videoData.data.channel.channel_overwrites.integrate_sponsorblock == true)) { + sponsorBlock = videoData.data.sponsorblock; + if (!sponsorBlock) { + sponsorBlockElements = ` +
+

This video doesn't have any sponsor segments added. To add a segment go to this video on Youtube and add a segment using the SponsorBlock extension.

+
+ `; + } else { + for(let i in sponsorBlock) { + if(sponsorBlock[i].locked != 1) { + sponsorBlockElements = ` +
+

This video has unlocked sponsor segments. Go to this video on YouTube and vote on the segments using the SponsorBlock extension.

+
+ `; + break; + } + } + } + } var videoProgress = getVideoProgress(videoId).position; var videoName = videoData.data.title; @@ -353,7 +377,6 @@ function createPlayer(button) { var channelName = videoData.data.channel.channel_name; removePlayer(); - // document.getElementById(videoId).outerHTML = ''; // Remove watch indicator from video info // If cast integration is enabled create cast button var castButton = ''; @@ -383,6 +406,8 @@ function createPlayer(button) { const markup = `
${videoTag} +
+ ${sponsorBlockElements}
close-icon ${watchStatusIndicator} @@ -400,6 +425,53 @@ function createPlayer(button) { divPlayer.innerHTML = markup; } +// function sendSponsorBlockVote(uuid, vote) { +// var videoId = getVideoPlayerVideoId(); +// postSponsorSegmentVote(videoId, uuid, vote); +// } + +// var sponsorBlockTimestamps = []; +// function sendSponsorBlockSegment() { +// var videoId = getVideoPlayerVideoId(); +// var currentTime = getVideoPlayerCurrentTime(); +// var sponsorBlockElement = document.getElementById("sponsorblock"); +// if (sponsorBlockTimestamps[1]) { +// if (sponsorBlockTimestamps[1] > sponsorBlockTimestamps[0]) { +// postSponsorSegment(videoId, sponsorBlockTimestamps[0], sponsorBlockTimestamps[1]); +// sponsorBlockElement.innerHTML = ` +//

Timestamps sent! (Not really)

+// `; +// setTimeout(function(){ +// sponsorBlockElement.innerHTML = ` +// +// `; +// }, 3000); +// } else { +// sponsorBlockElement.innerHTML = ` +// Invalid Timestamps! +// `; +// setTimeout(function(){ +// sponsorBlockElement.innerHTML = ` +// +// `; +// }, 3000); +// } +// sponsorBlockTimestamps = []; +// } else if (sponsorBlockTimestamps[0]) { +// sponsorBlockTimestamps.push(currentTime); +// sponsorBlockElement.innerHTML = ` +//

${sponsorBlockTimestamps[0].toFixed(1)} s |

+//

${sponsorBlockTimestamps[1].toFixed(1)} s |

+// +// `; +// } else { +// sponsorBlockTimestamps.push(currentTime); +// sponsorBlockElement.innerHTML = ` +// +// `; +// } +// } + // Add video tag to video page when passed a video id, function loaded on page load `video.html (115-117)` function insertVideoTag(videoData, videoProgress) { var videoTag = createVideoTag(videoData, videoProgress); @@ -488,6 +560,32 @@ function onVideoProgress() { var videoId = getVideoPlayerVideoId(); var currentTime = getVideoPlayerCurrentTime(); var duration = getVideoPlayerDuration(); + var videoElement = getVideoPlayer(); + // var sponsorBlockElement = document.getElementById("sponsorblock"); + var notificationsElement = document.getElementById("notifications"); + if (sponsorBlock) { + for(let i in sponsorBlock) { + if(sponsorBlock[i].segment[0] <= currentTime + 0.3 && sponsorBlock[i].segment[0] >= currentTime) { + videoElement.currentTime = sponsorBlock[i].segment[1]; + notificationsElement.innerHTML += `

Skipped sponsor segment from ${formatTime(sponsorBlock[i].segment[0])} to ${formatTime(sponsorBlock[i].segment[1])}.

`; + } + // if(currentTime >= sponsorBlock[i].segment[1] && currentTime <= sponsorBlock[i].segment[1] + 0.2) { + // if(sponsorBlock[i].locked != 1) { + // sponsorBlockElement.innerHTML += ` + //
+ // + // + //
`; + // } + // } + if(currentTime > sponsorBlock[i].segment[1] + 10) { + var notificationsElementUUID = document.getElementById("notification-" + sponsorBlock[i].UUID); + if(notificationsElementUUID) { + notificationsElementUUID.outerHTML = ''; + } + } + } + } if ((currentTime % 10).toFixed(1) <= 0.2) { // Check progress every 10 seconds or else progress is checked a few times a second postVideoProgress(videoId, currentTime); if (!getVideoPlayerWatchStatus()) { // Check if video is already marked as watched @@ -542,6 +640,32 @@ function formatNumbers(number) { return numberFormatted; } +// Formats times in seconds for frontend +function formatTime(time) { + var hoursUnformatted = time / 3600; + var minutesUnformatted = (time % 3600) / 60; + var secondsUnformatted = time % 60; + + var hoursFormatted = Math.trunc(hoursUnformatted); + if(minutesUnformatted < 10 && hoursFormatted > 0) { + var minutesFormatted = "0" + Math.trunc(minutesUnformatted); + } else { + var minutesFormatted = Math.trunc(minutesUnformatted); + } + if(secondsUnformatted < 10) { + var secondsFormatted = "0" + Math.trunc(secondsUnformatted); + } else { + var secondsFormatted = Math.trunc(secondsUnformatted); + } + + var timeUnformatted = ''; + if(hoursFormatted > 0) { + timeUnformatted = hoursFormatted + ":" + } + var timeFormatted = timeUnformatted.concat(minutesFormatted, ":", secondsFormatted); + return timeFormatted; +} + // Gets video data when passed video ID function getVideoData(videoId) { var apiEndpoint = "/api/video/" + videoId + "/"; @@ -599,6 +723,30 @@ function postVideoProgress(videoId, videoProgress) { } } +// Send sponsor segment when given video id and and timestamps +function postSponsorSegment(videoId, startTime, endTime) { + var apiEndpoint = "/api/video/" + videoId + "/sponsor/"; + var data = { + "segment": { + "startTime": startTime, + "endTime": endTime + } + }; + apiRequest(apiEndpoint, "POST", data); +} + +// Send sponsor segment when given video id and and timestamps 
+function postSponsorSegmentVote(videoId, uuid, vote) { + var apiEndpoint = "/api/video/" + videoId + "/sponsor/"; + var data = { + "vote": { + "uuid": uuid, + "yourVote": vote + } + }; + apiRequest(apiEndpoint, "POST", data); +} + // Makes api requests when passed an endpoint and method ("GET", "POST", "DELETE") function apiRequest(apiEndpoint, method, data) { const xhttp = new XMLHttpRequest();