mirror of https://github.com/tubearchivist/tubearchivist-frontend.git
synced 2024-11-22 11:50:14 +00:00

Commit 1cd285aec4: parse api views, #build

Changed:
- return frontend relevant paths for api views
- add video api list view
@@ -1,6 +1,6 @@
 # build the tube archivist image from default python slim image
-FROM python:3.10.3-slim-bullseye
+FROM python:3.10.4-slim-bullseye
 ARG TARGETPLATFORM
 ARG INSTALL_DEBUG

@@ -143,14 +143,13 @@ bestvideo[VCODEC=avc1]+bestaudio[ACODEC=mp4a]/mp4
 5. Enjoy your archived collection!

 ## Roadmap
-We have come far, nonetheless we are not short of ideas on how to improve and extend this project:
+We have come far, nonetheless we are not short of ideas on how to improve and extend this project, in no particular order:

 - [ ] User roles
 - [ ] Podcast mode to serve channel as mp3
 - [ ] Implement [PyFilesystem](https://github.com/PyFilesystem/pyfilesystem2) for flexible video storage
 - [ ] Implement [Apprise](https://github.com/caronc/apprise) for notifications
 - [ ] Add [SponsorBlock](https://sponsor.ajay.app/) integration
-- [ ] Implement per channel settings
 - [ ] User created playlists
 - [ ] Auto play or play next link
 - [ ] SSO / LDAP support
@@ -159,8 +158,11 @@ We have come far, nonetheless we are not short of ideas on how to improve and ex
 - [ ] Show total video downloaded vs total videos available in channel
 - [ ] Make items in grid row configurable
 - [ ] Add statistics of index
+- [ ] Auto ignore videos by keyword
+- [ ] Custom searchable notes to videos, channels, playlists

 Implemented:
+- [X] Implement per channel settings [2022-03-26]
 - [X] Subtitle download & indexing [2022-02-13]
 - [X] Fancy advanced unified search interface [2022-01-08]
 - [X] Auto rescan and auto download on a schedule [2021-12-17]
@@ -38,6 +38,9 @@ after successful login returns
 }
 ```

+## Video List View
+/api/video/
+
 ## Video Item View
 /api/video/\<video_id>/
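To illustrate the new list endpoint, here is a hedged sketch of a request against it. The base URL, port, and the token header scheme are assumptions (the token itself would come from the login endpoint documented above); the response keys follow the search processor added in this commit.

```python
import requests

# hypothetical host/port and token; adjust to your own deployment
URL = "http://localhost:8000/api/video/"
HEADERS = {"Authorization": "Token xxxxxxxxxx"}  # header scheme assumed, not shown in this diff

response = requests.get(URL, headers=HEADERS)
for video in response.json()["data"]:
    # fields rewritten for the frontend by SearchProcess (see the new file below)
    print(video["youtube_id"], video["published"], video["vid_thumb_url"])
```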
tubearchivist/api/src/__init__.py (new file, 0 lines)
tubearchivist/api/src/search_processor.py (new file, 82 lines)
tubearchivist/api/src/search_processor.py
@@ -0,0 +1,82 @@
"""
Functionality:
- processing search results for frontend
- this is duplicated code from home.src.frontend.searching.SearchHandler
"""

import urllib.parse

from home.src.download.thumbnails import ThumbManager
from home.src.ta.helper import date_praser


class SearchProcess:
    """process search results"""

    def __init__(self, response):
        self.response = response
        self.processed = False

    def process(self):
        """detect type and process"""
        if "_source" in self.response.keys():
            # single
            self.processed = self._process_result(self.response)

        elif "hits" in self.response.keys():
            # multiple
            self.processed = []
            all_sources = self.response["hits"]["hits"]
            for result in all_sources:
                self.processed.append(self._process_result(result))

        return self.processed

    def _process_result(self, result):
        """detect which type of data to process"""
        index = result["_index"]
        processed = False
        if index == "ta_video":
            processed = self._process_video(result["_source"])
        if index == "ta_channel":
            processed = self._process_channel(result["_source"])

        return processed

    @staticmethod
    def _process_channel(channel_dict):
        """run on single channel"""
        channel_id = channel_dict["channel_id"]
        art_base = f"/cache/channels/{channel_id}"
        date_str = date_praser(channel_dict["channel_last_refresh"])
        channel_dict.update(
            {
                "channel_last_refresh": date_str,
                "channel_banner_url": f"{art_base}_banner.jpg",
                "channel_thumb_url": f"{art_base}_thumb.jpg",
                "channel_tvart_url": False,
            }
        )

        return dict(sorted(channel_dict.items()))

    def _process_video(self, video_dict):
        """run on single video dict"""
        video_id = video_dict["youtube_id"]
        media_url = urllib.parse.quote(video_dict["media_url"])
        vid_last_refresh = date_praser(video_dict["vid_last_refresh"])
        published = date_praser(video_dict["published"])
        vid_thumb_url = ThumbManager().vid_thumb_path(video_id)
        channel = self._process_channel(video_dict["channel"])

        video_dict.update(
            {
                "channel": channel,
                "media_url": media_url,
                "vid_last_refresh": vid_last_refresh,
                "published": published,
                "vid_thumb_url": vid_thumb_url,
            }
        )

        return dict(sorted(video_dict.items()))
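As a rough usage sketch (illustrative only: the hit below is hand-built in the shape of an Elasticsearch response, and the channel fields are hypothetical), this is how SearchProcess turns raw index data into frontend relevant paths:

```python
from api.src.search_processor import SearchProcess

# hand-built response shaped like an Elasticsearch search result (hypothetical data);
# running this requires the Django environment of the project to be loaded
es_response = {
    "hits": {
        "hits": [
            {
                "_index": "ta_channel",
                "_source": {
                    "channel_id": "UC-example",          # hypothetical channel id
                    "channel_last_refresh": 1648296000,  # epoch timestamp
                    "channel_name": "Example Channel",
                },
            }
        ]
    }
}

processed = SearchProcess(es_response).process()
# the channel dict now carries cache paths the frontend can use directly, e.g.
# processed[0]["channel_banner_url"] == "/cache/channels/UC-example_banner.jpg"
```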
@@ -7,6 +7,7 @@ from api.views import (
     DownloadApiView,
     LoginApiView,
     PlaylistApiView,
+    VideoApiListView,
     VideoApiView,
     VideoProgressView,
 )
@@ -14,6 +15,11 @@ from django.urls import path

 urlpatterns = [
     path("login/", LoginApiView.as_view(), name="api-login"),
+    path(
+        "video/",
+        VideoApiListView.as_view(),
+        name="api-video-list",
+    ),
     path(
         "video/<slug:video_id>/",
         VideoApiView.as_view(),
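A small, hedged check of the new route name (assuming the api app's URLs are mounted under /api/ by the project urls.py, which is not part of this diff):

```python
from django.urls import reverse

# hypothetical sanity check; only valid once Django settings and the URL conf are loaded
assert reverse("api-video-list") == "/api/video/"
```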
@@ -1,5 +1,6 @@
 """all API views"""

+from api.src.search_processor import SearchProcess
 from home.src.download.thumbnails import ThumbManager
 from home.src.es.connect import ElasticWrap
 from home.src.ta.config import AppConfig
@@ -36,7 +37,7 @@ class ApiBaseView(APIView):
         print(path)
         response, status_code = ElasticWrap(path).get()
         try:
-            self.response["data"] = response["_source"]
+            self.response["data"] = SearchProcess(response).process()
         except KeyError:
             print(f"item not found: {document_id}")
             self.response["data"] = False
@@ -69,8 +70,7 @@ class ApiBaseView(APIView):
         """get a list of results"""
         print(self.search_base)
         response, status_code = ElasticWrap(self.search_base).get(data=data)
-        all_hits = response["hits"]["hits"]
-        self.response["data"] = [i["_source"] for i in all_hits]
+        self.response["data"] = SearchProcess(response).process()
         self.status_code = status_code


@@ -89,6 +89,23 @@ class VideoApiView(ApiBaseView):
         return Response(self.response, status=self.status_code)


+class VideoApiListView(ApiBaseView):
+    """resolves to /api/video/
+    GET: returns list of videos
+    """
+
+    search_base = "ta_video/_search/"
+
+    def get(self, request):
+        # pylint: disable=unused-argument
+        """get request"""
+        data = {"query": {"match_all": {}}}
+        self.get_document_list(data)
+        self.get_paginate()
+
+        return Response(self.response)
+
+
 class VideoProgressView(ApiBaseView):
     """resolves to /api/video/<video_id>/
     handle progress status for video
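For a rough end-to-end sketch of the new list view, DRF's request factory can drive it directly (illustrative only: it assumes a reachable Elasticsearch backend and sidesteps whatever authentication the project configures, neither of which is shown in this diff):

```python
from rest_framework.test import APIRequestFactory

from api.views import VideoApiListView

factory = APIRequestFactory()
request = factory.get("/api/video/")  # auth left out; add force_authenticate() if required

response = VideoApiListView.as_view()(request)
# response.data["data"] should hold the SearchProcess output: video dicts with
# media_url, vid_thumb_url, published, etc. rewritten for the frontend
```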
@@ -8,6 +8,7 @@ import re
 import string
 import subprocess
 import unicodedata
+from datetime import datetime
 from urllib.parse import parse_qs, urlparse

 import yt_dlp
@@ -88,6 +89,16 @@ def requests_headers():
     return {"User-Agent": template}


+def date_praser(timestamp):
+    """return formatted date string"""
+    if isinstance(timestamp, int):
+        date_obj = datetime.fromtimestamp(timestamp)
+    elif isinstance(timestamp, str):
+        date_obj = datetime.strptime(timestamp, "%Y-%m-%d")
+
+    return datetime.strftime(date_obj, "%d %b, %Y")
+
+
 class UrlListParser:
     """take a multi line string and detect valid youtube ids"""
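A quick illustration of the two input types date_praser handles (the epoch example depends on the server's local timezone; string dates format deterministically):

```python
date_praser(1648296000)    # epoch int   -> e.g. "26 Mar, 2022", local timezone dependent
date_praser("2022-03-26")  # date string -> "26 Mar, 2022"
```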