initial commit

This commit is contained in:
ilyashramko
2023-11-09 23:12:10 +03:00
commit 027475e5c3
13 changed files with 1085 additions and 0 deletions

View File

@@ -0,0 +1,25 @@
{
"id": "com.synology.TMDBExample",
"description": "",
"version": "1.0",
"site": "http://www.themoviedb.org/",
"entry_file": "loader.sh",
"type": ["movie", "tvshow"],
"language": ["enu"],
"test_example": {
"movie": {
"title": "Harry Potter",
"original_available": "2001-11-16"
},
"tvshow": {
"title": "Game of Thrones",
"original_available": "2011-04-17"
},
"tvshow_episode": {
"title": "Game of Thrones",
"original_available": "2011-04-17",
"season": 1,
"episode": 1
}
}
}

View File

@@ -0,0 +1,54 @@
## The Movie Database API
### API Docs
https://developers.themoviedb.org/3
### Search
https://developers.themoviedb.org/3/search/search-movies
- required
- api_key
- query
- optional
- language, default: en-US
- year
year information can help distinguish movies that share the same name but were released in different years,
e.g. Total Recall, which has a 1990 and a 2012 version
https://api.themoviedb.org/3/search/movie?api_key=${APIKEY}&query=total%20recall
https://api.themoviedb.org/3/search/movie?api_key=${APIKEY}&query=total%20recall&language=zh-TW
https://api.themoviedb.org/3/search/movie?api_key=${APIKEY}&query=total%20recall&year=1990
### Movie
https://developers.themoviedb.org/3/movies/get-movie-details
- optional
- append_to_response, get credits (actor, director, writer) and releases (certificate) in one request
https://api.themoviedb.org/3/movie/861?api_key=${APIKEY}&append_to_response=credits,releases&language=zh-TW
### Translation
https://developers.themoviedb.org/3/movies/get-movie-translations
https://api.themoviedb.org/3/movie/861/translations?api_key=${APIKEY}
### Image
https://developers.themoviedb.org/3/movies/get-movie-images
Movie detail responses include poster_path and backdrop_path.
The Image API can return additional images; use the include_image_language parameter to filter them by language.
Using `append_to_response` on the movie detail request can also append the images result in one call.
### Language
ISO-639-1 and ISO 3166-1
https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2

View File

@@ -0,0 +1,102 @@
ERROR_PLUGIN_QUERY_FAIL = 1003
ERROR_PLUGIN_PARSE_RESULT_FAIL = 1004
PLUGINID = 'com.synology.TMDBExample'
THEMOVIEDB_URL = 'https://api.themoviedb.org/3/'
BANNER_URL = 'https://image.tmdb.org/t/p/w500'
BACKDROP_URL = 'https://image.tmdb.org/t/p/original'
DEFAULT_EXPIRED_TIME = 86400
DEFAULT_LONG_EXPIRED_TIME = 86400 * 30
#TODO: you should assign your own APIKEY here
APIKEY = "bcebb6ce4f8ea47f712bfe5dca058619"
MOVIE_DATA_TEMPLATE = {
'title': '',
'tagline': '',
'original_available': '',
'original_title': '',
'summary': '',
'certificate': '',
'genre': [],
'actor': [],
'director': [],
'writer': [],
'extra': {}
}
"""
movie extra template
'extra': {
PLUGINID: {
'poster': [],
'backdrop': [],
'reference': {
'themoviedb': None,
'imdb': None
},
'rating': {
'themoviedb': None
},
'collection_id': {
'themoviedb': -1
}
}
}
"""
TVSHOW_DATA_TEMPLATE = {
'title': '',
'original_available': '',
'original_title': '',
'summary': '',
'extra': {}
}
"""
tvshow extra template
'extra': {
PLUGINID: {
'poster': [],
'backdrop': [],
}
}
"""
TVSHOW_EPISODE_DATA_TEMPLATE = {
'title': '',
'tagline': '',
'original_available': '',
'summary': '',
'certificate': '',
'genre': [],
'actor': [],
'director': [],
'writer': [],
'season': -1,
'episode': -1,
'extra': {}
}
"""
tvshow_episode extra template
'extra': {
PLUGINID: {
'tvshow': TVSHOW_DATA_TEMPLATE,
'poster': [],
'reference': {
'themoviedb_tv': None,
'imdb': None
},
'rating': {
'themoviedb_tv': None
}
}
}
"""
MOVIE_SIMILAR_DATA_TEMPLATE = {
'title': '',
'id': -1
}

View File

@@ -0,0 +1,23 @@
#!/bin/sh
# Plugin entry point (declared as "entry_file" in INFO): forwards every
# command-line argument, shell-escaped, to search.py run under python3.
BASEDIR=$(dirname $0)
ARGV=""
# Wrap one argument in single quotes, escaping embedded single quotes,
# so it survives the eval-based re-assembly below.
escape()
{
    local ARG=$(echo -E $@ | sed "s/'/'\\\\''/g")
    echo \'$ARG\'
}
# Rebuild the argument list with each positional parameter escaped.
i=1
while [ $i -le $# ]; do
    eval ARG=\$\(escape \${$i}\)
    ARGV="$ARGV $ARG"
    i=`expr $i + 1`
done
eval "/usr/bin/env python3 "\
"$BASEDIR/search.py $ARGV"

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1 @@
{"page":1,"results":[{"adult":false,"backdrop_path":"/lxD5ak7BOoinRNehOCA85CQ8ubr.jpg","genre_ids":[16,12,10751,35],"id":862,"original_language":"en","original_title":"Toy Story","overview":"Led by Woody, Andy's toys live happily in his room until Andy's birthday brings Buzz Lightyear onto the scene. Afraid of losing his place in Andy's heart, Woody plots against Buzz. But when circumstances separate Buzz and Woody from their owner, the duo eventually learns to put aside their differences.","popularity":100.954,"poster_path":"/uXDfjJbdP4ijW5hWSBrPrlKpxab.jpg","release_date":"1995-10-30","title":"Toy Story","video":false,"vote_average":7.97,"vote_count":17277},{"adult":false,"backdrop_path":"/k7vO3lOOkxZ88Bieu5UQLEBKZOl.jpg","genre_ids":[99],"id":711704,"original_language":"en","original_title":"Making 'Toy Story'","overview":"Documentary of the making of the groundbreaking Disney/Pixar animated hit movie.","popularity":5.067,"poster_path":"/s2vPOYtuOY3DMYTduRx0cOzDcWm.jpg","release_date":"1995-12-02","title":"Making 'Toy Story'","video":false,"vote_average":7.0,"vote_count":5}],"total_pages":1,"total_results":2}

View File

@@ -0,0 +1,400 @@
import argparse
import html
import os
import json
import copy
import re
import util_themoviedb
import searchinc
import constant
def _plugin_run():
    """Parse command-line arguments, run the requested query and print the
    JSON result envelope on stdout.

    Failures never raise out of this function: they are reported through the
    printed ``{'success': False, 'error_code': ...}`` object.
    """
    def _str2bool(value):
        # BUGFIX: argparse's ``type=bool`` treats ANY non-empty string as
        # True (including "False"), so --allowguess could never be disabled
        # from the command line.  Parse textual booleans explicitly.
        return str(value).strip().lower() not in ('', '0', 'false', 'no', 'n', 'f')

    parser = argparse.ArgumentParser()
    parser.add_argument("--input", type=str, required=True, help='json string')
    parser.add_argument("--lang", type=str, required=True, help='enu|cht|...')
    parser.add_argument("--type", type=str, required=True, help='movie|tvshow|...')
    parser.add_argument("--limit", type=int, default=1, help='result count')
    parser.add_argument("--allowguess", type=_str2bool, default=True)
    # parse_known_args: tolerate unknown parameters instead of erroring out
    args, _unknown = parser.parse_known_args()
    argv_input = json.loads(args.input)
    cookie_path = searchinc.create_cookie_file()
    result = None
    success = True
    error_code = 0
    try:
        if args.type == 'movie_similar':
            result = _similar(argv_input, args.lang, args.type, args.limit)
        else:
            result = _process(argv_input, args.lang, args.type, args.limit, args.allowguess)
    except SystemExit:
        # searchinc aborts with sys.exit() on unrecoverable HTTP errors
        error_code = constant.ERROR_PLUGIN_QUERY_FAIL
        success = False
    except Exception:
        # any parsing problem in the TMDB payload ends up here
        error_code = constant.ERROR_PLUGIN_PARSE_RESULT_FAIL
        success = False
    searchinc.delete_cookie_file(cookie_path)
    _process_output(success, error_code, result)
def _process(input_obj, lang, media_type, limit, allowguess):
    """Search TMDB with progressively looser title guesses, then fetch and
    parse metadata for the first query that produced hits."""
    title = input_obj['title']
    year = _get_year(input_obj)
    season = input_obj.get('season', 0)
    episode = input_obj.get('episode')
    query_data = []
    for candidate in searchinc.get_guessing_names(title, allowguess):
        if not candidate:
            continue
        query_data = util_themoviedb.search_media(candidate, lang, limit, media_type, year)
        if query_data:
            break
    return _get_metadata(query_data, lang, media_type, season, episode, limit)
def _similar(input_obj, lang, media_type, limit):
    """Return up to ``limit`` movies similar to the given TMDB movie id."""
    try:
        item_id = int(input_obj['tmdb_id'])
    except KeyError:
        item_id = -1
    if item_id < 0:
        return []
    seed = {'id': item_id, 'collection_id': -1, 'lang': lang}
    return _get_similar_movies([seed], lang, limit)
def _get_year(input_obj):
    """Extract a release year from the input, preferring the tvshow date
    nested under 'extra' (so episodes match against the show's year)."""
    year = 0
    if 'original_available' in input_obj:
        year = searchinc.parse_year(input_obj['original_available'])
    if 'extra' in input_obj:
        extra = input_obj['extra']
        if 'tvshow' in extra and 'original_available' in extra['tvshow']:
            year = searchinc.parse_year(extra['tvshow']['original_available'])
    return year
def _get_metadata(query_data, lang, media_type, season, episode, limit):
    """Fetch detail data for each search hit and convert it into the
    plugin's result format, stopping once ``limit`` entries are collected."""
    result = []
    for item in query_data:
        if item['lang'] != lang:
            continue
        # fetch the full detail record for this hit
        if media_type == 'movie':
            media_data = util_themoviedb.get_movie_detail_data(
                item['id'], item['lang'], constant.DEFAULT_EXPIRED_TIME)
        elif media_type in ('tvshow', 'tvshow_episode'):
            media_data = util_themoviedb.get_tv_detail_data(item['id'], item['lang'])
        else:
            return []
        if not media_data:
            continue
        # convert it to one (movie/tvshow) or several (episodes) entries
        if media_type == 'movie':
            result.append(_parse_movie_info(media_data))
        elif media_type == 'tvshow':
            result.append(_parse_tvshow_info(media_data))
        else:  # tvshow_episode
            episode_data = util_themoviedb.get_tv_episode_detail_data(
                media_data['id'], lang, season, episode)
            result.extend(_parse_episodes_info(media_data, episode_data, season, episode))
        if len(result) >= limit:
            result = result[:limit]
            break
    return result
def _get_similar_movies(query_data, lang, limit):
    """Collect up to ``limit`` similar movies for each seed in ``query_data``.

    For each seed movie, first pull the titles of its collection (franchise),
    then page through TMDB's 'similar movies' endpoint until the limit is
    reached.  Duplicates across both sources are filtered via ``ids``.
    """
    result = []
    ids = []
    for item in query_data:
        if item['lang'] != lang:
            continue
        if 'collection_id' in item:
            # resolve the collection id lazily if the seed carries -1
            if 0 >= item['collection_id']:
                item['collection_id'] = _get_collection_id(item['id'], item['lang'])
            if 0 < item['collection_id']:
                collection_response = util_themoviedb.get_movie_collection_data(item['collection_id'], item['lang'])
                if collection_response:
                    result, ids = _parse_similar_data_to_result_and_ids(
                        collection_response['parts'], limit, result, ids)
                    if len(result) >= limit:
                        break
        # page through the 'similar' endpoint until enough results or no pages left
        page = 1
        while True:
            similar_response = util_themoviedb.get_movie_similar_data(item['id'], lang, page)
            if not similar_response:
                break
            result, ids = _parse_similar_data_to_result_and_ids(similar_response['results'], limit, result, ids)
            if len(result) >= limit:
                break
            if similar_response['page'] >= similar_response['total_pages']:
                break
            page = similar_response['page'] + 1
        if len(result) >= limit:
            break
    return result
def _get_collection_id(item_id, lang):
    """Return the id of the TMDB collection the movie belongs to, or -1."""
    movie_data = util_themoviedb.get_movie_detail_data(
        item_id, lang, constant.DEFAULT_LONG_EXPIRED_TIME)
    if not movie_data:
        return -1
    collection = movie_data.get('belongs_to_collection')
    if collection and 'id' in collection:
        return collection['id']
    return -1
def _parse_similar_data_to_result_and_ids(movies, limit, result, ids):
    """Append unseen movies as {'title', 'id'} entries to ``result``.

    ``ids`` records already-emitted movie ids so duplicates between the
    collection list and the similar list are skipped.  Stops at ``limit``.
    """
    for movie in movies:
        movie_id = movie['id']
        movie_title = movie['title']
        if movie_id in ids:
            continue
        entry = copy.deepcopy(constant.MOVIE_SIMILAR_DATA_TEMPLATE)
        entry['title'] = movie_title
        entry['id'] = movie_id
        result.append(entry)
        ids.append(movie_id)
        if len(result) >= limit:
            break
    return result, ids
def _parse_movie_info(movie_data):
    """Map a TMDB movie-detail response onto MOVIE_DATA_TEMPLATE."""
    data = copy.deepcopy(constant.MOVIE_DATA_TEMPLATE)
    data['title'] = movie_data['title']
    data['original_available'] = movie_data['release_date']
    data['tagline'] = movie_data['tagline']
    data['summary'] = movie_data['overview']
    data['certificate'] = _parse_movie_certificate_info(movie_data)
    data['genre'] = _parse_genre(movie_data)
    data['actor'], data['director'], data['writer'] = _get_cast_info(movie_data['credits'])
    extra = ['extra', constant.PLUGINID]
    data = _set_data_value(data, extra + ['reference', 'themoviedb'], movie_data['id'])
    data = _set_data_value(data, extra + ['reference', 'imdb'], movie_data['imdb_id'])
    # the truthiness guards below skip missing/zero values entirely
    if movie_data['vote_average']:
        data = _set_data_value(data, extra + ['rating', 'themoviedb'],
                               movie_data['vote_average'])
    if movie_data['poster_path']:
        data = _set_data_value(data, extra + ['poster'],
                               [constant.BANNER_URL + movie_data['poster_path']])
    if movie_data['backdrop_path']:
        data = _set_data_value(data, extra + ['backdrop'],
                               [constant.BACKDROP_URL + movie_data['backdrop_path']])
    collection = movie_data['belongs_to_collection']
    if collection and ('id' in collection):
        data = _set_data_value(data, extra + ['collection_id', 'themoviedb'],
                               collection['id'])
    return data
def _parse_tvshow_info(tv_data):
    """Map a TMDB tv-detail response onto TVSHOW_DATA_TEMPLATE."""
    data = copy.deepcopy(constant.TVSHOW_DATA_TEMPLATE)
    data['title'] = tv_data['name']
    data['original_available'] = tv_data['first_air_date']
    data['summary'] = tv_data['overview']
    poster = tv_data['poster_path']
    if poster:
        data = _set_data_value(data, ['extra', constant.PLUGINID, 'poster'],
                               [constant.BANNER_URL + poster])
    backdrop = tv_data['backdrop_path']
    if backdrop:
        data = _set_data_value(data, ['extra', constant.PLUGINID, 'backdrop'],
                               [constant.BACKDROP_URL + backdrop])
    return data
def _parse_episodes_info(tv_data, episode_data, season, episode):
    """Build result entries for one episode, or for every episode of the
    season when ``episode`` is None (``episode_data`` is then a season)."""
    if episode is not None:
        return [_parse_episode_info(tv_data, episode_data, season, episode)]
    return [_parse_episode_info(tv_data, one_episode, season, episode)
            for one_episode in episode_data['episodes']]
def _parse_episode_info(tv_data, episode_data, season, episode):
    """Build one TVSHOW_EPISODE_DATA_TEMPLATE entry from a tv-detail
    response (``tv_data``) plus an episode detail object (``episode_data``).

    ``episode_data`` may be falsy when TMDB has no data for the episode; in
    that case only title/season/episode and the embedded tvshow info are set.
    """
    data = copy.deepcopy(constant.TVSHOW_EPISODE_DATA_TEMPLATE)
    data['title'] = tv_data['name']
    data['season'] = season
    # prefer the episode number reported by TMDB; fall back to the requested
    # episode argument (which may be None when listing a whole season)
    data['episode'] = episode_data['episode_number'] if episode_data != None and 'episode_number' in episode_data else episode
    tvshow_data = _parse_tvshow_info(tv_data)
    data = _set_data_value(data, ['extra', constant.PLUGINID, 'tvshow'], tvshow_data)
    if not episode_data:
        return data
    data['tagline'] = episode_data['name']
    data['original_available'] = episode_data['air_date']
    data['summary'] = episode_data['overview']
    data['certificate'] = _parse_tv_certificate_info(tv_data)
    data['genre'] = _parse_genre(tv_data)
    if 'credits' in episode_data:
        # single-episode responses presumably nest cast under 'credits'
        actor, director, writer = _get_cast_info(episode_data['credits'])
    else:
        # season-listing episode objects appear to carry cast fields at top
        # level, so pass the episode object itself
        actor, director, writer = _get_cast_info(episode_data)
    data['actor'] = actor
    data['director'] = director
    data['writer'] = writer
    if episode_data['still_path']:
        data = _set_data_value(data, ['extra', constant.PLUGINID, 'poster'], [
            constant.BANNER_URL + episode_data['still_path']])
    data = _set_data_value(data, ['extra', constant.PLUGINID, 'reference', 'themoviedb_tv'], tv_data['id'])
    data = _set_data_value(data, ['extra', constant.PLUGINID, 'reference', 'imdb'], tv_data['external_ids']['imdb_id'])
    data = _set_data_value(data, ['extra', constant.PLUGINID, 'rating', 'themoviedb_tv'], tv_data['vote_average'])
    return data
def _set_data_value(data, key_list, value):
if not value:
return data
now_data = data
for attr in key_list[:-1]:
if attr not in now_data:
now_data[attr] = {}
now_data = now_data[attr]
now_data[key_list[-1]] = value
return data
def _get_cast_info(cast_data):
actor = []
director = []
writer = []
if 'cast' in cast_data:
for item in cast_data['cast']:
if item['name'] not in actor:
actor.append(item['name'])
# only for tvshow episode
if 'guest_stars' in cast_data:
for item in cast_data['guest_stars']:
if item['name'] not in actor:
actor.append(item['name'])
if 'crew' in cast_data:
for item in cast_data['crew']:
if (item['department'] == 'Directing') and (item['name'] not in director):
director.append(item['name'])
if (item['department'] == 'Writing') and (item['name'] not in writer):
writer.append(item['name'])
return actor, director, writer
def _parse_movie_certificate_info(movie_data):
release_data = movie_data['releases']
certificate = {}
for item in release_data['countries']:
if not item['certification']:
continue
if item['iso_3166_1'].lower() == 'us':
return item['certification']
certificate[item['iso_3166_1']] = item['certification']
if len(certificate) == 0:
return None
return list(certificate.values())[0]
def _parse_tv_certificate_info(tv_data):
certificate = {}
for item in tv_data['content_ratings']['results']:
if not item['rating']:
continue
if item['iso_3166_1'].lower() == 'us':
return item['rating']
certificate[item['iso_3166_1']] = item['rating']
if len(certificate) == 0:
return None
return list(certificate.values())[0]
def _parse_genre(media_data):
genre = []
for item in media_data['genres']:
if item['name'] not in genre:
genre.append(item['name'])
return genre
def _process_output(success, error_code, datas):
result_obj = {}
if success:
result_obj = {'success': True, 'result': datas}
else:
result_obj = {'success': False, 'error_code': error_code}
json_string = json.dumps(result_obj, ensure_ascii=False, separators=(',', ':'))
json_string = html.unescape(json_string)
print(json_string)
# Script entry point: run the plugin only when executed directly.
if __name__ == "__main__":
    _plugin_run()

View File

@@ -0,0 +1,221 @@
import os
import json
import shlex
import random
import tempfile
import re
import pickle
import urllib
import http
import http.cookiejar
import sys
# Absolute directory of this plugin's installed files (symlinks resolved);
# the on-disk cache lives underneath it.
PKG_INSTALL_DIR = os.path.dirname(os.path.realpath(__file__))
def get_plugin_data_directory(pluginId):
    """Return (creating it if needed, mode 0755) the on-disk cache directory.

    Also occasionally triggers cleanup of stale cache files.  Note that
    ``pluginId`` is currently unused; the directory is shared per package.
    """
    _remove_plugin_data()
    pluginDirectory = PKG_INSTALL_DIR + '/plugin_data'
    if not os.path.exists(pluginDirectory):
        # force mode 0755 regardless of the process umask; restore the
        # umask even if makedirs fails (the original leaked it on error)
        oldmask = os.umask(0)
        try:
            os.makedirs(pluginDirectory, 0o755)
        finally:
            os.umask(oldmask)
    return pluginDirectory
def load_local_cache(cache_path):
    """Read a JSON cache file and return the parsed object.

    Returns None on any read or parse failure (missing file, permission
    error, corrupt JSON).
    """
    try:
        with open(cache_path, 'r') as f:
            return json.load(f)
    except (OSError, ValueError):
        # narrowed from a bare ``except``, which would also swallow
        # KeyboardInterrupt and programming errors; json.JSONDecodeError
        # and UnicodeDecodeError are ValueError subclasses
        return None
def http_get_download(url, filepath):
    """Download ``url`` via GET and store the UTF-8 body at ``filepath``.

    Returns True on success, False when there is no usable body — including
    TMDB status_code 34 ("resource not found"), which is a legitimate miss
    for e.g. an episode that exists in no season listing.  Any other
    HTTP/network failure aborts the plugin via sys.exit(), which the caller
    maps to ERROR_PLUGIN_QUERY_FAIL.
    """
    result = None
    timeouts = 30  # seconds
    header = {
        r'user-agent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_1; de-de) AppleWebKit/527+ (KHTML, like Gecko) Version/3.1.1 Safari/525.20',
    }
    cookie = http.cookiejar.LWPCookieJar()
    use_cookie = False
    # create_cookie_file() publishes the module-global ``cookie_path``;
    # reuse the cookie jar only when that file actually exists
    if 'cookie_path' in globals():
        global cookie_path
        if os.path.exists(cookie_path):
            use_cookie = True
    try:
        if use_cookie:
            cookie.load(cookie_path, ignore_discard=True, ignore_expires=True)
        handler = urllib.request.HTTPCookieProcessor(cookie)
        opener = urllib.request.build_opener(handler)
        request = urllib.request.Request(url=url, headers=header, method='GET')
        response = opener.open(request, timeout=timeouts)
        result = response.read().decode('utf-8')
        if use_cookie:
            # persist any cookies the server set for subsequent requests
            cookie.save(filename=cookie_path, ignore_discard=True, ignore_expires=True)
    except urllib.error.HTTPError as http_e:
        if http_e.code == 404:
            response_obj = json.loads(http_e.read().decode())
            if response_obj.get('status_code') == 34:
                # a tvshow may resolve fine while a particular episode does
                # not; treat "not found" as a soft miss so processing goes on
                return False
        sys.exit()
    except Exception:
        # unexpected error: abort the whole plugin run
        sys.exit()
    if(not result):
        return False
    with open(filepath, 'w') as f:
        f.write(result)
    return True
def parse_year(date_string):
    """Parse the year from '2008', 2008 or '2008-01-03'.

    Accepts an int (returned unchanged) or a string whose leading
    '-'-separated field is the year; returns 0 when unparsable.
    """
    if isinstance(date_string, int):
        return date_string
    try:
        return int(date_string.split('-', 1)[0])
    except (AttributeError, ValueError, TypeError):
        # non-string input or a non-numeric leading field -> unknown year
        # (narrowed from a bare ``except``)
        return 0
def get_guessing_names(title, allowguess):
    """Generate progressively looser title candidates for searching.

    Starts with the raw title and its non-English token form, then adds the
    English-only form and — for longer titles — versions with effective
    words trimmed from either or both ends.
    """
    if not allowguess:
        return [title]
    candidates = [title, _pure_lang_text(title, False)]
    eng_title = _pure_lang_text(title, True) or title
    word_count = _get_effective_word_count(eng_title)
    if word_count >= 2:
        candidates.append(eng_title)
    if word_count >= 3:
        candidates.append(_cut_string(eng_title, 1, True))
        candidates.append(_cut_string(eng_title, 1, False))
    if word_count >= 4:
        candidates.append(_cut_string(_cut_string(eng_title, 1, False), 1, True))
        candidates.append(_cut_string(eng_title, 2, True))
        candidates.append(_cut_string(eng_title, 2, False))
    if word_count >= 6:
        candidates.append(_cut_string(_cut_string(eng_title, 2, False), 2, True))
    return candidates
def create_cookie_file():
    """Create an empty LWP cookie jar file under /tmp and publish its path.

    The path is stored in the module-global ``cookie_path`` so that
    http_get_download() can pick it up; the caller is responsible for
    removing the file afterwards via delete_cookie_file().
    """
    # delete=False: the file must outlive this function; cleanup is explicit
    tmpfile = tempfile.NamedTemporaryFile('w+t', prefix='plugin_cookie_', dir='/tmp', delete=False)
    path = tmpfile.name
    cookie = http.cookiejar.LWPCookieJar()
    cookie.save(filename=path, ignore_discard=True, ignore_expires=True)
    global cookie_path
    cookie_path = path
    return path
def delete_cookie_file(cookie_file):
    """Remove the temporary cookie file if it is still present."""
    if not os.path.exists(cookie_file):
        return
    os.remove(cookie_file)
def _remove_plugin_data():
    """Purge cache files older than one day — but only on roughly 1 in 1000
    calls, so the cleanup cost stays negligible on the hot path."""
    randval = random.randrange(0, 1000)
    if randval != 0:
        return
    path = PKG_INSTALL_DIR + '/plugin_data/'
    if not os.path.exists(path):
        return
    # path is shlex.quote()d, so the shell invocation below is injection-safe
    cmd = '/usr/bin/find ' + shlex.quote(path) + ' -mtime +1 -delete'
    os.system(cmd)
def _pure_lang_text(text, only_english):
    """Filter ``text`` down to tokens of a single script family.

    With only_english=True keep only pure-ASCII-letter words; otherwise keep
    words containing no ASCII letters (e.g. CJK text or symbols).  Pure-digit
    words are kept in both modes; mixed letter+digit words like 'hi123' are
    dropped.  Returns '' when nothing but digit tokens survived.
    """
    all_num = True
    token = []
    data = [x for x in text.split(' ') if x]
    for term in data:
        containCharResult = re.search('[a-z]', term, re.IGNORECASE)
        containDigitResult = re.search('[0-9]', term, re.IGNORECASE)
        if (containCharResult != None) and (containDigitResult != None):
            # mixed char+digit words like 'hi123' are ignored
            continue
        allDigitResult = re.search('^[0-9]+$', term, re.IGNORECASE)
        if allDigitResult != None:
            # pure digits are accepted in both modes
            token.append(term)
            continue
        allCharResult = re.search('^[a-z]+$', term, re.IGNORECASE)
        if only_english and (allCharResult != None):
            # pure english word
            all_num = False
            token.append(term)
            continue
        if (not only_english) and (allCharResult == None):
            # not a pure english word: e.g. CJK text or symbols
            all_num = False
            token.append(term)
            continue
    if all_num:
        # only digit tokens were kept: no usable title text in this mode
        return ''
    return ' '.join(token)
def _get_effective_word_count(token):
filter = ['a', 'an', 'the', 'of', 'in', 'on', 'at', 'for', 'by']
if not isinstance(token, list):
token = [x for x in token.split(' ') if x]
count = 0
for term in token:
if term.lower() in filter:
continue
count += 1
return count
def _cut_string(text, cut_count, cut_from_right):
    """Drop words from one end of ``text`` until ``cut_count`` effective
    (non stop-word) words have been removed, always keeping at least one
    word."""
    words = [w for w in text.split(' ') if w]
    original_count = _get_effective_word_count(words)
    removed = 0
    while len(words) > 1 and removed < cut_count:
        if cut_from_right:
            words.pop()
        else:
            words.pop(0)
        removed = original_count - _get_effective_word_count(words)
    return ' '.join(words)

View File

@@ -0,0 +1,257 @@
import os
import urllib
import time
import json
import searchinc
import constant
def search_media(name, lang, limit, media_type, year):
    """Query the TMDB search endpoint, paging until ``limit`` usable hits
    are collected; returns [] for unknown media types or empty results."""
    if media_type == 'movie':
        search_func = _get_movie_search_data
    elif media_type in ('tvshow', 'tvshow_episode'):
        search_func = _get_tv_search_data
    else:
        return []
    page = 1
    search_data = search_func(name, lang, year, page)
    if not search_data.get('total_pages'):
        return []
    total_pages = search_data['total_pages']
    total_result = parse_search_data(search_data, lang, limit, media_type, year)
    while len(total_result) < limit and page < total_pages:
        page += 1
        search_data = search_func(name, lang, year, page)
        total_result.extend(parse_search_data(search_data, lang, limit, media_type, year))
    if 0 < limit < len(total_result):
        total_result = total_result[:limit]
    return total_result
def parse_search_data(search_data, lang, limit, media_type, year):
    """Turn raw TMDB search results into {'id', 'lang'} entries.

    Skips items without a translation in ``lang`` and, when a target year
    is given, items whose known release year differs by two or more years.
    """
    if not search_data.get('results'):
        return []
    result = []
    for item in search_data['results']:
        if not _is_translation_available(item['id'], lang, media_type):
            continue
        if year and 'release_date' in item:
            item_year = searchinc.parse_year(item['release_date'])
            # an item_year of 0 means "unknown" and is not filtered out
            if item_year and abs(item_year - year) >= 2:
                continue
        result.append({'id': item['id'], 'lang': lang})
        if 0 < limit <= len(result):
            break
    return result
def _get_movie_search_data(name, lang, year, page):
    """Fetch (with one-day caching) one page of TMDB movie search results."""
    convert_lang = _convert_to_api_lang(lang)
    name_encoded = urllib.parse.quote_plus(name)
    base_dir = searchinc.get_plugin_data_directory(constant.PLUGINID)
    cache_path = (base_dir + '/movie/query/' + name_encoded + '_' + str(year)
                  + '_' + convert_lang + '_' + str(page) + '.json')
    # example: https://api.themoviedb.org/3/search/movie?api_key=xxxxx&query=harry%20potter&language=cht&year=0&page=1
    url = (constant.THEMOVIEDB_URL + 'search/movie'
           + '?api_key=' + constant.APIKEY
           + '&query=' + name_encoded
           + '&language=' + convert_lang
           + '&year=' + str(year)
           + '&page=' + str(page))
    return _get_data_from_cache_or_download(url, cache_path, constant.DEFAULT_EXPIRED_TIME)
def _get_movie_translation_data(item_id):
    """Fetch (with one-day caching) the available translations of a movie."""
    base_dir = searchinc.get_plugin_data_directory(constant.PLUGINID)
    cache_path = base_dir + '/movie/' + str(item_id) + '/translation.json'
    # example: https://api.themoviedb.org/3/movie/671/translations?api_key=xxxxx
    url = (constant.THEMOVIEDB_URL + 'movie/' + str(item_id)
           + '/translations?api_key=' + constant.APIKEY)
    return _get_data_from_cache_or_download(url, cache_path, constant.DEFAULT_EXPIRED_TIME)
def get_movie_detail_data(item_id, lang, expired_time):
    """Fetch (cached for ``expired_time`` seconds) movie details with
    credits and releases appended in the same request."""
    convert_lang = _convert_to_api_lang(lang)
    base_dir = searchinc.get_plugin_data_directory(constant.PLUGINID)
    cache_path = base_dir + '/movie/' + str(item_id) + '/' + convert_lang + '.json'
    # example: https://api.themoviedb.org/3/movie/671?api_key=xxxxx&append_to_response=credits,releases&language=zh-tw
    url = (constant.THEMOVIEDB_URL + 'movie/' + str(item_id)
           + '?api_key=' + constant.APIKEY
           + '&language=' + convert_lang
           + '&append_to_response=credits,releases')
    return _get_data_from_cache_or_download(url, cache_path, expired_time)
def get_movie_similar_data(item_id, lang, page):
    """Fetch (cached, 30-day TTL) one page of TMDB 'similar movies'."""
    convert_lang = _convert_to_api_lang(lang)
    base_dir = searchinc.get_plugin_data_directory(constant.PLUGINID)
    cache_path = (base_dir + '/movie/' + str(item_id) + '/' + convert_lang
                  + '_' + str(page) + '_similar.json')
    # example: https://api.themoviedb.org/3/movie/671/similar?api_key=xxxxx&language=zh-tw&page=1
    url = (constant.THEMOVIEDB_URL + 'movie/' + str(item_id)
           + '/similar?api_key=' + constant.APIKEY
           + '&language=' + convert_lang
           + '&page=' + str(page))
    return _get_data_from_cache_or_download(url, cache_path, constant.DEFAULT_LONG_EXPIRED_TIME)
def get_movie_collection_data(item_id, lang):
    """Fetch (cached, 30-day TTL) a movie collection (franchise) listing.

    Note: ``item_id`` here is a TMDB *collection* id, though the cache file
    is stored under the shared 'movie' directory.
    """
    convert_lang = _convert_to_api_lang(lang)
    base_dir = searchinc.get_plugin_data_directory(constant.PLUGINID)
    cache_path = (base_dir + '/movie/' + str(item_id) + '/'
                  + convert_lang + '_collection.json')
    # example: https://api.themoviedb.org/3/collection/1241?api_key=xxxxx&language=zh-tw
    url = (constant.THEMOVIEDB_URL + 'collection/' + str(item_id)
           + '?api_key=' + constant.APIKEY
           + '&language=' + convert_lang)
    return _get_data_from_cache_or_download(url, cache_path, constant.DEFAULT_LONG_EXPIRED_TIME)
def _get_tv_search_data(name, lang, year, page):
    """Fetch (with one-day caching) one page of TMDB tv search results."""
    convert_lang = _convert_to_api_lang(lang)
    name_encoded = urllib.parse.quote_plus(name)
    base_dir = searchinc.get_plugin_data_directory(constant.PLUGINID)
    cache_path = (base_dir + '/tv/query/' + name_encoded + '_' + str(year)
                  + '_' + convert_lang + '_' + str(page) + '.json')
    # example: https://api.themoviedb.org/3/search/tv?api_key=xxxxx&query=superman&language=en&year=0&page=1
    url = (constant.THEMOVIEDB_URL + 'search/tv'
           + '?api_key=' + constant.APIKEY
           + '&query=' + name_encoded
           + '&language=' + convert_lang
           + '&year=' + str(year)
           + '&page=' + str(page))
    return _get_data_from_cache_or_download(url, cache_path, constant.DEFAULT_EXPIRED_TIME)
def get_tv_detail_data(item_id, lang):
    """Fetch (with one-day caching) tv-show details with credits, content
    ratings and external ids appended in the same request."""
    convert_lang = _convert_to_api_lang(lang)
    base_dir = searchinc.get_plugin_data_directory(constant.PLUGINID)
    cache_path = base_dir + '/tv/' + str(item_id) + '/' + convert_lang + '.json'
    # example: https://api.themoviedb.org/3/tv/1403?api_key=xxxxx&append_to_response=credits,content_ratings,external_ids&language=en
    url = (constant.THEMOVIEDB_URL + 'tv/' + str(item_id)
           + '?api_key=' + constant.APIKEY
           + '&language=' + convert_lang
           + '&append_to_response=credits,content_ratings,external_ids')
    return _get_data_from_cache_or_download(url, cache_path, constant.DEFAULT_EXPIRED_TIME)
def get_tv_episode_detail_data(item_id, lang, season, episode):
    """Fetch (with one-day caching) details for one episode, or for the
    whole season when ``episode`` is None, with credits appended."""
    convert_lang = _convert_to_api_lang(lang)
    episode_suffix = '' if episode is None else '_e' + str(episode)
    base_dir = searchinc.get_plugin_data_directory(constant.PLUGINID)
    cache_path = (base_dir + '/tv/' + str(item_id) + '/' + convert_lang
                  + '_s' + str(season) + episode_suffix + '.json')
    # example: https://api.themoviedb.org/3/tv/1403/season/1/episode/3?api_key=xxxxx&language=en&append_to_response=credits
    episode_segment = '' if episode is None else '/episode/' + str(episode)
    url = (constant.THEMOVIEDB_URL + 'tv/' + str(item_id)
           + '/season/' + str(season) + episode_segment
           + '?api_key=' + constant.APIKEY
           + '&language=' + convert_lang
           + '&append_to_response=credits')
    return _get_data_from_cache_or_download(url, cache_path, constant.DEFAULT_EXPIRED_TIME)
def _get_tv_translation_data(item_id):
    """Fetch (with one-day caching) the available translations of a show."""
    base_dir = searchinc.get_plugin_data_directory(constant.PLUGINID)
    cache_path = base_dir + '/tv/' + str(item_id) + '/translation.json'
    # example: https://api.themoviedb.org/3/tv/1403/translations?api_key=xxxxx
    url = (constant.THEMOVIEDB_URL + 'tv/' + str(item_id)
           + '/translations?api_key=' + constant.APIKEY)
    return _get_data_from_cache_or_download(url, cache_path, constant.DEFAULT_EXPIRED_TIME)
def _get_data_from_cache_or_download(url, cache_path, expired_time):
    """Return parsed JSON for ``url``, served from ``cache_path`` when the
    cached copy is younger than ``expired_time`` seconds.

    Stale or unreadable cache files are removed and re-downloaded.  Returns
    None when the download yields no usable data.
    """
    result = None
    if os.path.exists(cache_path):
        last_modify_time = os.path.getmtime(cache_path)
        if expired_time > (time.time()-last_modify_time):
            result = searchinc.load_local_cache(cache_path)
            if result != None:
                return result
        # cache expired or unparsable: drop it and fall through to download
        os.remove(cache_path)
    else:
        directory_path = os.path.dirname(cache_path)
        if not os.path.exists(directory_path):
            # force mode 0755 regardless of the process umask
            oldmask = os.umask(0)
            os.makedirs(directory_path, 0o755)
            os.umask(oldmask)
    download_success = searchinc.http_get_download(url, cache_path)
    if download_success:
        result = searchinc.load_local_cache(cache_path)
    return result
def _is_translation_available(item_id, lang, mediaType):
    """Return True when TMDB offers a translation matching ``lang``."""
    if mediaType == 'movie':
        translation_data = _get_movie_translation_data(item_id)
    elif mediaType in ('tvshow', 'tvshow_episode'):
        translation_data = _get_tv_translation_data(item_id)
    else:
        return False
    if not translation_data:
        return False
    return _convert_to_api_lang(lang) in _parse_translation(translation_data)
def _parse_translation(translationData):
langList = []
for item in translationData['translations']:
iso639 = item['iso_639_1']
iso3166 = item['iso_3166_1']
langList.append(iso639 + '-' + iso3166)
return langList
def _convert_to_api_lang(lang):
langDict = {
'chs': 'zh-CN', 'cht': 'zh-TW', 'csy': 'cs-CZ', 'dan': 'da-DK',
'enu': 'en-US', 'fre': 'fr-FR', 'ger': 'de-DE', 'hun': 'hu-HU',
'ita': 'it-IT', 'jpn': 'ja-JP', 'krn': 'ko-KR', 'nld': 'nl-NL',
'nor': 'no-NO', 'plk': 'pl-PL', 'ptb': 'pt-BR', 'ptg': 'pt-PT',
'rus': 'ru-RU', 'spn': 'es-ES', 'sve': 'sv-SE', 'trk': 'tr-TR',
'tha': 'th-TH'
}
if lang in langDict.keys():
return langDict[lang]
if lang in langDict.values():
return lang
return None