Mirror of https://github.com/clinton-hall/nzbToMedia.git (synced 2025-08-20 21:33:13 -07:00)
Refactor

Remove unnecessary else statements. Fix variable names.

parent 71a242ccc1
commit a4f593fc9c

25 changed files with 354 additions and 371 deletions
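Two mechanical patterns repeat across every file below: branches that follow a `return` lose their `else`/`elif` and become guard clauses, and terse names (`r`, `n`, `m`, `kv`, `f1`) become descriptive ones (`response`, `num`, `part`, `value`, `fork_name`). A minimal standalone sketch of both patterns together; this is an illustration, not code from the repository, and the function name and URL handling are hypothetical:

import requests

def fetch_status(url):
    # After the refactor: a descriptive name plus early returns,
    # so no else branch is needed once a branch has returned.
    try:
        response = requests.get(url, timeout=(30, 120))  # was: r = requests.get(...)
    except requests.ConnectionError:
        return None
    if response.status_code != requests.codes.ok:
        return None  # was: an else: wrapping the happy path below
    return response.json()
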
@@ -16,7 +16,7 @@ from subprocess import PIPE, DEVNULL
 from nzb2media import main_db
 from nzb2media import version_check
 from nzb2media import databases
-from nzb2media.configuration import config
+from nzb2media.configuration import Config
 from nzb2media.nzb.configuration import configure_nzbs
 from nzb2media.plugins.plex import configure_plex
 from nzb2media.torrent.configuration import configure_torrent_class
@@ -410,7 +410,7 @@ def configure_migration():
     global CFG

     # run migrate to convert old cfg to new style cfg plus fix any cfg missing values/options.
-    if not config.migrate():
+    if not Config.migrate():
         log.error(f'Unable to migrate config file {CONFIG_FILE}, exiting ...')
         if 'NZBOP_SCRIPTDIR' in os.environ:
             pass  # We will try and read config from Environment.
@@ -419,11 +419,11 @@ def configure_migration():

     # run migrate to convert NzbGet data from old cfg style to new cfg style
     if 'NZBOP_SCRIPTDIR' in os.environ:
-        CFG = config.addnzbget()
+        CFG = Config.addnzbget()

     else:  # load newly migrated config
         log.info(f'Loading config from [{CONFIG_FILE}]')
-        CFG = config()
+        CFG = Config()


 def configure_logging_part_2():

@@ -84,18 +84,15 @@ def process(
             f'{section}: Failed to post-process - Server returned status '
             f'{response.status_code}',
         )
-    elif response.text == 'OK':
+    if response.text == 'OK':
         log.debug(
             f'SUCCESS: ForceProcess for {dir_name} has been started in LazyLibrarian',
         )
         return ProcessResult.success(
             f'{section}: Successfully post-processed {input_name}',
         )
-    else:
-        log.error(
-            f'FAILED: ForceProcess of {dir_name} has Failed in LazyLibrarian',
-        )
-        return ProcessResult.failure(
-            f'{section}: Failed to post-process - Returned log from {section} '
-            f'was not as expected.',
-        )
+    log.error(f'FAILED: ForceProcess of {dir_name} has Failed in LazyLibrarian')
+    return ProcessResult.failure(
+        f'{section}: Failed to post-process - Returned log from {section} '
+        f'was not as expected.',
+    )

@@ -77,7 +77,7 @@ def process(

     log.debug(f'Opening URL: {url}')
     try:
-        r = requests.post(
+        response = requests.post(
             url, params=params, stream=True, verify=False, timeout=(30, 300),
         )
     except requests.ConnectionError:
@@ -86,18 +86,18 @@ def process(
             f'{section}: Failed to post-process - Unable to connect to '
             f'{section}',
         )
-    if r.status_code not in [
+    if response.status_code not in [
         requests.codes.ok,
         requests.codes.created,
         requests.codes.accepted,
     ]:
-        log.error(f'Server returned status {r.status_code}')
+        log.error(f'Server returned status {response.status_code}')
         return ProcessResult.failure(
             f'{section}: Failed to post-process - Server returned status '
-            f'{r.status_code}',
+            f'{response.status_code}',
         )

-    for line in r.text.split('\n'):
+    for line in response.text.split('\n'):
         if line:
             log.debug(line)
             if 'Post Processing SUCCESSFUL' in line:
@@ -108,12 +108,11 @@ def process(
                 return ProcessResult.success(
                     f'{section}: Successfully post-processed {input_name}',
                 )
-            else:
-                log.warning(
-                    'The issue does not appear to have successfully processed. '
-                    'Please check your Logs',
-                )
-                return ProcessResult.failure(
-                    f'{section}: Failed to post-process - Returned log from '
-                    f'{section} was not as expected.',
-                )
+    log.warning(
+        'The issue does not appear to have successfully processed. '
+        'Please check your Logs',
+    )
+    return ProcessResult.failure(
+        f'{section}: Failed to post-process - Returned log from '
+        f'{section} was not as expected.',
+    )

@@ -34,7 +34,7 @@ class ProcessResult(typing.NamedTuple):

 def command_complete(url, params, headers, section):
     try:
-        r = requests.get(
+        respone = requests.get(
             url,
             params=params,
             headers=headers,
@@ -45,26 +45,25 @@ def command_complete(url, params, headers, section):
     except requests.ConnectionError:
         log.error(f'Unable to open URL: {url}')
         return None
-    if r.status_code not in [
+    if respone.status_code not in [
         requests.codes.ok,
         requests.codes.created,
         requests.codes.accepted,
     ]:
-        log.error(f'Server returned status {r.status_code}')
+        log.error(f'Server returned status {respone.status_code}')
         return None
-    else:
-        try:
-            return r.json()['status']
-        except (ValueError, KeyError):
-            # ValueError catches simplejson's JSONDecodeError and
-            # json's ValueError
-            log.error(f'{section} did not return expected json data.')
-            return None
+    try:
+        return respone.json()['status']
+    except (ValueError, KeyError):
+        # ValueError catches simplejson's JSONDecodeError and
+        # json's ValueError
+        log.error(f'{section} did not return expected json data.')
+        return None


 def completed_download_handling(url2, headers, section='MAIN'):
     try:
-        r = requests.get(
+        response = requests.get(
             url2,
             params={},
             headers=headers,
@@ -75,16 +74,15 @@ def completed_download_handling(url2, headers, section='MAIN'):
     except requests.ConnectionError:
         log.error(f'Unable to open URL: {url2}')
         return False
-    if r.status_code not in [
+    if response.status_code not in [
         requests.codes.ok,
         requests.codes.created,
         requests.codes.accepted,
     ]:
-        log.error(f'Server returned status {r.status_code}')
+        log.error(f'Server returned status {response.status_code}')
         return False
-    else:
-        try:
-            return r.json().get('enableCompletedDownloadHandling', False)
-        except ValueError:
-            # ValueError catches simplejson's JSONDecodeError and json's ValueError
-            return False
+    try:
+        return response.json().get('enableCompletedDownloadHandling', False)
+    except ValueError:
+        # ValueError catches simplejson's JSONDecodeError and json's ValueError
+        return False

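The same three-code allow-list is repeated in every handler this commit touches. A hypothetical helper (not in the codebase) that captures the check once:

import requests

OK_CODES = (requests.codes.ok, requests.codes.created, requests.codes.accepted)

def status_ok(response):
    # True when the server answered 200, 201 or 202.
    return response.status_code in OK_CODES
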
@@ -72,7 +72,7 @@ def process(
     log.debug(f'Opening URL: {url}')

     try:
-        r = requests.get(url, params=params, verify=False, timeout=(30, 300))
+        resposne = requests.get(url, params=params, verify=False, timeout=(30, 300))
     except requests.ConnectionError:
         log.error('Unable to open URL')
         return ProcessResult.failure(
@@ -80,7 +80,7 @@ def process(
             f'{section}',
         )

-    result = r.json()
+    result = resposne.json()
     log.debug(result)
     if library:
         log.debug(f'moving files to library: {library}')
@@ -98,24 +98,23 @@ def process(
             f'{section}',
         )

-    if r.status_code not in [
+    if resposne.status_code not in [
         requests.codes.ok,
         requests.codes.created,
         requests.codes.accepted,
     ]:
-        log.error(f'Server returned status {r.status_code}')
+        log.error(f'Server returned status {resposne.status_code}')
         return ProcessResult.failure(
             f'{section}: Failed to post-process - Server returned status '
-            f'{r.status_code}',
+            f'{resposne.status_code}',
         )
-    elif result['success']:
+    if result['success']:
         log.debug(f'SUCCESS: Status for {gamez_id} has been set to {download_status} in Gamez')
         return ProcessResult.success(
             f'{section}: Successfully post-processed {input_name}',
         )
-    else:
-        log.error(f'FAILED: Status for {gamez_id} has NOT been updated in Gamez')
-        return ProcessResult.failure(
-            f'{section}: Failed to post-process - Returned log from {section} '
-            f'was not as expected.',
-        )
+    log.error(f'FAILED: Status for {gamez_id} has NOT been updated in Gamez')
+    return ProcessResult.failure(
+        f'{section}: Failed to post-process - Returned log from {section} '
+        f'was not as expected.',
+    )

@@ -320,7 +320,7 @@ def process(
             message=f'{section}: Failed to post-process - Server returned status {response.status_code}',
             status_code=1,
         )
-    elif section == 'CouchPotato' and result['success']:
+    if section == 'CouchPotato' and result['success']:
         log.debug(f'SUCCESS: Finished {method} scan for folder {dir_name}')
         if method == 'manage':
             return ProcessResult(
@@ -342,11 +342,10 @@ def process(
                 message=f'{section}: Successfully post-processed {input_name}',
                 status_code=status,
             )
-        else:
-            return ProcessResult(
-                message=f'{section}: Failed to post-process - changed status to {update_movie_status}',
-                status_code=1,
-            )
+        return ProcessResult(
+            message=f'{section}: Failed to post-process - changed status to {update_movie_status}',
+            status_code=1,
+        )
     else:
         log.error(f'FAILED: {method} scan was unable to finish for folder {dir_name}. exiting!')
         return ProcessResult(
@@ -366,7 +365,7 @@ def process(
             status_code=1,
             # Return as failed to flag this in the downloader.
         )  # Return failed flag, but log the event as successful.
-    elif section == 'Watcher3':
+    if section == 'Watcher3':
         log.debug(f'Sending failed download to {section} for CDH processing')
         path = remote_dir(dir_name) if remote_path else dir_name
         if input_name and os.path.isfile(
@@ -437,7 +436,7 @@ def process(
             status_code=1,
             message=f'{section}: Failed to post-process - Server returned status {response.status_code}',
         )
-    elif result['success']:
+    if result['success']:
         log.debug(f'SUCCESS: {input_name} has been set to ignored ...')
     else:
         log.warning(f'FAILED: Unable to set {input_name} to ignored!')
@@ -476,17 +475,17 @@ def process(
             f'{section}: Failed to post-process - Server returned status '
             f'{response.status_code}',
         )
-    elif result['success']:
+
+    if result['success']:
         log.debug('SUCCESS: Snatched the next highest release ...')
         return ProcessResult.success(
             f'{section}: Successfully snatched next highest release',
         )
-    else:
-        log.debug('SUCCESS: Unable to find a new release to snatch now. CP will keep searching!')
-        return ProcessResult.success(
-            f'{section}: No new release found now. '
-            f'{section} will keep searching',
-        )
+    log.debug('SUCCESS: Unable to find a new release to snatch now. CP will keep searching!')
+    return ProcessResult.success(
+        f'{section}: No new release found now. '
+        f'{section} will keep searching',
+    )

     # Added a release that was not in the wanted list so confirm rename
     # successful by finding this movie media.list.
@@ -539,7 +538,7 @@ def process(
         return ProcessResult.success(
             f'{section}: Successfully post-processed {input_name}',
         )
-    elif command_status in ['failed']:
+    if command_status in ['failed']:
         log.debug('The Scan command has failed. Renaming was not successful.')
         # return ProcessResult(
         #     message='{0}: Failed to post-process {1}'.format(section, input_name),
@@ -552,7 +551,7 @@ def process(
             f'{section}: Successfully post-processed {input_name}',
         )

-    elif not list_media_files(
+    if not list_media_files(
         dir_name, media=True, audio=False, meta=False, archives=True,
     ):
         log.debug(f'SUCCESS: Input Directory [{dir_name}] has no remaining media files. This has been fully processed.')
@@ -598,17 +597,17 @@ def get_release(base_url, imdb_id=None, download_id=None, release_id=None):
     log.debug(f'Opening URL: {url} with PARAMS: {params}')

     try:
-        r = requests.get(url, params=params, verify=False, timeout=(30, 60))
+        response = requests.get(url, params=params, verify=False, timeout=(30, 60))
     except requests.ConnectionError:
         log.error(f'Unable to open URL {url}')
         return results

     try:
-        result = r.json()
+        result = response.json()
     except ValueError:
         # ValueError catches simplejson's JSONDecodeError and json's ValueError
         log.error('CouchPotato returned the following non-json data')
-        for line in r.iter_lines():
+        for line in response.iter_lines():
             log.error(line)
         return results

@@ -623,8 +622,8 @@ def get_release(base_url, imdb_id=None, download_id=None, release_id=None):
     # Gather release info and return it back, no need to narrow results
     if release_id:
         try:
-            cur_id = result[section]['_id']
-            results[cur_id] = result[section]
+            key = result[section]['_id']
+            results[key] = result[section]
             return results
         except Exception:
             pass
@@ -651,38 +650,38 @@ def get_release(base_url, imdb_id=None, download_id=None, release_id=None):
                 ):
                     continue

-                cur_id = release['_id']
-                results[cur_id] = release
-                results[cur_id]['title'] = movie['title']
+                key = release['_id']
+                results[key] = release
+                results[key]['title'] = movie['title']
             except Exception:
                 continue

     # Narrow results by removing old releases by comparing their last_edit field
     if len(results) > 1:
         rem_id = set()
-        for id1, x1 in results.items():
-            for x2 in results.values():
+        for key, val1 in results.items():
+            for val2 in results.values():
                 try:
-                    if x2['last_edit'] > x1['last_edit']:
-                        rem_id.add(id1)
+                    if val2['last_edit'] > val1['last_edit']:
+                        rem_id.add(key)
                 except Exception:
                     continue
-        for id in rem_id:
-            results.pop(id)
+        for ea_id in rem_id:
+            results.pop(ea_id)

     # Search downloads on clients for a match to try and narrow our results down to 1
     if len(results) > 1:
         rem_id = set()
-        for cur_id, x in results.items():
+        for key, val1 in results.items():
             try:
                 if not find_download(
-                    str(x['download_info']['downloader']).lower(),
-                    x['download_info']['id'],
+                    str(val1['download_info']['downloader']).lower(),
+                    val1['download_info']['id'],
                 ):
-                    rem_id.add(cur_id)
+                    rem_id.add(key)
             except Exception:
                 continue
-        for id in rem_id:
-            results.pop(id)
+        for ea_id in rem_id:
+            results.pop(ea_id)

     return results

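A runnable sketch of the last_edit narrowing above, using the renamed key/val1/val2 shape; the sample data is made up:

def keep_newest(results):
    # Collect keys of entries that have a newer sibling, then pop them:
    # the same set-collect-then-pop shape as the hunk above.
    stale = set()
    for key, val1 in results.items():
        for val2 in results.values():
            if val2['last_edit'] > val1['last_edit']:
                stale.add(key)
    for ea_id in stale:
        results.pop(ea_id)
    return results

# keep_newest({'a': {'last_edit': 1}, 'b': {'last_edit': 2}}) -> {'b': {'last_edit': 2}}
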
@@ -134,7 +134,7 @@ def process(
             f'{section}: Failed to post-process - No change in wanted status',
         )

-    elif status == 0 and section == 'Lidarr':
+    if status == 0 and section == 'Lidarr':
         route = f'{web_root}/api/v1/command'
         url = nzb2media.utils.common.create_url(scheme, host, port, route)
         headers = {'X-Api-Key': apikey}
@@ -146,7 +146,7 @@ def process(
         data = {'name': 'Rename', 'path': dir_name}
         try:
             log.debug(f'Opening URL: {url} with data: {data}')
-            r = requests.post(
+            response = requests.post(
                 url,
                 data=json.dumps(data),
                 headers=headers,
@@ -162,7 +162,7 @@ def process(
             )

         try:
-            res = r.json()
+            res = response.json()
             scan_id = int(res['id'])
             log.debug(f'Scan started with id: {scan_id}')
         except Exception as error:
@@ -171,28 +171,31 @@ def process(
                 f'{section}: Failed to post-process - Unable to start scan',
             )

-        n = 0
+        num = 0
         params = {}
         url = f'{url}/{scan_id}'
-        while n < 6:  # set up wait_for minutes to see if command completes..
+        while num < 6:  # set up wait_for minutes to see if command completes..
             time.sleep(10 * wait_for)
             command_status = command_complete(url, params, headers, section)
             if command_status and command_status in ['completed', 'failed']:
                 break
-            n += 1
+            num += 1
         if command_status:
             log.debug(f'The Scan command return status: {command_status}')

         if not os.path.exists(dir_name):
             log.debug(f'The directory {dir_name} has been removed. Renaming was successful.')
             return ProcessResult.success(
                 f'{section}: Successfully post-processed {input_name}',
             )
-        elif command_status and command_status in ['completed']:
+
+        if command_status and command_status in ['completed']:
             log.debug('The Scan command has completed successfully. Renaming was successful.')
             return ProcessResult.success(
                 f'{section}: Successfully post-processed {input_name}',
             )
-        elif command_status and command_status in ['failed']:
+
+        if command_status and command_status in ['failed']:
             log.debug('The Scan command has failed. Renaming was not successful.')
             # return ProcessResult.failure(
             #     f'{section}: Failed to post-process {input_name}'
@@ -212,20 +215,19 @@ def process(
             return ProcessResult.failure(
                 f'{section}: Download Failed. Sending back to {section}',
             )
-    else:
-        log.warning('FAILED DOWNLOAD DETECTED')
-        if (
-            delete_failed
-            and os.path.isdir(dir_name)
-            and not os.path.dirname(dir_name) == dir_name
-        ):
-            log.debug(f'Deleting failed files and folder {dir_name}')
-            remove_dir(dir_name)
-        # Return as failed to flag this in the downloader.
-        return ProcessResult.failure(
-            f'{section}: Failed to post-process. {section} does not '
-            f'support failed downloads',
-        )
+    log.warning('FAILED DOWNLOAD DETECTED')
+    if (
+        delete_failed
+        and os.path.isdir(dir_name)
+        and not os.path.dirname(dir_name) == dir_name
+    ):
+        log.debug(f'Deleting failed files and folder {dir_name}')
+        remove_dir(dir_name)
+    # Return as failed to flag this in the downloader.
+    return ProcessResult.failure(
+        f'{section}: Failed to post-process. {section} does not '
+        f'support failed downloads',
+    )

     return ProcessResult.failure()

@@ -241,13 +243,13 @@ def get_status(url, apikey, dir_name):
     log.debug(f'Opening URL: {url} with PARAMS: {params}')

     try:
-        r = requests.get(url, params=params, verify=False, timeout=(30, 120))
+        response = requests.get(url, params=params, verify=False, timeout=(30, 120))
     except requests.RequestException:
         log.error('Unable to open URL')
         return None

     try:
-        result = r.json()
+        result = response.json()
     except ValueError:
         # ValueError catches simplejson's JSONDecodeError and json's ValueError
         return None
@@ -267,7 +269,7 @@ def force_process(
     log.debug(f'Opening URL: {url} with PARAMS: {params}')

     try:
-        r = requests.get(url, params=params, verify=False, timeout=(30, 300))
+        response = requests.get(url, params=params, verify=False, timeout=(30, 300))
     except requests.ConnectionError:
         log.error(f'Unable to open URL {url}')
         return ProcessResult.failure(
@@ -275,18 +277,19 @@ def force_process(
         f'{section}',
     )

-    log.debug(f'Result: {r.text}')
+    log.debug(f'Result: {response.text}')

-    if r.status_code not in [
+    if response.status_code not in [
         requests.codes.ok,
         requests.codes.created,
         requests.codes.accepted,
     ]:
-        log.error(f'Server returned status {r.status_code}')
+        log.error(f'Server returned status {response.status_code}')
         return ProcessResult.failure(
-            f'{section}: Failed to post-process - Server returned status {r.status_code}',
+            f'{section}: Failed to post-process - Server returned status {response.status_code}',
         )
-    elif r.text == 'OK':
+
+    if response.text == 'OK':
         log.debug(f'SUCCESS: Post-Processing started for {input_name} in folder {dir_name} ...')
     else:
         log.error(f'FAILED: Post-Processing has NOT started for {input_name} in folder {dir_name}. exiting!')

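The renamed polling loop, extracted into a sketch. It assumes the `command_complete` helper shown earlier in this commit (it returns the JSON 'status' field or None), and `wait_for` is in minutes as in the original comment:

import time

def wait_for_scan(url, params, headers, section, wait_for=1, attempts=6):
    # Poll until the remote Scan command reaches a terminal state, or give up.
    # command_complete is assumed to be importable from the module above.
    for _ in range(attempts):
        time.sleep(10 * wait_for)
        command_status = command_complete(url, params, headers, section)
        if command_status in ('completed', 'failed'):
            return command_status
    return None
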
@@ -393,33 +393,32 @@ def process(
     if section == 'SickBeard':
         if init_sickbeard.fork_obj:
             return init_sickbeard.fork_obj.api_call()
-        else:
-            s = requests.Session()
+        session = requests.Session()

-            log.debug(f'Opening URL: {url} with params: {fork_params}')
-            if not apikey and username and password:
-                login = f'{web_root}/login'
-                login_params = {'username': username, 'password': password}
-                response = s.get(login, verify=False, timeout=(30, 60))
-                if response.status_code in [401, 403] and response.cookies.get('_xsrf'):
-                    login_params['_xsrf'] = response.cookies.get('_xsrf')
-                s.post(
-                    login,
-                    data=login_params,
-                    stream=True,
-                    verify=False,
-                    timeout=(30, 60),
-                )
-            response = s.get(
-                url,
-                auth=(username, password),
-                params=fork_params,
-                stream=True,
-                verify=False,
-                timeout=(30, 1800),
-            )
+        log.debug(f'Opening URL: {url} with params: {fork_params}')
+        if not apikey and username and password:
+            login = f'{web_root}/login'
+            login_params = {'username': username, 'password': password}
+            response = session.get(login, verify=False, timeout=(30, 60))
+            if response.status_code in [401, 403] and response.cookies.get('_xsrf'):
+                login_params['_xsrf'] = response.cookies.get('_xsrf')
+            session.post(
+                login,
+                data=login_params,
+                stream=True,
+                verify=False,
+                timeout=(30, 60),
+            )
+        response = session.get(
+            url,
+            auth=(username, password),
+            params=fork_params,
+            stream=True,
+            verify=False,
+            timeout=(30, 1800),
+        )
     elif section == 'SiCKRAGE':
-        s = requests.Session()
+        session = requests.Session()

         if api_version >= 2 and sso_username and sso_password:
             oauth = OAuth2Session(
@@ -433,7 +432,7 @@ def process(
                 username=sso_username,
                 password=sso_password,
             )
-            s.headers.update(
+            session.headers.update(
                 {'Authorization': 'Bearer ' + oauth_token['access_token']},
             )

@@ -454,7 +453,7 @@ def process(
         else:
             params = fork_params

-        response = s.get(
+        response = session.get(
             url,
             params=params,
             stream=True,
@@ -542,29 +541,33 @@ def process(
         return ProcessResult.success(
             f'{section}: Successfully post-processed {input_name}',
         )
-    elif section == 'NzbDrone' and started:
-        n = 0
+
+    if section == 'NzbDrone' and started:
+        num = 0
         params = {}
         url = f'{url}/{scan_id}'
-        while n < 6:  # set up wait_for minutes to see if command completes..
+        while num < 6:  # set up wait_for minutes to see if command completes..
             time.sleep(10 * wait_for)
             command_status = command_complete(url, params, headers, section)
             if command_status and command_status in ['completed', 'failed']:
                 break
-            n += 1
+            num += 1
         if command_status:
             log.debug(f'The Scan command return status: {command_status}')

         if not os.path.exists(dir_name):
             log.debug(f'The directory {dir_name} has been removed. Renaming was successful.')
             return ProcessResult.success(
                 f'{section}: Successfully post-processed {input_name}',
             )
-        elif command_status and command_status in ['completed']:
+
+        if command_status and command_status in ['completed']:
             log.debug('The Scan command has completed successfully. Renaming was successful.')
             return ProcessResult.success(
                 f'{section}: Successfully post-processed {input_name}',
             )
-        elif command_status and command_status in ['failed']:
+
+        if command_status and command_status in ['failed']:
             log.debug('The Scan command has failed. Renaming was not successful.')
             # return ProcessResult.failure(
             #     f'{section}: Failed to post-process {input_name}'
@@ -578,14 +581,12 @@ def process(
             f'Passing back to {section}',
             status_code=status,
         )
-        else:
-            log.warning('The Scan command did not return a valid status. Renaming was not successful.')
-            return ProcessResult.failure(
-                f'{section}: Failed to post-process {input_name}',
-            )
-    else:
-        # We did not receive Success confirmation.
-        return ProcessResult.failure(
-            f'{section}: Failed to post-process - Returned log from {section} '
-            f'was not as expected.',
-        )
+        log.warning('The Scan command did not return a valid status. Renaming was not successful.')
+        return ProcessResult.failure(
+            f'{section}: Failed to post-process {input_name}',
+        )
+    # We did not receive Success confirmation.
+    return ProcessResult.failure(
+        f'{section}: Failed to post-process - Returned log from {section} '
+        f'was not as expected.',
+    )

@@ -101,15 +101,15 @@ class ConfigObj(configobj.ConfigObj, Section):
         self.interpolation = False

     @staticmethod
-    def find_key(node, kv):
+    def find_key(node, value):
         if isinstance(node, list):
             for i in node:
-                yield from ConfigObj.find_key(i, kv)
+                yield from ConfigObj.find_key(i, value)
         elif isinstance(node, dict):
-            if kv in node:
-                yield node[kv]
+            if value in node:
+                yield node[value]
             for j in node.values():
-                yield from ConfigObj.find_key(j, kv)
+                yield from ConfigObj.find_key(j, value)

     @staticmethod
     def migrate():
@@ -121,7 +121,7 @@ class ConfigObj(configobj.ConfigObj, Section):
             # check for autoProcessMedia.cfg and create if it does not exist
             if not nzb2media.CONFIG_FILE.is_file():
                 shutil.copyfile(nzb2media.CONFIG_SPEC_FILE, nzb2media.CONFIG_FILE)
-            CFG_OLD = config(nzb2media.CONFIG_FILE)
+            CFG_OLD = Config(nzb2media.CONFIG_FILE)
         except Exception as error:
             log.error(f'Error {error} when copying to .cfg')

@@ -129,7 +129,7 @@ class ConfigObj(configobj.ConfigObj, Section):
             # check for autoProcessMedia.cfg.spec and create if it does not exist
             if not nzb2media.CONFIG_SPEC_FILE.is_file():
                 shutil.copyfile(nzb2media.CONFIG_FILE, nzb2media.CONFIG_SPEC_FILE)
-            CFG_NEW = config(nzb2media.CONFIG_SPEC_FILE)
+            CFG_NEW = Config(nzb2media.CONFIG_SPEC_FILE)
         except Exception as error:
             log.error(f'Error {error} when copying to .spec')

@@ -307,7 +307,7 @@ class ConfigObj(configobj.ConfigObj, Section):
     @staticmethod
     def addnzbget():
         # load configs into memory
-        cfg_new = config()
+        cfg_new = Config()

         try:
             if (
@@ -1117,4 +1117,4 @@ class ConfigObj(configobj.ConfigObj, Section):

 configobj.Section = Section
 configobj.ConfigObj = ConfigObj
-config = ConfigObj
+Config = ConfigObj

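A quick usage sketch of the renamed find_key generator: it walks nested lists and dicts and yields every value stored under the given key. The sample node is invented:

node = {'movie': [{'_id': 'abc123', 'status': 'done'}], 'limit': 1}
for value in ConfigObj.find_key(node, '_id'):
    print(value)  # -> abc123
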
@@ -94,27 +94,27 @@ def extract(file_path, output_destination):
             stdout=devnull,
             stderr=devnull,
         ):  # note, returns 0 if exists, or 1 if doesn't exist.
-            for k, v in extract_commands.items():
-                if cmd in v[0]:
+            for key, val in extract_commands.items():
+                if cmd in val[0]:
                     if not call(
                         ['which', '7zr'],
                         stdout=devnull,
                         stderr=devnull,
                     ):  # we do have '7zr'
-                        extract_commands[k] = ['7zr', 'x', '-y']
+                        extract_commands[key] = ['7zr', 'x', '-y']
                     elif not call(
                         ['which', '7z'], stdout=devnull, stderr=devnull,
                     ):  # we do have '7z'
-                        extract_commands[k] = ['7z', 'x', '-y']
+                        extract_commands[key] = ['7z', 'x', '-y']
                     elif not call(
                         ['which', '7za'],
                         stdout=devnull,
                         stderr=devnull,
                     ):  # we do have '7za'
-                        extract_commands[k] = ['7za', 'x', '-y']
+                        extract_commands[key] = ['7za', 'x', '-y']
                     else:
-                        log.error(f'EXTRACTOR: {cmd} not found, disabling support for {k}')
-                        del extract_commands[k]
+                        log.error(f'EXTRACTOR: {cmd} not found, disabling support for {key}')
+                        del extract_commands[key]
         devnull.close()
     else:
         log.warning('EXTRACTOR: Cannot determine which tool to use when called from Transmission')
@@ -246,6 +246,5 @@ def extract(file_path, output_destination):
         except Exception:
             pass
         return True
-    else:
-        log.error(f'EXTRACTOR: Extraction failed for {file_path}. Result was {res}')
-        return False
+    log.error(f'EXTRACTOR: Extraction failed for {file_path}. Result was {res}')
+    return False

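The probe chain above shells out to `which` via subprocess. A portable stdlib sketch of the same decision, offered as an alternative and not what this commit does:

import shutil

def pick_7zip():
    # Return the first available 7-Zip flavour as an extract command.
    for tool in ('7zr', '7z', '7za'):
        if shutil.which(tool):
            return [tool, 'x', '-y']
    return None
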
@@ -37,14 +37,13 @@ class DBConnection:
         result = None
         try:
             result = self.select('SELECT db_version FROM db_version')
-        except sqlite3.OperationalError as e:
-            if 'no such table: db_version' in e.args[0]:
+        except sqlite3.OperationalError as error:
+            if 'no such table: db_version' in error.args[0]:
                 return 0

         if result:
             return int(result[0]['db_version'])
-        else:
-            return 0
+        return 0

     def fetch(self, query, args=None):
         if query is None:
@@ -94,16 +93,16 @@ class DBConnection:

         while attempt < 5:
             try:
-                for qu in querylist:
-                    if len(qu) == 1:
+                for query in querylist:
+                    if len(query) == 1:
                         if log_transaction:
-                            log.debug(qu[0])
-                        sql_result.append(self.connection.execute(qu[0]))
-                    elif len(qu) > 1:
+                            log.debug(query[0])
+                        sql_result.append(self.connection.execute(query[0]))
+                    elif len(query) > 1:
                         if log_transaction:
-                            log.debug(f'{qu[0]} with args {qu[1]}')
+                            log.debug(f'{query[0]} with args {query[1]}')
                         sql_result.append(
-                            self.connection.execute(qu[0], qu[1]),
+                            self.connection.execute(query[0], query[1]),
                         )
                 self.connection.commit()
                 log.debug(f'Transaction with {len(querylist)} query\'s executed')
@@ -252,8 +251,8 @@ def _process_upgrade(connection, upgrade_class):
     else:
         log.debug(f'{upgrade_class.__name__} upgrade not required')

-    for upgradeSubClass in upgrade_class.__subclasses__():
-        _process_upgrade(connection, upgradeSubClass)
+    for upgrade_sub_class in upgrade_class.__subclasses__():
+        _process_upgrade(connection, upgrade_sub_class)


 # Base migration class. All future DB changes should be subclassed from this class
@@ -283,8 +282,7 @@ class SchemaUpgrade:
         result = self.connection.select('SELECT db_version FROM db_version')
         if result:
             return int(result[-1]['db_version'])
-        else:
-            return 0
+        return 0

     def inc_db_version(self):
         new_version = self.check_db_version() + 1

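The renamed `query` loop in DBConnection runs bare statements and (sql, args) pairs in one transaction. A self-contained sketch of that shape against an in-memory database; the table and statements are illustrative only:

import sqlite3

connection = sqlite3.connect(':memory:')
connection.execute('CREATE TABLE db_version (db_version INTEGER)')
querylist = [
    ('INSERT INTO db_version VALUES (?)', (1,)),
    ('UPDATE db_version SET db_version = ?', (2,)),
]
for query in querylist:
    if len(query) == 1:
        connection.execute(query[0])        # bare statement
    elif len(query) > 1:
        connection.execute(query[0], query[1])  # statement with bound args
connection.commit()
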
@@ -168,19 +168,19 @@ class PyMedusaApiV2(SickBeard):
             return ProcessResult.failure()

         wait_for = int(self.sb_init.config.get('wait_for', 2))
-        n = 0
+        num = 0
         response = {}

         queue_item_identifier = jdata['queueItem']['identifier']
         url = f'{self.url}/{queue_item_identifier}'
-        while n < 12:  # set up wait_for minutes to see if command completes..
+        while num < 12:  # set up wait_for minutes to see if command completes..
             time.sleep(5 * wait_for)
             response = self._get_identifier_status(url)
             if response and response.get('success'):
                 break
             if 'error' in response:
                 break
-            n += 1
+            num += 1

         # Log Medusa's PP logs here.
         if response.get('output'):

@@ -52,9 +52,9 @@ class InitSickBeard:
             'stheno': 'Stheno',
         }
         _val = cfg.get('fork', 'auto')
-        f1 = replace.get(_val, _val)
+        fork_name = replace.get(_val, _val)
         try:
-            self.fork = f1, nzb2media.FORKS[f1]
+            self.fork = fork_name, nzb2media.FORKS[fork_name]
         except KeyError:
             self.fork = 'auto'
         self.protocol = 'https://' if self.ssl else 'http://'
@@ -83,9 +83,9 @@ class InitSickBeard:
             'stheno': 'Stheno',
         }
         _val = cfg.get('fork', 'auto')
-        f1 = replace.get(_val.lower(), _val)
+        fork_name = replace.get(_val.lower(), _val)
         try:
-            self.fork = f1, nzb2media.FORKS[f1]
+            self.fork = fork_name, nzb2media.FORKS[fork_name]
         except KeyError:
             self.fork = 'auto'
         protocol = 'https://' if self.ssl else 'http://'
@@ -209,9 +209,9 @@ class InitSickBeard:
         return self.fork, self.fork_params

     @staticmethod
-    def _api_check(r, params, rem_params):
+    def _api_check(response, params, rem_params):
         try:
-            json_data = r.json()
+            json_data = response.json()
         except ValueError:
             log.error('Failed to get JSON data from response')
             log.debug('Response received')

@@ -76,8 +76,7 @@ def process(
             message='',
             status_code=-1,
         )
-    else:
-        usercat = 'ALL'
+    usercat = 'ALL'
     if len(section) > 1:
         log.error(f'Category:[{input_category}] is not unique, {section.keys()} are using it. Please rename it or disable all other sections using the same category name in your autoProcessMedia.cfg and try again.')
         return ProcessResult(

@@ -147,17 +147,17 @@ def reverse_filename(filename, dirname, name):
     head, file_extension = os.path.splitext(os.path.basename(filename))
     na_parts = season_pattern.search(head)
     if na_parts is not None:
-        word_p = word_pattern.findall(na_parts.group(2))
-        if word_p:
+        match = word_pattern.findall(na_parts.group(2))
+        if match:
             new_words = ''
-            for wp in word_p:
-                if wp[0] == '.':
+            for group in match:
+                if group[0] == '.':
                     new_words += '.'
-                new_words += re.sub(r'\W', '', wp)
+                new_words += re.sub(r'\W', '', group)
         else:
             new_words = na_parts.group(2)
-        for cr in char_replace:
-            new_words = re.sub(cr[0], cr[1], new_words)
+        for each_char in char_replace:
+            new_words = re.sub(each_char[0], each_char[1], new_words)
         newname = new_words[::-1] + na_parts.group(1)[::-1]
     else:
         newname = head[::-1].title()

@@ -59,8 +59,7 @@ def is_video_good(video: pathlib.Path, status, require_lan=None):
         # if the download was 'failed', assume bad.
         # If it was successful, assume good.
         return False
-    else:
-        return True
+    return True

     log.info(f'Checking [{video.name}] for corruption, please stand by ...')
     video_details, result = get_video_details(video)
@@ -96,9 +95,8 @@ def is_video_good(video: pathlib.Path, status, require_lan=None):
     if len(video_streams) > 0 and len(valid_audio) > 0:
         log.info(f'SUCCESS: [{video.name}] has no corruption.')
         return True
-    else:
-        log.info(f'FAILED: [{video.name}] has {len(video_streams)} video streams and {len(audio_streams)} audio streams. Assume corruption.')
-        return False
+    log.info(f'FAILED: [{video.name}] has {len(video_streams)} video streams and {len(audio_streams)} audio streams. Assume corruption.')
+    return False


 def zip_out(file, img):
@@ -191,8 +189,7 @@ def check_vid_file(video_details, result):
     ]
     if len(video_streams) > 0 and len(audio_streams) > 0:
         return True
-    else:
-        return False
+    return False


 def build_commands(file, new_dir, movie_name):
@@ -328,7 +325,7 @@ def build_commands(file, new_dir, movie_name):

     for video in video_streams:
         codec = video['codec_name']
-        fr = video.get('avg_frame_rate', 0)
+        frame_rate = video.get('avg_frame_rate', 0)
         width = video.get('width', 0)
         height = video.get('height', 0)
         scale = nzb2media.VRESOLUTION
@@ -337,7 +334,7 @@ def build_commands(file, new_dir, movie_name):
         else:
             video_cmd.extend(['-c:v', nzb2media.VCODEC])
         if nzb2media.VFRAMERATE and not (
-            nzb2media.VFRAMERATE * 0.999 <= fr <= nzb2media.VFRAMERATE * 1.001
+            nzb2media.VFRAMERATE * 0.999 <= frame_rate <= nzb2media.VFRAMERATE * 1.001
         ):
             video_cmd.extend(['-r', str(nzb2media.VFRAMERATE)])
         if scale:
@@ -612,7 +609,7 @@ def build_commands(file, new_dir, movie_name):

     s_mapped = []
     burnt = 0
-    n = 0
+    num = 0
     for lan in nzb2media.SLANGUAGES:
         try:
             subs1 = [
@@ -702,14 +699,14 @@ def build_commands(file, new_dir, movie_name):
             if metlan:
                 meta_cmd.extend(
                     [
-                        f'-metadata:s:s:{len(s_mapped) + n}',
+                        f'-metadata:s:s:{len(s_mapped) + num}',
                         f'language={metlan.alpha3}',
                     ],
                 )
-            n += 1
-            map_cmd.extend(['-map', f'{n}:0'])
+            num += 1
+            map_cmd.extend(['-map', f'{num}:0'])

-    if not nzb2media.ALLOWSUBS or (not s_mapped and not n):
+    if not nzb2media.ALLOWSUBS or (not s_mapped and not num):
         sub_cmd.extend(['-sn'])
     else:
         if nzb2media.SCODEC:
@@ -774,19 +771,19 @@ def extract_subs(file, newfile_path):
         and item['codec_name'] != 'pgssub'
     ]
     num = len(sub_streams)
-    for n in range(num):
-        sub = sub_streams[n]
+    for ea_num in range(num):
+        sub = sub_streams[ea_num]
         idx = sub['index']
         lan = sub.get('tags', {}).get('language', 'unk')

         if num == 1:
             output_file = os.path.join(subdir, f'{name}.srt')
             if os.path.isfile(output_file):
-                output_file = os.path.join(subdir, f'{name}.{n}.srt')
+                output_file = os.path.join(subdir, f'{name}.{ea_num}.srt')
         else:
             output_file = os.path.join(subdir, f'{name}.{lan}.srt')
             if os.path.isfile(output_file):
-                output_file = os.path.join(subdir, f'{name}.{lan}.{n}.srt')
+                output_file = os.path.join(subdir, f'{name}.{lan}.{ea_num}.srt')

         command = [
             nzb2media.FFMPEG,
@@ -825,14 +822,14 @@ def extract_subs(file, newfile_path):
     log.error('Extracting subtitles has failed')


-def process_list(it, new_dir):
+def process_list(iterable, new_dir):
     rem_list = []
     new_list = []
     combine = []
     vts_path = None
     mts_path = None
     success = True
-    for item in it:
+    for item in iterable:
         ext = os.path.splitext(item)[1].lower()
         if (
             ext in ['.iso', '.bin', '.img']
@@ -889,15 +886,15 @@ def process_list(it, new_dir):
             success = False
             break
     if success and new_list:
-        it.extend(new_list)
+        iterable.extend(new_list)
         for item in rem_list:
-            it.remove(item)
+            iterable.remove(item)
         log.debug(f'Successfully extracted .vob file {new_list[0]} from disk image')
     elif new_list and not success:
         new_list = []
         rem_list = []
         log.error('Failed extracting .vob files from disk image. Stopping transcoding.')
-    return it, rem_list, new_list, success
+    return iterable, rem_list, new_list, success


 def mount_iso(item, new_dir):  # Currently only supports Linux Mount when permissions allow.
@@ -926,9 +923,9 @@ def mount_iso(item, new_dir):  # Currently only supports Linux Mount when permis
         except Exception:
             vts_path = os.path.split(full_path)[0]
         return combine_vts(vts_path)
-    elif (
-        re.match('.+BDMV[/\\]STREAM[/\\][0-9]+[0-9].[Mm]', full_path)
-        and '.mts' not in nzb2media.IGNOREEXTENSIONS
+    if (
+        re.match('.+BDMV[/\\]STREAM[/\\][0-9]+[0-9].[Mm]', full_path)
+        and '.mts' not in nzb2media.IGNOREEXTENSIONS
     ):
         log.debug(f'Found MTS image file: {full_path}')
         try:
@@ -974,14 +971,14 @@ def rip_iso(item, new_dir):
     ]
     combined = []
     if file_list:  # handle DVD
-        for n in range(99):
+        for title_set in range(99):
             concat = []
-            m = 1
+            part = 1
             while True:
-                vts_name = f'VIDEO_TS{os.sep}VTS_{n + 1:02d}_{m:d}.VOB'
+                vts_name = f'VIDEO_TS{os.sep}VTS_{title_set + 1:02d}_{part:d}.VOB'
                 if vts_name in file_list:
                     concat.append(vts_name)
-                    m += 1
+                    part += 1
                 else:
                     break
             if not concat:
@@ -991,7 +988,7 @@ def rip_iso(item, new_dir):
                 continue
             name = '{name}.cd{x}'.format(
                 name=os.path.splitext(os.path.split(item)[1])[0],
-                x=n + 1,
+                x=title_set + 1,
             )
             new_files.append({item: {'name': name, 'files': concat}})
     else:  # check BlueRay for BDMV/STREAM/XXXX.MTS
@@ -1012,17 +1009,17 @@ def rip_iso(item, new_dir):
         mts_list.sort(
             key=lambda f: int(''.join(filter(str.isdigit, f))),
         )
-        n = 0
+        title_set = 0
         for mts_name in mts_list:
             concat = []
-            n += 1
+            title_set += 1
             concat.append(mts_name)
             if nzb2media.CONCAT:
                 combined.extend(concat)
                 continue
             name = '{name}.cd{x}'.format(
                 name=os.path.splitext(os.path.split(item)[1])[0],
-                x=n,
+                x=title_set,
             )
             new_files.append({item: {'name': name, 'files': concat}})
         if nzb2media.CONCAT and combined:
@@ -1045,14 +1042,14 @@ def combine_vts(vts_path):
         name = os.path.basename(os.path.dirname(name))
     else:
         name = os.path.basename(name)
-    for n in range(99):
+    for title_set in range(99):
         concat = []
-        m = 1
+        part = 1
         while True:
-            vts_name = f'VTS_{n + 1:02d}_{m:d}.VOB'
+            vts_name = f'VTS_{title_set + 1:02d}_{part:d}.VOB'
             if os.path.isfile(os.path.join(vts_path, vts_name)):
                 concat.append(os.path.join(vts_path, vts_name))
-                m += 1
+                part += 1
             else:
                 break
         if not concat:
@@ -1062,7 +1059,7 @@ def combine_vts(vts_path):
             continue
         name = '{name}.cd{x}'.format(
             name=name,
-            x=n + 1,
+            x=title_set + 1,
         )
         new_files.append({vts_path: {'name': name, 'files': concat}})
     if nzb2media.CONCAT:
@@ -1078,7 +1075,7 @@ def combine_mts(mts_path):
         name = os.path.basename(os.path.dirname(name))
     else:
         name = os.path.basename(name)
-    n = 0
+    num = 0
     mts_list = [
         f
         for f in os.listdir(mts_path)
@@ -1096,10 +1093,10 @@ def combine_mts(mts_path):
             continue
         name = '{name}.cd{x}'.format(
             name=name,
-            x=n + 1,
+            x=num + 1,
         )
         new_files.append({mts_path: {'name': name, 'files': concat}})
-        n += 1
+        num += 1
     if nzb2media.CONCAT:
         new_files.append({mts_path: {'name': name, 'files': combined}})
     return new_files
@@ -1112,11 +1109,11 @@ def combine_cd(combine):
         for ea_item in combine
     }:
         concat = ''
-        for n in range(99):
+        for num in range(99):
             files = [
                 file
                 for file in combine
-                if n + 1
+                if num + 1
                 == int(re.match('.+[cC][dD]([0-9]+).', file).groups()[0])
                 and item in file
             ]

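The DVD loops above build VTS part names per title set; the renames make the `VTS_<title set>_<part>.VOB` convention explicit. A small sketch of the naming rule with the new variable names (the helper itself is hypothetical and takes a 1-based title set):

import os

def vts_names(title_set, parts):
    # VIDEO_TS layout: VTS_<title set, two digits>_<part>.VOB
    return [
        f'VIDEO_TS{os.sep}VTS_{title_set:02d}_{part:d}.VOB'
        for part in range(1, parts + 1)
    ]

# vts_names(1, 2) -> ['VIDEO_TS/VTS_01_1.VOB', 'VIDEO_TS/VTS_01_2.VOB'] on POSIX
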
@@ -15,37 +15,40 @@ def char_replace(name_in):
     # UTF-8: 1st hex code 0xC2-0xC3 followed by a 2nd hex code 0xA1-0xFF
     # ISO-8859-15: 0xA6-0xFF
     # The function will detect if Name contains a special character
-    # If there is special character, detects if it is a UTF-8, CP850 or ISO-8859-15 encoding
+    # If there is special character, detects if it is a UTF-8, CP850 or
+    # ISO-8859-15 encoding
     encoded = False
     encoding = None
     if isinstance(name_in, str):
         return encoded, name_in
     name = bytes(name_in)
-    for Idx in range(len(name)):
+    for idx, character in enumerate(name):
         # print('Trying to intuit the encoding')
         # /!\ detection is done 2char by 2char for UTF-8 special character
-        if (len(name) != 1) & (Idx < (len(name) - 1)):
+        try:
+            next_character = name[idx + 1]
+        except IndexError:
+            # Detect CP850
+            if (character >= 0x80) & (character <= 0xA5):
+                encoding = 'cp850'
+                break
+            # Detect ISO-8859-15
+            elif (character >= 0xA6) & (character <= 0xFF):
+                encoding = 'iso-8859-15'
+                break
+        else:
             # Detect UTF-8
-            if ((name[Idx] == 0xC2) | (name[Idx] == 0xC3)) & (
-                (name[Idx + 1] >= 0xA0) & (name[Idx + 1] <= 0xFF)
+            if ((character == 0xC2) | (character == 0xC3)) & (
+                (next_character >= 0xA0) & (next_character <= 0xFF)
             ):
                 encoding = 'utf-8'
                 break
             # Detect CP850
-            elif (name[Idx] >= 0x80) & (name[Idx] <= 0xA5):
+            elif (character >= 0x80) & (character <= 0xA5):
                 encoding = 'cp850'
                 break
             # Detect ISO-8859-15
-            elif (name[Idx] >= 0xA6) & (name[Idx] <= 0xFF):
+            elif (character >= 0xA6) & (character <= 0xFF):
                 encoding = 'iso-8859-15'
                 break
-        else:
-            # Detect CP850
-            if (name[Idx] >= 0x80) & (name[Idx] <= 0xA5):
-                encoding = 'cp850'
-                break
-            # Detect ISO-8859-15
-            elif (name[Idx] >= 0xA6) & (name[Idx] <= 0xFF):
-                encoding = 'iso-8859-15'
-                break
     if encoding:

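The rewrite swaps index-bounds arithmetic for a try/except lookahead on the next byte. A simplified standalone sketch of the detection order (not a drop-in replacement; the real function also tracks whether a replacement happened):

def detect_encoding(name: bytes):
    for idx, character in enumerate(name):
        try:
            next_character = name[idx + 1]
        except IndexError:
            next_character = None
        # The UTF-8 lead-byte check (0xC2/0xC3) must come before the
        # single-byte ranges, since 0xC2-0xC3 also falls in the
        # ISO-8859-15 range.
        if next_character is not None and character in (0xC2, 0xC3) and 0xA0 <= next_character <= 0xFF:
            return 'utf-8'
        if 0x80 <= character <= 0xA5:
            return 'cp850'
        if 0xA6 <= character <= 0xFF:
            return 'iso-8859-15'
    return None

# detect_encoding(b'caf\xc3\xa9') -> 'utf-8'
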
@@ -28,21 +28,21 @@ def move_file(filename, path, link):
     file_ext = os.path.splitext(filename)[1]
     try:
         if file_ext in nzb2media.AUDIO_CONTAINER:
-            f = mediafile.MediaFile(filename)
+            guess = mediafile.MediaFile(filename)

             # get artist and album info
-            artist = f.artist
-            album = f.album
+            artist = guess.artist
+            album = guess.album

             # create new path
             new_path = os.path.join(
                 path, f'{sanitize_name(artist)} - {sanitize_name(album)}',
             )
         elif file_ext in nzb2media.MEDIA_CONTAINER:
-            f = guessit.guessit(filename)
+            guess = guessit.guessit(filename)

             # get title
-            title = f.get('series') or f.get('title')
+            title = guess.get('series') or guess.get('title')

             if not title:
                 title = os.path.splitext(os.path.basename(filename))[0]
@@ -217,11 +217,11 @@ def extract_files(src, dst=None, keep_archive=None):
     extracted_folder = []
     extracted_archive = []

-    for inputFile in list_media_files(
+    for input_file in list_media_files(
         src, media=False, audio=False, meta=False, archives=True,
     ):
-        dir_path = os.path.dirname(inputFile)
-        full_file_name = os.path.basename(inputFile)
+        dir_path = os.path.dirname(input_file)
+        full_file_name = os.path.basename(input_file)
         archive_name = os.path.splitext(full_file_name)[0]
         archive_name = re.sub(r'part[0-9]+', '', archive_name)

@@ -229,29 +229,29 @@ def extract_files(src, dst=None, keep_archive=None):
             continue  # no need to extract this, but keep going to look for other archives and sub directories.

         try:
-            if extractor.extract(inputFile, dst or dir_path):
+            if extractor.extract(input_file, dst or dir_path):
                 extracted_folder.append(dir_path)
                 extracted_archive.append(archive_name)
         except Exception:
             log.error(f'Extraction failed for: {full_file_name}')

     for folder in extracted_folder:
-        for inputFile in list_media_files(
+        for input_file in list_media_files(
             folder, media=False, audio=False, meta=False, archives=True,
         ):
-            full_file_name = os.path.basename(inputFile)
+            full_file_name = os.path.basename(input_file)
             archive_name = os.path.splitext(full_file_name)[0]
             archive_name = re.sub(r'part[0-9]+', '', archive_name)
             if archive_name not in extracted_archive or keep_archive:
                 continue  # don't remove if we haven't extracted this archive, or if we want to preserve them.
             log.info(f'Removing extracted archive {full_file_name} from folder {folder} ...')
             try:
-                if not os.access(inputFile, os.W_OK):
-                    os.chmod(inputFile, stat.S_IWUSR)
-                os.remove(inputFile)
+                if not os.access(input_file, os.W_OK):
+                    os.chmod(input_file, stat.S_IWUSR)
+                os.remove(input_file)
                 time.sleep(1)
             except Exception as error:
-                log.error(f'Unable to remove file {inputFile} due to: {error}')
+                log.error(f'Unable to remove file {input_file} due to: {error}')


 def backup_versioned_file(old_file, version):

@@ -19,16 +19,16 @@ def find_imdbid(dir_name, input_name, omdb_api_key):

     # find imdbid in dirName
     log.info('Searching folder and file names for imdbID ...')
-    m = re.search(r'\b(tt\d{7,8})\b', dir_name + input_name)
-    if m:
-        imdbid = m.group(1)
+    match = re.search(r'\b(tt\d{7,8})\b', dir_name + input_name)
+    if match:
+        imdbid = match.group(1)
         log.info(f'Found imdbID [{imdbid}]')
         return imdbid
     if os.path.isdir(dir_name):
         for file in os.listdir(dir_name):
-            m = re.search(r'\b(tt\d{7,8})\b', file)
-            if m:
-                imdbid = m.group(1)
+            match = re.search(r'\b(tt\d{7,8})\b', file)
+            if match:
+                imdbid = match.group(1)
                 log.info(f'Found imdbID [{imdbid}] via file name')
                 return imdbid
     if 'NZBPR__DNZB_MOREINFO' in os.environ:
@@ -37,9 +37,9 @@ def find_imdbid(dir_name, input_name, omdb_api_key):
         regex = re.compile(
            r'^http://www.imdb.com/title/(tt[0-9]+)/$', re.IGNORECASE,
         )
-        m = regex.match(dnzb_more_info)
-        if m:
-            imdbid = m.group(1)
+        match = regex.match(dnzb_more_info)
+        if match:
+            imdbid = match.group(1)
             log.info(f'Found imdbID [{imdbid}] from DNZB-MoreInfo')
             return imdbid
     log.info('Searching IMDB for imdbID ...')
@@ -67,7 +67,7 @@ def find_imdbid(dir_name, input_name, omdb_api_key):
     log.debug(f'Opening URL: {url}')

     try:
-        r = requests.get(
+        response = requests.get(
             url,
             params={'apikey': omdb_api_key, 'y': year, 't': title},
             verify=False,
@@ -78,7 +78,7 @@ def find_imdbid(dir_name, input_name, omdb_api_key):
         return

     try:
-        results = r.json()
+        results = response.json()
     except Exception:
         log.error('No json data returned from omdbapi.com')

@@ -29,14 +29,10 @@ def copy_link(src, target_link, use_link):
     if src != target_link and os.path.exists(target_link):
         log.info('MEDIAFILE already exists in the TARGET folder, skipping ...')
         return True
-    elif (
-        src == target_link
-        and os.path.isfile(target_link)
-        and os.path.isfile(src)
-    ):
+    if src == target_link and os.path.isfile(target_link) and os.path.isfile(src):
         log.info('SOURCE AND TARGET files are the same, skipping ...')
         return True
-    elif src == os.path.dirname(target_link):
+    if src == os.path.dirname(target_link):
         log.info('SOURCE AND TARGET folders are the same, skipping ...')
         return True

@@ -50,20 +46,20 @@ def copy_link(src, target_link, use_link):
         log.info('Directory junction linking SOURCE FOLDER -> TARGET FOLDER')
         linktastic.dirlink(src, target_link)
         return True
-    elif use_link == 'hard':
+    if use_link == 'hard':
         log.info('Hard linking SOURCE MEDIAFILE -> TARGET FOLDER')
         linktastic.link(src, target_link)
         return True
-    elif use_link == 'sym':
+    if use_link == 'sym':
         log.info('Sym linking SOURCE MEDIAFILE -> TARGET FOLDER')
         linktastic.symlink(src, target_link)
         return True
-    elif use_link == 'move-sym':
+    if use_link == 'move-sym':
         log.info('Sym linking SOURCE MEDIAFILE -> TARGET FOLDER')
         shutil.move(src, target_link)
         linktastic.symlink(target_link, src)
         return True
-    elif use_link == 'move':
+    if use_link == 'move':
         log.info('Moving SOURCE MEDIAFILE -> TARGET FOLDER')
         shutil.move(src, target_link)
         return True

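Every `use_link` branch ends in `return True`, which is what makes the elif-to-if change behavior-preserving here. A reduced sketch of the dispatch (linktastic calls as in the hunk):

import shutil
import linktastic

def do_link(src, target_link, use_link):
    # Guard-clause per mode; a dict of callables would be a natural
    # next step if more modes are added.
    if use_link == 'hard':
        linktastic.link(src, target_link)
        return True
    if use_link == 'sym':
        linktastic.symlink(src, target_link)
        return True
    if use_link == 'move':
        shutil.move(src, target_link)
        return True
    return False
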
@@ -115,14 +115,14 @@ def find_download(client_agent, download_id):
         'value': download_id,
     }
     try:
-        r = requests.get(
+        response = requests.get(
             url, params=params, verify=False, timeout=(30, 120),
         )
     except requests.ConnectionError:
         log.error('Unable to open URL')
         return False  # failure

-    result = r.json()
+    result = response.json()
     if result['files']:
         return True
     return False

@@ -26,12 +26,12 @@ def get_nzoid(input_name):
         'output': 'json',
     }
     try:
-        r = requests.get(url, params=params, verify=False, timeout=(30, 120))
+        response = requests.get(url, params=params, verify=False, timeout=(30, 120))
     except requests.ConnectionError:
         log.error('Unable to open URL')
         return nzoid  # failure
     try:
-        result = r.json()
+        result = response.json()
         clean_name = os.path.splitext(os.path.split(input_name)[1])[0]
         slots.extend(
             [
@@ -43,12 +43,12 @@ def get_nzoid(input_name):
         log.warning('Data from SABnzbd queue could not be parsed')
     params['mode'] = 'history'
     try:
-        r = requests.get(url, params=params, verify=False, timeout=(30, 120))
+        response = requests.get(url, params=params, verify=False, timeout=(30, 120))
     except requests.ConnectionError:
         log.error('Unable to open URL')
         return nzoid  # failure
     try:
-        result = r.json()
+        result = response.json()
         clean_name = os.path.splitext(os.path.split(input_name)[1])[0]
         slots.extend(
             [

@@ -84,8 +84,8 @@ def remove_empty_folders(path, remove_root=True):
     log.debug(f'Checking for empty folders in:{path}')
     files = os.listdir(path)
     if len(files):
-        for f in files:
-            fullpath = os.path.join(path, f)
+        for each_file in files:
+            fullpath = os.path.join(path, each_file)
             if os.path.isdir(fullpath):
                 remove_empty_folders(fullpath)

@@ -111,16 +111,16 @@ def remove_read_only(filename):

 def flatten_dir(destination, files):
     log.info(f'FLATTEN: Flattening directory: {destination}')
-    for outputFile in files:
-        dir_path = os.path.dirname(outputFile)
-        file_name = os.path.basename(outputFile)
+    for output_file in files:
+        dir_path = os.path.dirname(output_file)
+        file_name = os.path.basename(output_file)
         if dir_path == destination:
             continue
         target = os.path.join(destination, file_name)
         try:
-            shutil.move(outputFile, target)
+            shutil.move(output_file, target)
         except Exception:
-            log.error(f'Could not flatten {outputFile}')
+            log.error(f'Could not flatten {output_file}')
     remove_empty_folders(destination)  # Cleanup empty directories

@@ -152,7 +152,7 @@ def rchmod(path, mod):
         return  # Skip files

     for root, dirs, files in os.walk(path):
-        for d in dirs:
-            os.chmod(os.path.join(root, d), mod)
-        for f in files:
-            os.chmod(os.path.join(root, f), mod)
+        for each_dir in dirs:
+            os.chmod(os.path.join(root, each_dir), mod)
+        for each_file in files:
+            os.chmod(os.path.join(root, each_file), mod)

@@ -24,23 +24,22 @@ class WindowsProcess:
         # {D0E858DF-985E-4907-B7FB-8D732C3FC3B9}
         _path_str = os.fspath(nzb2media.PID_FILE).replace('\\', '/')
         self.mutexname = f'nzbtomedia_{_path_str}'
-        self.CreateMutex = CreateMutex
-        self.CloseHandle = CloseHandle
-        self.GetLastError = GetLastError
-        self.ERROR_ALREADY_EXISTS = ERROR_ALREADY_EXISTS
+        self.create_mutex = CreateMutex
+        self.close_handle = CloseHandle
+        self.get_last_error = GetLastError
+        self.error_already_exists = ERROR_ALREADY_EXISTS

     def alreadyrunning(self):
-        self.mutex = self.CreateMutex(None, 0, self.mutexname)
-        self.lasterror = self.GetLastError()
-        if self.lasterror == self.ERROR_ALREADY_EXISTS:
-            self.CloseHandle(self.mutex)
+        self.mutex = self.create_mutex(None, 0, self.mutexname)
+        self.lasterror = self.get_last_error()
+        if self.lasterror == self.error_already_exists:
+            self.close_handle(self.mutex)
             return True
-        else:
-            return False
+        return False

     def __del__(self):
         if self.mutex:
-            self.CloseHandle(self.mutex)
+            self.close_handle(self.mutex)


 class PosixProcess:
@@ -54,16 +53,16 @@ class PosixProcess:
             self.lock_socket.bind(f'\0{self.pidpath}')
             self.lasterror = False
             return self.lasterror
-        except OSError as e:
-            if 'Address already in use' in str(e):
+        except OSError as error:
+            if 'Address already in use' in str(error):
                 self.lasterror = True
                 return self.lasterror
         except AttributeError:
             pass
-        if os.path.exists(self.pidpath):
+        if self.pidpath.exists():
             # Make sure it is not a 'stale' pidFile
             try:
-                pid = int(open(self.pidpath).read().strip())
+                pid = int(self.pidpath.read_text().strip())
             except Exception:
                 pid = None
             # Check list of running pids, if not running it is stale so overwrite
@@ -79,9 +78,9 @@ class PosixProcess:
             self.lasterror = False

         if not self.lasterror:
-            # Write my pid into pidFile to keep multiple copies of program from running
-            with self.pidpath.open(mode='w') as fp:
-                fp.write(os.getpid())
+            # Write my pid into pidFile to keep multiple copies of program
+            # from running
+            self.pidpath.write_text(os.getpid())
         return self.lasterror

     def __del__(self):

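How the PosixProcess bind works: on Linux, a socket name beginning with a NUL byte lives in the abstract namespace, so the bind succeeds only for the first process and the "lock" vanishes automatically when that process dies. A minimal Linux-only sketch:

import socket

def try_instance_lock(pidpath):
    lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    try:
        lock_socket.bind(f'\0{pidpath}')  # abstract namespace, Linux only
    except OSError:
        return None  # another instance already holds the lock
    return lock_socket  # keep a reference alive for the process lifetime
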
@@ -123,8 +123,7 @@ class GitUpdateManager(UpdateManager):
         if exit_status == 0:
             log.debug(f'Using: {main_git}')
             return main_git
-        else:
-            log.debug(f'Not using: {main_git}')
+        log.debug(f'Not using: {main_git}')

         # trying alternatives

@@ -148,8 +147,7 @@ class GitUpdateManager(UpdateManager):
             if exit_status == 0:
                 log.debug(f'Using: {cur_git}')
                 return cur_git
-            else:
-                log.debug(f'Not using: {cur_git}')
+            log.debug(f'Not using: {cur_git}')

         # Still haven't found a working git
         log.debug(
@@ -230,8 +228,7 @@ class GitUpdateManager(UpdateManager):
         if self._cur_commit_hash:
             nzb2media.NZBTOMEDIA_VERSION = self._cur_commit_hash
             return True
-        else:
-            return False
+        return False

     def _find_git_branch(self):
         nzb2media.NZBTOMEDIA_BRANCH = self.get_github_branch()
@@ -277,8 +274,7 @@ class GitUpdateManager(UpdateManager):
             if not re.match('^[a-z0-9]+$', cur_commit_hash):
                 log.debug('Output doesn\'t look like a hash, not using it')
                 return
-            else:
-                self._newest_commit_hash = cur_commit_hash
+            self._newest_commit_hash = cur_commit_hash
         else:
             log.debug('git didn\'t return newest commit hash')
             return
@@ -315,15 +311,15 @@ class GitUpdateManager(UpdateManager):

         if not self._cur_commit_hash:
             return True
-        else:
-            try:
-                self._check_github_for_update()
-            except Exception as error:
-                log.error(f'Unable to contact github, can\'t check for update: {error!r}')
-                return False
-
-            if self._num_commits_behind > 0:
-                return True
+        try:
+            self._check_github_for_update()
+        except Exception as error:
+            log.error(f'Unable to contact github, can\'t check for update: {error!r}')
+            return False
+
+        if self._num_commits_behind > 0:
+            return True

         return False

@@ -363,8 +359,8 @@ class SourceUpdateManager(UpdateManager):
             return

         try:
-            with open(version_file) as fp:
-                self._cur_commit_hash = fp.read().strip(' \n\r')
+            with open(version_file) as fin:
+                self._cur_commit_hash = fin.read().strip(' \n\r')
         except OSError as error:
             log.debug(f'Unable to open \'version.txt\': {error}')

@@ -401,14 +397,14 @@ class SourceUpdateManager(UpdateManager):
         self._num_commits_behind = 0
         self._newest_commit_hash = None

-        gh = github.GitHub(
+        repository = github.GitHub(
             self.github_repo_user, self.github_repo, self.branch,
         )

         # try to get newest commit hash and commits behind directly by
         # comparing branch and current commit
         if self._cur_commit_hash:
-            branch_compared = gh.compare(
+            branch_compared = repository.compare(
                 base=self.branch, head=self._cur_commit_hash,
             )

@@ -423,13 +419,13 @@ class SourceUpdateManager(UpdateManager):
         # fall back and iterate over last 100 (items per page in gh_api) commits
         if not self._newest_commit_hash:

-            for curCommit in gh.commits():
+            for cur_commit in repository.commits():
                 if not self._newest_commit_hash:
-                    self._newest_commit_hash = curCommit['sha']
+                    self._newest_commit_hash = cur_commit['sha']
                     if not self._cur_commit_hash:
                         break

-                if curCommit['sha'] == self._cur_commit_hash:
+                if cur_commit['sha'] == self._cur_commit_hash:
                     break

         # when _cur_commit_hash doesn't match anything _num_commits_behind == 100