Compare commits

...

495 commits

Author SHA1 Message Date
Clinton Hall
bfbf1fb4c1 support for qbittorrent v5.0 (#2001)
* support for qbittorrent v5.0

* Remove py3.8 tests

* Add py 3.13 tests

* Update mediafile.py for Py3.13

* Create filetype.py

* Update link for NZBGet
2024-11-08 07:29:55 +13:00
Clinton Hall
470f611240 Merge branch 'master' into nightly 2024-04-26 12:12:53 +12:00
Clinton Hall
97df874d36 class not added put into debug logging 2024-04-26 12:09:41 +12:00
Matt Park
e9fbbf540c added global ignore flag for bytecode cleanup
Resolves #1867
2024-04-26 12:09:41 +12:00
Clinton Hall
39f5c31486 fix warnings (#1990) 2024-04-26 12:09:41 +12:00
Clinton Hall
cbc2090b0b always return imdbid and dirname 2024-04-26 12:09:41 +12:00
Clinton Hall
cc109bcc0b Add Python 3.12 and fix Radarr handling (#1989)
* Added Python3.12 and future 3.13

* Fix Radarr result handling

* remove py2.7 and py3.7 support
2024-04-26 12:09:41 +12:00
Matt Park
4c512051f7 Update movies.py
Check for an updated dir_name in case IMDB id was appended.
2024-04-26 12:09:41 +12:00
Matt Park
0c564243c2 Update identification.py
Return updated dir_name if needed
2024-04-26 12:09:41 +12:00
Clinton Hall
9ea322111c class not added put into debug logging 2024-03-20 14:23:32 +13:00
Matt Park
e14bc6c733 added global ignore flag for bytecode cleanup
Resolves #1867
2024-03-05 10:59:37 +13:00
Clinton Hall
27df8a4d8e
fix warnings (#1990) 2024-03-01 18:25:19 +13:00
Clinton Hall
b7d6ad8c07
always return imdbid and dirname 2024-02-29 07:01:23 +13:00
Clinton Hall
f98d6fff65
Add Python 3.12 and fix Radarr handling (#1989)
* Added Python3.12 and future 3.13

* Fix Radarr result handling

* remove py2.7 and py3.7 support
2024-02-28 15:47:04 +13:00
Clinton Hall
b802aca7e1
Merge pull request #1982 from MattPark/last-resort-movie-id
Last resort movie identification
2023-12-16 09:32:35 +13:00
Matt Park
836df51d14
Update movies.py
Check for an updated dir_name in case IMDB id was appended.
2023-10-02 15:15:01 -04:00
Matt Park
c6292d5390
Update identification.py
Return updated dir_name if needed
2023-10-02 15:13:36 -04:00
Clinton Hall
558970c212
Merge pull request #1980 from clinton-hall/nightly
Merge Nightly
2023-08-10 21:23:42 +12:00
Clinton Hall
38c628d605
Merge pull request #1979 from clinton-hall/clinton-hall-patch-1
Remove Py2.7 tests
2023-08-10 21:14:47 +12:00
Clinton Hall
029b58b2a6
Remove Py2.7 tests
This is no longer supported in azure pipelines.
2023-08-10 21:09:25 +12:00
Clinton Hall
2885461a12
Merge pull request #1978 from clinton-hall/remove_group
Initialize remove_groups #1973
2023-08-10 21:01:31 +12:00
Clinton Hall
ad73e597e4
Initialize remove_groups #1973
This parameter was not being loaded and therefore was ignored.
2023-08-09 22:50:25 +12:00
clinton-hall
6c2f7c75d4 update to v 12.1.12 2023-07-03 17:41:15 +12:00
clinton-hall
95e22d7af4 Merge branch 'master' into nightly 2023-07-03 17:21:31 +12:00
kandarz
e72c0b9228
Add 'dvb_subtitle' codec to list of ignored codecs when using 'mov_text' (#1974)
Add 'dvb_subtitle' codec to list of ignored codecs when using 'mov_text'. DVB subtitles are bitmap based.
2023-07-03 16:59:24 +12:00
Clinton Hall
c4cc554ea1 update to sonarr api v3 2023-05-22 22:51:28 +12:00
Labrys of Knossos
3078da31af Fix posix_ownership. 2023-05-22 22:51:28 +12:00
Labrys of Knossos
1fdfd128ba Add comments. 2023-05-22 22:51:28 +12:00
Labrys of Knossos
d3100f6178 Add database permissions logging upon failed access. 2023-05-22 22:51:28 +12:00
Clinton Hall
01bb239cdf
Merge pull request #1969 from clinton-hall/Sonarr-apiv3
update to sonarr api v3
2023-05-22 22:43:15 +12:00
Clinton Hall
d0b555c251
update to sonarr api v3 2023-04-18 20:59:28 +12:00
Labrys of Knossos
0c5f7be263
Merge pull request #1955 from clinton-hall/permitted
Fix permissions for posix and add comments
2023-01-01 06:03:08 -05:00
Labrys of Knossos
19d9e27c43 Fix posix_ownership. 2022-12-31 22:26:19 -05:00
Labrys of Knossos
1046c50778
Merge pull request #1954 from clinton-hall/permitted
Add database permissions logging upon failed access.
2022-12-31 18:34:20 -05:00
Labrys of Knossos
2c2d7f24b1 Add comments. 2022-12-31 18:21:33 -05:00
Labrys of Knossos
6e52bb2b33 Add database permissions logging upon failed access. 2022-12-31 17:56:38 -05:00
Clinton Hall
bd9c91ff5e
Merge pull request #1936 from clinton-hall/nightly
update to V12.1.11
2022-12-12 20:24:01 +13:00
Clinton Hall
b8482bed0e
Remove Py3.6 tests.
No longer available for pipeline tests.
2022-12-12 20:18:18 +13:00
Labrys of Knossos
2b6a7add72
Merge pull request #1919 from clinton-hall/hello-friend
Add new Python versions to tests.
2022-12-02 22:32:51 -05:00
Labrys of Knossos
55c1091efa Add new Python versions to classifiers. 2022-12-02 22:25:50 -05:00
Labrys of Knossos
8b409a5716 Add new Python versions to tests. 2022-12-02 22:25:37 -05:00
Labrys of Knossos
9307563ab8
Merge pull request #1910 from clinton-hall/bumpversion
Fixes bumpversion configuration
2022-12-02 21:10:27 -05:00
Labrys of Knossos
69e1c4d22e Bump version: 12.1.10 → 12.1.11 2022-12-02 21:00:32 -05:00
Labrys of Knossos
8a5c8c0863 Fix bumpversion fails with FileNotFoundError
The `README.md` file was moved to the `.github` folder in commit 742d482 and merged in clinton-hall/nzbToMedia#1574.

Additionally the version number was removed from `README.md` in commit 8745af2.

Fixes clinton-hall/nzbToMedia#1909
2022-12-02 20:59:08 -05:00
Clinton Hall
18ac3575ba
Merge pull request #1907 from clinton-hall/vendor
Update vendored libraries
2022-12-03 13:38:44 +13:00
Labrys of Knossos
5e3641ac23 Updated decorator to 4.4.2 2022-12-01 17:34:33 -05:00
Labrys of Knossos
fb6011f88d Updated stevedore to 2.0.1 2022-11-29 01:47:46 -05:00
Labrys of Knossos
f1624a586f Updated importlib-metadata to 2.1.3 2022-11-29 01:35:03 -05:00
Labrys of Knossos
684cca8c9b Updated more-itertools to 5.0.0 2022-11-29 01:26:47 -05:00
Labrys of Knossos
1aff7eb85d Updated zipp to 2.0.1 2022-11-29 01:21:38 -05:00
Labrys of Knossos
f05b09f349 Updates vendored subliminal to 2.1.0
Updates rarfile to 3.1
Updates stevedore to 3.5.0
Updates appdirs to 1.4.4
Updates click to 8.1.3
Updates decorator to 5.1.1
Updates dogpile.cache to 1.1.8
Updates pbr to 5.11.0
Updates pysrt to 1.1.2
Updates pytz to 2022.6
Adds importlib-metadata version 3.1.1
Adds typing-extensions version 4.1.1
Adds zipp version 3.11.0
2022-11-29 00:44:49 -05:00
Labrys of Knossos
d8da02cb69 Updates vendored setuptools to 44.1.1 2022-11-29 00:44:48 -05:00
Labrys of Knossos
3a2e09c26e Updates python-qbittorrent to 0.4.3 2022-11-29 00:44:48 -05:00
Labrys of Knossos
968ec8a1d8 Update vendored beautifulsoup4 to 4.11.1
Adds soupsieve 2.3.2.post1
2022-11-29 00:44:48 -05:00
Labrys of Knossos
2226a74ef8 Update vendored guessit to 3.1.1
Updates python-dateutil to 2.8.2
Updates rebulk to 2.0.1
2022-11-29 00:44:48 -05:00
Labrys of Knossos
ebc9718117 Update vendored requests-oauthlib to 1.3.1 2022-11-29 00:44:48 -05:00
Labrys of Knossos
501be2c479 Update vendored requests to 2.25.1
Updates certifi to 2021.5.30
Updates chardet to 4.0.0
Updates idna to 2.10
Updates urllib3 to 1.26.13
2022-11-29 00:44:48 -05:00
Labrys of Knossos
56c6773c6b Update vendored beets to 1.6.0
Updates colorama to 0.4.6
Adds confuse version 1.7.0
Updates jellyfish to 0.9.0
Adds mediafile 0.10.1
Updates munkres to 1.1.4
Updates musicbrainzngs to 0.7.1
Updates mutagen to 1.46.0
Updates pyyaml to 6.0
Updates unidecode to 1.3.6
2022-11-29 00:44:48 -05:00
Labrys of Knossos
5073ec0c6f Update vendored pyxdg to 0.28 2022-11-29 00:44:47 -05:00
Labrys of Knossos
aed4e9261c Update vendored configobj to 5.0.6
Updates vendored six to 1.16.0
2022-11-29 00:44:47 -05:00
Labrys of Knossos
b1cefa94e5 Update vendored windows libs 2022-11-29 00:44:47 -05:00
Labrys of Knossos
f61c211655 Fix .gitignore for pyd binary files 2022-11-29 00:44:47 -05:00
Labrys of Knossos
78ed3afe29 Merge branch 'processing' into nightly
* processing:
  Add `processor` folder to folder structure
  Streamline `core.processor.nzbget.parse_status`
  Streamline `core.processor.nzbget._parse_unpack_status`
  Streamline `core.processor.nzbget._parse_health_status`
  Extract health status parsing from `core.processor.nzbget.parse_status` -> `_parse_health_status`
  Extract unpack status parsing from `core.processor.nzbget.parse_status` -> `_parse_unpack_status`
  Streamline `core.processor.nzbget._parse_par_status`
  Extract par status parsing from `core.processor.nzbget.parse_status` -> `_parse_par_status`
  Streamline `core.processor.nzbget._parse_total_status`
  Extract total status parsing from `core.processor.nzbget.parse_status` -> `_parse_total_status`
  Streamline `core.processor.nzbget.check_version`
  Streamline `core.processor.nzbget.parse_failure_link`
  Streamline `core.processor.nzbget.parse_download_id`
  Standardize processing
  Extract version checks from `core.processor.nzbget.process` -> `check_version`
  Extract status parsing from `core.processor.nzbget.process` -> `parse_status`
  Extract failure_link parsing from `core.processor.nzbget.process` -> `parse_failure_link`
  Extract download_id parsing from `core.processor.nzbget.process` -> `parse_download_id`
  Standardize processing
  Merge legacy sab parsing with 0.7.17+
  Extract manual processing from `nzbToMedia.main` -> `core.processor.manual`
  Extract sabnzb processing from `nzbToMedia.main` -> `core.processor.sabnzbd`
  Extract nzbget processing from `nzbToMedia.main` -> `core.processor.nzbget`
  Refactor `nzbToMedia.process` -> `core.processor.nzb.process`
2022-11-29 00:36:48 -05:00
Labrys of Knossos
c85ee42874 Add processor folder to folder structure 2022-11-29 00:35:40 -05:00
Labrys of Knossos
34236e8960 Streamline core.processor.nzbget.parse_status 2022-11-29 00:35:40 -05:00
Labrys of Knossos
7737d0c4be Streamline core.processor.nzbget._parse_unpack_status 2022-11-29 00:35:40 -05:00
Labrys of Knossos
c34159d881 Streamline core.processor.nzbget._parse_health_status 2022-11-29 00:35:40 -05:00
Labrys of Knossos
efee5c722b Extract health status parsing from core.processor.nzbget.parse_status -> _parse_health_status 2022-11-29 00:35:40 -05:00
Labrys of Knossos
11adb220d8 Extract unpack status parsing from core.processor.nzbget.parse_status -> _parse_unpack_status 2022-11-29 00:35:40 -05:00
Labrys of Knossos
8e96d17537 Streamline core.processor.nzbget._parse_par_status 2022-11-29 00:35:40 -05:00
Labrys of Knossos
ab006eefb2 Extract par status parsing from core.processor.nzbget.parse_status -> _parse_par_status 2022-11-29 00:35:40 -05:00
Labrys of Knossos
e5ea34b569 Streamline core.processor.nzbget._parse_total_status 2022-11-29 00:35:40 -05:00
Labrys of Knossos
fc2ebeb245 Extract total status parsing from core.processor.nzbget.parse_status -> _parse_total_status 2022-11-29 00:35:40 -05:00
Labrys of Knossos
d7c6a8e1cc Streamline core.processor.nzbget.check_version 2022-11-29 00:35:40 -05:00
Labrys of Knossos
d11dda8af8 Streamline core.processor.nzbget.parse_failure_link 2022-11-29 00:35:40 -05:00
Labrys of Knossos
9cc92ddd7b Streamline core.processor.nzbget.parse_download_id 2022-11-29 00:35:40 -05:00
Labrys of Knossos
3e676f89a5 Standardize processing 2022-11-29 00:35:40 -05:00
Labrys of Knossos
49af821bcb Extract version checks from core.processor.nzbget.process -> check_version 2022-11-29 00:35:40 -05:00
Labrys of Knossos
de06d45bb0 Extract status parsing from core.processor.nzbget.process -> parse_status 2022-11-29 00:35:40 -05:00
Labrys of Knossos
0a8e8fae9f Extract failure_link parsing from core.processor.nzbget.process -> parse_failure_link 2022-11-29 00:35:40 -05:00
Labrys of Knossos
a2b2e4f620 Extract download_id parsing from core.processor.nzbget.process -> parse_download_id 2022-11-29 00:35:40 -05:00
Labrys of Knossos
e8f5dc409a Standardize processing 2022-11-29 00:35:40 -05:00
Labrys of Knossos
637020d2bf Merge legacy sab parsing with 0.7.17+ 2022-11-29 00:35:40 -05:00
Labrys of Knossos
528cbd02cd Extract manual processing from nzbToMedia.main -> core.processor.manual 2022-11-29 00:35:40 -05:00
Labrys of Knossos
58c998712f Extract sabnzb processing from nzbToMedia.main -> core.processor.sabnzbd 2022-11-29 00:35:40 -05:00
Labrys of Knossos
073b19034b Extract nzbget processing from nzbToMedia.main -> core.processor.nzbget 2022-11-29 00:35:34 -05:00
Labrys of Knossos
7a3c2bc8a5 Refactor nzbToMedia.process -> core.processor.nzb.process 2022-11-29 00:30:50 -05:00
Labrys of Knossos
ce65ef20c6 Add Python 3.11 end-of-life 2022-11-29 00:27:18 -05:00
Clinton Hall
7436ba7716
Merge pull request #1896 from clinton-hall/nightly
Nightly
2022-08-18 16:31:35 +12:00
Clinton Hall
382675e391
Merge pull request #1895 from redhat421/rem_fix
Switch `rem_id` to a Set to prevent duplicates.
2022-08-09 10:25:29 +12:00
Nick Austin
c639fc1cf9
Switch to set for rem_id. 2022-08-04 23:46:37 -07:00
Clinton Hall
d23c2c2d3a
Merge pull request #1893 from clinton-hall/nightly
Fix issue with no Require_lan set #1856 (#1892)
2022-07-15 17:51:33 +12:00
Clinton Hall
a886350bea
Fix issue with no Require_lan set #1856 (#1892)
Thanks @BradKollmyer
2022-07-15 17:41:38 +12:00
Clinton Hall
084e404b92
Merge pull request #1891 from clinton-hall/nightly
Nightly
2022-07-15 09:24:28 +12:00
Clinton Hall
a0bccb54cc
Req lan1 (#1890)
* Multiple Req_Lan
2022-07-15 09:02:12 +12:00
Jingxuan He
566e98bc78
Fix a bug about wrong order of function arguments (#1889) 2022-06-17 07:46:51 +12:00
Clinton Hall
d956cd2b75
Updated SiCKRAGE SSO URL (#1886) (#1887)
Co-authored-by: echel0n <echel0n@sickrage.ca>
2022-06-07 10:49:17 +12:00
echel0n
7936c2c92b
Updated SiCKRAGE SSO URL (#1886) 2022-06-07 10:43:13 +12:00
clinton-hall
2766938921 V12.1.10 for merge 2022-01-01 14:11:11 +13:00
Clinton Hall
686d239ce5
Python 3.10 (#1868)
* Add Py 3.10 #1866

* Add tests for Python 3.10

* update Babelfish
2021-12-03 18:48:04 +13:00
Clinton Hall
684cab5c8a
Add Support for Radarr V4 #1862 (#1863) 2021-11-16 16:08:17 +13:00
Clinton Hall
48154d0c3c
Update URL for x264 #1860 (#1861)
* Update URL for x264 #1860
* Use Ubuntu-latest in Pipelines (16.04 image removed from Pipelines)
2021-11-10 10:52:33 +13:00
Clinton Hall
162143b1cd
Media lan check (#1856)
* Add require_lan

#1853
2021-10-11 07:16:00 +13:00
Clinton Hall
36eddcfb92
Updates to Syno Auth #1844 2021-08-26 18:03:32 +12:00
Clinton Hall
411e70ba92
Fix fork recognition when defined in cfg. #1839 (#1842) 2021-08-13 06:52:47 +12:00
Clinton Hall
8b8fda6102
Syno api version detection (#1841)
* Get max api version for login. #1840
2021-08-12 22:14:00 +12:00
Clinton Hall
4103a7dc05
Fix auto-fork detection (#1839)
* Fix Fork Detection when parameters not exact match. #1838

* Fix logging of detected fork. #1838

* Fix SickGear fork detection #1838
2021-08-10 21:32:06 +12:00
clinton-hall
f9dde62762 update to v12.1.09 for merge 2021-07-17 21:44:16 +12:00
Clinton Hall
213f1f6f10
Radarr api-v3 changes (#1834)
#1831
2021-06-09 07:27:58 +12:00
Clinton Hall
30a69d6e37
Remove State from Radarr api return 2021-06-07 21:51:36 +12:00
Clinton Hall
2280f8dee8
Update Radarr api version (#1833) 2021-06-07 15:45:32 +12:00
p0ps
ee060047b7
Check for apikey when fork=medusa-apiv2 is used. (#1828)
#1827
2021-05-07 22:00:54 +12:00
Clinton Hall
e3efbdbaee
Add subs renaming for radarr/sonarr (#1824)
* Re-added rename_subs #1823 #768
2021-04-10 19:37:32 +12:00
Christoph Stahl
6ccc4abc18
Use Response.text instead of Response.content (#1822)
`content` returns a bytes object, `text` returns a string object. The latter can be split on the string `\n`, the former cannot, which leads to an exception.
2021-03-21 10:26:33 +13:00
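A minimal illustration of the issue fixed above, assuming a generic requests call (the URL and variable names are placeholders, not the project's actual code): Response.content is bytes while Response.text is str, and only the str form can be split on the string '\n' under Python 3.

    import requests

    response = requests.get('https://example.com/api/lines')  # placeholder URL

    # response.content is bytes; splitting it with a str separator raises
    # TypeError on Python 3: "a bytes-like object is required, not 'str'".
    # lines = response.content.split('\n')

    # response.text is str, so splitting on '\n' works as expected.
    lines = response.text.split('\n')
    print(lines)
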
Clinton Hall
0329cc4f98
Fix missing title when release_id (#1820) 2021-03-08 15:10:06 +13:00
clinton-hall
d64bd636d2 fix removal of duplicate parameters. 2021-02-26 20:36:03 +13:00
clinton-hall
c9e06eb555 allow new params for SickChill. 2021-02-26 20:25:47 +13:00
Henry
623e619534
Added chmod to 644 for subtitles (#1817)
I ran into problems with permissions.
By default subliminal writes files with 0600 permissions.
2021-02-21 22:13:05 +13:00
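A one-line sketch of the permissions change described above (the path is a placeholder, not the project's actual call site):

    import os

    # Subtitles written with 0600 are unreadable to other users; relax to 0644.
    os.chmod('/path/to/movie.en.srt', 0o644)  # placeholder path
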
p0ps
06d91c6928
Pymedusa (#1815)
* Add wait_for as a valid option for pyMedusa

* Add docs.

* doc

* wrong section
2021-02-18 14:30:51 +13:00
p0ps
c2eaa72a2c
Fix other sickbeard forks erroring. (#1814)
* Update SickBeard section with is_priority param for medusa.

* Add param type to medusa-apiv2 fork.

* Extract param only when not a fork_obj
* Directly return process_result from api_call()

* Implemented classes for PymedusaApiV1 and PymedusaApiv2.

* improve linting
2021-02-17 20:31:08 +13:00
Clinton Hall
f48812eccd
Fix other sickbeard forks erroring. (#1813)
Co-authored-by: p0psicles <rogier@headshots.nl>
2021-02-15 21:28:25 +13:00
p0ps
6a6b25fece
Medusa apiv2 (#1812)
* add fork Medusa-apiV2

* Added classes for sickbeard (base) and PyMedusa.

* refactored part of the forks.py code -> InitSickBeard class.

* Add .vscode to gitignore

* Further refactor forks.py -> sickbeard.py

* Working example for pyMedusa when fork is 'medusa' (no api key)

* fix import for Py2

Co-authored-by: clinton-hall <fock_wulf@hotmail.com>
2021-02-15 15:02:15 +13:00
echel0n
0acf78f196
Added dedicated SiCKRAGE section with API version and SSO login support (#1805)
Added migration code to migrate SickBeard section with fork sickrage-api to new SiCKRAGE section
2021-01-13 13:16:41 +13:00
clinton-hall
9d64c2f478 Update to V12.1.08 2020-12-14 20:34:26 +13:00
clinton-hall
40548fa670 add configobj 2020-11-18 22:07:09 +13:00
clinton-hall
aded4e796e add updated configobj 2020-11-18 22:05:40 +13:00
Clinton Hall
d4d5f00a18
Single file downloads with clean name #1789 (#1791) 2020-10-24 18:25:35 +13:00
Clinton Hall
bf05f1b4e7
Bypass for manual execution (#1788)
* no_status_check prevents additional checks.
#1192
#1778
2020-10-16 22:55:41 +13:00
Clinton Hall
de81037d15
Py3.9 (#1787)
* Add Py3.9 support
2020-10-16 13:51:55 +13:00
Clinton Hall
a96f07c261
No status change error suppression (#1786) 2020-10-15 21:59:43 +13:00
clinton-hall
4c33b5574b Merge branch 'master' into nightly 2020-09-23 16:17:57 +12:00
Clinton Hall
b9c3ccb71d
Merge Nightly (#1783)
* Add Failed to SickGear fork detection (#1772)

* Fix for failed passed as 2,3 from SAB (#1777)

* Fix DB import (#1779)

* Sqlite3.row handling fix
* Fix import error in Python3

* make nzbToWatcher.py executable. #1780

* Update to V12.1.07 (#1782)
2020-09-23 16:08:32 +12:00
Clinton Hall
0833bf1724
Update to V12.1.07 (#1782) 2020-09-23 16:01:35 +12:00
Clinton Hall
f92f8f3952
Add .gz support (#1781)
#1715
2020-09-23 15:40:54 +12:00
clinton-hall
c21fa99bd7 make nzbToWatcher.py executable. #1780 2020-09-23 14:24:47 +12:00
Clinton Hall
beecb1b1a0
Fix DB import (#1779)
* Sqlite3.row handling fix
* Fix import error in Python3
2020-09-19 21:53:01 +12:00
Clinton Hall
a359691515
Fix for failed passed as 2,3 from SAB (#1777) 2020-09-18 16:12:56 +12:00
Clinton Hall
d1fe38b0b2
Add Failed to SickGear fork detection (#1772) 2020-09-12 12:36:49 +12:00
clinton-hall
b3dc118b52 Merge branch 'dev' 2020-09-08 10:43:12 +12:00
clinton-hall
8c8ea0f6fe Merge branch 'nightly' into dev 2020-09-08 10:42:34 +12:00
clinton-hall
b3388f959d update to version 12.1.06 2020-09-08 10:40:57 +12:00
Clinton Hall
2dfdc69487
log error when migrating #850 (#1768)
Can't display debug logging until the config is loaded to enable debugging!
So log as error to get details of the migration fault.
2020-08-26 20:27:38 +12:00
clinton-hall
f10fa03159 Use params for auto fork. #1765 2020-08-15 19:24:52 +12:00
Jelle Breuer
7f8397b516
Added missing ffmpeg settings to nzbToRadarr and nzbToNzbDrone (#1757) 2020-07-23 22:01:50 +12:00
Clinton Hall
850ba6dcea
Fix auto detection of forks. #1738 2020-04-23 10:07:16 +12:00
clinton-hall
f5e4ec0981 Merge branch 'dev' 2020-04-17 11:29:06 +12:00
clinton-hall
54534c4eed Merge branch 'nightly' into dev 2020-04-17 11:28:16 +12:00
clinton-hall
5fb3229c13 update to version 12.1.05 2020-04-17 11:26:57 +12:00
Clinton Hall
b409279254
fix py2 handling #1725 2020-03-09 06:55:13 +13:00
Clinton Hall
001f754cd3
Fix unicode check in Py2 #1725 (#1727) 2020-03-08 13:36:26 +13:00
Clinton Hall
58a6b2022b
Fix dictionary changed size. #1724 (#1726) 2020-03-08 13:35:21 +13:00
Clinton Hall
c037387fc3
always use cmd type for api. #1723 2020-03-03 12:34:02 +13:00
Clinton Hall
f8de0c1ccf
fix api check 2020-03-02 21:56:59 +13:00
Clinton Hall
4facc36e3f
fix return for incorrect command. 2020-03-02 21:38:10 +13:00
Clinton Hall
a233db0024
SickGear 403 fix (#1722)
403 from SickGear #1704
2020-03-02 18:19:56 +13:00
cheese1
c18fb17fd8
fix typos (#1714) 2020-01-29 12:53:52 +13:00
Clinton Hall
2a96311d6f
Qbittorrent patch 1 (#1711)
qBittorrenHost to qBittorrentHost (#1710)

Co-authored-by: boredazfcuk <boredazfcuk@hotmail.co.uk>
2020-01-24 23:05:16 +13:00
Clinton Hall
11f1c2ce3f
Update Syno Default port. #1671 2020-01-21 14:34:35 +13:00
Clinton Hall
0fa2a80bf6
Fix Syno Parser #1671 2020-01-21 14:32:36 +13:00
Clinton Hall
b793ce7933
Syno ds patch 1 (#1702)
* Add Syno DS parsing #1671
as per https://forum.synology.com/enu/viewtopic.php?f=38&t=92856
* add config guidance
* add syno client
2020-01-13 21:26:21 +13:00
Clinton Hall
0827c5bafe
add SABnzbd environment variable handling. #1689 (#1701) 2020-01-13 21:17:33 +13:00
clinton-hall
5a6837759d Merge branch 'dev' 2020-01-13 21:02:33 +13:00
clinton-hall
25528f8e7b Merge branch 'nightly' into dev 2020-01-13 21:01:48 +13:00
clinton-hall
43312fc642 update to v12.1.04 2020-01-13 21:00:12 +13:00
Clinton Hall
6861b9915e
fix empty dir_name #1673 (#1700) 2020-01-13 20:40:46 +13:00
Clinton Hall
bbc8f132c3
fixed typo #1698 2020-01-09 14:18:23 +13:00
Clinton Hall
b8784d71dd
Fix Json returned from Sonarr and Lidarr (#1697) 2020-01-08 07:03:11 +13:00
clinton-hall
f2c07f3c38 fix encoding checks 2020-01-05 13:39:23 +13:00
clinton-hall
71c435ba48 fix encoding issue with python3 #1694 2020-01-05 12:22:23 +13:00
clinton-hall
a320ac5a66 Merge branch 'dev' 2020-01-04 22:36:11 +13:00
clinton-hall
1cca1b7c06 Merge branch 'nightly' into dev 2020-01-04 22:35:36 +13:00
clinton-hall
6d647a2433 update to v 12.1.03 2020-01-04 22:34:42 +13:00
Clinton Hall
a5e76fc56f
Py2fix (#1693)
* Update encoding to use bytes for strings. (#1690)
* fix ffmpeg install issues for test
Co-authored-by: Jonathan Springer <jonpspri@gmail.com>
2020-01-04 22:01:13 +13:00
Clinton Hall
aeb3e0fd6d
Deluge update to V2 (#1683) Fixes #1680 2019-12-10 12:55:13 +13:00
clinton-hall
2e7d4a5863 Merge branch 'dev' 2019-12-08 14:44:16 +13:00
clinton-hall
9111f815f9 Merge branch 'nightly' into dev 2019-12-08 14:43:41 +13:00
clinton-hall
feb4e36c4c update to v12.1.02 2019-12-08 14:42:59 +13:00
clinton-hall
cbd0c25c88 Merge branch 'nightly' into dev 2019-12-08 14:37:25 +13:00
Clinton Hall
75ecbd4862
Add Submodule checks (#1682) 2019-12-08 14:35:15 +13:00
Clinton Hall
d95e4e56c8
remove redundant json.loads #1671 (#1681) 2019-12-08 12:31:46 +13:00
Clinton Hall
0d7c59f1f0
Remove Encode of directory #1671 (#1672) 2019-11-13 18:32:03 +13:00
Clinton Hall
fdaa007756
Don't write byte code (#1669) 2019-11-10 09:38:48 +13:00
Clinton Hall
5cd449632f
Py3.8 (#1659)
* Add Python3.8 and CI Tests
* Force testing of video in case ffmpeg not working
2019-11-08 14:13:07 +13:00
Clinton Hall
70ab7d3d61
Add Watcher3 Config (#1667)
* Set NZBGet config #1665
2019-11-04 13:17:38 +13:00
Clinton Hall
fde8714862
Update all qBittorrent WebAPI paths for client v4.1.0+ (#1666) 2019-11-04 12:28:35 +13:00
Sergio Cambra
c92588c3be fix downloading subtitles, no provider was registered (#1664) 2019-11-04 12:10:20 +13:00
Sergio Cambra
1814bd5ae1 add watcher3 integration (#1665) 2019-11-04 12:05:00 +13:00
Clinton Hall
80ef0d094e
Fix autofork fallback. #163 2019-09-19 20:47:13 +12:00
clinton-hall
46b2e8998c update to v12.1.01 2019-08-13 18:40:15 +12:00
clinton-hall
96f086bdc1 update to v12.1.01 2019-08-13 18:39:26 +12:00
clinton-hall
77f34261fa update to v12.1.01 2019-08-13 18:38:05 +12:00
Clinton Hall
e738727c52
Force status from SABnzbd to be integer. #1646 #1647 (#1648) 2019-08-10 19:35:50 +12:00
clinton-hall
e165bbcefc Merge v12.1.00 2019-08-06 13:19:19 +12:00
clinton-hall
ccfc3c1703 Merge V12.1.00 2019-08-06 13:18:15 +12:00
clinton-hall
dc5d43b028 update to version 12.1.00 2019-08-06 13:16:25 +12:00
clinton-hall
35c65254e7 Merge branch 'nightly' into dev 2019-08-06 13:07:50 +12:00
Clinton Hall
bde5a15f66
Fixes for user_script categories (#1645)
Fixes for user_script categories. #1643
2019-08-06 09:04:45 +12:00
Clinton Hall
5714540949
Fix uTorrent with Python3 (#1644)
* Remove temp workaround for Microsoft Azure python issues.
2019-08-02 13:02:46 +12:00
Clinton Hall
9d05d6c914
Merge pull request #1640 from clinton-hall/imdb-boundary-1
Add word boundary to imdb match. #1639
2019-07-23 14:45:06 +12:00
Clinton Hall
d7eab5d2d3
Add word boundary to imdb match. #1639
Prevents matching (and truncating) longer ids.
Thanks @currently-off-my-rocker
2019-07-23 14:24:31 +12:00
Clinton Hall
745bad3823
Merge pull request #1639 from currently-off-my-rocker/imdb-ids-8-digits
identify imdb ids with 8 digits
2019-07-23 09:00:51 +12:00
currently-off-my-rocker
5a18ee9a27
identify imdb ids with 8 digits 2019-07-22 13:07:09 +02:00
Clinton Hall
8ba8caf021
Fix3 (#1637)
* add singular fork detection for multiple runs. Fixes #1637
* Add newly identified fork variants #1630 #1637
* remove encoding of paths. #1637 #1582
2019-07-22 12:35:01 +12:00
Clinton Hall
f21e18b1bf
Fix2 (#1636)
add changed api handling for SickGear. Fixes #1630
2019-07-16 14:36:00 +12:00
Clinton Hall
9a958afac8
don't crash when no optionalParameters. Fixes #1630 (#1632) 2019-07-12 19:39:55 +12:00
Clinton Hall
95e4c70d9a Set theme jekyll-theme-cayman 2019-07-09 15:05:33 +12:00
Clinton Hall
9f6c068cde
Transcode patch 1 (#1627)
* Add Piping of stderr to capture transcoding failures. #1619
* Allow passing absolute nice command. #1619
* Change .cfg description for niceness
* Fix errors due to VM packages out of date (ffmpeg)
* Fix Sqlite import error on tests
* Fix Azure issues

https://developercommunity.visualstudio.com/content/problem/598264/known-issue-azure-pipelines-images-missing-sqlite3.html
2019-06-20 12:56:02 +12:00
Ubuntu
ce50a1c27d Fix already running handling for Python3. #1626 2019-06-19 21:37:42 +00:00
clinton-hall
f1dc672056 fix deluge client for python3. Fixes #1626 2019-06-19 22:50:36 +12:00
clinton-hall
d39a7dd234 fix to make deluge client py 2 and 3 compatible. Fixes #1626 2019-06-18 20:52:19 +12:00
Clinton Hall
81895afd3f
Merge pull request #1625 from TheHolyRoger/patch-3
Don't replace apostrophes in qBittorrent input_name
2019-06-08 20:08:21 +12:00
TheHolyRoger
3237336775
Don't replace apostrophes in qBittorrent input_name
Don't replace apostrophes in qBittorrent input_name - only trim if found at beginning/end of string.

This stops nzbToMedia from processing the entire download folder when asked to process a folder with apostrophes in the title.
2019-06-08 00:20:12 +01:00
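A rough sketch of the trimming behaviour described in the commit above, assuming the change amounts to stripping apostrophes from the ends of the name rather than replacing them everywhere (the function name is illustrative, not the project's):

    def clean_input_name(input_name):
        # Trim apostrophes only at the beginning/end of the string; apostrophes
        # inside the title are kept so the name still matches the folder on disk.
        return input_name.strip("'")

    print(clean_input_name("'It's a Wonderful Life (1946)'"))
    # It's a Wonderful Life (1946)
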
clinton-hall
fd1149aea1 add additional options to pass into ffmpeg. #1619 2019-06-06 21:46:56 +12:00
Clinton Hall
8c45e76507
Bluray 1 (#1620)
* added code to extract bluray images and folder structure. #1588

* add Mounting of iso files as fall-back

* add new mkv-bluray default.

* clean-up fall-back for ffmpeg not accepting -show error
2019-05-31 14:06:25 +12:00
clinton-hall
5ff056844c Fix NoExtractFailed usage. Fixes #1618 2019-05-20 21:17:54 +12:00
clinton-hall
5375d46c32 add remote path handling for LazyLibrarian #1223 2019-04-18 21:46:32 +12:00
Clinton Hall
52cae37609
Fix crash of remote_path exception. #1223 2019-04-18 08:40:11 +12:00
Labrys of Knossos
472dd8c2c7
Merge pull request #1608 from clinton-hall/fix/database
Fix IndexError on Python 2.7 when accessing database
2019-04-08 19:48:12 -04:00
Labrys of Knossos
455915907b Fix key access for sqlite3.Row on Python 2.7
Fixes #1607
2019-04-08 19:24:59 -04:00
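For context on the sqlite3.Row fix above and the neighbouring "Remove unnecessary dict factory" commit, a small self-contained sketch (table and column names are made up): sqlite3.Row already supports access by column name and by index, so no custom dict row factory is needed.

    import sqlite3

    con = sqlite3.connect(':memory:')
    con.row_factory = sqlite3.Row
    con.execute('CREATE TABLE downloads (input_name TEXT, status INTEGER)')
    con.execute("INSERT INTO downloads VALUES ('example.release', 0)")

    row = con.execute('SELECT * FROM downloads').fetchone()
    print(row['input_name'])           # access by column name
    print(row[1])                      # access by numeric index
    print(dict(zip(row.keys(), row)))  # plain dict only if one is really needed
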
Labrys of Knossos
d3bbcb6b63 Remove unnecessary dict factory for database. 2019-04-08 19:24:59 -04:00
Labrys of Knossos
713f1a14f3
Merge pull request #1606 from clinton-hall/flake8/future-import
Flake8/future import
2019-04-07 17:53:03 -04:00
Labrys of Knossos
424879e4b6 Add future imports 2019-04-07 17:44:25 -04:00
Labrys of Knossos
f42cc020ea Add flake8-future-import to tox.ini 2019-04-07 17:35:02 -04:00
Labrys of Knossos
e98c29010a
Merge pull request #1605 from clinton-hall/flake8/selective-tests
Add optional flake8 tests to selective testing
2019-04-07 15:38:31 -04:00
Labrys of Knossos
c6e35bd2db Add optional flake8 tests to selective testing
Ignore W505 (doc string length) for now
2019-04-07 14:20:20 -04:00
Labrys of Knossos
e4b03005a1
Merge pull request #1604 from clinton-hall/fix/flake8
Fix/flake8
2019-04-07 13:51:51 -04:00
Labrys of Knossos
9f52406d45 Fix flake8-quotes Q000 Remove bad quotes 2019-04-07 13:44:33 -04:00
Labrys of Knossos
99159acd80 Fix flake8-bugbear B007 Loop control variable not used within the loop body. 2019-04-07 13:39:48 -04:00
Labrys of Knossos
d608000345 Fix flake8-commas C819 trailing comma prohibited 2019-04-07 13:38:27 -04:00
Labrys of Knossos
81c50efcd6 Fix flake8-commas C813 missing trailing comma in Python 3 2019-04-07 13:37:17 -04:00
Labrys of Knossos
eec977d909 Fix flake8-docstrings D403 First word of the first line should be properly capitalized 2019-04-07 13:33:20 -04:00
Labrys of Knossos
093f49d5aa Fix flake8-docstrings D401 First line should be in imperative mood 2019-04-07 13:32:06 -04:00
Labrys of Knossos
73e47466b4 Fix flake8-docstrings D205 1 blank line required between summary line and description 2019-04-07 13:30:40 -04:00
Labrys of Knossos
f98b39cdbb Fix flake8-docstrings D204 1 blank line required after class docstring 2019-04-07 13:27:31 -04:00
Labrys of Knossos
70fa47394e Fix flake8-docstrings D202 No blank lines allowed after function docstring 2019-04-07 13:26:13 -04:00
Labrys of Knossos
181675722d Fix flake8 W291 trailing whitespace 2019-04-07 13:23:24 -04:00
Labrys of Knossos
90602bf154 Fix flake8 W293 blank line contains whitespace 2019-04-07 13:17:55 -04:00
Labrys of Knossos
9527a2bd67 Fix flake8 E402 module level import not at top of file 2019-04-07 13:16:35 -04:00
Labrys of Knossos
98e8fd581a Fix flake8 E303 too many blank lines 2019-04-07 13:08:31 -04:00
Labrys of Knossos
daa9819798 Fix flake8 F401 item imported but unused 2019-04-07 13:06:25 -04:00
Labrys of Knossos
9dd25f96b2 Fix flake8 E266 too many leading '#' for block comment
Ignore for NZBGET scripts
2019-04-07 12:58:31 -04:00
Labrys of Knossos
077f04bc53 Fix flake8 E265 block comment should start with '# '
Ignore for NZBGET scripts
2019-04-07 12:56:50 -04:00
Labrys of Knossos
8736642e78 Fix code quality checks to run on project root and custom libs
Fixes #1600
Fixes #1601
2019-04-07 12:46:47 -04:00
Labrys of Knossos
3a95b433f3
Merge pull request #1603 from clinton-hall/fix/flake8
Fix/flake8
2019-04-07 12:46:16 -04:00
Labrys of Knossos
28ff74d0c8 Revert "Temporarily disable some flake8 ignores for testing"
This reverts commit e7179dde1c.
2019-04-07 12:42:18 -04:00
Labrys of Knossos
e7179dde1c Temporarily disable some flake8 ignores for testing 2019-04-07 12:38:43 -04:00
Labrys of Knossos
0788a754cb Fix code quality checks to run all desired tests
Fixes #1602
2019-04-07 12:15:07 -04:00
Labrys of Knossos
aeed469c5f
Merge pull request #1599 from clinton-hall/flake8/bugbear
Flake8/bugbear
2019-04-06 23:55:00 -04:00
Labrys of Knossos
b8c2b6b073
Merge pull request #1598 from clinton-hall/flake8/docstrings
Flake8/docstrings
2019-04-06 23:51:56 -04:00
Labrys of Knossos
23a450f095
Merge pull request #1597 from clinton-hall/flake8/comprehensions
Flake8/comprehensions
2019-04-06 23:49:48 -04:00
Labrys of Knossos
72140e939c Fix flake8-bugbear B902 Invalid first argument used for instance method. 2019-04-06 23:37:20 -04:00
Labrys of Knossos
10b2eab3c5 Fix flake8-docstrings D401 First line should be in imperative mood 2019-04-06 23:37:20 -04:00
Labrys of Knossos
4c8e896bbb Fix flake8-bugbear B007 Loop control variable not used within the loop body. 2019-04-06 23:37:20 -04:00
Labrys of Knossos
e00b5cc195 Fix flake8-bugbear B010 Do not call setattr with a constant attribute value, it is not any safer than normal property access. 2019-04-06 23:37:20 -04:00
Labrys of Knossos
267d8d1632 Add flake8-bugbear to tox.ini 2019-04-06 23:37:20 -04:00
Labrys of Knossos
6f6c9bcc9d Fix flake8-docstrings D400 First line should end with a period 2019-04-06 23:37:19 -04:00
Labrys of Knossos
1d7dba8aeb Fix flake8-docstrings D205 1 blank line required between summary line and description 2019-04-06 23:37:19 -04:00
Labrys of Knossos
777bc7e35d Fix flake8-docstrings D202 No blank lines allowed after function docstring 2019-04-06 23:37:19 -04:00
Labrys of Knossos
4dd58afaf6 Fix flake8-docstrings D200 One-line docstring should fit on one line with quotes 2019-04-06 23:37:19 -04:00
Labrys of Knossos
a8043d0259 Add flake8-docstrings to tox.ini 2019-04-06 23:37:19 -04:00
Labrys of Knossos
169fcaae4a Fix flake8-comprehensions C407 Unnecessary list comprehension 2019-04-06 23:36:18 -04:00
Labrys of Knossos
b9c7eec834 Fix flake8-comprehensions C403 Unnecessary list comprehension 2019-04-06 23:36:18 -04:00
Labrys of Knossos
f2964296c5 Add flake8-comprehensions to tox.ini 2019-04-05 19:19:11 -04:00
Labrys of Knossos
0ba4b9daab
Merge pull request #1596 from clinton-hall/flake8/quotes
Flake8/quotes
2019-04-05 19:13:14 -04:00
Labrys of Knossos
94c42dbd8a Fix flake8-quotes Q000 Remove bad quotes 2019-04-05 19:04:31 -04:00
Labrys of Knossos
2995c7f391 Add flake8-quotes to tox.ini 2019-04-05 19:04:11 -04:00
Labrys of Knossos
bbcef52eb5
Merge pull request #1595 from clinton-hall/flake8/commas
Flake8/commas
2019-04-05 18:25:38 -04:00
Labrys of Knossos
c5244df510 Fix flake8-commas C819 trailing comma prohibited 2019-04-05 18:14:44 -04:00
Labrys of Knossos
14b2aa6bf4 Fix flake8-commas C812 missing trailing comma 2019-04-05 18:14:44 -04:00
Labrys of Knossos
0bcbabd681 Add flake8-commas to tox.ini 2019-04-05 18:14:44 -04:00
Labrys of Knossos
627b453d3b
Merge pull request #1594 from clinton-hall/quality/flake8
Quality/flake8
2019-04-05 17:52:56 -04:00
Labrys of Knossos
697df555ec Fix flake8 W293 blank line contains whitespace 2019-04-05 17:12:05 -04:00
Labrys of Knossos
0350521b87 Fix flake8 W291 trailing whitespace 2019-04-05 17:12:05 -04:00
Labrys of Knossos
644a11118c Fix flake8 F401 imported but unused 2019-04-05 17:12:05 -04:00
Labrys of Knossos
faa378f787 Fix flake8 E712 comparison to True should be 'if cond is True:' or 'if cond:' 2019-04-05 17:12:04 -04:00
Labrys of Knossos
d208798430 Fix flake8 E402 module level import not at top of file 2019-04-05 17:12:04 -04:00
Labrys of Knossos
8e6e2d1647 Fix flake8 E305 expected 2 blank lines after class or function definition, found 1 2019-04-05 17:12:04 -04:00
Labrys of Knossos
032f7456f9 Fix flake8 E302 expected 2 blank lines, found 1 2019-04-05 17:12:04 -04:00
Labrys of Knossos
a571fc3122 Fix flake8 E265 block comment should start with '# ' 2019-04-05 17:12:04 -04:00
Labrys of Knossos
5f633b931a Fix flake8 E261 at least two spaces before inline comment 2019-04-05 17:12:04 -04:00
Labrys of Knossos
8a22f20a8b Fix flake8 E241 multiple spaces after ':' 2019-04-05 17:12:04 -04:00
Labrys of Knossos
07ad515b50 Fix flake8 E226 missing whitespace around arithmetic operator 2019-04-05 17:12:04 -04:00
Labrys of Knossos
87e813f062 Fix flake8 E126 continuation line over-indented for hanging indent 2019-04-05 17:12:04 -04:00
Labrys of Knossos
90090d7e02 Fix flake8 E117 over-indented 2019-04-05 17:12:03 -04:00
Labrys of Knossos
a8d1cc4fe9 Add flake8 quality checks to tox.ini 2019-04-05 17:11:27 -04:00
Labrys of Knossos
51e520547b
Merge pull request #1593 from clinton-hall/quality/tox
Add tox.ini
2019-04-05 17:03:58 -04:00
Labrys of Knossos
822603d021 Add tox.ini 2019-04-05 16:53:14 -04:00
Clinton Hall
825b48a6c1
add h265 to MKV profile allow. Fixes #1592 2019-04-04 11:34:25 +13:00
Labrys of Knossos
cb3f61f137
Merge pull request #1591 from clinton-hall/tests/cleanup
Tests/cleanup
2019-03-31 12:56:18 -04:00
Labrys of Knossos
f5fdc14577 Revert "Force cleanup errors for confirming CI test"
This reverts commit 16b7c11495.
2019-03-31 12:49:32 -04:00
Labrys of Knossos
16b7c11495 Force cleanup errors for confirming CI test 2019-03-31 12:45:07 -04:00
Labrys of Knossos
02813a6eaf Add source install cleanup test 2019-03-31 12:39:12 -04:00
Labrys of Knossos
a531f4480e Add source install cleanup test 2019-03-31 12:30:27 -04:00
Labrys of Knossos
9a833565aa
Merge pull request #1590 from clinton-hall/libs/pywin32
Add pywin32 to setup.py install_requires on Windows
2019-03-31 12:29:08 -04:00
Labrys of Knossos
f20e1e4f0d Add pywin32 to setup.py install_requires on Windows 2019-03-31 11:45:04 -04:00
clinton-hall
809e642039 fix LL default branch. 2019-03-30 08:47:20 +13:00
clinton-hall
1597763d30 minor fix for LazyLibrarian api. 2019-03-29 10:38:59 +13:00
Clinton Hall
aee3b151c0
Lazylib 1 (#1587)
* add support for LazyLibrarian. Fixes #1223
2019-03-29 09:50:43 +13:00
Clinton Hall
a3db8fb4b6
Test 1 (#1586)
* add transcoder tests
2019-03-27 10:09:47 +13:00
Clinton Hall
bdec673bb9
Merge pull request #1583 from clinton-hall/fix-1
remove .encode which creates byte vs string comparison issues.
2019-03-15 20:52:39 +13:00
clinton-hall
19c3e1fd85 remove .encode which creates byte vs string comparison issues. Fixes #1582 2019-03-15 20:42:21 +13:00
Clinton Hall
0db7c3e10c
Merge pull request #1580 from clinton-hall/dev
12.0.10
2019-03-14 20:40:41 +13:00
Clinton Hall
858206de07
Merge pull request #1579 from clinton-hall/nightly
Nightly
2019-03-14 20:32:49 +13:00
clinton-hall
ac7e0b702a update to 12.0.10 2019-03-14 20:28:53 +13:00
Clinton Hall
15d4289003
Merge pull request #1578 from clinton-hall/fix-1
fix cleanup
2019-03-14 20:12:18 +13:00
clinton-hall
6aee6baf6e fix cleanup 2019-03-14 20:02:40 +13:00
Labrys of Knossos
9b31482ce3
Update PULL_REQUEST_TEMPLATE.md 2019-03-12 16:48:50 -04:00
clinton-hall
8745af2629 update to v12.0.9 2019-03-13 07:54:21 +13:00
Clinton Hall
257eb3d761
Merge pull request #1575 from clinton-hall/clean-1
cleanup supporting files.
2019-03-13 07:45:51 +13:00
clinton-hall
742d482020 cleanup supporting files. 2019-03-13 07:40:35 +13:00
Clinton Hall
410aab4c58
improve tests (#1574)
improve tests
2019-03-12 18:55:37 +13:00
Clinton Hall
f5891459c2
Set up CI with Azure Pipelines (#1573)
* Set up CI with Azure Pipelines

* test all python versions

* rename test file and set to run from subdir.
2019-03-11 22:40:59 +13:00
Labrys of Knossos
3f6b447b3e
Merge pull request #1572 from clinton-hall/refactor/configuration
Fix absolute imports for qbittorrent and utorrent in Python 2.7
2019-03-10 20:52:14 -04:00
Labrys of Knossos
a669c983b7 Fix absolute imports for qbittorrent and utorrent in Python 2.7 2019-03-10 20:45:13 -04:00
Labrys of Knossos
2f5fad7737
Merge pull request #1571 from clinton-hall/refactor/configuration
Fix missed commits during refactor
2019-03-10 20:40:35 -04:00
Labrys of Knossos
9f7f28d54e Fix missed commits during refactor 2019-03-10 20:35:05 -04:00
Clinton Hall
832ef32340
Merge pull request #1569 from clinton-hall/refactor/configuration
Refactor/configuration
2019-03-11 08:20:12 +13:00
Clinton Hall
3b3c7ca2d4
Merge pull request #1566 from clinton-hall/refactor/iso_matching
Refactor/iso matching
2019-03-11 08:17:42 +13:00
Labrys of Knossos
b6db785c92 Refactor utils.subtitles to plugins.subtitles 2019-03-10 11:28:54 -04:00
Labrys of Knossos
76b5c06a33 Refactor utils.notifications.plex_update to plugins.plex.plex_update 2019-03-10 11:25:12 -04:00
Labrys of Knossos
e12f2724e6 Refactor plex configuration to plugins.plex 2019-03-10 11:25:12 -04:00
Labrys of Knossos
1d75439441 Refactor utils.nzb to plugins.downloaders.nzb.utils 2019-03-10 11:25:12 -04:00
Labrys of Knossos
e1aa32aee7 Refactor downloader configuration to plugins.downloaders 2019-03-10 11:25:12 -04:00
Labrys of Knossos
28eed3bc92 Refactor ISO file matching to use regex only once per file. 2019-03-10 11:18:06 -04:00
Labrys of Knossos
cd64014a9d Refactor ISO file matching to decode process output a single time. 2019-03-10 11:18:06 -04:00
clinton-hall
ef950d8024 add Contributing guide 2019-03-10 22:32:55 +13:00
Clinton Hall
bb46bbad27
Merge pull request #1565 from clinton-hall/clinton-hall-patch-1
Update issue templates
2019-03-10 22:24:49 +13:00
Clinton Hall
2fc5101ef0
Update issue templates 2019-03-10 22:22:28 +13:00
Clinton Hall
aeda68fbe4
Merge pull request #1564 from clinton-hall/add-code-of-conduct-1
Add code of conduct 1
2019-03-10 22:03:26 +13:00
Clinton Hall
1c63c9fe39
Create CODE_OF_CONDUCT.md 2019-03-10 22:01:22 +13:00
clinton-hall
cedd0c1a20 Merge branch 'dev' 2019-03-10 20:41:23 +13:00
clinton-hall
a8e2b30666 Merge branch 'nightly' into dev 2019-03-10 20:39:22 +13:00
clinton-hall
d4786e10d7 rev up to 12.0.8 2019-03-10 20:37:57 +13:00
clinton-hall
392967780c don't load torrent clients for nzbs. Fixes #1563 2019-03-10 08:34:56 +13:00
clinton-hall
64862ece10 fix python3 parsing of .iso files. Fixes #1561 2019-03-09 20:30:25 +13:00
clinton-hall
f82fe0ee81 decode 7zip output. Fixes #1561 2019-03-08 23:03:24 +13:00
clinton-hall
3f3e1415c9 change method of writing to system PATH. Fixes #830 2019-03-02 09:03:21 +13:00
clinton-hall
27cfc34577 add sys path config to find executables not in path. Fixes #830 2019-02-25 19:53:54 +13:00
Labrys of Knossos
506ede833e
Merge pull request #1554 from clinton-hall/fix/cleanup
Add exception handling for failure to return to original directory
2019-02-18 06:33:34 -05:00
Labrys of Knossos
fd8452b5c6 Add exception handling for failure to return to original directory
Fixes #1552
2019-02-16 10:17:01 -05:00
clinton-hall
45baf79753 log successful result when returning failed download to Radarr. Fixes #1546 2019-02-09 11:08:33 +13:00
clinton-hall
8a637918d6 use list for python3 compatibility. Fixes #1545 2019-02-05 22:15:05 +13:00
clinton-hall
f47f68f699 convert byte to string from Popen. Fix Sick* failed processing. Fixes #1545 2019-02-05 22:01:20 +13:00
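The two commits above address Python 3's bytes handling around subprocess output; a hedged sketch of the general pattern (the command is a stand-in, not the project's actual Sick* processing call):

    import subprocess

    proc = subprocess.Popen(['echo', 'Processing succeeded'],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()

    # On Python 3, `out` is bytes, so decode before doing str comparisons.
    text = out.decode('utf-8', errors='replace') if isinstance(out, bytes) else out
    if 'succeeded' in text:
        print('result parsed')
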
Labrys of Knossos
f91f40d643
Merge pull request #1543 from clinton-hall/feature/eol
Add Python End-of-Life detection
2019-02-03 19:11:53 -05:00
Labrys of Knossos
f6e620a3fd Add Python End-of-Life detection 2019-02-03 11:30:55 -05:00
clinton-hall
de86259bb0 fix first return parsing from HeadPhones. Fixes #1536 2019-01-27 22:45:04 +13:00
Labrys of Knossos
b7746f1ce5
Merge pull request #1534 from clinton-hall/fork/medusa-api
Add Medusa API
2019-01-20 10:22:00 -05:00
Labrys of Knossos
00877c2d97 Add Medusa API 2019-01-20 10:09:03 -05:00
Labrys of Knossos
80a9576fc3
Merge pull request #1532 from clinton-hall/refactor/configuration
Refactor configuration
2019-01-20 09:43:00 -05:00
Labrys of Knossos
81a6d9c4fa Refactor torrent linking configuration 2019-01-19 14:34:06 -05:00
Labrys of Knossos
9a1be36e8b Refactor torrent deletion configuration 2019-01-19 14:34:05 -05:00
Labrys of Knossos
521d2b7a05 Refactor torrent permission configuration 2019-01-19 14:34:05 -05:00
Labrys of Knossos
f23eccc050 Refactor torrent resuming configuration 2019-01-19 14:34:05 -05:00
Labrys of Knossos
cf0fc1296f Refactor torrent categories configuration 2019-01-19 14:34:05 -05:00
Labrys of Knossos
1906d62664 Refactor flattening configuration 2019-01-19 14:34:05 -05:00
Labrys of Knossos
218e082ec7 Refactor sabnzbd configuration 2019-01-19 14:34:05 -05:00
Labrys of Knossos
9c105061d6 Refactor qbittorrent configuration 2019-01-19 14:34:04 -05:00
Labrys of Knossos
22dfadd65c Refactor deluge configuration 2019-01-19 14:34:04 -05:00
Labrys of Knossos
44df360fbe Refactor utorrent configuration 2019-01-19 14:34:04 -05:00
Labrys of Knossos
f961c476ae Refactor transmission configuration 2019-01-19 14:34:04 -05:00
Labrys of Knossos
287e3aa17b Fix initializing constant 2019-01-19 14:34:04 -05:00
Labrys of Knossos
3d2070e106 Refactor utility location configuration 2019-01-19 14:34:04 -05:00
Labrys of Knossos
0a58b6b6a0 Refactor section configuration 2019-01-19 14:34:04 -05:00
Labrys of Knossos
4f828e0a77 Refactor torrent class configuration 2019-01-19 14:34:04 -05:00
Labrys of Knossos
e85b92f1db Refactor passwords file configuration 2019-01-19 14:34:04 -05:00
Labrys of Knossos
819cf7b225 Fix global declarations 2019-01-19 14:34:04 -05:00
Labrys of Knossos
2d0b5e706b Refactor transcoder configuration 2019-01-19 14:34:04 -05:00
Labrys of Knossos
10710ffd4c Refactor container configuration 2019-01-19 14:34:04 -05:00
Labrys of Knossos
ddf15247e3 Use context manager instead of assignment 2019-01-19 14:34:04 -05:00
Labrys of Knossos
e0c55c4f84 Refactor niceness configuration 2019-01-19 14:34:04 -05:00
Labrys of Knossos
f67f8a32aa Refactor plex configuration 2019-01-19 14:34:03 -05:00
Labrys of Knossos
a5d51d6e5a Use generator exp for remote paths 2019-01-19 14:34:03 -05:00
Labrys of Knossos
c587a137a5 Refactor remote paths configuration 2019-01-19 14:34:03 -05:00
Labrys of Knossos
003d181bb0 Refactor torrents configuration 2019-01-19 14:34:03 -05:00
Labrys of Knossos
62aca7ed3c Refactor groups configuration 2019-01-19 14:34:03 -05:00
Labrys of Knossos
b3870e0d07 Refactor nzbs configuration 2019-01-19 14:34:03 -05:00
Labrys of Knossos
feffa0da41 Refactor wake on lan configuration 2019-01-19 14:34:03 -05:00
Labrys of Knossos
2c963f1ffe Fix error log 2019-01-19 14:34:03 -05:00
Labrys of Knossos
bd4c830313 Fix version check conditional 2019-01-19 14:34:03 -05:00
Labrys of Knossos
750c203216 Fix CheckVersion instance creation 2019-01-19 14:34:03 -05:00
Labrys of Knossos
c9e9d9748b Refactor updates configuration 2019-01-19 14:34:03 -05:00
Labrys of Knossos
2512218d4a Refactor general configuration 2019-01-19 14:34:03 -05:00
Labrys of Knossos
ca17c7a562 Refactor logging configuration 2019-01-19 14:34:03 -05:00
Labrys of Knossos
a31683f7e5 Refactor migration configuration 2019-01-19 14:34:03 -05:00
Labrys of Knossos
13846db0b6 Refactor locale configuration 2019-01-19 14:34:02 -05:00
Labrys of Knossos
e0de964fda Refactor process configuration 2019-01-19 14:34:02 -05:00
Labrys of Knossos
1404464ef9 Refactor locale configuration 2019-01-19 14:34:02 -05:00
Labrys of Knossos
0c98912b76 Refactor PASSWORDSFILE -> PASSWORDS_FILE
Refactor DOWNLOADINFO -> DOWNLOAD_INFO
2019-01-19 14:34:02 -05:00
Labrys of Knossos
7e52aec4af Refactor *CONTAINER 2019-01-19 14:34:02 -05:00
Labrys of Knossos
2ebe96e049 Refactor REMOTEPATHS -> REMOTE_PATHS 2019-01-19 14:34:02 -05:00
Labrys of Knossos
d973f4955f Refactor TORRENT_CLIENTAGENT -> TORRENT_CLIENT_AGENT 2019-01-19 14:34:02 -05:00
Labrys of Knossos
fafcdb4ed5 Refactor NZB_DEFAULTDIR -> NZB_DEFAULT_DIRECTORY 2019-01-19 14:34:02 -05:00
Labrys of Knossos
a0d8940f70 Refactor NZB_CLIENTAGENT -> NZB_CLIENT_AGENT 2019-01-19 14:34:02 -05:00
Labrys of Knossos
5bea8f121e Refactor SABNZBD* 2019-01-19 14:34:02 -05:00
Labrys of Knossos
4bf842b4f4 Refactor TORRENT_DEFAULTDIR -> TORRENT_DEFAULT_DIRECTORY 2019-01-19 14:34:02 -05:00
Labrys of Knossos
a24367113b Refactor OUTPUTDIRECTORY -> OUTPUT_DIRECTORY 2019-01-19 14:34:02 -05:00
Labrys of Knossos
28f1bc35c5 Refactor USELINK -> USE_LINK 2019-01-19 14:34:01 -05:00
Labrys of Knossos
d2346b0ea6 Refactor PLEX* 2019-01-19 14:34:01 -05:00
Labrys of Knossos
182a542bda Refactor QBITTORENT* 2019-01-19 14:34:01 -05:00
Labrys of Knossos
1aa0ea6e75 Refactor DELUGEPWD -> DELUGE_PASSWORD 2019-01-19 14:34:01 -05:00
Labrys of Knossos
74bc6fb5b4 Refactor DELUGEUSR -> DELUGE_USER 2019-01-19 14:34:01 -05:00
Labrys of Knossos
df5291fd4f Refactor DELUGEPORT -> DELUGE_PORT 2019-01-19 14:34:01 -05:00
Labrys of Knossos
9262ba9cd0 Refactor DELUGEHOST -> DELUGE_HOST 2019-01-19 14:34:01 -05:00
Labrys of Knossos
a62415d711 Refactor TRANSMISSIONPWD -> TRANSMISSION_PASSWORD 2019-01-19 14:34:01 -05:00
Labrys of Knossos
42dfdf73ab Refactor TRANSMISSIONUSR -> TRANSMISSION_USER 2019-01-19 14:34:01 -05:00
Labrys of Knossos
e66ad2b66d Refactor TRANSMISSIONPORT -> TRANSMISSION_PORT 2019-01-19 14:34:00 -05:00
Labrys of Knossos
5d5eb798c9 Refactor TRANSMISSIONHOST -> TRANSMISSION_HOST 2019-01-19 14:34:00 -05:00
Labrys of Knossos
39974e62cc Refactor UTORRENTPWD -> UTORRENT_PASSWORD 2019-01-19 14:34:00 -05:00
Labrys of Knossos
22d2c1b108 Refactor UTORRENTUSR -> UTORRENT_USER 2019-01-19 14:34:00 -05:00
Labrys of Knossos
20bd765a4b Refactor UTORRENTWEBUI -> UTORRENT_WEB_UI 2019-01-19 14:34:00 -05:00
Labrys of Knossos
649febdedd Refactor NZBGET_POSTPROCESS_PARCHECK -> NZBGET_POSTPROCESS_PAR_CHECK 2019-01-19 14:34:00 -05:00
Labrys of Knossos
4896848099
Merge pull request #1526 from clinton-hall/dev
Merge dev to master
2019-01-15 18:44:43 -05:00
Labrys of Knossos
66604416b4
Merge pull request #1525 from clinton-hall/nightly
Merge nightly to dev
2019-01-15 18:41:56 -05:00
Labrys of Knossos
4220b15232
Merge pull request #1524 from clinton-hall/refactor/utils
Refactor utils
2019-01-15 18:40:04 -05:00
Labrys of Knossos
30872db797 Update changelog 2019-01-15 18:37:45 -05:00
Labrys of Knossos
d9436603ab Bump version: 12.0.6 → 12.0.7 2019-01-15 18:11:31 -05:00
Labrys of Knossos
b6672ccf09 Refactor restart to utils.processes.restart 2019-01-15 18:06:05 -05:00
Labrys of Knossos
d960c432eb Refactor rchmod to utils.paths.rchmod 2019-01-15 18:02:36 -05:00
Labrys of Knossos
aa057e65d5 Refactor common utils to utils.common 2019-01-15 17:55:43 -05:00
Labrys of Knossos
844c1d15e9 Fix cleanup script output 2019-01-15 17:48:17 -05:00
Labrys of Knossos
7185e0b31b Add docstring 2019-01-15 17:48:17 -05:00
Labrys of Knossos
c4be677a62 Fix git subprocess 2019-01-15 17:32:03 -05:00
clinton-hall
243cf52c47 Merge branch 'nightly' into dev 2019-01-13 20:11:56 +13:00
clinton-hall
b5b4808293 update version details for next release. 2019-01-13 20:10:45 +13:00
clinton-hall
247da9c6cc Merge branch 'nightly' into dev 2019-01-13 19:45:58 +13:00
clinton-hall
3a2ed4bc57 fixed manual Torrent run result parsing. Fixes #1520 2019-01-13 19:41:04 +13:00
Labrys of Knossos
6f5e3ca0c0
Merge pull request #1519 from TheHolyRoger/Missed-ProcessResult
Missed ProcessResult
2019-01-11 17:54:20 -05:00
TheHolyRoger
e89bbcf9be
hotfix/processresult bug 2019-01-11 14:38:20 +00:00
Labrys of Knossos
df280c4bc3
Merge pull request #1515 from clinton-hall/refactor/utils
Refactor core.utils into a package
2019-01-06 12:12:14 -05:00
Labrys of Knossos
bd5b970bc7 Refactor network utils to utils.network 2019-01-06 12:10:50 -05:00
Labrys of Knossos
383eb5eaf2 Refactor identification utils from utils to utils.identification 2019-01-06 12:10:50 -05:00
Labrys of Knossos
648ecd4048 Refactor process_dir to use generators 2019-01-06 12:10:50 -05:00
Labrys of Knossos
a888d741d3 Refactor file type detection to utils.files 2019-01-06 12:10:50 -05:00
Labrys of Knossos
cb422a0cea Flatten process_dir 2019-01-06 12:10:50 -05:00
Labrys of Knossos
8d458f10ac Refactor get_dirs 2019-01-06 12:10:50 -05:00
Labrys of Knossos
e67f29cb7b Flatten get_dirs function 2019-01-06 12:10:50 -05:00
Labrys of Knossos
03cb11dae3 Refactor identification utils from utils to utils.identification 2019-01-06 12:10:49 -05:00
Labrys of Knossos
e44c0bb56a Refactor path functions from utils to utils.paths 2019-01-06 12:10:49 -05:00
Labrys of Knossos
36932e25c6 Fix clean_dir for Python 3
TypeError when testing str > int
2019-01-06 12:10:49 -05:00
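The TypeError noted above is Python 3 refusing ordering comparisons between unrelated types; a minimal illustration with made-up values:

    num_files = '2'  # e.g. a count that arrived as a string

    # Python 2 silently allowed str > int; Python 3 raises
    # TypeError: '>' not supported between instances of 'str' and 'int'.
    try:
        not_empty = num_files > 0
    except TypeError:
        not_empty = int(num_files) > 0  # compare like types instead

    print(not_empty)
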
Labrys of Knossos
6cc3df73b3 Refactor path functions from utils to utils.paths 2019-01-06 12:10:49 -05:00
Labrys of Knossos
dade3f6698 Refactor network utils to utils.network 2019-01-06 12:10:49 -05:00
Labrys of Knossos
0f7c74dd78 Refactor file type detection to utils.files 2019-01-06 12:10:49 -05:00
Labrys of Knossos
4424e21786 Streamline is_media_file 2019-01-06 12:10:49 -05:00
Labrys of Knossos
0cccecb435 Refactor file type detection to utils.files 2019-01-06 12:10:49 -05:00
Labrys of Knossos
a074e56629 Refactor naming utils to utils.naming 2019-01-06 12:10:49 -05:00
Labrys of Knossos
9b0d539423 Make replace_links more DRY and add max_depth for following links 2019-01-06 12:10:49 -05:00
Labrys of Knossos
d1f5211e78 Refactor nzbs from utils to utils.nzbs 2019-01-06 12:10:48 -05:00
Labrys of Knossos
2d4951267b Refactor subtitle utils to utils.subtitles 2019-01-06 12:10:48 -05:00
Labrys of Knossos
6a9ff96e8c Refactor encoding utils to utils.encoding 2019-01-06 12:10:48 -05:00
Labrys of Knossos
9d43e0d60b Refactor notification utils to utils.notifications 2019-01-06 12:10:48 -05:00
Labrys of Knossos
f042e014b1 Refactor naming utils to utils.naming 2019-01-06 12:10:48 -05:00
Labrys of Knossos
c4d9faeb23 Refactor network utils to utils.network 2019-01-06 12:10:48 -05:00
Labrys of Knossos
a14a286a8e Refactor links to utils.links 2019-01-06 12:10:48 -05:00
Labrys of Knossos
5c644890e8 Fix shutil.copyfileobj monkey patching 2019-01-06 12:10:48 -05:00
Labrys of Knossos
a6d2c6e96f Refactor path functions from utils to utils.paths 2019-01-06 12:10:48 -05:00
Labrys of Knossos
094fe555b8 Clean up network utils 2019-01-06 12:10:48 -05:00
Labrys of Knossos
42553df5cb Refactor network utils to utils.network 2019-01-06 12:10:48 -05:00
Labrys of Knossos
84061fea2f Fix PEP8 line length 2019-01-06 12:10:48 -05:00
Labrys of Knossos
7b8721b277 Refactor my_db -> database 2019-01-06 12:10:48 -05:00
Labrys of Knossos
542893b30b Refactor download_info db connection to module variable 2019-01-06 12:10:48 -05:00
Labrys of Knossos
04942bf6ad Refactor download info to utils.download_info 2019-01-06 12:10:48 -05:00
Labrys of Knossos
a50a5edbf7 Refactor path functions from utils to utils.paths 2019-01-06 12:10:47 -05:00
Labrys of Knossos
2d6e8034e2 Refactor parses from utils to utils.parsers 2019-01-06 12:10:47 -05:00
Labrys of Knossos
bd16f11485 Refactor nzbs from utils to utils.nzbs 2019-01-06 12:10:47 -05:00
Labrys of Knossos
4143aa77f8 Refactor torrents from utils to utils.torrents 2019-01-06 12:10:47 -05:00
Labrys of Knossos
21fa4e3896 Refactor utils.*Process -> utils.processes.*Process 2019-01-06 12:10:47 -05:00
Labrys of Knossos
3b670b895b Refactor utils module to package 2019-01-06 12:10:47 -05:00
Labrys of Knossos
22b9a484ae
Merge pull request #1514 from clinton-hall/feature/cleanup
Code cleanup
2019-01-06 12:10:24 -05:00
Labrys of Knossos
a289eef88e Remove unused variable 2019-01-06 12:09:07 -05:00
Labrys of Knossos
93ec74f1c7 Fix conditional assignment 2019-01-06 12:09:07 -05:00
Labrys of Knossos
6d0d2d3f7e Use dict literal or comprehension for dict creation 2019-01-06 12:09:07 -05:00
Labrys of Knossos
c99b497bd8 Merge branch 'hotfix/sourcecleaner' into nightly 2019-01-06 12:08:47 -05:00
Labrys of Knossos
f9f3fafb1b
Merge pull request #1513 from clinton-hall/dev
Dev
2019-01-06 00:51:02 -05:00
Labrys of Knossos
2855ef4ccb
Merge pull request #1512 from clinton-hall/hotfix/sourcecleaner
Proper fix for source cleaner
2019-01-06 00:49:13 -05:00
Labrys of Knossos
0fd570cd85 Bump version: 12.0.4 → 12.0.5 2019-01-06 00:48:08 -05:00
Labrys of Knossos
ada78a14f8 hotfix 2019-01-06 00:47:40 -05:00
Labrys of Knossos
0f1595d29c
Merge pull request #1511 from clinton-hall/hotfix/sourcecleaner
Hotfix/sourcecleaner
2019-01-05 23:08:01 -05:00
Labrys of Knossos
c6b4405aff
Merge pull request #1510 from clinton-hall/dev
Hotfix missed commit for source cleaner
2019-01-05 23:03:13 -05:00
Labrys of Knossos
b9cab56fa5
Merge pull request #1509 from clinton-hall/hotfix/sourcecleaner
Fix missed commit for source cleaner
2019-01-05 23:00:47 -05:00
Labrys of Knossos
84a7011973 Bump version: 12.0.3 → 12.0.4 2019-01-05 22:59:31 -05:00
Labrys of Knossos
f83b37d80b Fix missed commit for source cleaner 2019-01-05 22:59:20 -05:00
Labrys of Knossos
50b743ad30
Merge pull request #1508 from clinton-hall/fix/forkdetection
Fix fork detection, part 1
2019-01-05 22:50:50 -05:00
Labrys of Knossos
656957f1fc Fix excess parameter detection 2019-01-05 22:48:18 -05:00
Labrys of Knossos
f514eecf6c Fix excess parameter detection 2019-01-05 22:48:16 -05:00
Labrys of Knossos
29171baaa3 Add extra logging for fork detection. 2019-01-05 22:48:11 -05:00
Labrys of Knossos
7b2833e5f5
Merge pull request #1506 from clinton-hall/dev
Merge dev back into nightly
2019-01-05 21:44:16 -05:00
Labrys of Knossos
14300d12fd
Merge pull request #1505 from clinton-hall/dev
Merge develop into master
2019-01-05 21:39:38 -05:00
Labrys of Knossos
b86693ea8c
Merge pull request #1504 from clinton-hall/hotfix/sourcecleaner
Hotfix/sourcecleaner
2019-01-05 21:36:26 -05:00
Labrys of Knossos
6616801c38 Bump version: 12.0.2 → 12.0.3 2019-01-05 21:27:14 -05:00
Labrys of Knossos
d250e45c7b Hotfix cleaning for source installs 2019-01-05 21:26:56 -05:00
clinton-hall
f1c4c6e840 and that is why we don't make changes using vi while on holiday! 2019-01-05 23:33:30 +13:00
clinton-hall
6d7dacf114 update the Readme to reflect recent changes. 2019-01-05 23:30:11 +13:00
Labrys of Knossos
f14ab17dd5 Merge tag '12.0.2' into nightly 2019-01-05 01:02:45 -05:00
Labrys of Knossos
e386eaaec2 Bump version: 12.0.1 → 12.0.2 2019-01-05 00:53:32 -05:00
Labrys of Knossos
58e57c238d
Merge pull request #1502 from clinton-hall/dev
Hotfix missed process result
2019-01-05 00:44:59 -05:00
1841 changed files with 172268 additions and 69611 deletions

bumpversion configuration file

@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 12.0.1
+current_version = 12.1.13
 commit = True
 tag = False
@@ -7,11 +7,6 @@ tag = False
 search = version='{current_version}'
 replace = version='{new_version}'
-[bumpversion:file:README.md]
-search = v{current_version}
-replace = v{new_version}
 [bumpversion:file:core/__init__.py]
 search = __version__ = '{current_version}'
 replace = __version__ = '{new_version}'

.github/CODE_OF_CONDUCT.md vendored Normal file (76 changed lines)

@ -0,0 +1,76 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at fock_wulf@hotmail.com. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq

.github/CONTRIBUTING.md vendored Normal file (14 changed lines)

@ -0,0 +1,14 @@
# Contributing
When contributing to this repository, please first check the issues list, current pull requests, and FAQ pages.
While it is preferred that all interactions be made through GitHub, the author can be contacted directly at fock_wulf@hotmail.com.
Please note we have a code of conduct; please follow it in all your interactions with the project.
## Pull Request Process
1. Please base all pull requests on the current nightly branch.
2. Include a description to explain what is achieved with a pull request.
3. Link any relevant issues that are closed or impacted by the pull request.
4. Please update the FAQ to reflect any new parameters, changed behaviour, or suggested configurations relevant to the changes.

.github/ISSUE_TEMPLATE.md vendored Normal file (23 changed lines)

@ -0,0 +1,23 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**Technical Specs**
1. Running on (Windows, Linux, NAS Model etc) '....'
2. Python version '....'
3. Download Client (NZBGet, SABnzbd, Transmission) '....'
4. Intended Media Management (SickChill, CouchPotato, Radarr, Sonarr) '....'
**Expected behavior**
A clear and concise description of what you expected to happen.
**Log**
Please provide an extract, or full debug log that indicates the issue.

.github/PULL_REQUEST_TEMPLATE.md vendored Normal file (28 changed lines)

@ -0,0 +1,28 @@
# Description
Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context. List any dependencies that are required for this change.
Fixes # (issue)
## Type of change
Please delete options that are not relevant.
- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
- [ ] This change requires a documentation update
# How Has This Been Tested?
Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration.
**Test Configuration**:
# Checklist:
- [ ] I have based this change on the nightly branch
- [ ] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have made corresponding changes to the documentation
- [ ] I have added tests that prove my fix is effective or that my feature works
- [ ] New and existing unit tests pass locally with my changes

README.md

@ -1,11 +1,13 @@
nzbToMedia v12.0.1 nzbToMedia
================== ==========
Provides an [efficient](https://github.com/clinton-hall/nzbToMedia/wiki/Efficient-on-demand-post-processing) way to handle postprocessing for [CouchPotatoServer](https://couchpota.to/ "CouchPotatoServer") and [SickBeard](http://sickbeard.com/ "SickBeard") (and its [forks](https://github.com/clinton-hall/nzbToMedia/wiki/Failed-Download-Handling-%28FDH%29#sick-beard-and-its-forks)) Provides an [efficient](https://github.com/clinton-hall/nzbToMedia/wiki/Efficient-on-demand-post-processing) way to handle postprocessing for [CouchPotatoServer](https://couchpota.to/ "CouchPotatoServer") and [SickBeard](http://sickbeard.com/ "SickBeard") (and its [forks](https://github.com/clinton-hall/nzbToMedia/wiki/Failed-Download-Handling-%28FDH%29#sick-beard-and-its-forks))
when using one of the popular NZB download clients like [SABnzbd](http://sabnzbd.org/ "SABnzbd") and [NZBGet](http://nzbget.sourceforge.net/ "NZBGet") on low performance systems like a NAS. when using one of the popular NZB download clients like [SABnzbd](http://sabnzbd.org/ "SABnzbd") and [NZBGet](https://nzbget.com/ "NZBGet") on low performance systems like a NAS.
This script is based on sabToSickBeard (written by Nic Wolfe and supplied with SickBeard), with the support for NZBGet being added by [thorli](https://github.com/thorli "thorli") and further contributions by [schumi2004](https://github.com/schumi2004 "schumi2004") and [hugbug](https://sourceforge.net/apps/phpbb/nzbget/memberlist.php?mode=viewprofile&u=67 "hugbug"). This script is based on sabToSickBeard (written by Nic Wolfe and supplied with SickBeard), with the support for NZBGet being added by [thorli](https://github.com/thorli "thorli") and further contributions by [schumi2004](https://github.com/schumi2004 "schumi2004") and [hugbug](https://sourceforge.net/apps/phpbb/nzbget/memberlist.php?mode=viewprofile&u=67 "hugbug").
Torrent suport added by [jkaberg](https://github.com/jkaberg "jkaberg") and [berkona](https://github.com/berkona "berkona") Torrent suport added by [jkaberg](https://github.com/jkaberg "jkaberg") and [berkona](https://github.com/berkona "berkona")
Corrupt video checking, auto SickBeard fork determination and a whole lot of code improvement was done by [echel0n](https://github.com/echel0n "echel0n") Corrupt video checking, auto SickBeard fork determination and a whole lot of code improvement was done by [echel0n](https://github.com/echel0n "echel0n")
Python3 compatibility, and much cleaner code base has been contributed by [Labrys of Knossos](https://github.com/labrys "Labrys of Knossos")
Introduction Introduction
------------ ------------
@ -17,7 +19,7 @@ Failed download handling for SickBeard is available by using Tolstyak's fork [Si
To use this feature, in autoProcessTV.cfg set the parameter "fork=failed". Default is "fork=default" and will work with the standard version of SickBeard and just ignores failed downloads. To use this feature, in autoProcessTV.cfg set the parameter "fork=failed". Default is "fork=default" and will work with the standard version of SickBeard and just ignores failed downloads.
Development of Tolstyak's fork ended in 2013, but newer forks exist with significant feature updates such as [Mr-Orange TPB](https://github.com/coach0742/Sick-Beard) (discontinued), [SickRageTV](https://github.com/SiCKRAGETV/SickRage) and [SickRage](https://github.com/SickRage/SickRage) (active). See [SickBeard Forks](https://github.com/clinton-hall/nzbToMedia/wiki/Failed-Download-Handling-%28FDH%29#sick-beard-and-its-forks "SickBeard Forks") for a list of known forks. Development of Tolstyak's fork ended in 2013, but newer forks exist with significant feature updates such as [Mr-Orange TPB](https://github.com/coach0742/Sick-Beard) (discontinued), [SickRageTV](https://github.com/SiCKRAGETV/SickRage) and [SickRage](https://github.com/SickRage/SickRage) (active). See [SickBeard Forks](https://github.com/clinton-hall/nzbToMedia/wiki/Failed-Download-Handling-%28FDH%29#sick-beard-and-its-forks "SickBeard Forks") for a list of known forks.
Full support is provided for [SickRageTV](https://github.com/SiCKRAGETV/SickRage), [SickRage](https://github.com/SickRage/SickRage), and [SickGear](https://github.com/SickGear/SickGear). Full support is provided for [SickChill](https://github.com/SickChill/SickChill), [SiCKRAGE](https://github.com/SiCKRAGE/SiCKRAGE), [Medusa](https://github.com/pymedusa/Medusa), and [SickGear](https://github.com/SickGear/SickGear).
Torrent support has been added with the assistance of jkaberg and berkona. Currently supports uTorrent, Transmission, Deluge and possibly more. Torrent support has been added with the assistance of jkaberg and berkona. Currently supports uTorrent, Transmission, Deluge and possibly more.
To enable Torrent extraction, on Windows, you need to install [7-zip](http://www.7-zip.org/ "7-zip") or on *nix you need to install the following packages/commands. To enable Torrent extraction, on Windows, you need to install [7-zip](http://www.7-zip.org/ "7-zip") or on *nix you need to install the following packages/commands.
@ -30,7 +32,7 @@ Installation instructions for this are available in the [wiki](https://github.co
Contribution Contribution
------------ ------------
We who have developed nzbToMedia believe in the openness of open-source, and as such we hope that any modifications will lead back to the [orignal repo](https://github.com/clinton-hall/nzbToMedia "orignal repo") via pull requests. We who have developed nzbToMedia believe in the openness of open-source, and as such we hope that any modifications will lead back to the [original repo](https://github.com/clinton-hall/nzbToMedia "orignal repo") via pull requests.
Founder: [clinton-hall](https://github.com/clinton-hall "clinton-hall") Founder: [clinton-hall](https://github.com/clinton-hall "clinton-hall")

.gitignore vendored (5 changed lines)

@ -1,8 +1,7 @@
*.cfg *.cfg
!.bumpversion.cfg !.bumpversion.cfg
*.cfg.old *.cfg.old
*.pyc *.py[cod]
*.pyo
*.log *.log
*.pid *.pid
*.db *.db
@ -10,5 +9,7 @@
/userscripts/ /userscripts/
/logs/ /logs/
/.idea/ /.idea/
/venv/
*.dist-info *.dist-info
*.egg-info *.egg-info
/.vscode

TorrentToMedia.py

@ -1,20 +1,34 @@
#!/usr/bin/env python #!/usr/bin/env python
# coding=utf-8 # coding=utf-8
import cleanup from __future__ import (
cleanup.clean('core', 'libs') absolute_import,
division,
print_function,
unicode_literals,
)
import datetime import datetime
import os import os
import sys import sys
import eol
import cleanup
eol.check()
cleanup.clean(cleanup.FOLDER_STRUCTURE)
import core import core
from core import logger, main_db from core import logger, main_db
from core.auto_process import comics, games, movies, music, tv from core.auto_process import comics, games, movies, music, tv, books
from core.auto_process.common import ProcessResult from core.auto_process.common import ProcessResult
from core.plugins.plex import plex_update
from core.user_scripts import external_script from core.user_scripts import external_script
from core.utils import char_replace, convert_to_ascii, plex_update, replace_links from core.utils import char_replace, convert_to_ascii, replace_links
from six import text_type
try:
text_type = unicode
except NameError:
text_type = str
def process_torrent(input_directory, input_name, input_category, input_hash, input_id, client_agent): def process_torrent(input_directory, input_name, input_category, input_hash, input_id, client_agent):
@ -22,7 +36,7 @@ def process_torrent(input_directory, input_name, input_category, input_hash, inp
root = 0 root = 0
found_file = 0 found_file = 0
if client_agent != 'manual' and not core.DOWNLOADINFO: if client_agent != 'manual' and not core.DOWNLOAD_INFO:
logger.debug('Adding TORRENT download info for directory {0} to database'.format(input_directory)) logger.debug('Adding TORRENT download info for directory {0} to database'.format(input_directory))
my_db = main_db.DBConnection() my_db = main_db.DBConnection()
@ -56,30 +70,25 @@ def process_torrent(input_directory, input_name, input_category, input_hash, inp
input_category = 'UNCAT' input_category = 'UNCAT'
usercat = input_category usercat = input_category
try:
input_name = input_name.encode(core.SYS_ENCODING)
except UnicodeError:
pass
try:
input_directory = input_directory.encode(core.SYS_ENCODING)
except UnicodeError:
pass
logger.debug('Determined Directory: {0} | Name: {1} | Category: {2}'.format logger.debug('Determined Directory: {0} | Name: {1} | Category: {2}'.format
(input_directory, input_name, input_category)) (input_directory, input_name, input_category))
# auto-detect section # auto-detect section
section = core.CFG.findsection(input_category).isenabled() section = core.CFG.findsection(input_category).isenabled()
if section is None: if section is None: #Check for user_scripts for 'ALL' and 'UNCAT'
section = core.CFG.findsection('ALL').isenabled() if usercat in core.CATEGORIES:
if section is None: section = core.CFG.findsection('ALL').isenabled()
logger.error('Category:[{0}] is not defined or is not enabled. '
'Please rename it or ensure it is enabled for the appropriate section '
'in your autoProcessMedia.cfg and try again.'.format
(input_category))
return [-1, '']
else:
usercat = 'ALL' usercat = 'ALL'
else:
section = core.CFG.findsection('UNCAT').isenabled()
usercat = 'UNCAT'
if section is None: # We haven't found any categories to process.
logger.error('Category:[{0}] is not defined or is not enabled. '
'Please rename it or ensure it is enabled for the appropriate section '
'in your autoProcessMedia.cfg and try again.'.format
(input_category))
return [-1, '']
if len(section) > 1: if len(section) > 1:
logger.error('Category:[{0}] is not unique, {1} are using it. ' logger.error('Category:[{0}] is not unique, {1} are using it. '
@ -102,7 +111,7 @@ def process_torrent(input_directory, input_name, input_category, input_hash, inp
torrent_no_link = int(section.get('Torrent_NoLink', 0)) torrent_no_link = int(section.get('Torrent_NoLink', 0))
keep_archive = int(section.get('keep_archive', 0)) keep_archive = int(section.get('keep_archive', 0))
extract = int(section.get('extract', 0)) extract = int(section.get('extract', 0))
extensions = section.get('user_script_mediaExtensions', '').lower().split(',') extensions = section.get('user_script_mediaExtensions', '')
unique_path = int(section.get('unique_path', 1)) unique_path = int(section.get('unique_path', 1))
if client_agent != 'manual': if client_agent != 'manual':
@ -114,24 +123,20 @@ def process_torrent(input_directory, input_name, input_category, input_hash, inp
basename = os.path.basename(input_directory) basename = os.path.basename(input_directory)
basename = core.sanitize_name(input_name) \ basename = core.sanitize_name(input_name) \
if input_name == basename else os.path.splitext(core.sanitize_name(input_name))[0] if input_name == basename else os.path.splitext(core.sanitize_name(input_name))[0]
output_destination = os.path.join(core.OUTPUTDIRECTORY, input_category, basename) output_destination = os.path.join(core.OUTPUT_DIRECTORY, input_category, basename)
elif unique_path: elif unique_path:
output_destination = os.path.normpath( output_destination = os.path.normpath(
core.os.path.join(core.OUTPUTDIRECTORY, input_category, core.sanitize_name(input_name).replace(' ', '.'))) core.os.path.join(core.OUTPUT_DIRECTORY, input_category, core.sanitize_name(input_name).replace(' ', '.')))
else: else:
output_destination = os.path.normpath( output_destination = os.path.normpath(
core.os.path.join(core.OUTPUTDIRECTORY, input_category)) core.os.path.join(core.OUTPUT_DIRECTORY, input_category))
try:
output_destination = output_destination.encode(core.SYS_ENCODING)
except UnicodeError:
pass
if output_destination in input_directory: if output_destination in input_directory:
output_destination = input_directory output_destination = input_directory
logger.info('Output directory set to: {0}'.format(output_destination)) logger.info('Output directory set to: {0}'.format(output_destination))
if core.SAFE_MODE and output_destination == core.TORRENT_DEFAULTDIR: if core.SAFE_MODE and output_destination == core.TORRENT_DEFAULT_DIRECTORY:
logger.error('The output directory:[{0}] is the Download Directory. ' logger.error('The output directory:[{0}] is the Download Directory. '
'Edit outputDirectory in autoProcessMedia.cfg. Exiting'.format 'Edit outputDirectory in autoProcessMedia.cfg. Exiting'.format
(input_directory)) (input_directory))
@ -166,10 +171,6 @@ def process_torrent(input_directory, input_name, input_category, input_hash, inp
core.os.path.join(output_destination, os.path.basename(file_path)), full_file_name) core.os.path.join(output_destination, os.path.basename(file_path)), full_file_name)
logger.debug('Setting outputDestination to {0} to preserve folder structure'.format logger.debug('Setting outputDestination to {0} to preserve folder structure'.format
(os.path.dirname(target_file))) (os.path.dirname(target_file)))
try:
target_file = target_file.encode(core.SYS_ENCODING)
except UnicodeError:
pass
if root == 1: if root == 1:
if not found_file: if not found_file:
logger.debug('Looking for {0} in: {1}'.format(input_name, inputFile)) logger.debug('Looking for {0} in: {1}'.format(input_name, inputFile))
@ -196,7 +197,7 @@ def process_torrent(input_directory, input_name, input_category, input_hash, inp
if torrent_no_link == 0: if torrent_no_link == 0:
try: try:
core.copy_link(inputFile, target_file, core.USELINK) core.copy_link(inputFile, target_file, core.USE_LINK)
core.remove_read_only(target_file) core.remove_read_only(target_file)
except Exception: except Exception:
logger.error('Failed to link: {0} to {1}'.format(inputFile, target_file)) logger.error('Failed to link: {0} to {1}'.format(inputFile, target_file))
@ -212,7 +213,7 @@ def process_torrent(input_directory, input_name, input_category, input_hash, inp
core.flatten(output_destination) core.flatten(output_destination)
# Now check if video files exist in destination: # Now check if video files exist in destination:
if section_name in ['SickBeard', 'NzbDrone', 'Sonarr', 'CouchPotato', 'Radarr']: if section_name in ['SickBeard', 'SiCKRAGE', 'NzbDrone', 'Sonarr', 'CouchPotato', 'Radarr', 'Watcher3']:
num_videos = len( num_videos = len(
core.list_media_files(output_destination, media=True, audio=False, meta=False, archives=False)) core.list_media_files(output_destination, media=True, audio=False, meta=False, archives=False))
if num_videos > 0: if num_videos > 0:
@ -226,7 +227,7 @@ def process_torrent(input_directory, input_name, input_category, input_hash, inp
# Only these sections can handling failed downloads # Only these sections can handling failed downloads
# so make sure everything else gets through without the check for failed # so make sure everything else gets through without the check for failed
if section_name not in ['CouchPotato', 'Radarr', 'SickBeard', 'NzbDrone', 'Sonarr']: if section_name not in ['CouchPotato', 'Radarr', 'SickBeard', 'SiCKRAGE', 'NzbDrone', 'Sonarr', 'Watcher3']:
status = 0 status = 0
logger.info('Calling {0}:{1} to post-process:{2}'.format(section_name, usercat, input_name)) logger.info('Calling {0}:{1} to post-process:{2}'.format(section_name, usercat, input_name))
@ -240,9 +241,9 @@ def process_torrent(input_directory, input_name, input_category, input_hash, inp
) )
if section_name == 'UserScript': if section_name == 'UserScript':
result = external_script(output_destination, input_name, input_category, section) result = external_script(output_destination, input_name, input_category, section)
elif section_name in ['CouchPotato', 'Radarr']: elif section_name in ['CouchPotato', 'Radarr', 'Watcher3']:
result = movies.process(section_name, output_destination, input_name, status, client_agent, input_hash, input_category) result = movies.process(section_name, output_destination, input_name, status, client_agent, input_hash, input_category)
elif section_name in ['SickBeard', 'NzbDrone', 'Sonarr']: elif section_name in ['SickBeard', 'SiCKRAGE', 'NzbDrone', 'Sonarr']:
if input_hash: if input_hash:
input_hash = input_hash.upper() input_hash = input_hash.upper()
result = tv.process(section_name, output_destination, input_name, status, client_agent, input_hash, input_category) result = tv.process(section_name, output_destination, input_name, status, client_agent, input_hash, input_category)
@ -252,6 +253,8 @@ def process_torrent(input_directory, input_name, input_category, input_hash, inp
result = comics.process(section_name, output_destination, input_name, status, client_agent, input_category) result = comics.process(section_name, output_destination, input_name, status, client_agent, input_category)
elif section_name == 'Gamez': elif section_name == 'Gamez':
result = games.process(section_name, output_destination, input_name, status, client_agent, input_category) result = games.process(section_name, output_destination, input_name, status, client_agent, input_category)
elif section_name == 'LazyLibrarian':
result = books.process(section_name, output_destination, input_name, status, client_agent, input_category)
plex_update(input_category) plex_update(input_category)
@ -270,15 +273,15 @@ def process_torrent(input_directory, input_name, input_category, input_hash, inp
core.update_download_info_status(input_name, 1) core.update_download_info_status(input_name, 1)
# remove torrent # remove torrent
if core.USELINK == 'move-sym' and not core.DELETE_ORIGINAL == 1: if core.USE_LINK == 'move-sym' and not core.DELETE_ORIGINAL == 1:
logger.debug('Checking for sym-links to re-direct in: {0}'.format(input_directory)) logger.debug('Checking for sym-links to re-direct in: {0}'.format(input_directory))
for dirpath, dirs, files in os.walk(input_directory): for dirpath, _, files in os.walk(input_directory):
for file in files: for file in files:
logger.debug('Checking symlink: {0}'.format(os.path.join(dirpath, file))) logger.debug('Checking symlink: {0}'.format(os.path.join(dirpath, file)))
replace_links(os.path.join(dirpath, file)) replace_links(os.path.join(dirpath, file))
core.remove_torrent(client_agent, input_hash, input_id, input_name) core.remove_torrent(client_agent, input_hash, input_id, input_name)
if not section_name == 'UserScript': if section_name != 'UserScript':
# for user script, we assume this is cleaned by the script or option USER_SCRIPT_CLEAN # for user script, we assume this is cleaned by the script or option USER_SCRIPT_CLEAN
# cleanup our processing folders of any misc unwanted files and empty directories # cleanup our processing folders of any misc unwanted files and empty directories
core.clean_dir(output_destination, section_name, input_category) core.clean_dir(output_destination, section_name, input_category)
@ -291,7 +294,7 @@ def main(args):
core.initialize() core.initialize()
# clientAgent for Torrents # clientAgent for Torrents
client_agent = core.TORRENT_CLIENTAGENT client_agent = core.TORRENT_CLIENT_AGENT
logger.info('#########################################################') logger.info('#########################################################')
logger.info('## ..::[{0}]::.. ##'.format(os.path.basename(__file__))) logger.info('## ..::[{0}]::.. ##'.format(os.path.basename(__file__)))
@ -314,6 +317,8 @@ def main(args):
if input_directory and input_name and input_hash and input_id: if input_directory and input_name and input_hash and input_id:
result = process_torrent(input_directory, input_name, input_category, input_hash, input_id, client_agent) result = process_torrent(input_directory, input_name, input_category, input_hash, input_id, client_agent)
elif core.TORRENT_NO_MANUAL:
logger.warning('Invalid number of arguments received from client, and no_manual set')
else: else:
# Perform Manual Post-Processing # Perform Manual Post-Processing
logger.warning('Invalid number of arguments received from client, Switching to manual run mode ...') logger.warning('Invalid number of arguments received from client, Switching to manual run mode ...')
@ -328,11 +333,11 @@ def main(args):
logger.info('Checking database for download info for {0} ...'.format logger.info('Checking database for download info for {0} ...'.format
(os.path.basename(dir_name))) (os.path.basename(dir_name)))
core.DOWNLOADINFO = core.get_download_info(os.path.basename(dir_name), 0) core.DOWNLOAD_INFO = core.get_download_info(os.path.basename(dir_name), 0)
if core.DOWNLOADINFO: if core.DOWNLOAD_INFO:
client_agent = text_type(core.DOWNLOADINFO[0].get('client_agent', 'manual')) client_agent = text_type(core.DOWNLOAD_INFO[0]['client_agent']) or 'manual'
input_hash = text_type(core.DOWNLOADINFO[0].get('input_hash', '')) input_hash = text_type(core.DOWNLOAD_INFO[0]['input_hash']) or ''
input_id = text_type(core.DOWNLOADINFO[0].get('input_id', '')) input_id = text_type(core.DOWNLOAD_INFO[0]['input_id']) or ''
logger.info('Found download info for {0}, ' logger.info('Found download info for {0}, '
'setting variables now ...'.format(os.path.basename(dir_name))) 'setting variables now ...'.format(os.path.basename(dir_name)))
else: else:
@ -346,19 +351,11 @@ def main(args):
if client_agent.lower() not in core.TORRENT_CLIENTS: if client_agent.lower() not in core.TORRENT_CLIENTS:
continue continue
try:
dir_name = dir_name.encode(core.SYS_ENCODING)
except UnicodeError:
pass
input_name = os.path.basename(dir_name) input_name = os.path.basename(dir_name)
try:
input_name = input_name.encode(core.SYS_ENCODING)
except UnicodeError:
pass
results = process_torrent(dir_name, input_name, subsection, input_hash or None, input_id or None, results = process_torrent(dir_name, input_name, subsection, input_hash or None, input_id or None,
client_agent) client_agent)
if results[0] != 0: if results.status_code != 0:
logger.error('A problem was reported when trying to perform a manual run for {0}:{1}.'.format logger.error('A problem was reported when trying to perform a manual run for {0}:{1}.'.format
(section, subsection)) (section, subsection))
result = results result = results

_config.yml Normal file (1 changed line)

@ -0,0 +1 @@
theme: jekyll-theme-cayman

autoProcessMedia.cfg.spec

@ -12,7 +12,7 @@
git_user = git_user =
# GitHUB branch for repo # GitHUB branch for repo
git_branch = git_branch =
# Enable/Disable forceful cleaning of leftover files following postprocess # Enable/Disable forceful cleaning of leftover files following postprocess
force_clean = 0 force_clean = 0
# Enable/Disable logging debug messages to nzbtomedia.log # Enable/Disable logging debug messages to nzbtomedia.log
log_debug = 0 log_debug = 0
@ -22,10 +22,14 @@
log_env = 0 log_env = 0
# Enable/Disable logging git output to debug nzbtomedia.log (helpful to track down update failures.) # Enable/Disable logging git output to debug nzbtomedia.log (helpful to track down update failures.)
log_git = 0 log_git = 0
# Set to the directory to search for executables if not in default system path
sys_path =
# Set to the directory where your ffmpeg/ffprobe executables are located # Set to the directory where your ffmpeg/ffprobe executables are located
ffmpeg_path = ffmpeg_path =
# Enable/Disable media file checking using ffprobe. # Enable/Disable media file checking using ffprobe.
check_media = 1 check_media = 1
# Required media audio language for media to be deemed valid. Leave blank to disregard media audio language check.
require_lan =
# Enable/Disable a safety check to ensure we don't process all downloads in the default_downloadDirectories by mistake. # Enable/Disable a safety check to ensure we don't process all downloads in the default_downloadDirectories by mistake.
safe_mode = 1 safe_mode = 1
# Turn this on to disable additional extraction attempts for failed downloads. Default = 0 will attempt to extract and verify if media is present. # Turn this on to disable additional extraction attempts for failed downloads. Default = 0 will attempt to extract and verify if media is present.
@ -34,7 +38,9 @@
[Posix] [Posix]
### Process priority setting for External commands (Extractor and Transcoder) on Posix (Unix/Linux/OSX) systems. ### Process priority setting for External commands (Extractor and Transcoder) on Posix (Unix/Linux/OSX) systems.
# Set the Niceness value for the nice command. These range from -20 (most favorable to the process) to 19 (least favorable to the process). # Set the Niceness value for the nice command. These range from -20 (most favorable to the process) to 19 (least favorable to the process).
niceness = 0 # If entering an integer e.g 'niceness = 4', this is added to the nice command and passed as 'nice -n4' (Default).
# If entering a comma separated list e.g. 'niceness = nice,4' this will be passed as 'nice 4' (Safer).
niceness = nice,-n0
# Set the ionice scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle. # Set the ionice scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle.
ionice_class = 0 ionice_class = 0
# Set the ionice scheduling class data. This defines the class data, if the class accepts an argument. For real time and best-effort, 0-7 is valid data. # Set the ionice scheduling class data. This defines the class data, if the class accepts an argument. For real time and best-effort, 0-7 is valid data.
@ -64,6 +70,8 @@
method = renamer method = renamer
delete_failed = 0 delete_failed = 0
wait_for = 2 wait_for = 2
# Set this to suppress error if no status change after rename called
no_status_check = 0
extract = 1 extract = 1
# Set this to minimum required size to consider a media file valid (in MB) # Set this to minimum required size to consider a media file valid (in MB)
minSize = 0 minSize = 0
@ -107,6 +115,36 @@
##### Set to define import behavior Move or Copy ##### Set to define import behavior Move or Copy
importMode = Copy importMode = Copy
[Watcher3]
#### autoProcessing for Movies
#### movie - category that gets called for post-processing with CPS
[[movie]]
enabled = 0
apikey =
host = localhost
port = 9090
###### ADVANCED USE - ONLY EDIT IF YOU KNOW WHAT YOU'RE DOING ######
ssl = 0
web_root =
# api key for www.omdbapi.com (used as alternative to imdb)
omdbapikey =
# Enable/Disable linking for Torrents
Torrent_NoLink = 0
keep_archive = 1
delete_failed = 0
wait_for = 0
extract = 1
# Set this to minimum required size to consider a media file valid (in MB)
minSize = 0
# Enable/Disable deleting ignored files (samples and invalid media files)
delete_ignored = 0
##### Enable if Watcher3 is on a remote server for this category
remote_path = 0
##### Set to path where download client places completed downloads locally for this category
watch_dir =
##### Set the recursive directory permissions to the following (0 to disable)
chmodDirectory = 0
[SickBeard] [SickBeard]
#### autoProcessing for TV Series #### autoProcessing for TV Series
#### tv - category that gets called for post-processing with SB #### tv - category that gets called for post-processing with SB
@ -128,6 +166,52 @@
process_method = process_method =
# force processing of already processed content when running a manual scan. # force processing of already processed content when running a manual scan.
force = 0 force = 0
# In addition to force, handle the download as a priority download.
# The processed files will always replace existing qualities, even if this is a lower quality.
is_priority = 0
# tell SickRage/Medusa to delete all source files after processing.
delete_on = 0
# tell Medusa to ignore check for associated subtitle check when postponing release
ignore_subs = 0
extract = 1
nzbExtractionBy = Downloader
# Set this to minimum required size to consider a media file valid (in MB)
minSize = 0
# Enable/Disable deleting ignored files (samples and invalid media files)
delete_ignored = 0
##### Enable if SickBeard is on a remote server for this category
remote_path = 0
##### Set to path where download client places completed downloads locally for this category
watch_dir =
##### Set the recursive directory permissions to the following (0 to disable)
chmodDirectory = 0
##### pyMedusa (fork=medusa-apiv2) uses async postprocessing. Wait a maximum of x minutes for a pp result
wait_for = 10
[SiCKRAGE]
#### autoProcessing for TV Series
#### tv - category that gets called for post-processing with SR
[[tv]]
enabled = 0
host = localhost
port = 8081
apikey =
# api version 1 uses api keys
# api version 2 uses SSO user/pass
api_version = 2
# SSO login requires API v2 to be set
sso_username =
sso_password =
###### ADVANCED USE - ONLY EDIT IF YOU KNOW WHAT YOU'RE DOING ######
web_root =
ssl = 0
delete_failed = 0
# Enable/Disable linking for Torrents
Torrent_NoLink = 0
keep_archive = 1
process_method =
# force processing of already processed content when running a manual scan.
force = 0
# tell SickRage/Medusa to delete all source files after processing. # tell SickRage/Medusa to delete all source files after processing.
delete_on = 0 delete_on = 0
# tell Medusa to ignore check for associated subtitle check when postponing release # tell Medusa to ignore check for associated subtitle check when postponing release
@ -262,7 +346,7 @@
apikey = apikey =
host = localhost host = localhost
port = 8085 port = 8085
###### ######
library = Set to path where you want the processed games to be moved to. library = Set to path where you want the processed games to be moved to.
###### ADVANCED USE - ONLY EDIT IF YOU KNOW WHAT YOU'RE DOING ###### ###### ADVANCED USE - ONLY EDIT IF YOU KNOW WHAT YOU'RE DOING ######
ssl = 0 ssl = 0
@ -280,10 +364,35 @@
##### Set to path where download client places completed downloads locally for this category ##### Set to path where download client places completed downloads locally for this category
watch_dir = watch_dir =
[LazyLibrarian]
#### autoProcessing for LazyLibrarian
#### books - category that gets called for post-processing with LazyLibrarian
[[books]]
enabled = 0
apikey =
host = localhost
port = 5299
###### ADVANCED USE - ONLY EDIT IF YOU KNOW WHAT YOU'RE DOING ######
ssl = 0
web_root =
# Enable/Disable linking for Torrents
Torrent_NoLink = 0
keep_archive = 1
extract = 1
# Set this to minimum required size to consider a media file valid (in MB)
minSize = 0
# Enable/Disable deleting ignored files (samples and invalid media files)
delete_ignored = 0
##### Enable if LazyLibrarian is on a remote server for this category
remote_path = 0
##### Set to path where download client places completed downloads locally for this category
watch_dir =
[Network] [Network]
# Enter Mount points as LocalPath,RemotePath and separate each pair with '|' # Enter Mount points as LocalPath,RemotePath and separate each pair with '|'
# e.g. MountPoints = /volume1/Public/,E:\|/volume2/share/,\\NAS\ # e.g. MountPoints = /volume1/Public/,E:\|/volume2/share/,\\NAS\
mount_points = mount_points =
[Nzb] [Nzb]
###### clientAgent - Supported clients: sabnzbd, nzbget ###### clientAgent - Supported clients: sabnzbd, nzbget
@ -294,15 +403,17 @@
sabnzbd_apikey = sabnzbd_apikey =
###### Enter the default path to your default download directory (non-category downloads). this directory is protected by safe_mode. ###### Enter the default path to your default download directory (non-category downloads). this directory is protected by safe_mode.
default_downloadDirectory = default_downloadDirectory =
# enable this option to prevent nzbToMedia from running in manual mode and scanning an entire directory.
no_manual = 0
[Torrent] [Torrent]
###### clientAgent - Supported clients: utorrent, transmission, deluge, rtorrent, vuze, qbittorrent, other ###### clientAgent - Supported clients: utorrent, transmission, deluge, rtorrent, vuze, qbittorrent, synods, other
clientAgent = other clientAgent = other
###### useLink - Set to hard for physical links, sym for symbolic links, move to move, move-sym to move and link back, and no to not use links (copy) ###### useLink - Set to hard for physical links, sym for symbolic links, move to move, move-sym to move and link back, and no to not use links (copy)
useLink = hard useLink = hard
###### outputDirectory - Default output directory (categories will be appended as sub directory to outputDirectory) ###### outputDirectory - Default output directory (categories will be appended as sub directory to outputDirectory)
outputDirectory = /abs/path/to/complete/ outputDirectory = /abs/path/to/complete/
###### Enter the default path to your default download directory (non-category downloads). this directory is protected by safe_mode. ###### Enter the default path to your default download directory (non-category downloads). this directory is protected by safe_mode.
default_downloadDirectory = default_downloadDirectory =
###### Other categories/labels defined for your downloader. Does not include CouchPotato, SickBeard, HeadPhones, Mylar categories. ###### Other categories/labels defined for your downloader. Does not include CouchPotato, SickBeard, HeadPhones, Mylar categories.
categories = music_videos,pictures,software,manual categories = music_videos,pictures,software,manual
@ -323,15 +434,22 @@
DelugeUSR = your username DelugeUSR = your username
DelugePWD = your password DelugePWD = your password
###### qBittorrent (You must edit this if you're using TorrentToMedia.py with qBittorrent) ###### qBittorrent (You must edit this if you're using TorrentToMedia.py with qBittorrent)
qBittorrenHost = localhost qBittorrentHost = localhost
qBittorrentPort = 8080 qBittorrentPort = 8080
qBittorrentUSR = your username qBittorrentUSR = your username
qBittorrentPWD = your password qBittorrentPWD = your password
###### Synology Download Station (You must edit this if you're using TorrentToMedia.py with Synology DS)
synoHost = localhost
synoPort = 5000
synoUSR = your username
synoPWD = your password
###### ADVANCED USE - ONLY EDIT IF YOU KNOW WHAT YOU'RE DOING ###### ###### ADVANCED USE - ONLY EDIT IF YOU KNOW WHAT YOU'RE DOING ######
deleteOriginal = 0 deleteOriginal = 0
chmodDirectory = 0 chmodDirectory = 0
resume = 1 resume = 1
resumeOnFailure = 1 resumeOnFailure = 1
# enable this option to prevent TorrentToMedia from running in manual mode and scanning an entire directory.
no_manual = 0
[Extensions] [Extensions]
compressedExtensions = .zip,.rar,.7z,.gz,.bz,.tar,.arj,.1,.01,.001 compressedExtensions = .zip,.rar,.7z,.gz,.bz,.tar,.arj,.1,.01,.001
@ -345,15 +463,15 @@
plex_host = localhost plex_host = localhost
plex_port = 32400 plex_port = 32400
plex_token = plex_token =
plex_ssl = 0 plex_ssl = 0
# Enter Plex category to section mapping as Category,section and separate each pair with '|' # Enter Plex category to section mapping as Category,section and separate each pair with '|'
# e.g. plex_sections = movie,3|tv,4 # e.g. plex_sections = movie,3|tv,4
plex_sections = plex_sections =
[Transcoder] [Transcoder]
# getsubs. enable to download subtitles. # getsubs. enable to download subtitles.
getSubs = 0 getSubs = 0
# subLanguages. create a list of languages in the order you want them in your subtitles. # subLanguages. create a list of languages in the order you want them in your subtitles.
subLanguages = eng,spa,fra subLanguages = eng,spa,fra
# transcode. enable to use transcoder # transcode. enable to use transcoder
transcode = 0 transcode = 0
@ -368,7 +486,7 @@
# outputQualityPercent. used as -q:a value. 0 will disable this from being used. # outputQualityPercent. used as -q:a value. 0 will disable this from being used.
outputQualityPercent = 0 outputQualityPercent = 0
# outputVideoPath. Set path you want transcoded videos moved to. Leave blank to disable. # outputVideoPath. Set path you want transcoded videos moved to. Leave blank to disable.
outputVideoPath = outputVideoPath =
# processOutput. 1 will send the outputVideoPath to SickBeard/CouchPotato. 0 will send original files. # processOutput. 1 will send the outputVideoPath to SickBeard/CouchPotato. 0 will send original files.
processOutput = 0 processOutput = 0
# audioLanguage. set the 3 letter language code you want as your primary audio track. # audioLanguage. set the 3 letter language code you want as your primary audio track.
@ -387,16 +505,18 @@
externalSubDir = externalSubDir =
# hwAccel. 1 will set ffmpeg to enable hardware acceleration (this requires a recent ffmpeg) # hwAccel. 1 will set ffmpeg to enable hardware acceleration (this requires a recent ffmpeg)
hwAccel = 0 hwAccel = 0
# generalOptions. Enter your additional ffmpeg options here with commas to separate each option/value (i.e replace spaces with commas). # generalOptions. Enter your additional ffmpeg options (these insert before the '-i' input files) here with commas to separate each option/value (i.e replace spaces with commas).
generalOptions = generalOptions =
# otherOptions. Enter your additional ffmpeg options (these insert after the '-i' input files and before the output file) here with commas to separate each option/value (i.e replace spaces with commas).
otherOptions =
# outputDefault. Loads default configs for the selected device. The remaining options below are ignored. # outputDefault. Loads default configs for the selected device. The remaining options below are ignored.
# If you want to use your own profile, leave this blank and set the remaining options below. # If you want to use your own profile, leave this blank and set the remaining options below.
# outputDefault profiles allowed: iPad, iPad-1080p, iPad-720p, Apple-TV2, iPod, iPhone, PS3, xbox, Roku-1080p, Roku-720p, Roku-480p, mkv, mp4-scene-release # outputDefault profiles allowed: iPad, iPad-1080p, iPad-720p, Apple-TV2, iPod, iPhone, PS3, xbox, Roku-1080p, Roku-720p, Roku-480p, mkv, mkv-bluray, mp4-scene-release
outputDefault = outputDefault =
#### Define custom settings below. #### Define custom settings below.
outputVideoExtension = .mp4 outputVideoExtension = .mp4
outputVideoCodec = libx264 outputVideoCodec = libx264
VideoCodecAllow = VideoCodecAllow =
outputVideoPreset = medium outputVideoPreset = medium
outputVideoResolution = 1920:1080 outputVideoResolution = 1920:1080
outputVideoFramerate = 24 outputVideoFramerate = 24
@ -404,15 +524,15 @@
outputVideoCRF = 19 outputVideoCRF = 19
outputVideoLevel = 3.1 outputVideoLevel = 3.1
outputAudioCodec = ac3 outputAudioCodec = ac3
AudioCodecAllow = AudioCodecAllow =
outputAudioChannels = 6 outputAudioChannels = 6
outputAudioBitrate = 640k outputAudioBitrate = 640k
outputAudioTrack2Codec = libfaac outputAudioTrack2Codec = libfaac
AudioCodec2Allow = AudioCodec2Allow =
outputAudioTrack2Channels = 2 outputAudioTrack2Channels = 2
outputAudioTrack2Bitrate = 128000 outputAudioTrack2Bitrate = 128000
outputAudioOtherCodec = libmp3lame outputAudioOtherCodec = libmp3lame
AudioOtherCodecAllow = AudioOtherCodecAllow =
outputAudioOtherChannels = outputAudioOtherChannels =
outputAudioOtherBitrate = 128000 outputAudioOtherBitrate = 128000
outputSubtitleCodec = outputSubtitleCodec =
@ -469,4 +589,4 @@
# enter a list (comma separated) of Group Tags you want removed from filenames to help with subtitle matching. # enter a list (comma separated) of Group Tags you want removed from filenames to help with subtitle matching.
# e.g remove_group = [rarbag],-NZBgeek # e.g remove_group = [rarbag],-NZBgeek
# be careful if your "group" is a common "real" word. Please report if you have any group replacements that would fall in this category. # be careful if your "group" is a common "real" word. Please report if you have any group replacements that would fall in this category.
remove_group = remove_group =

azure-pipelines.yml Normal file (74 changed lines)

@ -0,0 +1,74 @@
# Python package
# Create and test a Python package on multiple Python versions.
# Add steps that analyze code, save the dist with the build record, publish to a PyPI-compatible index, and more:
# https://docs.microsoft.com/azure/devops/pipelines/languages/python
trigger:
- master
jobs:
- job: 'Test'
pool:
vmImage: 'Ubuntu-latest'
strategy:
matrix:
Python39:
python.version: '3.9'
Python310:
python.version: '3.10'
Python311:
python.version: '3.11'
Python312:
python.version: '3.12'
Python313:
python.version: '3.13'
maxParallel: 3
steps:
- script: |
sudo apt-get update
sudo apt-get install ffmpeg
displayName: 'Install ffmpeg'
- task: UsePythonVersion@0
inputs:
versionSpec: '$(python.version)'
architecture: 'x64'
- script: python -m pip install --upgrade pip
displayName: 'Install dependencies'
- script: |
pip install pytest
pytest tests --doctest-modules --junitxml=junit/test-results.xml
displayName: 'pytest'
- script: |
rm -rf .git
python cleanup.py
python TorrentToMedia.py
python nzbToMedia.py
displayName: 'Test source install cleanup'
- task: PublishTestResults@2
inputs:
testResultsFiles: '**/test-results.xml'
testRunTitle: 'Python $(python.version)'
condition: succeededOrFailed()
- job: 'Publish'
dependsOn: 'Test'
pool:
vmImage: 'Ubuntu-latest'
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.x'
architecture: 'x64'
- script: |
python -m pip install setuptools
python setup.py sdist
displayName: 'Build sdist'


@ -1,720 +0,0 @@
Change_LOG / History
V12.0.0
NOTE:
- This release contains major backwards-incompatible changes to the internal API
- Windows users will need to manually install pywin32
Add Python 3 support
Add cleanup script for post-update cleanup
Update all dependencies
Move vendored packages in `core` to `libs`
Move common libs to `libs/common`
Move custom libs to `libs/custom`
Move Python 2 libs to `libs/py2`
Move Windows libs to `libs/windows`
Fix PEP8
Add feature to make libs importable
Add feature to auto-update libs
Add path parent option to module path and default to using local path
Update invisible.cmd to return errorlevel
Update invisible.vbs to return exit code of 7zip
Update extractor.py for correct return code
Added debugging to extractor
Add option for windows extraction debugging
Remove surplus debug
Fix handling of None Password file
Fix invisible windows extraction
Fix execution of extraction
Start vbs directly from extractor
Delete invisible.cmd
Use args instead of Wscript.Arguments
Fix postprocessing of failed / bad downloads (#1091)
Fix release is None
Fix UnRAR failing
V11.8.1 12/29/2018
Fix cleanup for nzbToMedia installed as a git submodule
V11.8.0 12/28/2018
Add version information
Add bumpversion support
Fix automatic cleanup script
V11.7 12/25/2018
Merry Christmas and Happy Holidays!
Add cleanup script to clean up bytecode
Add automatic cleanup on update
NOTE: Cleanup will force-run every time during a transitional period to minimize issues with upcoming refactoring
V11.06 11/03/2018
updates to incorporate importMode for NzbDrone/Sonarr and Radarr.
Correct typo(s) for "Lidarr" category.
only pass id to CP if release id found.
fix issue with no release id and no imdbid.
Fixed NZBGet save of Lidarr config.
improve logging for imdb id lookup.
fix minor description error.
add better logging of movie name when added to CP.
attempt to clean up Lidarr api commands.
update to use Mylar api.
set Torrent move-sym option to force SickRage process_method.
add rmDir import for HeadPhones processing.
change sickrage and sickchill names and modify api process to work with multiple sick* forks.
add NZBGet WebUI set of delete failed for HP.
fix qbittorrent to delete permanently (remove files on delete).
V11.05 27/06/2018
Add qBittorrent support.
Add SickGear support.
Add SiCKRAGE api support.
Fix for single file download.
Disable media check for failed HeadPhones downloads.
Added Lidarr flow. Still awaiting confirmation of api interface commands and return.
V11.04 30/12/2017
do not embed .sub.
add proper check of sub streams #1150 and filter out commentary.
traverse audiostreams in reverse.
add catch for OMDB api errors.
convert all listdir functions to unicode.
perform extraction, corruption checks, and transcoding when no server.
fix list indices errors when no fork set.
fix CP server responding test. Add trailing /.
use basestring to match unicode path in transcoder.
attempt autofork even if no username set.
allow long paths in Cleandir.
add Radarr handling.
minor fix for transcoder.
fix non-iterable type.
fix logging error.
DownloadedMovieScan updated to DownloadedMoviesScan.
add check to exception rename to not over-write existing.
don't try and process when no api/user.
Added omdbapikey functionality
force sonarr processing to "move".
already extracted archive not skipped.
fix text for keep_archive.
try to avoid spaces in outputdir.
change subtitle logging level.
Increase shutil copy buffer length from 4KB to 512KB.
improve user script media extension handling
add par2 rename/repair (linux only).
V11.03 15/01/2017
Add -o to output path for 7zip.
Try album directory then parent directory for HeadPhones variants.
Prevent duplication of audio tracks in Transcoder.
Update uTorrent Client interface.
Updated to use force_next for SickRage to prevent postprocessing in queue.
V11.02 30/11/2016
Added default "MKV-SD"
Added VideoResolution in nzbGet.
Fix Headphones directory parsing.
Remove proc_type when failed.
Added option "no_extract_failed"
Updated beautifulsoup 4 module.
Check for existence of codec_type key when counting streams.
Added default fallback for sabnzbd port = 8080.
V11.01 30/10/2016
Updated external modules and changed config to dict.
Started making code python 3 compatible.
Fixed auto-fork detection for new Sick* branches.
Fixed invalid indexing scope for TorrentToMedia.
Add Medusa fork and new param "ignore_subs".
Added check for language tag size, convert 3 letter language codes.
Fixed guessit call to allow guessit to work of full file path.
Add the ability to set octal permissions on the processed files prior to handing it off to Sickrage/Couchpotato.
Catch errors if not audio codec name.
Allow manual scans to continue.
Revert to 7zip if others missing.
Fixed int conversion base 8 from string or int.
Added more logging to server tests.
Added MKV-SD Profile.
Check for preferred codec even if not preferred language.
Don't convert VobSub to mov_text.
V10.15 29/05/2016
Don't copy archives when set to extract.
Specifically check for failed download handing regardless of fork.
sort Media file results by pathlength.
Synchronize changed SickRage directory param.
Don't remove release group information from base folder.
Don't add imdb id to file name when move-sym in use.
Fix string and integer concat error.
V10.14 13/03/2016
Add option move-sym to create symlink to renamed files.
Transmission comment fix.
Prevent int errors in chmod.
Fix urllib warnings.
Create unique directory in output in case of rename error in sick/couch.
Add -strict -2 to dts codec.
Added support to handle archives in SickRage.
Report Downloader failures to SickRage.
Continue on encoding detection failure.
Strip trailing and leading whitespaces from `mount_points`.
Also check sabnzbd history for nzoid.
Add generic run mode (manually enter parameters for execution).
V10.13 11/12/2015
Always add -strict -2 to aac codec.
Add "delete_on" for SickRage.
Add https handling for SABnzbd.
Added the ability to chmod Torrent directory before processing.
Add option to not resume failed torrent.
Add Option to not resume successful torrent.
Add procees name to final SABnzbd message.
Fix SSL warnings for comic processing.
Add .ts to mediaExtensions.
Don't update plex on failed.
Add option to preserve archive files after extraction.
Force_Clean doesn't over-ride delete_failed.
Added support for SickRageTV and SickRage branches.
V10.12 21/09/2015
Updated Requests Module to Latest Version. Works with Python 2.7.10
Add .img files to transcoder extraction routines.
V10.11 28/05/2015
Use socket to verify if running on Linux. Prevents issues with stale pid.
Add timeouts and improve single instance handling.
Prevent Scale Up.
Improve regex for rename script.
Improve safe rename functionality.
Ignore .bts extensions.
Don't process output when no transcoding needed.
Ignore Thumbs.db on manual run.
Rename nzbtomedia to core, to prevent errors on non-case-sensitive file systems.
Mark as bad if no media files found.
Increase server responding timeout.
Don't use last modified entry for CP renamer when no imdb id found.
Add plex library update.
V10.10 29/01/2015
Fix error when extracting on windows. (added import of subprocess)
Fix subtitles download and emdedding.
V10.9 19/01/2015
Prevent Errors when trying next release from CouchPotato (CouchPotato failed handling)
Prevent check for status change when using Manage scan (CouchPotato)
Better Tooltip for "host" in NZBGet settings.
Continue if failed to connect to Torrent Client.
Fixed resolution settings in Transcoder.
Make Windows Linking and extraction invisible.
V10.8 15/12/2014
Impacts All
Removed "stand alone" scripts DeleteSamples and ResetDateTimes. These are now in https://github.com/clinton-hall/GetScripts
Removed chp.exe and replaced with vb script.
Improved Sonarr(NZBDrone) CDH support.
Use folder Permissions to set permissions for sub directories and files following extract.
Added support for new SickRage Login.
Impacts NZBs
Get NZOID from SABnzbd for better release matching.
Impacts Torrents
Now gets Label from Deluge.
Changed SSL version for updated Deluge (0.3.11+)
Impacts Transcoding
Fixed reported bugs.
Fix Audio mapping.
Fix Subtitle mapping from external files.
Fixed scaling errors.
V10.7 06/10/2014
Impacts All
Add Transcoding of iso/images and VIDEO_TS structures.
Improved multiple session handling.
Improve NZBDrone handling (including Torrent Branch).
Multiple bug-fixes.
Impacts NZBs
Add custom "group" replacements to allow better subtitle searching.
Impacts Torrents
Add Vuze Torrent Client support.
V10.6 26/08/2014
Impacts All
Bug Fixes.
Impacts NZBs
Added FailureLink style feedback to dognzb for failed and corrupt downloads.
V10.5 05/08/2014
Impacts All
Bug Fixes for Transcoder.
Support for lib-av as well as ffmpeg.
Fixed SickBeard auto-fork detection.
V10.4 30/07/2014
Impacts All
Suppress printed messages from extractor.
Allow no sub languages to be specified.
Ignore hdmv_pgs_subtitle codecs in transcoder.
Fix remote directory use with HeadPhones.
Only use nice and ionice when available.
Impacts NZBs
Cleaner exit logging for SABnzbd.
Impacts Torrents
Improved manual run handling.
V10.3 15/07/2014
Impacts All
Fix auto-fork to identify default fork.
V10.2 15/07/2014
Impacts All
Bug Fixes.
If extracting files and extraction is not successful, return failure and don't delete archives.
V10.1 11/07/2014
Impacts All
Improved Transcoder
Minor Bug Fixes
Now accepts Number of Audio Channels for Transcoder options.
Userscript can perform video corruption check first.
Improved extraction. Extract all subdirs and multiple "unique" archives in a directory.
Check if already running and wait for complete before continuing.
Impacts NZBs
Allow UserScript for NZBs
Impacts Torrents
Do Extraction Before Flatten
V10.0 03/07/2014
Impacts All
Changed to python2 (some systems now come with python = python3 as default).
Major changes to Transcoder. Only copy streams where possible.
Pre-defined Transcode options for some devices.
Added log_env option to capture environment variables.
Improved remote directory handling.
Various fixes.
V9.3 09/06/2014
Impacts Torrents
Allow Headphones to remove torrents and data after processing.
Delete torrent if uselink = move
Added forceClean for outputDir. Works when file permissions prevent CP/SB from moving files.
Ignore .x264 from archive "part" checks.
Changed handling of TPB/Pistachitos SB forks. Default is to link/extract here. Disabled by Torrent_NoLink = 1.
Changed handling for HeadPhones now that HeadPhones allows the process directory to be defined.
Restructured flow and streamlined process.
Impacts NZBs
Fix setting of Mylar config from NZBGet.
Created shell scripts for nzbTo{App}. All now call the common nzbToMedia.py.
Impacts All
Changes to Couchpotato API for [nosql] added. Keeps aligned with current CouchPotato develop branch.
Add Auto Detection of SickBeard Fork. Thanks @echel0n
Added config class, re-coded migratecfg, misc bugfixes and code cleanup.
Added dynamic timeout based on directory size.
Added process_Method for SickBeard.
Changed configuration migrate process.
Major structure and process re-format.
Improved Manual Call Handling
Now prints github version into log when available.
Changed log location and format.
Added autoUpdate option via git.
All calls now use requests, not urllib.
All details now saved into Database. Can be used for more features later ;)
Improved status checking to ensure we only cleanup when successfully processed.
Huge Thanks @echel0n
V9.2 05/03/2014
Impacts All
Change default "wait_for" to 5 mins. CouchPotato can take more than 2 minutes to return on renamer.scan request.
Added SickBeard "wait_for" to be customizable to prevent unwanted timeouts.
Fixed ascii conversion of directory name.
Added a list of common sample ids and a way to delete all media files smaller than the sample file size limit (a rough sketch of the check follows below).
Added urlquote to dirName for CouchPotato (allows special characters in directory name)
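A minimal sketch of how such a sample check might look (the id list and size limit here are illustrative placeholders, not the project's actual defaults):

import os

SAMPLE_IDS = ['sample', 'dvdtrailer']   # hypothetical list of common sample ids
MIN_SAMPLE_SIZE_MB = 200                # hypothetical size limit from the config

def is_sample(filepath):
    # Treat a media file as a sample if a known sample id appears in the name,
    # or (when the size-based option is enabled) if it is below the size limit.
    name = os.path.basename(filepath).lower()
    size_mb = os.path.getsize(filepath) / (1024.0 * 1024.0)
    return any(sample_id in name for sample_id in SAMPLE_IDS) or size_mb < MIN_SAMPLE_SIZE_MB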
Impacts NZBs
Fix error with manual run of nzbToMedia.
Make sure SickBeard receives the individual download dir.
Added option to set SickBeard extraction as either Downloader or Destination (SickBeard).
Fixed Health Check handling for NZBGet.
Impacts Torrents
Added option to run userscript once only (on directory).
Added Option to not flatten specific categories.
Added rtorrent integration.
Fixes for HeadPhones use (no flatten), no move/sym, and fix move back to original.
V9.1 24/01/2014
Impacts All
Don't wait to verify status change in CouchPotato when no initial status (manual run)
Now use "wait_for" timing as socket timeout on the renamer.scan. It appears to now be delayed in confirming success.
V9.0 19/01/2014
Impacts NZBs
SABnzbd 0.7.17+ now uses 8 arguments, not 7. These scripts now support the extra argument.
Impacts Torrents
Always pause before processing.
Moved delete to end of routine, only when successful process occurs.
Don't flatten hp category (in case of multi-CD album).
Added UserScript to be called for un-categorized downloads and other defined categories.
Added Torrent Hash to Deluge to assist with movie ID.
Added passwords option to attempt extraction of passworded archives.
Impacts All
Added default socket timeout to prevent script hanging when the destination servers don't respond to http requests.
Made processing Category Centric as an option for people running multiple versions of SickBeard and CouchPotato etc.
Added TPB version of SickBeard processing. This now uses a fork pass-in instead of failed_fork.
Added new option to convert files, directories, and parameters to ASCII. To be used if you regularly download "foreign" titles and have problems with CP/SB.
Now only parse results from CouchPotato 50 at a time to prevent errors with a large wanted list (see the sketch below).
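For context, fetching the wanted list in pages of 50 could look roughly like the sketch below; the limit_offset parameter and the response field names are assumptions about the CouchPotato api, not something stated in this changelog.

import requests

# Illustrative values; the real host, port and api key come from the config file.
CPS_API = 'http://localhost:5050/api/YOUR_API_KEY'
PAGE_SIZE = 50

def iter_wanted_movies():
    # Request the wanted list 50 movies at a time instead of in one large call.
    offset = 0
    while True:
        # 'limit_offset' is assumed to take a 'limit,offset' value for paging.
        params = {'status': 'active', 'limit_offset': '{0},{1}'.format(PAGE_SIZE, offset)}
        response = requests.get('{0}/movie.list'.format(CPS_API), params=params, timeout=60)
        movies = response.json().get('movies') or []
        if not movies:
            break
        for movie in movies:
            yield movie
        offset += PAGE_SIZE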
V8.5 05/10/2013
Impacts Torrents
Added Transmission RPC client.
Now pauses and resumes or removes from transmission.
Added debugging of input arguments from torrent clients.
Impacts NZBs
Removed obsolete NZBget (pre V11) code.
Impacts All.
Fixed HeadPhones processing.
Fixed movie parsing in CPS api.
V8.4 14/09/2013
Impacts Torrents
Don't include 720p or 1080p as parts for extracting.
Extracts all sub-folders.
Added option to Move files.
Fix for single file torrents linked to subfolder of same name.
Impacts All
Added option for SickBeard delay (for forks that use a 1 minute check).
Updated to new api call in CouchPotato (movie.searcher.try_next)
V8.3 11/07/2013
Impacts All
Allow use of experimental AAC codec in transcoder.
Remove username and password when api key is used.
Add .m4v as media
Added ResetDateTime.py
Manual option for Mylar script.
Fixes for Gamez script.
Impacts NZBs
Added option to remove folder path when CouchPotato is on a different system to the downloader.
NZBGet v11.0 stable now current.
V8.2 26/05/2013
Impacts All
Add option to set the "wait_for" period. This is how long the script waits to see if the movie changes status in CouchPotato.
minSampleSize now moved to [extensions] section and available for nzbs and torrents.
New option in transcoder to use "niceness" on Linux.
Remove excess logging from transcoder.
Impacts NZBs
Added Flatten of input directory and test for media files (including sample deletion) in autoProcessTV
Impacts Torrents
Fixed Delete_Original option
Fix typo which caused crash if not sickbeard or couchpotato.
V8.1 04/05/2013
Impacts All
Improved exception logging for error conditions
Impacts Torrents
Fixed an import error when extracting
Impacts NZBs
Fixed passthrough of inputName from NZBGet to pass the .nzb extension (required for SickBeard's failed fork)
V8.0 28/04/2013
Impacts All
Added download_id pass through for CouchPotato release matching
Uses single directory scanning for CouchPotato renamer
Matches imdb_id, download_id, clientAgent with CPS database
Impacts NZB
Added direct configuration support via nzbget webUI (nzbget v11+).
All nzb scripts are now directly callable in nzbget v11.
Settings made in nzbget webUI will be applied to the autoProcessMedia.cfg when the scripts are run from nzbget.
Fixed TLS support for NZBGet email notifications (for V10 support)
V7.1 28/03/2013
Impacts Torrents
Added test for chp.exe. If not found, calls 7zip directly
Added test for multi-part archives. Will only extract part1
Impacts NZB
Fixed failed download handling from nzbget (won't delete or move root!!!)
Fixed sendEmail for nzbget to use html with <br> line breaks
V7.0 21/03/2013
Impacts Torrents
Added option to delete torrent and original files after processing (utorrent)
Impacts NZB
Added nzbget windows script (to be compiled)
Changed nzbget folders to previous X.X, current-stable, testing X.X format
Fix nzbget change directory failure problem
Improved nzbget logging
Add logging to nzbget email notification
Synchronised v10 to latest nzbget testing scripts
Added failed download folder for failed downloads in nzbget
Added option to delete failed in nzbget
Created a single nzbToMedia.py script for all categories (will be the only nzb script compiled for windows)
Impacts All
Added rotating log file handler
Added ffmpeg transcoder
Added CouchPotato status check to provide confirmation of renamer complete
CouchPotato status check will timeout after 2 minutes in case something goes wrong
Improved logging.
Improved scene exception handling.
Major changes to code layout
Better efficiency
Added support for Mylar, Gamez, and HeadPhones
Moved many of the "support" files to the autoProcess directory so that they aren't visible (looks neater)
Added migration tool to update .cfg file on first run following update.
V6.0 03/03/2013
Impacts Torrents
Bundled 7zip binaries and created extraction functions.
Now pauses uTorrent seeding before calling renamer in SickBeard/CouchPotatoServer
uTorrent Resumes seeding after files (hardlinks) have been renamed
Impacts NZB
Added local file logging.
Impacts All
Added scene exception handling. Currently for "QoQ"
Improved code layout.
V5.1 22/02/2013
Improved category search to loop through directory structure.
Added support for deluge and potentially other Torrent clients.
uTorrent now must pass "utorrent" before "%D" "%N" "%L"
added test for date modified (less than 5 mins ago) if root directory and no torrent name.
".cp(ttxxxxxx)" tag preserved in directory name for CPS renaming.
All changes affect Torrent handling. Should not impact NZB handling.
V5.0 20/02/2013
Fixed Extraction and Hard-Linking support in TorrentToMedia.
Added new config options for movie file extensions, metadata extensions, compressed file extensions.
Added braid to sync linktastic.
Windows Builds now run without console displaying.
All changes affect Torrent handling. Should not impact NZB handling.
V4.3 17/02/2013
Added Logger in TorrentToMedia.py
Added nzbget V10.0 script.
Delete sample files in nzbget postprocessing
Single Version for all files.
V4.2 12/02/2013
Fixes to TorrentToMedia
V4.1 02/02/2013
Added Torrent Support (µTorrent and Transmission).
Added manual run option for nzbToSickBeard.
Changed nzbGet script to use move not copy and remove.
Merged all .cfg scripts into one (autoProcessMedia.cfg).
Made all scripts executable (755) on github.
Added category limits for email support in nzbget.
Fixed issue with replacements (of paths) in email messages in nzbget.
V4.0 21/12/2012
Changed name from nzbToCouchPotato to nzbToMedia; now supports multiple post-processing from two nzb download clients.
Added email support for nzbget.
Version printing now for each of the nzbTo* scripts.
Added "custom" post-process support in nzbget.
Added post-process script output logging in nzbget.
V3.2 11/12/2012
Added failed handling from NZBGet. Thanks to schumi2004.
Also added support for the "failed download" development branch of SickBeard from https://github.com/Tolstyak/Sick-Beard.git
V3.1 02/12/2012
Added conversion to ensure the status passed to the autoProcessTV and autoProcessMovie is always handled as an integer.
V3.0 30/11/2012
Changed name from sabToCouchPotato to nzbToCouchPotato as this now included NZBGet support.
Packaged the NZBGet postprocess files as well as modified version of nzbToSickBeard (from sabToSickBeard).
V2.2 05/10/2012
Re-wrote the failed download handling to just search for the imdb ttXXXX identifier (as received from the nzb name); a rough sketch of this search follows below.
Now issues only two api calls: movie.list and searcher.try_next.
Should be more robust with regard to changes to CPS and also uses fewer resources (i.e. fewer api calls and less processing).
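A minimal sketch of that identifier search (the function name and the allowance for 8-digit ids are illustrative assumptions, not taken from the script):

import re

def find_imdb_id(nzb_name):
    # Pull an imdb ttXXXXXXX identifier out of an nzb/release name, if present.
    # Ids were 7 digits at the time; newer ones can be 8, so match 7 or more.
    match = re.search(r'(tt\d{7,})', nzb_name, re.IGNORECASE)
    return match.group(1).lower() if match else None

# Example: find_imdb_id('Some.Movie.2012.720p.cp(tt1234567)') returns 'tt1234567'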
V2.1 04/10/2012
Detected a change in the movie release info format. Fixed the script to work with the new format.
V2.0 04/10/2012
Fixed an issue with the failed download handling in that the status id for "snatched" can be different on each installation. Now performs a status.list via api to verify the status.
Also including a version print (currently 2.0... yeah original I know) so you know if you are current.
Removed the multiple versions. The former _recue version will perform the standard renamer only if "postprocess only verified downloads" (default) is enabled in SABnzbd. Also, the "unix" version works fine in Windows; only the "dos" version gave issues in Linux. In other words, this one version should work for all systems.
For historical reasons, the former download stats apply to the old versions:
sabToCouchPotato-dos - downloaded 143 times
sabToCouchPotato-unix - downloaded 205 times
sabToCouchPotato_recue - downloaded 105 times
Also updated the Windows Build to include the same changes. I have removed the link to the linux build as this didn't work on all systems and it really shouldn't be necessary. Let me know if you need this updated.
V1.9 18/09/2012
compiled (build) versions of sabToSickBeard and sabToCouchPotato added for both Linux and Windows. links at top of post.
V1.9 16/09/2012
Added a compiled .exe version for windows. Should prevent the "python not recognised" issue and allow this to be used in conjunction with the windows build on systems that do not have python installed.
This is the full (_recue) version. If SABnzbd is set to post-process only verified jobs, this will not recue and will function as a standard renamer.
V1.9 27/08/2012
Following the latest CPS update on the master branch, this script is not really needed as CPS actually polls the SABnzbd api and does the same as this script (internally).
However, if you have any issues with CPS constantly downloading the same movies, or filling the log with polling SABnzbd for completed movies, or otherwise prefer to use this method, then you can still use this script and make the following changes in CPS:
Settings, renamer, run every (advanced) = set to 1440 (or some longer interval)
Settings, renamer, next On_failed = off
Settings, downloaders, SABnzbd, Delete failed = off.
V1.9 06/08/2012
Also added the integer handling of status in the sabToSickBeard.py script to prevent SickBeard trying to postprocess a failed TV download. Only impacts the _recue version
V1.8 05/08/2012
Modified the _recue version as SABnzbd 0.7.3 now appears to pass the "status" variable as a string not an integer!!! (or I had it wrong on first attempt :~)
This causes the old script to identify completed downloads as failed and recues the next download!
The fix here should work with any conceivable subsequent updates in that I now make sys.argv[7] an integer before passing it. If the variable already is an integer, this shouldn't cause any issues.
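# SABnzbd now passes argv[7] (status) as a string; coerce it to an int before calling autoProcessMovie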
status = int(sys.argv[7])
autoProcessMovie.process(sys.argv[1], sys.argv[2], status)
V1.7 02/08/2012
Added a new version sabToCouchPotato_recue
This works the same as the other versions, but includes support for recuing failed downloads.
This is new, and only tested once (with success ) at my end.
To get this to run you will need to uncheck the "post-process only verified jobs" option in SABnzbd. Also, to avoid issues with SickBeard postprocessing, I have included a modified postprocessing for SickBeard that just checks for failed status and then exits (the SickBeard Team are currently working on failed download handling and I will hopefully make this script work with that in the future)
This re-cue works as follows (a rough code sketch follows this section):
Performs an api call to CPS to get a list of all wanted movies (with all data including the releases and status etc)
It finds the nzbname (from SABnzbd) in the json list returned from the api call (movie.list) and identifies the movie id and release id.
It performs an api call to mark the release as "ignore" and then performs another api call to refresh the movie.
If another (next best) release that meets your criteria is already available it will send that to SABnzbd, otherwise it will wait until a new release becomes available.
I have left the old versions here for now for those who don't want to try this. Also, if you don't uncheck the "post-process only verified jobs" in SABnzbd this code will perform the same as the previous versions.
The next issue to tackle (if this works) is automating the deletion of failed download files in SABnzbd.... but I figured this was a start.
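A rough sketch of that flow (the endpoint and field names such as release.ignore, movie.refresh and '_id' are assumptions for illustration, not taken from the script):

import requests

# Illustrative values; the real host, port and api key come from the script's config.
CPS_API = 'http://localhost:5050/api/YOUR_API_KEY'

def recue_failed_download(nzb_name):
    # Find the failed release by nzb name, ignore it, then refresh the movie so
    # CouchPotato can send the next best release to SABnzbd (or wait for a new one).
    movies = requests.get('{0}/movie.list'.format(CPS_API), timeout=60).json().get('movies') or []
    for movie in movies:
        for release in movie.get('releases', []):
            if release.get('info', {}).get('name') == nzb_name:
                requests.get('{0}/release.ignore'.format(CPS_API), params={'id': release['_id']}, timeout=60)
                requests.get('{0}/movie.refresh'.format(CPS_API), params={'id': movie['_id']}, timeout=60)
                return True
    return False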
V1.6 22/07/2012
No functionality change, but providing scripts in both unix and dos format to prevent exit(127) errors.
If you are using windows, use the dos format. If you are using linux, use the unix format and unzip the files in linux.
V1.5 17/07/2012
Add back the web_root parameter to set the URL base.
V1.4 17/07/2012
Have uploaded the latest version.
Changes:
Removed support for a movie.downloaded api call that was only used in a separate branch and is not expected to be merged.
Modified the passthrough to allow a manual call to this script (i.e. does not need to be called from SABnzbd).
Have added a helpfile that explains the setup options in a bit more detail.
Modified the .cfg.sample file to use 60 as a default delay and now specify that 60 should be your minimum to ensure the renamer.scan finds newly extracted movies.
V1.3 and earlier were not fully tracked, as the script itself (not files) was posted on the QNAP forums.


@ -1,14 +1,39 @@
#!/usr/bin/env python #!/usr/bin/env python
from __future__ import print_function from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import os import os
import subprocess import subprocess
import sys import sys
import shutil
sys.dont_write_bytecode = True
FOLDER_STRUCTURE = {
'libs': [
'common',
'custom',
'py2',
'win',
],
'core': [
'auto_process',
'extractor',
'plugins',
'processor',
'utils',
],
}
class WorkingDirectory(object): class WorkingDirectory(object):
"""Context manager for changing current working directory.""" """Context manager for changing current working directory."""
def __init__(self, new, original=None): def __init__(self, new, original=None):
self.working_directory = new self.working_directory = new
self.original_directory = os.getcwd() if original is None else original self.original_directory = os.getcwd() if original is None else original
@ -18,10 +43,27 @@ class WorkingDirectory(object):
return self return self
def __exit__(self, exc_type, exc_val, exc_tb): def __exit__(self, exc_type, exc_val, exc_tb):
os.chdir(self.original_directory) try:
os.chdir(self.original_directory)
except OSError as error:
print(
'Unable to return to {original_directory}: {error}\n'
'Continuing in {working_directory}'.format(
original_directory=self.original_directory,
error=error,
working_directory=self.working_directory,
),
)
def module_path(module=__file__, parent=False): def module_path(module=__file__, parent=False):
"""
Detect path for a module.
:param module: The module who's path is being detected. Defaults to current module.
:param parent: True to return the parent folder of the current module.
:return: The absolute normalized path to the module or its parent.
"""
try: try:
path = module.__file__ path = module.__file__
except AttributeError: except AttributeError:
@ -74,6 +116,7 @@ def clean_bytecode():
result = git_clean( result = git_clean(
remove_directories=True, remove_directories=True,
force=True, force=True,
ignore_rules=True,
exclude=[ exclude=[
'*.*', # exclude everything '*.*', # exclude everything
'!*.py[co]', # except bytecode '!*.py[co]', # except bytecode
@ -106,8 +149,34 @@ def clean_folders(*paths):
return result return result
def clean(*paths): def force_clean_folder(path, required):
"""
Force clean a folder and exclude any required subfolders.
:param path: Target folder to remove subfolders
:param required: Keep only the required subfolders
"""
root, dirs, files = next(os.walk(path))
required = sorted(required)
if required:
print('Skipping required subfolders', required)
remove = sorted(set(dirs).difference(required))
missing = sorted(set(required).difference(dirs))
for path in remove:
pathname = os.path.join(root, path)
print('Removing', pathname)
shutil.rmtree(pathname)
if missing:
raise Exception('Required subfolders missing:', missing)
def clean(paths):
"""Clean up bytecode and obsolete folders.""" """Clean up bytecode and obsolete folders."""
def _report_error(msg):
print('WARNING: Automatic cleanup could not be executed.')
print(' If errors occur, manual cleanup may be required.')
print('REASON : {}'.format(msg))
with WorkingDirectory(module_path()) as cwd: with WorkingDirectory(module_path()) as cwd:
if cwd.working_directory != cwd.original_directory: if cwd.working_directory != cwd.original_directory:
print('Changing to directory:', cwd.working_directory) print('Changing to directory:', cwd.working_directory)
@ -116,20 +185,28 @@ def clean(*paths):
try: try:
result = clean_bytecode() result = clean_bytecode()
except SystemExit as error: except SystemExit as error:
print(error) _report_error(error)
else: else:
print(result or 'No bytecode to clean') print(result or 'No bytecode to clean')
if paths and os.path.exists('.git'): if paths and os.path.exists('.git'):
print('\n-- Cleaning folders: {} --'.format(paths)) print('\n-- Cleaning folders: {} --'.format(list(paths)))
try: try:
result = clean_folders(*paths) result = clean_folders(*paths)
except SystemExit as error: except SystemExit as error:
print(error) _report_error(error)
else: else:
print(result or 'No folders to clean\n') print(result or 'No folders to clean\n')
else: else:
print('Directory is not a git repository') print('\nDirectory is not a git repository')
try:
items = paths.items()
except AttributeError:
_report_error('Failed to clean, no subfolder structure given')
else:
for folder, subfolders in items:
print('\nForce cleaning folder:', folder)
force_clean_folder(folder, subfolders)
if cwd.working_directory != cwd.original_directory: if cwd.working_directory != cwd.original_directory:
print('Returning to directory: ', cwd.original_directory) print('Returning to directory: ', cwd.original_directory)
@ -138,4 +215,4 @@ def clean(*paths):
if __name__ == '__main__': if __name__ == '__main__':
clean('libs', 'core') clean(FOLDER_STRUCTURE)


@ -1,6 +1,11 @@
# coding=utf-8 # coding=utf-8
from __future__ import print_function from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import itertools import itertools
import locale import locale
@ -11,6 +16,7 @@ import subprocess
import sys import sys
import time import time
import eol
import libs.autoload import libs.autoload
import libs.util import libs.util
@ -45,47 +51,80 @@ from six.moves import reload_module
from core import logger, main_db, version_check, databases, transcoder from core import logger, main_db, version_check, databases, transcoder
from core.configuration import config from core.configuration import config
from core.plugins.downloaders.configuration import (
configure_nzbs,
configure_torrents,
configure_torrent_class,
)
from core.plugins.downloaders.utils import (
pause_torrent,
remove_torrent,
resume_torrent,
)
from core.plugins.plex import configure_plex
from core.utils import ( from core.utils import (
RunningProcess, wake_up, category_search, clean_dir, clean_dir, copy_link, RunningProcess,
create_torrent_class, extract_files, flatten, get_dirs, get_download_info, category_search,
list_media_files, make_dir, parse_args, pause_torrent, remove_torrent, clean_dir,
resume_torrent, remove_dir, remove_read_only, sanitize_name, update_download_info_status, copy_link,
extract_files,
flatten,
get_dirs,
get_download_info,
list_media_files,
make_dir,
parse_args,
rchmod,
remove_dir,
remove_read_only,
restart,
sanitize_name,
update_download_info_status,
wake_up,
) )
__version__ = '12.0.1' __version__ = '12.1.13'
# Client Agents # Client Agents
NZB_CLIENTS = ['sabnzbd', 'nzbget', 'manual'] NZB_CLIENTS = ['sabnzbd', 'nzbget', 'manual']
TORRENT_CLIENTS = ['transmission', 'deluge', 'utorrent', 'rtorrent', 'qbittorrent', 'other', 'manual'] TORRENT_CLIENTS = ['transmission', 'deluge', 'utorrent', 'rtorrent', 'qbittorrent', 'other', 'manual']
# sabnzbd constants
SABNZB_NO_OF_ARGUMENTS = 8
SABNZB_0717_NO_OF_ARGUMENTS = 9
# sickbeard fork/branch constants # sickbeard fork/branch constants
FORKS = {}
FORK_DEFAULT = 'default' FORK_DEFAULT = 'default'
FORK_FAILED = 'failed' FORK_FAILED = 'failed'
FORK_FAILED_TORRENT = 'failed-torrent' FORK_FAILED_TORRENT = 'failed-torrent'
FORK_SICKRAGE = 'SickRage'
FORK_SICKCHILL = 'SickChill' FORK_SICKCHILL = 'SickChill'
FORK_SICKCHILL_API = 'SickChill-api'
FORK_SICKBEARD_API = 'SickBeard-api' FORK_SICKBEARD_API = 'SickBeard-api'
FORK_MEDUSA = 'Medusa' FORK_MEDUSA = 'Medusa'
FORK_MEDUSA_API = 'Medusa-api'
FORK_MEDUSA_APIV2 = 'Medusa-apiv2'
FORK_SICKGEAR = 'SickGear' FORK_SICKGEAR = 'SickGear'
FORK_SICKGEAR_API = 'SickGear-api'
FORK_STHENO = 'Stheno' FORK_STHENO = 'Stheno'
FORKS[FORK_DEFAULT] = {'dir': None}
FORKS[FORK_FAILED] = {'dirName': None, 'failed': None} FORKS = {
FORKS[FORK_FAILED_TORRENT] = {'dir': None, 'failed': None, 'process_method': None} FORK_DEFAULT: {'dir': None},
FORKS[FORK_SICKRAGE] = {'proc_dir': None, 'failed': None, 'process_method': None, 'force': None, 'delete_on': None} FORK_FAILED: {'dirName': None, 'failed': None},
FORKS[FORK_SICKCHILL] = {'proc_dir': None, 'failed': None, 'process_method': None, 'force': None, 'delete_on': None, 'force_next': None} FORK_FAILED_TORRENT: {'dir': None, 'failed': None, 'process_method': None},
FORKS[FORK_SICKBEARD_API] = {'path': None, 'failed': None, 'process_method': None, 'force_replace': None, 'return_data': None, 'type': None, 'delete': None, 'force_next': None} FORK_SICKCHILL: {'proc_dir': None, 'failed': None, 'process_method': None, 'force': None, 'delete_on': None, 'force_next': None},
FORKS[FORK_MEDUSA] = {'proc_dir': None, 'failed': None, 'process_method': None, 'force': None, 'delete_on': None, 'ignore_subs': None} FORK_SICKCHILL_API: {'path': None, 'proc_dir': None, 'failed': None, 'process_method': None, 'force': None, 'force_replace': None, 'return_data': None, 'type': None, 'delete': None, 'force_next': None, 'is_priority': None, 'cmd': 'postprocess'},
FORKS[FORK_SICKGEAR] = {'dir': None, 'failed': None, 'process_method': None, 'force': None} FORK_SICKBEARD_API: {'path': None, 'failed': None, 'process_method': None, 'force_replace': None, 'return_data': None, 'type': None, 'delete': None, 'force_next': None, 'cmd': 'postprocess'},
FORKS[FORK_STHENO] = {"proc_dir": None, "failed": None, "process_method": None, "force": None, "delete_on": None, "ignore_subs": None} FORK_MEDUSA: {'proc_dir': None, 'failed': None, 'process_method': None, 'force': None, 'delete_on': None, 'ignore_subs': None},
FORK_MEDUSA_API: {'path': None, 'failed': None, 'process_method': None, 'force_replace': None, 'return_data': None, 'type': None, 'delete_files': None, 'is_priority': None, 'cmd': 'postprocess'},
FORK_MEDUSA_APIV2: {'proc_dir': None, 'resource': None, 'failed': None, 'process_method': None, 'force': None, 'type': None, 'delete_on': None, 'is_priority': None},
FORK_SICKGEAR: {'dir': None, 'failed': None, 'process_method': None, 'force': None},
FORK_SICKGEAR_API: {'path': None, 'process_method': None, 'force_replace': None, 'return_data': None, 'type': None, 'is_priority': None, 'failed': None, 'cmd': 'sg.postprocess'},
FORK_STHENO: {'proc_dir': None, 'failed': None, 'process_method': None, 'force': None, 'delete_on': None, 'ignore_subs': None},
}
ALL_FORKS = {k: None for k in set(list(itertools.chain.from_iterable([FORKS[x].keys() for x in FORKS.keys()])))} ALL_FORKS = {k: None for k in set(list(itertools.chain.from_iterable([FORKS[x].keys() for x in FORKS.keys()])))}
# SiCKRAGE OAuth2
SICKRAGE_OAUTH_CLIENT_ID = 'nzbtomedia'
SICKRAGE_OAUTH_TOKEN_URL = 'https://auth.sickrage.ca/realms/sickrage/protocol/openid-connect/token'
# NZBGet Exit Codes # NZBGet Exit Codes
NZBGET_POSTPROCESS_PARCHECK = 92 NZBGET_POSTPROCESS_PAR_CHECK = 92
NZBGET_POSTPROCESS_SUCCESS = 93 NZBGET_POSTPROCESS_SUCCESS = 93
NZBGET_POSTPROCESS_ERROR = 94 NZBGET_POSTPROCESS_ERROR = 94
NZBGET_POSTPROCESS_NONE = 95 NZBGET_POSTPROCESS_NONE = 95
@ -99,7 +138,7 @@ SYS_ENCODING = None
FAILED = False FAILED = False
AUTO_UPDATE = None AUTO_UPDATE = None
NZBTOMEDIA_VERSION = None NZBTOMEDIA_VERSION = __version__
NEWEST_VERSION = None NEWEST_VERSION = None
NEWEST_VERSION_STRING = None NEWEST_VERSION_STRING = None
VERSION_NOTIFY = None VERSION_NOTIFY = None
@ -111,69 +150,78 @@ FORCE_CLEAN = None
SAFE_MODE = None SAFE_MODE = None
NOEXTRACTFAILED = None NOEXTRACTFAILED = None
NZB_CLIENTAGENT = None NZB_CLIENT_AGENT = None
SABNZBDHOST = None SABNZBD_HOST = None
SABNZBDPORT = None SABNZBD_PORT = None
SABNZBDAPIKEY = None SABNZBD_APIKEY = None
NZB_DEFAULTDIR = None NZB_DEFAULT_DIRECTORY = None
TORRENT_CLIENTAGENT = None TORRENT_CLIENT_AGENT = None
TORRENT_CLASS = None TORRENT_CLASS = None
USELINK = None USE_LINK = None
OUTPUTDIRECTORY = None OUTPUT_DIRECTORY = None
NOFLATTEN = [] NOFLATTEN = []
DELETE_ORIGINAL = None DELETE_ORIGINAL = None
TORRENT_CHMOD_DIRECTORY = None TORRENT_CHMOD_DIRECTORY = None
TORRENT_DEFAULTDIR = None TORRENT_DEFAULT_DIRECTORY = None
TORRENT_RESUME = None TORRENT_RESUME = None
TORRENT_RESUME_ON_FAILURE = None TORRENT_RESUME_ON_FAILURE = None
REMOTEPATHS = [] REMOTE_PATHS = []
UTORRENTWEBUI = None UTORRENT_WEB_UI = None
UTORRENTUSR = None UTORRENT_USER = None
UTORRENTPWD = None UTORRENT_PASSWORD = None
TRANSMISSIONHOST = None TRANSMISSION_HOST = None
TRANSMISSIONPORT = None TRANSMISSION_PORT = None
TRANSMISSIONUSR = None TRANSMISSION_USER = None
TRANSMISSIONPWD = None TRANSMISSION_PASSWORD = None
DELUGEHOST = None SYNO_HOST = None
DELUGEPORT = None SYNO_PORT = None
DELUGEUSR = None SYNO_USER = None
DELUGEPWD = None SYNO_PASSWORD = None
QBITTORRENTHOST = None DELUGE_HOST = None
QBITTORRENTPORT = None DELUGE_PORT = None
QBITTORRENTUSR = None DELUGE_USER = None
QBITTORRENTPWD = None DELUGE_PASSWORD = None
PLEXSSL = None QBITTORRENT_HOST = None
PLEXHOST = None QBITTORRENT_PORT = None
PLEXPORT = None QBITTORRENT_USER = None
PLEXTOKEN = None QBITTORRENT_PASSWORD = None
PLEXSEC = []
EXTCONTAINER = [] PLEX_SSL = None
COMPRESSEDCONTAINER = [] PLEX_HOST = None
MEDIACONTAINER = [] PLEX_PORT = None
AUDIOCONTAINER = [] PLEX_TOKEN = None
METACONTAINER = [] PLEX_SECTION = []
EXT_CONTAINER = []
COMPRESSED_CONTAINER = []
MEDIA_CONTAINER = []
AUDIO_CONTAINER = []
META_CONTAINER = []
SECTIONS = [] SECTIONS = []
CATEGORIES = [] CATEGORIES = []
FORK_SET = []
MOUNTED = None
GETSUBS = False GETSUBS = False
TRANSCODE = None TRANSCODE = None
CONCAT = None CONCAT = None
FFMPEG_PATH = None FFMPEG_PATH = None
SYS_PATH = None
DUPLICATE = None DUPLICATE = None
IGNOREEXTENSIONS = [] IGNOREEXTENSIONS = []
VEXTENSION = None VEXTENSION = None
OUTPUTVIDEOPATH = None OUTPUTVIDEOPATH = None
PROCESSOUTPUT = False PROCESSOUTPUT = False
GENERALOPTS = [] GENERALOPTS = []
OTHEROPTS = []
ALANGUAGE = None ALANGUAGE = None
AINCLUDE = False AINCLUDE = False
SLANGUAGES = [] SLANGUAGES = []
@ -213,11 +261,12 @@ SHOWEXTRACT = 0
PAR2CMD = None PAR2CMD = None
FFPROBE = None FFPROBE = None
CHECK_MEDIA = None CHECK_MEDIA = None
REQUIRE_LAN = None
NICENESS = [] NICENESS = []
HWACCEL = False HWACCEL = False
PASSWORDSFILE = None PASSWORDS_FILE = None
DOWNLOADINFO = None DOWNLOAD_INFO = None
GROUPS = None GROUPS = None
USER_SCRIPT_MEDIAEXTENSIONS = None USER_SCRIPT_MEDIAEXTENSIONS = None
@ -231,28 +280,9 @@ USER_SCRIPT_RUNONCE = None
__INITIALIZED__ = False __INITIALIZED__ = False
def initialize(section=None): def configure_logging():
global NZBGET_POSTPROCESS_ERROR, NZBGET_POSTPROCESS_NONE, NZBGET_POSTPROCESS_PARCHECK, NZBGET_POSTPROCESS_SUCCESS, \ global LOG_FILE
NZBTOMEDIA_TIMEOUT, FORKS, FORK_DEFAULT, FORK_FAILED_TORRENT, FORK_FAILED, NOEXTRACTFAILED, SHOWEXTRACT, \ global LOG_DIR
NZBTOMEDIA_BRANCH, NZBTOMEDIA_VERSION, NEWEST_VERSION, NEWEST_VERSION_STRING, VERSION_NOTIFY, SYS_ARGV, CFG, \
SABNZB_NO_OF_ARGUMENTS, SABNZB_0717_NO_OF_ARGUMENTS, CATEGORIES, TORRENT_CLIENTAGENT, USELINK, OUTPUTDIRECTORY, \
NOFLATTEN, UTORRENTPWD, UTORRENTUSR, UTORRENTWEBUI, DELUGEHOST, DELUGEPORT, DELUGEUSR, DELUGEPWD, VLEVEL, \
TRANSMISSIONHOST, TRANSMISSIONPORT, TRANSMISSIONPWD, TRANSMISSIONUSR, COMPRESSEDCONTAINER, MEDIACONTAINER, \
METACONTAINER, SECTIONS, ALL_FORKS, TEST_FILE, GENERALOPTS, LOG_GIT, GROUPS, SEVENZIP, CONCAT, VCRF, \
__INITIALIZED__, AUTO_UPDATE, APP_FILENAME, USER_DELAY, APP_NAME, TRANSCODE, DEFAULTS, GIT_PATH, GIT_USER, \
GIT_BRANCH, GIT_REPO, SYS_ENCODING, NZB_CLIENTAGENT, SABNZBDHOST, SABNZBDPORT, SABNZBDAPIKEY, \
DUPLICATE, IGNOREEXTENSIONS, VEXTENSION, OUTPUTVIDEOPATH, PROCESSOUTPUT, VCODEC, VCODEC_ALLOW, VPRESET, \
VFRAMERATE, LOG_DB, VBITRATE, VRESOLUTION, ALANGUAGE, AINCLUDE, ACODEC, ACODEC_ALLOW, ABITRATE, FAILED, \
ACODEC2, ACODEC2_ALLOW, ABITRATE2, ACODEC3, ACODEC3_ALLOW, ABITRATE3, ALLOWSUBS, SEXTRACT, SEMBED, SLANGUAGES, \
SINCLUDE, SUBSDIR, SCODEC, OUTPUTFASTSTART, OUTPUTQUALITYPERCENT, BURN, GETSUBS, HWACCEL, LOG_DIR, LOG_FILE, \
NICENESS, LOG_DEBUG, FORCE_CLEAN, FFMPEG_PATH, FFMPEG, FFPROBE, AUDIOCONTAINER, EXTCONTAINER, TORRENT_CLASS, \
DELETE_ORIGINAL, TORRENT_CHMOD_DIRECTORY, PASSWORDSFILE, USER_DELAY, USER_SCRIPT, USER_SCRIPT_CLEAN, USER_SCRIPT_MEDIAEXTENSIONS, \
USER_SCRIPT_PARAM, USER_SCRIPT_RUNONCE, USER_SCRIPT_SUCCESSCODES, DOWNLOADINFO, CHECK_MEDIA, SAFE_MODE, \
TORRENT_DEFAULTDIR, TORRENT_RESUME_ON_FAILURE, NZB_DEFAULTDIR, REMOTEPATHS, LOG_ENV, PID_FILE, MYAPP, ACHANNELS, ACHANNELS2, ACHANNELS3, \
PLEXSSL, PLEXHOST, PLEXPORT, PLEXTOKEN, PLEXSEC, TORRENT_RESUME, PAR2CMD, QBITTORRENTHOST, QBITTORRENTPORT, QBITTORRENTUSR, QBITTORRENTPWD
if __INITIALIZED__:
return False
if 'NTM_LOGFILE' in os.environ: if 'NTM_LOGFILE' in os.environ:
LOG_FILE = os.environ['NTM_LOGFILE'] LOG_FILE = os.environ['NTM_LOGFILE']
@ -261,11 +291,19 @@ def initialize(section=None):
if not make_dir(LOG_DIR): if not make_dir(LOG_DIR):
print('No log folder, logging to screen only') print('No log folder, logging to screen only')
def configure_process():
global MYAPP
MYAPP = RunningProcess() MYAPP = RunningProcess()
while MYAPP.alreadyrunning(): while MYAPP.alreadyrunning():
print('Waiting for existing session to end') print('Waiting for existing session to end')
time.sleep(30) time.sleep(30)
def configure_locale():
global SYS_ENCODING
try: try:
locale.setlocale(locale.LC_ALL, '') locale.setlocale(locale.LC_ALL, '')
SYS_ENCODING = locale.getpreferredencoding() SYS_ENCODING = locale.getpreferredencoding()
@ -293,8 +331,10 @@ def initialize(section=None):
else: else:
sys.exit(1) sys.exit(1)
# init logging
logger.ntm_log_instance.init_logging() def configure_migration():
global CONFIG_FILE
global CFG
# run migrate to convert old cfg to new style cfg plus fix any cfg missing values/options. # run migrate to convert old cfg to new style cfg plus fix any cfg missing values/options.
if not config.migrate(): if not config.migrate():
@ -312,9 +352,16 @@ def initialize(section=None):
logger.info('Loading config from [{0}]'.format(CONFIG_FILE)) logger.info('Loading config from [{0}]'.format(CONFIG_FILE))
CFG = config() CFG = config()
def configure_logging_part_2():
global LOG_DB
global LOG_DEBUG
global LOG_ENV
global LOG_GIT
# Enable/Disable DEBUG Logging # Enable/Disable DEBUG Logging
LOG_DEBUG = int(CFG['General']['log_debug'])
LOG_DB = int(CFG['General']['log_db']) LOG_DB = int(CFG['General']['log_db'])
LOG_DEBUG = int(CFG['General']['log_debug'])
LOG_ENV = int(CFG['General']['log_env']) LOG_ENV = int(CFG['General']['log_env'])
LOG_GIT = int(CFG['General']['log_git']) LOG_GIT = int(CFG['General']['log_git'])
@ -322,148 +369,206 @@ def initialize(section=None):
for item in os.environ: for item in os.environ:
logger.info('{0}: {1}'.format(item, os.environ[item]), 'ENVIRONMENT') logger.info('{0}: {1}'.format(item, os.environ[item]), 'ENVIRONMENT')
# initialize the main SB database
main_db.upgrade_database(main_db.DBConnection(), databases.InitialSchema) def configure_general():
global VERSION_NOTIFY
global GIT_REPO
global GIT_PATH
global GIT_USER
global GIT_BRANCH
global FORCE_CLEAN
global FFMPEG_PATH
global SYS_PATH
global CHECK_MEDIA
global REQUIRE_LAN
global SAFE_MODE
global NOEXTRACTFAILED
# Set Version and GIT variables # Set Version and GIT variables
NZBTOMEDIA_VERSION = '11.06'
VERSION_NOTIFY = int(CFG['General']['version_notify']) VERSION_NOTIFY = int(CFG['General']['version_notify'])
AUTO_UPDATE = int(CFG['General']['auto_update'])
GIT_REPO = 'nzbToMedia' GIT_REPO = 'nzbToMedia'
GIT_PATH = CFG['General']['git_path'] GIT_PATH = CFG['General']['git_path']
GIT_USER = CFG['General']['git_user'] or 'clinton-hall' GIT_USER = CFG['General']['git_user'] or 'clinton-hall'
GIT_BRANCH = CFG['General']['git_branch'] or 'master' GIT_BRANCH = CFG['General']['git_branch'] or 'master'
FORCE_CLEAN = int(CFG['General']['force_clean']) FORCE_CLEAN = int(CFG['General']['force_clean'])
FFMPEG_PATH = CFG['General']['ffmpeg_path'] FFMPEG_PATH = CFG['General']['ffmpeg_path']
SYS_PATH = CFG['General']['sys_path']
CHECK_MEDIA = int(CFG['General']['check_media']) CHECK_MEDIA = int(CFG['General']['check_media'])
REQUIRE_LAN = None if not CFG['General']['require_lan'] else CFG['General']['require_lan'].split(',')
SAFE_MODE = int(CFG['General']['safe_mode']) SAFE_MODE = int(CFG['General']['safe_mode'])
NOEXTRACTFAILED = int(CFG['General']['no_extract_failed']) NOEXTRACTFAILED = int(CFG['General']['no_extract_failed'])
def configure_updates():
global AUTO_UPDATE
global MYAPP
AUTO_UPDATE = int(CFG['General']['auto_update'])
version_checker = version_check.CheckVersion()
# Check for updates via GitHUB # Check for updates via GitHUB
if version_check.CheckVersion().check_for_new_version(): if version_checker.check_for_new_version() and AUTO_UPDATE:
if AUTO_UPDATE == 1: logger.info('Auto-Updating nzbToMedia, Please wait ...')
logger.info('Auto-Updating nzbToMedia, Please wait ...') if version_checker.update():
updated = version_check.CheckVersion().update() # restart nzbToMedia
if updated: try:
# restart nzbToMedia del MYAPP
try: except Exception:
del MYAPP pass
except Exception: restart()
pass else:
restart() logger.error('Update failed, not restarting. Check your log for more information.')
else:
logger.error('Update wasn\'t successful, not restarting. Check your log for more information.')
# Set Current Version # Set Current Version
logger.info('nzbToMedia Version:{version} Branch:{branch} ({system} {release})'.format logger.info('nzbToMedia Version:{version} Branch:{branch} ({system} {release})'.format
(version=NZBTOMEDIA_VERSION, branch=GIT_BRANCH, (version=NZBTOMEDIA_VERSION, branch=GIT_BRANCH,
system=platform.system(), release=platform.release())) system=platform.system(), release=platform.release()))
if int(CFG['WakeOnLan']['wake']) == 1:
def configure_wake_on_lan():
if int(CFG['WakeOnLan']['wake']):
wake_up() wake_up()
NZB_CLIENTAGENT = CFG['Nzb']['clientAgent'] # sabnzbd
SABNZBDHOST = CFG['Nzb']['sabnzbd_host'] def configure_groups():
SABNZBDPORT = int(CFG['Nzb']['sabnzbd_port'] or 8080) # defaults to accomodate NzbGet global GROUPS
SABNZBDAPIKEY = CFG['Nzb']['sabnzbd_apikey']
NZB_DEFAULTDIR = CFG['Nzb']['default_downloadDirectory']
GROUPS = CFG['Custom']['remove_group'] GROUPS = CFG['Custom']['remove_group']
if isinstance(GROUPS, str): if isinstance(GROUPS, str):
GROUPS = GROUPS.split(',') GROUPS = GROUPS.split(',')
if GROUPS == ['']: if GROUPS == ['']:
GROUPS = None GROUPS = None
TORRENT_CLIENTAGENT = CFG['Torrent']['clientAgent'] # utorrent | deluge | transmission | rtorrent | vuze | qbittorrent |other
USELINK = CFG['Torrent']['useLink'] # no | hard | sym
OUTPUTDIRECTORY = CFG['Torrent']['outputDirectory'] # /abs/path/to/complete/
TORRENT_DEFAULTDIR = CFG['Torrent']['default_downloadDirectory']
CATEGORIES = (CFG['Torrent']['categories']) # music,music_videos,pictures,software
NOFLATTEN = (CFG['Torrent']['noFlatten'])
if isinstance(NOFLATTEN, str):
NOFLATTEN = NOFLATTEN.split(',')
if isinstance(CATEGORIES, str):
CATEGORIES = CATEGORIES.split(',')
DELETE_ORIGINAL = int(CFG['Torrent']['deleteOriginal'])
TORRENT_CHMOD_DIRECTORY = int(str(CFG['Torrent']['chmodDirectory']), 8)
TORRENT_RESUME_ON_FAILURE = int(CFG['Torrent']['resumeOnFailure'])
TORRENT_RESUME = int(CFG['Torrent']['resume'])
UTORRENTWEBUI = CFG['Torrent']['uTorrentWEBui'] # http://localhost:8090/gui/
UTORRENTUSR = CFG['Torrent']['uTorrentUSR'] # mysecretusr
UTORRENTPWD = CFG['Torrent']['uTorrentPWD'] # mysecretpwr
TRANSMISSIONHOST = CFG['Torrent']['TransmissionHost'] # localhost def configure_remote_paths():
TRANSMISSIONPORT = int(CFG['Torrent']['TransmissionPort']) global REMOTE_PATHS
TRANSMISSIONUSR = CFG['Torrent']['TransmissionUSR'] # mysecretusr
TRANSMISSIONPWD = CFG['Torrent']['TransmissionPWD'] # mysecretpwr
DELUGEHOST = CFG['Torrent']['DelugeHost'] # localhost REMOTE_PATHS = CFG['Network']['mount_points'] or []
DELUGEPORT = int(CFG['Torrent']['DelugePort']) # 8084
DELUGEUSR = CFG['Torrent']['DelugeUSR'] # mysecretusr
DELUGEPWD = CFG['Torrent']['DelugePWD'] # mysecretpwr
QBITTORRENTHOST = CFG['Torrent']['qBittorrenHost'] # localhost if REMOTE_PATHS:
QBITTORRENTPORT = int(CFG['Torrent']['qBittorrentPort']) # 8080 if isinstance(REMOTE_PATHS, list):
QBITTORRENTUSR = CFG['Torrent']['qBittorrentUSR'] # mysecretusr REMOTE_PATHS = ','.join(REMOTE_PATHS) # fix in case this imported as list.
QBITTORRENTPWD = CFG['Torrent']['qBittorrentPWD'] # mysecretpwr
REMOTEPATHS = CFG['Network']['mount_points'] or [] REMOTE_PATHS = (
if REMOTEPATHS: # /volume1/Public/,E:\|/volume2/share/,\\NAS\
if isinstance(REMOTEPATHS, list): tuple(item.split(','))
REMOTEPATHS = ','.join(REMOTEPATHS) # fix in case this imported as list. for item in REMOTE_PATHS.split('|')
REMOTEPATHS = [tuple(item.split(',')) for item in )
REMOTEPATHS.split('|')] # /volume1/Public/,E:\|/volume2/share/,\\NAS\
REMOTEPATHS = [(local.strip(), remote.strip()) for local, remote in
REMOTEPATHS] # strip trailing and leading whitespaces
PLEXSSL = int(CFG['Plex']['plex_ssl']) REMOTE_PATHS = [
PLEXHOST = CFG['Plex']['plex_host'] # strip trailing and leading whitespaces
PLEXPORT = CFG['Plex']['plex_port'] (local.strip(), remote.strip())
PLEXTOKEN = CFG['Plex']['plex_token'] for local, remote in REMOTE_PATHS
PLEXSEC = CFG['Plex']['plex_sections'] or [] ]
if PLEXSEC:
if isinstance(PLEXSEC, list):
PLEXSEC = ','.join(PLEXSEC) # fix in case this imported as list.
PLEXSEC = [tuple(item.split(',')) for item in PLEXSEC.split('|')]
devnull = open(os.devnull, 'w')
try: def configure_niceness():
subprocess.Popen(['nice'], stdout=devnull, stderr=devnull).communicate() global NICENESS
NICENESS.extend(['nice', '-n{0}'.format(int(CFG['Posix']['niceness']))])
except Exception: with open(os.devnull, 'w') as devnull:
pass
try:
subprocess.Popen(['ionice'], stdout=devnull, stderr=devnull).communicate()
try: try:
NICENESS.extend(['ionice', '-c{0}'.format(int(CFG['Posix']['ionice_class']))]) subprocess.Popen(['nice'], stdout=devnull, stderr=devnull).communicate()
except Exception: if len(CFG['Posix']['niceness'].split(',')) > 1: #Allow passing of absolute command, not just value.
pass NICENESS.extend(CFG['Posix']['niceness'].split(','))
try:
if 'ionice' in NICENESS:
NICENESS.extend(['-n{0}'.format(int(CFG['Posix']['ionice_classdata']))])
else: else:
NICENESS.extend(['ionice', '-n{0}'.format(int(CFG['Posix']['ionice_classdata']))]) NICENESS.extend(['nice', '-n{0}'.format(int(CFG['Posix']['niceness']))])
except Exception:
pass
try:
subprocess.Popen(['ionice'], stdout=devnull, stderr=devnull).communicate()
try:
NICENESS.extend(['ionice', '-c{0}'.format(int(CFG['Posix']['ionice_class']))])
except Exception:
pass
try:
if 'ionice' in NICENESS:
NICENESS.extend(['-n{0}'.format(int(CFG['Posix']['ionice_classdata']))])
else:
NICENESS.extend(['ionice', '-n{0}'.format(int(CFG['Posix']['ionice_classdata']))])
except Exception:
pass
except Exception: except Exception:
pass pass
except Exception:
pass
devnull.close()
COMPRESSEDCONTAINER = [re.compile(r'.r\d{2}$', re.I),
re.compile(r'.part\d+.rar$', re.I),
re.compile('.rar$', re.I)]
COMPRESSEDCONTAINER += [re.compile('{0}$'.format(ext), re.I) for ext in CFG['Extensions']['compressedExtensions']]
MEDIACONTAINER = CFG['Extensions']['mediaExtensions']
AUDIOCONTAINER = CFG['Extensions']['audioExtensions']
METACONTAINER = CFG['Extensions']['metaExtensions'] # .nfo,.sub,.srt
if isinstance(COMPRESSEDCONTAINER, str):
COMPRESSEDCONTAINER = COMPRESSEDCONTAINER.split(',')
if isinstance(MEDIACONTAINER, str):
MEDIACONTAINER = MEDIACONTAINER.split(',')
if isinstance(AUDIOCONTAINER, str):
AUDIOCONTAINER = AUDIOCONTAINER.split(',')
if isinstance(METACONTAINER, str):
METACONTAINER = METACONTAINER.split(',')
def configure_containers():
global COMPRESSED_CONTAINER
global MEDIA_CONTAINER
global AUDIO_CONTAINER
global META_CONTAINER
COMPRESSED_CONTAINER = [re.compile(r'.r\d{2}$', re.I),
re.compile(r'.part\d+.rar$', re.I),
re.compile('.rar$', re.I)]
COMPRESSED_CONTAINER += [re.compile('{0}$'.format(ext), re.I) for ext in
CFG['Extensions']['compressedExtensions']]
MEDIA_CONTAINER = CFG['Extensions']['mediaExtensions']
AUDIO_CONTAINER = CFG['Extensions']['audioExtensions']
META_CONTAINER = CFG['Extensions']['metaExtensions'] # .nfo,.sub,.srt
if isinstance(COMPRESSED_CONTAINER, str):
COMPRESSED_CONTAINER = COMPRESSED_CONTAINER.split(',')
if isinstance(MEDIA_CONTAINER, str):
MEDIA_CONTAINER = MEDIA_CONTAINER.split(',')
if isinstance(AUDIO_CONTAINER, str):
AUDIO_CONTAINER = AUDIO_CONTAINER.split(',')
if isinstance(META_CONTAINER, str):
META_CONTAINER = META_CONTAINER.split(',')
def configure_transcoder():
global MOUNTED
global GETSUBS
global TRANSCODE
global DUPLICATE
global CONCAT
global IGNOREEXTENSIONS
global OUTPUTFASTSTART
global GENERALOPTS
global OTHEROPTS
global OUTPUTQUALITYPERCENT
global OUTPUTVIDEOPATH
global PROCESSOUTPUT
global ALANGUAGE
global AINCLUDE
global SLANGUAGES
global SINCLUDE
global SEXTRACT
global SEMBED
global SUBSDIR
global VEXTENSION
global VCODEC
global VPRESET
global VFRAMERATE
global VBITRATE
global VRESOLUTION
global VCRF
global VLEVEL
global VCODEC_ALLOW
global ACODEC
global ACODEC_ALLOW
global ACHANNELS
global ABITRATE
global ACODEC2
global ACODEC2_ALLOW
global ACHANNELS2
global ABITRATE2
global ACODEC3
global ACODEC3_ALLOW
global ACHANNELS3
global ABITRATE3
global SCODEC
global BURN
global HWACCEL
global ALLOWSUBS
global DEFAULTS
MOUNTED = None
GETSUBS = int(CFG['Transcoder']['getSubs']) GETSUBS = int(CFG['Transcoder']['getSubs'])
TRANSCODE = int(CFG['Transcoder']['transcode']) TRANSCODE = int(CFG['Transcoder']['transcode'])
DUPLICATE = int(CFG['Transcoder']['duplicate']) DUPLICATE = int(CFG['Transcoder']['duplicate'])
@ -481,6 +586,11 @@ def initialize(section=None):
GENERALOPTS.append('-fflags') GENERALOPTS.append('-fflags')
if '+genpts' not in GENERALOPTS: if '+genpts' not in GENERALOPTS:
GENERALOPTS.append('+genpts') GENERALOPTS.append('+genpts')
OTHEROPTS = (CFG['Transcoder']['otherOptions'])
if isinstance(OTHEROPTS, str):
OTHEROPTS = OTHEROPTS.split(',')
if OTHEROPTS == ['']:
OTHEROPTS = []
try: try:
OUTPUTQUALITYPERCENT = int(CFG['Transcoder']['outputQualityPercent']) OUTPUTQUALITYPERCENT = int(CFG['Transcoder']['outputQualityPercent'])
except Exception: except Exception:
@ -574,7 +684,7 @@ def initialize(section=None):
codec_alias = { codec_alias = {
'libx264': ['libx264', 'h264', 'h.264', 'AVC', 'MPEG-4'], 'libx264': ['libx264', 'h264', 'h.264', 'AVC', 'MPEG-4'],
'libmp3lame': ['libmp3lame', 'mp3'], 'libmp3lame': ['libmp3lame', 'mp3'],
'libfaac': ['libfaac', 'aac', 'faac'] 'libfaac': ['libfaac', 'aac', 'faac'],
} }
transcode_defaults = { transcode_defaults = {
'iPad': { 'iPad': {
@ -583,7 +693,7 @@ def initialize(section=None):
'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': None, 'ACHANNELS': 2, 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': None, 'ACHANNELS': 2,
'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6,
'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None,
'SCODEC': 'mov_text' 'SCODEC': 'mov_text',
}, },
'iPad-1080p': { 'iPad-1080p': {
'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None,
@ -591,7 +701,7 @@ def initialize(section=None):
'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': None, 'ACHANNELS': 2, 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': None, 'ACHANNELS': 2,
'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6,
'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None,
'SCODEC': 'mov_text' 'SCODEC': 'mov_text',
}, },
'iPad-720p': { 'iPad-720p': {
'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None,
@ -599,7 +709,7 @@ def initialize(section=None):
'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': None, 'ACHANNELS': 2, 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': None, 'ACHANNELS': 2,
'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6,
'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None,
'SCODEC': 'mov_text' 'SCODEC': 'mov_text',
}, },
'Apple-TV': { 'Apple-TV': {
'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None,
@ -607,7 +717,7 @@ def initialize(section=None):
'ACODEC': 'ac3', 'ACODEC_ALLOW': ['ac3'], 'ABITRATE': None, 'ACHANNELS': 6, 'ACODEC': 'ac3', 'ACODEC_ALLOW': ['ac3'], 'ABITRATE': None, 'ACHANNELS': 6,
'ACODEC2': 'aac', 'ACODEC2_ALLOW': ['libfaac'], 'ABITRATE2': None, 'ACHANNELS2': 2, 'ACODEC2': 'aac', 'ACODEC2_ALLOW': ['libfaac'], 'ABITRATE2': None, 'ACHANNELS2': 2,
'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None,
'SCODEC': 'mov_text' 'SCODEC': 'mov_text',
}, },
'iPod': { 'iPod': {
'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None,
@ -615,7 +725,7 @@ def initialize(section=None):
'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2,
'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None,
'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None,
'SCODEC': 'mov_text' 'SCODEC': 'mov_text',
}, },
'iPhone': { 'iPhone': {
'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None,
@ -623,7 +733,7 @@ def initialize(section=None):
'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2,
'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None,
'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None,
'SCODEC': 'mov_text' 'SCODEC': 'mov_text',
}, },
'PS3': { 'PS3': {
'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None,
@ -631,7 +741,7 @@ def initialize(section=None):
'ACODEC': 'ac3', 'ACODEC_ALLOW': ['ac3'], 'ABITRATE': None, 'ACHANNELS': 6, 'ACODEC': 'ac3', 'ACODEC_ALLOW': ['ac3'], 'ABITRATE': None, 'ACHANNELS': 6,
'ACODEC2': 'aac', 'ACODEC2_ALLOW': ['libfaac'], 'ABITRATE2': None, 'ACHANNELS2': 2, 'ACODEC2': 'aac', 'ACODEC2_ALLOW': ['libfaac'], 'ABITRATE2': None, 'ACHANNELS2': 2,
'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None,
'SCODEC': 'mov_text' 'SCODEC': 'mov_text',
}, },
'xbox': { 'xbox': {
'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None,
@ -639,7 +749,7 @@ def initialize(section=None):
'ACODEC': 'ac3', 'ACODEC_ALLOW': ['ac3'], 'ABITRATE': None, 'ACHANNELS': 6, 'ACODEC': 'ac3', 'ACODEC_ALLOW': ['ac3'], 'ABITRATE': None, 'ACHANNELS': 6,
'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None, 'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None,
'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None,
'SCODEC': 'mov_text' 'SCODEC': 'mov_text',
}, },
'Roku-480p': { 'Roku-480p': {
'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None,
@ -647,7 +757,7 @@ def initialize(section=None):
'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2,
'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6,
'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None,
'SCODEC': 'mov_text' 'SCODEC': 'mov_text',
}, },
'Roku-720p': { 'Roku-720p': {
'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None,
@ -655,7 +765,7 @@ def initialize(section=None):
'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2, 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2,
'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6,
'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None,
'SCODEC': 'mov_text' 'SCODEC': 'mov_text',
}, },
'Roku-1080p': { 'Roku-1080p': {
'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None,
@ -663,7 +773,7 @@ def initialize(section=None):
'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 160000, 'ACHANNELS': 2, 'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 160000, 'ACHANNELS': 2,
'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6, 'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6,
'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None, 'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None,
'SCODEC': 'mov_text' 'SCODEC': 'mov_text',
}, },
'mkv': { 'mkv': {
'VEXTENSION': '.mkv', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None, 'VEXTENSION': '.mkv', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None,
@ -673,13 +783,21 @@ def initialize(section=None):
'ACODEC3': 'ac3', 'ACODEC3_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE3': None, 'ACHANNELS3': 8, 'ACODEC3': 'ac3', 'ACODEC3_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE3': None, 'ACHANNELS3': 8,
'SCODEC': 'mov_text' 'SCODEC': 'mov_text'
}, },
'mkv-bluray': {
'VEXTENSION': '.mkv', 'VCODEC': 'libx265', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': None, 'VLEVEL': None,
'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'hevc', 'h265', 'libx265', 'h.265', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4', 'mpeg2video'],
'ACODEC': 'dts', 'ACODEC_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE': None, 'ACHANNELS': 8,
'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None,
'ACODEC3': 'ac3', 'ACODEC3_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE3': None, 'ACHANNELS3': 8,
'SCODEC': 'mov_text',
},
'mp4-scene-release': {
'VEXTENSION': '.mp4', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': None, 'VCRF': 19, 'VLEVEL': '3.1',
'VRESOLUTION': None, 'VCODEC_ALLOW': ['libx264', 'h264', 'h.264', 'AVC', 'avc', 'mpeg4', 'msmpeg4', 'MPEG-4', 'mpeg2video'],
'ACODEC': 'dts', 'ACODEC_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE': None, 'ACHANNELS': 8,
'ACODEC2': None, 'ACODEC2_ALLOW': [], 'ABITRATE2': None, 'ACHANNELS2': None,
'ACODEC3': 'ac3', 'ACODEC3_ALLOW': ['libfaac', 'dts', 'ac3', 'mp2', 'mp3'], 'ABITRATE3': None, 'ACHANNELS3': 8,
'SCODEC': 'mov_text',
},
'MKV-SD': {
'VEXTENSION': '.mkv', 'VCODEC': 'libx264', 'VPRESET': None, 'VFRAMERATE': None, 'VBITRATE': '1200k', 'VCRF': None, 'VLEVEL': None,
@ -687,8 +805,8 @@ def initialize(section=None):
'ACODEC': 'aac', 'ACODEC_ALLOW': ['libfaac'], 'ABITRATE': 128000, 'ACHANNELS': 2,
'ACODEC2': 'ac3', 'ACODEC2_ALLOW': ['ac3'], 'ABITRATE2': None, 'ACHANNELS2': 6,
'ACODEC3': None, 'ACODEC3_ALLOW': [], 'ABITRATE3': None, 'ACHANNELS3': None,
'SCODEC': 'mov_text',
},
}
if DEFAULTS and DEFAULTS in transcode_defaults:
VEXTENSION = transcode_defaults[DEFAULTS]['VEXTENSION']
@ -743,9 +861,33 @@ def initialize(section=None):
if codec in codec_alias:
extra = [item for item in codec_alias[codec] if item not in ACODEC3_ALLOW]
ACODEC3_ALLOW.extend(extra)
codec_alias = {} # clear memory
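As context for the ACODEC3_ALLOW handling just above: each allowed codec is expanded with any aliases that are not already present. A small self-contained sketch of that expansion in Python (the alias table below is illustrative only, not the project's full codec_alias mapping):

codec_alias = {'ac3': ['ac3', 'eac3'], 'dts': ['dts', 'dca']}  # illustrative aliases only
ACODEC3_ALLOW = ['ac3']
for codec in list(ACODEC3_ALLOW):
    if codec in codec_alias:
        # add only the aliases that are not already allowed
        extra = [item for item in codec_alias[codec] if item not in ACODEC3_ALLOW]
        ACODEC3_ALLOW.extend(extra)
print(ACODEC3_ALLOW)  # ['ac3', 'eac3']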
PASSWORDSFILE = CFG['passwords']['PassWordFile']
def configure_passwords_file():
global PASSWORDS_FILE
PASSWORDS_FILE = CFG['passwords']['PassWordFile']
def configure_sections(section):
global SECTIONS
global CATEGORIES
# check for script-defined section and if None set to allow sections
SECTIONS = CFG[
tuple(x for x in CFG if CFG[x].sections and CFG[x].isenabled())
if not section else (section,)
]
for section, subsections in SECTIONS.items():
CATEGORIES.extend([subsection for subsection in subsections if CFG[section][subsection].isenabled()])
CATEGORIES = list(set(CATEGORIES))
def configure_utility_locations():
global SHOWEXTRACT
global SEVENZIP
global FFMPEG
global FFPROBE
global PAR2CMD
# Setup FFMPEG, FFPROBE and SEVENZIP locations
if platform.system() == 'Windows':
@ -766,18 +908,20 @@ def initialize(section=None):
logger.warning('Install ffmpeg with x264 support to enable this feature ...')
else:
if SYS_PATH:
os.environ['PATH'] += ':' + SYS_PATH
try:
SEVENZIP = subprocess.Popen(['which', '7z'], stdout=subprocess.PIPE).communicate()[0].strip().decode()
except Exception:
pass
if not SEVENZIP:
try:
SEVENZIP = subprocess.Popen(['which', '7zr'], stdout=subprocess.PIPE).communicate()[0].strip().decode()
except Exception:
pass
if not SEVENZIP:
try:
SEVENZIP = subprocess.Popen(['which', '7za'], stdout=subprocess.PIPE).communicate()[0].strip().decode()
except Exception:
pass
if not SEVENZIP:
@ -785,7 +929,7 @@ def initialize(section=None):
logger.warning(
'Failed to locate 7zip. Transcoding of disk images and extraction of .7z files will not be possible!')
try:
PAR2CMD = subprocess.Popen(['which', 'par2'], stdout=subprocess.PIPE).communicate()[0].strip().decode()
except Exception:
pass
if not PAR2CMD:
@ -800,12 +944,12 @@ def initialize(section=None):
FFMPEG = os.path.join(FFMPEG_PATH, 'avconv')
else:
try:
FFMPEG = subprocess.Popen(['which', 'ffmpeg'], stdout=subprocess.PIPE).communicate()[0].strip().decode()
except Exception:
pass
if not FFMPEG:
try:
FFMPEG = subprocess.Popen(['which', 'avconv'], stdout=subprocess.PIPE).communicate()[0].strip().decode()
except Exception:
pass
if not FFMPEG:
@ -821,12 +965,12 @@ def initialize(section=None):
FFPROBE = os.path.join(FFMPEG_PATH, 'avprobe')
else:
try:
FFPROBE = subprocess.Popen(['which', 'ffprobe'], stdout=subprocess.PIPE).communicate()[0].strip().decode()
except Exception:
pass
if not FFPROBE:
try:
FFPROBE = subprocess.Popen(['which', 'avprobe'], stdout=subprocess.PIPE).communicate()[0].strip().decode()
except Exception:
pass
if not FFPROBE:
@ -835,47 +979,77 @@ def initialize(section=None):
logger.warning('Failed to locate ffprobe. Video corruption detection disabled!')
logger.warning('Install ffmpeg with x264 support to enable this feature ...')
# check for script-defined section and if None set to allow sections
SECTIONS = CFG[tuple(x for x in CFG if CFG[x].sections and CFG[x].isenabled()) if not section else (section,)]
for section, subsections in SECTIONS.items():
CATEGORIES.extend([subsection for subsection in subsections if CFG[section][subsection].isenabled()])
CATEGORIES = list(set(CATEGORIES))
def check_python():
"""Check End-of-Life status for Python version."""
# Raise if end of life
eol.check()
# Warn if within grace period
grace_period = 365 # days
eol.warn_for_status(grace_period=-grace_period)
# Log warning if within grace period
days_left = eol.lifetime()
if days_left > 0:
logger.info(
'Python v{major}.{minor} will reach end of life in {x} days.'.format(
major=sys.version_info[0],
minor=sys.version_info[1],
x=days_left,
),
)
else:
logger.info(
'Python v{major}.{minor} reached end of life {x} days ago.'.format(
major=sys.version_info[0],
minor=sys.version_info[1],
x=-days_left,
),
)
if days_left <= grace_period:
logger.warning('Please upgrade to a more recent Python version.')
def initialize(section=None):
global __INITIALIZED__
if __INITIALIZED__:
return False
configure_logging()
configure_process()
configure_locale()
# init logging
logger.ntm_log_instance.init_logging()
configure_migration()
configure_logging_part_2()
# check python version
check_python()
# initialize the main SB database
main_db.upgrade_database(main_db.DBConnection(), databases.InitialSchema)
configure_general()
configure_updates()
configure_wake_on_lan()
configure_nzbs(CFG)
configure_torrents(CFG)
configure_remote_paths()
configure_plex(CFG)
configure_niceness()
configure_containers()
configure_transcoder()
configure_passwords_file()
configure_utility_locations()
configure_sections(section)
configure_torrent_class()
configure_groups()
__INITIALIZED__ = True
# finished initializing
return __INITIALIZED__
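The block above replaces a monolithic initialize() with small configure_* steps that each populate module-level globals. As a rough, self-contained sketch of the section/category filtering that configure_sections() performs (the ConfigSection class and sample data below are invented stand-ins for nzbToMedia's CFG object, not its real API):

class ConfigSection(dict):
    """Minimal stand-in for a config section that knows whether it is enabled."""
    def __init__(self, enabled, subsections=None):
        super().__init__(subsections or {})
        self.enabled = enabled
    def isenabled(self):
        return self.enabled

CFG = {
    'SickBeard': ConfigSection(True, {'tv': ConfigSection(True)}),
    'CouchPotato': ConfigSection(True, {'movie': ConfigSection(False)}),
    'Disabled': ConfigSection(False, {'misc': ConfigSection(True)}),
}

def configure_sections(section=None):
    # keep only enabled top-level sections, or the single section passed in
    names = [name for name in CFG if CFG[name].isenabled()] if not section else [section]
    sections = {name: CFG[name] for name in names}
    # categories are the enabled subsections of the selected sections
    categories = sorted({sub for subs in sections.values() for sub in subs if subs[sub].isenabled()})
    return sections, categories

print(configure_sections())  # ({'SickBeard': {...}, 'CouchPotato': {...}}, ['tv'])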
View file
@ -0,0 +1,83 @@
# coding=utf-8
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import requests
import core
from core import logger
from core.auto_process.common import ProcessResult
from core.utils import (
convert_to_ascii,
remote_dir,
server_responding,
)
requests.packages.urllib3.disable_warnings()
def process(section, dir_name, input_name=None, status=0, client_agent='manual', input_category=None):
status = int(status)
cfg = dict(core.CFG[section][input_category])
host = cfg['host']
port = cfg['port']
apikey = cfg['apikey']
ssl = int(cfg.get('ssl', 0))
web_root = cfg.get('web_root', '')
protocol = 'https://' if ssl else 'http://'
remote_path = int(cfg.get('remote_path', 0))
url = '{0}{1}:{2}{3}/api'.format(protocol, host, port, web_root)
if not server_responding(url):
logger.error('Server did not respond. Exiting', section)
return ProcessResult(
message='{0}: Failed to post-process - {0} did not respond.'.format(section),
status_code=1,
)
input_name, dir_name = convert_to_ascii(input_name, dir_name)
params = {
'apikey': apikey,
'cmd': 'forceProcess',
'dir': remote_dir(dir_name) if remote_path else dir_name,
}
logger.debug('Opening URL: {0} with params: {1}'.format(url, params), section)
try:
r = requests.get(url, params=params, verify=False, timeout=(30, 300))
except requests.ConnectionError:
logger.error('Unable to open URL')
return ProcessResult(
message='{0}: Failed to post-process - Unable to connect to {1}'.format(section, section),
status_code=1,
)
logger.postprocess('{0}'.format(r.text), section)
if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
logger.error('Server returned status {0}'.format(r.status_code), section)
return ProcessResult(
message='{0}: Failed to post-process - Server returned status {1}'.format(section, r.status_code),
status_code=1,
)
elif r.text == 'OK':
logger.postprocess('SUCCESS: ForceProcess for {0} has been started in LazyLibrarian'.format(dir_name), section)
return ProcessResult(
message='{0}: Successfully post-processed {1}'.format(section, input_name),
status_code=0,
)
else:
logger.error('FAILED: ForceProcess of {0} has Failed in LazyLibrarian'.format(dir_name), section)
return ProcessResult(
message='{0}: Failed to post-process - Returned log from {0} was not as expected.'.format(section),
status_code=1,
)
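The request this new module issues is a single GET against LazyLibrarian's API with cmd=forceProcess. A hedged sketch of the equivalent standalone call (host, port, API key and directory are placeholders, not values from this repository):

import requests

url = 'http://localhost:5299/api'  # placeholder host/port
params = {
    'apikey': 'YOUR_API_KEY',      # placeholder
    'cmd': 'forceProcess',
    'dir': '/downloads/books/Some Book',
}
response = requests.get(url, params=params, verify=False, timeout=(30, 300))
print(response.status_code, response.text)  # the module above treats a body of 'OK' as success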
View file
@ -1,5 +1,12 @@
# coding=utf-8
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import os
import requests
@ -60,7 +67,7 @@ def process(section, dir_name, input_name=None, status=0, client_agent='manual',
logger.error('Unable to open URL', section)
return ProcessResult(
message='{0}: Failed to post-process - Unable to connect to {0}'.format(section),
status_code=1,
)
if r.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
logger.error('Server returned status {0}'.format(r.status_code), section)
@ -69,7 +76,7 @@ def process(section, dir_name, input_name=None, status=0, client_agent='manual',
status_code=1,
)
result = r.text
if not type(result) == list:
result = result.split('\n')
for line in result:
View file
@ -1,3 +1,10 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import requests
from core import logger
@ -17,7 +24,7 @@ class ProcessResult(object):
def __str__(self):
return 'Processing {0}: {1}'.format(
'succeeded' if bool(self) else 'failed',
self.message,
)
def __repr__(self):
@ -38,7 +45,7 @@ def command_complete(url, params, headers, section):
return None
else:
try:
return r.json()['status']
except (ValueError, KeyError):
# ValueError catches simplejson's JSONDecodeError and json's ValueError
logger.error('{0} did not return expected json data.'.format(section), section)
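Note the key change above: command_complete() now reads 'status' rather than 'state' from the JSON command response. A minimal sketch of that defensive lookup (the sample payloads are invented):

import json

def command_state(payload, section='Section'):
    """Return the command status from a JSON payload, or None when it is malformed."""
    try:
        return json.loads(payload)['status']
    except (ValueError, KeyError):
        print('{0} did not return expected json data.'.format(section))
        return None

print(command_state('{"id": 7, "status": "completed"}'))  # completed
print(command_state('not json'))                          # None (and a warning)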
View file
@ -1,5 +1,12 @@
# coding=utf-8
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import os
import shutil
@ -46,7 +53,7 @@ def process(section, dir_name, input_name=None, status=0, client_agent='manual',
'api_key': apikey,
'mode': 'UPDATEREQUESTEDSTATUS',
'db_id': gamez_id,
'status': download_status,
}
logger.debug('Opening URL: {0}'.format(url), section)
View file
@ -0,0 +1,155 @@
import time
from core import logger
from core.auto_process.common import ProcessResult
from core.auto_process.managers.sickbeard import SickBeard
import requests
class PyMedusa(SickBeard):
"""PyMedusa class."""
def __init__(self, sb_init):
super(PyMedusa, self).__init__(sb_init)
def _create_url(self):
return '{0}{1}:{2}{3}/home/postprocess/processEpisode'.format(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, self.sb_init.web_root)
class PyMedusaApiV1(SickBeard):
"""PyMedusa apiv1 class."""
def __init__(self, sb_init):
super(PyMedusaApiV1, self).__init__(sb_init)
def _create_url(self):
return '{0}{1}:{2}{3}/api/{4}/'.format(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, self.sb_init.web_root, self.sb_init.apikey)
def api_call(self):
self._process_fork_prarams()
url = self._create_url()
logger.debug('Opening URL: {0} with params: {1}'.format(url, self.sb_init.fork_params), self.sb_init.section)
try:
response = self.session.get(url, auth=(self.sb_init.username, self.sb_init.password), params=self.sb_init.fork_params, stream=True, verify=False, timeout=(30, 1800))
except requests.ConnectionError:
logger.error('Unable to open URL: {0}'.format(url), self.sb_init.section)
return ProcessResult(
message='{0}: Failed to post-process - Unable to connect to {0}'.format(self.sb_init.section),
status_code=1,
)
if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
logger.error('Server returned status {0}'.format(response.status_code), self.sb_init.section)
return ProcessResult(
message='{0}: Failed to post-process - Server returned status {1}'.format(self.sb_init.section, response.status_code),
status_code=1,
)
if response.json()['result'] == 'success':
return ProcessResult(
message='{0}: Successfully post-processed {1}'.format(self.sb_init.section, self.input_name),
status_code=0,
)
return ProcessResult(
message='{0}: Failed to post-process - Returned log from {0} was not as expected.'.format(self.sb_init.section),
status_code=1, # We did not receive Success confirmation.
)
class PyMedusaApiV2(SickBeard):
"""PyMedusa apiv2 class."""
def __init__(self, sb_init):
super(PyMedusaApiV2, self).__init__(sb_init)
# Check for an apikey, as this is required with using fork = medusa-apiv2
if not sb_init.apikey:
raise Exception('For the section SickBeard `fork = medusa-apiv2` you also need to configure an `apikey`')
def _create_url(self):
return '{0}{1}:{2}{3}/api/v2/postprocess'.format(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, self.sb_init.web_root)
def _get_identifier_status(self, url):
# Loop through requesting medusa for the status on the queueitem.
try:
response = self.session.get(url, verify=False, timeout=(30, 1800))
except requests.ConnectionError:
logger.error('Unable to get postprocess identifier status', self.sb_init.section)
return False
try:
jdata = response.json()
except ValueError:
return False
return jdata
def api_call(self):
self._process_fork_prarams()
url = self._create_url()
logger.debug('Opening URL: {0}'.format(url), self.sb_init.section)
payload = self.sb_init.fork_params
payload['resource'] = self.sb_init.fork_params['nzbName']
del payload['nzbName']
# Update the session with the x-api-key
self.session.headers.update({
'x-api-key': self.sb_init.apikey,
'Content-type': 'application/json'
})
# Send postprocess request
try:
response = self.session.post(url, json=payload, verify=False, timeout=(30, 1800))
except requests.ConnectionError:
logger.error('Unable to send postprocess request', self.sb_init.section)
return ProcessResult(
message='{0}: Unable to send postprocess request to PyMedusa',
status_code=1,
)
# Get UUID
if response:
try:
jdata = response.json()
except ValueError:
logger.debug('No data returned from provider')
return False
if not jdata.get('status') or not jdata['status'] == 'success':
return False
queueitem_identifier = jdata['queueItem']['identifier']
wait_for = int(self.sb_init.config.get('wait_for', 2))
n = 0
response = {}
url = '{0}/{1}'.format(url, queueitem_identifier)
while n < 12: # set up wait_for minutes to see if command completes..
time.sleep(5 * wait_for)
response = self._get_identifier_status(url)
if response and response.get('success'):
break
if 'error' in response:
break
n += 1
# Log Medusa's PP logs here.
if response.get('output'):
for line in response['output']:
logger.postprocess('{0}'.format(line), self.sb_init.section)
# For now this will most likely always be True. But in the future we could return an exit state
# for when the PP in medusa didn't yield an expected result.
if response.get('success'):
return ProcessResult(
message='{0}: Successfully post-processed {1}'.format(self.sb_init.section, self.input_name),
status_code=0,
)
return ProcessResult(
message='{0}: Failed to post-process - Returned log from {0} was not as expected.'.format(self.sb_init.section),
status_code=1, # We did not receive Success confirmation.
)
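PyMedusaApiV2.api_call() above polls the queue-item endpoint every 5 * wait_for seconds for at most 12 attempts, i.e. roughly wait_for minutes in total. A generic, self-contained sketch of that polling shape (the fake_check function is a stand-in, not the Medusa API):

import time

def poll(check, wait_for=2, attempts=12):
    """Call check() until it reports success or an error, or the attempts run out."""
    response = {}
    for _ in range(attempts):
        time.sleep(5 * wait_for)  # 12 attempts * 5 * wait_for seconds == wait_for minutes
        response = check()
        if response.get('success') or 'error' in response:
            break
    return response

calls = {'n': 0}
def fake_check():
    calls['n'] += 1
    return {'success': calls['n'] >= 3, 'output': ['attempt {0}'.format(calls['n'])]}

print(poll(fake_check, wait_for=0))  # {'success': True, 'output': ['attempt 3']}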
View file
@ -0,0 +1,500 @@
# coding=utf-8
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import copy
import core
from core import logger
from core.auto_process.common import (
ProcessResult,
)
from core.utils import remote_dir
from oauthlib.oauth2 import LegacyApplicationClient
import requests
from requests_oauthlib import OAuth2Session
import six
from six import iteritems
class InitSickBeard(object):
"""Sickbeard init class.
Used to determine which sickbeard fork object to initialize.
"""
def __init__(self, cfg, section, input_category):
# As a bonus let's also put the config on self.
self.config = cfg
self.section = section
self.input_category = input_category
self.host = cfg['host']
self.port = cfg['port']
self.ssl = int(cfg.get('ssl', 0))
self.web_root = cfg.get('web_root', '')
self.protocol = 'https://' if self.ssl else 'http://'
self.username = cfg.get('username', '')
self.password = cfg.get('password', '')
self.apikey = cfg.get('apikey', '')
self.api_version = int(cfg.get('api_version', 2))
self.sso_username = cfg.get('sso_username', '')
self.sso_password = cfg.get('sso_password', '')
self.fork = ''
self.fork_params = None
self.fork_obj = None
replace = {
'medusa': 'Medusa',
'medusa-api': 'Medusa-api',
'sickbeard-api': 'SickBeard-api',
'sickgear': 'SickGear',
'sickchill': 'SickChill',
'stheno': 'Stheno',
}
_val = cfg.get('fork', 'auto')
f1 = replace.get(_val, _val)
try:
self.fork = f1, core.FORKS[f1]
except KeyError:
self.fork = 'auto'
self.protocol = 'https://' if self.ssl else 'http://'
def auto_fork(self):
# auto-detect correct section
# config settings
if core.FORK_SET: # keep using determined fork for multiple (manual) post-processing
logger.info('{section}:{category} fork already set to {fork}'.format
(section=self.section, category=self.input_category, fork=core.FORK_SET[0]))
return core.FORK_SET[0], core.FORK_SET[1]
cfg = dict(core.CFG[self.section][self.input_category])
replace = {
'medusa': 'Medusa',
'medusa-api': 'Medusa-api',
'medusa-apiv1': 'Medusa-api',
'medusa-apiv2': 'Medusa-apiv2',
'sickbeard-api': 'SickBeard-api',
'sickgear': 'SickGear',
'sickchill': 'SickChill',
'stheno': 'Stheno',
}
_val = cfg.get('fork', 'auto')
f1 = replace.get(_val.lower(), _val)
try:
self.fork = f1, core.FORKS[f1]
except KeyError:
self.fork = 'auto'
protocol = 'https://' if self.ssl else 'http://'
if self.section == 'NzbDrone':
logger.info('Attempting to verify {category} fork'.format
(category=self.input_category))
url = '{protocol}{host}:{port}{root}/api/rootfolder'.format(
protocol=protocol, host=self.host, port=self.port, root=self.web_root,
)
headers = {'X-Api-Key': self.apikey}
try:
r = requests.get(url, headers=headers, stream=True, verify=False)
except requests.ConnectionError:
logger.warning('Could not connect to {0}:{1} to verify fork!'.format(self.section, self.input_category))
if not r.ok:
logger.warning('Connection to {section}:{category} failed! '
'Check your configuration'.format
(section=self.section, category=self.input_category))
self.fork = ['default', {}]
elif self.section == 'SiCKRAGE':
logger.info('Attempting to verify {category} fork'.format
(category=self.input_category))
if self.api_version >= 2:
url = '{protocol}{host}:{port}{root}/api/v{api_version}/ping'.format(
protocol=protocol, host=self.host, port=self.port, root=self.web_root, api_version=self.api_version
)
api_params = {}
else:
url = '{protocol}{host}:{port}{root}/api/v{api_version}/{apikey}/'.format(
protocol=protocol, host=self.host, port=self.port, root=self.web_root, api_version=self.api_version, apikey=self.apikey,
)
api_params = {'cmd': 'postprocess', 'help': '1'}
try:
if self.api_version >= 2 and self.sso_username and self.sso_password:
oauth = OAuth2Session(client=LegacyApplicationClient(client_id=core.SICKRAGE_OAUTH_CLIENT_ID))
oauth_token = oauth.fetch_token(client_id=core.SICKRAGE_OAUTH_CLIENT_ID,
token_url=core.SICKRAGE_OAUTH_TOKEN_URL,
username=self.sso_username,
password=self.sso_password)
r = requests.get(url, headers={'Authorization': 'Bearer ' + oauth_token['access_token']}, stream=True, verify=False)
else:
r = requests.get(url, params=api_params, stream=True, verify=False)
if not r.ok:
logger.warning('Connection to {section}:{category} failed! '
'Check your configuration'.format(
section=self.section, category=self.input_category
))
except requests.ConnectionError:
logger.warning('Could not connect to {0}:{1} to verify API version!'.format(self.section, self.input_category))
params = {
'path': None,
'failed': None,
'process_method': None,
'force_replace': None,
'return_data': None,
'type': None,
'delete': None,
'force_next': None,
'is_priority': None
}
self.fork = ['default', params]
elif self.fork == 'auto':
self.detect_fork()
logger.info('{section}:{category} fork set to {fork}'.format
(section=self.section, category=self.input_category, fork=self.fork[0]))
core.FORK_SET = self.fork
self.fork, self.fork_params = self.fork[0], self.fork[1]
# This will create the fork object, and attach to self.fork_obj.
self._init_fork()
return self.fork, self.fork_params
@staticmethod
def _api_check(r, params, rem_params):
try:
json_data = r.json()
except ValueError:
logger.error('Failed to get JSON data from response')
logger.debug('Response received')
raise
try:
json_data = json_data['data']
except KeyError:
logger.error('Failed to get data from JSON')
logger.debug('Response received: {}'.format(json_data))
raise
else:
if six.PY3:
str_type = (str)
else:
str_type = (str, unicode)
if isinstance(json_data, str_type):
return rem_params, False
json_data = json_data.get('data', json_data)
try:
optional_parameters = json_data['optionalParameters'].keys()
# Find excess parameters
excess_parameters = set(params).difference(optional_parameters)
excess_parameters.remove('cmd') # Don't remove cmd from api params
logger.debug('Removing excess parameters: {}'.format(sorted(excess_parameters)))
rem_params.extend(excess_parameters)
return rem_params, True
except:
logger.error('Failed to identify optionalParameters')
return rem_params, False
def detect_fork(self):
"""Try to detect a specific fork."""
detected = False
params = core.ALL_FORKS
rem_params = []
logger.info('Attempting to auto-detect {category} fork'.format(category=self.input_category))
# define the order to test. Default must be first since the default fork doesn't reject parameters.
# then in order of most unique parameters.
if self.apikey:
url = '{protocol}{host}:{port}{root}/api/{apikey}/'.format(
protocol=self.protocol, host=self.host, port=self.port, root=self.web_root, apikey=self.apikey,
)
api_params = {'cmd': 'sg.postprocess', 'help': '1'}
else:
url = '{protocol}{host}:{port}{root}/home/postprocess/'.format(
protocol=self.protocol, host=self.host, port=self.port, root=self.web_root,
)
api_params = {}
# attempting to auto-detect fork
try:
s = requests.Session()
if not self.apikey and self.username and self.password:
login = '{protocol}{host}:{port}{root}/login'.format(
protocol=self.protocol, host=self.host, port=self.port, root=self.web_root)
login_params = {'username': self.username, 'password': self.password}
r = s.get(login, verify=False, timeout=(30, 60))
if r.status_code in [401, 403] and r.cookies.get('_xsrf'):
login_params['_xsrf'] = r.cookies.get('_xsrf')
s.post(login, data=login_params, stream=True, verify=False)
r = s.get(url, auth=(self.username, self.password), params=api_params, verify=False)
except requests.ConnectionError:
logger.info('Could not connect to {section}:{category} to perform auto-fork detection!'.format
(section=self.section, category=self.input_category))
r = []
if r and r.ok:
if self.apikey:
rem_params, found = self._api_check(r, params, rem_params)
if found:
params['cmd'] = 'sg.postprocess'
else: # try different api set for non-SickGear forks.
api_params = {'cmd': 'help', 'subject': 'postprocess'}
try:
if not self.apikey and self.username and self.password:
r = s.get(url, auth=(self.username, self.password), params=api_params, verify=False)
else:
r = s.get(url, params=api_params, verify=False)
except requests.ConnectionError:
logger.info('Could not connect to {section}:{category} to perform auto-fork detection!'.format
(section=self.section, category=self.input_category))
rem_params, found = self._api_check(r, params, rem_params)
params['cmd'] = 'postprocess'
else:
# Find excess parameters
rem_params.extend(
param
for param in params
if 'name="{param}"'.format(param=param) not in r.text
)
# Remove excess params
for param in rem_params:
params.pop(param)
for fork in sorted(iteritems(core.FORKS), reverse=False):
if params == fork[1]:
detected = True
break
if detected:
self.fork = fork
logger.info('{section}:{category} fork auto-detection successful ...'.format
(section=self.section, category=self.input_category))
elif rem_params:
logger.info('{section}:{category} fork auto-detection found custom params {params}'.format
(section=self.section, category=self.input_category, params=params))
self.fork = ['custom', params]
else:
logger.info('{section}:{category} fork auto-detection failed'.format
(section=self.section, category=self.input_category))
self.fork = list(core.FORKS.items())[list(core.FORKS.keys()).index(core.FORK_DEFAULT)]
def _init_fork(self):
# These need to be imported here, to prevent a circular import.
from .pymedusa import PyMedusa, PyMedusaApiV1, PyMedusaApiV2
mapped_forks = {
'Medusa': PyMedusa,
'Medusa-api': PyMedusaApiV1,
'Medusa-apiv2': PyMedusaApiV2
}
logger.debug('Create object for fork {fork}'.format(fork=self.fork))
if self.fork and mapped_forks.get(self.fork):
# Create the fork object and pass self (SickBeardInit) to it for all the data, like Config.
self.fork_obj = mapped_forks[self.fork](self)
else:
logger.debug('{section}:{category} Could not create a fork object for {fork}. Probably class not added yet.'.format(
section=self.section, category=self.input_category, fork=self.fork)
)
class SickBeard(object):
"""Sickbeard base class."""
def __init__(self, sb_init):
"""SB constructor."""
self.sb_init = sb_init
self.session = requests.Session()
self.failed = None
self.status = None
self.input_name = None
self.dir_name = None
self.delete_failed = int(self.sb_init.config.get('delete_failed', 0))
self.nzb_extraction_by = self.sb_init.config.get('nzbExtractionBy', 'Downloader')
self.process_method = self.sb_init.config.get('process_method')
self.remote_path = int(self.sb_init.config.get('remote_path', 0))
self.wait_for = int(self.sb_init.config.get('wait_for', 2))
self.force = int(self.sb_init.config.get('force', 0))
self.delete_on = int(self.sb_init.config.get('delete_on', 0))
self.ignore_subs = int(self.sb_init.config.get('ignore_subs', 0))
self.is_priority = int(self.sb_init.config.get('is_priority', 0))
# get importmode, default to 'Move' for consistency with legacy
self.import_mode = self.sb_init.config.get('importMode', 'Move')
# Keep track of result state
self.success = False
def initialize(self, dir_name, input_name=None, failed=False, client_agent='manual'):
"""We need to call this explicitely because we need some variables.
We can't pass these directly through the constructor.
"""
self.dir_name = dir_name
self.input_name = input_name
self.failed = failed
self.status = int(self.failed)
if self.status > 0 and core.NOEXTRACTFAILED:
self.extract = 0
else:
self.extract = int(self.sb_init.config.get('extract', 0))
if client_agent == core.TORRENT_CLIENT_AGENT and core.USE_LINK == 'move-sym':
self.process_method = 'symlink'
def _create_url(self):
if self.sb_init.apikey:
return '{0}{1}:{2}{3}/api/{4}/'.format(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, self.sb_init.web_root, self.sb_init.apikey)
return '{0}{1}:{2}{3}/home/postprocess/processEpisode'.format(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, self.sb_init.web_root)
def _process_fork_prarams(self):
# configure SB params to pass
fork_params = self.sb_init.fork_params
fork_params['quiet'] = 1
fork_params['proc_type'] = 'manual'
if self.input_name is not None:
fork_params['nzbName'] = self.input_name
for param in copy.copy(fork_params):
if param == 'failed':
if self.failed > 1:
self.failed = 1
fork_params[param] = self.failed
if 'proc_type' in fork_params:
del fork_params['proc_type']
if 'type' in fork_params:
del fork_params['type']
if param == 'return_data':
fork_params[param] = 0
if 'quiet' in fork_params:
del fork_params['quiet']
if param == 'type':
if 'type' in fork_params: # only set if we haven't already deleted for 'failed' above.
fork_params[param] = 'manual'
if 'proc_type' in fork_params:
del fork_params['proc_type']
if param in ['dir_name', 'dir', 'proc_dir', 'process_directory', 'path']:
fork_params[param] = self.dir_name
if self.remote_path:
fork_params[param] = remote_dir(self.dir_name)
# SickChill allows multiple path types. Only return 'path'
if param == 'proc_dir' and 'path' in fork_params:
del fork_params['proc_dir']
if param == 'process_method':
if self.process_method:
fork_params[param] = self.process_method
else:
del fork_params[param]
if param in ['force', 'force_replace']:
if self.force:
fork_params[param] = self.force
else:
del fork_params[param]
if param in ['delete_on', 'delete']:
if self.delete_on:
fork_params[param] = self.delete_on
else:
del fork_params[param]
if param == 'ignore_subs':
if self.ignore_subs:
fork_params[param] = self.ignore_subs
else:
del fork_params[param]
if param == 'is_priority':
if self.is_priority:
fork_params[param] = self.is_priority
else:
del fork_params[param]
if param == 'force_next':
fork_params[param] = 1
# delete any unused params so we don't pass them to SB by mistake
[fork_params.pop(k) for k, v in list(fork_params.items()) if v is None]
def api_call(self):
"""Perform a base sickbeard api call."""
self._process_fork_prarams()
url = self._create_url()
logger.debug('Opening URL: {0} with params: {1}'.format(url, self.sb_init.fork_params), self.sb_init.section)
try:
if not self.sb_init.apikey and self.sb_init.username and self.sb_init.password:
# If not using the api, we need to login using user/pass first.
login = '{0}{1}:{2}{3}/login'.format(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, self.sb_init.web_root)
login_params = {'username': self.sb_init.username, 'password': self.sb_init.password}
r = self.session.get(login, verify=False, timeout=(30, 60))
if r.status_code in [401, 403] and r.cookies.get('_xsrf'):
login_params['_xsrf'] = r.cookies.get('_xsrf')
self.session.post(login, data=login_params, stream=True, verify=False, timeout=(30, 60))
response = self.session.get(url, auth=(self.sb_init.username, self.sb_init.password), params=self.sb_init.fork_params, stream=True, verify=False, timeout=(30, 1800))
except requests.ConnectionError:
logger.error('Unable to open URL: {0}'.format(url), self.sb_init.section)
return ProcessResult(
message='{0}: Failed to post-process - Unable to connect to {0}'.format(self.sb_init.section),
status_code=1,
)
if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
logger.error('Server returned status {0}'.format(response.status_code), self.sb_init.section)
return ProcessResult(
message='{0}: Failed to post-process - Server returned status {1}'.format(self.sb_init.section, response.status_code),
status_code=1,
)
return self.process_response(response)
def process_response(self, response):
"""Iterate over the lines returned, and log.
:param response: Streamed Requests response object.
This method will need to be overwritten in the forks, for alternative response handling.
"""
for line in response.iter_lines():
if line:
line = line.decode('utf-8')
logger.postprocess('{0}'.format(line), self.sb_init.section)
# if 'Moving file from' in line:
# input_name = os.path.split(line)[1]
# if 'added to the queue' in line:
# queued = True
# For the refactoring i'm only considering vanilla sickbeard, as for the base class.
if 'Processing succeeded' in line or 'Successfully processed' in line:
self.success = True
if self.success:
return ProcessResult(
message='{0}: Successfully post-processed {1}'.format(self.sb_init.section, self.input_name),
status_code=0,
)
return ProcessResult(
message='{0}: Failed to post-process - Returned log from {0} was not as expected.'.format(self.sb_init.section),
status_code=1, # We did not receive Success confirmation.
)
View file
@ -1,5 +1,12 @@
# coding=utf-8
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import json
import os
import time
@ -8,9 +15,23 @@ import requests
import core
from core import logger, transcoder
from core.auto_process.common import (
ProcessResult,
command_complete,
completed_download_handling,
)
from core.plugins.downloaders.nzb.utils import report_nzb
from core.plugins.subtitles import import_subs, rename_subs
from core.scene_exceptions import process_all_exceptions
from core.utils import (
convert_to_ascii,
find_download,
find_imdbid,
list_media_files,
remote_dir,
remove_dir,
server_responding,
)
requests.packages.urllib3.disable_warnings()
@ -38,19 +59,22 @@ def process(section, dir_name, input_name=None, status=0, client_agent='manual',
remote_path = int(cfg.get('remote_path', 0))
protocol = 'https://' if ssl else 'http://'
omdbapikey = cfg.get('omdbapikey', '')
no_status_check = int(cfg.get('no_status_check', 0))
status = int(status)
if status > 0 and core.NOEXTRACTFAILED:
extract = 0
else:
extract = int(cfg.get('extract', 0))
imdbid, dir_name = find_imdbid(dir_name, input_name, omdbapikey)
if section == 'CouchPotato':
base_url = '{0}{1}:{2}{3}/api/{4}/'.format(protocol, host, port, web_root, apikey)
if section == 'Radarr':
base_url = '{0}{1}:{2}{3}/api/v3/command'.format(protocol, host, port, web_root)
url2 = '{0}{1}:{2}{3}/api/v3/config/downloadClient'.format(protocol, host, port, web_root)
headers = {'X-Api-Key': apikey, 'Content-Type': 'application/json'}
if section == 'Watcher3':
base_url = '{0}{1}:{2}{3}/postprocessing'.format(protocol, host, port, web_root)
if not apikey:
logger.info('No CouchPotato or Radarr apikey entered. Performing transcoder functions only')
release = None
@ -100,24 +124,32 @@ def process(section, dir_name, input_name=None, status=0, client_agent='manual',
input_name, dir_name = convert_to_ascii(input_name, dir_name)
good_files = 0
valid_files = 0
num_files = 0
# Check video files for corruption
for video in list_media_files(dir_name, media=True, audio=False, meta=False, archives=False):
num_files += 1
if transcoder.is_video_good(video, status):
good_files += 1
if not core.REQUIRE_LAN or transcoder.is_video_good(video, status, require_lan=core.REQUIRE_LAN):
valid_files += 1
import_subs(video)
rename_subs(dir_name)
if num_files and valid_files == num_files:
if status:
logger.info('Status shown as failed from Downloader, but {0} valid video files found. Setting as success.'.format(good_files), section)
status = 0
elif num_files and valid_files < num_files:
logger.info('Status shown as success from Downloader, but corrupt video files found. Setting as failed.', section)
if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
print('[NZB] MARK=BAD')
if good_files == num_files:
logger.debug('Video marked as failed due to missing required language: {0}'.format(core.REQUIRE_LAN), section)
else:
logger.debug('Video marked as failed due to missing playable audio or video', section)
if good_files < num_files and failure_link: # only report corrupt files
failure_link += '&corrupt=true'
status = 1
elif client_agent == 'manual':
logger.warning('No media files found in directory {0} to manually process.'.format(dir_name), section)
return ProcessResult(
@ -152,28 +184,29 @@ def process(section, dir_name, input_name=None, status=0, client_agent='manual',
if not release and '.cp(tt' not in video and imdbid:
video_name, video_ext = os.path.splitext(video)
video2 = '{0}.cp({1}){2}'.format(video_name, imdbid, video_ext)
if not (client_agent in [core.TORRENT_CLIENT_AGENT, 'manual'] and core.USE_LINK == 'move-sym'):
logger.debug('Renaming: {0} to: {1}'.format(video, video2))
os.rename(video, video2)
if not apikey: # If only using Transcoder functions, exit here.
logger.info('No CouchPotato or Radarr or Watcher3 apikey entered. Processing completed.')
return ProcessResult(
message='{0}: Successfully post-processed {1}'.format(section, input_name),
status_code=0,
)
params = {
'media_folder': remote_dir(dir_name) if remote_path else dir_name,
}
if download_id and release_id:
params['downloader'] = downloader or client_agent
params['download_id'] = download_id
if section == 'CouchPotato':
if method == 'manage':
command = 'manage.update'
params.clear()
else:
command = 'renamer.scan'
@ -188,9 +221,20 @@ def process(section, dir_name, input_name=None, status=0, client_agent='manual',
logger.debug('Opening URL: {0} with PARAMS: {1}'.format(base_url, payload), section)
logger.postprocess('Starting DownloadedMoviesScan scan for {0}'.format(input_name), section)
if section == 'Watcher3':
if input_name and os.path.isfile(os.path.join(dir_name, input_name)):
params['media_folder'] = os.path.join(params['media_folder'], input_name)
payload = {'apikey': apikey, 'path': params['media_folder'], 'guid': download_id, 'mode': 'complete'}
if not download_id:
payload.pop('guid')
logger.debug('Opening URL: {0} with PARAMS: {1}'.format(base_url, payload), section)
logger.postprocess('Starting postprocessing scan for {0}'.format(input_name), section)
try:
if section == 'CouchPotato':
r = requests.get(url, params=params, verify=False, timeout=(30, 1800))
elif section == 'Watcher3':
r = requests.post(base_url, data=payload, verify=False, timeout=(30, 1800))
else:
r = requests.post(base_url, data=json.dumps(payload), headers=headers, stream=True, verify=False, timeout=(30, 1800))
except requests.ConnectionError:
@ -215,15 +259,27 @@ def process(section, dir_name, input_name=None, status=0, client_agent='manual',
status_code=0,
)
elif section == 'Radarr':
logger.postprocess('Radarr response: {0}'.format(result['state']))
try:
if isinstance(result, list):
scan_id = int(result[0]['id'])
else:
scan_id = int(result['id'])
logger.debug('Scan started with id: {0}'.format(scan_id), section)
except Exception as e:
logger.warning('No scan id was returned due to: {0}'.format(e), section)
scan_id = None
elif section == 'Watcher3' and result['status'] == 'finished':
logger.postprocess('Watcher3 updated status to {0}'.format(result['tasks']['update_movie_status']))
if result['tasks']['update_movie_status'] == 'Finished':
return ProcessResult(
message='{0}: Successfully post-processed {1}'.format(section, input_name),
status_code=status,
)
else:
return ProcessResult(
message='{0}: Failed to post-process - changed status to {1}'.format(section, result['tasks']['update_movie_status']),
status_code=1,
)
else:
logger.error('FAILED: {0} scan was unable to finish for folder {1}. exiting!'.format(method, dir_name),
section)
@ -238,11 +294,25 @@ def process(section, dir_name, input_name=None, status=0, client_agent='manual',
report_nzb(failure_link, client_agent)
if section == 'Radarr':
logger.postprocess('SUCCESS: Sending failed download to {0} for CDH processing'.format(section), section)
return ProcessResult(
message='{0}: Sending failed download back to {0}'.format(section),
status_code=1, # Return as failed to flag this in the downloader.
) # Return failed flag, but log the event as successful.
elif section == 'Watcher3':
logger.postprocess('Sending failed download to {0} for CDH processing'.format(section), section)
path = remote_dir(dir_name) if remote_path else dir_name
if input_name and os.path.isfile(os.path.join(dir_name, input_name)):
path = os.path.join(path, input_name)
payload = {'apikey': apikey, 'path': path, 'guid': download_id, 'mode': 'failed'}
r = requests.post(base_url, data=payload, verify=False, timeout=(30, 1800))
result = r.json()
logger.postprocess('Watcher3 response: {0}'.format(result))
if result['status'] == 'finished':
return ProcessResult(
message='{0}: Sending failed download back to {0}'.format(section),
status_code=1, # Return as failed to flag this in the downloader.
) # Return failed flag, but log the event as successful.
if delete_failed and os.path.isdir(dir_name) and not os.path.dirname(dir_name) == dir_name:
logger.postprocess('Deleting failed files and folder {0}'.format(dir_name), section)
@ -327,6 +397,12 @@ def process(section, dir_name, input_name=None, status=0, client_agent='manual',
if not release:
download_id = None # we don't want to filter new releases based on this.
if no_status_check:
return ProcessResult(
status_code=0,
message='{0}: Successfully processed but no change in status confirmed'.format(section),
)
# we will now check to see if CPS has finished renaming before returning to TorrentToMedia and unpausing.
timeout = time.time() + 60 * wait_for
while time.time() < timeout: # only wait 2 (default) minutes, then return.
@ -339,9 +415,9 @@ def process(section, dir_name, input_name=None, status=0, client_agent='manual',
if release:
try:
release_id = list(release.keys())[0]
release_status_new = release[release_id]['status']
if release_status_old is None: # we didn't have a release before, but now we do.
title = release[release_id]['title']
logger.postprocess('SUCCESS: Movie {0} has now been added to CouchPotato with release status of [{1}]'.format(
title, str(release_status_new).upper()), section)
return ProcessResult(
@ -350,8 +426,8 @@ def process(section, dir_name, input_name=None, status=0, client_agent='manual',
)
if release_status_new != release_status_old:
logger.postprocess('SUCCESS: Release {0} has now been marked with a status of [{1}]'.format(
release_id, str(release_status_new).upper()), section)
return ProcessResult(
message='{0}: Successfully post-processed {1}'.format(section, input_name),
status_code=0,
@ -359,22 +435,22 @@ def process(section, dir_name, input_name=None, status=0, client_agent='manual',
except Exception:
pass
elif scan_id:
url = '{0}/{1}'.format(base_url, scan_id)
command_status = command_complete(url, params, headers, section)
if command_status:
logger.debug('The Scan command return status: {0}'.format(command_status), section)
if command_status in ['completed']:
logger.debug('The Scan command has completed successfully. Renaming was successful.', section)
return ProcessResult(
message='{0}: Successfully post-processed {1}'.format(section, input_name),
status_code=0,
)
elif command_status in ['failed']:
logger.debug('The Scan command has failed. Renaming was not successful.', section)
# return ProcessResult(
# message='{0}: Failed to post-process {1}'.format(section, input_name),
# status_code=1,
# )
if not os.path.isdir(dir_name):
logger.postprocess('SUCCESS: Input Directory [{0}] has been processed and removed'.format(
@ -406,6 +482,7 @@ def process(section, dir_name, input_name=None, status=0, client_agent='manual',
'{0} does not appear to have changed status after {1} minutes, Please check your logs.'.format(input_name, wait_for),
section,
)
return ProcessResult(
status_code=1,
message='{0}: Failed to post-process - No change in status'.format(section),
@ -489,21 +566,27 @@ def get_release(base_url, imdb_id=None, download_id=None, release_id=None):
# Narrow results by removing old releases by comparing their last_edit field
if len(results) > 1:
rem_id = set()
for id1, x1 in results.items():
for x2 in results.values():
try:
if x2['last_edit'] > x1['last_edit']:
rem_id.add(id1)
except Exception:
continue
for id in rem_id:
results.pop(id)
# Search downloads on clients for a match to try and narrow our results down to 1
if len(results) > 1:
rem_id = set()
for cur_id, x in results.items():
try:
if not find_download(str(x['download_info']['downloader']).lower(), x['download_info']['id']):
rem_id.add(cur_id)
except Exception:
continue
for id in rem_id:
results.pop(id)
return results
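The get_release() change above collects the ids to drop in a rem_id set and pops them after the loop, instead of popping entries from results while iterating over it. A minimal illustration of why (sample data invented):

results = {'a': {'last_edit': 1}, 'b': {'last_edit': 2}, 'c': {'last_edit': 3}}

# Popping while iterating raises on Python 3:
#     for key in results:
#         if results[key]['last_edit'] < 3:
#             results.pop(key)  # RuntimeError: dictionary changed size during iteration

# Collect first, then remove, as the patched get_release() now does.
rem_id = {key for key, value in results.items() if value['last_edit'] < 3}
for key in rem_id:
    results.pop(key)
print(results)  # {'c': {'last_edit': 3}}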
View file
@ -1,5 +1,12 @@
# coding=utf-8
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import json
import os
import time
@ -73,17 +80,17 @@ def process(section, dir_name, input_name=None, status=0, client_agent='manual',
params = {
'apikey': apikey,
'cmd': 'forceProcess',
'dir': remote_dir(dir_name) if remote_path else dir_name,
}
res = force_process(params, url, apikey, input_name, dir_name, section, wait_for)
if res.status_code in [0, 1]:
return res
params = {
'apikey': apikey,
'cmd': 'forceProcess',
'dir': os.path.split(remote_dir(dir_name))[0] if remote_path else os.path.split(dir_name)[0],
}
res = force_process(params, url, apikey, input_name, dir_name, section, wait_for)
@ -117,18 +124,12 @@ def process(section, dir_name, input_name=None, status=0, client_agent='manual',
status_code=1,
)
try:
res = r.json()
scan_id = int(res['id'])
logger.debug('Scan started with id: {0}'.format(scan_id), section)
except Exception as e:
logger.warning('No scan id was returned due to: {0}'.format(e), section)
return ProcessResult(
message='{0}: Failed to post-process - Unable to start scan'.format(section),
status_code=1,
@ -193,7 +194,7 @@ def get_status(url, apikey, dir_name):
params = {
'apikey': apikey,
'cmd': 'getHistory',
}
logger.debug('Opening URL: {0} with PARAMS: {1}'.format(url, params))


@ -1,5 +1,12 @@
# coding=utf-8 # coding=utf-8
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import copy import copy
import errno import errno
import json import json
@ -7,19 +14,34 @@ import os
import time import time
import requests import requests
from oauthlib.oauth2 import LegacyApplicationClient
from requests_oauthlib import OAuth2Session
import core import core
from core import logger, transcoder from core import logger, transcoder
from core.auto_process.common import command_complete, completed_download_handling, ProcessResult from core.auto_process.common import (
from core.forks import auto_fork ProcessResult,
command_complete,
completed_download_handling,
)
from core.auto_process.managers.sickbeard import InitSickBeard
from core.plugins.downloaders.nzb.utils import report_nzb
from core.plugins.subtitles import import_subs, rename_subs
from core.scene_exceptions import process_all_exceptions from core.scene_exceptions import process_all_exceptions
from core.utils import convert_to_ascii, flatten, import_subs, list_media_files, remote_dir, remove_dir, report_nzb, server_responding from core.utils import (
convert_to_ascii,
flatten,
list_media_files,
remote_dir,
remove_dir,
server_responding,
)
requests.packages.urllib3.disable_warnings() requests.packages.urllib3.disable_warnings()
def process(section, dir_name, input_name=None, failed=False, client_agent='manual', download_id=None, input_category=None, failure_link=None): def process(section, dir_name, input_name=None, failed=False, client_agent='manual', download_id=None, input_category=None, failure_link=None):
cfg = dict(core.CFG[section][input_category]) cfg = dict(core.CFG[section][input_category])
host = cfg['host'] host = cfg['host']
@ -30,12 +52,21 @@ def process(section, dir_name, input_name=None, failed=False, client_agent='manu
username = cfg.get('username', '') username = cfg.get('username', '')
password = cfg.get('password', '') password = cfg.get('password', '')
apikey = cfg.get('apikey', '') apikey = cfg.get('apikey', '')
api_version = int(cfg.get('api_version', 2))
sso_username = cfg.get('sso_username', '')
sso_password = cfg.get('sso_password', '')
# Refactor into an OO structure.
# For now let's do both the OO and the serialized code, until everything has been migrated.
init_sickbeard = InitSickBeard(cfg, section, input_category)
if server_responding('{0}{1}:{2}{3}'.format(protocol, host, port, web_root)): if server_responding('{0}{1}:{2}{3}'.format(protocol, host, port, web_root)):
# auto-detect correct fork # auto-detect correct fork
fork, fork_params = auto_fork(section, input_category) # During reactor we also return fork, fork_params. But these are also stored in the object.
elif not username and not apikey: # Should be changed after refactor.
logger.info('No SickBeard username or Sonarr apikey entered. Performing transcoder functions only') fork, fork_params = init_sickbeard.auto_fork()
elif not username and not apikey and not sso_username:
logger.info('No SickBeard / SiCKRAGE username or Sonarr apikey entered. Performing transcoder functions only')
fork, fork_params = 'None', {} fork, fork_params = 'None', {}
else: else:
logger.error('Server did not respond. Exiting', section) logger.error('Server did not respond. Exiting', section)
@ -47,7 +78,7 @@ def process(section, dir_name, input_name=None, failed=False, client_agent='manu
delete_failed = int(cfg.get('delete_failed', 0)) delete_failed = int(cfg.get('delete_failed', 0))
nzb_extraction_by = cfg.get('nzbExtractionBy', 'Downloader') nzb_extraction_by = cfg.get('nzbExtractionBy', 'Downloader')
process_method = cfg.get('process_method') process_method = cfg.get('process_method')
if client_agent == core.TORRENT_CLIENTAGENT and core.USELINK == 'move-sym': if client_agent == core.TORRENT_CLIENT_AGENT and core.USE_LINK == 'move-sym':
process_method = 'symlink' process_method = 'symlink'
remote_path = int(cfg.get('remote_path', 0)) remote_path = int(cfg.get('remote_path', 0))
wait_for = int(cfg.get('wait_for', 2)) wait_for = int(cfg.get('wait_for', 2))
@ -75,12 +106,13 @@ def process(section, dir_name, input_name=None, failed=False, client_agent='manu
# Attempt to create the directory if it doesn't exist and ignore any # Attempt to create the directory if it doesn't exist and ignore any
# error stating that it already exists. This fixes a bug where SickRage # error stating that it already exists. This fixes a bug where SickRage
# won't process the directory because it doesn't exist. # won't process the directory because it doesn't exist.
try: if dir_name:
os.makedirs(dir_name) # Attempt to create the directory try:
except OSError as e: os.makedirs(dir_name) # Attempt to create the directory
# Re-raise the error if it wasn't about the directory not existing except OSError as e:
if e.errno != errno.EEXIST: # Re-raise the error if it wasn't about the directory not existing
raise if e.errno != errno.EEXIST:
raise
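A self-contained sketch of the guarded directory creation above; on Python 3 the same effect can be had with os.makedirs(dir_name, exist_ok=True):

import errno
import os

def ensure_dir(dir_name):
    if not dir_name:
        return  # nothing to create when no directory was passed
    try:
        os.makedirs(dir_name)  # attempt to create the directory
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise  # re-raise anything other than 'already exists'

ensure_dir('example_download_dir')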
if 'process_method' not in fork_params or (client_agent in ['nzbget', 'sabnzbd'] and nzb_extraction_by != 'Destination'): if 'process_method' not in fork_params or (client_agent in ['nzbget', 'sabnzbd'] and nzb_extraction_by != 'Destination'):
if input_name: if input_name:
@@ -99,24 +131,32 @@ def process(section, dir_name, input_name=None, failed=False, client_agent='manu
     # Check video files for corruption
     good_files = 0
+    valid_files = 0
     num_files = 0
     for video in list_media_files(dir_name, media=True, audio=False, meta=False, archives=False):
         num_files += 1
         if transcoder.is_video_good(video, status):
             good_files += 1
-            import_subs(video)
+            if not core.REQUIRE_LAN or transcoder.is_video_good(video, status, require_lan=core.REQUIRE_LAN):
+                valid_files += 1
+                import_subs(video)
+                rename_subs(dir_name)
     if num_files > 0:
-        if good_files == num_files and not status == 0:
+        if valid_files == num_files and not status == 0:
             logger.info('Found Valid Videos. Setting status Success')
             status = 0
             failed = 0
-        if good_files < num_files and status == 0:
+        if valid_files < num_files and status == 0:
             logger.info('Found corrupt videos. Setting status Failed')
             status = 1
             failed = 1
             if 'NZBOP_VERSION' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '14.0':
                 print('[NZB] MARK=BAD')
-            if failure_link:
+            if good_files == num_files:
+                logger.debug('Video marked as failed due to missing required language: {0}'.format(core.REQUIRE_LAN), section)
+            else:
+                logger.debug('Video marked as failed due to missing playable audio or video', section)
+            if good_files < num_files and failure_link:  # only report corrupt files
                 failure_link += '&corrupt=true'
     elif client_agent == 'manual':
         logger.warning('No media files found in directory {0} to manually process.'.format(dir_name), section)
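A stand-alone sketch of the new pass/fail decision, where each file is reduced to a (playable, language_ok) pair standing in for the two transcoder.is_video_good calls above:

def classify(checks, status):
    num_files = len(checks)
    good_files = sum(1 for playable, _ in checks if playable)
    valid_files = sum(1 for playable, lang_ok in checks if playable and lang_ok)
    if num_files and valid_files == num_files and status != 0:
        return 0, 0  # every file playable with the required language: force success
    if num_files and valid_files < num_files and status == 0:
        return 1, 1  # a file is corrupt or missing the language: force failure
    return status, int(status != 0)

print(classify([(True, True), (True, False)], 0))  # -> (1, 1)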
@ -159,62 +199,74 @@ def process(section, dir_name, input_name=None, failed=False, client_agent='manu
status_code=1, status_code=1,
) )
# Part of the refactor
if init_sickbeard.fork_obj:
init_sickbeard.fork_obj.initialize(dir_name, input_name, failed, client_agent='manual')
# configure SB params to pass # configure SB params to pass
fork_params['quiet'] = 1 # We don't want to remove params, for the Forks that have been refactored.
fork_params['proc_type'] = 'manual' # As we don't want to duplicate this part of the code.
if input_name is not None: if not init_sickbeard.fork_obj:
fork_params['nzbName'] = input_name fork_params['quiet'] = 1
fork_params['proc_type'] = 'manual'
if input_name is not None:
fork_params['nzbName'] = input_name
for param in copy.copy(fork_params): for param in copy.copy(fork_params):
if param == 'failed': if param == 'failed':
fork_params[param] = failed if failed > 1:
del fork_params['proc_type'] failed = 1
if 'type' in fork_params: fork_params[param] = failed
del fork_params['type'] if 'proc_type' in fork_params:
del fork_params['proc_type']
if 'type' in fork_params:
del fork_params['type']
if param == 'return_data': if param == 'return_data':
fork_params[param] = 0 fork_params[param] = 0
del fork_params['quiet'] if 'quiet' in fork_params:
del fork_params['quiet']
if param == 'type': if param == 'type':
fork_params[param] = 'manual' if 'type' in fork_params: # only set if we haven't already deleted for 'failed' above.
if 'proc_type' in fork_params: fork_params[param] = 'manual'
del fork_params['proc_type'] if 'proc_type' in fork_params:
del fork_params['proc_type']
if param in ['dir_name', 'dir', 'proc_dir', 'process_directory', 'path']: if param in ['dir_name', 'dir', 'proc_dir', 'process_directory', 'path']:
fork_params[param] = dir_name fork_params[param] = dir_name
if remote_path: if remote_path:
fork_params[param] = remote_dir(dir_name) fork_params[param] = remote_dir(dir_name)
if param == 'process_method': if param == 'process_method':
if process_method: if process_method:
fork_params[param] = process_method fork_params[param] = process_method
else: else:
del fork_params[param] del fork_params[param]
if param in ['force', 'force_replace']: if param in ['force', 'force_replace']:
if force: if force:
fork_params[param] = force fork_params[param] = force
else: else:
del fork_params[param] del fork_params[param]
if param in ['delete_on', 'delete']: if param in ['delete_on', 'delete']:
if delete_on: if delete_on:
fork_params[param] = delete_on fork_params[param] = delete_on
else: else:
del fork_params[param] del fork_params[param]
if param == 'ignore_subs': if param == 'ignore_subs':
if ignore_subs: if ignore_subs:
fork_params[param] = ignore_subs fork_params[param] = ignore_subs
else: else:
del fork_params[param] del fork_params[param]
if param == 'force_next': if param == 'force_next':
fork_params[param] = 1 fork_params[param] = 1
# delete any unused params so we don't pass them to SB by mistake # delete any unused params so we don't pass them to SB by mistake
[fork_params.pop(k) for k, v in fork_params.items() if v is None] [fork_params.pop(k) for k, v in list(fork_params.items()) if v is None]
if status == 0: if status == 0:
if section == 'NzbDrone' and not apikey: if section == 'NzbDrone' and not apikey:
@@ -249,15 +301,25 @@ def process(section, dir_name, input_name=None, failed=False, client_agent='manu
     url = None
     if section == 'SickBeard':
         if apikey:
-            url = '{0}{1}:{2}{3}/api/{4}/?cmd=postprocess'.format(protocol, host, port, web_root, apikey)
+            url = '{0}{1}:{2}{3}/api/{4}/'.format(protocol, host, port, web_root, apikey)
+            if 'cmd' not in fork_params:
+                if 'SickGear' in fork:
+                    fork_params['cmd'] = 'sg.postprocess'
+                else:
+                    fork_params['cmd'] = 'postprocess'
         elif fork == 'Stheno':
-            url = "{0}{1}:{2}{3}/home/postprocess/process_episode".format(protocol, host, port, web_root)
+            url = '{0}{1}:{2}{3}/home/postprocess/process_episode'.format(protocol, host, port, web_root)
         else:
             url = '{0}{1}:{2}{3}/home/postprocess/processEpisode'.format(protocol, host, port, web_root)
+    elif section == 'SiCKRAGE':
+        if api_version >= 2:
+            url = '{0}{1}:{2}{3}/api/v{4}/postprocess'.format(protocol, host, port, web_root, api_version)
+        else:
+            url = '{0}{1}:{2}{3}/api/v{4}/{5}/'.format(protocol, host, port, web_root, api_version, apikey)
     elif section == 'NzbDrone':
-        url = '{0}{1}:{2}{3}/api/command'.format(protocol, host, port, web_root)
-        url2 = '{0}{1}:{2}{3}/api/config/downloadClient'.format(protocol, host, port, web_root)
-        headers = {'X-Api-Key': apikey}
+        url = '{0}{1}:{2}{3}/api/v3/command'.format(protocol, host, port, web_root)
+        url2 = '{0}{1}:{2}{3}/api/v3/config/downloadClient'.format(protocol, host, port, web_root)
+        headers = {'X-Api-Key': apikey, 'Content-Type': 'application/json'}
         # params = {'sortKey': 'series.title', 'page': 1, 'pageSize': 1, 'sortDir': 'asc'}
         if remote_path:
             logger.debug('remote_path: {0}'.format(remote_dir(dir_name)), section)
@ -271,16 +333,45 @@ def process(section, dir_name, input_name=None, failed=False, client_agent='manu
try: try:
if section == 'SickBeard': if section == 'SickBeard':
logger.debug('Opening URL: {0} with params: {1}'.format(url, fork_params), section) if init_sickbeard.fork_obj:
return init_sickbeard.fork_obj.api_call()
else:
s = requests.Session()
logger.debug('Opening URL: {0} with params: {1}'.format(url, fork_params), section)
if not apikey and username and password:
login = '{0}{1}:{2}{3}/login'.format(protocol, host, port, web_root)
login_params = {'username': username, 'password': password}
r = s.get(login, verify=False, timeout=(30, 60))
if r.status_code in [401, 403] and r.cookies.get('_xsrf'):
login_params['_xsrf'] = r.cookies.get('_xsrf')
s.post(login, data=login_params, stream=True, verify=False, timeout=(30, 60))
r = s.get(url, auth=(username, password), params=fork_params, stream=True, verify=False, timeout=(30, 1800))
elif section == 'SiCKRAGE':
s = requests.Session() s = requests.Session()
if not apikey and username and password:
login = '{0}{1}:{2}{3}/login'.format(protocol, host, port, web_root) if api_version >= 2 and sso_username and sso_password:
login_params = {'username': username, 'password': password} oauth = OAuth2Session(client=LegacyApplicationClient(client_id=core.SICKRAGE_OAUTH_CLIENT_ID))
r = s.get(login, verify=False, timeout=(30, 60)) oauth_token = oauth.fetch_token(client_id=core.SICKRAGE_OAUTH_CLIENT_ID,
if r.status_code == 401 and r.cookies.get('_xsrf'): token_url=core.SICKRAGE_OAUTH_TOKEN_URL,
login_params['_xsrf'] = r.cookies.get('_xsrf') username=sso_username,
s.post(login, data=login_params, stream=True, verify=False, timeout=(30, 60)) password=sso_password)
r = s.get(url, auth=(username, password), params=fork_params, stream=True, verify=False, timeout=(30, 1800)) s.headers.update({'Authorization': 'Bearer ' + oauth_token['access_token']})
params = {
'path': fork_params['path'],
'failed': str(bool(fork_params['failed'])).lower(),
'processMethod': 'move',
'forceReplace': str(bool(fork_params['force_replace'])).lower(),
'returnData': str(bool(fork_params['return_data'])).lower(),
'delete': str(bool(fork_params['delete'])).lower(),
'forceNext': str(bool(fork_params['force_next'])).lower(),
'nzbName': fork_params['nzbName']
}
else:
params = fork_params
r = s.get(url, params=params, stream=True, verify=False, timeout=(30, 1800))
elif section == 'NzbDrone': elif section == 'NzbDrone':
logger.debug('Opening URL: {0} with data: {1}'.format(url, data), section) logger.debug('Opening URL: {0} with data: {1}'.format(url, data), section)
r = requests.post(url, data=data, headers=headers, stream=True, verify=False, timeout=(30, 1800)) r = requests.post(url, data=data, headers=headers, stream=True, verify=False, timeout=(30, 1800))
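The SiCKRAGE branch above logs in via an OAuth2 password grant when api_version >= 2. A hedged sketch of that flow using requests-oauthlib; the client id and token URL below are placeholders, not the project's real SICKRAGE_OAUTH_* values:

import requests
from oauthlib.oauth2 import LegacyApplicationClient
from requests_oauthlib import OAuth2Session

CLIENT_ID = 'sickrage-client'                         # placeholder
TOKEN_URL = 'https://auth.example.com/oauth2/token'   # placeholder

def sickrage_session(sso_username, sso_password):
    oauth = OAuth2Session(client=LegacyApplicationClient(client_id=CLIENT_ID))
    token = oauth.fetch_token(
        token_url=TOKEN_URL,
        client_id=CLIENT_ID,
        username=sso_username,
        password=sso_password,
    )
    session = requests.Session()
    session.headers.update({'Authorization': 'Bearer ' + token['access_token']})
    return session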
@ -319,9 +410,15 @@ def process(section, dir_name, input_name=None, failed=False, client_agent='manu
if queued: if queued:
time.sleep(60) time.sleep(60)
elif section == 'SiCKRAGE':
if api_version >= 2:
success = True
else:
if r.json()['result'] == 'success':
success = True
elif section == 'NzbDrone': elif section == 'NzbDrone':
try: try:
res = json.loads(r.content) res = r.json()
scan_id = int(res['id']) scan_id = int(res['id'])
logger.debug('Scan started with id: {0}'.format(scan_id), section) logger.debug('Scan started with id: {0}'.format(scan_id), section)
started = True started = True
@ -370,7 +467,8 @@ def process(section, dir_name, input_name=None, failed=False, client_agent='manu
# status_code=1, # status_code=1,
# ) # )
if completed_download_handling(url2, headers, section=section): if completed_download_handling(url2, headers, section=section):
logger.debug('The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {0}.'.format(section), section) logger.debug('The Scan command did not return status completed, but complete Download Handling is enabled. Passing back to {0}.'.format(section),
section)
return ProcessResult( return ProcessResult(
message='{0}: Complete DownLoad Handling is enabled. Passing back to {0}'.format(section), message='{0}: Complete DownLoad Handling is enabled. Passing back to {0}'.format(section),
status_code=status, status_code=status,


@ -1,5 +1,12 @@
# coding=utf-8 # coding=utf-8
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import copy import copy
import os import os
import shutil import shutil
@ -13,17 +20,17 @@ from core import logger
class Section(configobj.Section, object): class Section(configobj.Section, object):
def isenabled(section): def isenabled(self):
# checks if subsection enabled, returns true/false if subsection specified otherwise returns true/false in {} # checks if subsection enabled, returns true/false if subsection specified otherwise returns true/false in {}
if not section.sections: if not self.sections:
try: try:
value = list(ConfigObj.find_key(section, 'enabled'))[0] value = list(ConfigObj.find_key(self, 'enabled'))[0]
except Exception: except Exception:
value = 0 value = 0
if int(value) == 1: if int(value) == 1:
return section return self
else: else:
to_return = copy.deepcopy(section) to_return = copy.deepcopy(self)
for section_name, subsections in to_return.items(): for section_name, subsections in to_return.items():
for subsection in subsections: for subsection in subsections:
try: try:
@ -40,8 +47,8 @@ class Section(configobj.Section, object):
return to_return return to_return
def findsection(section, key): def findsection(self, key):
to_return = copy.deepcopy(section) to_return = copy.deepcopy(self)
for subsection in to_return: for subsection in to_return:
try: try:
value = list(ConfigObj.find_key(to_return[subsection], key))[0] value = list(ConfigObj.find_key(to_return[subsection], key))[0]
@ -120,7 +127,7 @@ class ConfigObj(configobj.ConfigObj, Section):
shutil.copyfile(core.CONFIG_SPEC_FILE, core.CONFIG_FILE) shutil.copyfile(core.CONFIG_SPEC_FILE, core.CONFIG_FILE)
CFG_OLD = config(core.CONFIG_FILE) CFG_OLD = config(core.CONFIG_FILE)
except Exception as error: except Exception as error:
logger.debug('Error {msg} when copying to .cfg'.format(msg=error)) logger.error('Error {msg} when copying to .cfg'.format(msg=error))
try: try:
# check for autoProcessMedia.cfg.spec and create if it does not exist # check for autoProcessMedia.cfg.spec and create if it does not exist
@ -128,7 +135,7 @@ class ConfigObj(configobj.ConfigObj, Section):
shutil.copyfile(core.CONFIG_FILE, core.CONFIG_SPEC_FILE) shutil.copyfile(core.CONFIG_FILE, core.CONFIG_SPEC_FILE)
CFG_NEW = config(core.CONFIG_SPEC_FILE) CFG_NEW = config(core.CONFIG_SPEC_FILE)
except Exception as error: except Exception as error:
logger.debug('Error {msg} when copying to .spec'.format(msg=error)) logger.error('Error {msg} when copying to .spec'.format(msg=error))
# check for autoProcessMedia.cfg and autoProcessMedia.cfg.spec and if they don't exist return and fail # check for autoProcessMedia.cfg and autoProcessMedia.cfg.spec and if they don't exist return and fail
if CFG_NEW is None or CFG_OLD is None: if CFG_NEW is None or CFG_OLD is None:
@ -136,14 +143,24 @@ class ConfigObj(configobj.ConfigObj, Section):
subsections = {} subsections = {}
# gather all new-style and old-style sub-sections # gather all new-style and old-style sub-sections
for newsection, newitems in CFG_NEW.items(): for newsection in CFG_NEW:
if CFG_NEW[newsection].sections: if CFG_NEW[newsection].sections:
subsections.update({newsection: CFG_NEW[newsection].sections}) subsections.update({newsection: CFG_NEW[newsection].sections})
for section, items in CFG_OLD.items():
for section in CFG_OLD:
if CFG_OLD[section].sections: if CFG_OLD[section].sections:
subsections.update({section: CFG_OLD[section].sections}) subsections.update({section: CFG_OLD[section].sections})
for option, value in CFG_OLD[section].items(): for option, value in CFG_OLD[section].items():
if option in ['category', 'cpsCategory', 'sbCategory', 'hpCategory', 'mlCategory', 'gzCategory', 'raCategory', 'ndCategory']: if option in ['category',
'cpsCategory',
'sbCategory',
'srCategory',
'hpCategory',
'mlCategory',
'gzCategory',
'raCategory',
'ndCategory',
'W3Category']:
if not isinstance(value, list): if not isinstance(value, list):
value = [value] value = [value]
@ -161,7 +178,7 @@ class ConfigObj(configobj.ConfigObj, Section):
if section in ['CouchPotato', 'HeadPhones', 'Gamez', 'Mylar']: if section in ['CouchPotato', 'HeadPhones', 'Gamez', 'Mylar']:
if option in ['username', 'password']: if option in ['username', 'password']:
values.pop(option) values.pop(option)
if section in ['SickBeard', 'Mylar']: if section in ['Mylar']:
if option == 'wait_for': # remove old format if option == 'wait_for': # remove old format
values.pop(option) values.pop(option)
if section in ['SickBeard', 'NzbDrone']: if section in ['SickBeard', 'NzbDrone']:
@ -184,6 +201,9 @@ class ConfigObj(configobj.ConfigObj, Section):
if option == 'forceClean': if option == 'forceClean':
CFG_NEW['General']['force_clean'] = value CFG_NEW['General']['force_clean'] = value
values.pop(option) values.pop(option)
if option == 'qBittorrenHost': # We had a typo that is now fixed.
CFG_NEW['Torrent']['qBittorrentHost'] = value
values.pop(option)
if section in ['Transcoder']: if section in ['Transcoder']:
if option in ['niceness']: if option in ['niceness']:
CFG_NEW['Posix'][option] = value CFG_NEW['Posix'][option] = value
@ -194,6 +214,7 @@ class ConfigObj(configobj.ConfigObj, Section):
elif not value: elif not value:
value = 0 value = 0
values[option] = value values[option] = value
# remove any options that we no longer need so they don't migrate into our new config # remove any options that we no longer need so they don't migrate into our new config
if not list(ConfigObj.find_key(CFG_NEW, option)): if not list(ConfigObj.find_key(CFG_NEW, option)):
try: try:
@ -238,6 +259,20 @@ class ConfigObj(configobj.ConfigObj, Section):
elif section in CFG_OLD.keys(): elif section in CFG_OLD.keys():
process_section(section, subsection) process_section(section, subsection)
# migrate SiCKRAGE settings from SickBeard section to new dedicated SiCKRAGE section
if CFG_OLD['SickBeard']['tv']['enabled'] and CFG_OLD['SickBeard']['tv']['fork'] == 'sickrage-api':
for option, value in iteritems(CFG_OLD['SickBeard']['tv']):
if option in CFG_NEW['SiCKRAGE']['tv']:
CFG_NEW['SiCKRAGE']['tv'][option] = value
# set API version to 1 if API key detected and no SSO username is set
if CFG_NEW['SiCKRAGE']['tv']['apikey'] and not CFG_NEW['SiCKRAGE']['tv']['sso_username']:
CFG_NEW['SiCKRAGE']['tv']['api_version'] = 1
# disable SickBeard section
CFG_NEW['SickBeard']['tv']['enabled'] = 0
CFG_NEW['SickBeard']['tv']['fork'] = 'auto'
# create a backup of our old config # create a backup of our old config
CFG_OLD.filename = '{config}.old'.format(config=core.CONFIG_FILE) CFG_OLD.filename = '{config}.old'.format(config=core.CONFIG_FILE)
CFG_OLD.write() CFG_OLD.write()
@ -264,6 +299,16 @@ class ConfigObj(configobj.ConfigObj, Section):
logger.warning('{x} category is set for CouchPotato and Radarr. ' logger.warning('{x} category is set for CouchPotato and Radarr. '
'Please check your config in NZBGet'.format 'Please check your config in NZBGet'.format
(x=os.environ['NZBPO_RACATEGORY'])) (x=os.environ['NZBPO_RACATEGORY']))
if 'NZBPO_RACATEGORY' in os.environ and 'NZBPO_W3CATEGORY' in os.environ:
if os.environ['NZBPO_RACATEGORY'] == os.environ['NZBPO_W3CATEGORY']:
logger.warning('{x} category is set for Watcher3 and Radarr. '
'Please check your config in NZBGet'.format
(x=os.environ['NZBPO_RACATEGORY']))
if 'NZBPO_W3CATEGORY' in os.environ and 'NZBPO_CPSCATEGORY' in os.environ:
if os.environ['NZBPO_W3CATEGORY'] == os.environ['NZBPO_CPSCATEGORY']:
logger.warning('{x} category is set for CouchPotato and Watcher3. '
'Please check your config in NZBGet'.format
(x=os.environ['NZBPO_W3CATEGORY']))
if 'NZBPO_LICATEGORY' in os.environ and 'NZBPO_HPCATEGORY' in os.environ: if 'NZBPO_LICATEGORY' in os.environ and 'NZBPO_HPCATEGORY' in os.environ:
if os.environ['NZBPO_LICATEGORY'] == os.environ['NZBPO_HPCATEGORY']: if os.environ['NZBPO_LICATEGORY'] == os.environ['NZBPO_HPCATEGORY']:
logger.warning('{x} category is set for HeadPhones and Lidarr. ' logger.warning('{x} category is set for HeadPhones and Lidarr. '
@ -277,8 +322,8 @@ class ConfigObj(configobj.ConfigObj, Section):
cfg_new[section][option] = value cfg_new[section][option] = value
section = 'General' section = 'General'
env_keys = ['AUTO_UPDATE', 'CHECK_MEDIA', 'SAFE_MODE', 'NO_EXTRACT_FAILED'] env_keys = ['AUTO_UPDATE', 'CHECK_MEDIA', 'REQUIRE_LAN', 'SAFE_MODE', 'NO_EXTRACT_FAILED']
cfg_keys = ['auto_update', 'check_media', 'safe_mode', 'no_extract_failed'] cfg_keys = ['auto_update', 'check_media', 'require_lan', 'safe_mode', 'no_extract_failed']
for index in range(len(env_keys)): for index in range(len(env_keys)):
key = 'NZBPO_{index}'.format(index=env_keys[index]) key = 'NZBPO_{index}'.format(index=env_keys[index])
if key in os.environ: if key in os.environ:
@ -314,13 +359,36 @@ class ConfigObj(configobj.ConfigObj, Section):
cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1
if os.environ[env_cat_key] in cfg_new['Radarr'].sections: if os.environ[env_cat_key] in cfg_new['Radarr'].sections:
cfg_new['Radarr'][env_cat_key]['enabled'] = 0 cfg_new['Radarr'][env_cat_key]['enabled'] = 0
if os.environ[env_cat_key] in cfg_new['Watcher3'].sections:
cfg_new['Watcher3'][env_cat_key]['enabled'] = 0
section = 'Watcher3'
env_cat_key = 'NZBPO_W3CATEGORY'
env_keys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'METHOD', 'DELETE_FAILED', 'REMOTE_PATH',
'WAIT_FOR', 'WATCH_DIR', 'OMDBAPIKEY']
cfg_keys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'method', 'delete_failed', 'remote_path',
'wait_for', 'watch_dir', 'omdbapikey']
if env_cat_key in os.environ:
for index in range(len(env_keys)):
key = 'NZBPO_W3{index}'.format(index=env_keys[index])
if key in os.environ:
option = cfg_keys[index]
value = os.environ[key]
if os.environ[env_cat_key] not in cfg_new[section].sections:
cfg_new[section][os.environ[env_cat_key]] = {}
cfg_new[section][os.environ[env_cat_key]][option] = value
cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1
if os.environ[env_cat_key] in cfg_new['Radarr'].sections:
cfg_new['Radarr'][env_cat_key]['enabled'] = 0
if os.environ[env_cat_key] in cfg_new['CouchPotato'].sections:
cfg_new['CouchPotato'][env_cat_key]['enabled'] = 0
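Each of these NZBGet blocks repeats the same mapping from NZBPO_* environment variables into the config. The following generic sketch captures that pattern; the prefix and keys here are illustrative only:

import os

def map_env_options(cfg_section, prefix, env_keys, cfg_keys, category):
    for env_key, cfg_key in zip(env_keys, cfg_keys):
        env_name = 'NZBPO_{0}{1}'.format(prefix, env_key)
        if env_name in os.environ:
            cfg_section.setdefault(category, {})[cfg_key] = os.environ[env_name]
    if category in cfg_section:
        cfg_section[category]['enabled'] = 1
    return cfg_section

os.environ['NZBPO_W3HOST'] = 'localhost'  # simulate an NZBGet script option
print(map_env_options({}, 'W3', ['ENABLED', 'HOST', 'PORT'], ['enabled', 'host', 'port'], 'movie'))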
section = 'SickBeard' section = 'SickBeard'
env_cat_key = 'NZBPO_SBCATEGORY' env_cat_key = 'NZBPO_SBCATEGORY'
env_keys = ['ENABLED', 'HOST', 'PORT', 'APIKEY', 'USERNAME', 'PASSWORD', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', env_keys = ['ENABLED', 'HOST', 'PORT', 'APIKEY', 'USERNAME', 'PASSWORD', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', 'TORRENT_NOLINK',
'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'REMOTE_PATH', 'PROCESS_METHOD'] 'NZBEXTRACTIONBY', 'REMOTE_PATH', 'PROCESS_METHOD']
cfg_keys = ['enabled', 'host', 'port', 'apikey', 'username', 'password', 'ssl', 'web_root', 'watch_dir', 'fork', cfg_keys = ['enabled', 'host', 'port', 'apikey', 'username', 'password', 'ssl', 'web_root', 'watch_dir', 'fork', 'delete_failed', 'Torrent_NoLink',
'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'remote_path', 'process_method'] 'nzbExtractionBy', 'remote_path', 'process_method']
if env_cat_key in os.environ: if env_cat_key in os.environ:
for index in range(len(env_keys)): for index in range(len(env_keys)):
key = 'NZBPO_SB{index}'.format(index=env_keys[index]) key = 'NZBPO_SB{index}'.format(index=env_keys[index])
@ -331,6 +399,29 @@ class ConfigObj(configobj.ConfigObj, Section):
cfg_new[section][os.environ[env_cat_key]] = {} cfg_new[section][os.environ[env_cat_key]] = {}
cfg_new[section][os.environ[env_cat_key]][option] = value cfg_new[section][os.environ[env_cat_key]][option] = value
cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1
if os.environ[env_cat_key] in cfg_new['SiCKRAGE'].sections:
cfg_new['SiCKRAGE'][env_cat_key]['enabled'] = 0
if os.environ[env_cat_key] in cfg_new['NzbDrone'].sections:
cfg_new['NzbDrone'][env_cat_key]['enabled'] = 0
section = 'SiCKRAGE'
env_cat_key = 'NZBPO_SRCATEGORY'
env_keys = ['ENABLED', 'HOST', 'PORT', 'APIKEY', 'API_VERSION', 'SSO_USERNAME', 'SSO_PASSWORD', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK',
'DELETE_FAILED', 'TORRENT_NOLINK', 'NZBEXTRACTIONBY', 'REMOTE_PATH', 'PROCESS_METHOD']
cfg_keys = ['enabled', 'host', 'port', 'apikey', 'api_version', 'sso_username', 'sso_password', 'ssl', 'web_root', 'watch_dir', 'fork',
'delete_failed', 'Torrent_NoLink', 'nzbExtractionBy', 'remote_path', 'process_method']
if env_cat_key in os.environ:
for index in range(len(env_keys)):
key = 'NZBPO_SR{index}'.format(index=env_keys[index])
if key in os.environ:
option = cfg_keys[index]
value = os.environ[key]
if os.environ[env_cat_key] not in cfg_new[section].sections:
cfg_new[section][os.environ[env_cat_key]] = {}
cfg_new[section][os.environ[env_cat_key]][option] = value
cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1
if os.environ[env_cat_key] in cfg_new['SickBeard'].sections:
cfg_new['SickBeard'][env_cat_key]['enabled'] = 0
if os.environ[env_cat_key] in cfg_new['NzbDrone'].sections: if os.environ[env_cat_key] in cfg_new['NzbDrone'].sections:
cfg_new['NzbDrone'][env_cat_key]['enabled'] = 0 cfg_new['NzbDrone'][env_cat_key]['enabled'] = 0
@ -383,6 +474,21 @@ class ConfigObj(configobj.ConfigObj, Section):
cfg_new[section][os.environ[env_cat_key]][option] = value cfg_new[section][os.environ[env_cat_key]][option] = value
cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1
section = 'LazyLibrarian'
env_cat_key = 'NZBPO_LLCATEGORY'
env_keys = ['ENABLED', 'APIKEY', 'HOST', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'REMOTE_PATH']
cfg_keys = ['enabled', 'apikey', 'host', 'port', 'ssl', 'web_root', 'watch_dir', 'remote_path']
if env_cat_key in os.environ:
for index in range(len(env_keys)):
key = 'NZBPO_LL{index}'.format(index=env_keys[index])
if key in os.environ:
option = cfg_keys[index]
value = os.environ[key]
if os.environ[env_cat_key] not in cfg_new[section].sections:
cfg_new[section][os.environ[env_cat_key]] = {}
cfg_new[section][os.environ[env_cat_key]][option] = value
cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1
section = 'NzbDrone' section = 'NzbDrone'
env_cat_key = 'NZBPO_NDCATEGORY' env_cat_key = 'NZBPO_NDCATEGORY'
env_keys = ['ENABLED', 'HOST', 'APIKEY', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED', env_keys = ['ENABLED', 'HOST', 'APIKEY', 'PORT', 'SSL', 'WEB_ROOT', 'WATCH_DIR', 'FORK', 'DELETE_FAILED',
@ -402,6 +508,8 @@ class ConfigObj(configobj.ConfigObj, Section):
cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1
if os.environ[env_cat_key] in cfg_new['SickBeard'].sections: if os.environ[env_cat_key] in cfg_new['SickBeard'].sections:
cfg_new['SickBeard'][env_cat_key]['enabled'] = 0 cfg_new['SickBeard'][env_cat_key]['enabled'] = 0
if os.environ[env_cat_key] in cfg_new['SiCKRAGE'].sections:
cfg_new['SiCKRAGE'][env_cat_key]['enabled'] = 0
section = 'Radarr' section = 'Radarr'
env_cat_key = 'NZBPO_RACATEGORY' env_cat_key = 'NZBPO_RACATEGORY'
@ -422,6 +530,8 @@ class ConfigObj(configobj.ConfigObj, Section):
cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1 cfg_new[section][os.environ[env_cat_key]]['enabled'] = 1
if os.environ[env_cat_key] in cfg_new['CouchPotato'].sections: if os.environ[env_cat_key] in cfg_new['CouchPotato'].sections:
cfg_new['CouchPotato'][env_cat_key]['enabled'] = 0 cfg_new['CouchPotato'][env_cat_key]['enabled'] = 0
if os.environ[env_cat_key] in cfg_new['Watcher3'].sections:
cfg_new['Watcher3'][env_cat_key]['enabled'] = 0
section = 'Lidarr' section = 'Lidarr'
env_cat_key = 'NZBPO_LICATEGORY' env_cat_key = 'NZBPO_LICATEGORY'


@ -1,5 +1,12 @@
# coding=utf-8 # coding=utf-8
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from core import logger, main_db from core import logger, main_db
from core.utils import backup_versioned_file from core.utils import backup_versioned_file
@ -33,7 +40,7 @@ class InitialSchema(main_db.SchemaUpgrade):
queries = [ queries = [
'CREATE TABLE db_version (db_version INTEGER);', 'CREATE TABLE db_version (db_version INTEGER);',
'CREATE TABLE downloads (input_directory TEXT, input_name TEXT, input_hash TEXT, input_id TEXT, client_agent TEXT, status INTEGER, last_update NUMERIC, CONSTRAINT pk_downloadID PRIMARY KEY (input_directory, input_name));', 'CREATE TABLE downloads (input_directory TEXT, input_name TEXT, input_hash TEXT, input_id TEXT, client_agent TEXT, status INTEGER, last_update NUMERIC, CONSTRAINT pk_downloadID PRIMARY KEY (input_directory, input_name));',
'INSERT INTO db_version (db_version) VALUES (2);' 'INSERT INTO db_version (db_version) VALUES (2);',
] ]
for query in queries: for query in queries:
self.connection.action(query) self.connection.action(query)
@ -59,7 +66,7 @@ class InitialSchema(main_db.SchemaUpgrade):
'INSERT INTO downloads2 SELECT * FROM downloads;', 'INSERT INTO downloads2 SELECT * FROM downloads;',
'DROP TABLE IF EXISTS downloads;', 'DROP TABLE IF EXISTS downloads;',
'ALTER TABLE downloads2 RENAME TO downloads;', 'ALTER TABLE downloads2 RENAME TO downloads;',
'INSERT INTO db_version (db_version) VALUES (2);' 'INSERT INTO db_version (db_version) VALUES (2);',
] ]
for query in queries: for query in queries:
self.connection.action(query) self.connection.action(query)


@ -1,5 +1,12 @@
# coding=utf-8 # coding=utf-8
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import os import os
import platform import platform
import shutil import shutil
@ -21,11 +28,11 @@ def extract(file_path, output_destination):
wscriptlocation = os.path.join(os.environ['WINDIR'], 'system32', 'wscript.exe') wscriptlocation = os.path.join(os.environ['WINDIR'], 'system32', 'wscript.exe')
invislocation = os.path.join(core.APP_ROOT, 'core', 'extractor', 'bin', 'invisible.vbs') invislocation = os.path.join(core.APP_ROOT, 'core', 'extractor', 'bin', 'invisible.vbs')
cmd_7zip = [wscriptlocation, invislocation, str(core.SHOWEXTRACT), core.SEVENZIP, 'x', '-y'] cmd_7zip = [wscriptlocation, invislocation, str(core.SHOWEXTRACT), core.SEVENZIP, 'x', '-y']
ext_7zip = ['.rar', '.zip', '.tar.gz', 'tgz', '.tar.bz2', '.tbz', '.tar.lzma', '.tlz', '.7z', '.xz'] ext_7zip = ['.rar', '.zip', '.tar.gz', 'tgz', '.tar.bz2', '.tbz', '.tar.lzma', '.tlz', '.7z', '.xz', '.gz']
extract_commands = dict.fromkeys(ext_7zip, cmd_7zip) extract_commands = dict.fromkeys(ext_7zip, cmd_7zip)
# Using unix # Using unix
else: else:
required_cmds = ['unrar', 'unzip', 'tar', 'unxz', 'unlzma', '7zr', 'bunzip2'] required_cmds = ['unrar', 'unzip', 'tar', 'unxz', 'unlzma', '7zr', 'bunzip2', 'gunzip']
# ## Possible future support: # ## Possible future support:
# gunzip: gz (cmd will delete original archive) # gunzip: gz (cmd will delete original archive)
# ## the following do not extract to dest dir # ## the following do not extract to dest dir
@ -42,6 +49,7 @@ def extract(file_path, output_destination):
'.tar.lzma': ['tar', '--lzma', '-xf'], '.tlz': ['tar', '--lzma', '-xf'], '.tar.lzma': ['tar', '--lzma', '-xf'], '.tlz': ['tar', '--lzma', '-xf'],
'.tar.xz': ['tar', '--xz', '-xf'], '.txz': ['tar', '--xz', '-xf'], '.tar.xz': ['tar', '--xz', '-xf'], '.txz': ['tar', '--xz', '-xf'],
'.7z': ['7zr', 'x'], '.7z': ['7zr', 'x'],
'.gz': ['gunzip'],
} }
# Test command exists and if not, remove # Test command exists and if not, remove
if not os.getenv('TR_TORRENT_DIR'): if not os.getenv('TR_TORRENT_DIR'):
@ -75,6 +83,8 @@ def extract(file_path, output_destination):
# Check if this is a tar # Check if this is a tar
if os.path.splitext(ext[0])[1] == '.tar': if os.path.splitext(ext[0])[1] == '.tar':
cmd = extract_commands['.tar{ext}'.format(ext=ext[1])] cmd = extract_commands['.tar{ext}'.format(ext=ext[1])]
else: # Try gunzip
cmd = extract_commands[ext[1]]
elif ext[1] in ('.1', '.01', '.001') and os.path.splitext(ext[0])[1] in ('.rar', '.zip', '.7z'): elif ext[1] in ('.1', '.01', '.001') and os.path.splitext(ext[0])[1] in ('.rar', '.zip', '.7z'):
cmd = extract_commands[os.path.splitext(ext[0])[1]] cmd = extract_commands[os.path.splitext(ext[0])[1]]
elif ext[1] in ('.cb7', '.cba', '.cbr', '.cbt', '.cbz'): # don't extract these comic book archives. elif ext[1] in ('.cb7', '.cba', '.cbr', '.cbt', '.cbz'): # don't extract these comic book archives.
@ -90,8 +100,8 @@ def extract(file_path, output_destination):
# Create outputDestination folder # Create outputDestination folder
core.make_dir(output_destination) core.make_dir(output_destination)
if core.PASSWORDSFILE and os.path.isfile(os.path.normpath(core.PASSWORDSFILE)): if core.PASSWORDS_FILE and os.path.isfile(os.path.normpath(core.PASSWORDS_FILE)):
passwords = [line.strip() for line in open(os.path.normpath(core.PASSWORDSFILE))] passwords = [line.strip() for line in open(os.path.normpath(core.PASSWORDS_FILE))]
else: else:
passwords = [] passwords = []
@ -121,14 +131,15 @@ def extract(file_path, output_destination):
else: else:
cmd = core.NICENESS + cmd cmd = core.NICENESS + cmd
cmd2 = cmd cmd2 = cmd
cmd2.append('-p-') # don't prompt for password. if not 'gunzip' in cmd: #gunzip doesn't support password
cmd2.append('-p-') # don't prompt for password.
p = Popen(cmd2, stdout=devnull, stderr=devnull, startupinfo=info) # should extract files fine. p = Popen(cmd2, stdout=devnull, stderr=devnull, startupinfo=info) # should extract files fine.
res = p.wait() res = p.wait()
if res == 0: # Both Linux and Windows return 0 for successful. if res == 0: # Both Linux and Windows return 0 for successful.
core.logger.info('EXTRACTOR: Extraction was successful for {file} to {destination}'.format core.logger.info('EXTRACTOR: Extraction was successful for {file} to {destination}'.format
(file=file_path, destination=output_destination)) (file=file_path, destination=output_destination))
success = 1 success = 1
elif len(passwords) > 0: elif len(passwords) > 0 and not 'gunzip' in cmd:
core.logger.info('EXTRACTOR: Attempting to extract with passwords') core.logger.info('EXTRACTOR: Attempting to extract with passwords')
for password in passwords: for password in passwords:
if password == '': # if edited in windows or otherwise if blank lines. if password == '': # if edited in windows or otherwise if blank lines.
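A condensed sketch of the extension-to-command mapping with the new '.gz' handling; the commands mirror the diff, and '-p-' is the 7-Zip/unrar-style "no password prompt" flag that gunzip does not understand:

extract_commands = {
    '.tar.gz': ['tar', '-xzf'],
    '.tgz': ['tar', '-xzf'],
    '.7z': ['7zr', 'x'],
    '.gz': ['gunzip'],
}

def build_command(ext, archive):
    cmd = list(extract_commands[ext])
    if 'gunzip' not in cmd:
        cmd.append('-p-')  # do not prompt for a password (unsupported by gunzip)
    cmd.append(archive)
    return cmd

print(build_command('.gz', 'sample.gz'))  # ['gunzip', 'sample.gz']
print(build_command('.7z', 'sample.7z'))  # ['7zr', 'x', '-p-', 'sample.7z']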


@ -1,114 +0,0 @@
# coding=utf-8
import requests
from six import iteritems
import core
from core import logger
def auto_fork(section, input_category):
# auto-detect correct section
# config settings
cfg = dict(core.CFG[section][input_category])
host = cfg.get('host')
port = cfg.get('port')
username = cfg.get('username')
password = cfg.get('password')
apikey = cfg.get('apikey')
ssl = int(cfg.get('ssl', 0))
web_root = cfg.get('web_root', '')
replace = {'sickrage': 'SickRage', 'sickchill': 'SickChill', 'sickgear': 'SickGear', 'medusa': 'Medusa', 'sickbeard-api': 'SickBeard-api', 'stheno': 'Stheno'}
f1 = replace[cfg.get('fork', 'auto')] if cfg.get('fork', 'auto') in replace else cfg.get('fork', 'auto')
try:
fork = f1, core.FORKS[f1]
except KeyError:
fork = 'auto'
protocol = 'https://' if ssl else 'http://'
detected = False
if section == 'NzbDrone':
logger.info('Attempting to verify {category} fork'.format
(category=input_category))
url = '{protocol}{host}:{port}{root}/api/rootfolder'.format(
protocol=protocol, host=host, port=port, root=web_root)
headers = {'X-Api-Key': apikey}
try:
r = requests.get(url, headers=headers, stream=True, verify=False)
except requests.ConnectionError:
logger.warning('Could not connect to {0}:{1} to verify fork!'.format(section, input_category))
if not r.ok:
logger.warning('Connection to {section}:{category} failed! '
'Check your configuration'.format
(section=section, category=input_category))
fork = ['default', {}]
elif fork == 'auto':
params = core.ALL_FORKS
rem_params = []
logger.info('Attempting to auto-detect {category} fork'.format(category=input_category))
# define the order to test. Default must be first since the default fork doesn't reject parameters.
# then in order of most unique parameters.
if apikey:
url = '{protocol}{host}:{port}{root}/api/{apikey}/?cmd=help&subject=postprocess'.format(
protocol=protocol, host=host, port=port, root=web_root, apikey=apikey)
else:
url = '{protocol}{host}:{port}{root}/home/postprocess/'.format(
protocol=protocol, host=host, port=port, root=web_root)
# attempting to auto-detect fork
try:
s = requests.Session()
if not apikey and username and password:
login = '{protocol}{host}:{port}{root}/login'.format(
protocol=protocol, host=host, port=port, root=web_root)
login_params = {'username': username, 'password': password}
r = s.get(login, verify=False, timeout=(30, 60))
if r.status_code == 401 and r.cookies.get('_xsrf'):
login_params['_xsrf'] = r.cookies.get('_xsrf')
s.post(login, data=login_params, stream=True, verify=False)
r = s.get(url, auth=(username, password), verify=False)
except requests.ConnectionError:
logger.info('Could not connect to {section}:{category} to perform auto-fork detection!'.format
(section=section, category=input_category))
r = []
if r and r.ok:
if apikey:
optional_parameters = []
try:
optional_parameters = r.json()['data']['optionalParameters'].keys()
except Exception:
optional_parameters = r.json()['data']['data']['optionalParameters'].keys()
for param in params:
if param not in optional_parameters:
rem_params.append(param)
else:
for param in params:
if 'name="{param}"'.format(param=param) not in r.text:
rem_params.append(param)
for param in rem_params:
params.pop(param)
for fork in sorted(iteritems(core.FORKS), reverse=False):
if params == fork[1]:
detected = True
break
if detected:
logger.info('{section}:{category} fork auto-detection successful ...'.format
(section=section, category=input_category))
elif rem_params:
logger.info('{section}:{category} fork auto-detection found custom params {params}'.format
(section=section, category=input_category, params=params))
fork = ['custom', params]
else:
logger.info('{section}:{category} fork auto-detection failed'.format
(section=section, category=input_category))
fork = core.FORKS.items()[core.FORKS.keys().index(core.FORK_DEFAULT)]
logger.info('{section}:{category} fork set to {fork}'.format
(section=section, category=input_category, fork=fork[0]))
return fork[0], fork[1]


@ -1,12 +1,17 @@
# coding=utf-8 # coding=utf-8
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import requests import requests
class GitHub(object): class GitHub(object):
""" """Simple api wrapper for the Github API v3."""
Simple api wrapper for the Github API v3.
"""
def __init__(self, github_repo_user, github_repo, branch='master'): def __init__(self, github_repo_user, github_repo, branch='master'):
@ -15,16 +20,14 @@ class GitHub(object):
self.branch = branch self.branch = branch
def _access_api(self, path, params=None): def _access_api(self, path, params=None):
""" """Access API at given an API path and optional parameters."""
Access the API at the path given and with the optional params given.
"""
url = 'https://api.github.com/{path}'.format(path='/'.join(path)) url = 'https://api.github.com/{path}'.format(path='/'.join(path))
data = requests.get(url, params=params, verify=False) data = requests.get(url, params=params, verify=False)
return data.json() if data.ok else [] return data.json() if data.ok else []
def commits(self): def commits(self):
""" """
Uses the API to get a list of the 100 most recent commits from the specified user/repo/branch, starting from HEAD. Get the 100 most recent commits from the specified user/repo/branch, starting from HEAD.
user: The github username of the person whose repo you're querying user: The github username of the person whose repo you're querying
repo: The repo name to query repo: The repo name to query
@ -39,7 +42,7 @@ class GitHub(object):
def compare(self, base, head, per_page=1): def compare(self, base, head, per_page=1):
""" """
Uses the API to get a list of compares between base and head. Get compares between base and head.
user: The github username of the person whose repo you're querying user: The github username of the person whose repo you're querying
repo: The repo name to query repo: The repo name to query
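For reference, a hedged usage sketch of the commits endpoint this wrapper targets, against the public GitHub REST API (the repository below is just GitHub's demo repo):

import requests

def recent_commits(user, repo, branch='master', per_page=5):
    url = 'https://api.github.com/repos/{0}/{1}/commits'.format(user, repo)
    data = requests.get(url, params={'per_page': per_page, 'sha': branch}, timeout=30)
    return data.json() if data.ok else []

for commit in recent_commits('octocat', 'Hello-World'):
    print(commit['sha'][:8], commit['commit']['message'].splitlines()[0])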


@ -1,11 +1,19 @@
# coding=utf-8 # coding=utf-8
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import logging import logging
import os import os
import sys import sys
import threading import threading
import core import core
import functools
# number of log files to keep # number of log files to keep
NUM_LOGS = 3 NUM_LOGS = 3
@ -85,9 +93,9 @@ class NTMRotatingLogHandler(object):
console.setFormatter(DispatchingFormatter( console.setFormatter(DispatchingFormatter(
{'nzbtomedia': logging.Formatter('[%(asctime)s] [%(levelname)s]::%(message)s', '%H:%M:%S'), {'nzbtomedia': logging.Formatter('[%(asctime)s] [%(levelname)s]::%(message)s', '%H:%M:%S'),
'postprocess': logging.Formatter('[%(asctime)s] [%(levelname)s]::%(message)s', '%H:%M:%S'), 'postprocess': logging.Formatter('[%(asctime)s] [%(levelname)s]::%(message)s', '%H:%M:%S'),
'db': logging.Formatter('[%(asctime)s] [%(levelname)s]::%(message)s', '%H:%M:%S') 'db': logging.Formatter('[%(asctime)s] [%(levelname)s]::%(message)s', '%H:%M:%S'),
}, },
logging.Formatter('%(message)s'), )) logging.Formatter('%(message)s')))
# add the handler to the root logger # add the handler to the root logger
logging.getLogger('nzbtomedia').addHandler(console) logging.getLogger('nzbtomedia').addHandler(console)
@ -111,10 +119,7 @@ class NTMRotatingLogHandler(object):
self.close_log(old_handler) self.close_log(old_handler)
def _config_handler(self): def _config_handler(self):
""" """Configure a file handler to log at file_name and return it."""
Configure a file handler to log at file_name and return it.
"""
file_handler = logging.FileHandler(self.log_file_path, encoding='utf-8') file_handler = logging.FileHandler(self.log_file_path, encoding='utf-8')
file_handler.setLevel(DB) file_handler.setLevel(DB)
@ -122,29 +127,29 @@ class NTMRotatingLogHandler(object):
file_handler.setFormatter(DispatchingFormatter( file_handler.setFormatter(DispatchingFormatter(
{'nzbtomedia': logging.Formatter('%(asctime)s %(levelname)-8s::%(message)s', '%Y-%m-%d %H:%M:%S'), {'nzbtomedia': logging.Formatter('%(asctime)s %(levelname)-8s::%(message)s', '%Y-%m-%d %H:%M:%S'),
'postprocess': logging.Formatter('%(asctime)s %(levelname)-8s::%(message)s', '%Y-%m-%d %H:%M:%S'), 'postprocess': logging.Formatter('%(asctime)s %(levelname)-8s::%(message)s', '%Y-%m-%d %H:%M:%S'),
'db': logging.Formatter('%(asctime)s %(levelname)-8s::%(message)s', '%Y-%m-%d %H:%M:%S') 'db': logging.Formatter('%(asctime)s %(levelname)-8s::%(message)s', '%Y-%m-%d %H:%M:%S'),
}, },
logging.Formatter('%(message)s'), )) logging.Formatter('%(message)s')))
return file_handler return file_handler
def _log_file_name(self, i): def _log_file_name(self, i):
""" """
Returns a numbered log file name depending on i. If i==0 it just uses logName, if not it appends Return a numbered log file name depending on i.
it to the extension (blah.log.3 for i == 3)
If i==0 it just uses logName, if not it appends it to the extension
e.g. (blah.log.3 for i == 3)
i: Log number to use i: Log number to use
""" """
return self.log_file_path + ('.{0}'.format(i) if i else '') return self.log_file_path + ('.{0}'.format(i) if i else '')
def _num_logs(self): def _num_logs(self):
""" """
Scans the log folder and figures out how many log files there are already on disk Scan the log folder and figure out how many log files there are already on disk.
Returns: The number of the last used file (eg. mylog.log.3 would return 3). If there are no logs it returns -1 Returns: The number of the last used file (eg. mylog.log.3 would return 3). If there are no logs it returns -1
""" """
cur_log = 0 cur_log = 0
while os.path.isfile(self._log_file_name(cur_log)): while os.path.isfile(self._log_file_name(cur_log)):
cur_log += 1 cur_log += 1
@ -202,9 +207,8 @@ class NTMRotatingLogHandler(object):
ntm_logger = logging.getLogger('nzbtomedia') ntm_logger = logging.getLogger('nzbtomedia')
pp_logger = logging.getLogger('postprocess') pp_logger = logging.getLogger('postprocess')
db_logger = logging.getLogger('db') db_logger = logging.getLogger('db')
setattr(pp_logger, 'postprocess', lambda *args: pp_logger.log(POSTPROCESS, *args)) pp_logger.postprocess = functools.partial(pp_logger.log, POSTPROCESS)
setattr(db_logger, 'db', lambda *args: db_logger.log(DB, *args)) db_logger.db = functools.partial(db_logger.log, DB)
try: try:
if log_level == DEBUG: if log_level == DEBUG:
if core.LOG_DEBUG == 1: if core.LOG_DEBUG == 1:
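The functools.partial form above replaces the old setattr/lambda approach for the custom log levels. A self-contained sketch (the numeric POSTPROCESS value here is an assumption):

import functools
import logging

POSTPROCESS = 21  # assumed custom level between INFO (20) and WARNING (30)
logging.addLevelName(POSTPROCESS, 'POSTPROCESS')
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s::%(message)s')

pp_logger = logging.getLogger('postprocess')
pp_logger.postprocess = functools.partial(pp_logger.log, POSTPROCESS)

pp_logger.postprocess('example message at the custom POSTPROCESS level')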


@ -1,19 +1,55 @@
# coding=utf-8 # coding=utf-8
from __future__ import print_function from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import os.path
import re import re
import sqlite3 import sqlite3
import sys
import time import time
from six import text_type from six import text_type, PY2
import core import core
from core import logger from core import logger
from core import permissions
if PY2:
class Row(sqlite3.Row, object):
"""
Row factory that uses Byte Strings for keys.
The sqlite3.Row in Python 2 does not support unicode keys.
This overrides __getitem__ to attempt to encode the key to bytes first.
"""
def __getitem__(self, item):
"""
Get an item from the row by index or key.
:param item: Index or Key of item to return.
:return: An item from the sqlite3.Row.
"""
try:
# sqlite3.Row column names should be Bytes in Python 2
item = item.encode()
except AttributeError:
pass # assume item is a numeric index
return super(Row, self).__getitem__(item)
else:
from sqlite3 import Row
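On Python 3 the else-branch simply uses sqlite3.Row, which already supports access by column name; a minimal usage sketch:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.row_factory = sqlite3.Row
conn.execute('CREATE TABLE downloads (input_name TEXT, status INTEGER)')
conn.execute('INSERT INTO downloads VALUES (?, ?)', ('example.nzb', 0))
row = conn.execute('SELECT * FROM downloads').fetchone()
print(row['input_name'], row['status'])  # columns are addressable by name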
def db_filename(filename='nzbtomedia.db', suffix=None): def db_filename(filename='nzbtomedia.db', suffix=None):
""" """
Return the correct location of the database file.
@param filename: The sqlite database filename to use. If not specified, @param filename: The sqlite database filename to use. If not specified,
will be made to be nzbtomedia.db will be made to be nzbtomedia.db
@param suffix: The suffix to append to the filename. A '.' will be added @param suffix: The suffix to append to the filename. A '.' will be added
@ -27,13 +63,29 @@ def db_filename(filename='nzbtomedia.db', suffix=None):
class DBConnection(object): class DBConnection(object):
def __init__(self, filename='nzbtomedia.db', suffix=None, row_type=None): def __init__(self, filename='nzbtomedia.db', suffix=None, row_type=None):
self.filename = filename self.filename = filename
self.connection = sqlite3.connect(db_filename(filename), 20) path = db_filename(filename)
if row_type == 'dict': try:
self.connection.row_factory = self._dict_factory self.connection = sqlite3.connect(path, 20)
except sqlite3.OperationalError as error:
if os.path.exists(path):
logger.error('Please check permissions on database: {0}'.format(path))
else:
logger.error('Database file does not exist')
logger.error('Please check permissions on directory: {0}'.format(path))
path = os.path.dirname(path)
mode = permissions.mode(path)
owner, group = permissions.ownership(path)
logger.error(
"=== PERMISSIONS ===========================\n"
" Path : {0}\n"
" Mode : {1}\n"
" Owner: {2}\n"
" Group: {3}\n"
"===========================================".format(path, mode, owner, group),
)
else: else:
self.connection.row_factory = sqlite3.Row self.connection.row_factory = Row
def check_db_version(self): def check_db_version(self):
result = None result = None
@ -183,9 +235,9 @@ class DBConnection(object):
'WHERE {conditions}'.format( 'WHERE {conditions}'.format(
table=table_name, table=table_name,
params=', '.join(gen_params(value_dict)), params=', '.join(gen_params(value_dict)),
conditions=' AND '.join(gen_params(key_dict)) conditions=' AND '.join(gen_params(key_dict)),
), ),
items items,
) )
if self.connection.total_changes == changes_before: if self.connection.total_changes == changes_before:
@ -194,25 +246,18 @@ class DBConnection(object):
'VALUES ({values})'.format( 'VALUES ({values})'.format(
table=table_name, table=table_name,
columns=', '.join(map(text_type, value_dict.keys())), columns=', '.join(map(text_type, value_dict.keys())),
values=', '.join(['?'] * len(value_dict.values())) values=', '.join(['?'] * len(value_dict.values())),
), ),
list(value_dict.values()) list(value_dict.values()),
) )
def table_info(self, table_name): def table_info(self, table_name):
# FIXME ? binding is not supported here, but I cannot find a way to escape a string manually # FIXME ? binding is not supported here, but I cannot find a way to escape a string manually
cursor = self.connection.execute('PRAGMA table_info({0})'.format(table_name)) cursor = self.connection.execute('PRAGMA table_info({0})'.format(table_name))
columns = {} return {
for column in cursor: column['name']: {'type': column['type']}
columns[column['name']] = {'type': column['type']} for column in cursor
return columns }
# http://stackoverflow.com/questions/3300464/how-can-i-get-dict-from-sqlite-query
def _dict_factory(self, cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def sanity_check_database(connection, sanity_check): def sanity_check_database(connection, sanity_check):
@ -233,7 +278,11 @@ class DBSanityCheck(object):
def upgrade_database(connection, schema): def upgrade_database(connection, schema):
logger.log(u'Checking database structure...', logger.MESSAGE) logger.log(u'Checking database structure...', logger.MESSAGE)
_process_upgrade(connection, schema) try:
_process_upgrade(connection, schema)
except Exception as error:
logger.error(error)
sys.exit(1)
def pretty_name(class_name): def pretty_name(class_name):

core/permissions.py (new file)

@ -0,0 +1,88 @@
import os
import sys
import logging
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
WINDOWS = sys.platform == 'win32'
POSIX = not WINDOWS
try:
import pwd
import grp
except ImportError:
if POSIX:
raise
try:
from win32security import GetNamedSecurityInfo
from win32security import LookupAccountSid
from win32security import GROUP_SECURITY_INFORMATION
from win32security import OWNER_SECURITY_INFORMATION
from win32security import SE_FILE_OBJECT
except ImportError:
if WINDOWS:
raise
def mode(path):
"""Get permissions."""
stat_result = os.stat(path) # Get information from path
permissions_mask = 0o777 # Set mask for permissions info
# Get only the permissions part of st_mode as an integer
int_mode = stat_result.st_mode & permissions_mask
oct_mode = oct(int_mode) # Convert to octal representation
return oct_mode[2:] # Return mode but strip octal prefix
def nt_ownership(path):
"""Get the owner and group for a file or directory."""
def fully_qualified_name(sid):
"""Return a fully qualified account name."""
# Look up the account information for the given SID
# https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-lookupaccountsida
name, domain, _acct_type = LookupAccountSid(None, sid)
# Return account information formatted as DOMAIN\ACCOUNT_NAME
return '{}\\{}'.format(domain, name)
# Get the Windows security descriptor for the path
# https://learn.microsoft.com/en-us/windows/win32/api/aclapi/nf-aclapi-getnamedsecurityinfoa
security_descriptor = GetNamedSecurityInfo(
path, # Name of the item to query
SE_FILE_OBJECT, # Type of item to query (file or directory)
# Add OWNER and GROUP security information to result
OWNER_SECURITY_INFORMATION | GROUP_SECURITY_INFORMATION,
)
# Get the Security Identifier for the owner and group from the security descriptor
# https://learn.microsoft.com/en-us/windows/win32/api/securitybaseapi/nf-securitybaseapi-getsecuritydescriptorowner
# https://learn.microsoft.com/en-us/windows/win32/api/securitybaseapi/nf-securitybaseapi-getsecuritydescriptorgroup
owner_sid = security_descriptor.GetSecurityDescriptorOwner()
group_sid = security_descriptor.GetSecurityDescriptorGroup()
# Get the fully qualified account name (e.g. DOMAIN\ACCOUNT_NAME)
owner = fully_qualified_name(owner_sid)
group = fully_qualified_name(group_sid)
return owner, group
def posix_ownership(path):
"""Get the owner and group for a file or directory."""
# Get path information
stat_result = os.stat(path)
# Get account name from path stat result
owner = pwd.getpwuid(stat_result.st_uid).pw_name
group = grp.getgrgid(stat_result.st_gid).gr_name
return owner, group
# Select the ownership function appropriate for the platform
if WINDOWS:
ownership = nt_ownership
else:
ownership = posix_ownership
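A minimal usage sketch for the helpers above (illustrative only; the path is hypothetical and the module is assumed to be importable as core.permissions):
from core import permissions
path = '/downloads/complete/example'  # hypothetical path
owner, group = permissions.ownership(path)
print('mode={0} owner={1} group={2}'.format(permissions.mode(path), owner, group))
# e.g. mode=755 owner=nzbget group=media  (sample output, not from the diff)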

@@ -0,0 +1,5 @@
from core.plugins.downloaders.nzb.configuration import configure_nzbs
from core.plugins.downloaders.torrent.configuration import (
configure_torrents,
configure_torrent_class,
)

@@ -0,0 +1,23 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import core
def configure_nzbs(config):
nzb_config = config['Nzb']
core.NZB_CLIENT_AGENT = nzb_config['clientAgent'] # sabnzbd
core.NZB_DEFAULT_DIRECTORY = nzb_config['default_downloadDirectory']
core.NZB_NO_MANUAL = int(nzb_config['no_manual'], 0)
configure_sabnzbd(nzb_config)
def configure_sabnzbd(config):
core.SABNZBD_HOST = config['sabnzbd_host']
core.SABNZBD_PORT = int(config['sabnzbd_port'] or 8080) # defaults to accommodate NzbGet
core.SABNZBD_APIKEY = config['sabnzbd_apikey']
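An illustrative mapping of the settings these functions read (values are hypothetical, not taken from the changeset; configparser-style string values are assumed):
nzb_section = {
    'clientAgent': 'sabnzbd',
    'default_downloadDirectory': '/downloads/complete',  # hypothetical
    'no_manual': '0',
    'sabnzbd_host': 'localhost',
    'sabnzbd_port': '8080',
    'sabnzbd_apikey': 'changeme',  # hypothetical
}
configure_nzbs({'Nzb': nzb_section})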

@@ -0,0 +1,77 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import os
import requests
import core
from core import logger
def get_nzoid(input_name):
nzoid = None
slots = []
logger.debug('Searching for nzoid from SAbnzbd ...')
if 'http' in core.SABNZBD_HOST:
base_url = '{0}:{1}/api'.format(core.SABNZBD_HOST, core.SABNZBD_PORT)
else:
base_url = 'http://{0}:{1}/api'.format(core.SABNZBD_HOST, core.SABNZBD_PORT)
url = base_url
params = {
'apikey': core.SABNZBD_APIKEY,
'mode': 'queue',
'output': 'json',
}
try:
r = requests.get(url, params=params, verify=False, timeout=(30, 120))
except requests.ConnectionError:
logger.error('Unable to open URL')
return nzoid # failure
try:
result = r.json()
clean_name = os.path.splitext(os.path.split(input_name)[1])[0]
slots.extend([(slot['nzo_id'], slot['filename']) for slot in result['queue']['slots']])
except Exception:
logger.warning('Data from SABnzbd queue could not be parsed')
params['mode'] = 'history'
try:
r = requests.get(url, params=params, verify=False, timeout=(30, 120))
except requests.ConnectionError:
logger.error('Unable to open URL')
return nzoid # failure
try:
result = r.json()
clean_name = os.path.splitext(os.path.split(input_name)[1])[0]
slots.extend([(slot['nzo_id'], slot['name']) for slot in result['history']['slots']])
except Exception:
logger.warning('Data from SABnzbd history could not be parsed')
try:
for nzo_id, name in slots:
if name in [input_name, clean_name]:
nzoid = nzo_id
logger.debug('Found nzoid: {0}'.format(nzoid))
break
except Exception:
logger.warning('Data from SABnzbd could not be parsed')
return nzoid
def report_nzb(failure_link, client_agent):
# Contact indexer site
logger.info('Sending failure notification to indexer site')
if client_agent == 'nzbget':
headers = {'User-Agent': 'NZBGet / nzbToMedia.py'}
elif client_agent == 'sabnzbd':
headers = {'User-Agent': 'SABnzbd / nzbToMedia.py'}
else:
return
try:
requests.post(failure_link, headers=headers, timeout=(30, 300))
except Exception as e:
logger.error('Unable to open URL {0} due to {1}'.format(failure_link, e))
return
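A hedged example of calling the queue lookup above once SABnzbd settings are configured (release name is hypothetical):
core.SABNZBD_HOST = 'localhost'
core.SABNZBD_PORT = 8080
core.SABNZBD_APIKEY = 'changeme'  # hypothetical
nzoid = get_nzoid('Example.Release.2023.1080p.nzb')  # hypothetical release
if nzoid is None:
    logger.debug('Release not found in the SABnzbd queue or history')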

@@ -0,0 +1,97 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import core
from core.plugins.downloaders.torrent.utils import create_torrent_class
def configure_torrents(config):
torrent_config = config['Torrent']
core.TORRENT_CLIENT_AGENT = torrent_config['clientAgent'] # utorrent | deluge | transmission | rtorrent | vuze | qbittorrent | synods | other
core.OUTPUT_DIRECTORY = torrent_config['outputDirectory'] # /abs/path/to/complete/
core.TORRENT_DEFAULT_DIRECTORY = torrent_config['default_downloadDirectory']
core.TORRENT_NO_MANUAL = int(torrent_config['no_manual'], 0)
configure_torrent_linking(torrent_config)
configure_flattening(torrent_config)
configure_torrent_deletion(torrent_config)
configure_torrent_categories(torrent_config)
configure_torrent_permissions(torrent_config)
configure_torrent_resuming(torrent_config)
configure_utorrent(torrent_config)
configure_transmission(torrent_config)
configure_deluge(torrent_config)
configure_qbittorrent(torrent_config)
configure_syno(torrent_config)
def configure_torrent_linking(config):
core.USE_LINK = config['useLink'] # no | hard | sym
def configure_flattening(config):
core.NOFLATTEN = (config['noFlatten'])
if isinstance(core.NOFLATTEN, str):
core.NOFLATTEN = core.NOFLATTEN.split(',')
def configure_torrent_categories(config):
core.CATEGORIES = (config['categories']) # music,music_videos,pictures,software
if isinstance(core.CATEGORIES, str):
core.CATEGORIES = core.CATEGORIES.split(',')
def configure_torrent_resuming(config):
core.TORRENT_RESUME_ON_FAILURE = int(config['resumeOnFailure'])
core.TORRENT_RESUME = int(config['resume'])
def configure_torrent_permissions(config):
core.TORRENT_CHMOD_DIRECTORY = int(str(config['chmodDirectory']), 8)
def configure_torrent_deletion(config):
core.DELETE_ORIGINAL = int(config['deleteOriginal'])
def configure_utorrent(config):
core.UTORRENT_WEB_UI = config['uTorrentWEBui'] # http://localhost:8090/gui/
core.UTORRENT_USER = config['uTorrentUSR'] # mysecretusr
core.UTORRENT_PASSWORD = config['uTorrentPWD'] # mysecretpwr
def configure_transmission(config):
core.TRANSMISSION_HOST = config['TransmissionHost'] # localhost
core.TRANSMISSION_PORT = int(config['TransmissionPort'])
core.TRANSMISSION_USER = config['TransmissionUSR'] # mysecretusr
core.TRANSMISSION_PASSWORD = config['TransmissionPWD'] # mysecretpwr
def configure_syno(config):
core.SYNO_HOST = config['synoHost'] # localhost
core.SYNO_PORT = int(config['synoPort'])
core.SYNO_USER = config['synoUSR'] # mysecretusr
core.SYNO_PASSWORD = config['synoPWD'] # mysecretpwr
def configure_deluge(config):
core.DELUGE_HOST = config['DelugeHost'] # localhost
core.DELUGE_PORT = int(config['DelugePort']) # 8084
core.DELUGE_USER = config['DelugeUSR'] # mysecretusr
core.DELUGE_PASSWORD = config['DelugePWD'] # mysecretpwr
def configure_qbittorrent(config):
core.QBITTORRENT_HOST = config['qBittorrentHost'] # localhost
core.QBITTORRENT_PORT = int(config['qBittorrentPort']) # 8080
core.QBITTORRENT_USER = config['qBittorrentUSR'] # mysecretusr
core.QBITTORRENT_PASSWORD = config['qBittorrentPWD'] # mysecretpwr
def configure_torrent_class():
# create torrent class
core.TORRENT_CLASS = create_torrent_class(core.TORRENT_CLIENT_AGENT)
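As one concrete illustration, chmodDirectory is parsed as an octal string:
configure_torrent_permissions({'chmodDirectory': '775'})  # hypothetical value
# core.TORRENT_CHMOD_DIRECTORY == 0o775 == 509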

@@ -0,0 +1,28 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from deluge_client.client import DelugeRPCClient
import core
from core import logger
def configure_client():
agent = 'deluge'
host = core.DELUGE_HOST
port = core.DELUGE_PORT
user = core.DELUGE_USER
password = core.DELUGE_PASSWORD
logger.debug('Connecting to {0}: http://{1}:{2}'.format(agent, host, port))
client = DelugeRPCClient(host, port, user, password)
try:
client.connect()
except Exception:
logger.error('Failed to connect to Deluge')
else:
return client

@@ -0,0 +1,31 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from qbittorrent import Client as qBittorrentClient
import core
from core import logger
def configure_client():
agent = 'qbittorrent'
host = core.QBITTORRENT_HOST
port = core.QBITTORRENT_PORT
user = core.QBITTORRENT_USER
password = core.QBITTORRENT_PASSWORD
logger.debug(
'Connecting to {0}: http://{1}:{2}'.format(agent, host, port),
)
client = qBittorrentClient('http://{0}:{1}/'.format(host, port))
try:
client.login(user, password)
except Exception:
logger.error('Failed to connect to qBittorrent')
else:
return client

@@ -0,0 +1,27 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from syno.downloadstation import DownloadStation
import core
from core import logger
def configure_client():
agent = 'synology'
host = core.SYNO_HOST
port = core.SYNO_PORT
user = core.SYNO_USER
password = core.SYNO_PASSWORD
logger.debug('Connecting to {0}: http://{1}:{2}'.format(agent, host, port))
try:
client = DownloadStation(host, port, user, password)
except Exception:
logger.error('Failed to connect to synology')
else:
return client

@@ -0,0 +1,27 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from transmissionrpc.client import Client as TransmissionClient
import core
from core import logger
def configure_client():
agent = 'transmission'
host = core.TRANSMISSION_HOST
port = core.TRANSMISSION_PORT
user = core.TRANSMISSION_USER
password = core.TRANSMISSION_PASSWORD
logger.debug('Connecting to {0}: http://{1}:{2}'.format(agent, host, port))
try:
client = TransmissionClient(host, port, user, password)
except Exception:
logger.error('Failed to connect to Transmission')
else:
return client

@@ -0,0 +1,94 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import time
import core
from core import logger
from .deluge import configure_client as deluge_client
from .qbittorrent import configure_client as qbittorrent_client
from .transmission import configure_client as transmission_client
from .utorrent import configure_client as utorrent_client
from .synology import configure_client as synology_client
torrent_clients = {
'deluge': deluge_client,
'qbittorrent': qbittorrent_client,
'transmission': transmission_client,
'utorrent': utorrent_client,
'synods': synology_client,
}
def create_torrent_class(client_agent):
if not core.APP_NAME == 'TorrentToMedia.py':
return # Skip loading Torrent for NZBs.
client = torrent_clients.get(client_agent)
if client:
return client()
def pause_torrent(client_agent, input_hash, input_id, input_name):
logger.debug('Stopping torrent {0} in {1} while processing'.format(input_name, client_agent))
try:
if client_agent == 'utorrent' and core.TORRENT_CLASS != '':
core.TORRENT_CLASS.stop(input_hash)
if client_agent == 'transmission' and core.TORRENT_CLASS != '':
core.TORRENT_CLASS.stop_torrent(input_id)
if client_agent == 'synods' and core.TORRENT_CLASS != '':
core.TORRENT_CLASS.pause_task(input_id)
if client_agent == 'deluge' and core.TORRENT_CLASS != '':
core.TORRENT_CLASS.core.pause_torrent([input_id])
if client_agent == 'qbittorrent' and core.TORRENT_CLASS != '':
core.TORRENT_CLASS.pause(input_hash)
time.sleep(5)
except Exception:
logger.warning('Failed to stop torrent {0} in {1}'.format(input_name, client_agent))
def resume_torrent(client_agent, input_hash, input_id, input_name):
if not core.TORRENT_RESUME == 1:
return
logger.debug('Starting torrent {0} in {1}'.format(input_name, client_agent))
try:
if client_agent == 'utorrent' and core.TORRENT_CLASS != '':
core.TORRENT_CLASS.start(input_hash)
if client_agent == 'transmission' and core.TORRENT_CLASS != '':
core.TORRENT_CLASS.start_torrent(input_id)
if client_agent == 'synods' and core.TORRENT_CLASS != '':
core.TORRENT_CLASS.resume_task(input_id)
if client_agent == 'deluge' and core.TORRENT_CLASS != '':
core.TORRENT_CLASS.core.resume_torrent([input_id])
if client_agent == 'qbittorrent' and core.TORRENT_CLASS != '':
core.TORRENT_CLASS.resume(input_hash)
time.sleep(5)
except Exception:
logger.warning('Failed to start torrent {0} in {1}'.format(input_name, client_agent))
def remove_torrent(client_agent, input_hash, input_id, input_name):
if core.DELETE_ORIGINAL == 1 or core.USE_LINK == 'move':
logger.debug('Deleting torrent {0} from {1}'.format(input_name, client_agent))
try:
if client_agent == 'utorrent' and core.TORRENT_CLASS != '':
core.TORRENT_CLASS.removedata(input_hash)
core.TORRENT_CLASS.remove(input_hash)
if client_agent == 'transmission' and core.TORRENT_CLASS != '':
core.TORRENT_CLASS.remove_torrent(input_id, True)
if client_agent == 'synods' and core.TORRENT_CLASS != '':
core.TORRENT_CLASS.delete_task(input_id)
if client_agent == 'deluge' and core.TORRENT_CLASS != '':
core.TORRENT_CLASS.core.remove_torrent(input_id, True)
if client_agent == 'qbittorrent' and core.TORRENT_CLASS != '':
core.TORRENT_CLASS.delete_permanently(input_hash)
time.sleep(5)
except Exception:
logger.warning('Failed to delete torrent {0} in {1}'.format(input_name, client_agent))
else:
resume_torrent(client_agent, input_hash, input_id, input_name)
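A usage sketch of the pause/resume flow around post-processing (hash, id and name are hypothetical; assumes configure_torrent_class() has already run):
input_hash = 'abc123'           # hypothetical torrent hash
input_id = 'abc123'             # hypothetical torrent id
input_name = 'Example.Torrent'  # hypothetical name
pause_torrent(core.TORRENT_CLIENT_AGENT, input_hash, input_id, input_name)
# ... post-processing happens here ...
remove_torrent(core.TORRENT_CLIENT_AGENT, input_hash, input_id, input_name)  # resumes instead when deletion is disabled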

@@ -0,0 +1,26 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from utorrent.client import UTorrentClient
import core
from core import logger
def configure_client():
agent = 'utorrent'
web_ui = core.UTORRENT_WEB_UI
user = core.UTORRENT_USER
password = core.UTORRENT_PASSWORD
logger.debug('Connecting to {0}: {1}'.format(agent, web_ui))
try:
client = UTorrentClient(web_ui, user, password)
except Exception:
logger.error('Failed to connect to uTorrent')
else:
return client

@@ -0,0 +1,5 @@
from core.plugins.downloaders.torrent.utils import (
pause_torrent,
remove_torrent,
resume_torrent,
)

core/plugins/plex.py (new file, 53 lines)
@@ -0,0 +1,53 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import requests
import core
from core import logger
def configure_plex(config):
core.PLEX_SSL = int(config['Plex']['plex_ssl'])
core.PLEX_HOST = config['Plex']['plex_host']
core.PLEX_PORT = config['Plex']['plex_port']
core.PLEX_TOKEN = config['Plex']['plex_token']
plex_section = config['Plex']['plex_sections'] or []
if plex_section:
if isinstance(plex_section, list):
plex_section = ','.join(plex_section) # fix in case this imported as list.
plex_section = [
tuple(item.split(','))
for item in plex_section.split('|')
]
core.PLEX_SECTION = plex_section
def plex_update(category):
if core.FAILED:
return
url = '{scheme}://{host}:{port}/library/sections/'.format(
scheme='https' if core.PLEX_SSL else 'http',
host=core.PLEX_HOST,
port=core.PLEX_PORT,
)
section = None
if not core.PLEX_SECTION:
return
logger.debug('Attempting to update Plex Library for category {0}.'.format(category), 'PLEX')
for item in core.PLEX_SECTION:
if item[0] == category:
section = item[1]
if section:
url = '{url}{section}/refresh?X-Plex-Token={token}'.format(url=url, section=section, token=core.PLEX_TOKEN)
requests.get(url, timeout=(60, 120), verify=False)
logger.debug('Plex Library has been refreshed.', 'PLEX')
else:
logger.debug('Could not identify section for plex update', 'PLEX')
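The plex_sections value is parsed into (category, section_id) tuples; a hedged example of the expected format (token and ids are hypothetical):
config = {'Plex': {
    'plex_ssl': '0',
    'plex_host': 'localhost',
    'plex_port': '32400',
    'plex_token': 'changeme',         # hypothetical token
    'plex_sections': 'movie,1|tv,2',  # category,section_id pairs separated by |
}}
configure_plex(config)
# core.PLEX_SECTION == [('movie', '1'), ('tv', '2')]
plex_update('movie')  # would refresh Plex library section 1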

core/plugins/subtitles.py (new file, 107 lines)
@@ -0,0 +1,107 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from babelfish import Language
import subliminal
import core
from core import logger
import os
import re
for provider in subliminal.provider_manager.internal_extensions:
if provider not in [str(x) for x in subliminal.provider_manager.list_entry_points()]:
subliminal.provider_manager.register(str(provider))
def import_subs(filename):
if not core.GETSUBS:
return
try:
subliminal.region.configure('dogpile.cache.dbm', arguments={'filename': 'cachefile.dbm'})
except Exception:
pass
languages = set()
for item in core.SLANGUAGES:
try:
languages.add(Language(item))
except Exception:
pass
if not languages:
return
logger.info('Attempting to download subtitles for {0}'.format(filename), 'SUBTITLES')
try:
video = subliminal.scan_video(filename)
subtitles = subliminal.download_best_subtitles({video}, languages)
subliminal.save_subtitles(video, subtitles[video])
for subtitle in subtitles[video]:
subtitle_path = subliminal.subtitle.get_subtitle_path(video.name, subtitle.language)
os.chmod(subtitle_path, 0o644)
except Exception as e:
logger.error('Failed to download subtitles for {0} due to: {1}'.format(filename, e), 'SUBTITLES')
def rename_subs(path):
filepaths = []
sub_ext = ['.srt', '.sub', '.idx']
vidfiles = core.list_media_files(path, media=True, audio=False, meta=False, archives=False)
if not vidfiles or len(vidfiles) > 1: # If there is more than 1 video file, or no video files, we can't rename subs.
return
name = os.path.splitext(os.path.split(vidfiles[0])[1])[0]
for directory, _, filenames in os.walk(path):
for filename in filenames:
filepaths.extend([os.path.join(directory, filename)])
subfiles = [item for item in filepaths if os.path.splitext(item)[1] in sub_ext]
subfiles.sort() #This should sort subtitle names by language (alpha) and Number (where multiple)
renamed = []
for sub in subfiles:
subname, ext = os.path.splitext(os.path.basename(sub))
if name in subname: # The sub file name already includes the video name.
continue
words = re.findall('[a-zA-Z]+',str(subname)) # find whole words in string
# parse the words for language descriptors.
lan = None
for word in words:
try:
if len(word) == 2:
lan = Language.fromalpha2(word.lower())
elif len(word) == 3:
lan = Language(word.lower())
elif len(word) > 3:
lan = Language.fromname(word.lower())
if lan:
break
except: #if we didn't find a language, try next word.
continue
# rename the sub file as name.lan.ext
if not lan:
# could call ffprobe to parse the sub information and get language if lan unknown here.
new_sub_name = name
else:
new_sub_name = '{name}.{lan}'.format(name=name, lan=str(lan))
new_sub = os.path.join(directory, new_sub_name) # full path and name less ext
if '{new_sub}{ext}'.format(new_sub=new_sub, ext=ext) in renamed: # If duplicate names, add unique number before ext.
for i in range(1,len(renamed)+1):
if '{new_sub}.{i}{ext}'.format(new_sub=new_sub, i=i, ext=ext) in renamed:
continue
new_sub = '{new_sub}.{i}'.format(new_sub=new_sub, i=i)
break
new_sub = '{new_sub}{ext}'.format(new_sub=new_sub, ext=ext) # add extension now
if os.path.isfile(new_sub): # Don't copy over existing - final check.
logger.debug('Unable to rename sub file {old} as destination {new} already exists'.format(old=sub, new=new_sub))
continue
logger.debug('Renaming sub file from {old} to {new}'.format
(old=sub, new=new_sub))
renamed.append(new_sub)
try:
os.rename(sub, new_sub)
except Exception as error:
logger.error('Unable to rename sub file due to: {error}'.format(error=error))
return
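Roughly, for a single video 'Example.Movie.2023.mkv' (hypothetical name), the renaming above would map subtitle files along these lines:
# english.srt -> Example.Movie.2023.en.srt
# french.srt  -> Example.Movie.2023.fr.srt
# a second English subtitle -> Example.Movie.2023.en.1.srt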

core/processor/manual.py (new file, 72 lines)
@@ -0,0 +1,72 @@
import os
import core
from core import logger
from core.auto_process.common import ProcessResult
from core.processor import nzb
from core.utils import (
get_dirs,
get_download_info,
)
try:
text_type = unicode
except NameError:
text_type = str
def process():
# Perform Manual Post-Processing
logger.warning(
'Invalid number of arguments received from client, Switching to manual run mode ...')
# Post-Processing Result
result = ProcessResult(
message='',
status_code=0,
)
for section, subsections in core.SECTIONS.items():
for subsection in subsections:
if not core.CFG[section][subsection].isenabled():
continue
for dir_name in get_dirs(section, subsection, link='move'):
logger.info(
'Starting manual run for {0}:{1} - Folder: {2}'.format(
section, subsection, dir_name))
logger.info(
'Checking database for download info for {0} ...'.format(
os.path.basename(dir_name)))
core.DOWNLOAD_INFO = get_download_info(
os.path.basename(dir_name), 0)
if core.DOWNLOAD_INFO:
logger.info('Found download info for {0}, '
'setting variables now ...'.format
(os.path.basename(dir_name)))
client_agent = text_type(
core.DOWNLOAD_INFO[0]['client_agent']) or 'manual'
download_id = text_type(
core.DOWNLOAD_INFO[0]['input_id']) or ''
else:
logger.info('Unable to locate download info for {0}, '
'continuing to try and process this release ...'.format
(os.path.basename(dir_name)))
client_agent = 'manual'
download_id = ''
if client_agent and client_agent.lower() not in core.NZB_CLIENTS:
continue
input_name = os.path.basename(dir_name)
results = nzb.process(dir_name, input_name, 0,
client_agent=client_agent,
download_id=download_id or None,
input_category=subsection)
if results.status_code != 0:
logger.error(
'A problem was reported when trying to perform a manual run for {0}:{1}.'.format
(section, subsection))
result = results
return result

core/processor/nzb.py (new file, 154 lines)
@@ -0,0 +1,154 @@
import datetime
import core
from core import logger, main_db
from core.auto_process import comics, games, movies, music, tv, books
from core.auto_process.common import ProcessResult
from core.plugins.downloaders.nzb.utils import get_nzoid
from core.plugins.plex import plex_update
from core.user_scripts import external_script
from core.utils import (
char_replace,
clean_dir,
convert_to_ascii,
extract_files,
update_download_info_status,
)
try:
text_type = unicode
except NameError:
text_type = str
def process(input_directory, input_name=None, status=0, client_agent='manual', download_id=None, input_category=None, failure_link=None):
if core.SAFE_MODE and input_directory == core.NZB_DEFAULT_DIRECTORY:
logger.error(
'The input directory:[{0}] is the Default Download Directory. Please configure category directories to prevent processing of other media.'.format(
input_directory))
return ProcessResult(
message='',
status_code=-1,
)
if not download_id and client_agent == 'sabnzbd':
download_id = get_nzoid(input_name)
if client_agent != 'manual' and not core.DOWNLOAD_INFO:
logger.debug('Adding NZB download info for directory {0} to database'.format(input_directory))
my_db = main_db.DBConnection()
input_directory1 = input_directory
input_name1 = input_name
try:
encoded, input_directory1 = char_replace(input_directory)
encoded, input_name1 = char_replace(input_name)
except Exception:
pass
control_value_dict = {'input_directory': text_type(input_directory1)}
new_value_dict = {
'input_name': text_type(input_name1),
'input_hash': text_type(download_id),
'input_id': text_type(download_id),
'client_agent': text_type(client_agent),
'status': 0,
'last_update': datetime.date.today().toordinal(),
}
my_db.upsert('downloads', new_value_dict, control_value_dict)
# auto-detect section
if input_category is None:
input_category = 'UNCAT'
usercat = input_category
section = core.CFG.findsection(input_category).isenabled()
if section is None:
section = core.CFG.findsection('ALL').isenabled()
if section is None:
logger.error(
'Category:[{0}] is not defined or is not enabled. Please rename it or ensure it is enabled for the appropriate section in your autoProcessMedia.cfg and try again.'.format(
input_category))
return ProcessResult(
message='',
status_code=-1,
)
else:
usercat = 'ALL'
if len(section) > 1:
logger.error(
'Category:[{0}] is not unique, {1} are using it. Please rename it or disable all other sections using the same category name in your autoProcessMedia.cfg and try again.'.format(
input_category, section.keys()))
return ProcessResult(
message='',
status_code=-1,
)
if section:
section_name = section.keys()[0]
logger.info('Auto-detected SECTION:{0}'.format(section_name))
else:
logger.error('Unable to locate a section with subsection:{0} enabled in your autoProcessMedia.cfg, exiting!'.format(
input_category))
return ProcessResult(
status_code=-1,
message='',
)
cfg = dict(core.CFG[section_name][usercat])
extract = int(cfg.get('extract', 0))
try:
if int(cfg.get('remote_path')) and not core.REMOTE_PATHS:
logger.error('Remote Path is enabled for {0}:{1} but no Network mount points are defined. Please check your autoProcessMedia.cfg, exiting!'.format(
section_name, input_category))
return ProcessResult(
status_code=-1,
message='',
)
except Exception:
logger.error('Remote Path {0} is not valid for {1}:{2} Please set this to either 0 to disable or 1 to enable!'.format(
cfg.get('remote_path'), section_name, input_category))
input_name, input_directory = convert_to_ascii(input_name, input_directory)
if extract == 1 and not (status > 0 and core.NOEXTRACTFAILED):
logger.debug('Checking for archives to extract in directory: {0}'.format(input_directory))
extract_files(input_directory)
logger.info('Calling {0}:{1} to post-process:{2}'.format(section_name, input_category, input_name))
if section_name in ['CouchPotato', 'Radarr', 'Watcher3']:
result = movies.process(section_name, input_directory, input_name, status, client_agent, download_id, input_category, failure_link)
elif section_name in ['SickBeard', 'SiCKRAGE', 'NzbDrone', 'Sonarr']:
result = tv.process(section_name, input_directory, input_name, status, client_agent, download_id, input_category, failure_link)
elif section_name in ['HeadPhones', 'Lidarr']:
result = music.process(section_name, input_directory, input_name, status, client_agent, input_category)
elif section_name == 'Mylar':
result = comics.process(section_name, input_directory, input_name, status, client_agent, input_category)
elif section_name == 'Gamez':
result = games.process(section_name, input_directory, input_name, status, client_agent, input_category)
elif section_name == 'LazyLibrarian':
result = books.process(section_name, input_directory, input_name, status, client_agent, input_category)
elif section_name == 'UserScript':
result = external_script(input_directory, input_name, input_category, section[usercat])
else:
result = ProcessResult(
message='',
status_code=-1,
)
plex_update(input_category)
if result.status_code == 0:
if client_agent != 'manual':
# update download status in our DB
update_download_info_status(input_name, 1)
if section_name not in ['UserScript', 'NzbDrone', 'Sonarr', 'Radarr', 'Lidarr']:
# cleanup our processing folders of any misc unwanted files and empty directories
clean_dir(input_directory, section_name, input_category)
return result

core/processor/nzbget.py (new file, 108 lines)
@@ -0,0 +1,108 @@
import os
import sys
import core
from core import logger
from core.processor import nzb
def parse_download_id():
"""Parse nzbget download_id from environment."""
download_id_keys = [
'NZBPR_COUCHPOTATO',
'NZBPR_DRONE',
'NZBPR_SONARR',
'NZBPR_RADARR',
'NZBPR_LIDARR',
]
for download_id_key in download_id_keys:
try:
return os.environ[download_id_key]
except KeyError:
pass
else:
return ''
def parse_failure_link():
"""Parse nzbget failure_link from environment."""
return os.environ.get('NZBPR__DNZB_FAILURE')
def _parse_total_status():
status_summary = os.environ['NZBPP_TOTALSTATUS']
if status_summary != 'SUCCESS':
status = os.environ['NZBPP_STATUS']
logger.info('Download failed with status {0}.'.format(status))
return 1
return 0
def _parse_par_status():
"""Parse nzbget par status from environment."""
par_status = os.environ['NZBPP_PARSTATUS']
if par_status == '1' or par_status == '4':
logger.warning('Par-repair failed, setting status \'failed\'')
return 1
return 0
def _parse_unpack_status():
if os.environ['NZBPP_UNPACKSTATUS'] == '1':
logger.warning('Unpack failed, setting status \'failed\'')
return 1
return 0
def _parse_health_status():
"""Parse nzbget download health from environment."""
status = 0
unpack_status_value = os.environ['NZBPP_UNPACKSTATUS']
par_status_value = os.environ['NZBPP_PARSTATUS']
if unpack_status_value == '0' and par_status_value == '0':
# Unpack was skipped due to nzb-file properties
# or due to errors during par-check
if int(os.environ['NZBPP_HEALTH']) < 1000:
logger.warning('Download health is compromised and Par-check/repair disabled or no .par2 files found. Setting status \'failed\'')
status = 1
else:
logger.info('Par-check/repair disabled or no .par2 files found, and Unpack not required. Health is ok so handle as though download successful')
logger.info('Please check your Par-check/repair settings for future downloads.')
return status
def parse_status():
if 'NZBPP_TOTALSTATUS' in os.environ: # Called from nzbget 13.0 or later
status = _parse_total_status()
else:
par_status = _parse_par_status()
unpack_status = _parse_unpack_status()
health_status = _parse_health_status()
status = par_status or unpack_status or health_status
return status
def check_version():
"""Check nzbget version and if version is unsupported, exit."""
version = os.environ['NZBOP_VERSION']
# Check if the script is called from nzbget 11.0 or later
if version[0:5] < '11.0':
logger.error('NZBGet Version {0} is not supported. Please update NZBGet.'.format(version))
sys.exit(core.NZBGET_POSTPROCESS_ERROR)
logger.info('Script triggered from NZBGet Version {0}.'.format(version))
def process():
check_version()
status = parse_status()
download_id = parse_download_id()
failure_link = parse_failure_link()
return nzb.process(
input_directory=os.environ['NZBPP_DIRECTORY'],
input_name=os.environ['NZBPP_NZBNAME'],
status=status,
client_agent='nzbget',
download_id=download_id,
input_category=os.environ['NZBPP_CATEGORY'],
failure_link=failure_link,
)
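For reference, a hedged sketch of the NZBGet environment this entry point reads (values illustrative, not from the diff):
# NZBOP_VERSION=21.1                NZBPP_TOTALSTATUS=SUCCESS
# NZBPP_DIRECTORY=/downloads/complete/movies/Example.Movie.2023
# NZBPP_NZBNAME=Example.Movie.2023   NZBPP_CATEGORY=movies
# NZBPR__DNZB_FAILURE=<optional indexer failure URL>
result = process()  # returns a ProcessResult; status_code 0 means success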

core/processor/sab.py (new file, 50 lines)
@@ -0,0 +1,50 @@
import os
from core import logger
from core.processor import nzb
# Constants
MINIMUM_ARGUMENTS = 8
def process_script():
version = os.environ['SAB_VERSION']
logger.info('Script triggered from SABnzbd {0}.'.format(version))
return nzb.process(
input_directory=os.environ['SAB_COMPLETE_DIR'],
input_name=os.environ['SAB_FINAL_NAME'],
status=int(os.environ['SAB_PP_STATUS']),
client_agent='sabnzbd',
download_id=os.environ['SAB_NZO_ID'],
input_category=os.environ['SAB_CAT'],
failure_link=os.environ['SAB_FAILURE_URL'],
)
def process(args):
"""
SABnzbd arguments:
1. The final directory of the job (full path)
2. The original name of the NZB file
3. Clean version of the job name (no path info and '.nzb' removed)
4. Indexer's report number (if supported)
5. User-defined category
6. Group that the NZB was posted in e.g. alt.binaries.x
7. Status of post processing:
0 = OK
1 = failed verification
2 = failed unpack
3 = 1+2
8. Failure URL
"""
version = '0.7.17+' if len(args) > MINIMUM_ARGUMENTS else ''
logger.info('Script triggered from SABnzbd {}'.format(version))
return nzb.process(
input_directory=args[1],
input_name=args[2],
status=int(args[7]),
input_category=args[5],
client_agent='sabnzbd',
download_id='',
failure_link=''.join(args[8:]),
)
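An illustrative positional argument list for process() above (paths and names are hypothetical):
args = [
    'nzbToMedia.py',                                  # 0: script name
    '/downloads/complete/movies/Example.Movie.2023',  # 1: final directory
    'Example.Movie.2023.nzb',                         # 2: original NZB file name
    'Example.Movie.2023',                             # 3: clean job name
    '',                                               # 4: indexer report number
    'movies',                                         # 5: category
    'alt.binaries.example',                           # 6: group
    '0',                                              # 7: post-processing status (0 = OK)
]
result = process(args)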

@@ -1,5 +1,12 @@
# coding=utf-8
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
import os
import platform
import re
@@ -25,7 +32,7 @@ media_list = [r'\.s\d{2}e\d{2}\.', r'\.1080[pi]\.', r'\.720p\.', r'\.576[pi]', r
              r'\.internal\.', r'\bac3\b', r'\.ntsc\.', r'\.pal\.', r'\.secam\.', r'\bdivx\b', r'\bxvid\b']
media_pattern = re.compile('|'.join(media_list), flags=re.IGNORECASE)
garbage_name = re.compile(r'^[a-zA-Z0-9]*$')
-char_replace = [[r'(\w)1\.(\w)', r'\1i\2']
+char_replace = [[r'(\w)1\.(\w)', r'\1i\2'],
                ]
@@ -121,7 +128,7 @@ def reverse_filename(filename, dirname, name):
def rename_script(dirname):
    rename_file = ''
-    for directory, directories, files in os.walk(dirname):
+    for directory, _, files in os.walk(dirname):
        for file in files:
            if re.search(r'(rename\S*\.(sh|bat)$)', file, re.IGNORECASE):
                rename_file = os.path.join(directory, file)
@@ -148,7 +155,6 @@ def rename_script(dirname):
def par2(dirname):
-    newlist = []
    sofar = 0
    parfile = ''
    objects = []
@@ -172,7 +178,7 @@ def par2(dirname):
    cmd = ''
    for item in command:
        cmd = '{cmd} {item}'.format(cmd=cmd, item=item)
    logger.debug('calling command:{0}'.format(cmd), 'PAR2')
    try:
        proc = subprocess.Popen(command, stdout=bitbucket, stderr=bitbucket)
        proc.communicate()

@@ -1,8 +1,17 @@
# coding=utf-8
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
import errno
import json
+import sys
import os
+import time
import platform
import re
import shutil
@@ -18,11 +27,11 @@ from core.utils import make_dir
__author__ = 'Justin'

-def is_video_good(videofile, status):
+def is_video_good(videofile, status, require_lan=None):
    file_name_ext = os.path.basename(videofile)
    file_name, file_ext = os.path.splitext(file_name_ext)
    disable = False
-    if file_ext not in core.MEDIACONTAINER or not core.FFPROBE or not core.CHECK_MEDIA or file_ext in ['.iso'] or (status > 0 and core.NOEXTRACTFAILED):
+    if file_ext not in core.MEDIA_CONTAINER or not core.FFPROBE or not core.CHECK_MEDIA or file_ext in ['.iso'] or (status > 0 and core.NOEXTRACTFAILED):
        disable = True
    else:
        test_details, res = get_video_details(core.TEST_FILE)
@@ -54,7 +63,11 @@ def is_video_good(videofile, status):
    if video_details.get('streams'):
        video_streams = [item for item in video_details['streams'] if item['codec_type'] == 'video']
        audio_streams = [item for item in video_details['streams'] if item['codec_type'] == 'audio']
-        if len(video_streams) > 0 and len(audio_streams) > 0:
+        if require_lan:
+            valid_audio = [item for item in audio_streams if 'tags' in item and 'language' in item['tags'] and item['tags']['language'] in require_lan]
+        else:
+            valid_audio = audio_streams
+        if len(video_streams) > 0 and len(valid_audio) > 0:
            logger.info('SUCCESS: [{0}] has no corruption.'.format(file_name_ext), 'TRANSCODER')
            return True
        else:
@@ -66,7 +79,10 @@ def is_video_good(videofile, status):
def zip_out(file, img, bitbucket):
    procin = None
-    cmd = [core.SEVENZIP, '-so', 'e', img, file]
+    if os.path.isfile(file):
+        cmd = ['cat', file]
+    else:
+        cmd = [core.SEVENZIP, '-so', 'e', img, file]
    try:
        procin = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=bitbucket)
    except Exception:
@@ -80,10 +96,7 @@ def get_video_details(videofile, img=None, bitbucket=None):
    file = videofile
    if not core.FFPROBE:
        return video_details, result
-    if 'avprobe' in core.FFPROBE:
-        print_format = '-of'
-    else:
-        print_format = '-print_format'
+    print_format = '-of' if 'avprobe' in core.FFPROBE else '-print_format'
    try:
        if img:
            videofile = '-'
@@ -98,26 +111,40 @@ def get_video_details(videofile, img=None, bitbucket=None):
        proc = subprocess.Popen(command, stdout=subprocess.PIPE)
        out, err = proc.communicate()
        result = proc.returncode
-        video_details = json.loads(out)
+        video_details = json.loads(out.decode())
    except Exception:
-        pass
-    if not video_details:
-        try:
+        try:  # try this again without -show error in case of ffmpeg limitation
            command = [core.FFPROBE, '-v', 'quiet', print_format, 'json', '-show_format', '-show_streams', videofile]
+            print_cmd(command)
            if img:
-                procin = zip_out(file, img)
+                procin = zip_out(file, img, bitbucket)
                proc = subprocess.Popen(command, stdout=subprocess.PIPE, stdin=procin.stdout)
                procin.stdout.close()
            else:
                proc = subprocess.Popen(command, stdout=subprocess.PIPE)
            out, err = proc.communicate()
            result = proc.returncode
-            video_details = json.loads(out)
+            video_details = json.loads(out.decode())
        except Exception:
            logger.error('Checking [{0}] has failed'.format(file), 'TRANSCODER')
    return video_details, result

+def check_vid_file(video_details, result):
+    if result != 0:
+        return False
+    if video_details.get('error'):
+        return False
+    if not video_details.get('streams'):
+        return False
+    video_streams = [item for item in video_details['streams'] if item['codec_type'] == 'video']
+    audio_streams = [item for item in video_details['streams'] if item['codec_type'] == 'audio']
+    if len(video_streams) > 0 and len(audio_streams) > 0:
+        return True
+    else:
+        return False
def build_commands(file, new_dir, movie_name, bitbucket):
    if isinstance(file, string_types):
        input_file = file
@@ -135,9 +162,18 @@ def build_commands(file, new_dir, movie_name, bitbucket):
        name = re.sub('([ ._=:-]+[cC][dD][0-9])', '', name)
        if ext == core.VEXTENSION and new_dir == directory:  # we need to change the name to prevent overwriting itself.
            core.VEXTENSION = '-transcoded{ext}'.format(ext=core.VEXTENSION)  # adds '-transcoded.ext'
+        new_file = file
    else:
        img, data = next(iteritems(file))
        name = data['name']
+        new_file = []
+        rem_vid = []
+        for vid in data['files']:
+            video_details, result = get_video_details(vid, img, bitbucket)
+            if not check_vid_file(video_details, result):  # lets not transcode menu or other clips that don't have audio and video.
+                rem_vid.append(vid)
+        data['files'] = [f for f in data['files'] if f not in rem_vid]
+        new_file = {img: {'name': data['name'], 'files': data['files']}}
        video_details, result = get_video_details(data['files'][0], img, bitbucket)
        input_file = '-'
        file = '-'
@@ -443,7 +479,7 @@ def build_commands(file, new_dir, movie_name, bitbucket):
            burnt = 1
        if not core.ALLOWSUBS:
            break
-        if sub['codec_name'] in ['dvd_subtitle', 'VobSub'] and core.SCODEC == 'mov_text':  # We can't convert these.
+        if sub['codec_name'] in ['dvd_subtitle', 'dvb_subtitle', 'VobSub'] and core.SCODEC == 'mov_text':  # We can't convert these.
            continue
        map_cmd.extend(['-map', '0:{index}'.format(index=sub['index'])])
        s_mapped.extend([sub['index']])
@@ -454,13 +490,15 @@ def build_commands(file, new_dir, movie_name, bitbucket):
            break
        if sub['index'] in s_mapped:
            continue
-        if sub['codec_name'] in ['dvd_subtitle', 'VobSub'] and core.SCODEC == 'mov_text':  # We can't convert these.
+        if sub['codec_name'] in ['dvd_subtitle', 'dvb_subtitle', 'VobSub'] and core.SCODEC == 'mov_text':  # We can't convert these.
            continue
        map_cmd.extend(['-map', '0:{index}'.format(index=sub['index'])])
        s_mapped.extend([sub['index']])

    if core.OUTPUTFASTSTART:
        other_cmd.extend(['-movflags', '+faststart'])
+    if core.OTHEROPTS:
+        other_cmd.extend(core.OTHEROPTS)

    command = [core.FFMPEG, '-loglevel', 'warning']
@@ -478,7 +516,7 @@ def build_commands(file, new_dir, movie_name, bitbucket):
            continue
        if core.SCODEC == 'mov_text':
            subcode = [stream['codec_name'] for stream in sub_details['streams']]
-            if set(subcode).intersection(['dvd_subtitle', 'VobSub']):  # We can't convert these.
+            if set(subcode).intersection(['dvd_subtitle', 'dvb_subtitle', 'VobSub']):  # We can't convert these.
                continue
        command.extend(['-i', subfile])
        lan = os.path.splitext(os.path.splitext(subfile)[0])[1][1:].split('-')[0]
@@ -514,7 +552,7 @@ def build_commands(file, new_dir, movie_name, bitbucket):
    command.append(newfile_path)
    if platform.system() != 'Windows':
        command = core.NICENESS + command
-    return command
+    return command, new_file

def get_subs(file):
@@ -522,7 +560,7 @@ def get_subs(file):
    sub_ext = ['.srt', '.sub', '.idx']
    name = os.path.splitext(os.path.split(file)[1])[0]
    path = os.path.split(file)[0]
-    for directory, directories, filenames in os.walk(path):
+    for directory, _, filenames in os.walk(path):
        for filename in filenames:
            filepaths.extend([os.path.join(directory, filename)])
    subfiles = [item for item in filepaths if os.path.splitext(item)[1] in sub_ext and name in item]
@@ -573,7 +611,7 @@ def extract_subs(file, newfile_path, bitbucket):
    result = 1  # set result to failed in case call fails.
    try:
        proc = subprocess.Popen(command, stdout=bitbucket, stderr=bitbucket)
-        proc.communicate()
+        out, err = proc.communicate()
        result = proc.returncode
    except Exception:
        logger.error('Extracting subtitle has failed')
@@ -593,6 +631,7 @@ def process_list(it, new_dir, bitbucket):
    new_list = []
    combine = []
    vts_path = None
+    mts_path = None
    success = True
    for item in it:
        ext = os.path.splitext(item)[1].lower()
@@ -608,6 +647,14 @@ def process_list(it, new_dir, bitbucket):
                except Exception:
                    vts_path = os.path.split(item)[0]
            rem_list.append(item)
+        elif re.match('.+BDMV[/\\]SOURCE[/\\][0-9]+[0-9].[Mm][Tt][Ss]', item) and '.mts' not in core.IGNOREEXTENSIONS:
+            logger.debug('Found MTS image file: {0}'.format(item), 'TRANSCODER')
+            if not mts_path:
+                try:
+                    mts_path = re.match('(.+BDMV[/\\]SOURCE)', item).groups()[0]
+                except Exception:
+                    mts_path = os.path.split(item)[0]
+            rem_list.append(item)
        elif re.match('.+VIDEO_TS.', item) or re.match('.+VTS_[0-9][0-9]_[0-9].', item):
            rem_list.append(item)
        elif core.CONCAT and re.match('.+[cC][dD][0-9].', item):
@@ -617,6 +664,8 @@ def process_list(it, new_dir, bitbucket):
            continue
    if vts_path:
        new_list.extend(combine_vts(vts_path))
+    if mts_path:
+        new_list.extend(combine_mts(mts_path))
    if combine:
        new_list.extend(combine_cd(combine))
    for file in new_list:
@@ -635,48 +684,118 @@ def process_list(it, new_dir, bitbucket):
    return it, rem_list, new_list, success
def mount_iso(item, new_dir, bitbucket): #Currently only supports Linux Mount when permissions allow.
if platform.system() == 'Windows':
logger.error('No mounting options available under Windows for image file {0}'.format(item), 'TRANSCODER')
return []
mount_point = os.path.join(os.path.dirname(os.path.abspath(item)),'temp')
make_dir(mount_point)
cmd = ['mount', '-o', 'loop', item, mount_point]
print_cmd(cmd)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=bitbucket)
out, err = proc.communicate()
core.MOUNTED = mount_point # Allows us to verify this has been done and then cleanup.
for root, dirs, files in os.walk(mount_point):
for file in files:
full_path = os.path.join(root, file)
if re.match('.+VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]', full_path) and '.vob' not in core.IGNOREEXTENSIONS:
logger.debug('Found VIDEO_TS image file: {0}'.format(full_path), 'TRANSCODER')
try:
vts_path = re.match('(.+VIDEO_TS)', full_path).groups()[0]
except Exception:
vts_path = os.path.split(full_path)[0]
return combine_vts(vts_path)
elif re.match('.+BDMV[/\\]STREAM[/\\][0-9]+[0-9].[Mm]', full_path) and '.mts' not in core.IGNOREEXTENSIONS:
logger.debug('Found MTS image file: {0}'.format(full_path), 'TRANSCODER')
try:
mts_path = re.match('(.+BDMV[/\\]STREAM)', full_path).groups()[0]
except Exception:
mts_path = os.path.split(full_path)[0]
return combine_mts(mts_path)
logger.error('No VIDEO_TS or BDMV/SOURCE folder found in image file {0}'.format(mount_point), 'TRANSCODER')
return ['failure'] # If we got here, nothing matched our criteria
def rip_iso(item, new_dir, bitbucket):
    new_files = []
    failure_dir = 'failure'
    # Mount the ISO in your OS and call combineVTS.
    if not core.SEVENZIP:
-        logger.error('No 7zip installed. Can\'t extract image file {0}'.format(item), 'TRANSCODER')
-        new_files = [failure_dir]
+        logger.debug('No 7zip installed. Attempting to mount image file {0}'.format(item), 'TRANSCODER')
+        try:
+            new_files = mount_iso(item, new_dir, bitbucket)  # Currently only works for Linux.
+        except Exception:
+            logger.error('Failed to mount and extract from image file {0}'.format(item), 'TRANSCODER')
+            new_files = [failure_dir]
        return new_files
    cmd = [core.SEVENZIP, 'l', item]
    try:
-        logger.debug('Attempting to extract .vob from image file {0}'.format(item), 'TRANSCODER')
+        logger.debug('Attempting to extract .vob or .mts from image file {0}'.format(item), 'TRANSCODER')
        print_cmd(cmd)
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=bitbucket)
        out, err = proc.communicate()
-        file_list = [re.match(r'.+(VIDEO_TS[/\\]VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb])', line).groups()[0] for line in
-                     out.splitlines() if re.match(r'.+VIDEO_TS[/\\]VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb]', line)]
+        file_match_gen = (
+            re.match(r'.+(VIDEO_TS[/\\]VTS_[0-9][0-9]_[0-9].[Vv][Oo][Bb])', line)
+            for line in out.decode().splitlines()
+        )
+        file_list = [
+            file_match.groups()[0]
+            for file_match in file_match_gen
+            if file_match
+        ]
        combined = []
-        for n in range(99):
-            concat = []
-            m = 1
-            while True:
-                vts_name = 'VIDEO_TS{0}VTS_{1:02d}_{2:d}.VOB'.format(os.sep, n + 1, m)
-                if vts_name in file_list:
-                    concat.append(vts_name)
-                    m += 1
-                else:
-                    break
-            if not concat:
-                break
-            if core.CONCAT:
-                combined.extend(concat)
-                continue
-            name = '{name}.cd{x}'.format(
-                name=os.path.splitext(os.path.split(item)[1])[0], x=n + 1
-            )
-            new_files.append({item: {'name': name, 'files': concat}})
+        if file_list:  # handle DVD
+            for n in range(99):
+                concat = []
+                m = 1
+                while True:
+                    vts_name = 'VIDEO_TS{0}VTS_{1:02d}_{2:d}.VOB'.format(os.sep, n + 1, m)
+                    if vts_name in file_list:
+                        concat.append(vts_name)
+                        m += 1
+                    else:
+                        break
+                if not concat:
+                    break
+                if core.CONCAT:
+                    combined.extend(concat)
+                    continue
+                name = '{name}.cd{x}'.format(
+                    name=os.path.splitext(os.path.split(item)[1])[0], x=n + 1
+                )
+                new_files.append({item: {'name': name, 'files': concat}})
+        else:  # check BlueRay for BDMV/STREAM/XXXX.MTS
+            mts_list_gen = (
+                re.match(r'.+(BDMV[/\\]STREAM[/\\][0-9]+[0-9].[Mm]).', line)
+                for line in out.decode().splitlines()
+            )
+            mts_list = [
+                file_match.groups()[0]
+                for file_match in mts_list_gen
+                if file_match
+            ]
+            if sys.version_info[0] == 2:  # Python2 sorting
+                mts_list.sort(key=lambda f: int(filter(str.isdigit, f)))  # Sort all .mts files in numerical order
+            else:  # Python3 sorting
+                mts_list.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))
+            n = 0
+            for mts_name in mts_list:
+                concat = []
+                n += 1
+                concat.append(mts_name)
+                if core.CONCAT:
+                    combined.extend(concat)
+                    continue
+                name = '{name}.cd{x}'.format(
+                    name=os.path.splitext(os.path.split(item)[1])[0], x=n
+                )
+                new_files.append({item: {'name': name, 'files': concat}})
-        if core.CONCAT:
+        if core.CONCAT and combined:
            name = os.path.splitext(os.path.split(item)[1])[0]
            new_files.append({item: {'name': name, 'files': combined}})
        if not new_files:
-            logger.error('No VIDEO_TS folder found in image file {0}'.format(item), 'TRANSCODER')
-            new_files = [failure_dir]
+            logger.error('No VIDEO_TS or BDMV/SOURCE folder found in image file. Attempting to mount and scan {0}'.format(item), 'TRANSCODER')
+            new_files = mount_iso(item, new_dir, bitbucket)
    except Exception:
        logger.error('Failed to extract from image file {0}'.format(item), 'TRANSCODER')
        new_files = [failure_dir]
@@ -685,31 +804,69 @@ def rip_iso(item, new_dir, bitbucket):

def combine_vts(vts_path):
    new_files = []
-    combined = ''
+    combined = []
+    name = re.match(r'(.+)[/\\]VIDEO_TS', vts_path).groups()[0]
+    if os.path.basename(name) == 'temp':
+        name = os.path.basename(os.path.dirname(name))
+    else:
+        name = os.path.basename(name)
    for n in range(99):
-        concat = ''
+        concat = []
        m = 1
        while True:
            vts_name = 'VTS_{0:02d}_{1:d}.VOB'.format(n + 1, m)
            if os.path.isfile(os.path.join(vts_path, vts_name)):
-                concat += '{file}|'.format(file=os.path.join(vts_path, vts_name))
+                concat.append(os.path.join(vts_path, vts_name))
                m += 1
            else:
                break
        if not concat:
            break
        if core.CONCAT:
-            combined += '{files}|'.format(files=concat)
+            combined.extend(concat)
            continue
-        new_files.append('concat:{0}'.format(concat[:-1]))
+        name = '{name}.cd{x}'.format(
+            name=name, x=n + 1
+        )
+        new_files.append({vts_path: {'name': name, 'files': concat}})
    if core.CONCAT:
-        new_files.append('concat:{0}'.format(combined[:-1]))
+        new_files.append({vts_path: {'name': name, 'files': combined}})
    return new_files
def combine_mts(mts_path):
new_files = []
combined = []
name = re.match(r'(.+)[/\\]BDMV[/\\]STREAM', mts_path).groups()[0]
if os.path.basename(name) == 'temp':
name = os.path.basename(os.path.dirname(name))
else:
name = os.path.basename(name)
n = 0
mts_list = [f for f in os.listdir(mts_path) if os.path.isfile(os.path.join(mts_path, f))]
if sys.version_info[0] == 2: # Python2 sorting
mts_list.sort(key=lambda f: int(filter(str.isdigit, f)))
else: # Python3 sorting
mts_list.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))
for mts_name in mts_list: ### need to sort all files [1 - 998].mts in order
concat = []
concat.append(os.path.join(mts_path, mts_name))
if core.CONCAT:
combined.extend(concat)
continue
name = '{name}.cd{x}'.format(
name=name, x=n + 1
)
new_files.append({mts_path: {'name': name, 'files': concat}})
n += 1
if core.CONCAT:
new_files.append({mts_path: {'name': name, 'files': combined}})
return new_files
def combine_cd(combine): def combine_cd(combine):
new_files = [] new_files = []
-            for item in set([re.match('(.+)[cC][dD][0-9].', item).groups()[0] for item in combine]):
+            for item in {re.match('(.+)[cC][dD][0-9].', item).groups()[0] for item in combine}:
                concat = ''
                for n in range(99):
                    files = [file for file in combine if
@@ -757,7 +914,7 @@ def transcode_directory(dir_name):
    for file in file_list:
        if isinstance(file, string_types) and os.path.splitext(file)[1] in core.IGNOREEXTENSIONS:
            continue
-        command = build_commands(file, new_dir, movie_name, bitbucket)
+        command, file = build_commands(file, new_dir, movie_name, bitbucket)
        newfile_path = command[-1]
        # transcoding files may remove the original file, so make sure to extract subtitles first
@@ -777,16 +934,19 @@ def transcode_directory(dir_name):
        result = 1  # set result to failed in case call fails.
        try:
            if isinstance(file, string_types):
-                proc = subprocess.Popen(command, stdout=bitbucket, stderr=bitbucket)
+                proc = subprocess.Popen(command, stdout=bitbucket, stderr=subprocess.PIPE)
            else:
                img, data = next(iteritems(file))
-                proc = subprocess.Popen(command, stdout=bitbucket, stderr=bitbucket, stdin=subprocess.PIPE)
+                proc = subprocess.Popen(command, stdout=bitbucket, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
                for vob in data['files']:
                    procin = zip_out(vob, img, bitbucket)
                    if procin:
+                        logger.debug('Feeding in file: {0} to Transcoder'.format(vob))
                        shutil.copyfileobj(procin.stdout, proc.stdin)
                        procin.stdout.close()
-            proc.communicate()
+            out, err = proc.communicate()
+            if err:
+                logger.error('Transcoder returned:{0} has failed'.format(err))
            result = proc.returncode
        except Exception:
            logger.error('Transcoding of video {0} has failed'.format(newfile_path))
@@ -815,6 +975,15 @@ def transcode_directory(dir_name):
            logger.error('Transcoding of video to {0} failed with result {1}'.format(newfile_path, result))
        # this will be 0 (successful) it all are successful, else will return a positive integer for failure.
        final_result = final_result + result
+    if core.MOUNTED:  # In case we mounted an .iso file, unmount here.
+        time.sleep(5)  # play it safe and avoid failing to unmount.
+        cmd = ['umount', '-l', core.MOUNTED]
+        print_cmd(cmd)
+        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=bitbucket)
+        out, err = proc.communicate()
+        time.sleep(5)
+        os.rmdir(core.MOUNTED)
+        core.MOUNTED = None
    if final_result == 0 and not core.DUPLICATE:
        for file in rem_list:
            try:
@@ -824,7 +993,7 @@ def transcode_directory(dir_name):
    if not os.listdir(text_type(new_dir)):  # this is an empty directory and we didn't transcode into it.
        os.rmdir(new_dir)
        new_dir = dir_name
    if not core.PROCESSOUTPUT and core.DUPLICATE:  # We postprocess the original files to CP/SB
        new_dir = dir_name
    bitbucket.close()
    return final_result, new_dir
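As an aside on the stderr change above: a minimal, self-contained sketch (not the project's code; the command is hypothetical) of capturing a child process's stderr with subprocess and inspecting it after communicate():

# Standalone illustration, independent of nzbToMedia: run a command,
# discard stdout, and surface anything the child wrote to stderr.
import subprocess
import sys

def run_and_report(cmd):
    proc = subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE)
    _, err = proc.communicate()  # wait for exit and collect stderr as bytes
    if err:
        print('command wrote to stderr: {0}'.format(err.decode(errors='replace')))
    return proc.returncode

print(run_and_report([sys.executable, '-c', 'import sys; sys.stderr.write("warn\\n")']))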


@@ -1,38 +1,59 @@
# coding=utf-8
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
import os
from subprocess import Popen
import core
from core import logger, transcoder
-from core.utils import import_subs, list_media_files, remove_dir
+from core.plugins.subtitles import import_subs
+from core.utils import list_media_files, remove_dir
+from core.auto_process.common import (
+    ProcessResult,
+)
def external_script(output_destination, torrent_name, torrent_label, settings):
    final_result = 0  # start at 0.
    num_files = 0
+    core.USER_SCRIPT_MEDIAEXTENSIONS = settings.get('user_script_mediaExtensions', '')
    try:
-        core.USER_SCRIPT_MEDIAEXTENSIONS = settings['user_script_mediaExtensions'].lower()
        if isinstance(core.USER_SCRIPT_MEDIAEXTENSIONS, str):
-            core.USER_SCRIPT_MEDIAEXTENSIONS = core.USER_SCRIPT_MEDIAEXTENSIONS.split(',')
+            core.USER_SCRIPT_MEDIAEXTENSIONS = core.USER_SCRIPT_MEDIAEXTENSIONS.lower().split(',')
    except Exception:
+        logger.error('user_script_mediaExtensions could not be set', 'USERSCRIPT')
        core.USER_SCRIPT_MEDIAEXTENSIONS = []
-    core.USER_SCRIPT = settings.get('user_script_path')
-    if not core.USER_SCRIPT or core.USER_SCRIPT == 'None':  # do nothing and return success.
-        return [0, '']
+    core.USER_SCRIPT = settings.get('user_script_path', '')
+    if not core.USER_SCRIPT or core.USER_SCRIPT == 'None':
+        # do nothing and return success. This allows the user an option to Link files only and not run a script.
+        return ProcessResult(
+            status_code=0,
+            message='No user script defined',
+        )
+    core.USER_SCRIPT_PARAM = settings.get('user_script_param', '')
    try:
-        core.USER_SCRIPT_PARAM = settings['user_script_param']
        if isinstance(core.USER_SCRIPT_PARAM, str):
            core.USER_SCRIPT_PARAM = core.USER_SCRIPT_PARAM.split(',')
    except Exception:
+        logger.error('user_script_params could not be set', 'USERSCRIPT')
        core.USER_SCRIPT_PARAM = []
+    core.USER_SCRIPT_SUCCESSCODES = settings.get('user_script_successCodes', 0)
    try:
-        core.USER_SCRIPT_SUCCESSCODES = settings['user_script_successCodes']
        if isinstance(core.USER_SCRIPT_SUCCESSCODES, str):
            core.USER_SCRIPT_SUCCESSCODES = core.USER_SCRIPT_SUCCESSCODES.split(',')
    except Exception:
+        logger.error('user_script_successCodes could not be set', 'USERSCRIPT')
        core.USER_SCRIPT_SUCCESSCODES = 0
    core.USER_SCRIPT_CLEAN = int(settings.get('user_script_clean', 1))
@@ -46,11 +67,12 @@ def external_script(output_destination, torrent_name, torrent_label, settings):
            logger.info('Corrupt video file found {0}. Deleting.'.format(video), 'USERSCRIPT')
            os.unlink(video)
-    for dirpath, dirnames, filenames in os.walk(output_destination):
+    for dirpath, _, filenames in os.walk(output_destination):
        for file in filenames:
            file_path = core.os.path.join(dirpath, file)
            file_name, file_extension = os.path.splitext(file)
+            logger.debug('Checking file {0} to see if this should be processed.'.format(file), 'USERSCRIPT')
            if file_extension in core.USER_SCRIPT_MEDIAEXTENSIONS or 'all' in core.USER_SCRIPT_MEDIAEXTENSIONS:
                num_files += 1
@@ -101,7 +123,7 @@ def external_script(output_destination, torrent_name, torrent_label, settings):
        final_result += result
    num_files_new = 0
-    for dirpath, dirnames, filenames in os.walk(output_destination):
+    for _, _, filenames in os.walk(output_destination):
        for file in filenames:
            file_name, file_extension = os.path.splitext(file)
@@ -114,4 +136,7 @@ def external_script(output_destination, torrent_name, torrent_label, settings):
    elif core.USER_SCRIPT_CLEAN == int(1) and num_files_new != 0:
        logger.info('{0} files were processed, but {1} still remain. outputDirectory will not be cleaned.'.format(
            num_files, num_files_new))
-    return [final_result, '']
+    return ProcessResult(
+        status_code=final_result,
+        message='User Script Completed',
+    )
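The list returns above were replaced by ProcessResult objects. A rough stand-in for that shape (the actual class is defined in core.auto_process.common and may carry additional fields; this namedtuple only mirrors the two attributes used at the call sites shown):

# Illustrative stand-in only; not the real core.auto_process.common class.
from collections import namedtuple

ProcessResult = namedtuple('ProcessResult', ['status_code', 'message'])

result = ProcessResult(status_code=0, message='User Script Completed')
if result.status_code == 0:
    print(result.message)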

File diff suppressed because it is too large

core/utils/__init__.py Normal file

@@ -0,0 +1,54 @@
# coding=utf-8
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import requests
from core.utils import shutil_custom
from core.utils.common import clean_dir, flatten, get_dirs, process_dir
from core.utils.download_info import get_download_info, update_download_info_status
from core.utils.encoding import char_replace, convert_to_ascii
from core.utils.files import (
backup_versioned_file,
extract_files,
is_archive_file,
is_media_file,
is_min_size,
list_media_files,
move_file,
)
from core.utils.identification import category_search, find_imdbid
from core.utils.links import copy_link, replace_links
from core.utils.naming import clean_file_name, is_sample, sanitize_name
from core.utils.network import find_download, server_responding, test_connection, wake_on_lan, wake_up
from core.utils.parsers import (
parse_args,
parse_deluge,
parse_other,
parse_qbittorrent,
parse_rtorrent,
parse_transmission,
parse_utorrent,
parse_vuze,
)
from core.utils.paths import (
clean_directory,
flatten_dir,
get_dir_size,
make_dir,
onerror,
rchmod,
remote_dir,
remove_dir,
remove_empty_folders,
remove_read_only,
)
from core.utils.processes import RunningProcess, restart
requests.packages.urllib3.disable_warnings()
shutil_custom.monkey_patch()

core/utils/common.py Normal file

@@ -0,0 +1,120 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import os.path
from six import text_type
import core
from core import logger
from core.utils.files import list_media_files, move_file
from core.utils.paths import clean_directory, flatten_dir
def flatten(output_destination):
return flatten_dir(output_destination, list_media_files(output_destination))
def clean_dir(path, section, subsection):
cfg = dict(core.CFG[section][subsection])
min_size = int(cfg.get('minSize', 0))
delete_ignored = int(cfg.get('delete_ignored', 0))
try:
files = list_media_files(path, min_size=min_size, delete_ignored=delete_ignored)
except Exception:
files = []
return clean_directory(path, files)
def process_dir(path, link):
folders = []
logger.info('Searching {0} for mediafiles to post-process ...'.format(path))
dir_contents = os.listdir(text_type(path))
# search for single files and move them into their own folder for post-processing
# Generate list of sync files
sync_files = (
item for item in dir_contents
if os.path.splitext(item)[1] in ['.!sync', '.bts']
)
# Generate a list of file paths
filepaths = (
os.path.join(path, item) for item in dir_contents
if item not in ['Thumbs.db', 'thumbs.db']
)
# Generate a list of media files
mediafiles = (
item for item in filepaths
if os.path.isfile(item)
)
if any(sync_files):
logger.info('')
else:
for mediafile in mediafiles:
try:
move_file(mediafile, path, link)
except Exception as e:
logger.error('Failed to move {0} to its own directory: {1}'.format(os.path.split(mediafile)[1], e))
# removeEmptyFolders(path, removeRoot=False)
# Generate all path contents
path_contents = (
os.path.join(path, item)
for item in os.listdir(text_type(path))
)
# Generate all directories from path contents
directories = (
path for path in path_contents
if os.path.isdir(path)
)
for directory in directories:
dir_contents = os.listdir(directory)
sync_files = (
item for item in dir_contents
if os.path.splitext(item)[1] in ['.!sync', '.bts']
)
if not any(dir_contents) or any(sync_files):
continue
folders.append(directory)
return folders
def get_dirs(section, subsection, link='hard'):
to_return = []
watch_directory = core.CFG[section][subsection]['watch_dir']
directory = os.path.join(watch_directory, subsection)
if not os.path.exists(directory):
directory = watch_directory
try:
to_return.extend(process_dir(directory, link))
except Exception as e:
logger.error('Failed to add directories from {0} for post-processing: {1}'.format(watch_directory, e))
if core.USE_LINK == 'move':
try:
output_directory = os.path.join(core.OUTPUT_DIRECTORY, subsection)
if os.path.exists(output_directory):
to_return.extend(process_dir(output_directory, link))
except Exception as e:
logger.error('Failed to add directories from {0} for post-processing: {1}'.format(core.OUTPUT_DIRECTORY, e))
if not to_return:
logger.debug('No directories identified in {0}:{1} for post-processing'.format(section, subsection))
return list(set(to_return))
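process_dir above relies on generator expressions plus any() to decide whether a folder still has partially synced files. A small standalone sketch of that pattern (the file names are made up):

# any() over a generator stops at the first '.!sync'/'.bts' hit, so a large
# listing is not fully scanned once one partial file is found.
import os

dir_contents = ['movie.mkv', 'movie.nfo', 'movie.mkv.!sync']
sync_files = (
    item for item in dir_contents
    if os.path.splitext(item)[1] in ['.!sync', '.bts']
)
if any(sync_files):
    print('sync still in progress, skipping this directory')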

@@ -0,0 +1,30 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import datetime
from six import text_type
from core import logger, main_db
database = main_db.DBConnection()
def update_download_info_status(input_name, status):
msg = 'Updating DB download status of {0} to {1}'
action = 'UPDATE downloads SET status=?, last_update=? WHERE input_name=?'
args = [status, datetime.date.today().toordinal(), text_type(input_name)]
logger.db(msg.format(input_name, status))
database.action(action, args)
def get_download_info(input_name, status):
msg = 'Getting download info for {0} from the DB'
action = 'SELECT * FROM downloads WHERE input_name=? AND status=?'
args = [text_type(input_name), status]
logger.db(msg.format(input_name))
return database.select(action, args)

core/utils/encoding.py Normal file

@@ -0,0 +1,129 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import os
from six import text_type
from six import PY2
import core
from core import logger
if not PY2:
from builtins import bytes
def char_replace(name_in):
# Special character hex range:
# CP850: 0x80-0xA5 (fortunately not used in ISO-8859-15)
# UTF-8: 1st hex code 0xC2-0xC3 followed by a 2nd hex code 0xA1-0xFF
# ISO-8859-15: 0xA6-0xFF
# The function will detect if Name contains a special character
# If there is special character, detects if it is a UTF-8, CP850 or ISO-8859-15 encoding
encoded = False
encoding = None
if isinstance(name_in, text_type):
return encoded, name_in
if PY2:
name = name_in
for Idx in range(len(name)):
# print('Trying to intuit the encoding')
# /!\ detection is done 2char by 2char for UTF-8 special character
if (len(name) != 1) & (Idx < (len(name) - 1)):
# Detect UTF-8
if ((name[Idx] == '\xC2') | (name[Idx] == '\xC3')) & (
(name[Idx + 1] >= '\xA0') & (name[Idx + 1] <= '\xFF')):
encoding = 'utf-8'
break
# Detect CP850
elif (name[Idx] >= '\x80') & (name[Idx] <= '\xA5'):
encoding = 'cp850'
break
# Detect ISO-8859-15
elif (name[Idx] >= '\xA6') & (name[Idx] <= '\xFF'):
encoding = 'iso-8859-15'
break
else:
# Detect CP850
if (name[Idx] >= '\x80') & (name[Idx] <= '\xA5'):
encoding = 'cp850'
break
# Detect ISO-8859-15
elif (name[Idx] >= '\xA6') & (name[Idx] <= '\xFF'):
encoding = 'iso-8859-15'
break
else:
name = bytes(name_in)
for Idx in range(len(name)):
# print('Trying to intuit the encoding')
# /!\ detection is done 2char by 2char for UTF-8 special character
if (len(name) != 1) & (Idx < (len(name) - 1)):
# Detect UTF-8
if ((name[Idx] == 0xC2) | (name[Idx] == 0xC3)) & (
(name[Idx + 1] >= 0xA0) & (name[Idx + 1] <= 0xFF)):
encoding = 'utf-8'
break
# Detect CP850
elif (name[Idx] >= 0x80) & (name[Idx] <= 0xA5):
encoding = 'cp850'
break
# Detect ISO-8859-15
elif (name[Idx] >= 0xA6) & (name[Idx] <= 0xFF):
encoding = 'iso-8859-15'
break
else:
# Detect CP850
if (name[Idx] >= 0x80) & (name[Idx] <= 0xA5):
encoding = 'cp850'
break
# Detect ISO-8859-15
elif (name[Idx] >= 0xA6) & (name[Idx] <= 0xFF):
encoding = 'iso-8859-15'
break
if encoding:
encoded = True
name = name.decode(encoding)
elif not PY2:
name = name.decode()
return encoded, name
def convert_to_ascii(input_name, dir_name):
ascii_convert = int(core.CFG['ASCII']['convert'])
if ascii_convert == 0 or os.name == 'nt': # just return if we don't want to convert or on windows os and '\' is replaced!.
return input_name, dir_name
encoded, input_name = char_replace(input_name)
directory, base = os.path.split(dir_name)
if not base: # ended with '/'
directory, base = os.path.split(directory)
encoded, base2 = char_replace(base)
if encoded:
dir_name = os.path.join(directory, base2)
logger.info('Renaming directory to: {0}.'.format(base2), 'ENCODER')
os.rename(os.path.join(directory, base), dir_name)
if 'NZBOP_SCRIPTDIR' in os.environ:
print('[NZB] DIRECTORY={0}'.format(dir_name))
for dirname, dirnames, _ in os.walk(dir_name, topdown=False):
for subdirname in dirnames:
encoded, subdirname2 = char_replace(subdirname)
if encoded:
logger.info('Renaming directory to: {0}.'.format(subdirname2), 'ENCODER')
os.rename(os.path.join(dirname, subdirname), os.path.join(dirname, subdirname2))
for dirname, _, filenames in os.walk(dir_name):
for filename in filenames:
encoded, filename2 = char_replace(filename)
if encoded:
logger.info('Renaming file to: {0}.'.format(filename2), 'ENCODER')
os.rename(os.path.join(dirname, filename), os.path.join(dirname, filename2))
return input_name, dir_name
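char_replace above guesses the encoding of a raw name from byte ranges (0x80-0xA5 for CP850, 0xC2/0xC3 lead bytes for UTF-8, 0xA6-0xFF for ISO-8859-15). A standalone illustration of why those ranges differ, using 'café' as a throwaway example:

# The same character lands in different byte ranges depending on encoding.
name_cp850 = 'café'.encode('cp850')        # b'caf\x82'     -> 0x82 falls in 0x80-0xA5
name_utf8 = 'café'.encode('utf-8')         # b'caf\xc3\xa9' -> 0xC3 lead byte, then 0xA9
name_latin = 'café'.encode('iso-8859-15')  # b'caf\xe9'     -> 0xE9 falls in 0xA6-0xFF

for raw, guessed in [(name_cp850, 'cp850'), (name_utf8, 'utf-8'), (name_latin, 'iso-8859-15')]:
    print(raw, '->', raw.decode(guessed))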

core/utils/files.py Normal file

@@ -0,0 +1,238 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import os
import re
import shutil
import stat
import time
import mediafile as mediafiletool
import guessit
from six import text_type
import core
from core import extractor, logger
from core.utils.links import copy_link
from core.utils.naming import is_sample, sanitize_name
from core.utils.paths import get_dir_size, make_dir
def move_file(mediafile, path, link):
logger.debug('Found file {0} in root directory {1}.'.format(os.path.split(mediafile)[1], path))
new_path = None
file_ext = os.path.splitext(mediafile)[1]
try:
if file_ext in core.AUDIO_CONTAINER:
f = mediafiletool.MediaFile(mediafile)
# get artist and album info
artist = f.artist
album = f.album
# create new path
new_path = os.path.join(path, '{0} - {1}'.format(sanitize_name(artist), sanitize_name(album)))
elif file_ext in core.MEDIA_CONTAINER:
f = guessit.guessit(mediafile)
# get title
title = f.get('series') or f.get('title')
if not title:
title = os.path.splitext(os.path.basename(mediafile))[0]
new_path = os.path.join(path, sanitize_name(title))
except Exception as e:
logger.error('Exception parsing name for media file: {0}: {1}'.format(os.path.split(mediafile)[1], e))
if not new_path:
title = os.path.splitext(os.path.basename(mediafile))[0]
new_path = os.path.join(path, sanitize_name(title))
# Removed as encoding of directory no-longer required
#try:
# new_path = new_path.encode(core.SYS_ENCODING)
#except Exception:
# pass
# Just fail-safe incase we already have afile with this clean-name (was actually a bug from earlier code, but let's be safe).
if os.path.isfile(new_path):
new_path2 = os.path.join(os.path.join(os.path.split(new_path)[0], 'new'), os.path.split(new_path)[1])
new_path = new_path2
# create new path if it does not exist
if not os.path.exists(new_path):
make_dir(new_path)
newfile = os.path.join(new_path, sanitize_name(os.path.split(mediafile)[1]))
try:
newfile = newfile.encode(core.SYS_ENCODING)
except Exception:
pass
# link file to its new path
copy_link(mediafile, newfile, link)
def is_min_size(input_name, min_size):
file_name, file_ext = os.path.splitext(os.path.basename(input_name))
# audio files we need to check directory size not file size
input_size = os.path.getsize(input_name)
if file_ext in core.AUDIO_CONTAINER:
try:
input_size = get_dir_size(os.path.dirname(input_name))
except Exception:
logger.error('Failed to get file size for {0}'.format(input_name), 'MINSIZE')
return True
# Ignore files under a certain size
if input_size > min_size * 1048576:
return True
def is_archive_file(filename):
"""Check if the filename is allowed for the Archive."""
for regext in core.COMPRESSED_CONTAINER:
if regext.search(filename):
return regext.split(filename)[0]
return False
def is_media_file(mediafile, media=True, audio=True, meta=True, archives=True, other=False, otherext=None):
if otherext is None:
otherext = []
file_name, file_ext = os.path.splitext(mediafile)
try:
# ignore MAC OS's 'resource fork' files
if file_name.startswith('._'):
return False
except Exception:
pass
return any([
(media and file_ext.lower() in core.MEDIA_CONTAINER),
(audio and file_ext.lower() in core.AUDIO_CONTAINER),
(meta and file_ext.lower() in core.META_CONTAINER),
(archives and is_archive_file(mediafile)),
(other and (file_ext.lower() in otherext or 'all' in otherext)),
])
def list_media_files(path, min_size=0, delete_ignored=0, media=True, audio=True, meta=True, archives=True, other=False, otherext=None):
if otherext is None:
otherext = []
files = []
if not os.path.isdir(path):
if os.path.isfile(path): # Single file downloads.
cur_file = os.path.split(path)[1]
if is_media_file(cur_file, media, audio, meta, archives, other, otherext):
# Optionally ignore sample files
if is_sample(path) or not is_min_size(path, min_size):
if delete_ignored == 1:
try:
os.unlink(path)
logger.debug('Ignored file {0} has been removed ...'.format
(cur_file))
except Exception:
pass
else:
files.append(path)
return files
for cur_file in os.listdir(text_type(path)):
full_cur_file = os.path.join(path, cur_file)
# if it's a folder do it recursively
if os.path.isdir(full_cur_file) and not cur_file.startswith('.'):
files += list_media_files(full_cur_file, min_size, delete_ignored, media, audio, meta, archives, other, otherext)
elif is_media_file(cur_file, media, audio, meta, archives, other, otherext):
# Optionally ignore sample files
if is_sample(full_cur_file) or not is_min_size(full_cur_file, min_size):
if delete_ignored == 1:
try:
os.unlink(full_cur_file)
logger.debug('Ignored file {0} has been removed ...'.format
(cur_file))
except Exception:
pass
continue
files.append(full_cur_file)
return sorted(files, key=len)
def extract_files(src, dst=None, keep_archive=None):
extracted_folder = []
extracted_archive = []
for inputFile in list_media_files(src, media=False, audio=False, meta=False, archives=True):
dir_path = os.path.dirname(inputFile)
full_file_name = os.path.basename(inputFile)
archive_name = os.path.splitext(full_file_name)[0]
archive_name = re.sub(r'part[0-9]+', '', archive_name)
if dir_path in extracted_folder and archive_name in extracted_archive:
continue # no need to extract this, but keep going to look for other archives and sub directories.
try:
if extractor.extract(inputFile, dst or dir_path):
extracted_folder.append(dir_path)
extracted_archive.append(archive_name)
except Exception:
logger.error('Extraction failed for: {0}'.format(full_file_name))
for folder in extracted_folder:
for inputFile in list_media_files(folder, media=False, audio=False, meta=False, archives=True):
full_file_name = os.path.basename(inputFile)
archive_name = os.path.splitext(full_file_name)[0]
archive_name = re.sub(r'part[0-9]+', '', archive_name)
if archive_name not in extracted_archive or keep_archive:
continue # don't remove if we haven't extracted this archive, or if we want to preserve them.
logger.info('Removing extracted archive {0} from folder {1} ...'.format(full_file_name, folder))
try:
if not os.access(inputFile, os.W_OK):
os.chmod(inputFile, stat.S_IWUSR)
os.remove(inputFile)
time.sleep(1)
except Exception as e:
logger.error('Unable to remove file {0} due to: {1}'.format(inputFile, e))
def backup_versioned_file(old_file, version):
num_tries = 0
new_file = '{old}.v{version}'.format(old=old_file, version=version)
while not os.path.isfile(new_file):
if not os.path.isfile(old_file):
logger.log(u'Not creating backup, {file} doesn\'t exist'.format(file=old_file), logger.DEBUG)
break
try:
logger.log(u'Trying to back up {old} to {new}'.format(old=old_file, new=new_file), logger.DEBUG)
shutil.copy(old_file, new_file)
logger.log(u'Backup done', logger.DEBUG)
break
except Exception as error:
logger.log(u'Error while trying to back up {old} to {new} : {msg}'.format
(old=old_file, new=new_file, msg=error), logger.WARNING)
num_tries += 1
time.sleep(1)
logger.log(u'Trying again.', logger.DEBUG)
if num_tries >= 10:
logger.log(u'Unable to back up {old} to {new} please do it manually.'.format(old=old_file, new=new_file), logger.ERROR)
return False
return True
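For is_min_size above, min_size is read as mebibytes and compared against the file (or, for audio, the directory) size in bytes. A short worked example of that arithmetic:

# min_size is in MiB: the threshold is min_size * 1048576 bytes.
min_size_mb = 200
threshold_bytes = min_size_mb * 1048576     # 209,715,200 bytes
sample_file_bytes = 50 * 1048576            # a 50 MiB file
print(sample_file_bytes > threshold_bytes)  # False -> treated as too small and ignored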

@@ -0,0 +1,189 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import os
import re
import guessit
import requests
from six import text_type
from core import logger
from core.utils.naming import sanitize_name
def find_imdbid(dir_name, input_name, omdb_api_key):
imdbid = None
logger.info('Attemping imdbID lookup for {0}'.format(input_name))
# find imdbid in dirName
logger.info('Searching folder and file names for imdbID ...')
m = re.search(r'\b(tt\d{7,8})\b', dir_name + input_name)
if m:
imdbid = m.group(1)
logger.info('Found imdbID [{0}]'.format(imdbid))
return imdbid, dir_name
if os.path.isdir(dir_name):
for file in os.listdir(text_type(dir_name)):
m = re.search(r'\b(tt\d{7,8})\b', file)
if m:
imdbid = m.group(1)
logger.info('Found imdbID [{0}] via file name'.format(imdbid))
return imdbid, dir_name
if 'NZBPR__DNZB_MOREINFO' in os.environ:
dnzb_more_info = os.environ.get('NZBPR__DNZB_MOREINFO', '')
if dnzb_more_info != '':
regex = re.compile(r'^http://www.imdb.com/title/(tt[0-9]+)/$', re.IGNORECASE)
m = regex.match(dnzb_more_info)
if m:
imdbid = m.group(1)
logger.info('Found imdbID [{0}] from DNZB-MoreInfo'.format(imdbid))
return imdbid, dir_name
logger.info('Searching IMDB for imdbID ...')
try:
guess = guessit.guessit(input_name)
except Exception:
guess = None
if guess:
# Movie Title
title = None
if 'title' in guess:
title = guess['title']
# Movie Year
year = None
if 'year' in guess:
year = guess['year']
url = 'http://www.omdbapi.com'
if not omdb_api_key:
logger.info('Unable to determine imdbID: No api key provided for omdbapi.com.')
return imdbid, dir_name
logger.debug('Opening URL: {0}'.format(url))
try:
r = requests.get(url, params={'apikey': omdb_api_key, 'y': year, 't': title},
verify=False, timeout=(60, 300))
except requests.ConnectionError:
logger.error('Unable to open URL {0}'.format(url))
return imdbid, dir_name
try:
results = r.json()
except Exception:
logger.error('No json data returned from omdbapi.com')
try:
imdbid = results['imdbID']
except Exception:
logger.error('No imdbID returned from omdbapi.com')
if imdbid:
logger.info('Found imdbID [{0}]'.format(imdbid))
new_dir_name = '{}.cp({})'.format(dir_name, imdbid)
os.rename(dir_name, new_dir_name)
return imdbid, new_dir_name
logger.warning('Unable to find a imdbID for {0}'.format(input_name))
return imdbid, dir_name
def category_search(input_directory, input_name, input_category, root, categories):
tordir = False
if input_directory is None: # =Nothing to process here.
return input_directory, input_name, input_category, root
pathlist = os.path.normpath(input_directory).split(os.sep)
if input_category and input_category in pathlist:
logger.debug('SEARCH: Found the Category: {0} in directory structure'.format(input_category))
elif input_category:
logger.debug('SEARCH: Could not find the category: {0} in the directory structure'.format(input_category))
else:
try:
input_category = list(set(pathlist) & set(categories))[-1] # assume last match is most relevant category.
logger.debug('SEARCH: Found Category: {0} in directory structure'.format(input_category))
except IndexError:
input_category = ''
logger.debug('SEARCH: Could not find a category in the directory structure')
if not os.path.isdir(input_directory) and os.path.isfile(input_directory): # If the input directory is a file
if not input_name:
input_name = os.path.split(os.path.normpath(input_directory))[1]
return input_directory, input_name, input_category, root
if input_category and os.path.isdir(os.path.join(input_directory, input_category)):
logger.info(
'SEARCH: Found category directory {0} in input directory directory {1}'.format(input_category, input_directory))
input_directory = os.path.join(input_directory, input_category)
logger.info('SEARCH: Setting input_directory to {0}'.format(input_directory))
if input_name and os.path.isdir(os.path.join(input_directory, input_name)):
logger.info('SEARCH: Found torrent directory {0} in input directory directory {1}'.format(input_name, input_directory))
input_directory = os.path.join(input_directory, input_name)
logger.info('SEARCH: Setting input_directory to {0}'.format(input_directory))
tordir = True
elif input_name and os.path.isdir(os.path.join(input_directory, sanitize_name(input_name))):
logger.info('SEARCH: Found torrent directory {0} in input directory directory {1}'.format(
sanitize_name(input_name), input_directory))
input_directory = os.path.join(input_directory, sanitize_name(input_name))
logger.info('SEARCH: Setting input_directory to {0}'.format(input_directory))
tordir = True
elif input_name and os.path.isfile(os.path.join(input_directory, input_name)):
logger.info('SEARCH: Found torrent file {0} in input directory directory {1}'.format(input_name, input_directory))
input_directory = os.path.join(input_directory, input_name)
logger.info('SEARCH: Setting input_directory to {0}'.format(input_directory))
tordir = True
elif input_name and os.path.isfile(os.path.join(input_directory, sanitize_name(input_name))):
logger.info('SEARCH: Found torrent file {0} in input directory directory {1}'.format(
sanitize_name(input_name), input_directory))
input_directory = os.path.join(input_directory, sanitize_name(input_name))
logger.info('SEARCH: Setting input_directory to {0}'.format(input_directory))
tordir = True
elif input_name and os.path.isdir(input_directory):
for file in os.listdir(text_type(input_directory)):
if os.path.splitext(file)[0] in [input_name, sanitize_name(input_name)]:
logger.info('SEARCH: Found torrent file {0} in input directory directory {1}'.format(file, input_directory))
input_directory = os.path.join(input_directory, file)
logger.info('SEARCH: Setting input_directory to {0}'.format(input_directory))
input_name = file
tordir = True
break
imdbid = [item for item in pathlist if '.cp(tt' in item] # This looks for the .cp(tt imdb id in the path.
if imdbid and '.cp(tt' not in input_name:
input_name = imdbid[0] # This ensures the imdb id is preserved and passed to CP
tordir = True
if input_category and not tordir:
try:
index = pathlist.index(input_category)
if index + 1 < len(pathlist):
tordir = True
logger.info('SEARCH: Found a unique directory {0} in the category directory'.format
(pathlist[index + 1]))
if not input_name:
input_name = pathlist[index + 1]
except ValueError:
pass
if input_name and not tordir:
if input_name in pathlist or sanitize_name(input_name) in pathlist:
logger.info('SEARCH: Found torrent directory {0} in the directory structure'.format(input_name))
tordir = True
else:
root = 1
if not tordir:
root = 2
if root > 0:
logger.info('SEARCH: Could not find a unique directory for this download. Assume a common directory.')
logger.info('SEARCH: We will try and determine which files to process, individually')
return input_directory, input_name, input_category, root
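find_imdbid above first looks for an embedded IMDb id before falling back to omdbapi.com. A quick standalone check of the tt-id pattern it uses (the sample names are invented):

# 'tt' followed by 7 or 8 digits, bounded by non-word characters.
import re

for name in ['Some.Movie.2019.1080p.cp(tt1234567)', 'Another Movie (2021) [tt87654321]', 'no id here']:
    m = re.search(r'\b(tt\d{7,8})\b', name)
    print(name, '->', m.group(1) if m else None)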

core/utils/links.py Normal file

@@ -0,0 +1,94 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import os
import shutil
import linktastic
from core import logger
from core.utils.paths import make_dir
try:
from jaraco.windows.filesystem import islink, readlink
except ImportError:
if os.name == 'nt':
raise
else:
from os.path import islink
from os import readlink
def copy_link(src, target_link, use_link):
logger.info('MEDIAFILE: [{0}]'.format(os.path.basename(target_link)), 'COPYLINK')
logger.info('SOURCE FOLDER: [{0}]'.format(os.path.dirname(src)), 'COPYLINK')
logger.info('TARGET FOLDER: [{0}]'.format(os.path.dirname(target_link)), 'COPYLINK')
if src != target_link and os.path.exists(target_link):
logger.info('MEDIAFILE already exists in the TARGET folder, skipping ...', 'COPYLINK')
return True
elif src == target_link and os.path.isfile(target_link) and os.path.isfile(src):
logger.info('SOURCE AND TARGET files are the same, skipping ...', 'COPYLINK')
return True
elif src == os.path.dirname(target_link):
logger.info('SOURCE AND TARGET folders are the same, skipping ...', 'COPYLINK')
return True
make_dir(os.path.dirname(target_link))
try:
if use_link == 'dir':
logger.info('Directory linking SOURCE FOLDER -> TARGET FOLDER', 'COPYLINK')
linktastic.dirlink(src, target_link)
return True
if use_link == 'junction':
logger.info('Directory junction linking SOURCE FOLDER -> TARGET FOLDER', 'COPYLINK')
linktastic.dirlink(src, target_link)
return True
elif use_link == 'hard':
logger.info('Hard linking SOURCE MEDIAFILE -> TARGET FOLDER', 'COPYLINK')
linktastic.link(src, target_link)
return True
elif use_link == 'sym':
logger.info('Sym linking SOURCE MEDIAFILE -> TARGET FOLDER', 'COPYLINK')
linktastic.symlink(src, target_link)
return True
elif use_link == 'move-sym':
logger.info('Sym linking SOURCE MEDIAFILE -> TARGET FOLDER', 'COPYLINK')
shutil.move(src, target_link)
linktastic.symlink(target_link, src)
return True
elif use_link == 'move':
logger.info('Moving SOURCE MEDIAFILE -> TARGET FOLDER', 'COPYLINK')
shutil.move(src, target_link)
return True
except Exception as e:
logger.warning('Error: {0}, copying instead ... '.format(e), 'COPYLINK')
logger.info('Copying SOURCE MEDIAFILE -> TARGET FOLDER', 'COPYLINK')
shutil.copy(src, target_link)
return True
def replace_links(link, max_depth=10):
link_depth = 0
target = link
for attempt in range(0, max_depth):
if not islink(target):
break
target = readlink(target)
link_depth = attempt
if not link_depth:
logger.debug('{0} is not a link'.format(link))
elif link_depth > max_depth or (link_depth == max_depth and islink(target)):
logger.warning('Exceeded maximum depth {0} while following link {1}'.format(max_depth, link))
else:
logger.info('Changing sym-link: {0} to point directly to file: {1}'.format(link, target), 'COPYLINK')
os.unlink(link)
linktastic.symlink(target, link)
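copy_link above tries the configured link type and falls back to a plain copy when linking fails. A reduced sketch of that link-or-copy behaviour using only the standard library (linktastic, used above, is swapped for os.link here to keep the sketch dependency-free):

# Hard link when possible, copy otherwise; a simplified analogue of copy_link.
import os
import shutil

def link_or_copy(src, dst):
    if os.path.dirname(dst):
        os.makedirs(os.path.dirname(dst), exist_ok=True)
    try:
        os.link(src, dst)      # hard link when src and dst share a filesystem
        return 'linked'
    except OSError:
        shutil.copy(src, dst)  # cross-device or unsupported: fall back to a copy
        return 'copied'

# Example (paths are hypothetical): link_or_copy('/downloads/movie.mkv', '/library/movie.mkv')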

core/utils/naming.py Normal file

@@ -0,0 +1,53 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import re
def sanitize_name(name):
"""
Remove bad chars from the filename.
>>> sanitize_name('a/b/c')
'a-b-c'
>>> sanitize_name('abc')
'abc'
>>> sanitize_name('a"b')
'ab'
>>> sanitize_name('.a.b..')
'a.b'
"""
name = re.sub(r'[\\/*]', '-', name)
name = re.sub(r'[:\'<>|?]', '', name)
# remove leading/trailing periods and spaces
name = name.strip(' .')
return name
def clean_file_name(filename):
"""
Clean up nzb name by removing any . and _ characters and trailing hyphens.
Is basically equivalent to replacing all _ and . with a
space, but handles decimal numbers in string, for example:
"""
filename = re.sub(r'(\D)\.(?!\s)(\D)', r'\1 \2', filename)
filename = re.sub(r'(\d)\.(\d{4})', r'\1 \2', filename) # if it ends in a year then don't keep the dot
filename = re.sub(r'(\D)\.(?!\s)', r'\1 ', filename)
filename = re.sub(r'\.(?!\s)(\D)', r' \1', filename)
filename = filename.replace('_', ' ')
filename = re.sub('-$', '', filename)
filename = re.sub(r'^\[.*]', '', filename)
return filename.strip()
def is_sample(input_name):
# Ignore 'sample' in files
if re.search('(^|[\\W_])sample\\d*[\\W_]', input_name.lower()):
return True
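sanitize_name and clean_file_name above are pure string transforms. A worked run of the clean_file_name steps on a typical release name (the name itself is invented):

# Dots and underscores become spaces; the trailing hyphen is stripped.
import re

filename = 'Some.Movie.2019_1080p-GRP-'
filename = re.sub(r'(\D)\.(?!\s)(\D)', r'\1 \2', filename)
filename = re.sub(r'(\d)\.(\d{4})', r'\1 \2', filename)
filename = re.sub(r'(\D)\.(?!\s)', r'\1 ', filename)
filename = re.sub(r'\.(?!\s)(\D)', r' \1', filename)
filename = filename.replace('_', ' ')
filename = re.sub('-$', '', filename)
filename = re.sub(r'^\[.*]', '', filename)
print(filename.strip())  # Some Movie 2019 1080p-GRP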

core/utils/network.py Normal file

@@ -0,0 +1,131 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import socket
import struct
import time
import requests
import core
from core import logger
def make_wake_on_lan_packet(mac_address):
"""Build the Wake-On-LAN 'Magic Packet'."""
address = (
int(value, 16)
for value in mac_address.split(':')
)
fmt = 'BBBBBB'
hardware_address = struct.pack(fmt, *address)
broadcast_address = b'\xFF' * 6 # FF:FF:FF:FF:FF:FF
return broadcast_address + hardware_address * 16
def wake_on_lan(ethernet_address):
"""Send a WakeOnLan request."""
# Create the WoL magic packet
magic_packet = make_wake_on_lan_packet(ethernet_address)
# ...and send it to the broadcast address using UDP
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as connection:
connection.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
connection.sendto(magic_packet, ('<broadcast>', 9))
logger.info('WakeOnLan sent for mac: {0}'.format(ethernet_address))
def test_connection(host, port):
"""Test network connection."""
address = host, port
try:
socket.create_connection(address)
except socket.error:
return 'Down'
else:
return 'Up'
def wake_up():
wol = core.CFG['WakeOnLan']
host = wol['host']
port = int(wol['port'])
mac = wol['mac']
max_attempts = 4
logger.info('Trying to wake On lan.')
for attempt in range(0, max_attempts):
logger.info('Attempt {0} of {1}'.format(attempt + 1, max_attempts, mac))
if test_connection(host, port) == 'Up':
logger.info('System with mac: {0} has been woken.'.format(mac))
break
wake_on_lan(mac)
time.sleep(20)
else:
if test_connection(host, port) == 'Down': # final check.
msg = 'System with mac: {0} has not woken after {1} attempts.'
logger.warning(msg.format(mac, max_attempts))
logger.info('Continuing with the rest of the script.')
def server_responding(base_url):
logger.debug('Attempting to connect to server at {0}'.format(base_url), 'SERVER')
try:
requests.get(base_url, timeout=(60, 120), verify=False)
except (requests.ConnectionError, requests.exceptions.Timeout):
logger.error('Server failed to respond at {0}'.format(base_url), 'SERVER')
return False
else:
logger.debug('Server responded at {0}'.format(base_url), 'SERVER')
return True
def find_download(client_agent, download_id):
logger.debug('Searching for Download on {0} ...'.format(client_agent))
if client_agent == 'utorrent':
torrents = core.TORRENT_CLASS.list()[1]['torrents']
for torrent in torrents:
if download_id in torrent:
return True
if client_agent == 'transmission':
torrents = core.TORRENT_CLASS.get_torrents()
for torrent in torrents:
torrent_hash = torrent.hashString
if torrent_hash == download_id:
return True
if client_agent == 'deluge':
return False
if client_agent == 'qbittorrent':
torrents = core.TORRENT_CLASS.torrents()
for torrent in torrents:
if torrent['hash'] == download_id:
return True
if client_agent == 'sabnzbd':
if 'http' in core.SABNZBD_HOST:
base_url = '{0}:{1}/api'.format(core.SABNZBD_HOST, core.SABNZBD_PORT)
else:
base_url = 'http://{0}:{1}/api'.format(core.SABNZBD_HOST, core.SABNZBD_PORT)
url = base_url
params = {
'apikey': core.SABNZBD_APIKEY,
'mode': 'get_files',
'output': 'json',
'value': download_id,
}
try:
r = requests.get(url, params=params, verify=False, timeout=(30, 120))
except requests.ConnectionError:
logger.error('Unable to open URL')
return False # failure
result = r.json()
if result['files']:
return True
return False
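make_wake_on_lan_packet above produces the standard magic packet: six 0xFF bytes followed by the MAC address repeated sixteen times. A quick check of that layout with a throwaway MAC:

# 6 + 6 * 16 = 102 bytes in total.
import struct

mac = '00:11:22:33:44:55'
hardware_address = struct.pack('BBBBBB', *(int(octet, 16) for octet in mac.split(':')))
packet = b'\xff' * 6 + hardware_address * 16
print(len(packet))  # 102
print(packet[:6])   # b'\xff\xff\xff\xff\xff\xff'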

core/utils/parsers.py Normal file

@@ -0,0 +1,199 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import os
import core
from core import logger
def parse_other(args):
return os.path.normpath(args[1]), '', '', '', ''
def parse_rtorrent(args):
# rtorrent usage: system.method.set_key = event.download.finished,TorrentToMedia,
# 'execute={/path/to/nzbToMedia/TorrentToMedia.py,\'$d.get_base_path=\',\'$d.get_name=\',\'$d.get_custom1=\',\'$d.get_hash=\'}'
input_directory = os.path.normpath(args[1])
try:
input_name = args[2]
except Exception:
input_name = ''
try:
input_category = args[3]
except Exception:
input_category = ''
try:
input_hash = args[4]
except Exception:
input_hash = ''
try:
input_id = args[4]
except Exception:
input_id = ''
return input_directory, input_name, input_category, input_hash, input_id
def parse_utorrent(args):
# uTorrent usage: call TorrentToMedia.py '%D' '%N' '%L' '%I'
input_directory = os.path.normpath(args[1])
input_name = args[2]
try:
input_category = args[3]
except Exception:
input_category = ''
try:
input_hash = args[4]
except Exception:
input_hash = ''
try:
input_id = args[4]
except Exception:
input_id = ''
return input_directory, input_name, input_category, input_hash, input_id
def parse_deluge(args):
# Deluge usage: call TorrentToMedia.py TORRENT_ID TORRENT_NAME TORRENT_DIR
input_directory = os.path.normpath(args[3])
input_name = args[2]
input_hash = args[1]
input_id = args[1]
try:
input_category = core.TORRENT_CLASS.core.get_torrent_status(input_id, ['label']).get(b'label').decode()
except Exception:
input_category = ''
return input_directory, input_name, input_category, input_hash, input_id
def parse_transmission(args):
# Transmission usage: call TorrenToMedia.py (%TR_TORRENT_DIR% %TR_TORRENT_NAME% is passed on as environmental variables)
input_directory = os.path.normpath(os.getenv('TR_TORRENT_DIR'))
input_name = os.getenv('TR_TORRENT_NAME')
input_category = '' # We dont have a category yet
input_hash = os.getenv('TR_TORRENT_HASH')
input_id = os.getenv('TR_TORRENT_ID')
return input_directory, input_name, input_category, input_hash, input_id
def parse_synods(args):
# Synology/Transmission usage: call TorrenToMedia.py (%TR_TORRENT_DIR% %TR_TORRENT_NAME% is passed on as environmental variables)
input_directory = ''
input_id = ''
input_category = ''
input_name = os.getenv('TR_TORRENT_NAME')
input_hash = os.getenv('TR_TORRENT_HASH')
if not input_name: # No info passed. Assume manual download.
return input_directory, input_name, input_category, input_hash, input_id
input_id = 'dbid_{0}'.format(os.getenv('TR_TORRENT_ID'))
#res = core.TORRENT_CLASS.tasks_list(additional_param='detail')
res = core.TORRENT_CLASS.tasks_info(input_id, additional_param='detail')
logger.debug('result from syno {0}'.format(res))
if res['success']:
try:
tasks = res['data']['tasks']
task = [ task for task in tasks if task['id'] == input_id ][0]
input_id = task['id']
input_directory = task['additional']['detail']['destination']
except:
logger.error('unable to find download details in Synology DS')
#Syno paths appear to be relative. Let's test to see if the returned path exists, and if not append to /volume1/
if not os.path.isdir(input_directory):
for root in ['/volume1/', '/volume2/', '/volume3/', '/volume4/']:
if os.path.isdir(os.path.join(root, input_directory)):
input_directory = os.path.join(root, input_directory)
break
return input_directory, input_name, input_category, input_hash, input_id
def parse_vuze(args):
# vuze usage: C:\full\path\to\nzbToMedia\TorrentToMedia.py '%D%N%L%I%K%F'
try:
cur_input = args[1].split(',')
except Exception:
cur_input = []
try:
input_directory = os.path.normpath(cur_input[0])
except Exception:
input_directory = ''
try:
input_name = cur_input[1]
except Exception:
input_name = ''
try:
input_category = cur_input[2]
except Exception:
input_category = ''
try:
input_hash = cur_input[3]
except Exception:
input_hash = ''
try:
input_id = cur_input[3]
except Exception:
input_id = ''
try:
if cur_input[4] == 'single':
input_name = cur_input[5]
except Exception:
pass
return input_directory, input_name, input_category, input_hash, input_id
def parse_qbittorrent(args):
# qbittorrent usage: C:\full\path\to\nzbToMedia\TorrentToMedia.py '%D|%N|%L|%I'
try:
cur_input = args[1].split('|')
except Exception:
cur_input = []
try:
input_directory = os.path.normpath(cur_input[0].replace('\'', ''))
except Exception:
input_directory = ''
try:
input_name = cur_input[1]
if input_name[0] == '\'':
input_name = input_name[1:]
if input_name[-1] == '\'':
input_name = input_name[:-1]
except Exception:
input_name = ''
try:
input_category = cur_input[2].replace('\'', '')
except Exception:
input_category = ''
try:
input_hash = cur_input[3].replace('\'', '')
except Exception:
input_hash = ''
try:
input_id = cur_input[3].replace('\'', '')
except Exception:
input_id = ''
return input_directory, input_name, input_category, input_hash, input_id
def parse_args(client_agent, args):
clients = {
'other': parse_other,
'rtorrent': parse_rtorrent,
'utorrent': parse_utorrent,
'deluge': parse_deluge,
'transmission': parse_transmission,
'qbittorrent': parse_qbittorrent,
'vuze': parse_vuze,
'synods': parse_synods,
}
try:
return clients[client_agent](args)
except Exception:
return None, None, None, None, None
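parse_qbittorrent above receives everything in one pipe-delimited, quoted argument ('%D|%N|%L|%I'). A worked example of the splitting and quote stripping (the argv values are invented):

# Hypothetical argv as qBittorrent would pass it.
args = ['TorrentToMedia.py', "'/downloads/complete/Some.Movie'|'Some.Movie'|'movies'|'abcdef0123456789'"]

cur_input = args[1].split('|')
input_directory = cur_input[0].replace("'", '')
input_name = cur_input[1].strip("'")
input_category = cur_input[2].replace("'", '')
input_hash = cur_input[3].replace("'", '')
print(input_directory, input_name, input_category, input_hash)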

core/utils/paths.py Normal file

@@ -0,0 +1,170 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from functools import partial
import os
import re
import shutil
import stat
from six import text_type
import core
from core import logger
def onerror(func, path, exc_info):
"""
Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage : ``shutil.rmtree(path, onerror=onerror)``
"""
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise Exception
def remove_dir(dir_name):
logger.info('Deleting {0}'.format(dir_name))
try:
shutil.rmtree(text_type(dir_name), onerror=onerror)
except Exception:
logger.error('Unable to delete folder {0}'.format(dir_name))
def make_dir(path):
if not os.path.isdir(path):
try:
os.makedirs(path)
except Exception:
return False
return True
def remote_dir(path):
if not core.REMOTE_PATHS:
return path
for local, remote in core.REMOTE_PATHS:
if local in path:
base_dirs = path.replace(local, '').split(os.sep)
if '/' in remote:
remote_sep = '/'
else:
remote_sep = '\\'
new_path = remote_sep.join([remote] + base_dirs)
new_path = re.sub(r'(\S)(\\+)', r'\1\\', new_path)
new_path = re.sub(r'(/+)', r'/', new_path)
new_path = re.sub(r'([/\\])$', r'', new_path)
return new_path
return path
def get_dir_size(input_path):
prepend = partial(os.path.join, input_path)
return sum(
(os.path.getsize(f) if os.path.isfile(f) else get_dir_size(f))
for f in map(prepend, os.listdir(text_type(input_path)))
)
def remove_empty_folders(path, remove_root=True):
"""Remove empty folders."""
if not os.path.isdir(path):
return
# remove empty subfolders
logger.debug('Checking for empty folders in:{0}'.format(path))
files = os.listdir(text_type(path))
if len(files):
for f in files:
fullpath = os.path.join(path, f)
if os.path.isdir(fullpath):
remove_empty_folders(fullpath)
# if folder empty, delete it
files = os.listdir(text_type(path))
if len(files) == 0 and remove_root:
logger.debug('Removing empty folder:{}'.format(path))
os.rmdir(path)
def remove_read_only(filename):
if os.path.isfile(filename):
# check first the read-only attribute
file_attribute = os.stat(filename)[0]
if not file_attribute & stat.S_IWRITE:
# File is read-only, so make it writeable
logger.debug('Read only mode on file {name}. Attempting to make it writeable'.format
(name=filename))
try:
os.chmod(filename, stat.S_IWRITE)
except Exception:
logger.warning('Cannot change permissions of {file}'.format(file=filename), logger.WARNING)
def flatten_dir(destination, files):
logger.info('FLATTEN: Flattening directory: {0}'.format(destination))
for outputFile in files:
dir_path = os.path.dirname(outputFile)
file_name = os.path.basename(outputFile)
if dir_path == destination:
continue
target = os.path.join(destination, file_name)
try:
shutil.move(outputFile, target)
except Exception:
logger.error('Could not flatten {0}'.format(outputFile), 'FLATTEN')
remove_empty_folders(destination) # Cleanup empty directories
def clean_directory(path, files):
if not os.path.exists(path):
logger.info('Directory {0} has been processed and removed ...'.format(path), 'CLEANDIR')
return
if core.FORCE_CLEAN and not core.FAILED:
logger.info('Doing Forceful Clean of {0}'.format(path), 'CLEANDIR')
remove_dir(path)
return
if files:
logger.info(
'Directory {0} still contains {1} unprocessed file(s), skipping ...'.format(path, len(files)),
'CLEANDIRS',
)
return
logger.info('Directory {0} has been processed, removing ...'.format(path), 'CLEANDIRS')
try:
shutil.rmtree(path, onerror=onerror)
except Exception:
logger.error('Unable to delete directory {0}'.format(path))
def rchmod(path, mod):
logger.log('Changing file mode of {0} to {1}'.format(path, oct(mod)))
os.chmod(path, mod)
if not os.path.isdir(path):
return # Skip files
for root, dirs, files in os.walk(path):
for d in dirs:
os.chmod(os.path.join(root, d), mod)
for f in files:
os.chmod(os.path.join(root, f), mod)
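remote_dir above rewrites a local path onto its configured remote counterpart and normalises the separators for the remote side. A worked example of the substitution; the (local, remote) pair below stands in for one hypothetical core.REMOTE_PATHS entry:

# Map /downloads/complete/... onto a Windows-style share path.
import re

local, remote = '/downloads/complete', r'\\nas\media\downloads'
path = '/downloads/complete/movies/Some.Movie'

base_dirs = path.replace(local, '').split('/')      # ['', 'movies', 'Some.Movie']
new_path = '\\'.join([remote] + base_dirs)
new_path = re.sub(r'(\S)(\\+)', r'\1\\', new_path)   # collapse doubled backslashes
new_path = re.sub(r'([/\\])$', r'', new_path)        # drop a trailing separator
print(new_path)  # \\nas\media\downloads\movies\Some.Movie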

core/utils/processes.py Normal file

@@ -0,0 +1,122 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import os
import socket
import subprocess
import sys
import core
from core import logger, version_check, APP_FILENAME, SYS_ARGV
if os.name == 'nt':
from win32event import CreateMutex
from win32api import CloseHandle, GetLastError
from winerror import ERROR_ALREADY_EXISTS
class WindowsProcess(object):
def __init__(self):
self.mutex = None
self.mutexname = 'nzbtomedia_{pid}'.format(pid=core.PID_FILE.replace('\\', '/')) # {D0E858DF-985E-4907-B7FB-8D732C3FC3B9}'
self.CreateMutex = CreateMutex
self.CloseHandle = CloseHandle
self.GetLastError = GetLastError
self.ERROR_ALREADY_EXISTS = ERROR_ALREADY_EXISTS
def alreadyrunning(self):
self.mutex = self.CreateMutex(None, 0, self.mutexname)
self.lasterror = self.GetLastError()
if self.lasterror == self.ERROR_ALREADY_EXISTS:
self.CloseHandle(self.mutex)
return True
else:
return False
def __del__(self):
if self.mutex:
self.CloseHandle(self.mutex)
class PosixProcess(object):
def __init__(self):
self.pidpath = core.PID_FILE
self.lock_socket = None
def alreadyrunning(self):
try:
self.lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
self.lock_socket.bind('\0{path}'.format(path=self.pidpath))
self.lasterror = False
return self.lasterror
except socket.error as e:
if 'Address already in use' in str(e):
self.lasterror = True
return self.lasterror
except AttributeError:
pass
if os.path.exists(self.pidpath):
# Make sure it is not a 'stale' pidFile
try:
pid = int(open(self.pidpath, 'r').read().strip())
except Exception:
pid = None
# Check list of running pids, if not running it is stale so overwrite
if isinstance(pid, int):
try:
os.kill(pid, 0)
self.lasterror = True
except OSError:
self.lasterror = False
else:
self.lasterror = False
else:
self.lasterror = False
if not self.lasterror:
# Write my pid into pidFile to keep multiple copies of program from running
try:
fp = open(self.pidpath, 'w')
fp.write(str(os.getpid()))
fp.close()
except Exception:
pass
return self.lasterror
def __del__(self):
if not self.lasterror:
if self.lock_socket:
self.lock_socket.close()
if os.path.isfile(self.pidpath):
os.unlink(self.pidpath)
if os.name == 'nt':
RunningProcess = WindowsProcess
else:
RunningProcess = PosixProcess
def restart():
install_type = version_check.CheckVersion().install_type
status = 0
popen_list = []
if install_type in ('git', 'source'):
popen_list = [sys.executable, APP_FILENAME]
if popen_list:
popen_list += SYS_ARGV
logger.log(u'Restarting nzbToMedia with {args}'.format(args=popen_list))
logger.close()
p = subprocess.Popen(popen_list, cwd=os.getcwd())
p.wait()
status = p.returncode
os._exit(status)
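PosixProcess above prevents two copies from running by binding an abstract-namespace Unix socket derived from the PID file path. A Linux-only sketch of that single-instance trick with a throwaway name:

# Abstract sockets ('\0' prefix) vanish automatically when the process exits.
import socket

def already_running(tag='nzbtomedia-demo'):
    lock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    try:
        lock.bind('\0' + tag)
    except OSError:
        return True                  # another process already holds the name
    already_running.lock = lock      # keep a reference so the bind stays alive
    return False

print(already_running())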

@@ -0,0 +1,18 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from functools import partial
import shutil
from six import PY2
def monkey_patch(length=512 * 1024):
if PY2:
# On Python 2 monkey patch shutil.copyfileobj()
# to adjust the buffer length to 512KB rather than 4KB
original_copyfileobj = shutil.copyfileobj
shutil.copyfileobj = partial(original_copyfileobj, length=length)
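monkey_patch above rebinds shutil.copyfileobj on Python 2 so copies use a 512 KB buffer. The same functools.partial technique, shown in isolation without patching anything globally:

# partial() pre-fills the buffer length; behaviour is otherwise unchanged.
import io
import shutil
from functools import partial

fast_copy = partial(shutil.copyfileobj, length=512 * 1024)

src, dst = io.BytesIO(b'x' * (1024 * 1024)), io.BytesIO()
fast_copy(src, dst)              # copies the 1 MiB payload in 512 KiB chunks
print(dst.getbuffer().nbytes)    # 1048576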

@@ -2,6 +2,13 @@
# Author: Nic Wolfe <nic@wolfeden.ca>
# Modified by: echel0n
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
import os
import platform
import re
@@ -19,9 +26,7 @@ from core import github_api as github, logger
class CheckVersion(object):
-    """
-    Version check class meant to run as a thread object with the SB scheduler.
-    """
+    """Version checker that runs in a thread with the SB scheduler."""
    def __init__(self):
        self.install_type = self.find_install_type()
@@ -40,16 +45,15 @@ class CheckVersion(object):
    def find_install_type(self):
        """
-        Determines how this copy of SB was installed.
+        Determine how this copy of SB was installed.
        returns: type of installation. Possible values are:
            'win': any compiled windows build
            'git': running from source using git
            'source': running from source without git
        """
        # check if we're a windows build
-        if os.path.isdir(os.path.join(core.APP_ROOT, u'.git')):
+        if os.path.exists(os.path.join(core.APP_ROOT, u'.git')):
            install_type = 'git'
        else:
            install_type = 'source'
@@ -58,13 +62,12 @@ class CheckVersion(object):
    def check_for_new_version(self, force=False):
        """
-        Checks the internet for a newer version.
+        Check the internet for a newer version.
        returns: bool, True for new version or False for no new version.
        force: if true the VERSION_NOTIFY setting will be ignored and a check will be forced
        """
        if not core.VERSION_NOTIFY and not force:
            logger.log(u'Version checking is disabled, not checking for the newest version')
            return False
@@ -81,7 +84,7 @@ class CheckVersion(object):
    def update(self):
        if self.updater.need_update():
            result = self.updater.update()
-            cleanup.clean('core', 'libs')
+            cleanup.clean(cleanup.FOLDER_STRUCTURE)
            return result
@@ -116,7 +119,7 @@ class GitUpdateManager(UpdateManager):
        test_cmd = 'version'
        if core.GIT_PATH:
-            main_git = '\'{git}\''.format(git=core.GIT_PATH)
+            main_git = '"{git}"'.format(git=core.GIT_PATH)
        else:
            main_git = 'git'
@@ -199,8 +202,8 @@ class GitUpdateManager(UpdateManager):
                logger.log(u'{cmd} : returned successful'.format(cmd=cmd), logger.DEBUG)
                exit_status = 0
            elif core.LOG_GIT and exit_status in (1, 128):
                logger.log(u'{cmd} returned : {output}'.format
                           (cmd=cmd, output=output), logger.DEBUG)
            else:
                if core.LOG_GIT:
                    logger.log(u'{cmd} returned : {output}, treat as error for now'.format
@@ -211,13 +214,12 @@ class GitUpdateManager(UpdateManager):
    def _find_installed_version(self):
        """
-        Attempts to find the currently installed version of Sick Beard.
+        Attempt to find the currently installed version of Sick Beard.
        Uses git show to get commit version.
        Returns: True for success or False for failure
        """
        output, err, exit_status = self._run_git(self._git_path, 'rev-parse HEAD')  # @UnusedVariable
        if exit_status == 0 and output:
@@ -244,10 +246,12 @@ class GitUpdateManager(UpdateManager):
    def _check_github_for_update(self):
        """
-        Uses git commands to check if there is a newer version that the provided
-        commit hash. If there is a newer version it sets _num_commits_behind.
-        """
+        Check Github for a new version.
+        Uses git commands to check if there is a newer version than
+        the provided commit hash. If there is a newer version it
+        sets _num_commits_behind.
+        """
        self._newest_commit_hash = None
        self._num_commits_behind = 0
        self._num_commits_ahead = 0
@@ -324,10 +328,11 @@ class GitUpdateManager(UpdateManager):
    def update(self):
        """
-        Calls git pull origin <branch> in order to update Sick Beard. Returns a bool depending
-        on the call's success.
-        """
+        Check git for a new version.
+        Calls git pull origin <branch> in order to update Sick Beard.
+        Returns a bool depending on the call's success.
+        """
        output, err, exit_status = self._run_git(self._git_path, 'pull origin {branch}'.format(branch=self.branch))  # @UnusedVariable
        if exit_status == 0:
@@ -382,12 +387,14 @@ class SourceUpdateManager(UpdateManager):
    def _check_github_for_update(self):
        """
-        Uses pygithub to ask github if there is a newer version that the provided
-        commit hash. If there is a newer version it sets Sick Beard's version text.
+        Check Github for a new version.
+        Uses pygithub to ask github if there is a newer version than
+        the provided commit hash. If there is a newer version it sets
+        Sick Beard's version text.
        commit_hash: hash that we're checking against
        """
        self._num_commits_behind = 0
        self._newest_commit_hash = None
@@ -435,9 +442,7 @@ class SourceUpdateManager(UpdateManager):
            return
    def update(self):
-        """
-        Downloads the latest source tarball from github and installs it over the existing version.
-        """
+        """Download and install latest source tarball from github."""
        tar_download_url = 'https://github.com/{org}/{repo}/tarball/{branch}'.format(
            org=self.github_repo_user, repo=self.github_repo, branch=self.branch)
        version_path = os.path.join(core.APP_ROOT, u'version.txt')
@@ -489,7 +494,7 @@ class SourceUpdateManager(UpdateManager):
        # walk temp folder and move files to main folder
        logger.log(u'Moving files from {source} to {destination}'.format
                   (source=content_dir, destination=core.APP_ROOT))
-        for dirname, dirnames, filenames in os.walk(content_dir):  # @UnusedVariable
+        for dirname, _, filenames in os.walk(content_dir):  # @UnusedVariable
            dirname = dirname[len(content_dir) + 1:]
            for curfile in filenames:
                old_path = os.path.join(content_dir, dirname, curfile)

eol.py Normal file

@@ -0,0 +1,192 @@
#!/usr/bin/env python
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import datetime
import sys
import warnings
__version__ = '1.0.0'
def date(string, fmt='%Y-%m-%d'):
"""
Convert date string to date.
:param string: A date string
:param fmt: Format to use when parsing the date string
:return: A datetime.date
"""
return datetime.datetime.strptime(string, fmt).date()
# https://devguide.python.org/
# https://devguide.python.org/devcycle/#devcycle
PYTHON_EOL = {
(3, 13): date('2029-10-01'),
(3, 12): date('2028-10-01'),
(3, 11): date('2027-10-01'),
(3, 10): date('2026-10-01'),
(3, 9): date('2025-10-05'),
(3, 8): date('2024-10-14'),
(3, 7): date('2023-06-27'),
(3, 6): date('2021-12-23'),
(3, 5): date('2020-09-13'),
(3, 4): date('2019-03-16'),
(3, 3): date('2017-09-29'),
(3, 2): date('2016-02-20'),
(3, 1): date('2012-04-09'),
(3, 0): date('2009-01-13'),
(2, 7): date('2020-01-01'),
(2, 6): date('2013-10-29'),
}
class Error(Exception):
"""An error has occurred."""
class LifetimeError(Error):
"""Lifetime has been exceeded and upgrade is required."""
class LifetimeWarning(Warning):
"""Lifetime has been exceeded and is no longer supported."""
def lifetime(version=None):
"""
Calculate days left till End-of-Life for a version.
:param version: An optional tuple with version information
If a version is not provided, the current system version will be used.
:return: Days left until End-of-Life
"""
if version is None:
version = sys.version_info
major = version[0]
minor = version[1]
now = datetime.datetime.now().date()
time_left = PYTHON_EOL[(major, minor)] - now
return time_left.days
def expiration(version=None, grace_period=0):
"""
Calculate expiration date for a version given a grace period.
:param version: An optional tuple with version information
If a version is not provided, the current system version will be used.
:param grace_period: An optional number of days grace period
:return: Total days till expiration
"""
days_left = lifetime(version)
return days_left + grace_period
def check(version=None, grace_period=0):
"""
Exit with an error and recommend an upgrade if end of life has been reached.
:param version: An optional tuple with version information
If a version is not provided, the current system version will be used.
:param grace_period: An optional number of days grace period
If a grace period is not provided, no additional grace period is applied
(the parameter defaults to 0 days).
:return: None
"""
try:
raise_for_status(version, grace_period)
except LifetimeError as error:
print('Please use a newer version of Python.')
print_statuses()
sys.exit(error)
def raise_for_status(version=None, grace_period=0):
"""
Raise an exception if end of life has been reached.
:param version: An optional tuple with version information
If a version is not provided, the current system version will be used.
:param grace_period: An optional number of days grace period
If a grace period is not provided, no additional grace period is applied
(the parameter defaults to 0 days).
:return: None
"""
if version is None:
version = sys.version_info
days_left = lifetime(version)
expires = days_left + grace_period
if expires <= 0:
msg = 'Python {major}.{minor} is no longer supported.'.format(
major=version[0],
minor=version[1],
)
raise LifetimeError(msg)
def warn_for_status(version=None, grace_period=0):
"""
Warn if end of life has been reached.
:param version: An optional tuple with version information
If a version is not provided, the current system version will be used.
:param grace_period: An optional number of days grace period
:return: None
"""
if version is None:
version = sys.version_info
days_left = lifetime(version)
expires = days_left + grace_period
if expires <= 0:
msg = 'Python {major}.{minor} is no longer supported.'.format(
major=version[0],
minor=version[1],
)
warnings.warn(msg, LifetimeWarning)
def print_statuses(show_expired=False):
"""
Print end-of-life statuses of known python versions.
:param show_expired: If true also print expired python version statuses
"""
lifetimes = sorted(
(lifetime(python_version), python_version)
for python_version in PYTHON_EOL
)
print('Python End-of-Life for current versions:')
for days_left, python_version in lifetimes:
if days_left >= 0:
print(
'v{major}.{minor} in {remaining:>4} days'.format(
major=python_version[0],
minor=python_version[1],
remaining=days_left,
),
)
if not show_expired:
return
print()
print('Python End-of-Life for expired versions:')
for days_left, python_version in lifetimes:
if days_left < 0:
print(
'v{major}.{minor} {remaining:>4} days ago'.format(
major=python_version[0],
minor=python_version[1],
remaining=-days_left,
),
)
if __name__ == '__main__':
print_statuses(show_expired=True)
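A minimal usage sketch for the new module (illustrative only; the calling code and the 60-day value are assumptions, not part of this diff):

import eol

# Emit a LifetimeWarning once the running interpreter is past end-of-life,
# but keep running for a 60-day grace period.
eol.warn_for_status(grace_period=60)

# Or abort outright: check() prints the supported-version table and exits
# via sys.exit() once the grace period is exhausted.
eol.check(grace_period=60)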


@ -3,7 +3,7 @@
# get ffmpeg/yasm/x264
git clone git://source.ffmpeg.org/ffmpeg.git FFmpeg
git clone git://github.com/yasm/yasm.git FFmpeg/yasm
git clone https://code.videolan.org/videolan/x264.git FFmpeg/x264
# compile/install yasm
cd FFmpeg/yasm
@ -25,4 +25,4 @@ cd -
cd FFmpeg
./configure --disable-asm --enable-libx264 --enable-gpl
make install
cd -


@ -1,4 +1,11 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import os
import site
import sys


@ -1,4 +1,11 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import shutil
import os
import time


@ -1,4 +1,11 @@
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import libs

__all__ = ['completed']

libs/common/__init__.py (new file, 1 line added)

Binary file not shown.

@ -0,0 +1,33 @@
# This is a stub package designed to roughly emulate the _yaml
# extension module, which previously existed as a standalone module
# and has been moved into the `yaml` package namespace.
# It does not perfectly mimic its old counterpart, but should get
# close enough for anyone who's relying on it even when they shouldn't.
import yaml
# in some circumstances, the yaml module we imported may be from a different version, so we need
# to tread carefully when poking at it here (it may not have the attributes we expect)
if not getattr(yaml, '__with_libyaml__', False):
from sys import version_info
exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError
raise exc("No module named '_yaml'")
else:
from yaml._yaml import *
import warnings
warnings.warn(
'The _yaml extension module is now located at yaml._yaml'
' and its location is subject to change. To use the'
' LibYAML-based parser and emitter, import from `yaml`:'
' `from yaml import CLoader as Loader, CDumper as Dumper`.',
DeprecationWarning
)
del warnings
# Don't `del yaml` here because yaml is actually an existing
# namespace member of _yaml.
__name__ = '_yaml'
# If the module is top-level (i.e. not a part of any specific package)
# then the attribute should be set to ''.
# https://docs.python.org/3.8/library/types.html
__package__ = ''
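The deprecation warning above points callers at the supported import path; a short sketch of that usage (illustrative only, assuming PyYAML is installed, with a pure-Python fallback when LibYAML is absent):

import yaml

try:
    # LibYAML-backed parser/emitter, as the warning recommends.
    from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
    # Fall back to the pure-Python implementation.
    from yaml import Loader, Dumper

data = yaml.load('key: value', Loader=Loader)
text = yaml.dump(data, Dumper=Dumper)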


@ -13,8 +13,8 @@ See <http://github.com/ActiveState/appdirs> for details and usage.
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html

__version__ = "1.4.4"
__version_info__ = tuple(int(segment) for segment in __version__.split("."))

import sys


@ -4,12 +4,6 @@
# Use of this source code is governed by the 3-clause BSD license
# that can be found in the LICENSE file.
#
__title__ = 'babelfish'
__version__ = '0.5.5-dev'
__author__ = 'Antoine Bertin'
__license__ = 'BSD'
__copyright__ = 'Copyright 2015 the BabelFish authors'
import sys

if sys.version_info[0] >= 3:


@ -2,17 +2,22 @@
# Use of this source code is governed by the 3-clause BSD license
# that can be found in the LICENSE file.
#
from pkg_resources import iter_entry_points, EntryPoint
from ..exceptions import LanguageConvertError, LanguageReverseError

try:
# Python 3.3+
from collections.abc import Mapping, MutableMapping
except ImportError:
from collections import Mapping, MutableMapping

# from https://github.com/kennethreitz/requests/blob/master/requests/structures.py
class CaseInsensitiveDict(MutableMapping):
"""A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.abc.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
@ -63,7 +68,7 @@ class CaseInsensitiveDict(collections.MutableMapping):
)
def __eq__(self, other):
if isinstance(other, Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
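As an aside (not part of the diff), a small sketch of the case-insensitive behaviour this class provides for the converter tables that follow:

codes = CaseInsensitiveDict({'POB': ('por', 'BR')})
# Lookups ignore key casing, which is what the OpenSubtitles
# converter relies on for its reverse-mapping table.
assert codes['pob'] == ('por', 'BR')
assert codes['Pob'] == ('por', 'BR')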


@ -14,10 +14,10 @@ class OpenSubtitlesConverter(LanguageReverseConverter):
def __init__(self):
self.alpha3b_converter = language_converters['alpha3b']
self.alpha2_converter = language_converters['alpha2']
self.to_opensubtitles = {('por', 'BR'): 'pob', ('gre', None): 'ell', ('srp', None): 'scc', ('srp', 'ME'): 'mne', ('chi', 'TW'): 'zht'}
self.from_opensubtitles = CaseInsensitiveDict({'pob': ('por', 'BR'), 'pb': ('por', 'BR'), 'ell': ('ell', None),
'scc': ('srp', None), 'mne': ('srp', 'ME'), 'zht': ('zho', 'TW')})
self.codes = (self.alpha2_converter.codes | self.alpha3b_converter.codes | set(self.from_opensubtitles.keys()))
def convert(self, alpha3, country=None, script=None):
alpha3b = self.alpha3b_converter.convert(alpha3, country, script)
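An illustrative round trip with the newly added Traditional Chinese mapping (a sketch against the public babelfish API; expected behaviour given the table above, not part of the diff):

from babelfish import Language

# The new 'zht' OpenSubtitles code should resolve to Chinese (Taiwan)...
assert Language.fromopensubtitles('zht') == Language('zho', 'TW')
# ...and converting that Language back should yield 'zht' again.
assert Language('zho', 'TW').opensubtitles == 'zht'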


@ -1,373 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 the BabelFish authors. All rights reserved.
# Use of this source code is governed by the 3-clause BSD license
# that can be found in the LICENSE file.
#
from __future__ import unicode_literals
import re
import sys
import pickle
from unittest import TestCase, TestSuite, TestLoader, TextTestRunner
from pkg_resources import resource_stream # @UnresolvedImport
from babelfish import (LANGUAGES, Language, Country, Script, language_converters, country_converters,
LanguageReverseConverter, LanguageConvertError, LanguageReverseError, CountryReverseError)
if sys.version_info[:2] <= (2, 6):
_MAX_LENGTH = 80
def safe_repr(obj, short=False):
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < _MAX_LENGTH:
return result
return result[:_MAX_LENGTH] + ' [truncated]...'
class _AssertRaisesContext(object):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __init__(self, expected, test_case, expected_regexp=None):
self.expected = expected
self.failureException = test_case.failureException
self.expected_regexp = expected_regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
raise self.failureException(
"{0} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value # store for later retrieval
if self.expected_regexp is None:
return True
expected_regexp = self.expected_regexp
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException('"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
return True
class _Py26FixTestCase(object):
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if obj is not None:
standardMsg = '%s is not None' % (safe_repr(obj),)
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if obj is None:
standardMsg = 'unexpectedly None'
self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertIs(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if expr1 is not expr2:
standardMsg = '%s is not %s' % (safe_repr(expr1),
safe_repr(expr2))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNot(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is not b), but with a nicer default message."""
if expr1 is expr2:
standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
self.fail(self._formatMessage(msg, standardMsg))
else:
class _Py26FixTestCase(object):
pass
class TestScript(TestCase, _Py26FixTestCase):
def test_wrong_script(self):
self.assertRaises(ValueError, lambda: Script('Azer'))
def test_eq(self):
self.assertEqual(Script('Latn'), Script('Latn'))
def test_ne(self):
self.assertNotEqual(Script('Cyrl'), Script('Latn'))
def test_hash(self):
self.assertEqual(hash(Script('Hira')), hash('Hira'))
def test_pickle(self):
self.assertEqual(pickle.loads(pickle.dumps(Script('Latn'))), Script('Latn'))
class TestCountry(TestCase, _Py26FixTestCase):
def test_wrong_country(self):
self.assertRaises(ValueError, lambda: Country('ZZ'))
def test_eq(self):
self.assertEqual(Country('US'), Country('US'))
def test_ne(self):
self.assertNotEqual(Country('GB'), Country('US'))
self.assertIsNotNone(Country('US'))
def test_hash(self):
self.assertEqual(hash(Country('US')), hash('US'))
def test_pickle(self):
for country in [Country('GB'), Country('US')]:
self.assertEqual(pickle.loads(pickle.dumps(country)), country)
def test_converter_name(self):
self.assertEqual(Country('US').name, 'UNITED STATES')
self.assertEqual(Country.fromname('UNITED STATES'), Country('US'))
self.assertEqual(Country.fromcode('UNITED STATES', 'name'), Country('US'))
self.assertRaises(CountryReverseError, lambda: Country.fromname('ZZZZZ'))
self.assertEqual(len(country_converters['name'].codes), 249)
class TestLanguage(TestCase, _Py26FixTestCase):
def test_languages(self):
self.assertEqual(len(LANGUAGES), 7874)
def test_wrong_language(self):
self.assertRaises(ValueError, lambda: Language('zzz'))
def test_unknown_language(self):
self.assertEqual(Language('zzzz', unknown='und'), Language('und'))
def test_converter_alpha2(self):
self.assertEqual(Language('eng').alpha2, 'en')
self.assertEqual(Language.fromalpha2('en'), Language('eng'))
self.assertEqual(Language.fromcode('en', 'alpha2'), Language('eng'))
self.assertRaises(LanguageReverseError, lambda: Language.fromalpha2('zz'))
self.assertRaises(LanguageConvertError, lambda: Language('aaa').alpha2)
self.assertEqual(len(language_converters['alpha2'].codes), 184)
def test_converter_alpha3b(self):
self.assertEqual(Language('fra').alpha3b, 'fre')
self.assertEqual(Language.fromalpha3b('fre'), Language('fra'))
self.assertEqual(Language.fromcode('fre', 'alpha3b'), Language('fra'))
self.assertRaises(LanguageReverseError, lambda: Language.fromalpha3b('zzz'))
self.assertRaises(LanguageConvertError, lambda: Language('aaa').alpha3b)
self.assertEqual(len(language_converters['alpha3b'].codes), 418)
def test_converter_alpha3t(self):
self.assertEqual(Language('fra').alpha3t, 'fra')
self.assertEqual(Language.fromalpha3t('fra'), Language('fra'))
self.assertEqual(Language.fromcode('fra', 'alpha3t'), Language('fra'))
self.assertRaises(LanguageReverseError, lambda: Language.fromalpha3t('zzz'))
self.assertRaises(LanguageConvertError, lambda: Language('aaa').alpha3t)
self.assertEqual(len(language_converters['alpha3t'].codes), 418)
def test_converter_name(self):
self.assertEqual(Language('eng').name, 'English')
self.assertEqual(Language.fromname('English'), Language('eng'))
self.assertEqual(Language.fromcode('English', 'name'), Language('eng'))
self.assertRaises(LanguageReverseError, lambda: Language.fromname('Zzzzzzzzz'))
self.assertEqual(len(language_converters['name'].codes), 7874)
def test_converter_scope(self):
self.assertEqual(language_converters['scope'].codes, set(['I', 'S', 'M']))
self.assertEqual(Language('eng').scope, 'individual')
self.assertEqual(Language('und').scope, 'special')
def test_converter_type(self):
self.assertEqual(language_converters['type'].codes, set(['A', 'C', 'E', 'H', 'L', 'S']))
self.assertEqual(Language('eng').type, 'living')
self.assertEqual(Language('und').type, 'special')
def test_converter_opensubtitles(self):
self.assertEqual(Language('fra').opensubtitles, Language('fra').alpha3b)
self.assertEqual(Language('por', 'BR').opensubtitles, 'pob')
self.assertEqual(Language.fromopensubtitles('fre'), Language('fra'))
self.assertEqual(Language.fromopensubtitles('pob'), Language('por', 'BR'))
self.assertEqual(Language.fromopensubtitles('pb'), Language('por', 'BR'))
# Montenegrin is not recognized as an ISO language (yet?) but for now it is
# unofficially accepted as Serbian from Montenegro
self.assertEqual(Language.fromopensubtitles('mne'), Language('srp', 'ME'))
self.assertEqual(Language.fromcode('pob', 'opensubtitles'), Language('por', 'BR'))
self.assertRaises(LanguageReverseError, lambda: Language.fromopensubtitles('zzz'))
self.assertRaises(LanguageConvertError, lambda: Language('aaa').opensubtitles)
self.assertEqual(len(language_converters['opensubtitles'].codes), 606)
# test with all the LANGUAGES from the opensubtitles api
# downloaded from: http://www.opensubtitles.org/addons/export_languages.php
f = resource_stream('babelfish', 'data/opensubtitles_languages.txt')
f.readline()
for l in f:
idlang, alpha2, _, upload_enabled, web_enabled = l.decode('utf-8').strip().split('\t')
if not int(upload_enabled) and not int(web_enabled):
# do not test LANGUAGES that are too esoteric / not widely available
continue
self.assertEqual(Language.fromopensubtitles(idlang).opensubtitles, idlang)
if alpha2:
self.assertEqual(Language.fromopensubtitles(idlang), Language.fromopensubtitles(alpha2))
f.close()
def test_fromietf_country_script(self):
language = Language.fromietf('fra-FR-Latn')
self.assertEqual(language.alpha3, 'fra')
self.assertEqual(language.country, Country('FR'))
self.assertEqual(language.script, Script('Latn'))
def test_fromietf_country_no_script(self):
language = Language.fromietf('fra-FR')
self.assertEqual(language.alpha3, 'fra')
self.assertEqual(language.country, Country('FR'))
self.assertIsNone(language.script)
def test_fromietf_no_country_no_script(self):
language = Language.fromietf('fra-FR')
self.assertEqual(language.alpha3, 'fra')
self.assertEqual(language.country, Country('FR'))
self.assertIsNone(language.script)
def test_fromietf_no_country_script(self):
language = Language.fromietf('fra-Latn')
self.assertEqual(language.alpha3, 'fra')
self.assertIsNone(language.country)
self.assertEqual(language.script, Script('Latn'))
def test_fromietf_alpha2_language(self):
language = Language.fromietf('fr-Latn')
self.assertEqual(language.alpha3, 'fra')
self.assertIsNone(language.country)
self.assertEqual(language.script, Script('Latn'))
def test_fromietf_wrong_language(self):
self.assertRaises(ValueError, lambda: Language.fromietf('xyz-FR'))
def test_fromietf_wrong_country(self):
self.assertRaises(ValueError, lambda: Language.fromietf('fra-YZ'))
def test_fromietf_wrong_script(self):
self.assertRaises(ValueError, lambda: Language.fromietf('fra-FR-Wxyz'))
def test_eq(self):
self.assertEqual(Language('eng'), Language('eng'))
def test_ne(self):
self.assertNotEqual(Language('fra'), Language('eng'))
self.assertIsNotNone(Language('fra'))
def test_nonzero(self):
self.assertFalse(bool(Language('und')))
self.assertTrue(bool(Language('eng')))
def test_language_hasattr(self):
self.assertTrue(hasattr(Language('fra'), 'alpha3'))
self.assertTrue(hasattr(Language('fra'), 'alpha2'))
self.assertFalse(hasattr(Language('bej'), 'alpha2'))
def test_country_hasattr(self):
self.assertTrue(hasattr(Country('US'), 'name'))
self.assertTrue(hasattr(Country('FR'), 'alpha2'))
self.assertFalse(hasattr(Country('BE'), 'none'))
def test_country(self):
self.assertEqual(Language('por', 'BR').country, Country('BR'))
self.assertEqual(Language('eng', Country('US')).country, Country('US'))
def test_eq_with_country(self):
self.assertEqual(Language('eng', 'US'), Language('eng', Country('US')))
def test_ne_with_country(self):
self.assertNotEqual(Language('eng', 'US'), Language('eng', Country('GB')))
def test_script(self):
self.assertEqual(Language('srp', script='Latn').script, Script('Latn'))
self.assertEqual(Language('srp', script=Script('Cyrl')).script, Script('Cyrl'))
def test_eq_with_script(self):
self.assertEqual(Language('srp', script='Latn'), Language('srp', script=Script('Latn')))
def test_ne_with_script(self):
self.assertNotEqual(Language('srp', script='Latn'), Language('srp', script=Script('Cyrl')))
def test_eq_with_country_and_script(self):
self.assertEqual(Language('srp', 'SR', 'Latn'), Language('srp', Country('SR'), Script('Latn')))
def test_ne_with_country_and_script(self):
self.assertNotEqual(Language('srp', 'SR', 'Latn'), Language('srp', Country('SR'), Script('Cyrl')))
def test_hash(self):
self.assertEqual(hash(Language('fra')), hash('fr'))
self.assertEqual(hash(Language('ace')), hash('ace'))
self.assertEqual(hash(Language('por', 'BR')), hash('pt-BR'))
self.assertEqual(hash(Language('srp', script='Cyrl')), hash('sr-Cyrl'))
self.assertEqual(hash(Language('eng', 'US', 'Latn')), hash('en-US-Latn'))
def test_pickle(self):
for lang in [Language('fra'),
Language('eng', 'US'),
Language('srp', script='Latn'),
Language('eng', 'US', 'Latn')]:
self.assertEqual(pickle.loads(pickle.dumps(lang)), lang)
def test_str(self):
self.assertEqual(Language.fromietf(str(Language('eng', 'US', 'Latn'))), Language('eng', 'US', 'Latn'))
self.assertEqual(Language.fromietf(str(Language('fra', 'FR'))), Language('fra', 'FR'))
self.assertEqual(Language.fromietf(str(Language('bel'))), Language('bel'))
def test_register_converter(self):
class TestConverter(LanguageReverseConverter):
def __init__(self):
self.to_test = {'fra': 'test1', 'eng': 'test2'}
self.from_test = {'test1': 'fra', 'test2': 'eng'}
def convert(self, alpha3, country=None, script=None):
if alpha3 not in self.to_test:
raise LanguageConvertError(alpha3, country, script)
return self.to_test[alpha3]
def reverse(self, test):
if test not in self.from_test:
raise LanguageReverseError(test)
return (self.from_test[test], None)
language = Language('fra')
self.assertFalse(hasattr(language, 'test'))
language_converters['test'] = TestConverter()
self.assertTrue(hasattr(language, 'test'))
self.assertIn('test', language_converters)
self.assertEqual(Language('fra').test, 'test1')
self.assertEqual(Language.fromtest('test2').alpha3, 'eng')
del language_converters['test']
self.assertNotIn('test', language_converters)
self.assertRaises(KeyError, lambda: Language.fromtest('test1'))
self.assertRaises(AttributeError, lambda: Language('fra').test)
def suite():
suite = TestSuite()
suite.addTest(TestLoader().loadTestsFromTestCase(TestScript))
suite.addTest(TestLoader().loadTestsFromTestCase(TestCountry))
suite.addTest(TestLoader().loadTestsFromTestCase(TestLanguage))
return suite
if __name__ == '__main__':
TextTestRunner().run(suite())


@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@ -13,30 +12,29 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

import confuse
from sys import stderr

__version__ = '1.6.0'
__author__ = 'Adrian Sampson <adrian@radbox.org>'

class IncludeLazyConfig(confuse.LazyConfig):
"""A version of Confuse's LazyConfig that also merges in data from
YAML files specified in an `include` setting.
"""
def read(self, user=True, defaults=True):
super().read(user, defaults)
try:
for view in self['include']:
self.set_file(view.as_filename())
except confuse.NotFoundError:
pass
except confuse.ConfigReadError as err:
stderr.write("configuration `import` failed: {}"
.format(err.reason))

config = IncludeLazyConfig('beets', __name__)
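A brief sketch of the include behaviour (hypothetical file names; assumes a user config.yaml that lists extra YAML files under `include`, not part of the diff):

from beets import config

# config.yaml (hypothetical):
#   include: [secrets.yaml, overrides.yaml]
#
# read() merges the defaults, the user config, and every file named in
# `include`; an unreadable include is reported on stderr instead of
# aborting startup.
config.read()
extra = config['include'].as_str_seq()  # e.g. ['secrets.yaml', 'overrides.yaml']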


@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2017, Adrian Sampson.
#
@ -17,7 +16,6 @@
`python -m beets`.
"""
from __future__ import division, absolute_import, print_function
import sys
from .ui import main


@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@ -17,7 +16,6 @@
music and items' embedded album art.
"""
from __future__ import division, absolute_import, print_function
import subprocess
import platform
@ -26,7 +24,7 @@ import os
from beets.util import displayable_path, syspath, bytestring_path
from beets.util.artresizer import ArtResizer

import mediafile

def mediafile_image(image_path, maxwidth=None):
@ -43,7 +41,7 @@ def get_art(log, item):
try:
mf = mediafile.MediaFile(syspath(item.path))
except mediafile.UnreadableFileError as exc:
log.warning('Could not extract art from {0}: {1}',
displayable_path(item.path), exc)
return
@ -51,26 +49,27 @@ def get_art(log, item):
def embed_item(log, item, imagepath, maxwidth=None, itempath=None,
compare_threshold=0, ifempty=False, as_album=False, id3v23=None,
quality=0):
"""Embed an image into the item's media file.
"""
# Conditions and filters.
if compare_threshold:
if not check_art_similarity(log, item, imagepath, compare_threshold):
log.info('Image not similar; skipping.')
return
if ifempty and get_art(log, item):
log.info('media file already contained art')
return
if maxwidth and not as_album:
imagepath = resize_image(log, imagepath, maxwidth, quality)

# Get the `Image` object from the file.
try:
log.debug('embedding {0}', displayable_path(imagepath))
image = mediafile_image(imagepath, maxwidth)
except OSError as exc:
log.warning('could not read image file: {0}', exc)
return

# Make sure the image kind is safe (some formats only support PNG
@ -80,36 +79,39 @@ def embed_item(log, item, imagepath, maxwidth=None, itempath=None,
image.mime_type)
return

item.try_write(path=itempath, tags={'images': [image]}, id3v23=id3v23)

def embed_album(log, album, maxwidth=None, quiet=False, compare_threshold=0,
ifempty=False, quality=0):
"""Embed album art into all of the album's items.
"""
imagepath = album.artpath
if not imagepath:
log.info('No album art present for {0}', album)
return
if not os.path.isfile(syspath(imagepath)):
log.info('Album art not found at {0} for {1}',
displayable_path(imagepath), album)
return
if maxwidth:
imagepath = resize_image(log, imagepath, maxwidth, quality)

log.info('Embedding album art into {0}', album)

for item in album.items():
embed_item(log, item, imagepath, maxwidth, None, compare_threshold,
ifempty, as_album=True, quality=quality)

def resize_image(log, imagepath, maxwidth, quality):
"""Returns path to an image resized to maxwidth and encoded with the
specified quality level.
"""
log.debug('Resizing album art to {0} pixels wide and encoding at quality \
level {1}', maxwidth, quality)
imagepath = ArtResizer.shared.resize(maxwidth, syspath(imagepath),
quality=quality)
return imagepath
@ -131,7 +133,7 @@ def check_art_similarity(log, item, imagepath, compare_threshold):
syspath(art, prefix=False), syspath(art, prefix=False),
'-colorspace', 'gray', 'MIFF:-'] '-colorspace', 'gray', 'MIFF:-']
compare_cmd = ['compare', '-metric', 'PHASH', '-', 'null:'] compare_cmd = ['compare', '-metric', 'PHASH', '-', 'null:']
log.debug(u'comparing images with pipeline {} | {}', log.debug('comparing images with pipeline {} | {}',
convert_cmd, compare_cmd) convert_cmd, compare_cmd)
convert_proc = subprocess.Popen( convert_proc = subprocess.Popen(
convert_cmd, convert_cmd,
@ -155,7 +157,7 @@ def check_art_similarity(log, item, imagepath, compare_threshold):
convert_proc.wait() convert_proc.wait()
if convert_proc.returncode: if convert_proc.returncode:
log.debug( log.debug(
u'ImageMagick convert failed with status {}: {!r}', 'ImageMagick convert failed with status {}: {!r}',
convert_proc.returncode, convert_proc.returncode,
convert_stderr, convert_stderr,
) )
@ -165,7 +167,7 @@ def check_art_similarity(log, item, imagepath, compare_threshold):
stdout, stderr = compare_proc.communicate() stdout, stderr = compare_proc.communicate()
if compare_proc.returncode: if compare_proc.returncode:
if compare_proc.returncode != 1: if compare_proc.returncode != 1:
log.debug(u'ImageMagick compare failed: {0}, {1}', log.debug('ImageMagick compare failed: {0}, {1}',
displayable_path(imagepath), displayable_path(imagepath),
displayable_path(art)) displayable_path(art))
return return
@ -176,10 +178,10 @@ def check_art_similarity(log, item, imagepath, compare_threshold):
try: try:
phash_diff = float(out_str) phash_diff = float(out_str)
except ValueError: except ValueError:
log.debug(u'IM output is not a number: {0!r}', out_str) log.debug('IM output is not a number: {0!r}', out_str)
return return
log.debug(u'ImageMagick compare score: {0}', phash_diff) log.debug('ImageMagick compare score: {0}', phash_diff)
return phash_diff <= compare_threshold return phash_diff <= compare_threshold
return True return True
@ -189,18 +191,18 @@ def extract(log, outpath, item):
art = get_art(log, item) art = get_art(log, item)
outpath = bytestring_path(outpath) outpath = bytestring_path(outpath)
if not art: if not art:
log.info(u'No album art present in {0}, skipping.', item) log.info('No album art present in {0}, skipping.', item)
return return
# Add an extension to the filename. # Add an extension to the filename.
ext = mediafile.image_extension(art) ext = mediafile.image_extension(art)
if not ext: if not ext:
log.warning(u'Unknown image type in {0}.', log.warning('Unknown image type in {0}.',
displayable_path(item.path)) displayable_path(item.path))
return return
outpath += bytestring_path('.' + ext) outpath += bytestring_path('.' + ext)
log.info(u'Extracting album art from: {0} to: {1}', log.info('Extracting album art from: {0} to: {1}',
item, displayable_path(outpath)) item, displayable_path(outpath))
with open(syspath(outpath), 'wb') as f: with open(syspath(outpath), 'wb') as f:
f.write(art) f.write(art)
@ -216,7 +218,7 @@ def extract_first(log, outpath, items):
def clear(log, lib, query): def clear(log, lib, query):
items = lib.items(query) items = lib.items(query)
log.info(u'Clearing album art from {0} items', len(items)) log.info('Clearing album art from {0} items', len(items))
for item in items: for item in items:
log.debug(u'Clearing art for {0}', item) log.debug('Clearing art for {0}', item)
item.try_write(tags={'images': None}) item.try_write(tags={'images': None})


@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@ -16,19 +15,59 @@
"""Facilities for automatically determining files' correct metadata. """Facilities for automatically determining files' correct metadata.
""" """
from __future__ import division, absolute_import, print_function
from beets import logging
from beets import config

# Parts of external interface.
from .hooks import (  # noqa
AlbumInfo,
TrackInfo,
AlbumMatch,
TrackMatch,
Distance,
)
from .match import tag_item, tag_album, Proposal  # noqa
from .match import Recommendation  # noqa

# Global logger.
log = logging.getLogger('beets')
# Metadata fields that are already hardcoded, or where the tag name changes.
SPECIAL_FIELDS = {
'album': (
'va',
'releasegroup_id',
'artist_id',
'album_id',
'mediums',
'tracks',
'year',
'month',
'day',
'artist',
'artist_credit',
'artist_sort',
'data_url'
),
'track': (
'track_alt',
'artist_id',
'release_track_id',
'medium',
'index',
'medium_index',
'title',
'artist_credit',
'artist_sort',
'artist',
'track_id',
'medium_total',
'data_url',
'length'
)
}
# Additional utilities for the main interface. # Additional utilities for the main interface.
@ -43,17 +82,14 @@ def apply_item_metadata(item, track_info):
item.mb_releasetrackid = track_info.release_track_id
if track_info.artist_id:
item.mb_artistid = track_info.artist_id

for field, value in track_info.items():
# We only overwrite fields that are not already hardcoded.
if field in SPECIAL_FIELDS['track']:
continue
if value is None:
continue
item[field] = value

# At the moment, the other metadata is left intact (including album
# and track number). Perhaps these should be emptied?
@ -142,33 +178,24 @@ def apply_metadata(album_info, mapping):
# Compilation flag.
item.comp = album_info.va
# Miscellaneous metadata.
for field in ('albumtype',
'label',
'asin',
'catalognum',
'script',
'language',
'country',
'albumstatus',
'albumdisambig',
'data_source',):
value = getattr(album_info, field)
if value is not None:
item[field] = value
if track_info.disctitle is not None:
item.disctitle = track_info.disctitle
if track_info.media is not None:
item.media = track_info.media
if track_info.lyricist is not None:
item.lyricist = track_info.lyricist
if track_info.composer is not None:
item.composer = track_info.composer
if track_info.composer_sort is not None:
item.composer_sort = track_info.composer_sort
if track_info.arranger is not None:
item.arranger = track_info.arranger
# Track alt.
item.track_alt = track_info.track_alt
# Don't overwrite fields with empty values unless the
# field is explicitly allowed to be overwritten
for field, value in album_info.items():
if field in SPECIAL_FIELDS['album']:
continue
clobber = field in config['overwrite_null']['album'].as_str_seq()
if value is None and not clobber:
continue
item[field] = value
for field, value in track_info.items():
if field in SPECIAL_FIELDS['track']:
continue
clobber = field in config['overwrite_null']['track'].as_str_seq()
value = getattr(track_info, field)
if value is None and not clobber:
continue
item[field] = value


@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@ -14,7 +13,6 @@
# included in all copies or substantial portions of the Software.

"""Glue between metadata sources and the matching logic."""

from collections import namedtuple
from functools import total_ordering
@ -27,14 +25,36 @@ from beets.util import as_string
from beets.autotag import mb
from jellyfish import levenshtein_distance
from unidecode import unidecode

log = logging.getLogger('beets')
# The name of the type for patterns in re changed in Python 3.7.
try:
Pattern = re._pattern_type
except AttributeError:
Pattern = re.Pattern
# Classes used to represent candidate options.
class AttrDict(dict):
"""A dictionary that supports attribute ("dot") access, so `d.field`
is equivalent to `d['field']`.
"""
def __getattr__(self, attr):
if attr in self:
return self.get(attr)
else:
raise AttributeError
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __hash__(self):
return id(self)
class AlbumInfo(AttrDict):
"""Describes a canonical release that may be used to match a release """Describes a canonical release that may be used to match a release
in the library. Consists of these data members: in the library. Consists of these data members:
@ -43,38 +63,22 @@ class AlbumInfo(object):
- ``artist``: name of the release's primary artist
- ``artist_id``
- ``tracks``: list of TrackInfo objects making up the release
- ``asin``: Amazon ASIN
- ``albumtype``: string describing the kind of release
- ``va``: boolean: whether the release has "various artists"
- ``year``: release year
- ``month``: release month
- ``day``: release day
- ``label``: music label responsible for the release
- ``mediums``: the number of discs in this release
- ``artist_sort``: name of the release's artist for sorting
- ``releasegroup_id``: MBID for the album's release group
- ``catalognum``: the label's catalog number for the release
- ``script``: character set used for metadata
- ``language``: human language of the metadata
- ``country``: the release country
- ``albumstatus``: MusicBrainz release status (Official, etc.)
- ``media``: delivery mechanism (Vinyl, etc.)
- ``albumdisambig``: MusicBrainz release disambiguation comment
- ``artist_credit``: Release-specific artist name
- ``data_source``: The original data source (MusicBrainz, Discogs, etc.)
- ``data_url``: The data source release URL.
``mediums`` along with the fields up through ``tracks`` are required.
The others are optional and may be None.
"""
def __init__(self, tracks, album=None, album_id=None, artist=None,
artist_id=None, asin=None, albumtype=None, va=False,
year=None, month=None, day=None, label=None, mediums=None,
artist_sort=None, releasegroup_id=None, catalognum=None,
script=None, language=None, country=None, style=None,
genre=None, albumstatus=None, media=None, albumdisambig=None,
releasegroupdisambig=None, artist_credit=None,
original_year=None, original_month=None,
original_day=None, data_source=None, data_url=None,
discogs_albumid=None, discogs_labelid=None,
discogs_artistid=None, **kwargs):
self.album = album
self.album_id = album_id
self.artist = artist
@ -94,15 +98,22 @@ class AlbumInfo(object):
self.script = script
self.language = language
self.country = country
self.style = style
self.genre = genre
self.albumstatus = albumstatus
self.media = media
self.albumdisambig = albumdisambig
self.releasegroupdisambig = releasegroupdisambig
self.artist_credit = artist_credit
self.original_year = original_year
self.original_month = original_month
self.original_day = original_day
self.data_source = data_source
self.data_url = data_url
self.discogs_albumid = discogs_albumid
self.discogs_labelid = discogs_labelid
self.discogs_artistid = discogs_artistid
self.update(kwargs)
# Work around a bug in python-musicbrainz-ngs that causes some
# strings to be bytes rather than Unicode.
@ -112,54 +123,46 @@ class AlbumInfo(object):
constituent `TrackInfo` objects, are decoded to Unicode.
"""
for fld in ['album', 'artist', 'albumtype', 'label', 'artist_sort',
'catalognum', 'script', 'language', 'country', 'style',
'genre', 'albumstatus', 'albumdisambig',
'releasegroupdisambig', 'artist_credit',
'media', 'discogs_albumid', 'discogs_labelid',
'discogs_artistid']:
value = getattr(self, fld)
if isinstance(value, bytes):
setattr(self, fld, value.decode(codec, 'ignore'))

for track in self.tracks:
track.decode(codec)
def copy(self):
dupe = AlbumInfo([])
dupe.update(self)
dupe.tracks = [track.copy() for track in self.tracks]
return dupe
class TrackInfo(AttrDict):
"""Describes a canonical track present on a release. Appears as part
of an AlbumInfo's ``tracks`` list. Consists of these data members:
- ``title``: name of the track
- ``track_id``: MusicBrainz ID; UUID fragment only
- ``release_track_id``: MusicBrainz ID respective to a track on a
particular release; UUID fragment only
- ``artist``: individual track artist name
- ``artist_id``
- ``length``: float: duration of the track in seconds
- ``index``: position on the entire release
- ``media``: delivery mechanism (Vinyl, etc.)
- ``medium``: the disc number this track appears on in the album
- ``medium_index``: the track's position on the disc
- ``medium_total``: the number of tracks on the item's disc
- ``artist_sort``: name of the track artist for sorting
- ``disctitle``: name of the individual medium (subtitle)
- ``artist_credit``: Recording-specific artist name
- ``data_source``: The original data source (MusicBrainz, Discogs, etc.)
- ``data_url``: The data source release URL.
- ``lyricist``: individual track lyricist name
- ``composer``: individual track composer name
- ``composer_sort``: individual track composer sort name
- ``arranger`: individual track arranger name
- ``track_alt``: alternative track number (tape, vinyl, etc.)
Only ``title`` and ``track_id`` are required. The rest of the fields
may be None. The indices ``index``, ``medium``, and ``medium_index``
are all 1-based.
"""
def __init__(self, title=None, track_id=None, release_track_id=None,
artist=None, artist_id=None, length=None, index=None,
medium=None, medium_index=None, medium_total=None,
artist_sort=None, disctitle=None, artist_credit=None,
data_source=None, data_url=None, media=None, lyricist=None,
composer=None, composer_sort=None, arranger=None,
track_alt=None, work=None, mb_workid=None,
work_disambig=None, bpm=None, initial_key=None, genre=None,
**kwargs):
self.title = title
self.track_id = track_id
self.release_track_id = release_track_id
@ -181,6 +184,13 @@ class TrackInfo(object):
self.composer_sort = composer_sort
self.arranger = arranger
self.track_alt = track_alt
self.work = work
self.mb_workid = mb_workid
self.work_disambig = work_disambig
self.bpm = bpm
self.initial_key = initial_key
self.genre = genre
self.update(kwargs)
# As above, work around a bug in python-musicbrainz-ngs.
def decode(self, codec='utf-8'):
@ -193,6 +203,11 @@ class TrackInfo(object):
if isinstance(value, bytes):
setattr(self, fld, value.decode(codec, 'ignore'))
def copy(self):
dupe = TrackInfo()
dupe.update(self)
return dupe
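Because AlbumInfo and TrackInfo are now AttrDict subclasses, fields can be read either as attributes or as dictionary keys, and unknown keyword arguments are simply stored; a small sketch (the spotify_track_id field is made up for illustration and is not part of the diff):

# Extra keyword fields pass through **kwargs into the underlying dict.
info = TrackInfo(title='Foo', track_id='1234', spotify_track_id='abc')
assert info.title == 'Foo'                  # attribute-style access
assert info['spotify_track_id'] == 'abc'    # dict-style access

clone = info.copy()                         # shallow copy via update()
assert clone == info and clone is not info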
# Candidate distance scoring. # Candidate distance scoring.
@ -220,8 +235,8 @@ def _string_dist_basic(str1, str2):
transliteration/lowering to ASCII characters. Normalized by string transliteration/lowering to ASCII characters. Normalized by string
length. length.
""" """
assert isinstance(str1, six.text_type) assert isinstance(str1, str)
assert isinstance(str2, six.text_type) assert isinstance(str2, str)
str1 = as_string(unidecode(str1)) str1 = as_string(unidecode(str1))
str2 = as_string(unidecode(str2)) str2 = as_string(unidecode(str2))
str1 = re.sub(r'[^a-z0-9]', '', str1.lower()) str1 = re.sub(r'[^a-z0-9]', '', str1.lower())
@ -249,9 +264,9 @@ def string_dist(str1, str2):
# "something, the". # "something, the".
for word in SD_END_WORDS: for word in SD_END_WORDS:
if str1.endswith(', %s' % word): if str1.endswith(', %s' % word):
str1 = '%s %s' % (word, str1[:-len(word) - 2]) str1 = '{} {}'.format(word, str1[:-len(word) - 2])
if str2.endswith(', %s' % word): if str2.endswith(', %s' % word):
str2 = '%s %s' % (word, str2[:-len(word) - 2]) str2 = '{} {}'.format(word, str2[:-len(word) - 2])
# Perform a couple of basic normalizing substitutions. # Perform a couple of basic normalizing substitutions.
for pat, repl in SD_REPLACE: for pat, repl in SD_REPLACE:
@ -289,11 +304,12 @@ def string_dist(str1, str2):
return base_dist + penalty return base_dist + penalty
class LazyClassProperty(object): class LazyClassProperty:
"""A decorator implementing a read-only property that is *lazy* in """A decorator implementing a read-only property that is *lazy* in
the sense that the getter is only invoked once. Subsequent accesses the sense that the getter is only invoked once. Subsequent accesses
through *any* instance use the cached result. through *any* instance use the cached result.
""" """
def __init__(self, getter): def __init__(self, getter):
self.getter = getter self.getter = getter
self.computed = False self.computed = False
@ -306,17 +322,17 @@ class LazyClassProperty(object):
@total_ordering @total_ordering
@six.python_2_unicode_compatible class Distance:
class Distance(object):
"""Keeps track of multiple distance penalties. Provides a single """Keeps track of multiple distance penalties. Provides a single
weighted distance for all penalties as well as a weighted distance weighted distance for all penalties as well as a weighted distance
for each individual penalty. for each individual penalty.
""" """
def __init__(self): def __init__(self):
self._penalties = {} self._penalties = {}
@LazyClassProperty @LazyClassProperty
def _weights(cls): # noqa def _weights(cls): # noqa: N805
"""A dictionary from keys to floating-point weights. """A dictionary from keys to floating-point weights.
""" """
weights_view = config['match']['distance_weights'] weights_view = config['match']['distance_weights']
@ -394,7 +410,7 @@ class Distance(object):
return other - self.distance return other - self.distance
def __str__(self): def __str__(self):
return "{0:.2f}".format(self.distance) return f"{self.distance:.2f}"
# Behave like a dict. # Behave like a dict.
@ -421,7 +437,7 @@ class Distance(object):
""" """
if not isinstance(dist, Distance): if not isinstance(dist, Distance):
raise ValueError( raise ValueError(
u'`dist` must be a Distance object, not {0}'.format(type(dist)) '`dist` must be a Distance object, not {}'.format(type(dist))
) )
for key, penalties in dist._penalties.items(): for key, penalties in dist._penalties.items():
self._penalties.setdefault(key, []).extend(penalties) self._penalties.setdefault(key, []).extend(penalties)
@ -433,7 +449,7 @@ class Distance(object):
be a compiled regular expression, in which case it will be be a compiled regular expression, in which case it will be
matched against `value2`. matched against `value2`.
""" """
if isinstance(value1, re._pattern_type): if isinstance(value1, Pattern):
return bool(value1.match(value2)) return bool(value1.match(value2))
return value1 == value2 return value1 == value2
@ -445,7 +461,7 @@ class Distance(object):
""" """
if not 0.0 <= dist <= 1.0: if not 0.0 <= dist <= 1.0:
raise ValueError( raise ValueError(
u'`dist` must be between 0.0 and 1.0, not {0}'.format(dist) f'`dist` must be between 0.0 and 1.0, not {dist}'
) )
self._penalties.setdefault(key, []).append(dist) self._penalties.setdefault(key, []).append(dist)
@ -541,7 +557,7 @@ def album_for_mbid(release_id):
try: try:
album = mb.album_for_id(release_id) album = mb.album_for_id(release_id)
if album: if album:
plugins.send(u'albuminfo_received', info=album) plugins.send('albuminfo_received', info=album)
return album return album
except mb.MusicBrainzAPIError as exc: except mb.MusicBrainzAPIError as exc:
exc.log(log) exc.log(log)
@ -554,7 +570,7 @@ def track_for_mbid(recording_id):
try: try:
track = mb.track_for_id(recording_id) track = mb.track_for_id(recording_id)
if track: if track:
plugins.send(u'trackinfo_received', info=track) plugins.send('trackinfo_received', info=track)
return track return track
except mb.MusicBrainzAPIError as exc: except mb.MusicBrainzAPIError as exc:
exc.log(log) exc.log(log)
@ -567,7 +583,7 @@ def albums_for_id(album_id):
yield a yield a
for a in plugins.album_for_id(album_id): for a in plugins.album_for_id(album_id):
if a: if a:
plugins.send(u'albuminfo_received', info=a) plugins.send('albuminfo_received', info=a)
yield a yield a
@ -578,40 +594,43 @@ def tracks_for_id(track_id):
yield t yield t
for t in plugins.track_for_id(track_id): for t in plugins.track_for_id(track_id):
if t: if t:
plugins.send(u'trackinfo_received', info=t) plugins.send('trackinfo_received', info=t)
yield t yield t
@plugins.notify_info_yielded(u'albuminfo_received') @plugins.notify_info_yielded('albuminfo_received')
def album_candidates(items, artist, album, va_likely): def album_candidates(items, artist, album, va_likely, extra_tags):
"""Search for album matches. ``items`` is a list of Item objects """Search for album matches. ``items`` is a list of Item objects
that make up the album. ``artist`` and ``album`` are the respective that make up the album. ``artist`` and ``album`` are the respective
names (strings), which may be derived from the item list or may be names (strings), which may be derived from the item list or may be
entered by the user. ``va_likely`` is a boolean indicating whether entered by the user. ``va_likely`` is a boolean indicating whether
the album is likely to be a "various artists" release. the album is likely to be a "various artists" release. ``extra_tags``
is an optional dictionary of additional tags used to further
constrain the search.
""" """
# Base candidates if we have album and artist to match. # Base candidates if we have album and artist to match.
if artist and album: if artist and album:
try: try:
for candidate in mb.match_album(artist, album, len(items)): yield from mb.match_album(artist, album, len(items),
yield candidate extra_tags)
except mb.MusicBrainzAPIError as exc: except mb.MusicBrainzAPIError as exc:
exc.log(log) exc.log(log)
# Also add VA matches from MusicBrainz where appropriate. # Also add VA matches from MusicBrainz where appropriate.
if va_likely and album: if va_likely and album:
try: try:
for candidate in mb.match_album(None, album, len(items)): yield from mb.match_album(None, album, len(items),
yield candidate extra_tags)
except mb.MusicBrainzAPIError as exc: except mb.MusicBrainzAPIError as exc:
exc.log(log) exc.log(log)
# Candidates from plugins. # Candidates from plugins.
for candidate in plugins.candidates(items, artist, album, va_likely): yield from plugins.candidates(items, artist, album, va_likely,
yield candidate extra_tags)
@plugins.notify_info_yielded(u'trackinfo_received') @plugins.notify_info_yielded('trackinfo_received')
def item_candidates(item, artist, title): def item_candidates(item, artist, title):
"""Search for item matches. ``item`` is the Item to be matched. """Search for item matches. ``item`` is the Item to be matched.
``artist`` and ``title`` are strings and either reflect the item or ``artist`` and ``title`` are strings and either reflect the item or
@ -621,11 +640,9 @@ def item_candidates(item, artist, title):
# MusicBrainz candidates. # MusicBrainz candidates.
if artist and title: if artist and title:
try: try:
for candidate in mb.match_track(artist, title): yield from mb.match_track(artist, title)
yield candidate
except mb.MusicBrainzAPIError as exc: except mb.MusicBrainzAPIError as exc:
exc.log(log) exc.log(log)
# Plugin candidates. # Plugin candidates.
for candidate in plugins.item_candidates(item, artist, title): yield from plugins.item_candidates(item, artist, title)
yield candidate

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2016, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
@ -17,7 +16,6 @@
releases and tracks. releases and tracks.
""" """
from __future__ import division, absolute_import, print_function
import datetime import datetime
import re import re
@ -35,7 +33,7 @@ from beets.util.enumeration import OrderedEnum
# album level to determine whether a given release is likely a VA # album level to determine whether a given release is likely a VA
# release and also on the track level to remove the penalty for # release and also on the track level to remove the penalty for
# differing artists. # differing artists.
VA_ARTISTS = (u'', u'various artists', u'various', u'va', u'unknown') VA_ARTISTS = ('', 'various artists', 'various', 'va', 'unknown')
# Global logger. # Global logger.
log = logging.getLogger('beets') log = logging.getLogger('beets')
@ -108,7 +106,7 @@ def assign_items(items, tracks):
log.debug('...done.') log.debug('...done.')
# Produce the output matching. # Produce the output matching.
mapping = dict((items[i], tracks[j]) for (i, j) in matching) mapping = {items[i]: tracks[j] for (i, j) in matching}
extra_items = list(set(items) - set(mapping.keys())) extra_items = list(set(items) - set(mapping.keys()))
extra_items.sort(key=lambda i: (i.disc, i.track, i.title)) extra_items.sort(key=lambda i: (i.disc, i.track, i.title))
extra_tracks = list(set(tracks) - set(mapping.values())) extra_tracks = list(set(tracks) - set(mapping.values()))
@ -276,16 +274,16 @@ def match_by_id(items):
try: try:
first = next(albumids) first = next(albumids)
except StopIteration: except StopIteration:
log.debug(u'No album ID found.') log.debug('No album ID found.')
return None return None
# Is there a consensus on the MB album ID? # Is there a consensus on the MB album ID?
for other in albumids: for other in albumids:
if other != first: if other != first:
log.debug(u'No album ID consensus.') log.debug('No album ID consensus.')
return None return None
# If all album IDs are equal, look up the album. # If all album IDs are equal, look up the album.
log.debug(u'Searching for discovered album ID: {0}', first) log.debug('Searching for discovered album ID: {0}', first)
return hooks.album_for_mbid(first) return hooks.album_for_mbid(first)
@ -351,23 +349,23 @@ def _add_candidate(items, results, info):
checking the track count, ordering the items, checking for checking the track count, ordering the items, checking for
duplicates, and calculating the distance. duplicates, and calculating the distance.
""" """
log.debug(u'Candidate: {0} - {1} ({2})', log.debug('Candidate: {0} - {1} ({2})',
info.artist, info.album, info.album_id) info.artist, info.album, info.album_id)
# Discard albums with zero tracks. # Discard albums with zero tracks.
if not info.tracks: if not info.tracks:
log.debug(u'No tracks.') log.debug('No tracks.')
return return
# Don't duplicate. # Don't duplicate.
if info.album_id in results: if info.album_id in results:
log.debug(u'Duplicate.') log.debug('Duplicate.')
return return
# Discard matches without required tags. # Discard matches without required tags.
for req_tag in config['match']['required'].as_str_seq(): for req_tag in config['match']['required'].as_str_seq():
if getattr(info, req_tag) is None: if getattr(info, req_tag) is None:
log.debug(u'Ignored. Missing required tag: {0}', req_tag) log.debug('Ignored. Missing required tag: {0}', req_tag)
return return
# Find mapping between the items and the track info. # Find mapping between the items and the track info.
@ -380,10 +378,10 @@ def _add_candidate(items, results, info):
penalties = [key for key, _ in dist] penalties = [key for key, _ in dist]
for penalty in config['match']['ignored'].as_str_seq(): for penalty in config['match']['ignored'].as_str_seq():
if penalty in penalties: if penalty in penalties:
log.debug(u'Ignored. Penalty: {0}', penalty) log.debug('Ignored. Penalty: {0}', penalty)
return return
log.debug(u'Success. Distance: {0}', dist) log.debug('Success. Distance: {0}', dist)
results[info.album_id] = hooks.AlbumMatch(dist, info, mapping, results[info.album_id] = hooks.AlbumMatch(dist, info, mapping,
extra_items, extra_tracks) extra_items, extra_tracks)
@ -411,7 +409,7 @@ def tag_album(items, search_artist=None, search_album=None,
likelies, consensus = current_metadata(items) likelies, consensus = current_metadata(items)
cur_artist = likelies['artist'] cur_artist = likelies['artist']
cur_album = likelies['album'] cur_album = likelies['album']
log.debug(u'Tagging {0} - {1}', cur_artist, cur_album) log.debug('Tagging {0} - {1}', cur_artist, cur_album)
# The output result (distance, AlbumInfo) tuples (keyed by MB album # The output result (distance, AlbumInfo) tuples (keyed by MB album
# ID). # ID).
@ -420,7 +418,7 @@ def tag_album(items, search_artist=None, search_album=None,
# Search by explicit ID. # Search by explicit ID.
if search_ids: if search_ids:
for search_id in search_ids: for search_id in search_ids:
log.debug(u'Searching for album ID: {0}', search_id) log.debug('Searching for album ID: {0}', search_id)
for id_candidate in hooks.albums_for_id(search_id): for id_candidate in hooks.albums_for_id(search_id):
_add_candidate(items, candidates, id_candidate) _add_candidate(items, candidates, id_candidate)
@ -431,13 +429,13 @@ def tag_album(items, search_artist=None, search_album=None,
if id_info: if id_info:
_add_candidate(items, candidates, id_info) _add_candidate(items, candidates, id_info)
rec = _recommendation(list(candidates.values())) rec = _recommendation(list(candidates.values()))
log.debug(u'Album ID match recommendation is {0}', rec) log.debug('Album ID match recommendation is {0}', rec)
if candidates and not config['import']['timid']: if candidates and not config['import']['timid']:
# If we have a very good MBID match, return immediately. # If we have a very good MBID match, return immediately.
# Otherwise, this match will compete against metadata-based # Otherwise, this match will compete against metadata-based
# matches. # matches.
if rec == Recommendation.strong: if rec == Recommendation.strong:
log.debug(u'ID match.') log.debug('ID match.')
return cur_artist, cur_album, \ return cur_artist, cur_album, \
Proposal(list(candidates.values()), rec) Proposal(list(candidates.values()), rec)
@ -445,22 +443,29 @@ def tag_album(items, search_artist=None, search_album=None,
if not (search_artist and search_album): if not (search_artist and search_album):
# No explicit search terms -- use current metadata. # No explicit search terms -- use current metadata.
search_artist, search_album = cur_artist, cur_album search_artist, search_album = cur_artist, cur_album
log.debug(u'Search terms: {0} - {1}', search_artist, search_album) log.debug('Search terms: {0} - {1}', search_artist, search_album)
extra_tags = None
if config['musicbrainz']['extra_tags']:
tag_list = config['musicbrainz']['extra_tags'].get()
extra_tags = {k: v for (k, v) in likelies.items() if k in tag_list}
log.debug('Additional search terms: {0}', extra_tags)
# Is this album likely to be a "various artist" release? # Is this album likely to be a "various artist" release?
va_likely = ((not consensus['artist']) or va_likely = ((not consensus['artist']) or
(search_artist.lower() in VA_ARTISTS) or (search_artist.lower() in VA_ARTISTS) or
any(item.comp for item in items)) any(item.comp for item in items))
log.debug(u'Album might be VA: {0}', va_likely) log.debug('Album might be VA: {0}', va_likely)
# Get the results from the data sources. # Get the results from the data sources.
for matched_candidate in hooks.album_candidates(items, for matched_candidate in hooks.album_candidates(items,
search_artist, search_artist,
search_album, search_album,
va_likely): va_likely,
extra_tags):
_add_candidate(items, candidates, matched_candidate) _add_candidate(items, candidates, matched_candidate)
log.debug(u'Evaluating {0} candidates.', len(candidates)) log.debug('Evaluating {0} candidates.', len(candidates))
# Sort and get the recommendation. # Sort and get the recommendation.
candidates = _sort_candidates(candidates.values()) candidates = _sort_candidates(candidates.values())
rec = _recommendation(candidates) rec = _recommendation(candidates)
@ -485,7 +490,7 @@ def tag_item(item, search_artist=None, search_title=None,
trackids = search_ids or [t for t in [item.mb_trackid] if t] trackids = search_ids or [t for t in [item.mb_trackid] if t]
if trackids: if trackids:
for trackid in trackids: for trackid in trackids:
log.debug(u'Searching for track ID: {0}', trackid) log.debug('Searching for track ID: {0}', trackid)
for track_info in hooks.tracks_for_id(trackid): for track_info in hooks.tracks_for_id(trackid):
dist = track_distance(item, track_info, incl_artist=True) dist = track_distance(item, track_info, incl_artist=True)
candidates[track_info.track_id] = \ candidates[track_info.track_id] = \
@ -494,7 +499,7 @@ def tag_item(item, search_artist=None, search_title=None,
rec = _recommendation(_sort_candidates(candidates.values())) rec = _recommendation(_sort_candidates(candidates.values()))
if rec == Recommendation.strong and \ if rec == Recommendation.strong and \
not config['import']['timid']: not config['import']['timid']:
log.debug(u'Track ID match.') log.debug('Track ID match.')
return Proposal(_sort_candidates(candidates.values()), rec) return Proposal(_sort_candidates(candidates.values()), rec)
# If we're searching by ID, don't proceed. # If we're searching by ID, don't proceed.
@ -507,7 +512,7 @@ def tag_item(item, search_artist=None, search_title=None,
# Search terms. # Search terms.
if not (search_artist and search_title): if not (search_artist and search_title):
search_artist, search_title = item.artist, item.title search_artist, search_title = item.artist, item.title
log.debug(u'Item search terms: {0} - {1}', search_artist, search_title) log.debug('Item search terms: {0} - {1}', search_artist, search_title)
# Get and evaluate candidate metadata. # Get and evaluate candidate metadata.
for track_info in hooks.item_candidates(item, search_artist, search_title): for track_info in hooks.item_candidates(item, search_artist, search_title):
@ -515,7 +520,7 @@ def tag_item(item, search_artist=None, search_title=None,
candidates[track_info.track_id] = hooks.TrackMatch(dist, track_info) candidates[track_info.track_id] = hooks.TrackMatch(dist, track_info)
# Sort by distance and return with recommendation. # Sort by distance and return with recommendation.
log.debug(u'Found {0} candidates.', len(candidates)) log.debug('Found {0} candidates.', len(candidates))
candidates = _sort_candidates(candidates.values()) candidates = _sort_candidates(candidates.values())
rec = _recommendation(candidates) rec = _recommendation(candidates)
return Proposal(candidates, rec) return Proposal(candidates, rec)

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2016, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
@ -15,57 +14,72 @@
"""Searches for albums in the MusicBrainz database. """Searches for albums in the MusicBrainz database.
""" """
from __future__ import division, absolute_import, print_function
import musicbrainzngs import musicbrainzngs
import re import re
import traceback import traceback
from six.moves.urllib.parse import urljoin
from beets import logging from beets import logging
from beets import plugins
import beets.autotag.hooks import beets.autotag.hooks
import beets import beets
from beets import util from beets import util
from beets import config from beets import config
import six from collections import Counter
from urllib.parse import urljoin
VARIOUS_ARTISTS_ID = '89ad4ac3-39f7-470e-963a-56509c546377' VARIOUS_ARTISTS_ID = '89ad4ac3-39f7-470e-963a-56509c546377'
if util.SNI_SUPPORTED: BASE_URL = 'https://musicbrainz.org/'
BASE_URL = 'https://musicbrainz.org/'
else:
BASE_URL = 'http://musicbrainz.org/'
SKIPPED_TRACKS = ['[data track]'] SKIPPED_TRACKS = ['[data track]']
FIELDS_TO_MB_KEYS = {
'catalognum': 'catno',
'country': 'country',
'label': 'label',
'media': 'format',
'year': 'date',
}
musicbrainzngs.set_useragent('beets', beets.__version__, musicbrainzngs.set_useragent('beets', beets.__version__,
'http://beets.io/') 'https://beets.io/')
class MusicBrainzAPIError(util.HumanReadableException): class MusicBrainzAPIError(util.HumanReadableException):
"""An error while talking to MusicBrainz. The `query` field is the """An error while talking to MusicBrainz. The `query` field is the
parameter to the action and may have any type. parameter to the action and may have any type.
""" """
def __init__(self, reason, verb, query, tb=None): def __init__(self, reason, verb, query, tb=None):
self.query = query self.query = query
if isinstance(reason, musicbrainzngs.WebServiceError): if isinstance(reason, musicbrainzngs.WebServiceError):
reason = u'MusicBrainz not reachable' reason = 'MusicBrainz not reachable'
super(MusicBrainzAPIError, self).__init__(reason, verb, tb) super().__init__(reason, verb, tb)
def get_message(self): def get_message(self):
return u'{0} in {1} with query {2}'.format( return '{} in {} with query {}'.format(
self._reasonstr(), self.verb, repr(self.query) self._reasonstr(), self.verb, repr(self.query)
) )
log = logging.getLogger('beets') log = logging.getLogger('beets')
RELEASE_INCLUDES = ['artists', 'media', 'recordings', 'release-groups', RELEASE_INCLUDES = ['artists', 'media', 'recordings', 'release-groups',
'labels', 'artist-credits', 'aliases', 'labels', 'artist-credits', 'aliases',
'recording-level-rels', 'work-rels', 'recording-level-rels', 'work-rels',
'work-level-rels', 'artist-rels'] 'work-level-rels', 'artist-rels', 'isrcs']
TRACK_INCLUDES = ['artists', 'aliases'] BROWSE_INCLUDES = ['artist-credits', 'work-rels',
'artist-rels', 'recording-rels', 'release-rels']
if "work-level-rels" in musicbrainzngs.VALID_BROWSE_INCLUDES['recording']:
BROWSE_INCLUDES.append("work-level-rels")
BROWSE_CHUNKSIZE = 100
BROWSE_MAXTRACKS = 500
TRACK_INCLUDES = ['artists', 'aliases', 'isrcs']
if 'work-level-rels' in musicbrainzngs.VALID_INCLUDES['recording']: if 'work-level-rels' in musicbrainzngs.VALID_INCLUDES['recording']:
TRACK_INCLUDES += ['work-level-rels', 'artist-rels'] TRACK_INCLUDES += ['work-level-rels', 'artist-rels']
if 'genres' in musicbrainzngs.VALID_INCLUDES['recording']:
RELEASE_INCLUDES += ['genres']
def track_url(trackid): def track_url(trackid):
@ -81,7 +95,11 @@ def configure():
from the beets configuration. This should be called at startup. from the beets configuration. This should be called at startup.
""" """
hostname = config['musicbrainz']['host'].as_str() hostname = config['musicbrainz']['host'].as_str()
musicbrainzngs.set_hostname(hostname) https = config['musicbrainz']['https'].get(bool)
# Only call set_hostname when a custom server is configured. Since
# musicbrainz-ngs connects to musicbrainz.org with HTTPS by default
if hostname != "musicbrainz.org":
musicbrainzngs.set_hostname(hostname, https)
musicbrainzngs.set_rate_limit( musicbrainzngs.set_rate_limit(
config['musicbrainz']['ratelimit_interval'].as_number(), config['musicbrainz']['ratelimit_interval'].as_number(),
config['musicbrainz']['ratelimit'].get(int), config['musicbrainz']['ratelimit'].get(int),
@ -138,7 +156,7 @@ def _flatten_artist_credit(credit):
artist_sort_parts = [] artist_sort_parts = []
artist_credit_parts = [] artist_credit_parts = []
for el in credit: for el in credit:
if isinstance(el, six.string_types): if isinstance(el, str):
# Join phrase. # Join phrase.
artist_parts.append(el) artist_parts.append(el)
artist_credit_parts.append(el) artist_credit_parts.append(el)
@ -185,13 +203,13 @@ def track_info(recording, index=None, medium=None, medium_index=None,
the number of tracks on the medium. Each number is a 1-based index. the number of tracks on the medium. Each number is a 1-based index.
""" """
info = beets.autotag.hooks.TrackInfo( info = beets.autotag.hooks.TrackInfo(
recording['title'], title=recording['title'],
recording['id'], track_id=recording['id'],
index=index, index=index,
medium=medium, medium=medium,
medium_index=medium_index, medium_index=medium_index,
medium_total=medium_total, medium_total=medium_total,
data_source=u'MusicBrainz', data_source='MusicBrainz',
data_url=track_url(recording['id']), data_url=track_url(recording['id']),
) )
@ -207,12 +225,22 @@ def track_info(recording, index=None, medium=None, medium_index=None,
if recording.get('length'): if recording.get('length'):
info.length = int(recording['length']) / (1000.0) info.length = int(recording['length']) / (1000.0)
info.trackdisambig = recording.get('disambiguation')
if recording.get('isrc-list'):
info.isrc = ';'.join(recording['isrc-list'])
lyricist = [] lyricist = []
composer = [] composer = []
composer_sort = [] composer_sort = []
for work_relation in recording.get('work-relation-list', ()): for work_relation in recording.get('work-relation-list', ()):
if work_relation['type'] != 'performance': if work_relation['type'] != 'performance':
continue continue
info.work = work_relation['work']['title']
info.mb_workid = work_relation['work']['id']
if 'disambiguation' in work_relation['work']:
info.work_disambig = work_relation['work']['disambiguation']
for artist_relation in work_relation['work'].get( for artist_relation in work_relation['work'].get(
'artist-relation-list', ()): 'artist-relation-list', ()):
if 'type' in artist_relation: if 'type' in artist_relation:
@ -224,10 +252,10 @@ def track_info(recording, index=None, medium=None, medium_index=None,
composer_sort.append( composer_sort.append(
artist_relation['artist']['sort-name']) artist_relation['artist']['sort-name'])
if lyricist: if lyricist:
info.lyricist = u', '.join(lyricist) info.lyricist = ', '.join(lyricist)
if composer: if composer:
info.composer = u', '.join(composer) info.composer = ', '.join(composer)
info.composer_sort = u', '.join(composer_sort) info.composer_sort = ', '.join(composer_sort)
arranger = [] arranger = []
for artist_relation in recording.get('artist-relation-list', ()): for artist_relation in recording.get('artist-relation-list', ()):
@ -236,7 +264,12 @@ def track_info(recording, index=None, medium=None, medium_index=None,
if type == 'arranger': if type == 'arranger':
arranger.append(artist_relation['artist']['name']) arranger.append(artist_relation['artist']['name'])
if arranger: if arranger:
info.arranger = u', '.join(arranger) info.arranger = ', '.join(arranger)
# Supplementary fields provided by plugins
extra_trackdatas = plugins.send('mb_track_extract', data=recording)
for extra_trackdata in extra_trackdatas:
info.update(extra_trackdata)
info.decode() info.decode()
return info return info
@ -270,6 +303,26 @@ def album_info(release):
artist_name, artist_sort_name, artist_credit_name = \ artist_name, artist_sort_name, artist_credit_name = \
_flatten_artist_credit(release['artist-credit']) _flatten_artist_credit(release['artist-credit'])
ntracks = sum(len(m['track-list']) for m in release['medium-list'])
# The MusicBrainz API omits 'artist-relation-list' and 'work-relation-list'
# when the release has more than 500 tracks. So we use browse_recordings
# on chunks of tracks to recover the same information in this case.
if ntracks > BROWSE_MAXTRACKS:
log.debug('Album {} has too many tracks', release['id'])
recording_list = []
for i in range(0, ntracks, BROWSE_CHUNKSIZE):
log.debug('Retrieving tracks starting at {}', i)
recording_list.extend(musicbrainzngs.browse_recordings(
release=release['id'], limit=BROWSE_CHUNKSIZE,
includes=BROWSE_INCLUDES,
offset=i)['recording-list'])
track_map = {r['id']: r for r in recording_list}
for medium in release['medium-list']:
for recording in medium['track-list']:
recording_info = track_map[recording['recording']['id']]
recording['recording'] = recording_info
# Basic info. # Basic info.
track_infos = [] track_infos = []
index = 0 index = 0
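
The comment a few lines above explains why very large releases need a second pass: the web service omits the relation lists when a release has more than BROWSE_MAXTRACKS tracks. A hedged sketch of the same chunked lookup, assuming musicbrainzngs is already configured; the helper name browse_all_recordings is illustrative.

import musicbrainzngs

def browse_all_recordings(release_id, ntracks, chunk=100, includes=None):
    # Page through the release's recordings chunk by chunk, as album_info()
    # does above, and return them keyed by recording ID.
    recordings = []
    for offset in range(0, ntracks, chunk):
        recordings.extend(musicbrainzngs.browse_recordings(
            release=release_id, limit=chunk,
            includes=includes or [], offset=offset)['recording-list'])
    return {r['id']: r for r in recordings}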
@ -281,7 +334,8 @@ def album_info(release):
continue continue
all_tracks = medium['track-list'] all_tracks = medium['track-list']
if 'data-track-list' in medium: if ('data-track-list' in medium
and not config['match']['ignore_data_tracks']):
all_tracks += medium['data-track-list'] all_tracks += medium['data-track-list']
track_count = len(all_tracks) track_count = len(all_tracks)
@ -327,15 +381,15 @@ def album_info(release):
track_infos.append(ti) track_infos.append(ti)
info = beets.autotag.hooks.AlbumInfo( info = beets.autotag.hooks.AlbumInfo(
release['title'], album=release['title'],
release['id'], album_id=release['id'],
artist_name, artist=artist_name,
release['artist-credit'][0]['artist']['id'], artist_id=release['artist-credit'][0]['artist']['id'],
track_infos, tracks=track_infos,
mediums=len(release['medium-list']), mediums=len(release['medium-list']),
artist_sort=artist_sort_name, artist_sort=artist_sort_name,
artist_credit=artist_credit_name, artist_credit=artist_credit_name,
data_source=u'MusicBrainz', data_source='MusicBrainz',
data_url=album_url(release['id']), data_url=album_url(release['id']),
) )
info.va = info.artist_id == VARIOUS_ARTISTS_ID info.va = info.artist_id == VARIOUS_ARTISTS_ID
@ -345,13 +399,12 @@ def album_info(release):
info.releasegroup_id = release['release-group']['id'] info.releasegroup_id = release['release-group']['id']
info.albumstatus = release.get('status') info.albumstatus = release.get('status')
# Build up the disambiguation string from the release group and release. # Get the disambiguation strings at the release and release group level.
disambig = []
if release['release-group'].get('disambiguation'): if release['release-group'].get('disambiguation'):
disambig.append(release['release-group'].get('disambiguation')) info.releasegroupdisambig = \
release['release-group'].get('disambiguation')
if release.get('disambiguation'): if release.get('disambiguation'):
disambig.append(release.get('disambiguation')) info.albumdisambig = release.get('disambiguation')
info.albumdisambig = u', '.join(disambig)
# Get the "classic" Release type. This data comes from a legacy API # Get the "classic" Release type. This data comes from a legacy API
# feature before MusicBrainz supported multiple release types. # feature before MusicBrainz supported multiple release types.
@ -360,18 +413,17 @@ def album_info(release):
if reltype: if reltype:
info.albumtype = reltype.lower() info.albumtype = reltype.lower()
# Log the new-style "primary" and "secondary" release types. # Set the new-style "primary" and "secondary" release types.
# Eventually, we'd like to actually store this data, but we just log albumtypes = []
# it for now to help understand the differences.
if 'primary-type' in release['release-group']: if 'primary-type' in release['release-group']:
rel_primarytype = release['release-group']['primary-type'] rel_primarytype = release['release-group']['primary-type']
if rel_primarytype: if rel_primarytype:
log.debug('primary MB release type: ' + rel_primarytype.lower()) albumtypes.append(rel_primarytype.lower())
if 'secondary-type-list' in release['release-group']: if 'secondary-type-list' in release['release-group']:
if release['release-group']['secondary-type-list']: if release['release-group']['secondary-type-list']:
log.debug('secondary MB release type(s): ' + ', '.join( for sec_type in release['release-group']['secondary-type-list']:
[secondarytype.lower() for secondarytype in albumtypes.append(sec_type.lower())
release['release-group']['secondary-type-list']])) info.albumtypes = '; '.join(albumtypes)
# Release events. # Release events.
info.country, release_date = _preferred_release_event(release) info.country, release_date = _preferred_release_event(release)
@ -402,17 +454,33 @@ def album_info(release):
first_medium = release['medium-list'][0] first_medium = release['medium-list'][0]
info.media = first_medium.get('format') info.media = first_medium.get('format')
if config['musicbrainz']['genres']:
sources = [
release['release-group'].get('genre-list', []),
release.get('genre-list', []),
]
genres = Counter()
for source in sources:
for genreitem in source:
genres[genreitem['name']] += int(genreitem['count'])
info.genre = '; '.join(g[0] for g in sorted(genres.items(),
key=lambda g: -g[1]))
extra_albumdatas = plugins.send('mb_album_extract', data=release)
for extra_albumdata in extra_albumdatas:
info.update(extra_albumdata)
info.decode() info.decode()
return info return info
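
A self-contained sketch of the genre aggregation introduced a few lines above: genre votes from the release group and the release are merged with a Counter and joined most-voted first. The sample data is made up.

from collections import Counter

release_group_genres = [{'name': 'rock', 'count': 4}, {'name': 'pop', 'count': 1}]
release_genres = [{'name': 'rock', 'count': 2}, {'name': 'indie', 'count': 3}]

genres = Counter()
for source in (release_group_genres, release_genres):
    for genreitem in source:
        genres[genreitem['name']] += int(genreitem['count'])

# Highest vote count first, joined the same way album_info() does above.
print('; '.join(g[0] for g in sorted(genres.items(), key=lambda g: -g[1])))
# rock; indie; pop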
def match_album(artist, album, tracks=None): def match_album(artist, album, tracks=None, extra_tags=None):
"""Searches for a single album ("release" in MusicBrainz parlance) """Searches for a single album ("release" in MusicBrainz parlance)
and returns an iterator over AlbumInfo objects. May raise a and returns an iterator over AlbumInfo objects. May raise a
MusicBrainzAPIError. MusicBrainzAPIError.
The query consists of an artist name, an album name, and, The query consists of an artist name, an album name, and,
optionally, a number of tracks on the album. optionally, a number of tracks on the album and any other extra tags.
""" """
# Build search criteria. # Build search criteria.
criteria = {'release': album.lower().strip()} criteria = {'release': album.lower().strip()}
@ -422,14 +490,24 @@ def match_album(artist, album, tracks=None):
# Various Artists search. # Various Artists search.
criteria['arid'] = VARIOUS_ARTISTS_ID criteria['arid'] = VARIOUS_ARTISTS_ID
if tracks is not None: if tracks is not None:
criteria['tracks'] = six.text_type(tracks) criteria['tracks'] = str(tracks)
# Additional search cues from existing metadata.
if extra_tags:
for tag in extra_tags:
key = FIELDS_TO_MB_KEYS[tag]
value = str(extra_tags.get(tag, '')).lower().strip()
if key == 'catno':
value = value.replace(' ', '')
if value:
criteria[key] = value
# Abort if we have no search terms. # Abort if we have no search terms.
if not any(criteria.values()): if not any(criteria.values()):
return return
try: try:
log.debug(u'Searching for MusicBrainz releases with: {!r}', criteria) log.debug('Searching for MusicBrainz releases with: {!r}', criteria)
res = musicbrainzngs.search_releases( res = musicbrainzngs.search_releases(
limit=config['musicbrainz']['searchlimit'].get(int), **criteria) limit=config['musicbrainz']['searchlimit'].get(int), **criteria)
except musicbrainzngs.MusicBrainzError as exc: except musicbrainzngs.MusicBrainzError as exc:
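
A standalone sketch of the criteria-building step above: existing tag values are lowercased and stripped, mapped to MusicBrainz search keys via FIELDS_TO_MB_KEYS, and catalog numbers lose their spaces. The mapping is copied from the hunk earlier in this diff; the sample values and the helper name extra_criteria are made up.

FIELDS_TO_MB_KEYS = {
    'catalognum': 'catno',
    'country': 'country',
    'label': 'label',
    'media': 'format',
    'year': 'date',
}

def extra_criteria(extra_tags):
    criteria = {}
    for tag, value in extra_tags.items():
        key = FIELDS_TO_MB_KEYS[tag]
        value = str(value).lower().strip()
        if key == 'catno':
            value = value.replace(' ', '')
        if value:
            criteria[key] = value
    return criteria

print(extra_criteria({'year': 1994, 'catalognum': 'TPLP 25 CD', 'label': 'One Little Indian'}))
# {'date': '1994', 'catno': 'tplp25cd', 'label': 'one little indian'}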
@ -470,7 +548,7 @@ def _parse_id(s):
no ID can be found, return None. no ID can be found, return None.
""" """
# Find the first thing that looks like a UUID/MBID. # Find the first thing that looks like a UUID/MBID.
match = re.search(u'[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s) match = re.search('[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s)
if match: if match:
return match.group() return match.group()
@ -480,19 +558,19 @@ def album_for_id(releaseid):
object or None if the album is not found. May raise a object or None if the album is not found. May raise a
MusicBrainzAPIError. MusicBrainzAPIError.
""" """
log.debug(u'Requesting MusicBrainz release {}', releaseid) log.debug('Requesting MusicBrainz release {}', releaseid)
albumid = _parse_id(releaseid) albumid = _parse_id(releaseid)
if not albumid: if not albumid:
log.debug(u'Invalid MBID ({0}).', releaseid) log.debug('Invalid MBID ({0}).', releaseid)
return return
try: try:
res = musicbrainzngs.get_release_by_id(albumid, res = musicbrainzngs.get_release_by_id(albumid,
RELEASE_INCLUDES) RELEASE_INCLUDES)
except musicbrainzngs.ResponseError: except musicbrainzngs.ResponseError:
log.debug(u'Album ID match failed.') log.debug('Album ID match failed.')
return None return None
except musicbrainzngs.MusicBrainzError as exc: except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, u'get release by ID', albumid, raise MusicBrainzAPIError(exc, 'get release by ID', albumid,
traceback.format_exc()) traceback.format_exc())
return album_info(res['release']) return album_info(res['release'])
@ -503,14 +581,14 @@ def track_for_id(releaseid):
""" """
trackid = _parse_id(releaseid) trackid = _parse_id(releaseid)
if not trackid: if not trackid:
log.debug(u'Invalid MBID ({0}).', releaseid) log.debug('Invalid MBID ({0}).', releaseid)
return return
try: try:
res = musicbrainzngs.get_recording_by_id(trackid, TRACK_INCLUDES) res = musicbrainzngs.get_recording_by_id(trackid, TRACK_INCLUDES)
except musicbrainzngs.ResponseError: except musicbrainzngs.ResponseError:
log.debug(u'Track ID match failed.') log.debug('Track ID match failed.')
return None return None
except musicbrainzngs.MusicBrainzError as exc: except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, u'get recording by ID', trackid, raise MusicBrainzAPIError(exc, 'get recording by ID', trackid,
traceback.format_exc()) traceback.format_exc())
return track_info(res['recording']) return track_info(res['recording'])

View file

@ -7,6 +7,7 @@ import:
move: no move: no
link: no link: no
hardlink: no hardlink: no
reflink: no
delete: no delete: no
resume: ask resume: ask
incremental: no incremental: no
@ -44,10 +45,20 @@ replace:
'^\s+': '' '^\s+': ''
'^-': _ '^-': _
path_sep_replace: _ path_sep_replace: _
drive_sep_replace: _
asciify_paths: false asciify_paths: false
art_filename: cover art_filename: cover
max_filename_length: 0 max_filename_length: 0
aunique:
keys: albumartist album
disambiguators: albumtype year label catalognum albumdisambig releasegroupdisambig
bracket: '[]'
overwrite_null:
album: []
track: []
plugins: [] plugins: []
pluginpath: [] pluginpath: []
threaded: yes threaded: yes
@ -91,9 +102,12 @@ statefile: state.pickle
musicbrainz: musicbrainz:
host: musicbrainz.org host: musicbrainz.org
https: no
ratelimit: 1 ratelimit: 1
ratelimit_interval: 1.0 ratelimit_interval: 1.0
searchlimit: 5 searchlimit: 5
extra_tags: []
genres: no
match: match:
strong_rec_thresh: 0.04 strong_rec_thresh: 0.04
@ -129,6 +143,7 @@ match:
ignored: [] ignored: []
required: [] required: []
ignored_media: [] ignored_media: []
ignore_data_tracks: yes
ignore_video_tracks: yes ignore_video_tracks: yes
track_length_grace: 10 track_length_grace: 10
track_length_max: 30 track_length_max: 30
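
The new defaults above are consumed elsewhere in this diff through beets' config object; a hedged sketch of those reads, mirroring the accessors used in the MusicBrainz and matching hunks.

from beets import config

use_https = config['musicbrainz']['https'].get(bool)    # only applied for custom hosts
tag_list = config['musicbrainz']['extra_tags'].get()    # e.g. ['year', 'label']
want_genres = config['musicbrainz']['genres'].get(bool)
keep_data_tracks = not config['match']['ignore_data_tracks'].get(bool)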

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2016, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
@ -16,7 +15,6 @@
"""DBCore is an abstract database package that forms the basis for beets' """DBCore is an abstract database package that forms the basis for beets'
Library. Library.
""" """
from __future__ import division, absolute_import, print_function
from .db import Model, Database from .db import Model, Database
from .query import Query, FieldQuery, MatchQuery, AndQuery, OrQuery from .query import Query, FieldQuery, MatchQuery, AndQuery, OrQuery

View file

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2016, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
@ -15,22 +14,21 @@
"""The central Model and Database constructs for DBCore. """The central Model and Database constructs for DBCore.
""" """
from __future__ import division, absolute_import, print_function
import time import time
import os import os
import re
from collections import defaultdict from collections import defaultdict
import threading import threading
import sqlite3 import sqlite3
import contextlib import contextlib
import collections
import beets import beets
from beets.util.functemplate import Template from beets.util import functemplate
from beets.util import py3_path from beets.util import py3_path
from beets.dbcore import types from beets.dbcore import types
from .query import MatchQuery, NullSort, TrueQuery from .query import MatchQuery, NullSort, TrueQuery
import six from collections.abc import Mapping
class DBAccessError(Exception): class DBAccessError(Exception):
@ -42,20 +40,30 @@ class DBAccessError(Exception):
""" """
class FormattedMapping(collections.Mapping): class FormattedMapping(Mapping):
"""A `dict`-like formatted view of a model. """A `dict`-like formatted view of a model.
The accessor `mapping[key]` returns the formatted version of The accessor `mapping[key]` returns the formatted version of
`model[key]` as a unicode string. `model[key]` as a unicode string.
The `included_keys` parameter allows filtering the fields that are
returned. By default all fields are returned. Limiting to specific keys can
avoid expensive per-item database queries.
If `for_path` is true, all path separators in the formatted values If `for_path` is true, all path separators in the formatted values
are replaced. are replaced.
""" """
def __init__(self, model, for_path=False): ALL_KEYS = '*'
def __init__(self, model, included_keys=ALL_KEYS, for_path=False):
self.for_path = for_path self.for_path = for_path
self.model = model self.model = model
self.model_keys = model.keys(True) if included_keys == self.ALL_KEYS:
# Performance note: this triggers a database query.
self.model_keys = self.model.keys(True)
else:
self.model_keys = included_keys
def __getitem__(self, key): def __getitem__(self, key):
if key in self.model_keys: if key in self.model_keys:
@ -72,7 +80,7 @@ class FormattedMapping(collections.Mapping):
def get(self, key, default=None): def get(self, key, default=None):
if default is None: if default is None:
default = self.model._type(key).format(None) default = self.model._type(key).format(None)
return super(FormattedMapping, self).get(key, default) return super().get(key, default)
def _get_formatted(self, model, key): def _get_formatted(self, model, key):
value = model._type(key).format(model.get(key)) value = model._type(key).format(model.get(key))
@ -81,6 +89,11 @@ class FormattedMapping(collections.Mapping):
if self.for_path: if self.for_path:
sep_repl = beets.config['path_sep_replace'].as_str() sep_repl = beets.config['path_sep_replace'].as_str()
sep_drive = beets.config['drive_sep_replace'].as_str()
if re.match(r'^\w:', value):
value = re.sub(r'(?<=^\w):', sep_drive, value)
for sep in (os.path.sep, os.path.altsep): for sep in (os.path.sep, os.path.altsep):
if sep: if sep:
value = value.replace(sep, sep_repl) value = value.replace(sep, sep_repl)
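
A standalone illustration of the drive-separator handling added above, using '_' (the new drive_sep_replace default) on a Windows-style value; the sample string is made up.

import re

value = 'C:\\Music\\AC-DC'
sep_drive = '_'  # the drive_sep_replace default added in this diff
if re.match(r'^\w:', value):
    value = re.sub(r'(?<=^\w):', sep_drive, value)
print(value)  # C_\Music\AC-DC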
@ -88,11 +101,105 @@ class FormattedMapping(collections.Mapping):
return value return value
class LazyConvertDict:
"""Lazily convert types for attributes fetched from the database
"""
def __init__(self, model_cls):
"""Initialize the object empty
"""
self.data = {}
self.model_cls = model_cls
self._converted = {}
def init(self, data):
"""Set the base data that should be lazily converted
"""
self.data = data
def _convert(self, key, value):
"""Convert the attribute type according the the SQL type
"""
return self.model_cls._type(key).from_sql(value)
def __setitem__(self, key, value):
"""Set an attribute value, assume it's already converted
"""
self._converted[key] = value
def __getitem__(self, key):
"""Get an attribute value, converting the type on demand
if needed
"""
if key in self._converted:
return self._converted[key]
elif key in self.data:
value = self._convert(key, self.data[key])
self._converted[key] = value
return value
def __delitem__(self, key):
"""Delete both converted and base data
"""
if key in self._converted:
del self._converted[key]
if key in self.data:
del self.data[key]
def keys(self):
"""Get a list of available field names for this object.
"""
return list(self._converted.keys()) + list(self.data.keys())
def copy(self):
"""Create a copy of the object.
"""
new = self.__class__(self.model_cls)
new.data = self.data.copy()
new._converted = self._converted.copy()
return new
# Act like a dictionary.
def update(self, values):
"""Assign all values in the given dict.
"""
for key, value in values.items():
self[key] = value
def items(self):
"""Iterate over (key, value) pairs that this object contains.
Computed fields are not included.
"""
for key in self:
yield key, self[key]
def get(self, key, default=None):
"""Get the value for a given key or `default` if it does not
exist.
"""
if key in self:
return self[key]
else:
return default
def __contains__(self, key):
"""Determine whether `key` is an attribute on this object.
"""
return key in self.keys()
def __iter__(self):
"""Iterate over the available field names (excluding computed
fields).
"""
return iter(self.keys())
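
A small, self-contained demonstration of the laziness this class buys: raw database values are kept as-is and from_sql() only runs the first time a key is read. The stub type and model below are illustrative stand-ins, not beets classes, and assume the LazyConvertDict definition above is in scope.

class StubIntType:
    calls = 0
    def from_sql(self, sql_value):
        StubIntType.calls += 1          # count conversions for the demo
        return int(sql_value)

class StubModel:
    @classmethod
    def _type(cls, key):
        return StubIntType()

values = LazyConvertDict(StubModel)
values.init({'bitrate': '320', 'track': '7'})   # nothing converted yet
print(StubIntType.calls)    # 0
print(values['bitrate'])    # 320, converted on first access
print(values['bitrate'])    # cached, no second conversion
print(StubIntType.calls)    # 1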
# Abstract base for model classes. # Abstract base for model classes.
class Model(object): class Model:
"""An abstract object representing an object in the database. Model """An abstract object representing an object in the database. Model
objects act like dictionaries (i.e., the allow subscript access like objects act like dictionaries (i.e., they allow subscript access like
``obj['field']``). The same field set is available via attribute ``obj['field']``). The same field set is available via attribute
access as a shortcut (i.e., ``obj.field``). Three kinds of attributes are access as a shortcut (i.e., ``obj.field``). Three kinds of attributes are
available: available:
@ -143,12 +250,22 @@ class Model(object):
are subclasses of `Sort`. are subclasses of `Sort`.
""" """
_queries = {}
"""Named queries that use a field-like `name:value` syntax but which
do not relate to any specific field.
"""
_always_dirty = False _always_dirty = False
"""By default, fields only become "dirty" when their value actually """By default, fields only become "dirty" when their value actually
changes. Enabling this flag marks fields as dirty even when the new changes. Enabling this flag marks fields as dirty even when the new
value is the same as the old value (e.g., `o.f = o.f`). value is the same as the old value (e.g., `o.f = o.f`).
""" """
_revision = -1
"""A revision number from when the model was loaded from or written
to the database.
"""
@classmethod @classmethod
def _getters(cls): def _getters(cls):
"""Return a mapping from field names to getter functions. """Return a mapping from field names to getter functions.
@ -172,8 +289,8 @@ class Model(object):
""" """
self._db = db self._db = db
self._dirty = set() self._dirty = set()
self._values_fixed = {} self._values_fixed = LazyConvertDict(self)
self._values_flex = {} self._values_flex = LazyConvertDict(self)
# Initial contents. # Initial contents.
self.update(values) self.update(values)
@ -187,23 +304,25 @@ class Model(object):
ordinary construction are bypassed. ordinary construction are bypassed.
""" """
obj = cls(db) obj = cls(db)
for key, value in fixed_values.items():
obj._values_fixed[key] = cls._type(key).from_sql(value) obj._values_fixed.init(fixed_values)
for key, value in flex_values.items(): obj._values_flex.init(flex_values)
obj._values_flex[key] = cls._type(key).from_sql(value)
return obj return obj
def __repr__(self): def __repr__(self):
return '{0}({1})'.format( return '{}({})'.format(
type(self).__name__, type(self).__name__,
', '.join('{0}={1!r}'.format(k, v) for k, v in dict(self).items()), ', '.join(f'{k}={v!r}' for k, v in dict(self).items()),
) )
def clear_dirty(self): def clear_dirty(self):
"""Mark all fields as *clean* (i.e., not needing to be stored to """Mark all fields as *clean* (i.e., not needing to be stored to
the database). the database). Also update the revision.
""" """
self._dirty = set() self._dirty = set()
if self._db:
self._revision = self._db.revision
def _check_db(self, need_id=True): def _check_db(self, need_id=True):
"""Ensure that this object is associated with a database row: it """Ensure that this object is associated with a database row: it
@ -212,10 +331,10 @@ class Model(object):
""" """
if not self._db: if not self._db:
raise ValueError( raise ValueError(
u'{0} has no database'.format(type(self).__name__) '{} has no database'.format(type(self).__name__)
) )
if need_id and not self.id: if need_id and not self.id:
raise ValueError(u'{0} has no id'.format(type(self).__name__)) raise ValueError('{} has no id'.format(type(self).__name__))
def copy(self): def copy(self):
"""Create a copy of the model object. """Create a copy of the model object.
@ -243,19 +362,32 @@ class Model(object):
""" """
return cls._fields.get(key) or cls._types.get(key) or types.DEFAULT return cls._fields.get(key) or cls._types.get(key) or types.DEFAULT
def __getitem__(self, key): def _get(self, key, default=None, raise_=False):
"""Get the value for a field. Raise a KeyError if the field is """Get the value for a field, or `default`. Alternatively,
not available. raise a KeyError if the field is not available.
""" """
getters = self._getters() getters = self._getters()
if key in getters: # Computed. if key in getters: # Computed.
return getters[key](self) return getters[key](self)
elif key in self._fields: # Fixed. elif key in self._fields: # Fixed.
return self._values_fixed.get(key, self._type(key).null) if key in self._values_fixed:
return self._values_fixed[key]
else:
return self._type(key).null
elif key in self._values_flex: # Flexible. elif key in self._values_flex: # Flexible.
return self._values_flex[key] return self._values_flex[key]
else: elif raise_:
raise KeyError(key) raise KeyError(key)
else:
return default
get = _get
def __getitem__(self, key):
"""Get the value for a field. Raise a KeyError if the field is
not available.
"""
return self._get(key, raise_=True)
def _setitem(self, key, value): def _setitem(self, key, value):
"""Assign the value for a field, return whether new and old value """Assign the value for a field, return whether new and old value
@ -290,12 +422,12 @@ class Model(object):
if key in self._values_flex: # Flexible. if key in self._values_flex: # Flexible.
del self._values_flex[key] del self._values_flex[key]
self._dirty.add(key) # Mark for dropping on store. self._dirty.add(key) # Mark for dropping on store.
elif key in self._fields: # Fixed
setattr(self, key, self._type(key).null)
elif key in self._getters(): # Computed. elif key in self._getters(): # Computed.
raise KeyError(u'computed field {0} cannot be deleted'.format(key)) raise KeyError(f'computed field {key} cannot be deleted')
elif key in self._fields: # Fixed.
raise KeyError(u'fixed field {0} cannot be deleted'.format(key))
else: else:
raise KeyError(u'no such field {0}'.format(key)) raise KeyError(f'no such field {key}')
def keys(self, computed=False): def keys(self, computed=False):
"""Get a list of available field names for this object. The """Get a list of available field names for this object. The
@ -330,19 +462,10 @@ class Model(object):
for key in self: for key in self:
yield key, self[key] yield key, self[key]
def get(self, key, default=None):
"""Get the value for a given key or `default` if it does not
exist.
"""
if key in self:
return self[key]
else:
return default
def __contains__(self, key): def __contains__(self, key):
"""Determine whether `key` is an attribute on this object. """Determine whether `key` is an attribute on this object.
""" """
return key in self.keys(True) return key in self.keys(computed=True)
def __iter__(self): def __iter__(self):
"""Iterate over the available field names (excluding computed """Iterate over the available field names (excluding computed
@ -354,22 +477,22 @@ class Model(object):
def __getattr__(self, key): def __getattr__(self, key):
if key.startswith('_'): if key.startswith('_'):
raise AttributeError(u'model has no attribute {0!r}'.format(key)) raise AttributeError(f'model has no attribute {key!r}')
else: else:
try: try:
return self[key] return self[key]
except KeyError: except KeyError:
raise AttributeError(u'no such field {0!r}'.format(key)) raise AttributeError(f'no such field {key!r}')
def __setattr__(self, key, value): def __setattr__(self, key, value):
if key.startswith('_'): if key.startswith('_'):
super(Model, self).__setattr__(key, value) super().__setattr__(key, value)
else: else:
self[key] = value self[key] = value
def __delattr__(self, key): def __delattr__(self, key):
if key.startswith('_'): if key.startswith('_'):
super(Model, self).__delattr__(key) super().__delattr__(key)
else: else:
del self[key] del self[key]
@ -398,7 +521,7 @@ class Model(object):
with self._db.transaction() as tx: with self._db.transaction() as tx:
# Main table update. # Main table update.
if assignments: if assignments:
query = 'UPDATE {0} SET {1} WHERE id=?'.format( query = 'UPDATE {} SET {} WHERE id=?'.format(
self._table, assignments self._table, assignments
) )
subvars.append(self.id) subvars.append(self.id)
@ -409,7 +532,7 @@ class Model(object):
if key in self._dirty: if key in self._dirty:
self._dirty.remove(key) self._dirty.remove(key)
tx.mutate( tx.mutate(
'INSERT INTO {0} ' 'INSERT INTO {} '
'(entity_id, key, value) ' '(entity_id, key, value) '
'VALUES (?, ?, ?);'.format(self._flex_table), 'VALUES (?, ?, ?);'.format(self._flex_table),
(self.id, key, value), (self.id, key, value),
@ -418,7 +541,7 @@ class Model(object):
# Deleted flexible attributes. # Deleted flexible attributes.
for key in self._dirty: for key in self._dirty:
tx.mutate( tx.mutate(
'DELETE FROM {0} ' 'DELETE FROM {} '
'WHERE entity_id=? AND key=?'.format(self._flex_table), 'WHERE entity_id=? AND key=?'.format(self._flex_table),
(self.id, key) (self.id, key)
) )
@ -427,12 +550,18 @@ class Model(object):
def load(self): def load(self):
"""Refresh the object's metadata from the library database. """Refresh the object's metadata from the library database.
The database is only queried when a transaction has been
committed since the item was last loaded.
""" """
self._check_db() self._check_db()
if not self._dirty and self._db.revision == self._revision:
# Exit early
return
stored_obj = self._db._get(type(self), self.id) stored_obj = self._db._get(type(self), self.id)
assert stored_obj is not None, u"object {0} not in DB".format(self.id) assert stored_obj is not None, f"object {self.id} not in DB"
self._values_fixed = {} self._values_fixed = LazyConvertDict(self)
self._values_flex = {} self._values_flex = LazyConvertDict(self)
self.update(dict(stored_obj)) self.update(dict(stored_obj))
self.clear_dirty() self.clear_dirty()
@ -442,11 +571,11 @@ class Model(object):
self._check_db() self._check_db()
with self._db.transaction() as tx: with self._db.transaction() as tx:
tx.mutate( tx.mutate(
'DELETE FROM {0} WHERE id=?'.format(self._table), f'DELETE FROM {self._table} WHERE id=?',
(self.id,) (self.id,)
) )
tx.mutate( tx.mutate(
'DELETE FROM {0} WHERE entity_id=?'.format(self._flex_table), f'DELETE FROM {self._flex_table} WHERE entity_id=?',
(self.id,) (self.id,)
) )
@ -464,7 +593,7 @@ class Model(object):
with self._db.transaction() as tx: with self._db.transaction() as tx:
new_id = tx.mutate( new_id = tx.mutate(
'INSERT INTO {0} DEFAULT VALUES'.format(self._table) f'INSERT INTO {self._table} DEFAULT VALUES'
) )
self.id = new_id self.id = new_id
self.added = time.time() self.added = time.time()
@ -479,11 +608,11 @@ class Model(object):
_formatter = FormattedMapping _formatter = FormattedMapping
def formatted(self, for_path=False): def formatted(self, included_keys=_formatter.ALL_KEYS, for_path=False):
"""Get a mapping containing all values on this object formatted """Get a mapping containing all values on this object formatted
as human-readable unicode strings. as human-readable unicode strings.
""" """
return self._formatter(self, for_path) return self._formatter(self, included_keys, for_path)
def evaluate_template(self, template, for_path=False): def evaluate_template(self, template, for_path=False):
"""Evaluate a template (a string or a `Template` object) using """Evaluate a template (a string or a `Template` object) using
@ -491,9 +620,9 @@ class Model(object):
separators will be added to the template. separators will be added to the template.
""" """
# Perform substitution. # Perform substitution.
if isinstance(template, six.string_types): if isinstance(template, str):
template = Template(template) template = functemplate.template(template)
return template.substitute(self.formatted(for_path), return template.substitute(self.formatted(for_path=for_path),
self._template_funcs()) self._template_funcs())
# Parsing. # Parsing.
@ -502,8 +631,8 @@ class Model(object):
def _parse(cls, key, string): def _parse(cls, key, string):
"""Parse a string as a value for the given key. """Parse a string as a value for the given key.
""" """
if not isinstance(string, six.string_types): if not isinstance(string, str):
raise TypeError(u"_parse() argument must be a string") raise TypeError("_parse() argument must be a string")
return cls._type(key).parse(string) return cls._type(key).parse(string)
@ -515,11 +644,13 @@ class Model(object):
# Database controller and supporting interfaces. # Database controller and supporting interfaces.
class Results(object): class Results:
"""An item query result set. Iterating over the collection lazily """An item query result set. Iterating over the collection lazily
constructs LibModel objects that reflect database rows. constructs LibModel objects that reflect database rows.
""" """
def __init__(self, model_class, rows, db, query=None, sort=None):
def __init__(self, model_class, rows, db, flex_rows,
query=None, sort=None):
"""Create a result set that will construct objects of type """Create a result set that will construct objects of type
`model_class`. `model_class`.
@ -539,6 +670,7 @@ class Results(object):
self.db = db self.db = db
self.query = query self.query = query
self.sort = sort self.sort = sort
self.flex_rows = flex_rows
# We keep a queue of rows we haven't yet consumed for # We keep a queue of rows we haven't yet consumed for
# materialization. We preserve the original total number of # materialization. We preserve the original total number of
@ -560,6 +692,10 @@ class Results(object):
a `Results` object a second time should be much faster than the a `Results` object a second time should be much faster than the
first. first.
""" """
# Index flexible attributes by the item ID, so we have easier access
flex_attrs = self._get_indexed_flex_attrs()
index = 0 # Position in the materialized objects. index = 0 # Position in the materialized objects.
while index < len(self._objects) or self._rows: while index < len(self._objects) or self._rows:
# Are there previously-materialized objects to produce? # Are there previously-materialized objects to produce?
@ -572,7 +708,7 @@ class Results(object):
else: else:
while self._rows: while self._rows:
row = self._rows.pop(0) row = self._rows.pop(0)
obj = self._make_model(row) obj = self._make_model(row, flex_attrs.get(row['id'], {}))
# If there is a slow-query predicate, ensure that the # If there is a slow-query predicate, ensure that the
# object passes it. # object passes it.
if not self.query or self.query.match(obj): if not self.query or self.query.match(obj):
@ -594,20 +730,24 @@ class Results(object):
# Objects are pre-sorted (i.e., by the database). # Objects are pre-sorted (i.e., by the database).
return self._get_objects() return self._get_objects()
def _make_model(self, row): def _get_indexed_flex_attrs(self):
# Get the flexible attributes for the object. """ Index flexible attributes by the entity id they belong to
with self.db.transaction() as tx: """
flex_rows = tx.query( flex_values = {}
'SELECT * FROM {0} WHERE entity_id=?'.format( for row in self.flex_rows:
self.model_class._flex_table if row['entity_id'] not in flex_values:
), flex_values[row['entity_id']] = {}
(row['id'],)
)
flex_values[row['entity_id']][row['key']] = row['value']
return flex_values
def _make_model(self, row, flex_values={}):
""" Create a Model object for the given row
"""
cols = dict(row) cols = dict(row)
values = dict((k, v) for (k, v) in cols.items() values = {k: v for (k, v) in cols.items()
if not k[:4] == 'flex') if not k[:4] == 'flex'}
flex_values = dict((row['key'], row['value']) for row in flex_rows)
# Construct the Python object # Construct the Python object
obj = self.model_class._awaken(self.db, values, flex_values) obj = self.model_class._awaken(self.db, values, flex_values)
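
A standalone sketch of what _get_indexed_flex_attrs() produces: the flexible-attribute rows fetched once per query are grouped by entity_id, so each model can be awakened without a per-row lookup. Sample rows only.

flex_rows = [
    {'entity_id': 1, 'key': 'mood', 'value': 'calm'},
    {'entity_id': 1, 'key': 'source', 'value': 'vinyl rip'},
    {'entity_id': 2, 'key': 'mood', 'value': 'loud'},
]

flex_values = {}
for row in flex_rows:
    flex_values.setdefault(row['entity_id'], {})[row['key']] = row['value']

print(flex_values)
# {1: {'mood': 'calm', 'source': 'vinyl rip'}, 2: {'mood': 'loud'}}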
@ -656,7 +796,7 @@ class Results(object):
next(it) next(it)
return next(it) return next(it)
except StopIteration: except StopIteration:
raise IndexError(u'result index {0} out of range'.format(n)) raise IndexError(f'result index {n} out of range')
def get(self): def get(self):
"""Return the first matching object, or None if no objects """Return the first matching object, or None if no objects
@ -669,10 +809,16 @@ class Results(object):
return None return None
class Transaction(object): class Transaction:
"""A context manager for safe, concurrent access to the database. """A context manager for safe, concurrent access to the database.
All SQL commands should be executed through a transaction. All SQL commands should be executed through a transaction.
""" """
_mutated = False
"""A flag storing whether a mutation has been executed in the
current transaction.
"""
def __init__(self, db): def __init__(self, db):
self.db = db self.db = db
@ -694,12 +840,15 @@ class Transaction(object):
entered but not yet exited transaction. If it is the last active entered but not yet exited transaction. If it is the last active
transaction, the database updates are committed. transaction, the database updates are committed.
""" """
# Beware of races; currently secured by db._db_lock
self.db.revision += self._mutated
with self.db._tx_stack() as stack: with self.db._tx_stack() as stack:
assert stack.pop() is self assert stack.pop() is self
empty = not stack empty = not stack
if empty: if empty:
# Ending a "root" transaction. End the SQLite transaction. # Ending a "root" transaction. End the SQLite transaction.
self.db._connection().commit() self.db._connection().commit()
self._mutated = False
self.db._db_lock.release() self.db._db_lock.release()
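
A minimal standalone sketch of the revision bookkeeping introduced here: the database-wide counter only advances when a transaction actually mutated something, so a clean model whose snapshot is still current can skip reloading. The tiny classes are illustrative, not the beets ones.

class TinyDB:
    revision = 0            # bumped once per mutating transaction

class TinyModel:
    def __init__(self, db):
        self._db = db
        self._dirty = set()
        self._revision = db.revision   # snapshot taken at load time

    def needs_reload(self):
        # Mirrors the early-exit check added to Model.load() above.
        return bool(self._dirty) or self._db.revision != self._revision

db = TinyDB()
obj = TinyModel(db)
print(obj.needs_reload())   # False, nothing committed since this object loaded
db.revision += 1            # another transaction committed a mutation
print(obj.needs_reload())   # True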
def query(self, statement, subvals=()): def query(self, statement, subvals=()):
@ -715,7 +864,6 @@ class Transaction(object):
""" """
try: try:
cursor = self.db._connection().execute(statement, subvals) cursor = self.db._connection().execute(statement, subvals)
return cursor.lastrowid
except sqlite3.OperationalError as e: except sqlite3.OperationalError as e:
# In two specific cases, SQLite reports an error while accessing # In two specific cases, SQLite reports an error while accessing
# the underlying database file. We surface these exceptions as # the underlying database file. We surface these exceptions as
@ -725,26 +873,41 @@ class Transaction(object):
raise DBAccessError(e.args[0]) raise DBAccessError(e.args[0])
else: else:
raise raise
else:
self._mutated = True
return cursor.lastrowid
def script(self, statements): def script(self, statements):
"""Execute a string containing multiple SQL statements.""" """Execute a string containing multiple SQL statements."""
# We don't know whether this mutates, but quite likely it does.
self._mutated = True
self.db._connection().executescript(statements) self.db._connection().executescript(statements)
class Database(object): class Database:
"""A container for Model objects that wraps an SQLite database as """A container for Model objects that wraps an SQLite database as
the backend. the backend.
""" """
_models = () _models = ()
"""The Model subclasses representing tables in this database. """The Model subclasses representing tables in this database.
""" """
supports_extensions = hasattr(sqlite3.Connection, 'enable_load_extension')
"""Whether or not the current version of SQLite supports extensions"""
revision = 0
"""The current revision of the database. To be increased whenever
data is written in a transaction.
"""
def __init__(self, path, timeout=5.0): def __init__(self, path, timeout=5.0):
self.path = path self.path = path
self.timeout = timeout self.timeout = timeout
self._connections = {} self._connections = {}
self._tx_stacks = defaultdict(list) self._tx_stacks = defaultdict(list)
self._extensions = []
# A lock to protect the _connections and _tx_stacks maps, which # A lock to protect the _connections and _tx_stacks maps, which
# both map thread IDs to private resources. # both map thread IDs to private resources.
@ -794,6 +957,13 @@ class Database(object):
py3_path(self.path), timeout=self.timeout py3_path(self.path), timeout=self.timeout
) )
if self.supports_extensions:
conn.enable_load_extension(True)
# Load any extensions that are already loaded for other connections. # Load any extensions that are already loaded for other connections.
for path in self._extensions:
conn.load_extension(path)
# Access SELECT results like dictionaries. # Access SELECT results like dictionaries.
conn.row_factory = sqlite3.Row conn.row_factory = sqlite3.Row
return conn return conn
@ -822,6 +992,18 @@ class Database(object):
""" """
return Transaction(self) return Transaction(self)
def load_extension(self, path):
"""Load an SQLite extension into all open connections."""
if not self.supports_extensions:
raise ValueError(
'this sqlite3 installation does not support extensions')
self._extensions.append(path)
# Load the extension into every open connection.
for conn in self._connections.values():
conn.load_extension(path)
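The new `load_extension` method builds on the standard `sqlite3` extension-loading API (`enable_load_extension` / `load_extension`). A minimal sketch of that API on a raw connection; the extension path is a hypothetical example, not something shipped with this project:

```python
import sqlite3

conn = sqlite3.connect(':memory:')
if hasattr(sqlite3.Connection, 'enable_load_extension'):
    conn.enable_load_extension(True)                 # allow native extensions
    conn.load_extension('/usr/lib/sqlite3/pcre.so')  # hypothetical shared library
    conn.enable_load_extension(False)                # optionally lock it down again
else:
    raise RuntimeError('this sqlite3 build does not support extensions')
```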
# Schema setup and migration. # Schema setup and migration.
def _make_table(self, table, fields): def _make_table(self, table, fields):
@ -831,7 +1013,7 @@ class Database(object):
# Get current schema. # Get current schema.
with self.transaction() as tx: with self.transaction() as tx:
rows = tx.query('PRAGMA table_info(%s)' % table) rows = tx.query('PRAGMA table_info(%s)' % table)
current_fields = set([row[1] for row in rows]) current_fields = {row[1] for row in rows}
field_names = set(fields.keys()) field_names = set(fields.keys())
if current_fields.issuperset(field_names): if current_fields.issuperset(field_names):
@ -842,9 +1024,9 @@ class Database(object):
# No table exists. # No table exists.
columns = [] columns = []
for name, typ in fields.items(): for name, typ in fields.items():
columns.append('{0} {1}'.format(name, typ.sql)) columns.append(f'{name} {typ.sql}')
setup_sql = 'CREATE TABLE {0} ({1});\n'.format(table, setup_sql = 'CREATE TABLE {} ({});\n'.format(table,
', '.join(columns)) ', '.join(columns))
else: else:
# Table exists does not match the field set. # Table exists does not match the field set.
@ -852,7 +1034,7 @@ class Database(object):
for name, typ in fields.items(): for name, typ in fields.items():
if name in current_fields: if name in current_fields:
continue continue
setup_sql += 'ALTER TABLE {0} ADD COLUMN {1} {2};\n'.format( setup_sql += 'ALTER TABLE {} ADD COLUMN {} {};\n'.format(
table, name, typ.sql table, name, typ.sql
) )
@ -888,17 +1070,31 @@ class Database(object):
where, subvals = query.clause() where, subvals = query.clause()
order_by = sort.order_clause() order_by = sort.order_clause()
sql = ("SELECT * FROM {0} WHERE {1} {2}").format( sql = ("SELECT * FROM {} WHERE {} {}").format(
model_cls._table, model_cls._table,
where or '1', where or '1',
"ORDER BY {0}".format(order_by) if order_by else '', f"ORDER BY {order_by}" if order_by else '',
)
# Fetch flexible attributes for items matching the main query.
# Doing the per-item filtering in python is faster than issuing
# one query per item to sqlite.
flex_sql = ("""
SELECT * FROM {} WHERE entity_id IN
(SELECT id FROM {} WHERE {});
""".format(
model_cls._flex_table,
model_cls._table,
where or '1',
)
) )
with self.transaction() as tx: with self.transaction() as tx:
rows = tx.query(sql, subvals) rows = tx.query(sql, subvals)
flex_rows = tx.query(flex_sql, subvals)
return Results( return Results(
model_cls, rows, self, model_cls, rows, self, flex_rows,
None if where else query, # Slow query component. None if where else query, # Slow query component.
sort if sort.is_slow() else None, # Slow sort component. sort if sort.is_slow() else None, # Slow sort component.
) )
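The second query above pre-fetches every flexible attribute for the matched items in a single round trip; grouping those rows by `entity_id` in Python is then cheap, which is why it beats issuing one query per item. A hedged sketch of that grouping step (the `entity_id`/`key`/`value` columns come from the query above; the helper itself is illustrative):

```python
from collections import defaultdict

def group_flex_rows(flex_rows):
    """Map each entity_id to its {key: value} dict of flexible attributes."""
    by_entity = defaultdict(dict)
    for row in flex_rows:
        by_entity[row['entity_id']][row['key']] = row['value']
    return by_entity

# The flex_values for one item are then just by_entity.get(item_id, {}).
```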


@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2016, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
@ -15,7 +14,6 @@
"""The Query type hierarchy for DBCore. """The Query type hierarchy for DBCore.
""" """
from __future__ import division, absolute_import, print_function
import re import re
from operator import mul from operator import mul
@ -23,10 +21,6 @@ from beets import util
from datetime import datetime, timedelta from datetime import datetime, timedelta
import unicodedata import unicodedata
from functools import reduce from functools import reduce
import six
if not six.PY2:
buffer = memoryview # sqlite won't accept memoryview in python 2
class ParsingError(ValueError): class ParsingError(ValueError):
@ -44,8 +38,8 @@ class InvalidQueryError(ParsingError):
def __init__(self, query, explanation): def __init__(self, query, explanation):
if isinstance(query, list): if isinstance(query, list):
query = " ".join(query) query = " ".join(query)
message = u"'{0}': {1}".format(query, explanation) message = f"'{query}': {explanation}"
super(InvalidQueryError, self).__init__(message) super().__init__(message)
class InvalidQueryArgumentValueError(ParsingError): class InvalidQueryArgumentValueError(ParsingError):
@ -56,13 +50,13 @@ class InvalidQueryArgumentValueError(ParsingError):
""" """
def __init__(self, what, expected, detail=None): def __init__(self, what, expected, detail=None):
message = u"'{0}' is not {1}".format(what, expected) message = f"'{what}' is not {expected}"
if detail: if detail:
message = u"{0}: {1}".format(message, detail) message = f"{message}: {detail}"
super(InvalidQueryArgumentValueError, self).__init__(message) super().__init__(message)
class Query(object): class Query:
"""An abstract class representing a query into the item database. """An abstract class representing a query into the item database.
""" """
@ -82,7 +76,7 @@ class Query(object):
raise NotImplementedError raise NotImplementedError
def __repr__(self): def __repr__(self):
return "{0.__class__.__name__}()".format(self) return f"{self.__class__.__name__}()"
def __eq__(self, other): def __eq__(self, other):
return type(self) == type(other) return type(self) == type(other)
@ -129,7 +123,7 @@ class FieldQuery(Query):
"{0.fast})".format(self)) "{0.fast})".format(self))
def __eq__(self, other): def __eq__(self, other):
return super(FieldQuery, self).__eq__(other) and \ return super().__eq__(other) and \
self.field == other.field and self.pattern == other.pattern self.field == other.field and self.pattern == other.pattern
def __hash__(self): def __hash__(self):
@ -151,17 +145,13 @@ class NoneQuery(FieldQuery):
"""A query that checks whether a field is null.""" """A query that checks whether a field is null."""
def __init__(self, field, fast=True): def __init__(self, field, fast=True):
super(NoneQuery, self).__init__(field, None, fast) super().__init__(field, None, fast)
def col_clause(self): def col_clause(self):
return self.field + " IS NULL", () return self.field + " IS NULL", ()
@classmethod def match(self, item):
def match(cls, item): return item.get(self.field) is None
try:
return item[cls.field] is None
except KeyError:
return True
def __repr__(self): def __repr__(self):
return "{0.__class__.__name__}({0.field!r}, {0.fast})".format(self) return "{0.__class__.__name__}({0.field!r}, {0.fast})".format(self)
@ -214,14 +204,14 @@ class RegexpQuery(StringFieldQuery):
""" """
def __init__(self, field, pattern, fast=True): def __init__(self, field, pattern, fast=True):
super(RegexpQuery, self).__init__(field, pattern, fast) super().__init__(field, pattern, fast)
pattern = self._normalize(pattern) pattern = self._normalize(pattern)
try: try:
self.pattern = re.compile(self.pattern) self.pattern = re.compile(self.pattern)
except re.error as exc: except re.error as exc:
# Invalid regular expression. # Invalid regular expression.
raise InvalidQueryArgumentValueError(pattern, raise InvalidQueryArgumentValueError(pattern,
u"a regular expression", "a regular expression",
format(exc)) format(exc))
@staticmethod @staticmethod
@ -242,8 +232,8 @@ class BooleanQuery(MatchQuery):
""" """
def __init__(self, field, pattern, fast=True): def __init__(self, field, pattern, fast=True):
super(BooleanQuery, self).__init__(field, pattern, fast) super().__init__(field, pattern, fast)
if isinstance(pattern, six.string_types): if isinstance(pattern, str):
self.pattern = util.str2bool(pattern) self.pattern = util.str2bool(pattern)
self.pattern = int(self.pattern) self.pattern = int(self.pattern)
@ -256,16 +246,16 @@ class BytesQuery(MatchQuery):
""" """
def __init__(self, field, pattern): def __init__(self, field, pattern):
super(BytesQuery, self).__init__(field, pattern) super().__init__(field, pattern)
# Use a buffer/memoryview representation of the pattern for SQLite # Use a buffer/memoryview representation of the pattern for SQLite
# matching. This instructs SQLite to treat the blob as binary # matching. This instructs SQLite to treat the blob as binary
# rather than encoded Unicode. # rather than encoded Unicode.
if isinstance(self.pattern, (six.text_type, bytes)): if isinstance(self.pattern, (str, bytes)):
if isinstance(self.pattern, six.text_type): if isinstance(self.pattern, str):
self.pattern = self.pattern.encode('utf-8') self.pattern = self.pattern.encode('utf-8')
self.buf_pattern = buffer(self.pattern) self.buf_pattern = memoryview(self.pattern)
elif isinstance(self.pattern, buffer): elif isinstance(self.pattern, memoryview):
self.buf_pattern = self.pattern self.buf_pattern = self.pattern
self.pattern = bytes(self.pattern) self.pattern = bytes(self.pattern)
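Wrapping the pattern in a `memoryview` tells the `sqlite3` driver to bind it as a BLOB, so comparisons against blob columns stay byte-wise instead of going through text handling. A small standalone illustration of the same binding (table and value are made up for the example):

```python
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE t (path BLOB)')
conn.execute('INSERT INTO t VALUES (?)', (memoryview(b'/music/a.mp3'),))

# Matching against the blob column uses the same memoryview wrapping.
pattern = memoryview(b'/music/a.mp3')
count = conn.execute('SELECT count(*) FROM t WHERE path = ?', (pattern,)).fetchone()
print(count[0])  # -> 1
```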
@ -297,10 +287,10 @@ class NumericQuery(FieldQuery):
try: try:
return float(s) return float(s)
except ValueError: except ValueError:
raise InvalidQueryArgumentValueError(s, u"an int or a float") raise InvalidQueryArgumentValueError(s, "an int or a float")
def __init__(self, field, pattern, fast=True): def __init__(self, field, pattern, fast=True):
super(NumericQuery, self).__init__(field, pattern, fast) super().__init__(field, pattern, fast)
parts = pattern.split('..', 1) parts = pattern.split('..', 1)
if len(parts) == 1: if len(parts) == 1:
@ -318,7 +308,7 @@ class NumericQuery(FieldQuery):
if self.field not in item: if self.field not in item:
return False return False
value = item[self.field] value = item[self.field]
if isinstance(value, six.string_types): if isinstance(value, str):
value = self._convert(value) value = self._convert(value)
if self.point is not None: if self.point is not None:
@ -335,14 +325,14 @@ class NumericQuery(FieldQuery):
return self.field + '=?', (self.point,) return self.field + '=?', (self.point,)
else: else:
if self.rangemin is not None and self.rangemax is not None: if self.rangemin is not None and self.rangemax is not None:
return (u'{0} >= ? AND {0} <= ?'.format(self.field), return ('{0} >= ? AND {0} <= ?'.format(self.field),
(self.rangemin, self.rangemax)) (self.rangemin, self.rangemax))
elif self.rangemin is not None: elif self.rangemin is not None:
return u'{0} >= ?'.format(self.field), (self.rangemin,) return f'{self.field} >= ?', (self.rangemin,)
elif self.rangemax is not None: elif self.rangemax is not None:
return u'{0} <= ?'.format(self.field), (self.rangemax,) return f'{self.field} <= ?', (self.rangemax,)
else: else:
return u'1', () return '1', ()
class CollectionQuery(Query): class CollectionQuery(Query):
@ -387,7 +377,7 @@ class CollectionQuery(Query):
return "{0.__class__.__name__}({0.subqueries!r})".format(self) return "{0.__class__.__name__}({0.subqueries!r})".format(self)
def __eq__(self, other): def __eq__(self, other):
return super(CollectionQuery, self).__eq__(other) and \ return super().__eq__(other) and \
self.subqueries == other.subqueries self.subqueries == other.subqueries
def __hash__(self): def __hash__(self):
@ -411,7 +401,7 @@ class AnyFieldQuery(CollectionQuery):
subqueries = [] subqueries = []
for field in self.fields: for field in self.fields:
subqueries.append(cls(field, pattern, True)) subqueries.append(cls(field, pattern, True))
super(AnyFieldQuery, self).__init__(subqueries) super().__init__(subqueries)
def clause(self): def clause(self):
return self.clause_with_joiner('or') return self.clause_with_joiner('or')
@ -427,7 +417,7 @@ class AnyFieldQuery(CollectionQuery):
"{0.query_class.__name__})".format(self)) "{0.query_class.__name__})".format(self))
def __eq__(self, other): def __eq__(self, other):
return super(AnyFieldQuery, self).__eq__(other) and \ return super().__eq__(other) and \
self.query_class == other.query_class self.query_class == other.query_class
def __hash__(self): def __hash__(self):
@ -453,7 +443,7 @@ class AndQuery(MutableCollectionQuery):
return self.clause_with_joiner('and') return self.clause_with_joiner('and')
def match(self, item): def match(self, item):
return all([q.match(item) for q in self.subqueries]) return all(q.match(item) for q in self.subqueries)
class OrQuery(MutableCollectionQuery): class OrQuery(MutableCollectionQuery):
@ -463,7 +453,7 @@ class OrQuery(MutableCollectionQuery):
return self.clause_with_joiner('or') return self.clause_with_joiner('or')
def match(self, item): def match(self, item):
return any([q.match(item) for q in self.subqueries]) return any(q.match(item) for q in self.subqueries)
class NotQuery(Query): class NotQuery(Query):
@ -477,7 +467,7 @@ class NotQuery(Query):
def clause(self): def clause(self):
clause, subvals = self.subquery.clause() clause, subvals = self.subquery.clause()
if clause: if clause:
return 'not ({0})'.format(clause), subvals return f'not ({clause})', subvals
else: else:
# If there is no clause, there is nothing to negate. All the logic # If there is no clause, there is nothing to negate. All the logic
# is handled by match() for slow queries. # is handled by match() for slow queries.
@ -490,7 +480,7 @@ class NotQuery(Query):
return "{0.__class__.__name__}({0.subquery!r})".format(self) return "{0.__class__.__name__}({0.subquery!r})".format(self)
def __eq__(self, other): def __eq__(self, other):
return super(NotQuery, self).__eq__(other) and \ return super().__eq__(other) and \
self.subquery == other.subquery self.subquery == other.subquery
def __hash__(self): def __hash__(self):
@ -546,7 +536,7 @@ def _parse_periods(pattern):
return (start, end) return (start, end)
class Period(object): class Period:
"""A period of time given by a date, time and precision. """A period of time given by a date, time and precision.
Example: 2014-01-01 10:50:30 with precision 'month' represents all Example: 2014-01-01 10:50:30 with precision 'month' represents all
@ -572,7 +562,7 @@ class Period(object):
or "second"). or "second").
""" """
if precision not in Period.precisions: if precision not in Period.precisions:
raise ValueError(u'Invalid precision {0}'.format(precision)) raise ValueError(f'Invalid precision {precision}')
self.date = date self.date = date
self.precision = precision self.precision = precision
@ -653,10 +643,10 @@ class Period(object):
elif 'second' == precision: elif 'second' == precision:
return date + timedelta(seconds=1) return date + timedelta(seconds=1)
else: else:
raise ValueError(u'unhandled precision {0}'.format(precision)) raise ValueError(f'unhandled precision {precision}')
class DateInterval(object): class DateInterval:
"""A closed-open interval of dates. """A closed-open interval of dates.
A left endpoint of None means since the beginning of time. A left endpoint of None means since the beginning of time.
@ -665,7 +655,7 @@ class DateInterval(object):
def __init__(self, start, end): def __init__(self, start, end):
if start is not None and end is not None and not start < end: if start is not None and end is not None and not start < end:
raise ValueError(u"start date {0} is not before end date {1}" raise ValueError("start date {} is not before end date {}"
.format(start, end)) .format(start, end))
self.start = start self.start = start
self.end = end self.end = end
@ -686,7 +676,7 @@ class DateInterval(object):
return True return True
def __str__(self): def __str__(self):
return '[{0}, {1})'.format(self.start, self.end) return f'[{self.start}, {self.end})'
class DateQuery(FieldQuery): class DateQuery(FieldQuery):
@ -700,7 +690,7 @@ class DateQuery(FieldQuery):
""" """
def __init__(self, field, pattern, fast=True): def __init__(self, field, pattern, fast=True):
super(DateQuery, self).__init__(field, pattern, fast) super().__init__(field, pattern, fast)
start, end = _parse_periods(pattern) start, end = _parse_periods(pattern)
self.interval = DateInterval.from_periods(start, end) self.interval = DateInterval.from_periods(start, end)
@ -759,12 +749,12 @@ class DurationQuery(NumericQuery):
except ValueError: except ValueError:
raise InvalidQueryArgumentValueError( raise InvalidQueryArgumentValueError(
s, s,
u"a M:SS string or a float") "a M:SS string or a float")
# Sorting. # Sorting.
class Sort(object): class Sort:
"""An abstract class representing a sort operation for a query into """An abstract class representing a sort operation for a query into
the item database. the item database.
""" """
@ -851,13 +841,13 @@ class MultipleSort(Sort):
return items return items
def __repr__(self): def __repr__(self):
return 'MultipleSort({!r})'.format(self.sorts) return f'MultipleSort({self.sorts!r})'
def __hash__(self): def __hash__(self):
return hash(tuple(self.sorts)) return hash(tuple(self.sorts))
def __eq__(self, other): def __eq__(self, other):
return super(MultipleSort, self).__eq__(other) and \ return super().__eq__(other) and \
self.sorts == other.sorts self.sorts == other.sorts
@ -878,14 +868,14 @@ class FieldSort(Sort):
def key(item): def key(item):
field_val = item.get(self.field, '') field_val = item.get(self.field, '')
if self.case_insensitive and isinstance(field_val, six.text_type): if self.case_insensitive and isinstance(field_val, str):
field_val = field_val.lower() field_val = field_val.lower()
return field_val return field_val
return sorted(objs, key=key, reverse=not self.ascending) return sorted(objs, key=key, reverse=not self.ascending)
def __repr__(self): def __repr__(self):
return '<{0}: {1}{2}>'.format( return '<{}: {}{}>'.format(
type(self).__name__, type(self).__name__,
self.field, self.field,
'+' if self.ascending else '-', '+' if self.ascending else '-',
@ -895,7 +885,7 @@ class FieldSort(Sort):
return hash((self.field, self.ascending)) return hash((self.field, self.ascending))
def __eq__(self, other): def __eq__(self, other):
return super(FieldSort, self).__eq__(other) and \ return super().__eq__(other) and \
self.field == other.field and \ self.field == other.field and \
self.ascending == other.ascending self.ascending == other.ascending
@ -913,7 +903,7 @@ class FixedFieldSort(FieldSort):
'ELSE {0} END)'.format(self.field) 'ELSE {0} END)'.format(self.field)
else: else:
field = self.field field = self.field
return "{0} {1}".format(field, order) return f"{field} {order}"
class SlowFieldSort(FieldSort): class SlowFieldSort(FieldSort):


@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2016, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
@ -15,12 +14,10 @@
"""Parsing of strings into DBCore queries. """Parsing of strings into DBCore queries.
""" """
from __future__ import division, absolute_import, print_function
import re import re
import itertools import itertools
from . import query from . import query
import beets
PARSE_QUERY_PART_REGEX = re.compile( PARSE_QUERY_PART_REGEX = re.compile(
# Non-capturing optional segment for the keyword. # Non-capturing optional segment for the keyword.
@ -89,7 +86,7 @@ def parse_query_part(part, query_classes={}, prefixes={},
assert match # Regex should always match assert match # Regex should always match
negate = bool(match.group(1)) negate = bool(match.group(1))
key = match.group(2) key = match.group(2)
term = match.group(3).replace('\:', ':') term = match.group(3).replace('\\:', ':')
# Check whether there's a prefix in the query and use the # Check whether there's a prefix in the query and use the
# corresponding query type. # corresponding query type.
@ -119,12 +116,13 @@ def construct_query_part(model_cls, prefixes, query_part):
if not query_part: if not query_part:
return query.TrueQuery() return query.TrueQuery()
# Use `model_cls` to build up a map from field names to `Query` # Use `model_cls` to build up a map from field (or query) names to
# classes. # `Query` classes.
query_classes = {} query_classes = {}
for k, t in itertools.chain(model_cls._fields.items(), for k, t in itertools.chain(model_cls._fields.items(),
model_cls._types.items()): model_cls._types.items()):
query_classes[k] = t.query query_classes[k] = t.query
query_classes.update(model_cls._queries) # Non-field queries.
# Parse the string. # Parse the string.
key, pattern, query_class, negate = \ key, pattern, query_class, negate = \
@ -137,26 +135,27 @@ def construct_query_part(model_cls, prefixes, query_part):
# The query type matches a specific field, but none was # The query type matches a specific field, but none was
# specified. So we use a version of the query that matches # specified. So we use a version of the query that matches
# any field. # any field.
q = query.AnyFieldQuery(pattern, model_cls._search_fields, out_query = query.AnyFieldQuery(pattern, model_cls._search_fields,
query_class) query_class)
if negate:
return query.NotQuery(q)
else:
return q
else: else:
# Non-field query type. # Non-field query type.
if negate: out_query = query_class(pattern)
return query.NotQuery(query_class(pattern))
else:
return query_class(pattern)
# Otherwise, this must be a `FieldQuery`. Use the field name to # Field queries get constructed according to the name of the field
# construct the query object. # they are querying.
key = key.lower() elif issubclass(query_class, query.FieldQuery):
q = query_class(key.lower(), pattern, key in model_cls._fields) key = key.lower()
out_query = query_class(key.lower(), pattern, key in model_cls._fields)
# Non-field (named) query.
else:
out_query = query_class(pattern)
# Apply negation.
if negate: if negate:
return query.NotQuery(q) return query.NotQuery(out_query)
return q else:
return out_query
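The restructured `construct_query_part` now builds the positive query in each branch and applies `NotQuery` once at the end, rather than repeating the negation logic per branch. A reduced sketch of that shape; `NotQuery` here is a minimal stand-in, and the branch bodies are placeholders rather than the real beets constructors:

```python
class NotQuery:
    """Minimal stand-in: wraps another query and negates its result."""
    def __init__(self, subquery):
        self.subquery = subquery

def build_part(query_class, key, pattern, negate):
    # Build the positive query first, whatever shape it takes...
    if key is None:
        out_query = query_class(pattern)
    else:
        out_query = query_class(key.lower(), pattern)
    # ...then apply negation in exactly one place at the end.
    return NotQuery(out_query) if negate else out_query
```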
def query_from_strings(query_cls, model_cls, prefixes, query_parts): def query_from_strings(query_cls, model_cls, prefixes, query_parts):
@ -172,11 +171,13 @@ def query_from_strings(query_cls, model_cls, prefixes, query_parts):
return query_cls(subqueries) return query_cls(subqueries)
def construct_sort_part(model_cls, part): def construct_sort_part(model_cls, part, case_insensitive=True):
"""Create a `Sort` from a single string criterion. """Create a `Sort` from a single string criterion.
`model_cls` is the `Model` being queried. `part` is a single string `model_cls` is the `Model` being queried. `part` is a single string
ending in ``+`` or ``-`` indicating the sort. ending in ``+`` or ``-`` indicating the sort. `case_insensitive`
indicates whether or not the sort should be performed in a case
sensitive manner.
""" """
assert part, "part must be a field name and + or -" assert part, "part must be a field name and + or -"
field = part[:-1] field = part[:-1]
@ -185,7 +186,6 @@ def construct_sort_part(model_cls, part):
assert direction in ('+', '-'), "part must end with + or -" assert direction in ('+', '-'), "part must end with + or -"
is_ascending = direction == '+' is_ascending = direction == '+'
case_insensitive = beets.config['sort_case_insensitive'].get(bool)
if field in model_cls._sorts: if field in model_cls._sorts:
sort = model_cls._sorts[field](model_cls, is_ascending, sort = model_cls._sorts[field](model_cls, is_ascending,
case_insensitive) case_insensitive)
@ -197,21 +197,23 @@ def construct_sort_part(model_cls, part):
return sort return sort
def sort_from_strings(model_cls, sort_parts): def sort_from_strings(model_cls, sort_parts, case_insensitive=True):
"""Create a `Sort` from a list of sort criteria (strings). """Create a `Sort` from a list of sort criteria (strings).
""" """
if not sort_parts: if not sort_parts:
sort = query.NullSort() sort = query.NullSort()
elif len(sort_parts) == 1: elif len(sort_parts) == 1:
sort = construct_sort_part(model_cls, sort_parts[0]) sort = construct_sort_part(model_cls, sort_parts[0], case_insensitive)
else: else:
sort = query.MultipleSort() sort = query.MultipleSort()
for part in sort_parts: for part in sort_parts:
sort.add_sort(construct_sort_part(model_cls, part)) sort.add_sort(construct_sort_part(model_cls, part,
case_insensitive))
return sort return sort
def parse_sorted_query(model_cls, parts, prefixes={}): def parse_sorted_query(model_cls, parts, prefixes={},
case_insensitive=True):
"""Given a list of strings, create the `Query` and `Sort` that they """Given a list of strings, create the `Query` and `Sort` that they
represent. represent.
""" """
@ -222,8 +224,8 @@ def parse_sorted_query(model_cls, parts, prefixes={}):
# Split up query in to comma-separated subqueries, each representing # Split up query in to comma-separated subqueries, each representing
# an AndQuery, which need to be joined together in one OrQuery # an AndQuery, which need to be joined together in one OrQuery
subquery_parts = [] subquery_parts = []
for part in parts + [u',']: for part in parts + [',']:
if part.endswith(u','): if part.endswith(','):
# Ensure we can catch "foo, bar" as well as "foo , bar" # Ensure we can catch "foo, bar" as well as "foo , bar"
last_subquery_part = part[:-1] last_subquery_part = part[:-1]
if last_subquery_part: if last_subquery_part:
@ -237,8 +239,8 @@ def parse_sorted_query(model_cls, parts, prefixes={}):
else: else:
# Sort parts (1) end in + or -, (2) don't have a field, and # Sort parts (1) end in + or -, (2) don't have a field, and
# (3) consist of more than just the + or -. # (3) consist of more than just the + or -.
if part.endswith((u'+', u'-')) \ if part.endswith(('+', '-')) \
and u':' not in part \ and ':' not in part \
and len(part) > 1: and len(part) > 1:
sort_parts.append(part) sort_parts.append(part)
else: else:
@ -246,5 +248,5 @@ def parse_sorted_query(model_cls, parts, prefixes={}):
# Avoid needlessly wrapping single statements in an OR # Avoid needlessly wrapping single statements in an OR
q = query.OrQuery(query_parts) if len(query_parts) > 1 else query_parts[0] q = query.OrQuery(query_parts) if len(query_parts) > 1 else query_parts[0]
s = sort_from_strings(model_cls, sort_parts) s = sort_from_strings(model_cls, sort_parts, case_insensitive)
return q, s return q, s
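The `case_insensitive` flag threaded through `construct_sort_part`, `sort_from_strings`, and `parse_sorted_query` above ultimately controls the sort key used by `FieldSort` (seen in the previous file): string values are lower-cased before comparison. A standalone sketch of that key function, using plain dicts as stand-ins for items:

```python
def sorted_by_field(items, field, ascending=True, case_insensitive=True):
    """Sort dict-like items on one field, optionally ignoring case."""
    def key(item):
        value = item.get(field, '')
        if case_insensitive and isinstance(value, str):
            value = value.lower()
        return value
    return sorted(items, key=key, reverse=not ascending)

tracks = [{'artist': 'beck'}, {'artist': 'Aphex Twin'}]
print(sorted_by_field(tracks, 'artist'))   # Aphex Twin sorts before beck
```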


@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2016, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
@ -15,25 +14,20 @@
"""Representation of type information for DBCore model fields. """Representation of type information for DBCore model fields.
""" """
from __future__ import division, absolute_import, print_function
from . import query from . import query
from beets.util import str2bool from beets.util import str2bool
import six
if not six.PY2:
buffer = memoryview # sqlite won't accept memoryview in python 2
# Abstract base. # Abstract base.
class Type(object): class Type:
"""An object encapsulating the type of a model field. Includes """An object encapsulating the type of a model field. Includes
information about how to store, query, format, and parse a given information about how to store, query, format, and parse a given
field. field.
""" """
sql = u'TEXT' sql = 'TEXT'
"""The SQLite column type for the value. """The SQLite column type for the value.
""" """
@ -41,7 +35,7 @@ class Type(object):
"""The `Query` subclass to be used when querying the field. """The `Query` subclass to be used when querying the field.
""" """
model_type = six.text_type model_type = str
"""The Python type that is used to represent the value in the model. """The Python type that is used to represent the value in the model.
The model is guaranteed to return a value of this type if the field The model is guaranteed to return a value of this type if the field
@ -63,11 +57,11 @@ class Type(object):
value = self.null value = self.null
# `self.null` might be `None` # `self.null` might be `None`
if value is None: if value is None:
value = u'' value = ''
if isinstance(value, bytes): if isinstance(value, bytes):
value = value.decode('utf-8', 'ignore') value = value.decode('utf-8', 'ignore')
return six.text_type(value) return str(value)
def parse(self, string): def parse(self, string):
"""Parse a (possibly human-written) string and return the """Parse a (possibly human-written) string and return the
@ -97,16 +91,16 @@ class Type(object):
For fixed fields the type of `value` is determined by the column For fixed fields the type of `value` is determined by the column
type affinity given in the `sql` property and the SQL to Python type affinity given in the `sql` property and the SQL to Python
mapping of the database adapter. For more information see: mapping of the database adapter. For more information see:
http://www.sqlite.org/datatype3.html https://www.sqlite.org/datatype3.html
https://docs.python.org/2/library/sqlite3.html#sqlite-and-python-types https://docs.python.org/2/library/sqlite3.html#sqlite-and-python-types
Flexible fields have the type affinity `TEXT`. This means the Flexible fields have the type affinity `TEXT`. This means the
`sql_value` is either a `buffer`/`memoryview` or a `unicode` object` `sql_value` is either a `memoryview` or a `unicode` object`
and the method must handle these in addition. and the method must handle these in addition.
""" """
if isinstance(sql_value, buffer): if isinstance(sql_value, memoryview):
sql_value = bytes(sql_value).decode('utf-8', 'ignore') sql_value = bytes(sql_value).decode('utf-8', 'ignore')
if isinstance(sql_value, six.text_type): if isinstance(sql_value, str):
return self.parse(sql_value) return self.parse(sql_value)
else: else:
return self.normalize(sql_value) return self.normalize(sql_value)
@ -127,10 +121,18 @@ class Default(Type):
class Integer(Type): class Integer(Type):
"""A basic integer type. """A basic integer type.
""" """
sql = u'INTEGER' sql = 'INTEGER'
query = query.NumericQuery query = query.NumericQuery
model_type = int model_type = int
def normalize(self, value):
try:
return self.model_type(round(float(value)))
except ValueError:
return self.null
except TypeError:
return self.null
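The new `Integer.normalize` rounds to the nearest integer and falls back to the type's `null` value when coercion fails (both `ValueError` and `TypeError` are caught). A standalone sketch of the same behaviour; the `null` default below is a placeholder, not necessarily the value beets uses:

```python
def normalize_int(value, null=None):
    """Round to the nearest int; fall back to `null` on bad input."""
    try:
        return int(round(float(value)))
    except (ValueError, TypeError):
        return null

print(normalize_int('3.7'))   # 4
print(normalize_int(None))    # None (the null fallback)
print(normalize_int('n/a'))   # None (the null fallback)
```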
class PaddedInt(Integer): class PaddedInt(Integer):
"""An integer field that is formatted with a given number of digits, """An integer field that is formatted with a given number of digits,
@ -140,19 +142,25 @@ class PaddedInt(Integer):
self.digits = digits self.digits = digits
def format(self, value): def format(self, value):
return u'{0:0{1}d}'.format(value or 0, self.digits) return '{0:0{1}d}'.format(value or 0, self.digits)
class NullPaddedInt(PaddedInt):
"""Same as `PaddedInt`, but does not normalize `None` to `0.0`.
"""
null = None
class ScaledInt(Integer): class ScaledInt(Integer):
"""An integer whose formatting operation scales the number by a """An integer whose formatting operation scales the number by a
constant and adds a suffix. Good for units with large magnitudes. constant and adds a suffix. Good for units with large magnitudes.
""" """
def __init__(self, unit, suffix=u''): def __init__(self, unit, suffix=''):
self.unit = unit self.unit = unit
self.suffix = suffix self.suffix = suffix
def format(self, value): def format(self, value):
return u'{0}{1}'.format((value or 0) // self.unit, self.suffix) return '{}{}'.format((value or 0) // self.unit, self.suffix)
class Id(Integer): class Id(Integer):
@ -163,18 +171,22 @@ class Id(Integer):
def __init__(self, primary=True): def __init__(self, primary=True):
if primary: if primary:
self.sql = u'INTEGER PRIMARY KEY' self.sql = 'INTEGER PRIMARY KEY'
class Float(Type): class Float(Type):
"""A basic floating-point type. """A basic floating-point type. The `digits` parameter specifies how
many decimal places to use in the human-readable representation.
""" """
sql = u'REAL' sql = 'REAL'
query = query.NumericQuery query = query.NumericQuery
model_type = float model_type = float
def __init__(self, digits=1):
self.digits = digits
def format(self, value): def format(self, value):
return u'{0:.1f}'.format(value or 0.0) return '{0:.{1}f}'.format(value or 0, self.digits)
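`Float` now accepts a `digits` argument controlling how many decimal places `format` shows; the nested format spec `'{0:.{1}f}'` is standard Python. A quick illustration:

```python
def format_float(value, digits=1):
    # Nested format spec: field 0 is the value, field 1 the precision.
    return '{0:.{1}f}'.format(value or 0, digits)

print(format_float(3.14159))      # '3.1'
print(format_float(3.14159, 3))   # '3.142'
print(format_float(None, 2))      # '0.00'
```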
class NullFloat(Float): class NullFloat(Float):
@ -186,19 +198,25 @@ class NullFloat(Float):
class String(Type): class String(Type):
"""A Unicode string type. """A Unicode string type.
""" """
sql = u'TEXT' sql = 'TEXT'
query = query.SubstringQuery query = query.SubstringQuery
def normalize(self, value):
if value is None:
return self.null
else:
return self.model_type(value)
class Boolean(Type): class Boolean(Type):
"""A boolean type. """A boolean type.
""" """
sql = u'INTEGER' sql = 'INTEGER'
query = query.BooleanQuery query = query.BooleanQuery
model_type = bool model_type = bool
def format(self, value): def format(self, value):
return six.text_type(bool(value)) return str(bool(value))
def parse(self, string): def parse(self, string):
return str2bool(string) return str2bool(string)


@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2016, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
@ -13,7 +12,6 @@
# The above copyright notice and this permission notice shall be # The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software. # included in all copies or substantial portions of the Software.
from __future__ import division, absolute_import, print_function
"""Provides the basic, interface-agnostic workflow for importing and """Provides the basic, interface-agnostic workflow for importing and
autotagging music files. autotagging music files.
@ -40,7 +38,7 @@ from beets import config
from beets.util import pipeline, sorted_walk, ancestry, MoveOperation from beets.util import pipeline, sorted_walk, ancestry, MoveOperation
from beets.util import syspath, normpath, displayable_path from beets.util import syspath, normpath, displayable_path
from enum import Enum from enum import Enum
from beets import mediafile import mediafile
action = Enum('action', action = Enum('action',
['SKIP', 'ASIS', 'TRACKS', 'APPLY', 'ALBUMS', 'RETAG']) ['SKIP', 'ASIS', 'TRACKS', 'APPLY', 'ALBUMS', 'RETAG'])
@ -75,7 +73,7 @@ def _open_state():
# unpickling, including ImportError. We use a catch-all # unpickling, including ImportError. We use a catch-all
# exception to avoid enumerating them all (the docs don't even have a # exception to avoid enumerating them all (the docs don't even have a
# full list!). # full list!).
log.debug(u'state file could not be read: {0}', exc) log.debug('state file could not be read: {0}', exc)
return {} return {}
@ -84,8 +82,8 @@ def _save_state(state):
try: try:
with open(config['statefile'].as_filename(), 'wb') as f: with open(config['statefile'].as_filename(), 'wb') as f:
pickle.dump(state, f) pickle.dump(state, f)
except IOError as exc: except OSError as exc:
log.error(u'state file could not be written: {0}', exc) log.error('state file could not be written: {0}', exc)
# Utilities for reading and writing the beets progress file, which # Utilities for reading and writing the beets progress file, which
@ -174,10 +172,11 @@ def history_get():
# Abstract session class. # Abstract session class.
class ImportSession(object): class ImportSession:
"""Controls an import action. Subclasses should implement methods to """Controls an import action. Subclasses should implement methods to
communicate with the user or otherwise make decisions. communicate with the user or otherwise make decisions.
""" """
def __init__(self, lib, loghandler, paths, query): def __init__(self, lib, loghandler, paths, query):
"""Create a session. `lib` is a Library object. `loghandler` is a """Create a session. `lib` is a Library object. `loghandler` is a
logging.Handler. Either `paths` or `query` is non-null and indicates logging.Handler. Either `paths` or `query` is non-null and indicates
@ -187,7 +186,7 @@ class ImportSession(object):
self.logger = self._setup_logging(loghandler) self.logger = self._setup_logging(loghandler)
self.paths = paths self.paths = paths
self.query = query self.query = query
self._is_resuming = dict() self._is_resuming = {}
self._merged_items = set() self._merged_items = set()
self._merged_dirs = set() self._merged_dirs = set()
@ -222,19 +221,31 @@ class ImportSession(object):
iconfig['resume'] = False iconfig['resume'] = False
iconfig['incremental'] = False iconfig['incremental'] = False
# Copy, move, link, and hardlink are mutually exclusive. if iconfig['reflink']:
iconfig['reflink'] = iconfig['reflink'] \
.as_choice(['auto', True, False])
# Copy, move, reflink, link, and hardlink are mutually exclusive.
if iconfig['move']: if iconfig['move']:
iconfig['copy'] = False iconfig['copy'] = False
iconfig['link'] = False iconfig['link'] = False
iconfig['hardlink'] = False iconfig['hardlink'] = False
iconfig['reflink'] = False
elif iconfig['link']: elif iconfig['link']:
iconfig['copy'] = False iconfig['copy'] = False
iconfig['move'] = False iconfig['move'] = False
iconfig['hardlink'] = False iconfig['hardlink'] = False
iconfig['reflink'] = False
elif iconfig['hardlink']: elif iconfig['hardlink']:
iconfig['copy'] = False iconfig['copy'] = False
iconfig['move'] = False iconfig['move'] = False
iconfig['link'] = False iconfig['link'] = False
iconfig['reflink'] = False
elif iconfig['reflink']:
iconfig['copy'] = False
iconfig['move'] = False
iconfig['link'] = False
iconfig['hardlink'] = False
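The chain above makes `move`, `link`, `hardlink`, and the new `reflink` option mutually exclusive with each other and with `copy`: the first one enabled wins and the rest are forced off. A compact sketch of that resolution over a plain dict (the helper name is illustrative; only the option names come from the hunk):

```python
FILE_OPS = ('move', 'link', 'hardlink', 'reflink')

def resolve_file_operation(config):
    """Keep only the highest-priority enabled operation; disable the others."""
    for op in FILE_OPS:
        if config.get(op):
            for other in ('copy',) + FILE_OPS:
                if other != op:
                    config[other] = False
            return op
    return 'copy' if config.get('copy') else None

print(resolve_file_operation({'move': True, 'copy': True, 'reflink': True}))  # 'move'
```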
# Only delete when copying. # Only delete when copying.
if not iconfig['copy']: if not iconfig['copy']:
@ -246,7 +257,7 @@ class ImportSession(object):
"""Log a message about a given album to the importer log. The status """Log a message about a given album to the importer log. The status
should reflect the reason the album couldn't be tagged. should reflect the reason the album couldn't be tagged.
""" """
self.logger.info(u'{0} {1}', status, displayable_path(paths)) self.logger.info('{0} {1}', status, displayable_path(paths))
def log_choice(self, task, duplicate=False): def log_choice(self, task, duplicate=False):
"""Logs the task's current choice if it should be logged. If """Logs the task's current choice if it should be logged. If
@ -257,17 +268,17 @@ class ImportSession(object):
if duplicate: if duplicate:
# Duplicate: log all three choices (skip, keep both, and trump). # Duplicate: log all three choices (skip, keep both, and trump).
if task.should_remove_duplicates: if task.should_remove_duplicates:
self.tag_log(u'duplicate-replace', paths) self.tag_log('duplicate-replace', paths)
elif task.choice_flag in (action.ASIS, action.APPLY): elif task.choice_flag in (action.ASIS, action.APPLY):
self.tag_log(u'duplicate-keep', paths) self.tag_log('duplicate-keep', paths)
elif task.choice_flag is (action.SKIP): elif task.choice_flag is (action.SKIP):
self.tag_log(u'duplicate-skip', paths) self.tag_log('duplicate-skip', paths)
else: else:
# Non-duplicate: log "skip" and "asis" choices. # Non-duplicate: log "skip" and "asis" choices.
if task.choice_flag is action.ASIS: if task.choice_flag is action.ASIS:
self.tag_log(u'asis', paths) self.tag_log('asis', paths)
elif task.choice_flag is action.SKIP: elif task.choice_flag is action.SKIP:
self.tag_log(u'skip', paths) self.tag_log('skip', paths)
def should_resume(self, path): def should_resume(self, path):
raise NotImplementedError raise NotImplementedError
@ -284,7 +295,7 @@ class ImportSession(object):
def run(self): def run(self):
"""Run the import task. """Run the import task.
""" """
self.logger.info(u'import started {0}', time.asctime()) self.logger.info('import started {0}', time.asctime())
self.set_config(config['import']) self.set_config(config['import'])
# Set up the pipeline. # Set up the pipeline.
@ -368,8 +379,8 @@ class ImportSession(object):
"""Mark paths and directories as merged for future reimport tasks. """Mark paths and directories as merged for future reimport tasks.
""" """
self._merged_items.update(paths) self._merged_items.update(paths)
dirs = set([os.path.dirname(path) if os.path.isfile(path) else path dirs = {os.path.dirname(path) if os.path.isfile(path) else path
for path in paths]) for path in paths}
self._merged_dirs.update(dirs) self._merged_dirs.update(dirs)
def is_resuming(self, toppath): def is_resuming(self, toppath):
@ -389,7 +400,7 @@ class ImportSession(object):
# Either accept immediately or prompt for input to decide. # Either accept immediately or prompt for input to decide.
if self.want_resume is True or \ if self.want_resume is True or \
self.should_resume(toppath): self.should_resume(toppath):
log.warning(u'Resuming interrupted import of {0}', log.warning('Resuming interrupted import of {0}',
util.displayable_path(toppath)) util.displayable_path(toppath))
self._is_resuming[toppath] = True self._is_resuming[toppath] = True
else: else:
@ -399,11 +410,12 @@ class ImportSession(object):
# The importer task class. # The importer task class.
class BaseImportTask(object): class BaseImportTask:
"""An abstract base class for importer tasks. """An abstract base class for importer tasks.
Tasks flow through the importer pipeline. Each stage can update Tasks flow through the importer pipeline. Each stage can update
them. """ them. """
def __init__(self, toppath, paths, items): def __init__(self, toppath, paths, items):
"""Create a task. The primary fields that define a task are: """Create a task. The primary fields that define a task are:
@ -457,8 +469,9 @@ class ImportTask(BaseImportTask):
* `finalize()` Update the import progress and cleanup the file * `finalize()` Update the import progress and cleanup the file
system. system.
""" """
def __init__(self, toppath, paths, items): def __init__(self, toppath, paths, items):
super(ImportTask, self).__init__(toppath, paths, items) super().__init__(toppath, paths, items)
self.choice_flag = None self.choice_flag = None
self.cur_album = None self.cur_album = None
self.cur_artist = None self.cur_artist = None
@ -550,28 +563,34 @@ class ImportTask(BaseImportTask):
def remove_duplicates(self, lib): def remove_duplicates(self, lib):
duplicate_items = self.duplicate_items(lib) duplicate_items = self.duplicate_items(lib)
log.debug(u'removing {0} old duplicated items', len(duplicate_items)) log.debug('removing {0} old duplicated items', len(duplicate_items))
for item in duplicate_items: for item in duplicate_items:
item.remove() item.remove()
if lib.directory in util.ancestry(item.path): if lib.directory in util.ancestry(item.path):
log.debug(u'deleting duplicate {0}', log.debug('deleting duplicate {0}',
util.displayable_path(item.path)) util.displayable_path(item.path))
util.remove(item.path) util.remove(item.path)
util.prune_dirs(os.path.dirname(item.path), util.prune_dirs(os.path.dirname(item.path),
lib.directory) lib.directory)
def set_fields(self): def set_fields(self, lib):
"""Sets the fields given at CLI or configuration to the specified """Sets the fields given at CLI or configuration to the specified
values. values, for both the album and all its items.
""" """
items = self.imported_items()
for field, view in config['import']['set_fields'].items(): for field, view in config['import']['set_fields'].items():
value = view.get() value = view.get()
log.debug(u'Set field {1}={2} for {0}', log.debug('Set field {1}={2} for {0}',
displayable_path(self.paths), displayable_path(self.paths),
field, field,
value) value)
self.album[field] = value self.album[field] = value
self.album.store() for item in items:
item[field] = value
with lib.transaction():
for item in items:
item.store()
self.album.store()
def finalize(self, session): def finalize(self, session):
"""Save progress, clean up files, and emit plugin event. """Save progress, clean up files, and emit plugin event.
@ -655,7 +674,7 @@ class ImportTask(BaseImportTask):
return [] return []
duplicates = [] duplicates = []
task_paths = set(i.path for i in self.items if i) task_paths = {i.path for i in self.items if i}
duplicate_query = dbcore.AndQuery(( duplicate_query = dbcore.AndQuery((
dbcore.MatchQuery('albumartist', artist), dbcore.MatchQuery('albumartist', artist),
dbcore.MatchQuery('album', album), dbcore.MatchQuery('album', album),
@ -665,7 +684,7 @@ class ImportTask(BaseImportTask):
# Check whether the album paths are all present in the task # Check whether the album paths are all present in the task
# i.e. album is being completely re-imported by the task, # i.e. album is being completely re-imported by the task,
# in which case it is not a duplicate (will be replaced). # in which case it is not a duplicate (will be replaced).
album_paths = set(i.path for i in album.items()) album_paths = {i.path for i in album.items()}
if not (album_paths <= task_paths): if not (album_paths <= task_paths):
duplicates.append(album) duplicates.append(album)
return duplicates return duplicates
@ -707,7 +726,7 @@ class ImportTask(BaseImportTask):
item.update(changes) item.update(changes)
def manipulate_files(self, operation=None, write=False, session=None): def manipulate_files(self, operation=None, write=False, session=None):
""" Copy, move, link or hardlink (depending on `operation`) the files """ Copy, move, link, hardlink or reflink (depending on `operation`) the files
as well as write metadata. as well as write metadata.
`operation` should be an instance of `util.MoveOperation`. `operation` should be an instance of `util.MoveOperation`.
@ -754,6 +773,8 @@ class ImportTask(BaseImportTask):
self.record_replaced(lib) self.record_replaced(lib)
self.remove_replaced(lib) self.remove_replaced(lib)
self.album = lib.add_album(self.imported_items()) self.album = lib.add_album(self.imported_items())
if 'data_source' in self.imported_items()[0]:
self.album.data_source = self.imported_items()[0].data_source
self.reimport_metadata(lib) self.reimport_metadata(lib)
def record_replaced(self, lib): def record_replaced(self, lib):
@ -772,7 +793,7 @@ class ImportTask(BaseImportTask):
if (not dup_item.album_id or if (not dup_item.album_id or
dup_item.album_id in replaced_album_ids): dup_item.album_id in replaced_album_ids):
continue continue
replaced_album = dup_item.get_album() replaced_album = dup_item._cached_album
if replaced_album: if replaced_album:
replaced_album_ids.add(dup_item.album_id) replaced_album_ids.add(dup_item.album_id)
self.replaced_albums[replaced_album.path] = replaced_album self.replaced_albums[replaced_album.path] = replaced_album
@ -789,8 +810,8 @@ class ImportTask(BaseImportTask):
self.album.artpath = replaced_album.artpath self.album.artpath = replaced_album.artpath
self.album.store() self.album.store()
log.debug( log.debug(
u'Reimported album: added {0}, flexible ' 'Reimported album: added {0}, flexible '
u'attributes {1} from album {2} for {3}', 'attributes {1} from album {2} for {3}',
self.album.added, self.album.added,
replaced_album._values_flex.keys(), replaced_album._values_flex.keys(),
replaced_album.id, replaced_album.id,
@ -803,16 +824,16 @@ class ImportTask(BaseImportTask):
if dup_item.added and dup_item.added != item.added: if dup_item.added and dup_item.added != item.added:
item.added = dup_item.added item.added = dup_item.added
log.debug( log.debug(
u'Reimported item added {0} ' 'Reimported item added {0} '
u'from item {1} for {2}', 'from item {1} for {2}',
item.added, item.added,
dup_item.id, dup_item.id,
displayable_path(item.path) displayable_path(item.path)
) )
item.update(dup_item._values_flex) item.update(dup_item._values_flex)
log.debug( log.debug(
u'Reimported item flexible attributes {0} ' 'Reimported item flexible attributes {0} '
u'from item {1} for {2}', 'from item {1} for {2}',
dup_item._values_flex.keys(), dup_item._values_flex.keys(),
dup_item.id, dup_item.id,
displayable_path(item.path) displayable_path(item.path)
@ -825,10 +846,10 @@ class ImportTask(BaseImportTask):
""" """
for item in self.imported_items(): for item in self.imported_items():
for dup_item in self.replaced_items[item]: for dup_item in self.replaced_items[item]:
log.debug(u'Replacing item {0}: {1}', log.debug('Replacing item {0}: {1}',
dup_item.id, displayable_path(item.path)) dup_item.id, displayable_path(item.path))
dup_item.remove() dup_item.remove()
log.debug(u'{0} of {1} items replaced', log.debug('{0} of {1} items replaced',
sum(bool(l) for l in self.replaced_items.values()), sum(bool(l) for l in self.replaced_items.values()),
len(self.imported_items())) len(self.imported_items()))
@ -866,7 +887,7 @@ class SingletonImportTask(ImportTask):
""" """
def __init__(self, toppath, item): def __init__(self, toppath, item):
super(SingletonImportTask, self).__init__(toppath, [item.path], [item]) super().__init__(toppath, [item.path], [item])
self.item = item self.item = item
self.is_album = False self.is_album = False
self.paths = [item.path] self.paths = [item.path]
@ -932,13 +953,13 @@ class SingletonImportTask(ImportTask):
def reload(self): def reload(self):
self.item.load() self.item.load()
def set_fields(self): def set_fields(self, lib):
"""Sets the fields given at CLI or configuration to the specified """Sets the fields given at CLI or configuration to the specified
values. values, for the singleton item.
""" """
for field, view in config['import']['set_fields'].items(): for field, view in config['import']['set_fields'].items():
value = view.get() value = view.get()
log.debug(u'Set field {1}={2} for {0}', log.debug('Set field {1}={2} for {0}',
displayable_path(self.paths), displayable_path(self.paths),
field, field,
value) value)
@ -959,7 +980,7 @@ class SentinelImportTask(ImportTask):
""" """
def __init__(self, toppath, paths): def __init__(self, toppath, paths):
super(SentinelImportTask, self).__init__(toppath, paths, ()) super().__init__(toppath, paths, ())
# TODO Remove the remaining attributes eventually # TODO Remove the remaining attributes eventually
self.should_remove_duplicates = False self.should_remove_duplicates = False
self.is_album = True self.is_album = True
@ -1003,7 +1024,7 @@ class ArchiveImportTask(SentinelImportTask):
""" """
def __init__(self, toppath): def __init__(self, toppath):
super(ArchiveImportTask, self).__init__(toppath, ()) super().__init__(toppath, ())
self.extracted = False self.extracted = False
@classmethod @classmethod
@ -1032,14 +1053,20 @@ class ArchiveImportTask(SentinelImportTask):
cls._handlers = [] cls._handlers = []
from zipfile import is_zipfile, ZipFile from zipfile import is_zipfile, ZipFile
cls._handlers.append((is_zipfile, ZipFile)) cls._handlers.append((is_zipfile, ZipFile))
from tarfile import is_tarfile, TarFile import tarfile
cls._handlers.append((is_tarfile, TarFile)) cls._handlers.append((tarfile.is_tarfile, tarfile.open))
try: try:
from rarfile import is_rarfile, RarFile from rarfile import is_rarfile, RarFile
except ImportError: except ImportError:
pass pass
else: else:
cls._handlers.append((is_rarfile, RarFile)) cls._handlers.append((is_rarfile, RarFile))
try:
from py7zr import is_7zfile, SevenZipFile
except ImportError:
pass
else:
cls._handlers.append((is_7zfile, SevenZipFile))
return cls._handlers return cls._handlers
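Archive support is registered as (test, opener) pairs, and optional backends such as rarfile and the newly added py7zr are only registered when their import succeeds. A trimmed, standalone sketch of the same pattern, using only handlers visible in this hunk:

```python
def archive_handlers():
    """Return (is_archive, opener) pairs for every available backend."""
    handlers = []
    from zipfile import is_zipfile, ZipFile
    handlers.append((is_zipfile, ZipFile))
    import tarfile
    handlers.append((tarfile.is_tarfile, tarfile.open))
    try:
        from py7zr import is_7zfile, SevenZipFile   # optional dependency
    except ImportError:
        pass
    else:
        handlers.append((is_7zfile, SevenZipFile))
    return handlers

def open_archive(path):
    for test, opener in archive_handlers():
        if test(path):
            return opener(path, mode='r')
    raise ValueError(f'{path} is not a supported archive')
```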
@ -1047,7 +1074,7 @@ class ArchiveImportTask(SentinelImportTask):
"""Removes the temporary directory the archive was extracted to. """Removes the temporary directory the archive was extracted to.
""" """
if self.extracted: if self.extracted:
log.debug(u'Removing extracted directory: {0}', log.debug('Removing extracted directory: {0}',
displayable_path(self.toppath)) displayable_path(self.toppath))
shutil.rmtree(self.toppath) shutil.rmtree(self.toppath)
@ -1059,9 +1086,9 @@ class ArchiveImportTask(SentinelImportTask):
if path_test(util.py3_path(self.toppath)): if path_test(util.py3_path(self.toppath)):
break break
extract_to = mkdtemp()
archive = handler_class(util.py3_path(self.toppath), mode='r')
try: try:
extract_to = mkdtemp()
archive = handler_class(util.py3_path(self.toppath), mode='r')
archive.extractall(extract_to) archive.extractall(extract_to)
finally: finally:
archive.close() archive.close()
@ -1069,10 +1096,11 @@ class ArchiveImportTask(SentinelImportTask):
self.toppath = extract_to self.toppath = extract_to
class ImportTaskFactory(object): class ImportTaskFactory:
"""Generate album and singleton import tasks for all media files """Generate album and singleton import tasks for all media files
indicated by a path. indicated by a path.
""" """
def __init__(self, toppath, session): def __init__(self, toppath, session):
"""Create a new task factory. """Create a new task factory.
@ -1110,14 +1138,12 @@ class ImportTaskFactory(object):
if self.session.config['singletons']: if self.session.config['singletons']:
for path in paths: for path in paths:
tasks = self._create(self.singleton(path)) tasks = self._create(self.singleton(path))
for task in tasks: yield from tasks
yield task
yield self.sentinel(dirs) yield self.sentinel(dirs)
else: else:
tasks = self._create(self.album(paths, dirs)) tasks = self._create(self.album(paths, dirs))
for task in tasks: yield from tasks
yield task
# Produce the final sentinel for this toppath to indicate that # Produce the final sentinel for this toppath to indicate that
# it is finished. This is usually just a SentinelImportTask, but # it is finished. This is usually just a SentinelImportTask, but
@ -1165,7 +1191,7 @@ class ImportTaskFactory(object):
"""Return a `SingletonImportTask` for the music file. """Return a `SingletonImportTask` for the music file.
""" """
if self.session.already_imported(self.toppath, [path]): if self.session.already_imported(self.toppath, [path]):
log.debug(u'Skipping previously-imported path: {0}', log.debug('Skipping previously-imported path: {0}',
displayable_path(path)) displayable_path(path))
self.skipped += 1 self.skipped += 1
return None return None
@ -1186,10 +1212,10 @@ class ImportTaskFactory(object):
return None return None
if dirs is None: if dirs is None:
dirs = list(set(os.path.dirname(p) for p in paths)) dirs = list({os.path.dirname(p) for p in paths})
if self.session.already_imported(self.toppath, dirs): if self.session.already_imported(self.toppath, dirs):
log.debug(u'Skipping previously-imported path: {0}', log.debug('Skipping previously-imported path: {0}',
displayable_path(dirs)) displayable_path(dirs))
self.skipped += 1 self.skipped += 1
return None return None
@ -1219,22 +1245,22 @@ class ImportTaskFactory(object):
if not (self.session.config['move'] or if not (self.session.config['move'] or
self.session.config['copy']): self.session.config['copy']):
log.warning(u"Archive importing requires either " log.warning("Archive importing requires either "
u"'copy' or 'move' to be enabled.") "'copy' or 'move' to be enabled.")
return return
log.debug(u'Extracting archive: {0}', log.debug('Extracting archive: {0}',
displayable_path(self.toppath)) displayable_path(self.toppath))
archive_task = ArchiveImportTask(self.toppath) archive_task = ArchiveImportTask(self.toppath)
try: try:
archive_task.extract() archive_task.extract()
except Exception as exc: except Exception as exc:
log.error(u'extraction failed: {0}', exc) log.error('extraction failed: {0}', exc)
return return
# Now read albums from the extracted directory. # Now read albums from the extracted directory.
self.toppath = archive_task.toppath self.toppath = archive_task.toppath
log.debug(u'Archive extracted to: {0}', self.toppath) log.debug('Archive extracted to: {0}', self.toppath)
return archive_task return archive_task
def read_item(self, path): def read_item(self, path):
@ -1250,9 +1276,9 @@ class ImportTaskFactory(object):
# Silently ignore non-music files. # Silently ignore non-music files.
pass pass
elif isinstance(exc.reason, mediafile.UnreadableFileError): elif isinstance(exc.reason, mediafile.UnreadableFileError):
log.warning(u'unreadable file: {0}', displayable_path(path)) log.warning('unreadable file: {0}', displayable_path(path))
else: else:
log.error(u'error reading {0}: {1}', log.error('error reading {0}: {1}',
displayable_path(path), exc) displayable_path(path), exc)
@@ -1291,17 +1317,16 @@ def read_tasks(session):
         # Generate tasks.
         task_factory = ImportTaskFactory(toppath, session)
-        for t in task_factory.tasks():
-            yield t
+        yield from task_factory.tasks()
         skipped += task_factory.skipped

         if not task_factory.imported:
-            log.warning(u'No files imported from {0}',
+            log.warning('No files imported from {0}',
                         displayable_path(toppath))

     # Show skipped directories (due to incremental/resume).
     if skipped:
-        log.info(u'Skipped {0} paths.', skipped)
+        log.info('Skipped {0} paths.', skipped)


 def query_tasks(session):
@@ -1319,7 +1344,7 @@ def query_tasks(session):
     else:
         # Search for albums.
         for album in session.lib.albums(session.query):
-            log.debug(u'yielding album {0}: {1} - {2}',
+            log.debug('yielding album {0}: {1} - {2}',
                       album.id, album.albumartist, album.album)
             items = list(album.items())
             _freshen_items(items)
@@ -1342,7 +1367,7 @@ def lookup_candidates(session, task):
         return

     plugins.send('import_task_start', session=session, task=task)
-    log.debug(u'Looking up: {0}', displayable_path(task.paths))
+    log.debug('Looking up: {0}', displayable_path(task.paths))

     # Restrict the initial lookup to IDs specified by the user via the -m
     # option. Currently all the IDs are passed onto the tasks directly.
@@ -1381,8 +1406,7 @@ def user_query(session, task):
         def emitter(task):
             for item in task.items:
                 task = SingletonImportTask(task.toppath, item)
-                for new_task in task.handle_created(session):
-                    yield new_task
+                yield from task.handle_created(session)
             yield SentinelImportTask(task.toppath, task.paths)

         return _extend_pipeline(emitter(task),
@@ -1428,30 +1452,30 @@ def resolve_duplicates(session, task):
     if task.choice_flag in (action.ASIS, action.APPLY, action.RETAG):
         found_duplicates = task.find_duplicates(session.lib)
         if found_duplicates:
-            log.debug(u'found duplicates: {}'.format(
+            log.debug('found duplicates: {}'.format(
                 [o.id for o in found_duplicates]
            ))

             # Get the default action to follow from config.
             duplicate_action = config['import']['duplicate_action'].as_choice({
-                u'skip': u's',
-                u'keep': u'k',
-                u'remove': u'r',
-                u'merge': u'm',
-                u'ask': u'a',
+                'skip': 's',
+                'keep': 'k',
+                'remove': 'r',
+                'merge': 'm',
+                'ask': 'a',
             })
-            log.debug(u'default action for duplicates: {0}', duplicate_action)
+            log.debug('default action for duplicates: {0}', duplicate_action)

-            if duplicate_action == u's':
+            if duplicate_action == 's':
                 # Skip new.
                 task.set_choice(action.SKIP)
-            elif duplicate_action == u'k':
+            elif duplicate_action == 'k':
                 # Keep both. Do nothing; leave the choice intact.
                 pass
-            elif duplicate_action == u'r':
+            elif duplicate_action == 'r':
                 # Remove old.
                 task.should_remove_duplicates = True
-            elif duplicate_action == u'm':
+            elif duplicate_action == 'm':
                 # Merge duplicates together
                 task.should_merge_duplicates = True
             else:
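`as_choice` comes from confuse, the configuration library beets uses, and maps the configured `duplicate_action` string to the one-letter codes compared above. A rough plain-dict approximation of that lookup (a hypothetical stand-in, not the confuse API itself):

    # Stand-in for config['import']['duplicate_action'].as_choice({...}).
    CODES = {'skip': 's', 'keep': 'k', 'remove': 'r', 'merge': 'm', 'ask': 'a'}

    def duplicate_action_code(configured):
        try:
            return CODES[configured]
        except KeyError:
            # confuse raises its own error for values outside the choices;
            # here we just fail with a readable message.
            raise ValueError('duplicate_action must be one of %s' % sorted(CODES))

    assert duplicate_action_code('merge') == 'm'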
@@ -1471,7 +1495,7 @@ def import_asis(session, task):
     if task.skip:
         return

-    log.info(u'{}', displayable_path(task.paths))
+    log.info('{}', displayable_path(task.paths))
     task.set_choice(action.ASIS)
     apply_choice(session, task)
@@ -1496,7 +1520,7 @@ def apply_choice(session, task):
     # because then the ``ImportTask`` won't have an `album` for which
     # it can set the fields.
     if config['import']['set_fields']:
-        task.set_fields()
+        task.set_fields(session.lib)


 @pipeline.mutator_stage
@@ -1534,6 +1558,8 @@ def manipulate_files(session, task):
             operation = MoveOperation.LINK
         elif session.config['hardlink']:
             operation = MoveOperation.HARDLINK
+        elif session.config['reflink']:
+            operation = MoveOperation.REFLINK
         else:
             operation = None
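The added `reflink` branch extends the operation-selection chain: the first enabled flag wins, and `None` means files are left in place. A self-contained sketch of that precedence; the enum here is a stand-in for beets' `MoveOperation`, and the earlier move/copy branches are assumed from the surrounding code rather than shown in this hunk:

    from enum import Enum

    class MoveOperation(Enum):
        MOVE = 'move'
        COPY = 'copy'
        LINK = 'link'
        HARDLINK = 'hardlink'
        REFLINK = 'reflink'

    def pick_operation(config):
        # Mirrors the elif chain: the first truthy flag decides.
        for key in ('move', 'copy', 'link', 'hardlink', 'reflink'):
            if config.get(key):
                return MoveOperation(key)
        return None

    assert pick_operation({'copy': True, 'reflink': True}) is MoveOperation.COPY
    assert pick_operation({'reflink': True}) is MoveOperation.REFLINK
    assert pick_operation({}) is None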
@@ -1552,11 +1578,11 @@ def log_files(session, task):
     """A coroutine (pipeline stage) to log each file to be imported.
     """
     if isinstance(task, SingletonImportTask):
-        log.info(u'Singleton: {0}', displayable_path(task.item['path']))
+        log.info('Singleton: {0}', displayable_path(task.item['path']))
     elif task.items:
-        log.info(u'Album: {0}', displayable_path(task.paths[0]))
+        log.info('Album: {0}', displayable_path(task.paths[0]))
         for item in task.items:
-            log.info(u'  {0}', displayable_path(item['path']))
+            log.info('  {0}', displayable_path(item['path']))


 def group_albums(session):
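The `log.info('Album: {0}', ...)` calls rely on the logger interpolating `str.format`-style placeholders lazily (beets provides its own logging wrapper for this, as the `{0}` placeholders throughout this diff suggest); the plain stdlib logger would instead apply %-style formatting to positional args. A generic standard-library sketch of the same deferred-formatting idea, not beets' actual implementation:

    import logging

    class BraceMessage:
        """Defer str.format until the record is actually emitted."""
        def __init__(self, fmt, *args):
            self.fmt, self.args = fmt, args

        def __str__(self):
            return self.fmt.format(*self.args)

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger('example')
    # The format() call only runs if an INFO record is actually handled.
    log.info(BraceMessage('Album: {0}', '/music/Album'))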

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff