diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..7dd35dd --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +Huntarr.io-6.3.6/ diff --git a/Huntarr.io-6.3.6/.github/FUNDING.yml b/Huntarr.io-6.3.6/.github/FUNDING.yml new file mode 100644 index 0000000..8d6142a --- /dev/null +++ b/Huntarr.io-6.3.6/.github/FUNDING.yml @@ -0,0 +1,15 @@ +# These are supported funding model platforms + +github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] +patreon: # Replace with a single Patreon username +open_collective: # Replace with a single Open Collective username +ko_fi: # Replace with a single Ko-fi username +tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel +community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry +liberapay: # Replace with a single Liberapay username +issuehunt: # Replace with a single IssueHunt username +lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry +polar: # Replace with a single Polar username +buy_me_a_coffee: # Replace with a single Buy Me a Coffee username +thanks_dev: # Replace with a single thanks.dev username +paypal: # https://www.paypal.com/donate?hosted_button_id=58AYJ68VVMGSC diff --git a/Huntarr.io-6.3.6/.github/workflows/docker-image.yml b/Huntarr.io-6.3.6/.github/workflows/docker-image.yml new file mode 100644 index 0000000..a131aed --- /dev/null +++ b/Huntarr.io-6.3.6/.github/workflows/docker-image.yml @@ -0,0 +1,110 @@ +name: Docker Build and Push +on: + push: + branches: + - '*' # This will trigger on any branch push + tags: + - "*" # This will trigger on any tag push + pull_request: + branches: + - main +jobs: + build-and-push: + runs-on: ubuntu-latest + steps: + # 1) Check out your repository code with full depth + - name: Checkout code + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + # 2) List files to verify huntarr.py is present + - name: List files in directory + 
run: ls -la + + # 3) Set up QEMU for multi-architecture builds + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + with: + platforms: arm64,amd64 + + # 4) Set up Docker Buildx + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + # 5) Log in to Docker Hub + - name: Log in to Docker Hub + if: github.event_name != 'pull_request' + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + + # 6) Extract metadata (version, branch name, etc.) + - name: Extract metadata + id: meta + run: | + if [[ "${{ github.ref }}" == refs/tags/* ]]; then + echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT + echo "IS_TAG=true" >> $GITHUB_OUTPUT + else + echo "BRANCH=${GITHUB_REF#refs/heads/}" >> $GITHUB_OUTPUT + echo "IS_TAG=false" >> $GITHUB_OUTPUT + fi + + # 7a) Build & Push if on 'main' branch + - name: Build and Push (main) + if: github.ref == 'refs/heads/main' && github.event_name != 'pull_request' + uses: docker/build-push-action@v3 + with: + context: . + push: true + platforms: linux/amd64,linux/arm64 + tags: | + huntarr/huntarr:latest + huntarr/huntarr:${{ github.sha }} + + # 7b) Build & Push if on 'dev' branch + - name: Build and Push (dev) + if: github.ref == 'refs/heads/dev' && github.event_name != 'pull_request' + uses: docker/build-push-action@v3 + with: + context: . + push: true + platforms: linux/amd64,linux/arm64 + tags: | + huntarr/huntarr:dev + huntarr/huntarr:${{ github.sha }} + + # 7c) Build & Push if it's a tag/release + - name: Build and Push (release) + if: steps.meta.outputs.IS_TAG == 'true' && github.event_name != 'pull_request' + uses: docker/build-push-action@v3 + with: + context: . 
+ push: true + platforms: linux/amd64,linux/arm64 + tags: | + huntarr/huntarr:${{ steps.meta.outputs.VERSION }} + huntarr/huntarr:latest + + # 7d) Build & Push for any other branch + - name: Build and Push (feature branch) + if: github.ref != 'refs/heads/main' && github.ref != 'refs/heads/dev' && steps.meta.outputs.IS_TAG != 'true' && github.event_name != 'pull_request' + uses: docker/build-push-action@v3 + with: + context: . + push: true + platforms: linux/amd64,linux/arm64 + tags: | + huntarr/huntarr:${{ steps.meta.outputs.BRANCH }} + huntarr/huntarr:${{ github.sha }} + + # 7e) Just build on pull requests + - name: Build (PR) + if: github.event_name == 'pull_request' + uses: docker/build-push-action@v3 + with: + context: . + push: false + platforms: linux/amd64,linux/arm64 \ No newline at end of file diff --git a/Huntarr.io-6.3.6/.gitignore b/Huntarr.io-6.3.6/.gitignore new file mode 100644 index 0000000..ddafd42 --- /dev/null +++ b/Huntarr.io-6.3.6/.gitignore @@ -0,0 +1,230 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# virtual environments +.venv/ + +# React files +.idea/ +.vscode/ +build/ +*.tgz +template/src/__tests__/__snapshots__/ +lerna-debug.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +/.changelog +.npm/ + +.DS_STORE +node_modules +scripts/flow/*/.flowconfig +.flowconfig +*~ +*.pyc +.grunt +_SpecRunner.html +__benchmarks__ +build/ +remote-repo/ +coverage/ +.module-cache +fixtures/dom/public/react-dom.js +fixtures/dom/public/react.js +test/the-files-to-test.generated.js +*.log* +chrome-user-data +*.sublime-project +*.sublime-workspace +.idea +*.iml +.vscode +*.swp +*.swo + +packages/react-devtools-core/dist +packages/react-devtools-extensions/chrome/build +packages/react-devtools-extensions/chrome/*.crx +packages/react-devtools-extensions/chrome/*.pem +packages/react-devtools-extensions/firefox/build +packages/react-devtools-extensions/firefox/*.xpi 
+packages/react-devtools-extensions/firefox/*.pem +packages/react-devtools-extensions/shared/build +packages/react-devtools-extensions/.tempUserDataDir +packages/react-devtools-fusebox/dist +packages/react-devtools-inline/dist +packages/react-devtools-shell/dist +packages/react-devtools-timeline/dist + + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# UV +# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. 
+# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +#uv.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/ + +# Ruff stuff: +.ruff_cache/ + +# PyPI configuration file +.pypirc \ No newline at end of file diff --git a/Huntarr.io-6.3.6/Dockerfile b/Huntarr.io-6.3.6/Dockerfile new file mode 100644 index 0000000..91d4421 --- /dev/null +++ b/Huntarr.io-6.3.6/Dockerfile @@ -0,0 +1,24 @@ +FROM python:3.9-slim + +WORKDIR /app + +# Install required packages from the root requirements file +COPY requirements.txt /app/ +RUN pip install --no-cache-dir -r requirements.txt + +# Copy application code +COPY . /app/ + +# Create necessary directories +RUN mkdir -p /config/settings /config/stateful /config/user /config/logs +RUN chmod -R 755 /config + +# Set environment variables +ENV PYTHONPATH=/app +# ENV APP_TYPE=sonarr # APP_TYPE is likely managed via config now, remove if not needed + +# Expose port +EXPOSE 9705 + +# Run the main application using the new entry point +CMD ["python3", "main.py"] \ No newline at end of file diff --git a/Huntarr.io-6.3.6/LICENSE b/Huntarr.io-6.3.6/LICENSE new file mode 100644 index 0000000..b01123c --- /dev/null +++ b/Huntarr.io-6.3.6/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. 
+ + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. 
Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. 
+ + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/Huntarr.io-6.3.6/README.md b/Huntarr.io-6.3.6/README.md new file mode 100644 index 0000000..dbcd486 --- /dev/null +++ b/Huntarr.io-6.3.6/README.md @@ -0,0 +1,230 @@ +

Huntarr - Find Missing & Upgrade Media Items

+ +

+ Huntarr Logo +

+ +--- + +

Want to Help? Click the Star in the Upper-Right Corner! ⭐

+ + + +| Application | Status | +| :---------- | :------------ | +| Sonarr | **✅ Ready** | +| Radarr | **✅ Ready** | +| Lidarr | **✅ Ready** | +| Readarr | **✅ Ready** | +| Whisparr v2 | **✅ Ready** | +| Whisparr v3 | **✅ Ready** | +| Bazarr | **❌ Not Ready** | + + +Keep in mind this is very early in program development. If you have a very special hand picked collection (because some users are extra special), test before you deploy. + +## Table of Contents +- [Overview](#overview) +- [Other Projects](#other-projects) +- [Community](#community) +- [Indexers Approving of Huntarr](#indexers-approving-of-huntarr) +- [How It Works](#how-it-works) +- [Web Interface](#web-interface) + - [How to Access](#how-to-access) + - [Web UI Settings](#web-ui-settings) + - [Volume Mapping](#volume-mapping) +- [Installation Methods](#installation-methods) + - [Docker Run](#docker-run) + - [Docker Compose](#docker-compose) + - [Unraid Users](#unraid-users) +- [Tips](#tips) +- [Troubleshooting](#troubleshooting) +- [Change Log](#change-log) + +## Overview + +This application continually searches your media libraries for missing content and items that need quality upgrades. It automatically triggers searches for both missing items and those below your quality cutoff. It's designed to run continuously while being gentle on your indexers, helping you gradually complete your media collection with the best available quality. + +For detailed documentation, please visit our [Wiki](https://github.com/plexguide/Huntarr/wiki). + +## Other Projects + +* [Unraid Intel ARC Deployment](https://github.com/plexguide/Unraid_Intel-ARC_Deployment) - Convert videos to AV1 Format (I've saved 325TB encoding to AV1) +* Visit [PlexGuide](https://plexguide.com) for more great scripts + +## Community + +

+ Join the community on Discord! +
+ + Discord + +

+ +## PayPal Donations – For My Daughter's College Fund + +My 12-year-old daughter is passionate about singing, dancing, and exploring STEM. She consistently earns A-B honors! Every donation goes directly into her college fund! + +[![Donate with PayPal button](https://www.paypalobjects.com/en_US/i/btn/btn_donate_LG.gif)](https://www.paypal.com/donate?hosted_button_id=58AYJ68VVMGSC) + +## Indexers Approving of Huntarr: +* https://ninjacentral.co.za + +## How It Works + +### 🔄 Continuous Automation Cycle + +#### 1️⃣ Connect & Analyze +Huntarr connects to your Sonarr/Radarr/Lidarr/Readarr instance and analyzes your media library to identify both missing content and potential quality upgrades. + +#### 2️⃣ Hunt Missing Content +- 📊 **Smart Selection:** Choose between random or sequential processing +- 🔍 **Efficient Refreshing:** Optionally skip metadata refresh to reduce disk I/O +- 🔮 **Future-Aware:** Automatically skip content with future release dates +- 🎯 **Precise Control:** Set exactly how many items to process per cycle + +#### 3️⃣ Hunt Quality Upgrades +- ⬆️ **Quality Improvement:** Find content below your quality cutoff settings +- 📦 **Batch Processing:** Configure exactly how many upgrades to process at once +- 📚 **Large Library Support:** Smart pagination handles even massive libraries +- 🔀 **Flexible Modes:** Choose between random or sequential processing + +#### 4️⃣ State Management +- 📝 **History Tracking:** Remembers which items have been processed +- 💾 **Persistent Storage:** State data is saved in the `/config` directory +- ⏱️ **Automatic Reset:** State is cleared after your configured time period (default: 7 days) + +#### 5️⃣ Repeat & Rest +Huntarr waits for your configured interval (adjustable in settings) before starting the next cycle. This ensures your indexers aren't overloaded while maintaining continuous improvement of your library. 
+
+## Web Interface
+
+Huntarr's live homepage provides statistics about how many hunts have been pursued for missing media and upgrade searches! Note: The numbers shown are just from testing... damn you Whisparr! 

+ image +
+ Homepage +

+ +Huntarr includes a real-time log viewer and settings management web interface that allows you to monitor and configure its operation directly from your browser. + +

+ image +
+ Logger UI +

+ +### How to Access + +The web interface is available on port 9705. Simply navigate to: + +``` +http://YOUR_SERVER_IP:9705 +``` + +The URL will be displayed in the logs when Huntarr starts, using the same hostname you configured for your API_URL. + +### Web UI Settings + +The web interface allows you to configure all of Huntarr's settings: + +

+ image +
+ Settings UI +

+ +### Volume Mapping + +To ensure data persistence, make sure you map the `/config` directory to a persistent volume on your host system: + +```bash +-v /your-path/appdata/huntarr:/config +``` + +--- + +## Installation Methods + +### Docker Run + +The simplest way to run Huntarr is via Docker (all configuration is done via the web UI): + +```bash +docker run -d --name huntarr \ + --restart always \ + -p 9705:9705 \ + -v /your-path/huntarr:/config \ + -e TZ=America/New_York \ + huntarr/huntarr:latest +``` + +To check on the status of the program, you can use the web interface at http://YOUR_SERVER_IP:9705 or check the logs with: +```bash +docker logs huntarr +``` + +### Docker Compose + +For those who prefer Docker Compose, add this to your `docker-compose.yml` file: + +```yaml +services: + huntarr: + image: huntarr/huntarr:latest + container_name: huntarr + restart: always + ports: + - "9705:9705" + volumes: + - /your-path/huntarr:/config + environment: + - TZ=America/New_York +``` + +Then run: + +```bash +docker-compose up -d huntarr +``` + +### Unraid Users + +Run this from Command Line in Unraid: + +```bash +docker run -d --name huntarr \ + --restart always \ + -p 9705:9705 \ + -v /mnt/user/appdata/huntarr:/config \ + -e TZ=America/New_York \ + huntarr/huntarr:latest +``` +## Tips + +- **First-Time Setup**: Navigate to the web interface after installation to create your admin account with 2FA option +- **API Connections**: Configure connections to your *Arr applications through the dedicated settings pages +- **Search Frequency**: Adjust Sleep Duration (default: 900 seconds) based on your indexer's rate limits. 
+- **Batch Processing**: Set Hunt Missing and Upgrade values to control how many items are processed per cycle +- **Queue Management**: Use Minimum Download Queue Size to pause searching when downloads are backed up +- **Skip Processing**: Enable Skip Series/Movie Refresh to significantly reduce disk I/O and database load +- **Future Content**: Keep Skip Future Items enabled to avoid searching for unreleased content +- **Authentication**: Enable two-factor authentication for additional security on your Huntarr instance + +## Troubleshooting + +- **API Connection Issues**: Verify your API key and URL in the Settings page (check for missing http:// or https://) +- **Config URLs**: It is best practice to omit the trailing slash (/) at the end of the URL for each service. i.e. For Sonarr, instead of http://10.10.10.1:8989/ use http://10.10.10.1:8989. This is the most common cause of errors seen in the log each time a cycle runs. +- **Authentication Problems**: If you forget your password, delete `/config/user/credentials.json` and restart +- **Two-Factor Authentication**: If locked out of 2FA, remove credentials file to reset your account +- **Web Interface Not Loading**: Confirm port 9705 is correctly mapped and not blocked by firewalls +- **Logs Not Showing**: Check permissions on the `/config/logs/` directory inside your container +- **Missing State Data**: State files in `/config/stateful/` track processed items; verify permissions +- **Docker Volume Issues**: Ensure your volume mount for `/config` has correct permissions and ownership +- **Command Timeouts**: Adjust command_wait_attempts and command_wait_delay in advanced settings +- **Debug Information**: Enable Debug Mode temporarily to see detailed API responses in the logs + +## Change Log +Visit: https://github.com/plexguide/Huntarr/releases/ diff --git a/Huntarr.io-6.3.6/docker-compose.yml b/Huntarr.io-6.3.6/docker-compose.yml new file mode 100644 index 0000000..5ad7715 --- /dev/null +++ 
b/Huntarr.io-6.3.6/docker-compose.yml @@ -0,0 +1,17 @@ +services: + huntarr: + build: + context: . + dockerfile: Dockerfile + container_name: huntarr + ports: + - "9705:9705" + volumes: + - huntarr-config:/config + environment: + - TZ=America/New_York + restart: unless-stopped + +volumes: + huntarr-config: + name: huntarr-config \ No newline at end of file diff --git a/Huntarr.io-6.3.6/frontend/src/routes/api/settings/+server.js b/Huntarr.io-6.3.6/frontend/src/routes/api/settings/+server.js new file mode 100644 index 0000000..ce00f33 --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/src/routes/api/settings/+server.js @@ -0,0 +1,123 @@ +import fs from 'fs'; +import path from 'path'; +import { json } from '@sveltejs/kit'; +import { invalidateCache } from '$lib/config'; // Assuming config.js handles huntarr.json read/write + +const CONFIG_FILE = path.resolve('huntarr.json'); // Path to the main config file +const DEFAULT_CONFIGS_DIR = path.resolve('src/primary/default_configs'); // Path to new default configs + +// Helper function to load default settings for a specific app +function loadDefaultAppSettings(appName) { + const defaultFile = path.join(DEFAULT_CONFIGS_DIR, `${appName}.json`); + try { + if (fs.existsSync(defaultFile)) { + const data = fs.readFileSync(defaultFile, 'utf8'); + return JSON.parse(data); + } else { + console.warn(`Default settings file not found for app: ${appName}`); + return {}; + } + } catch (error) { + console.error(`Error loading default settings for ${appName}:`, error); + return {}; + } +} + +// Helper function to get all default settings combined +function getAllDefaultSettings() { + const allDefaults = {}; + const appNames = ['sonarr', 'radarr', 'lidarr', 'readarr']; // Define known apps + appNames.forEach(appName => { + const defaults = loadDefaultAppSettings(appName); + if (Object.keys(defaults).length > 0) { + allDefaults[appName] = defaults; + } + }); + // Add a default 'ui' section if needed by the frontend directly + // allDefaults.ui 
= { theme: 'dark', ... }; + return allDefaults; +} + + +// Helper to read config, creating it from defaults if it doesn't exist +function readConfig() { + try { + if (fs.existsSync(CONFIG_FILE)) { + const data = fs.readFileSync(CONFIG_FILE, 'utf8'); + // Handle potentially empty file + if (data.trim() === '') { + console.warn(`Config file ${CONFIG_FILE} is empty. Creating with defaults.`); + const defaultSettings = getAllDefaultSettings(); + writeConfig(defaultSettings); // Write defaults back + return defaultSettings; + } + let parsedData = JSON.parse(data); + + // Optional: Merge with defaults to ensure all keys exist? + // This might be better handled client-side or on save. + // For now, just return what's in the file. If file is missing/empty, defaults are used. + + // Remove legacy sections if present + if (parsedData.global) delete parsedData.global; + // Keep UI section if it exists and is used + // if (parsedData.ui) ... + + return parsedData; + } else { + // Create file with defaults if it doesn't exist + console.log(`Config file ${CONFIG_FILE} not found. 
Creating with defaults.`); + const defaultSettings = getAllDefaultSettings(); + writeConfig(defaultSettings); // Write defaults to the file + return defaultSettings; + } + } catch (error) { + console.error('Error reading or parsing config file:', error); + // Fallback to defaults in case of error + return getAllDefaultSettings(); + } +} + +// Helper to write config +function writeConfig(config) { + try { + const configDir = path.dirname(CONFIG_FILE); + if (!fs.existsSync(configDir)) { + fs.mkdirSync(configDir, { recursive: true }); + } + fs.writeFileSync(CONFIG_FILE, JSON.stringify(config, null, 2), 'utf8'); + invalidateCache(); // Invalidate cache after writing + return true; + } catch (error) { + console.error('Error writing config file:', error); + return false; + } +} + +// GET request handler +export async function GET() { + const config = readConfig(); + return json(config); +} + +// POST request handler +export async function POST({ request }) { + try { + const newSettings = await request.json(); + + // Optional: Validate or sanitize newSettings here + + // Read current config to potentially merge or just overwrite + // let currentConfig = readConfig(); + // Merge logic could go here if needed, e.g., preserving a 'ui' section + // For simplicity, this example overwrites the entire config + + if (writeConfig(newSettings)) { + return json({ success: true, message: 'Settings saved successfully.' }); + } else { + return json({ success: false, message: 'Failed to write settings.' }, { status: 500 }); + } + } catch (error) { + console.error('Error processing POST request:', error); + return json({ success: false, message: 'Invalid request data.' 
}, { status: 400 }); + } +} diff --git a/Huntarr.io-6.3.6/frontend/src/routes/settings/+page.svelte b/Huntarr.io-6.3.6/frontend/src/routes/settings/+page.svelte new file mode 100644 index 0000000..4f58c4c --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/src/routes/settings/+page.svelte @@ -0,0 +1,116 @@ + + + \ No newline at end of file diff --git a/Huntarr.io-6.3.6/frontend/static/arrs/48-lidarr.png b/Huntarr.io-6.3.6/frontend/static/arrs/48-lidarr.png new file mode 100644 index 0000000..2ff9a0c Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/arrs/48-lidarr.png differ diff --git a/Huntarr.io-6.3.6/frontend/static/arrs/48-radarr.png b/Huntarr.io-6.3.6/frontend/static/arrs/48-radarr.png new file mode 100644 index 0000000..cb21512 Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/arrs/48-radarr.png differ diff --git a/Huntarr.io-6.3.6/frontend/static/arrs/48-readarr.png b/Huntarr.io-6.3.6/frontend/static/arrs/48-readarr.png new file mode 100644 index 0000000..8bc680d Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/arrs/48-readarr.png differ diff --git a/Huntarr.io-6.3.6/frontend/static/arrs/48-sonarr.png b/Huntarr.io-6.3.6/frontend/static/arrs/48-sonarr.png new file mode 100644 index 0000000..00a4a7d Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/arrs/48-sonarr.png differ diff --git a/Huntarr.io-6.3.6/frontend/static/arrs/48-whisparr.png b/Huntarr.io-6.3.6/frontend/static/arrs/48-whisparr.png new file mode 100644 index 0000000..85054af Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/arrs/48-whisparr.png differ diff --git a/Huntarr.io-6.3.6/frontend/static/css/new-style.css b/Huntarr.io-6.3.6/frontend/static/css/new-style.css new file mode 100644 index 0000000..1000631 --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/static/css/new-style.css @@ -0,0 +1,1818 @@ +:root { + /* Light Theme Colors */ + --bg-primary: #f8f9fa; + --bg-secondary: #ffffff; + --bg-tertiary: #f1f3f5; + --text-primary: #212529; + 
--text-secondary: #495057; + --text-muted: #6c757d; + --border-color: #dee2e6; + --accent-color: #3498db; + --accent-hover: #2980b9; + --success-color: #27ae60; + --warning-color: #f39c12; + --error-color: #e74c3c; + --info-color: #2980b9; + --debug-color: #7f8c8d; + + /* Component Colors */ + --sidebar-bg: #2c3e50; + --sidebar-text: #ecf0f1; + --sidebar-item-hover: #34495e; + --sidebar-item-active: #3498db; + --topbar-bg: var(--bg-secondary); + --card-bg: var(--bg-secondary); + --switch-bg: #cbd2d9; + --switch-active: #3498db; + + /* Button Colors */ + --button-primary-bg: #3498db; + --button-primary-text: #ffffff; + --button-primary-hover: #2980b9; + --button-danger-bg: #e74c3c; + --button-danger-hover: #c0392b; + --button-success-bg: #27ae60; + --button-success-hover: #219955; + + /* Status Colors */ + --status-connected: #27ae60; + --status-not-connected: #e74c3c; + + /* Logs Colors */ + --log-bg: var(--bg-secondary); + --log-border: var(--border-color); +} + +.dark-theme { + --bg-primary: #1a1d24; + --bg-secondary: #252a34; + --bg-tertiary: #2d3748; + --text-primary: #f8f9fa; + --text-secondary: #e9ecef; + --text-muted: #adb5bd; + --border-color: #4a5568; + --accent-color: #3498db; + --accent-hover: #2980b9; + + /* Component Colors */ + --sidebar-bg: #121212; + --sidebar-text: #ecf0f1; + --sidebar-item-hover: #2d3748; + --sidebar-item-active: #3498db; + --topbar-bg: #252a34; + --card-bg: #252a34; + --switch-bg: #4a5568; + + /* Logs Colors */ + --log-bg: #252a34; + --log-border: #4a5568; +} + +/* Base Styles */ +body, html { + font-family: 'Inter', -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif; + margin: 0; + padding: 0; + background-color: var(--bg-primary); + color: var(--text-primary); + height: 100%; + width: 100%; + overflow: auto !important; /* Allow scrolling on body and html */ +} + +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} + +body { + line-height: 1.6; + transition: background-color 0.3s, color 
0.3s; + height: 100vh; +} + +/* Global scrollbar management */ +.content-section { + overflow: auto !important; /* Allow scrolling on content sections */ +} + +/* Styling for scrollbars */ +::-webkit-scrollbar { + width: 8px; + height: 8px; +} + +::-webkit-scrollbar-track { + background: #262a36; + border-radius: 4px; +} + +::-webkit-scrollbar-thumb { + background: #3d4353; + border-radius: 4px; + transition: background-color 0.3s; +} + +::-webkit-scrollbar-thumb:hover { + background: #4d5569; +} + +/* Allow scrolling on main elements */ +body, html, .main-content, .section-wrapper { + overflow: auto !important; + scrollbar-width: thin !important; +} + +/* Only allow scrollbars on specific elements */ +.single-scroll-container { + scrollbar-width: thin !important; + overflow-y: auto !important; +} + +/* Hide scrollbars on all table elements */ +table, tbody, tr, td, th { + overflow: visible !important; +} + +/* Hide all WebKit scrollbars except on allowed containers */ +body::-webkit-scrollbar, +html::-webkit-scrollbar, +.content-section::-webkit-scrollbar, +.main-content::-webkit-scrollbar, +.section-wrapper::-webkit-scrollbar { + width: 8px !important; + height: 8px !important; + display: block !important; +} + +table::-webkit-scrollbar, +tr::-webkit-scrollbar, +tbody::-webkit-scrollbar, +th::-webkit-scrollbar, +td::-webkit-scrollbar { + width: 0 !important; + height: 0 !important; + display: none !important; +} + +a { + text-decoration: none; + color: var(--accent-color); +} + +button { + cursor: pointer; + font-family: inherit; +} + +/* Layout Structure */ +.app-container { + display: flex; + height: 100vh; + overflow: hidden; +} + +/* Sidebar */ +.sidebar { + width: 240px; + background-color: var(--sidebar-bg); + color: var(--sidebar-text); + display: flex; + flex-direction: column; + padding: 20px 0; + height: 100%; + overflow: auto; + box-shadow: 2px 0 10px rgba(0, 0, 0, 0.1); + flex-shrink: 0; +} + +.logo-container { + display: flex; + align-items: center; 
+ justify-content: center; + padding: 0 20px 20px; + margin-bottom: 10px; + border-bottom: 1px solid rgba(255, 255, 255, 0.1); + min-height: 64px; /* Ensures container doesn't collapse while loading */ +} + +/* Updated logo size */ +.logo { + width: 64px; + height: 64px; + margin-right: 10px; + object-fit: contain; + transition: none !important; /* Prevent transition animations */ +} + +.login-logo { + width: 64px; + height: 64px; + margin-bottom: 10px; + object-fit: contain; + transition: none !important; /* Prevent transition animations */ +} + +.sidebar h1 { + font-size: 1.5rem; + font-weight: bold; +} + +.nav-menu { + display: flex; + flex-direction: column; + flex-grow: 1; + margin-top: 20px; +} + +.nav-item { + display: flex; + align-items: center; + padding: 12px 20px; + color: var(--sidebar-text); + transition: all 0.3s; +} + +.nav-item i { + margin-right: 12px; + font-size: 18px; + width: 24px; + text-align: center; +} + +.nav-item:hover { + background-color: var(--sidebar-item-hover); +} + +.nav-item.active { + background-color: var(--sidebar-item-active); + font-weight: 600; +} + +/* Theme Switch */ +.theme-switcher { + padding: 20px; + margin-top: 20px; + border-top: 1px solid rgba(255, 255, 255, 0.1); +} + +.switch-label { + display: flex; + justify-content: space-between; + margin-bottom: 8px; + font-size: 14px; +} + +.light-icon, .dark-icon { + color: var(--sidebar-text); +} + +.switch { + position: relative; + display: inline-block; + width: 100%; + height: 26px; +} + +.switch input { + opacity: 0; + width: 0; + height: 0; +} + +.slider { + position: absolute; + cursor: pointer; + top: 0; + left: 0; + right: 0; + bottom: 0; + background-color: var(--switch-bg); + transition: .4s; +} + +.slider:before { + position: absolute; + content: ""; + height: 18px; + width: 18px; + left: 4px; + bottom: 4px; + background-color: white; + transition: .4s; +} + +input:checked + .slider { + background-color: var(--switch-active); +} + +input:checked + 
.slider:before { + transform: translateX(calc(100% + 4px)); +} + +.slider.round { + border-radius: 34px; + width: 48px; +} + +.slider.round:before { + border-radius: 50%; +} + +/* Main Content */ +.main-content { + flex-grow: 1; + display: flex; + flex-direction: column; + overflow: auto; /* Changed from hidden to auto to allow scrolling */ +} + +/* Top Bar */ +.top-bar { + height: 60px; + background-color: var(--topbar-bg); + display: flex; + justify-content: space-between; + align-items: center; + padding: 0 20px; + border-bottom: 1px solid var(--border-color); + box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05); +} + +.page-title { + font-size: 1.3rem; + font-weight: 600; + color: var(--text-primary); +} + +.user-info { + display: flex; + align-items: center; + font-size: 14px; +} + +.user-info span { + margin-right: 12px; +} + +.logout-btn { + color: var(--text-secondary); + transition: color 0.3s; +} + +.logout-btn:hover { + color: var(--error-color); +} + +/* Content Sections - Strict section isolation */ +.content-section { + display: none !important; + visibility: hidden; + position: absolute; + left: 0; + right: 0; + height: 0; + overflow: hidden; + opacity: 0; + z-index: -1; +} + +.content-section.active { + display: block !important; + visibility: visible; + position: relative; + height: auto; + overflow: auto !important; /* Allow scrolling when content is tall */ + opacity: 1; + z-index: 1; + padding: 20px; +} + +/* Cards */ +.dashboard-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); + gap: 20px; +} + +.card { + background-color: var(--card-bg); + border-radius: 8px; + box-shadow: 0 4px 6px rgba(0, 0, 0, 0.05); + padding: 20px; +} + +.card h2, .card h3 { + margin-bottom: 15px; + font-weight: 600; + display: flex; + align-items: center; +} + +.card h2 i, .card h3 i { + margin-right: 10px; + color: var(--accent-color); +} + +.welcome-card { + grid-column: 1 / -1; +} + +/* Status Card */ +.status-list { + display: flex; + 
flex-direction: column; + gap: 12px; +} + +.status-item { + display: flex; + justify-content: space-between; + align-items: center; +} + +.status-badge { + padding: 6px 12px; + border-radius: 20px; + font-size: 13px; + font-weight: 500; + display: flex; + align-items: center; +} + +.status-badge i { + margin-right: 5px; +} + +.status-badge.connected { + background-color: rgba(39, 174, 96, 0.2); + color: var(--status-connected); +} + +.status-badge.not-connected { + background-color: rgba(231, 76, 60, 0.2); + color: var(--status-not-connected); +} + +/* Stats Card */ +.stats-grid { + display: grid; + grid-template-columns: repeat(3, 1fr); + gap: 10px; +} + +.stat-item { + display: flex; + flex-direction: column; + align-items: center; + padding: 10px; + border-radius: 6px; + background-color: var(--bg-tertiary); +} + +.stat-value { + font-size: 1.8rem; + font-weight: bold; + color: var(--accent-color); +} + +.stat-label { + font-size: 0.9rem; + color: var(--text-secondary); + margin-top: 5px; +} + +/* Action Card */ +.action-buttons { + display: flex; + gap: 10px; +} + +.action-button { + padding: 12px 20px; + border-radius: 6px; + border: none; + font-weight: 600; + display: flex; + align-items: center; + justify-content: center; + flex: 1; + transition: background-color 0.3s, transform 0.2s; +} + +.action-button i { + margin-right: 8px; +} + +.action-button.start { + background-color: var(--button-success-bg); + color: white; +} + +.action-button.start:hover { + background-color: var(--button-success-hover); + transform: translateY(-2px); +} + +.action-button.stop { + background-color: var(--button-danger-bg); + color: white; +} + +.action-button.stop:hover { + background-color: var(--button-danger-hover); + transform: translateY(-2px); +} + +/* Logs Section */ +.section-header { + margin-bottom: 20px; + display: flex; + justify-content: space-between; + align-items: center; + flex-wrap: wrap; + gap: 15px; +} + +.app-tabs, .log-controls { + display: flex; + gap: 
10px; + align-items: center; +} + +.app-tab, .settings-tab { + padding: 8px 20px; + background-color: var(--bg-tertiary); + border: none; + border-radius: 20px; + color: var(--text-secondary); + font-weight: 500; + transition: all 0.3s; +} + +.app-tab.active, .settings-tab.active { + background-color: var(--accent-color); + color: white; +} + +.app-tab:hover, .settings-tab:hover { + background-color: var(--accent-hover); + color: white; +} + +.log-tabs { + display: flex; + gap: 10px; + align-items: center; + margin-bottom: 15px; /* Add some space below the tabs */ +} + +.log-tab { + padding: 8px 20px; + background-color: var(--bg-tertiary); + border: none; + border-radius: 20px; + color: var(--text-secondary); + font-weight: 500; + transition: all 0.3s; + cursor: pointer; /* Add cursor pointer */ +} + +.log-tab.active { + background-color: var(--accent-color); + color: white; +} + +.log-tab:hover { + background-color: var(--accent-hover); + color: white; +} + +/* Adjust log controls layout if needed */ +.log-controls { + display: flex; + justify-content: flex-end; /* Align controls to the right */ + align-items: center; + gap: 15px; + flex-grow: 1; /* Allow controls to take remaining space */ +} + +/* Ensure section header handles flex layout properly */ +.section-header { + display: flex; + justify-content: space-between; + align-items: center; + flex-wrap: wrap; /* Allow wrapping on smaller screens */ + gap: 15px; + margin-bottom: 20px; +} + +.app-tabs, .log-controls { + display: flex; + gap: 10px; + align-items: center; +} + +.app-tab, .settings-tab { + padding: 8px 20px; + background-color: var(--bg-tertiary); + border: none; + border-radius: 20px; + color: var(--text-secondary); + font-weight: 500; + transition: all 0.3s; +} + +.app-tab.active, .settings-tab.active { + background-color: var(--accent-color); + color: white; +} + +.app-tab:hover, .settings-tab:hover { + background-color: var(--accent-hover); + color: white; +} + +.log-options { + display: flex; 
+ align-items: center; + gap: 15px; +} + +.auto-scroll { + display: flex; + align-items: center; + gap: 5px; + font-size: 14px; +} + +.clear-button { + padding: 6px 12px; + background-color: var(--button-danger-bg); + color: white; + border: none; + border-radius: 4px; + display: flex; + align-items: center; + gap: 5px; + transition: background-color 0.3s; +} + +.clear-button:hover { + background-color: var(--button-danger-hover); +} + +.logs { + /* Reduce height by 15% from original calc(100vh - 160px) */ + height: calc(85vh - 160px); + background-color: var(--log-bg); + border: 1px solid var(--log-border); + border-radius: 8px; + padding: 15px; + overflow-y: auto; + font-family: monospace; + white-space: pre-wrap; + word-wrap: break-word; + line-height: 1.5; + font-size: 14px; + margin-bottom: 20px; /* Add margin to create space between logs and footer */ +} + +.log-entry { + margin-bottom: 5px; + padding: 2px 5px; /* Add some horizontal padding */ + display: flex; /* Use flexbox for alignment */ + align-items: baseline; /* Align items based on text baseline */ + gap: 8px; /* Space between elements */ +} + +.log-timestamp { + color: var(--text-muted); /* Muted color for timestamp */ + font-size: 0.85em; + white-space: nowrap; /* Prevent timestamp from wrapping */ +} + +.log-app { + color: var(--accent-color); /* Use accent color for app name */ + font-weight: bold; + font-size: 0.9em; + white-space: nowrap; +} + +.log-level { + font-weight: bold; + padding: 1px 5px; + border-radius: 3px; + font-size: 0.8em; + text-transform: uppercase; + white-space: nowrap; +} + +.log-level-info { + background-color: var(--info-bg, #d1ecf1); /* Use CSS variables with fallbacks */ + color: var(--info-color, #0c5460); +} + +.log-level-warning { + background-color: var(--warning-bg, #fff3cd); + color: var(--warning-color, #856404); +} + +.log-level-error { + background-color: var(--error-bg, #f8d7da); + color: var(--error-color, #721c24); +} + +.log-level-debug { + 
background-color: var(--debug-bg, #e2e3e5); + color: var(--debug-color, #383d41); +} + +/* Apply level colors directly to the entry for fallback */ +.log-info .log-message { + color: var(--info-color, #0c5460); +} + +.log-warning .log-message { + color: var(--warning-color, #856404); +} + +.log-error .log-message { + color: var(--error-color, #721c24); +} + +.log-debug .log-message { + color: var(--debug-color, #383d41); +} + +.log-logger { + color: var(--text-muted); /* Muted color for logger name */ + font-size: 0.85em; + white-space: nowrap; +} + +.log-message { + flex-grow: 1; /* Allow message to take remaining space */ + word-break: break-word; /* Break long words */ +} + +.status-connected { + color: var(--status-connected); + font-weight: 600; +} + +.status-disconnected { + color: var(--status-not-connected); + font-weight: 600; +} + +/* Settings Section */ +.settings-actions { + display: flex; + gap: 10px; +} + +.save-button, .reset-button { + padding: 8px 16px; + border: none; + border-radius: 4px; + font-weight: 500; + display: flex; + align-items: center; + gap: 6px; + transition: background-color 0.3s; +} + +.save-button { + background-color: var(--button-success-bg); + color: white; +} + +.save-button:hover { + background-color: var(--button-success-hover); +} + +.reset-button { + background-color: var(--button-danger-bg); + color: white; +} + +.reset-button:hover { + background-color: var(--button-danger-hover); +} + +.settings-form { + padding: 0; + overflow-y: auto; + height: calc(100vh - 150px); +} + +.app-settings-panel { + display: none; +} + +.app-settings-panel.active { + display: block; +} + +.settings-group { + background-color: var(--bg-secondary); + border: 1px solid var(--border-color); + border-radius: 8px; + padding: 20px; + margin-bottom: 20px; + position: relative; +} + +.settings-group h3 { + margin-bottom: 15px; + padding-bottom: 10px; + /* border-bottom: 1px solid var(--border-color); */ /* Removed to prevent double border */ + 
font-size: 1.1rem; +} + +.setting-item { + margin-bottom: 20px; + display: flex; + flex-wrap: wrap; + align-items: center; +} + +.setting-item label { + width: 200px; + font-weight: 500; + margin-right: 15px; +} + +.setting-item input[type="text"], +.setting-item input[type="number"], +.setting-item input[type="password"] { + width: 300px; + padding: 8px 12px; + border: 1px solid var(--border-color); + border-radius: 4px; + background-color: var(--bg-tertiary); + color: var(--text-primary); +} + +/* Make number inputs shorter where appropriate */ +.short-number-input { + width: 100px !important; /* Use !important to override wider default if needed */ +} + +/* Make number inputs for intervals shorter */ +.short-number-input { + width: 80px !important; /* Use !important to override potential broader input styles */ +} + +.setting-help { + width: 100%; + margin-top: 5px; + margin-left: 215px; + font-size: 13px; + color: var(--text-muted); +} + +/* Stateful management header row with reset button */ +.stateful-header-row { + display: flex; + justify-content: space-between; + align-items: center; + padding-bottom: 10px; /* Reduced padding */ + margin-bottom: 15px; /* Spacing below header */ + /* border-bottom: 1px solid var(--border-color); Removed border */ + width: 100%; +} + +.stateful-header-row h3 { + margin: 0; + font-size: 1.1rem; /* Match other group headers */ + font-weight: 600; + color: var(--text-primary); +} + +/* Reset button styling */ +#reset_stateful_btn { + background-color: var(--button-danger-bg); /* Use variable */ + color: white; + border: none; + padding: 5px 12px; + border-radius: 4px; + font-size: 13px; + font-weight: 500; + cursor: pointer; + display: inline-flex; + align-items: center; + gap: 5px; + transition: background-color 0.2s ease; +} + +#reset_stateful_btn:hover { + background-color: var(--button-danger-hover); /* Use variable */ +} + +#reset_stateful_btn i { + font-size: 13px; +} + +/* Stateful Management Section Styling */ 
+#generalSettings .setting-info-block { + background-color: var(--bg-tertiary); + border: 1px solid var(--border-color); + border-radius: 6px; + padding: 15px; + margin-top: 0; /* Remove top margin as header row provides spacing */ + margin-bottom: 20px; +} + +#generalSettings .info-container { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); /* Responsive columns */ + gap: 15px; + /* margin-bottom: 15px; Removed margin, handled by parent */ +} + +#generalSettings .date-info-block { + background-color: var(--bg-secondary); + padding: 12px 15px; /* Adjusted padding */ + border-radius: 4px; + border-left: 3px solid var(--accent-color); +} + +#generalSettings .date-label { + font-size: 0.85em; /* Slightly smaller */ + color: var(--text-secondary); + margin-bottom: 5px; /* Increased spacing */ +} + +#generalSettings .date-value { + font-size: 0.95em; /* Adjusted size */ + font-weight: 500; + color: var(--text-primary); + font-family: inherit; /* Use standard font */ + background-color: transparent; /* Remove background */ + padding: 0; /* Remove padding */ +} + +/* Style the State Reset Interval item specifically */ +#generalSettings .setting-item input#stateful_management_hours { + width: 100px; /* Make input smaller */ +} + +#generalSettings .setting-item .reset-help { + color: var(--warning-color); /* Use warning color */ + font-style: normal; + font-size: 0.85em; +} + +/* Responsive Adjustments */ +@media (max-width: 950px) { + .sidebar { + width: 70px; + } + + .sidebar h1, .nav-item span, .switch-label { + display: none; + } + + .nav-item i { + margin-right: 0; + font-size: 22px; + } + + .logo-container { + justify-content: center; + } + + .logo { + margin-right: 0; + } +} + +@media (max-width: 992px) { + .settings-group { + padding: 15px; + } + + .setting-item label { + width: 100%; + margin-bottom: 8px; + } + + .setting-help { + margin-left: 0; + } + + .stats-grid { + grid-template-columns: 1fr; + } + + /* Make inputs responsive on 
narrow screens */ + .setting-item input[type="text"], + .setting-item input[type="number"], + .setting-item input[type="password"] { + width: 100%; + max-width: 300px; + } + + /* Specific width for short number inputs */ + .setting-item input.short-number-input { + width: 80px; /* Adjust width as needed */ + max-width: 80px; + } +} + +@media (max-width: 768px) { + .dashboard-grid { + grid-template-columns: 1fr; + } + + .section-header { + flex-direction: column; + align-items: flex-start; + } + + .app-tabs, .settings-actions { + width: 100%; + overflow-x: auto; + padding-bottom: 5px; /* Prevent cut-off of button shadows */ + } + + .app-tab, .settings-tab { + flex: 1; + white-space: nowrap; + min-width: 100px; /* Minimum button width */ + } + + /* Layout improvements for user cards on mobile */ + .user-card { + padding: 15px; + } + + .verification-container { + flex-direction: column; + align-items: center; + } + + .verification-input { + width: 100%; + max-width: 200px; + margin-right: 0; + margin-bottom: 10px; + } +} + +/* Ensure very small screens can still access everything */ +@media (max-width: 480px) { + .top-bar { + flex-direction: column; + align-items: flex-start; + height: auto; + padding: 10px 15px; + } + + .user-info { + margin-top: 10px; + } + + .action-buttons { + flex-direction: column; + } + + .action-button { + margin-bottom: 10px; + } +} + +/* Footer */ +.footer { + text-align: center; + padding: 20px; + margin-top: 20px; + background-color: var(--bg-secondary); + border-radius: 8px; + box-shadow: 0 4px 6px rgba(0, 0, 0, 0.05); + color: var(--text-secondary); + font-size: 14px; +} + +.footer p { + margin: 5px 0; +} + +.footer a { + color: var(--accent-color); + text-decoration: none; + font-weight: 500; + transition: color 0.3s; +} + +.footer a:hover { + text-decoration: underline; + color: var(--accent-hover); +} + +/* Notification styles for async operations */ +.notification { + position: fixed; + top: 20px; + right: 20px; + padding: 15px 25px; 
+ border-radius: 8px; + color: white; + font-weight: 500; + box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1); + z-index: 1000; + transform: translateY(-20px); + opacity: 0; + transition: transform 0.3s, opacity 0.3s; +} + +.notification.show { + transform: translateY(0); + opacity: 1; +} + +.notification.success { + background-color: var(--success-color); +} + +.notification.error { + background-color: var(--error-color); +} + +.notification.info { + background-color: var(--info-color); +} + +/* Multi-instance styles */ +.instances-container { + margin-bottom: 20px; +} + +.instance-item { + border: 1px solid var(--border-color); + border-radius: 8px; + margin-bottom: 16px; + background-color: var(--card-bg); +} + +.instance-header { + padding: 10px 15px; + display: flex; + justify-content: space-between; + align-items: center; + border-bottom: 1px solid var(--border-color); + background-color: var(--card-header-bg); + border-radius: 8px 8px 0 0; +} + +.instance-header h4 { + margin: 0; + font-size: 16px; + font-weight: 500; +} + +.instance-actions { + display: flex; + align-items: center; + gap: 10px; +} + +.instance-toggle { + margin-right: 5px; +} + +.instance-content { + padding: 15px; +} + +.add-instance-container { + display: flex; + justify-content: center; + margin: 15px 0; +} + +.add-instance-btn { + padding: 8px 16px; + background-color: var(--accent-color); + color: white; + border: none; + border-radius: 4px; + cursor: pointer; + font-weight: 500; + transition: background-color 0.2s; +} + +.add-instance-btn:hover { + background-color: var(--accent-color-dark); +} + +.add-instance-btn:disabled { + background-color: #ccc; + cursor: not-allowed; +} + +.remove-instance-btn { + padding: 5px 10px; + background-color: var(--delete-color); + color: white; + border: none; + border-radius: 4px; + cursor: pointer; + font-size: 12px; + transition: background-color 0.2s; +} + +.remove-instance-btn:hover { + background-color: var(--delete-color-dark); +} + 
+.test-connection-btn { + padding: 6px 12px; + background-color: var(--accent-color); + color: white; + border: none; + border-radius: 4px; + cursor: pointer; + transition: background-color 0.2s; +} + +.test-connection-btn:hover { + background-color: var(--accent-color-dark); +} + +.connection-status { + margin-left: 10px; + font-size: 14px; +} + +.connection-status.success { + color: var(--success-color); +} + +.connection-status.error { + color: var(--error-color); +} + +.connection-status.testing { + color: var(--accent-color); +} + +/* Swaparr specific styles */ +.swaparr-panel { + margin-bottom: 15px; + border-radius: 8px; + background-color: var(--bg-secondary); + border-left: 4px solid var(--accent-color); +} + +.swaparr-config { + padding: 12px; +} + +.swaparr-config h3 { + margin-top: 0; + margin-bottom: 10px; + color: var(--accent-color); +} + +.swaparr-config-content { + display: flex; + flex-wrap: wrap; + gap: 15px; +} + +.swaparr-config-content span { + background-color: var(--bg-tertiary); + padding: 5px 10px; + border-radius: 4px; + font-family: monospace; +} + +.swaparr-table { + width: 100%; + overflow-x: auto; + margin-bottom: 15px; +} + +.swaparr-table table { + width: 100%; + border-collapse: collapse; + background-color: var(--bg-secondary); + border-radius: 8px; + overflow: hidden; +} + +.swaparr-table th { + background-color: var(--bg-tertiary); + padding: 10px; + text-align: left; + color: var(--text-secondary); +} + +.swaparr-table td { + padding: 10px; + border-bottom: 1px solid var(--bg-tertiary); +} + +.swaparr-status-striked { + background-color: rgba(255, 193, 7, 0.1); +} + +.swaparr-status-pending { + background-color: rgba(13, 110, 253, 0.1); +} + +.swaparr-status-ignored { + background-color: rgba(108, 117, 125, 0.1); +} + +.swaparr-status-normal { + background-color: rgba(25, 135, 84, 0.1); +} + +.swaparr-status-removed { + background-color: rgba(220, 53, 69, 0.1); +} + +/* When in dark mode */ +.dark-theme .swaparr-status-striked 
{ + background-color: rgba(255, 193, 7, 0.2); +} + +.dark-theme .swaparr-status-pending { + background-color: rgba(13, 110, 253, 0.2); +} + +.dark-theme .swaparr-status-ignored { + background-color: rgba(108, 117, 125, 0.2); +} + +.dark-theme .swaparr-status-normal { + background-color: rgba(25, 135, 84, 0.2); +} + +.dark-theme .swaparr-status-removed { + background-color: rgba(220, 53, 69, 0.2); +} + +/* Log Dropdown Styles */ +.log-dropdown-container { + position: relative; + margin-bottom: 15px; +} + +.log-dropdown { + position: relative; + display: inline-block; +} + +.log-dropdown-btn { + padding: 10px 20px; + background-color: var(--accent-color); + color: white; + border: none; + border-radius: 20px; + font-weight: 500; + transition: all 0.3s; + cursor: pointer; + display: flex; + align-items: center; + gap: 8px; + min-width: 140px; + justify-content: space-between; +} + +.log-dropdown-btn:hover { + background-color: var(--accent-hover); +} + +.log-dropdown-content { + display: none; + position: absolute; + top: 100%; + left: 0; + background-color: var(--bg-secondary); + min-width: 180px; + z-index: 10; + border-radius: 8px; + box-shadow: 0 4px 15px rgba(0, 0, 0, 0.1); + margin-top: 8px; + border: 1px solid var(--border-color); + max-height: 400px; + overflow-y: auto; +} + +.log-dropdown-content.show { + display: block; +} + +.log-option { + display: block; + padding: 10px 15px; + text-decoration: none; + color: var(--text-primary); + transition: all 0.2s; + text-align: left; +} + +.log-option:hover { + background-color: var(--bg-tertiary); +} + +.log-option.active { + color: var(--accent-color); + font-weight: 600; + background-color: rgba(var(--accent-color-rgb), 0.1); +} + +/* Settings Dropdown Styles - Matching log dropdown styles */ +.settings-dropdown-container { + position: relative; + margin-bottom: 15px; +} + +.settings-dropdown { + position: relative; + display: inline-block; +} + +.settings-dropdown-btn { + padding: 10px 20px; + background-color: 
var(--accent-color); + color: white; + border: none; + border-radius: 20px; + font-weight: 500; + transition: all 0.3s; + cursor: pointer; + display: flex; + align-items: center; + gap: 8px; + min-width: 140px; + justify-content: space-between; +} + +.settings-dropdown-btn:hover { + background-color: var(--accent-hover); +} + +.settings-dropdown-content { + display: none; + position: absolute; + top: 100%; + left: 0; + background-color: var(--bg-secondary); + min-width: 180px; + z-index: 10; + border-radius: 8px; + box-shadow: 0 4px 15px rgba(0, 0, 0, 0.1); + margin-top: 8px; + border: 1px solid var(--border-color); + max-height: 400px; + overflow-y: auto; +} + +.settings-dropdown-content.show { + display: block; +} + +.settings-option { + display: block; + padding: 10px 15px; + text-decoration: none; + color: var(--text-primary); + transition: all 0.2s; + text-align: left; +} + +.settings-option:hover { + background-color: var(--bg-tertiary); +} + +.settings-option.active { + color: var(--accent-color); + font-weight: 600; + background-color: rgba(var(--accent-color-rgb), 0.1); +} + +/* History Dropdown Styles - Matching log dropdown styles */ +.history-dropdown-container { + display: flex; + align-items: center; +} + +.history-dropdown { + position: relative; + display: inline-block; +} + +.history-dropdown-btn { + background-color: var(--bg-tertiary); + border: 1px solid var(--border-color); + border-radius: 4px; + padding: 8px 15px; + display: flex; + align-items: center; + gap: 10px; + font-size: 14px; + color: var(--text-primary); + transition: all 0.2s; + cursor: pointer; +} + +.history-dropdown-btn:hover { + background-color: var(--bg-primary); +} + +.history-dropdown-content { + display: none; + position: absolute; + background-color: var(--bg-secondary); + min-width: 160px; + box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2); + z-index: 1001; + border-radius: 4px; + max-height: 400px; + overflow-y: auto; +} + +.history-dropdown-content a { + color: 
var(--text-primary); + padding: 12px 16px; + text-decoration: none; + display: block; + transition: all 0.2s; +} + +.history-dropdown-content a:hover { + background-color: var(--bg-tertiary); +} + +.history-dropdown-content a.active { + background-color: var(--accent-color); + color: white; +} + +.history-dropdown-content.show { + display: block; +} + +/* Placeholder message styling */ +.placeholder-message { + display: flex; + justify-content: center; + align-items: center; + height: calc(100% - 60px); /* Adjust based on header height */ +} + +.message-container { + text-align: center; + padding: 30px; + background-color: var(--bg-secondary); + border-radius: 8px; + box-shadow: 0 2px 10px rgba(0,0,0,0.1); + max-width: 500px; +} + +.message-container i { + color: var(--accent-color); + margin-bottom: 15px; +} + +.message-container h3 { + margin-bottom: 10px; + font-size: 1.5rem; +} + +.message-container p { + color: var(--text-secondary); + line-height: 1.5; +} + +/* History Table Styles */ +.history-container { + position: relative; + height: calc(100% - 120px); /* Adjust based on header and pagination height */ + overflow: auto; + background-color: var(--bg-secondary); + border-radius: 4px; + box-shadow: 0 1px 3px rgba(0,0,0,0.1); +} + +.history-table { + width: 100%; + border-collapse: collapse; + border-spacing: 0; + font-size: 14px; +} + +.history-table thead { + position: sticky; + top: 0; + background-color: var(--bg-tertiary); + z-index: 1; +} + +.history-table th { + padding: 12px 15px; + text-align: left; + font-weight: 600; + color: var(--text-primary); + border-bottom: 1px solid var(--border-color); +} + +.history-table td { + padding: 10px 15px; + border-bottom: 1px solid var(--border-color); + vertical-align: middle; +} + +.history-table tbody tr:nth-child(even) { + background-color: var(--bg-tertiary); +} + +.history-table tbody tr:hover { + background-color: rgba(var(--accent-color-rgb), 0.1); +} + +/* Operation Type Styles in History Table */ 
+.operation-missing { + background-color: #005a9e; + color: white; + padding: 3px 8px; + border-radius: 4px; + font-size: 0.9em; + display: inline-block; +} + +.operation-upgrade { + background-color: #1e40af; /* Changed from #2a9d8f to a more vibrant blue */ + color: white; + padding: 3px 8px; + border-radius: 4px; + font-size: 0.9em; + display: inline-block; +} + +/* History Controls */ +.history-controls { + display: flex; + justify-content: flex-end; + align-items: center; + gap: 15px; +} + +.history-search { + display: flex; + align-items: center; + margin-right: auto; /* Push to left side */ +} + +.history-search input { + padding: 8px 15px; + border: 1px solid var(--border-color); + border-radius: 4px 0 0 4px; + background-color: var(--bg-tertiary); + color: var(--text-primary); + width: 200px; +} + +.history-search button { + background-color: var(--accent-color); + color: white; + border: none; + border-radius: 0 4px 4px 0; + padding: 8px 12px; + cursor: pointer; +} + +.history-search button:hover { + background-color: var(--accent-hover); +} + +.history-page-size { + display: flex; + align-items: center; + gap: 8px; +} + +.history-page-size select { + padding: 7px 10px; + border: 1px solid var(--border-color); + border-radius: 4px; + background-color: var(--bg-tertiary); + color: var(--text-primary); +} + +/* Pagination Controls */ +.pagination-controls { + display: flex; + justify-content: center; + align-items: center; + padding: 15px 0; + gap: 15px; +} + +.pagination-button { + padding: 8px 15px; + border: 1px solid var(--border-color); + border-radius: 4px; + background-color: var(--bg-tertiary); + color: var(--text-primary); + cursor: pointer; + transition: all 0.2s; +} + +.pagination-button:hover { + background-color: var(--accent-color); + color: white; +} + +.pagination-button:disabled { + opacity: 0.5; + cursor: not-allowed; + background-color: var(--bg-tertiary); + color: var(--text-muted); +} + +#historyPageInfo { + color: 
var(--text-secondary); + font-size: 14px; +} + +/* Empty state and loading */ +.empty-state-message, .loading-indicator { + display: none; + flex-direction: column; + align-items: center; + justify-content: center; + height: 200px; + color: var(--text-muted); + text-align: center; + padding: 20px; +} + +.empty-state-message i, .loading-indicator i { + margin-bottom: 15px; + color: var(--accent-color); +} + +/* Responsive adjustments */ +@media (max-width: 992px) { + .history-table { + font-size: 13px; + } + + .history-table th, .history-table td { + padding: 8px 10px; + } + + .history-search input { + width: 150px; + } +} + +@media (max-width: 768px) { + .history-controls { + flex-wrap: wrap; + gap: 10px; + } + + .history-search { + width: 100%; + margin-bottom: 10px; + } + + .history-search input { + width: calc(100% - 40px); + } +} + +.styled-select { + background: var(--accent-color); + color: #fff; + border: none; + border-radius: 20px; + padding: 10px 20px; + font-weight: 500; + font-size: 1rem; + min-width: 140px; + box-shadow: 0 2px 10px rgba(0,0,0,0.1); + appearance: none; + -webkit-appearance: none; + -moz-appearance: none; + cursor: pointer; + outline: none; + transition: background 0.3s; + margin-right: 10px; +} +.styled-select:focus, .styled-select:hover { + background: var(--accent-hover); +} +.styled-select option { + color: #222; + background: #fff; +} diff --git a/Huntarr.io-6.3.6/frontend/static/css/style.css b/Huntarr.io-6.3.6/frontend/static/css/style.css new file mode 100644 index 0000000..053e64b --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/static/css/style.css @@ -0,0 +1,1540 @@ +:root { + /* Light Theme Colors */ + --bg-primary: #f8f9fa; + --bg-secondary: #ffffff; + --bg-tertiary: #f1f3f5; + --text-primary: #212529; + --text-secondary: #495057; + --text-muted: #6c757d; + --border-color: #dee2e6; + --accent-color: #3498db; + --accent-hover: #2980b9; + --success-color: #27ae60; + --warning-color: #f39c12; + --error-color: #e74c3c; + 
--info-color: #2980b9; + --debug-color: #7f8c8d; + + /* Component Colors */ + --sidebar-bg: #2c3e50; + --sidebar-text: #ecf0f1; + --sidebar-item-hover: #34495e; + --sidebar-item-active: #3498db; + --topbar-bg: var(--bg-secondary); + --card-bg: var(--bg-secondary); + --switch-bg: #cbd2d9; + --switch-active: #3498db; + + /* Button Colors */ + --button-primary-bg: #3498db; + --button-primary-text: #ffffff; + --button-primary-hover: #2980b9; + --button-danger-bg: #e74c3c; + --button-danger-hover: #c0392b; + --button-success-bg: #27ae60; + --button-success-hover: #219955; + + /* Status Colors */ + --status-connected: #27ae60; + --status-not-connected: #e74c3c; + + /* Logs Colors */ + --log-bg: var(--bg-secondary); + --log-border: var(--border-color); +} + +.dark-theme { + --bg-primary: #1a1d24; + --bg-secondary: #252a34; + --bg-tertiary: #2d3748; + --text-primary: #f8f9fa; + --text-secondary: #e9ecef; + --text-muted: #adb5bd; + --border-color: #4a5568; + --accent-color: #3498db; + --accent-hover: #2980b9; + + /* Component Colors */ + --sidebar-bg: #121212; + --sidebar-text: #ecf0f1; + --sidebar-item-hover: #2d3748; + --sidebar-item-active: #3498db; + --topbar-bg: #252a34; + --card-bg: #252a34; + --switch-bg: #4a5568; + + /* Logs Colors */ + --log-bg: #252a34; + --log-border: #4a5568; +} + +/* Base Styles */ +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} + +body { + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif; + background-color: var(--bg-primary); + color: var(--text-primary); + line-height: 1.6; + transition: background-color 0.3s, color 0.3s; + height: 100vh; + overflow: hidden; +} + +a { + text-decoration: none; + color: var(--accent-color); +} + +button { + cursor: pointer; + font-family: inherit; +} + +/* Layout Structure */ +.app-container { + display: flex; + height: 100vh; + width: 100%; + overflow: hidden; +} + +.sidebar { + width: 250px; + height: 100vh; + background: linear-gradient(180deg, 
rgba(22, 26, 34, 0.98), rgba(18, 22, 30, 0.95)); + border-right: 1px solid rgba(90, 109, 137, 0.15); + box-shadow: 2px 0 10px rgba(0, 0, 0, 0.2); + display: flex; + flex-direction: column; + z-index: 100; + flex-shrink: 0; + overflow-y: auto; +} + +.main-content { + flex: 1; + min-width: 0; /* Important for flex child to respect parent constraints */ + height: 100vh; + overflow-y: auto; + position: relative; + box-sizing: border-box; +} + +/* Content Section styles */ +.content-section { + display: none; + padding: 20px; + height: calc(100vh - 60px); /* 60px is topbar height */ + overflow-y: auto; + box-sizing: border-box; +} + +.content-section.active { + display: block; +} + +/* Sidebar */ +.logo-container { + display: flex; + align-items: center; + justify-content: center; + padding: 0 20px 20px; + margin-bottom: 10px; + border-bottom: 1px solid rgba(255, 255, 255, 0.1); +} + +.logo { + width: 40px; + height: 40px; + margin-right: 10px; +} + +.sidebar h1 { + font-size: 1.5rem; + font-weight: bold; +} + +.nav-menu { + display: flex; + flex-direction: column; + flex-grow: 1; + margin-top: 20px; +} + +.nav-item { + display: flex; + align-items: center; + padding: 12px 20px; + color: var(--sidebar-text); + transition: all 0.3s; +} + +.nav-item i { + margin-right: 12px; + font-size: 18px; + width: 24px; + text-align: center; +} + +.nav-item:hover { + background-color: var(--sidebar-item-hover); +} + +.nav-item.active { + background-color: var(--sidebar-item-active); + font-weight: 600; +} + +/* Theme Switch */ +.theme-switcher { + padding: 20px; + margin-top: 20px; + border-top: 1px solid rgba(255, 255, 255, 0.1); +} + +.switch-label { + display: flex; + justify-content: space-between; + margin-bottom: 8px; + font-size: 14px; +} + +.light-icon, .dark-icon { + color: var(--sidebar-text); +} + +.switch { + position: relative; + display: inline-block; + width: 100%; + height: 26px; +} + +.switch input { + opacity: 0; + width: 0; + height: 0; +} + +.slider { + position: 
absolute; + cursor: pointer; + top: 0; + left: 0; + right: 0; + bottom: 0; + background-color: var(--switch-bg); + transition: .4s; +} + +.slider:before { + position: absolute; + content: ""; + height: 18px; + width: 18px; + left: 4px; + bottom: 4px; + background-color: white; + transition: .4s; +} + +input:checked + .slider { + background-color: var(--switch-active); +} + +input:checked + .slider:before { + transform: translateX(calc(100% + 4px)); +} + +.slider.round { + border-radius: 34px; +} + +.slider.round:before { + border-radius: 50%; +} + +/* Main Content */ +.main-content { + flex-grow: 1; + display: flex; + flex-direction: column; + overflow: hidden; +} + +/* Top Bar */ +.top-bar { + height: 60px; + background-color: var(--topbar-bg); + display: flex; + justify-content: space-between; + align-items: center; + padding: 0 20px; + border-bottom: 1px solid var(--border-color); + box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05); +} + +.page-title { + font-size: 1.3rem; + font-weight: 600; + color: var(--text-primary); +} + +.user-info { + display: flex; + align-items: center; + font-size: 14px; +} + +.user-info span { + margin-right: 12px; +} + +.logout-btn { + color: var(--text-secondary); + transition: color 0.3s; +} + +.logout-btn:hover { + color: var(--error-color); +} + +/* Content Sections */ +.content-section { + display: none; + height: calc(100vh - 60px); + overflow-y: auto; + padding: 20px; +} + +.content-section.active { + display: block; +} + +/* Cards */ +.dashboard-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); + gap: 20px; +} + +.card { + background-color: var(--card-bg); + border-radius: 8px; + box-shadow: 0 4px 6px rgba(0, 0, 0, 0.05); + padding: 20px; +} + +.card h2, .card h3 { + margin-bottom: 15px; + font-weight: 600; + display: flex; + align-items: center; +} + +.card h2 i, .card h3 i { + margin-right: 10px; + color: var(--accent-color); +} + +.welcome-card { + grid-column: 1 / -1; +} + +/* Status Card */ 
+.status-list { + display: flex; + flex-direction: column; + gap: 12px; +} + +.status-item { + display: flex; + justify-content: space-between; + align-items: center; +} + +.status-badge { + padding: 6px 12px; + border-radius: 20px; + font-size: 13px; + font-weight: 500; + display: flex; + align-items: center; +} + +.status-badge i { + margin-right: 5px; +} + +.status-badge.connected { + background-color: rgba(39, 174, 96, 0.2); + color: var(--status-connected); +} + +.status-badge.not-connected { + background-color: rgba(231, 76, 60, 0.2); + color: var(--status-not-connected); +} + +/* Stats Card */ +.stats-grid { + display: grid; + grid-template-columns: repeat(3, 1fr); + gap: 10px; +} + +.stat-item { + display: flex; + flex-direction: column; + align-items: center; + padding: 10px; + border-radius: 6px; + background-color: var(--bg-tertiary); +} + +.stat-value { + font-size: 1.8rem; + font-weight: bold; + color: var(--accent-color); +} + +.stat-label { + font-size: 0.9rem; + color: var(--text-secondary); + margin-top: 5px; +} + +/* Action Card */ +.action-buttons { + display: flex; + gap: 10px; +} + +.action-button { + padding: 12px 20px; + border-radius: 6px; + border: none; + font-weight: 600; + display: flex; + align-items: center; + justify-content: center; + flex: 1; + transition: background-color 0.3s, transform 0.2s; +} + +.action-button i { + margin-right: 8px; +} + +.action-button.start { + background-color: var(--button-success-bg); + color: white; +} + +.action-button.start:hover { + background-color: var(--button-success-hover); + transform: translateY(-2px); +} + +.action-button.stop { + background-color: var(--button-danger-bg); + color: white; +} + +.action-button.stop:hover { + background-color: var(--button-danger-hover); + transform: translateY(-2px); +} + +/* Small action button */ +.action-button-small { + padding: 8px 12px; + border-radius: 4px; + border: none; + font-weight: 600; + background-color: var(--button-danger-bg); + color: white; 
+ font-size: 0.9em; + cursor: pointer; + transition: background-color 0.3s, transform 0.2s; +} + +.action-button-small:hover { + background-color: var(--button-danger-hover); + transform: translateY(-1px); +} + +/* Danger action button */ +.action-button.danger { + background-color: #e74c3c; + color: #fff; + border: none; + border-radius: 4px; + padding: 6px 12px; + font-size: 13px; + cursor: pointer; + display: inline-flex; + align-items: center; + gap: 5px; + transition: background-color 0.2s ease, transform 0.1s ease; + width: fit-content; + max-width: max-content; + white-space: nowrap; +} + +.action-button.danger:hover { + background-color: #c0392b; +} + +.action-button.danger:active { + transform: translateY(0); +} + +.action-button.danger i { + font-size: 12px; + margin-right: 1px; +} + +/* Logs Section */ +.section-header { + margin-bottom: 20px; + display: flex; + justify-content: space-between; + align-items: center; + flex-wrap: wrap; + gap: 15px; +} + +.app-tabs, .log-controls { + display: flex; + gap: 10px; + align-items: center; +} + +.app-tab, .settings-tab { + padding: 8px 20px; + background-color: var(--bg-tertiary); + border: none; + border-radius: 20px; + color: var(--text-secondary); + font-weight: 500; + transition: all 0.3s; +} + +.app-tab.active, .settings-tab.active { + background-color: var(--accent-color); + color: white; +} + +.app-tab:hover, .settings-tab:hover { + background-color: var(--accent-hover); + color: white; +} + +.log-options { + display: flex; + align-items: center; + gap: 15px; +} + +.auto-scroll { + display: flex; + align-items: center; + gap: 5px; + font-size: 14px; +} + +.clear-button { + padding: 6px 12px; + background-color: var(--button-danger-bg); + color: white; + border: none; + border-radius: 4px; + display: flex; + align-items: center; + gap: 5px; + transition: background-color 0.3s; +} + +.clear-button:hover { + background-color: var(--button-danger-hover); +} + +.logs { + height: calc(100vh - 160px); + 
background-color: var(--log-bg); + border: 1px solid var(--log-border); + border-radius: 8px; + padding: 15px; + overflow-y: auto; + font-family: monospace; + white-space: pre-wrap; + word-wrap: break-word; + line-height: 1.5; + font-size: 14px; +} + +.log-entry { + margin-bottom: 5px; + padding: 2px 0; +} + +.log-info { + color: var(--info-color); +} + +.log-warning { + color: var(--warning-color); +} + +.log-error { + color: var(--error-color); +} + +.log-debug { + color: var(--debug-color); +} + +.status-connected { + color: var(--status-connected); + font-weight: 600; +} + +.status-disconnected { + color: var(--status-not-connected); + font-weight: 600; +} + +/* Settings Section */ +.settings-actions { + display: flex; + gap: 10px; +} + +.save-button, .reset-button { + padding: 8px 16px; + border: none; + border-radius: 4px; + font-weight: 500; + display: flex; + align-items: center; + gap: 6px; + transition: background-color 0.3s; +} + +.save-button { + background-color: var(--button-success-bg); + color: white; +} + +.save-button:hover { + background-color: var(--button-success-hover); +} + +.reset-button { + background-color: var(--button-danger-bg); + color: white; +} + +.reset-button:hover { + background-color: var(--button-danger-hover); +} + +.settings-form { + padding: 0; + overflow-y: auto; + height: calc(100vh - 150px); +} + +.app-settings-panel { + display: none; +} + +.app-settings-panel.active { + display: block; +} + +.settings-group { + background-color: var(--bg-secondary); + border: 1px solid var(--border-color); + border-radius: 8px; + padding: 20px; + margin-bottom: 20px; + position: relative; +} + +.settings-group h3 { + margin-bottom: 15px; + padding-bottom: 10px; + border-bottom: 1px solid var(--border-color); + font-size: 1.1rem; +} + +.setting-item { + margin-bottom: 20px; + display: flex; + flex-wrap: wrap; + align-items: center; +} + +.setting-item label { + width: 200px; + font-weight: 500; + margin-right: 15px; +} + +.setting-item 
input[type="text"], +.setting-item input[type="number"], +.setting-item input[type="password"] { + width: 300px; + padding: 8px 12px; + border: 1px solid var(--border-color); + border-radius: 4px; + background-color: var(--bg-tertiary); + color: var(--text-primary); +} + +.setting-help { + width: 100%; + margin-top: 5px; + margin-left: 215px; + font-size: 13px; + color: var(--text-muted); +} + +/* Toggle Switch for Settings */ +.toggle-switch { + position: relative; + display: inline-block; + width: 40px; /* Changed to match login page toggle (40px) */ + height: 20px; + flex-shrink: 0; +} + +.toggle-switch input { + opacity: 0; + width: 0; + height: 0; +} + +.toggle-slider { + position: absolute; + cursor: pointer; + top: 0; + left: 0; + right: 0; + bottom: 0; + background-color: var(--switch-bg); + transition: .4s; + border-radius: 24px; +} + +.toggle-slider:before { + position: absolute; + content: ""; + height: 12px; /* Changed to match login page toggle (12px) */ + width: 12px; /* Changed to match login page toggle (12px) */ + left: 4px; /* Changed to match login page toggle */ + bottom: 4px; /* Changed to match login page toggle */ + background-color: white; + transition: .4s; + border-radius: 50%; +} + +input:checked + .toggle-slider { + background-color: var(--switch-active); +} + +input:checked + .toggle-slider:before { + transform: translateX(20px); /* Changed to match login page toggle (20px) */ +} + +/* Stateful Management Styling */ +.stateful-header-wrapper { + position: relative; + width: 100%; + margin-bottom: 25px; + padding-bottom: 15px; + border-bottom: 1px solid #2d3748; /* Darker border matching the screenshot */ +} + +.stateful-header-wrapper h3 { + margin: 0; + font-size: 18px; + font-weight: 600; + color: var(--text-primary); + padding-bottom: 15px; +} + +.header-line { + display: none; +} + +.stateful-header { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 15px; + padding-bottom: 10px; + 
border-bottom: 1px solid var(--border-color); +} + +.stateful-title { + font-size: 16px; + font-weight: 600; + color: var(--text-primary); +} + +.stateful-reset-btn { + padding: 8px 16px; + font-size: 13px; + font-weight: 500; + background-color: var(--button-danger-bg); + color: white; + border: none; + border-radius: 4px; + cursor: pointer; + transition: background-color 0.2s ease; + white-space: nowrap; +} + +.stateful-reset-btn:hover { + background-color: var(--button-danger-hover); +} + +.info-container { + display: flex; + flex-direction: column; + gap: 12px; + padding: 12px; + background-color: var(--bg-tertiary); + border-radius: 6px; + margin-top: 10px; +} + +.date-info-block { + display: flex; + justify-content: space-between; + align-items: center; + padding: 8px 10px; + background-color: var(--bg-secondary); + border-radius: 4px; + border-left: 4px solid var(--accent-color); +} + +.date-label { + font-weight: 500; + color: var(--text-secondary); +} + +.date-value { + font-family: monospace; + font-size: 14px; + font-weight: 600; + color: var(--accent-color); + padding: 4px 8px; + background-color: rgba(52, 152, 219, 0.1); + border-radius: 4px; +} + +.reset-help { + margin-top: 8px; + font-style: italic; + color: var(--error-color); + font-size: 12px; +} + +/* Settings Stateful Management */ +.setting-info-block { + background-color: var(--bg-tertiary); + border: 1px solid var(--border-color); + border-radius: 8px; + padding: 15px; + margin: 10px 0; +} + +.setting-info-block .info-row { + display: flex; + justify-content: space-between; + padding: 5px 0; + border-bottom: 1px solid var(--border-color); +} + +.setting-info-block .info-row:last-child { + border-bottom: none; +} + +.danger-button { + background-color: var(--button-danger-bg); + color: #fff; + border: none; + border-radius: 4px; + padding: 8px 15px; + cursor: pointer; + transition: background-color 0.3s; +} + +.danger-button:hover { + background-color: var(--button-danger-hover); +} + +/* 
Custom reset button that matches the screenshot exactly */ +.danger-reset-button { + background-color: #e74c3c; /* Solid red to match the image */ + color: white; + border: none; + padding: 6px 12px; + border-radius: 4px; + font-size: 13px; + font-weight: 500; + cursor: pointer; + display: inline-flex; + align-items: center; + gap: 4px; + transition: background-color 0.2s ease; + width: fit-content; + white-space: nowrap; + position: absolute; + top: 0; + right: 0; +} + +.danger-reset-button:hover { + background-color: #c0392b; +} + +.danger-reset-button i { + font-size: 13px; +} + +/* Make sure settings-group has the right positioning for absolute elements */ +.settings-group { + position: relative; +} + +/* Responsive Adjustments */ +@media (max-width: 768px) { + .sidebar { + width: 60px !important; + min-width: 60px !important; + max-width: 60px !important; + } + + .main-content { + margin-left: 0 !important; + width: calc(100% - 60px) !important; + } + + /* Navbar item adjustments */ + .nav-item { + padding: 10px 0; + justify-content: center; + } + + .nav-item span { + display: none !important; + } + + .nav-icon-wrapper { + margin-right: 0 !important; + } + + /* Logo container */ + .logo-container { + justify-content: center !important; + padding: 15px 0 !important; + } + + .logo-container h1 { + display: none !important; + } + + .logo { + width: 40px !important; + height: 40px !important; + } + + /* Fix active/hover state */ + .nav-item:hover, + .nav-item.active { + background: rgba(65, 105, 225, 0.2) !important; + width: 50px !important; + margin: 0 auto !important; + border-radius: 8px !important; + } + + /* Topbar adjustments */ + .topbar-section.center { + position: relative !important; + left: 0 !important; + transform: none !important; + justify-content: center !important; + } + + /* Version bar */ + .version-bar { + flex-wrap: wrap !important; + gap: 8px !important; + } + + .version-item, .developer-credit { + font-size: 12px !important; + } +} + 
+@media (max-width: 480px) { + .community-links { + flex-direction: column; + } + + .community-link-card { + width: 100%; + } + + .app-stats-grid { + grid-template-columns: 1fr; + } + + .sponsors-list { + grid-template-columns: repeat(2, 1fr); + } + + .version-bar { + gap: 8px; + } + + .version-divider { + display: none; + } +} + +/* Footer */ +.footer { + text-align: center; + padding: 20px; + margin-top: 20px; + background-color: var(--bg-secondary); + border-radius: 8px; + box-shadow: 0 4px 6px rgba(0, 0, 0, 0.05); + color: var(--text-secondary); + font-size: 14px; +} + +.footer p { + margin: 5px 0; +} + +.footer a { + color: var(--accent-color); + text-decoration: none; + font-weight: 500; + transition: color 0.3s; +} + +.footer a:hover { + text-decoration: underline; + color: var(--accent-hover); +} + +/* Notification styles for async operations */ +.notification { + position: fixed; + top: 20px; + right: 20px; + padding: 15px 25px; + border-radius: 8px; + color: white; + font-weight: 500; + box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1); + z-index: 1000; + transform: translateY(-20px); + opacity: 0; + transition: transform 0.3s, opacity 0.3s; +} + +.notification.show { + transform: translateY(0); + opacity: 1; +} + +.notification.success { + background-color: var(--success-color); +} + +.notification.error { + background-color: var(--error-color); +} + +.notification.info { + background-color: var(--info-color); +} + +/* Login Page Styles */ +.login-page { + display: flex; + justify-content: center; + align-items: center; + height: 100vh; + background-color: var(--bg-primary); +} + +.login-container { + width: 100%; + max-width: 400px; + background-color: var(--bg-secondary); + border-radius: 10px; + box-shadow: 0 10px 25px rgba(0, 0, 0, 0.1); + overflow: hidden; +} + +.login-header { + background-color: var(--sidebar-bg); + color: var(--sidebar-text); + padding: 25px 20px; + text-align: center; + display: flex; + flex-direction: column; + align-items: center; +} 
+ +.login-logo { + width: 60px; + height: 60px; + margin-bottom: 10px; +} + +.login-header h1 { + margin: 0; + font-size: 1.8rem; + font-weight: 600; +} + +.login-form { + padding: 25px; +} + +.login-form h2 { + margin: 0 0 20px; + font-size: 1.3rem; + text-align: center; + color: var(--text-primary); +} + +.form-group { + margin-bottom: 20px; +} + +.form-group label { + display: block; + margin-bottom: 8px; + color: var(--text-secondary); + font-weight: 500; +} + +.form-group label i { + margin-right: 8px; + color: var(--accent-color); +} + +.form-group input { + width: 100%; + padding: 12px 15px; + border: 1px solid var(--border-color); + border-radius: 4px; + background-color: var(--bg-tertiary); + color: var(--text-primary); + font-size: 16px; + transition: border-color 0.3s; +} + +.form-group input:focus { + outline: none; + border-color: var(--accent-color); + box-shadow: 0 0 0 2px rgba(52, 152, 219, 0.2); +} + +.login-button { + width: 100%; + padding: 12px; + background-color: var(--accent-color); + color: white; + border: none; + border-radius: 4px; + font-size: 16px; + font-weight: 500; + cursor: pointer; + display: flex; + align-items: center; + gap: 10px; + transition: background-color 0.3s; +} + +.login-button:hover { + background-color: var(--accent-hover); +} + +.error-message { + color: var(--error-color); + margin: 15px 0; + font-size: 14px; + text-align: center; + min-height: 20px; +} + +.login-form .theme-toggle { + display: flex; + align-items: center; + justify-content: center; + gap: 10px; + margin-top: 25px; + color: var(--text-secondary); + font-size: 14px; +} + +.login-form .switch { + width: 40px; + height: 20px; +} + +.login-form .slider:before { + height: 12px; + width: 12px; + left: 4px; + bottom: 4px; +} + +.login-form input:checked + .slider:before { + transform: translateX(20px); +} + +@media (max-width: 480px) { + .login-container { + max-width: 90%; + margin: 0 15px; + } +} + +/* Section header with action button */ 
+.section-header-with-action { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 15px; + position: relative; + border-bottom: 1px solid var(--border-color); + padding-bottom: 10px; +} + +.section-header-with-action h3 { + margin: 0; + color: var(--text-primary); + font-size: 18px; + font-weight: 600; +} + +.section-header-with-action .icon-button { + padding: 7px 12px; + font-size: 13px; + font-weight: 500; + background-color: var(--button-danger-bg); + color: white; + border: none; + border-radius: 4px; + cursor: pointer; + transition: all 0.2s ease; + display: flex; + align-items: center; + gap: 5px; + margin-left: auto; /* Push to the right edge */ + box-shadow: 0 1px 3px rgba(0, 0, 0, 0.15); +} + +.section-header-with-action .icon-button i { + font-size: 14px; +} + +.section-header-with-action .icon-button:hover { + background-color: var(--button-danger-hover); + transform: translateY(-1px); + box-shadow: 0 2px 4px rgba(0, 0, 0, 0.2); +} + +.section-header-with-action .icon-button:active { + transform: translateY(0); + box-shadow: 0 1px 2px rgba(0, 0, 0, 0.2); +} + +/* Fix for trash icon positioning */ +.fa-trash-alt { + display: inline-block; + margin-right: 2px; +} + +/* Reset button in top right corner */ +.top-right-button { + position: absolute !important; + top: 0; + right: 0; + margin: 0 !important; + padding: 6px 10px !important; + font-size: 12px !important; + border-radius: 3px !important; +} + +.top-right-button i { + margin-right: 3px; +} + +/* Stateful management header row with reset button */ +.stateful-header-row { + display: flex; + justify-content: space-between; + align-items: center; + padding-bottom: 12px; + margin-bottom: 20px; + border-bottom: 1px solid #2d3748; /* Dark border line matching the screenshot */ + width: 100%; +} + +.stateful-header-row h3 { + margin: 0; + font-size: 18px; + font-weight: 600; + color: var(--text-primary); +} + +/* Reset button styling exactly matching the screenshot */ 
+#reset_stateful_btn { + background-color: #e74c3c; + color: white; + border: none; + padding: 5px 12px; + border-radius: 4px; + font-size: 13px; + font-weight: 500; + cursor: pointer; + display: inline-flex; + align-items: center; + gap: 5px; + transition: background-color 0.2s ease; +} + +#reset_stateful_btn:hover { + background-color: #c0392b; +} + +#reset_stateful_btn i { + font-size: 13px; +} + +/* Apps Section */ +/* Use the existing log dropdown styles for app section. No custom CSS needed for the dropdown itself. */ + +/* App settings content styling */ +.settings-content { + margin-top: 20px; +} + +.app-apps-panel { + display: none; + width: 100%; +} + +.app-apps-panel.active { + display: block; +} + +/* Instance panel styling */ +.instance-panel { + background-color: var(--bg-secondary, #2c2c2c); + border-radius: 4px; + padding: 15px; + margin-bottom: 15px; + border: 1px solid var(--border-color, #3c3c3c); +} + +.instance-header { + display: flex; + align-items: center; + margin-bottom: 15px; + gap: 10px; + padding-bottom: 10px; + border-bottom: 1px solid var(--border-color, #3c3c3c); +} + +.instance-name { + flex: 1; + padding: 8px; + background-color: var(--bg-tertiary, #252525); + border: 1px solid var(--border-color, #3c3c3c); + border-radius: 4px; + color: var(--text-primary, white); + font-size: 14px; +} + +.form-field { + margin-bottom: 15px; +} + +.form-field label { + display: block; + margin-bottom: 5px; + font-weight: 400; + color: var(--text-primary, #f0f0f0); + font-size: 14px; +} + +.form-field input { + padding: 8px; + background-color: var(--bg-tertiary, #252525); + border: 1px solid var(--border-color, #3c3c3c); + border-radius: 4px; + color: var(--text-primary, white); + width: 100%; + max-width: 500px; + font-size: 14px; +} + +/* Button styling */ +.add-instance-btn { + background-color: var(--accent-color, #007bff); + color: white; + border: none; + padding: 8px 16px; + border-radius: 4px; + cursor: pointer; + display: inline-flex; + 
align-items: center; + gap: 5px; + font-size: 14px; + margin-top: 15px; +} + +.add-instance-btn:hover { + background-color: var(--accent-hover, #0069d9); +} + +.remove-instance-btn { + background-color: #dc3545; + color: white; + border: none; + width: 30px; + height: 30px; + border-radius: 4px; + cursor: pointer; + display: flex; + align-items: center; + justify-content: center; +} + +.remove-instance-btn:hover { + background-color: #c82333; +} + +.test-connection-btn { + background-color: #28a745; + color: white; + border: none; + padding: 6px 12px; + border-radius: 4px; + cursor: pointer; + display: inline-flex; + align-items: center; + gap: 5px; + font-size: 13px; + margin-top: 5px; + transition: background-color 0.2s; +} + +.test-connection-btn:hover { + background-color: #218838; +} + +.test-connection-btn.test-success { + background-color: #28a745; +} + +.test-connection-btn.test-failed { + background-color: #dc3545; +} + +.test-connection-btn:disabled { + opacity: 0.7; + cursor: not-allowed; +} + +/* Match styling with existing settings UI */ +#appsSection .section-header { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 20px; + border-bottom: 1px solid var(--border-color, #3c3c3c); + padding-bottom: 10px; +} + +#appsSection .settings-group { + margin-top: 20px; + margin-bottom: 30px; + background-color: var(--bg-secondary, #252525); + border-radius: 4px; + padding: 20px; +} + +#appsSection .settings-group-header { + margin-bottom: 15px; + padding-bottom: 10px; + border-bottom: 1px solid var(--border-color, #363636); + font-size: 16px; + font-weight: 500; + color: var(--text-primary, #f0f0f0); +} + +.loading-panel { + display: flex; + align-items: center; + justify-content: center; + padding: 20px; + color: var(--text-primary, #f0f0f0); + gap: 10px; +} + +.error-panel { + display: flex; + align-items: center; + justify-content: center; + padding: 20px; + color: #dc3545; + gap: 10px; +} + +/* App content panel 
styling - eliminate box effect */ +.app-content-panel { + border: none; + box-shadow: none; + background: transparent; + padding: 0; +} + +/* Style the direct children of app panels that are creating nested boxes */ +.app-apps-panel > .settings-group { + border: none; + box-shadow: none; + background: transparent; + padding: 0; +} + +/* Instance panel styling - keep these as boxes */ +.instance-panel { + background-color: var(--bg-secondary, #2c2c2c); + border-radius: 4px; + padding: 15px; + margin-bottom: 15px; + border: 1px solid var(--border-color, #3c3c3c); +} \ No newline at end of file diff --git a/Huntarr.io-6.3.6/frontend/static/images/discord.png b/Huntarr.io-6.3.6/frontend/static/images/discord.png new file mode 100644 index 0000000..0f20692 Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/images/discord.png differ diff --git a/Huntarr.io-6.3.6/frontend/static/js/apps.js b/Huntarr.io-6.3.6/frontend/static/js/apps.js new file mode 100644 index 0000000..5294d27 --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/static/js/apps.js @@ -0,0 +1,739 @@ +/** + * Huntarr - Apps Module + * Handles displaying and managing app settings for media server applications + */ + +const appsModule = { + // State + currentApp: null, + isLoading: false, + settingsChanged: false, // Flag to track unsaved settings changes + originalSettings: {}, // Store original settings to compare + appsWithChanges: [], // Track which apps have unsaved changes + + // DOM elements + elements: {}, + + // Initialize the apps module + init: function() { + // Initialize state + this.currentApp = null; + this.settingsChanged = false; // Flag to track unsaved settings changes + this.originalSettings = {}; // Store original settings to compare + + // Set a global flag to indicate we've loaded + window._appsModuleLoaded = true; + + // Add global variable to track if we're in the middle of saving + window._appsCurrentlySaving = false; + + // Add global variable to disable change detection 
temporarily + window._appsSuppressChangeDetection = false; + + // Cache DOM elements + this.cacheElements(); + + // Set up event listeners + this.setupEventListeners(); + + // Initialize state + this.settingsChanged = false; + + // Load apps for initial display + this.loadApps(); + + // Register with the main unsaved changes system if available + this.registerUnsavedChangesHandler(); + }, + + // Register with the main unsaved changes system + registerUnsavedChangesHandler: function() { + // Check if we already have the event listener + if (!this._unsavedChangesHandlerRegistered) { + this._unsavedChangesHandlerRegistered = true; + + document.addEventListener('click', (event) => { + // Skip handling if currently saving or change detection is suppressed + if (window._appsCurrentlySaving || window._appsSuppressChangeDetection) { + return; + } + + // Only check for navigation away from apps section + const navItem = event.target.closest('.nav-item, a'); + if (navItem) { + const href = navItem.getAttribute('href'); + + // Skip if clicking within the apps section or on external links + if (!href || href === '#apps' || href.startsWith('http') || navItem.getAttribute('target') === '_blank') { + return; + } + + // Immediately clear the settingsChanged flag if we're not actually on the apps page + if (window.location.hash !== '#apps') { + this.settingsChanged = false; + return; + } + + // Check for unsaved changes + if (this.hasUnsavedChanges()) { + if (!confirm('You have unsaved changes. Are you sure you want to leave? 
Changes will be lost.')) { + event.preventDefault(); + } else { + // User clicked OK, reset the settings changed flag + this.settingsChanged = false; + } + } + } + }); + + // Also handle browser back/forward navigation + window.addEventListener('beforeunload', (event) => { + // Skip if currently saving, suppression active, or not on apps page + if (window._appsCurrentlySaving || + window._appsSuppressChangeDetection || + window.location.hash !== '#apps') { + return; + } + + // Check for unsaved changes + if (this.hasUnsavedChanges()) { + // Show standard browser confirmation + event.preventDefault(); + event.returnValue = 'You have unsaved changes. Are you sure you want to leave? Changes will be lost.'; + return event.returnValue; + } + }); + } + }, + + // Check for unsaved changes before navigating away + hasUnsavedChanges: function() { + // If test connection suppression is active, return false to prevent dialog + if (window._suppressUnsavedChangesDialog === true) { + console.log('Unsaved changes check suppressed due to test connection'); + return false; + } + + // If the app is currently saving, don't consider it as having unsaved changes + if (window._appsCurrentlySaving) { + console.log('Skipping unsaved changes check because app is currently saving'); + return false; + } + + // Check if settings have changed + return this.settingsChanged === true; + }, + + // Cache DOM elements + cacheElements: function() { + this.elements = { + // Apps dropdown + appsOptions: document.querySelectorAll('#appsSection .log-option'), + currentAppsApp: document.getElementById('current-apps-app'), + appsDropdownBtn: document.querySelector('#appsSection .log-dropdown-btn'), + appsDropdownContent: document.querySelector('#appsSection .log-dropdown-content'), + + // Apps panels + appAppsPanels: document.querySelectorAll('.app-apps-panel'), + + // Controls + saveAppsButton: document.getElementById('saveAppsButton') + }; + }, + + // Set up event listeners + setupEventListeners: 
function() { + // App selection via +

Friendly name for this Sonarr instance

+ +
+ + +

Base URL for Sonarr (e.g., http://localhost:8989)

+
+
+ + +

API key for Sonarr

+
+
+ + +
+ + + `; + }); + + instancesHtml += ` + +
+ +
+ + `; + + // Search Settings + let searchSettingsHtml = ` +
+

Search Settings

+
+ + +

How to search for missing Sonarr content (Season Packs recommended for torrent users)

+
+
+ + +

Number of missing items to search per cycle (0 to disable)

+
+
+ + +

Number of episodes to upgrade per cycle (0 to disable)

+
+
+ + +

Time in seconds between processing cycles

+
+
+ +
+

Additional Options

+
+ + +

Only search for monitored items

+
+
+ + +

Skip searching for episodes with future air dates

+
+
+ + +

Skip refreshing series metadata before searching

+
+
+ `; + + // Set the content + container.innerHTML = instancesHtml + searchSettingsHtml; + + // Setup instance management (add/remove/test) + SettingsForms.setupInstanceManagement(container, 'sonarr', settings.instances.length); + }, + + // Generate Radarr settings form + generateRadarrForm: function(container, settings = {}) { + // Add data-app-type attribute to container + container.setAttribute('data-app-type', 'radarr'); + + // Make sure the instances array exists + if (!settings.instances || !Array.isArray(settings.instances) || settings.instances.length === 0) { + settings.instances = [{ + name: "Default", + api_url: settings.api_url || "", + api_key: settings.api_key || "", + enabled: true + }]; + } + + // Create a container for instances with a scrollable area for many instances + let instancesHtml = ` +
+

Radarr Instances

+
+ `; + + // Generate form elements for each instance + settings.instances.forEach((instance, index) => { + instancesHtml += ` +
+
+

Instance ${index + 1}: ${instance.name || 'Unnamed'}

+
+ ${index > 0 ? '' : ''} + +
+
+
+
+ + +

Friendly name for this Radarr instance

+
+
+ + +

Base URL for Radarr (e.g., http://localhost:7878)

+
+
+ + +

API key for Radarr

+
+
+ + +
+
+
+ `; + }); + + // Add a button to add new instances (limit to 9 total) + instancesHtml += ` +
+
+ +
+
+ `; + + // Continue with the rest of the settings form + container.innerHTML = ` + ${instancesHtml} + +
+

Search Settings

+
+ + +

Number of missing movies to search per cycle (0 to disable)

+
+
+ + +

Number of movies to search for quality upgrades per cycle (0 to disable)

+
+
+ + +

Time in seconds between processing cycles

+
+
+ +
+

Additional Options

+
+ + +

Only search for monitored items

+
+
+ + +

Skip searching for movies with future release dates

+
+
+ + +

Select which release date type to use when determining if a movie is considered a future release

+
+
+ + +

Skip refreshing movie metadata before searching

+
+
+ `; + + // Add event listeners for the instance management + SettingsForms.setupInstanceManagement(container, 'radarr', settings.instances.length); + + // Set up event listeners for the skip_future_releases checkbox + const skipFutureCheckbox = container.querySelector('#skip_future_releases'); + const releaseTypeContainer = container.querySelector('#future_release_type_container'); + + if (skipFutureCheckbox) { + skipFutureCheckbox.addEventListener('change', function() { + if (this.checked) { + releaseTypeContainer.style.display = ''; + } else { + releaseTypeContainer.style.display = 'none'; + } + }); + } + }, + + // Generate Lidarr settings form + generateLidarrForm: function(container, settings = {}) { + // Add data-app-type attribute to container + container.setAttribute('data-app-type', 'lidarr'); + + // Make sure the instances array exists + if (!settings.instances || !Array.isArray(settings.instances) || settings.instances.length === 0) { + settings.instances = [{ + name: "Default", + api_url: settings.api_url || "", // Legacy support + api_key: settings.api_key || "", // Legacy support + enabled: true + }]; + } + + // Create a container for instances + let instancesHtml = ` +
+

Lidarr Instances

+
+ `; + + // Generate form elements for each instance + settings.instances.forEach((instance, index) => { + instancesHtml += ` +
+
+

Instance ${index + 1}: ${instance.name || 'Unnamed'}

+
+ ${index > 0 ? '' : ''} + +
+
+
+
+ + +

Friendly name for this Lidarr instance

+
+
+ + +

Base URL for Lidarr (e.g., http://localhost:8686)

+
+
+ + +

API key for Lidarr

+
+
+ + +
+
+
+ `; + }); + + instancesHtml += ` +
+
+ +
+
+ `; + + // Continue with the rest of the settings form + container.innerHTML = ` + ${instancesHtml} + +
+

Search Settings

+
+ + +

Whether to search by artist (all missing albums for artist) or individual albums

+
+
+ + +

Number of artists with missing albums to search per cycle (0 to disable)

+
+ +
+ + +

Number of albums to search for quality upgrades per cycle (0 to disable)

+
+
+ + +

Time in seconds between processing cycles

+
+
+ +
+

Additional Options

+
+ + +

Only search for monitored items

+
+
+ + +

Skip searching for albums with future release dates

+
+
+ + +

Skip refreshing artist metadata before searching

+
+
+ `; + + // Add event listeners for the instance management + SettingsForms.setupInstanceManagement(container, 'lidarr', settings.instances.length); + }, + + // Generate Readarr settings form + generateReadarrForm: function(container, settings = {}) { + // Add data-app-type attribute to container + container.setAttribute('data-app-type', 'readarr'); + + // Make sure the instances array exists + if (!settings.instances || !Array.isArray(settings.instances) || settings.instances.length === 0) { + settings.instances = [{ + name: "Default", + api_url: settings.api_url || "", // Legacy support + api_key: settings.api_key || "", // Legacy support + enabled: true + }]; + } + + // Create a container for instances + let instancesHtml = ` +
+

Readarr Instances

+
+ `; + + // Generate form elements for each instance + settings.instances.forEach((instance, index) => { + instancesHtml += ` +
+
+

Instance ${index + 1}: ${instance.name || 'Unnamed'}

+
+ ${index > 0 ? '' : ''} + +
+
+
+
+ + +

Friendly name for this Readarr instance

+
+
+ + +

Base URL for Readarr (e.g., http://localhost:8787)

+
+
+ + +

API key for Readarr

+
+
+ + +
+
+
+ `; + }); + + instancesHtml += ` +
+
+ +
+
+ `; + + // Continue with the rest of the settings form + container.innerHTML = ` + ${instancesHtml} + +
+

Search Settings

+
+ + +

Number of missing books to search per cycle (0 to disable)

+
+
+ + +

Number of books to search for quality upgrades per cycle (0 to disable)

+
+
+ + +

Time in seconds between processing cycles

+
+
+ +
+

Additional Options

+
+ + +

Only search for monitored items

+
+
+ + +

Skip searching for books with future release dates

+
+
+ + +

Skip refreshing author metadata before searching

+
+
+ `; + + // Add event listeners for the instance management + SettingsForms.setupInstanceManagement(container, 'readarr', settings.instances.length); + }, + + // Generate Whisparr settings form + generateWhisparrForm: function(container, settings = {}) { + // Add data-app-type attribute to container + container.setAttribute('data-app-type', 'whisparr'); + + // Make sure the instances array exists + if (!settings.instances || !Array.isArray(settings.instances) || settings.instances.length === 0) { + settings.instances = [{ + name: "Default", + api_url: "", + api_key: "", + enabled: true + }]; + } + + // Create a container for instances + let instancesHtml = ` +
+

Whisparr V2 Instances

+
+ `; + + // Generate form elements for each instance + settings.instances.forEach((instance, index) => { + instancesHtml += ` +
+
+

Instance ${index + 1}: ${instance.name || 'Unnamed'}

+
+ ${index > 0 ? '' : ''} + +
+
+
+
+ + +

Friendly name for this Whisparr V2 instance

+
+
+ + +

Base URL for Whisparr V2 (e.g., http://localhost:6969)

+
+
+ + +

API key for Whisparr V2

+
+
+ + +
+
+
+ `; + }); + + instancesHtml += ` +
+
+ +
+
+ `; + + // Search Settings + let searchSettingsHtml = ` +
+

Search Settings

+
+ + +

Number of missing items to search per cycle (0 to disable)

+
+
+ + +

Number of items to search for quality upgrades per cycle (0 to disable)

+
+
+ + +

Time in seconds between processing cycles

+
+
+ +
+

Additional Options

+
+ + +

Only search for monitored items

+
+
+ + +

Skip searching for scenes with future release dates

+
+
+ + +

Skip refreshing series metadata before searching

+
+
+ + +

Skip refreshing scene info before searching

+
+
+ `; + + // Set the content + container.innerHTML = instancesHtml + searchSettingsHtml; + + // Add event listeners for the instance management + this.setupInstanceManagement(container, 'whisparr', settings.instances.length); + + // Update duration display + this.updateDurationDisplay(); + }, + + // Generate Eros settings form + generateErosForm: function(container, settings = {}) { + // Add data-app-type attribute to container + container.setAttribute('data-app-type', 'eros'); + + // Make sure the instances array exists + if (!settings.instances || !Array.isArray(settings.instances) || settings.instances.length === 0) { + settings.instances = [{ + name: "Default", + api_url: "", + api_key: "", + enabled: true + }]; + } + + // Create a container for instances + let instancesHtml = ` +
+

Whisparr V3 Instances

+
+ `; + + // Generate form elements for each instance + settings.instances.forEach((instance, index) => { + instancesHtml += ` +
+
+

Instance ${index + 1}: ${instance.name || 'Unnamed'}

+
+ ${index > 0 ? '' : ''} + +
+
+
+
+ + +

Friendly name for this Whisparr V3 instance

+
+
+ + +

Base URL for Whisparr V3 (e.g., http://localhost:6969)

+
+
+ + +

API key for Whisparr V3

+
+
+ + +
+
+
+ `; + }); + + instancesHtml += ` +
+
+ +
+
+ `; + + // Search Mode dropdown + let searchSettingsHtml = ` +
+

Search Settings

+
+ + +

How to search for missing and upgradable Whisparr V3 content (Movie-based or Scene-based)

+
+
+ + +

Number of missing items to search per cycle (0 to disable)

+
+
+ + +

Number of items to search for quality upgrades per cycle (0 to disable)

+
+
+ + +

Time in seconds between processing cycles

+
+
+ +
+

Additional Options

+
+ + +

Only search for monitored items

+
+
+ + +

Skip searching for scenes with future release dates

+
+
+ + +

Skip refreshing movie metadata before searching (strongly recommended to enable this for Whisparr V3)

+
+
+ `; + + // Set the content + container.innerHTML = instancesHtml + searchSettingsHtml; + + // Add event listeners for the instance management + this.setupInstanceManagement(container, 'eros', settings.instances.length); + + // Update duration display + this.updateDurationDisplay(); + }, + + // Generate Swaparr settings form + generateSwaparrForm: function(container, settings = {}) { + // Add data-app-type attribute to container + container.setAttribute('data-app-type', 'swaparr'); + + container.innerHTML = ` +
+

Swaparr (Beta) - Only For Torrent Users

+
+

Swaparr addresses the issue of stalled downloads and I rewrote it to support Huntarr. Visit Swaparr's GitHub for more information and support the developer!

+
+
+ +
+

Swaparr Settings

+
+ + +

Enable automatic handling of stalled downloads

+
+
+ + +

Number of strikes before removing a stalled download

+
+
+ + +

Maximum time a download can be stalled (e.g., 30m, 2h, 1d)

+
+
+ + +

Ignore files larger than this size (e.g., 1GB, 25GB, 1TB)

+
+
+ + +

Remove the download from the torrent/usenet client when removed

+
+
+ + +

Log actions but don't actually remove downloads. Useful for testing the first time!

+
+
+ +
+

Swaparr Status

+
+
+ +
+
+

Loading Swaparr status...

+
+
+
+ `; + + // Load Swaparr status automatically + const resetStrikesBtn = container.querySelector('#reset_swaparr_strikes'); + const statusContainer = container.querySelector('#swaparr_status'); + + fetch('/api/swaparr/status') + .then(response => response.json()) + .then(data => { + let statusHTML = ''; + + // Add stats for each app if available + if (data.statistics && Object.keys(data.statistics).length > 0) { + statusHTML += '
    '; + + for (const [app, stats] of Object.entries(data.statistics)) { + statusHTML += `
  • ${app.toUpperCase()}: `; + if (stats.error) { + statusHTML += `Error: ${stats.error}
  • `; + } else { + statusHTML += `${stats.currently_striked} currently striked, ${stats.removed} removed (${stats.total_tracked} total tracked)`; + } + } + + statusHTML += '
'; + } else { + statusHTML += '

No statistics available yet.

'; + } + + statusContainer.innerHTML = statusHTML; + }) + .catch(error => { + console.error('Error loading Swaparr status:', error); + statusContainer.innerHTML = `

Error fetching status: ${error.message}

`; + }); + + // Add event listener for the Reset Strikes button + if (resetStrikesBtn) { + resetStrikesBtn.addEventListener('click', function() { + if (confirm('Are you sure you want to reset all Swaparr strikes? This will clear the strike history for all apps.')) { + statusContainer.innerHTML = '

Resetting strikes...

'; + + fetch('/api/swaparr/reset', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({}) + }) + .then(response => response.json()) + .then(data => { + if (data.success) { + statusContainer.innerHTML = `

Success: ${data.message}

`; + // Reload status after a short delay + setTimeout(() => { + fetch('/api/swaparr/status') + .then(response => response.json()) + .then(data => { + let statusHTML = ''; + if (data.statistics && Object.keys(data.statistics).length > 0) { + statusHTML += '
    '; + for (const [app, stats] of Object.entries(data.statistics)) { + statusHTML += `
  • ${app.toUpperCase()}: `; + if (stats.error) { + statusHTML += `Error: ${stats.error}
  • `; + } else { + statusHTML += `${stats.currently_striked} currently striked, ${stats.removed} removed (${stats.total_tracked} total tracked)`; + } + } + statusHTML += '
'; + } else { + statusHTML += '

No statistics available yet.

'; + } + statusContainer.innerHTML = statusHTML; + }); + }, 1000); + } else { + statusContainer.innerHTML = `

Error: ${data.message}

`; + } + }) + .catch(error => { + statusContainer.innerHTML = `

Error resetting strikes: ${error.message}

`; + }); + } + }); + } else if (!resetStrikesBtn) { + console.warn('Could not find #reset_swaparr_strikes to attach listener.'); + } else { + console.warn('huntarrUI or huntarrUI.resetStatefulManagement is not available.'); + } + + // Add confirmation dialog for local access bypass toggle + const localAccessBypassCheckbox = container.querySelector('#local_access_bypass'); + if (localAccessBypassCheckbox) { + // Store original state + const originalState = localAccessBypassCheckbox.checked; + + localAccessBypassCheckbox.addEventListener('change', function() { + const newState = this.checked; + + // Preview the UI changes immediately, but they'll be reverted if user doesn't save + if (typeof huntarrUI !== 'undefined' && typeof huntarrUI.updateUIForLocalAccessBypass === 'function') { + huntarrUI.updateUIForLocalAccessBypass(newState); + } + // Also ensure the main app knows settings have changed if the preview runs + if (typeof huntarrUI !== 'undefined' && typeof huntarrUI.markSettingsAsChanged === 'function') { + huntarrUI.markSettingsAsChanged(); + } + }); + } + }, + + // Format date nicely for display + formatDate: function(date) { + if (!date) return 'Never'; + + const options = { + year: 'numeric', + month: 'short', + day: 'numeric', + hour: '2-digit', + minute: '2-digit', + hour12: true + }; + + return date.toLocaleString('en-US', options); + }, + + // Get settings from form + getFormSettings: function(container, appType) { + let settings = {}; + + // Helper function to get input value with fallback + function getInputValue(selector, defaultValue) { + const element = container.querySelector(selector); + if (!element) return defaultValue; + + if (element.type === 'checkbox') { + return element.checked; + } else if (element.type === 'number') { + const parsedValue = parseInt(element.value); + return !isNaN(parsedValue) ? 
parsedValue : defaultValue; + } else { + return element.value || defaultValue; + } + } + + // For the general settings form, collect settings including advanced settings + if (appType === 'general') { + settings.themeName = getInputValue('#theme-select', 'dark'); + settings.resetInterval = getInputValue('#resetInterval', 168); + settings.clearCache = getInputValue('#clearCache', false); + settings.refreshSchedule = getInputValue('#refreshSchedule', false); + settings.disableSorting = getInputValue('#disableSorting', false); + settings.disableNotifications = getInputValue('#disableNotifications', false); + settings.openInNewTab = getInputValue('#openInNewTab', false); + settings.saveColumnSortState = getInputValue('#saveColumnSortState', true); + settings.disableAnimation = getInputValue('#disableAnimation', false); + settings.useCompactLayout = getInputValue('#useCompactLayout', false); + settings.disableAllowListPopup = getInputValue('#disableAllowListPopup', false); + settings.maxHistoryItems = getInputValue('#maxHistoryItems', 100); + settings.maxLogItems = getInputValue('#maxLogItems', 200); + settings.stateful_management_hours = getInputValue('#stateful_management_hours', 168); + settings.autoLoginWithoutPassword = getInputValue('#autoLoginWithoutPassword', false); + + // Add collection of advanced settings + settings.api_timeout = getInputValue('#api_timeout', 120); + settings.command_wait_delay = getInputValue('#command_wait_delay', 1); + settings.command_wait_attempts = getInputValue('#command_wait_attempts', 600); + settings.minimum_download_queue_size = getInputValue('#minimum_download_queue_size', -1); + settings.log_refresh_interval_seconds = getInputValue('#log_refresh_interval_seconds', 30); + } + + // For other app types, collect settings + else { + // Handle instances differently + const instances = []; + // Find instance containers with both old and new class names + const instanceContainers = container.querySelectorAll('.instance-item, 
.instance-panel'); + + // Collect instance data with improved error handling + instanceContainers.forEach((instance, index) => { + const nameInput = instance.querySelector('input[name="name"]'); + const urlInput = instance.querySelector('input[name="api_url"]'); + const keyInput = instance.querySelector('input[name="api_key"]'); + const enabledInput = instance.querySelector('input[name="enabled"]'); + + const name = nameInput ? nameInput.value : null; + const url = urlInput ? urlInput.value : null; + const key = keyInput ? keyInput.value : null; + const enabled = enabledInput ? enabledInput.checked : true; // Default to enabled if checkbox not found + + if (!name || !url || !key) { + console.warn(`Instance ${index} is missing required fields`); + } + + const instanceObj = { + name: name || `Instance ${index + 1}`, + api_url: url || "", + api_key: key || "", + enabled: enabled + }; + + instances.push(instanceObj); + }); + + // Ensure we always have at least one instance + if (instances.length === 0) { + console.warn('No instances found, adding a default empty instance'); + instances.push({ + name: 'Default', + api_url: '', + api_key: '', + enabled: true + }); + } + + settings.instances = instances; + + // Add app-specific settings + if (appType === 'sonarr') { + settings.hunt_missing_mode = getInputValue('#sonarr-hunt-missing-mode', 'episodes'); + settings.hunt_missing_items = getInputValue('#sonarr-hunt-missing-items', 1); + settings.hunt_upgrade_items = getInputValue('#sonarr-hunt-upgrade-items', 0); + settings.sleep_duration = getInputValue('#sonarr_sleep_duration', 900); + settings.monitored_only = getInputValue('#sonarr_monitored_only', true); + settings.skip_future_episodes = getInputValue('#sonarr_skip_future_episodes', true); + settings.skip_series_refresh = getInputValue('#sonarr_skip_series_refresh', false); + } + else if (appType === 'radarr') { + settings.hunt_missing_movies = getInputValue('#radarr_hunt_missing_movies', 1); + 
settings.hunt_upgrade_movies = getInputValue('#radarr_hunt_upgrade_movies', 0); + settings.monitored_only = getInputValue('#radarr_monitored_only', true); + settings.skip_future_releases = getInputValue('#skip_future_releases', true); + settings.skip_movie_refresh = getInputValue('#skip_movie_refresh', false); + settings.sleep_duration = getInputValue('#radarr_sleep_duration', 900); + settings.release_type = getInputValue('#release_type', 'physical'); + } + else if (appType === 'lidarr') { + settings.hunt_missing_items = getInputValue('#lidarr_hunt_missing_items', 1); + settings.hunt_upgrade_items = getInputValue('#lidarr_hunt_upgrade_items', 0); + settings.hunt_missing_mode = getInputValue('#lidarr_hunt_missing_mode', 'artist'); + settings.monitored_only = getInputValue('#lidarr_monitored_only', true); + settings.sleep_duration = getInputValue('#lidarr_sleep_duration', 900); + } + else if (appType === 'readarr') { + settings.hunt_missing_books = getInputValue('#readarr_hunt_missing_books', 1); + settings.hunt_upgrade_books = getInputValue('#readarr_hunt_upgrade_books', 0); + settings.monitored_only = getInputValue('#readarr_monitored_only', true); + settings.skip_future_releases = getInputValue('#readarr_skip_future_releases', true); + settings.skip_author_refresh = getInputValue('#skip_author_refresh', false); + settings.sleep_duration = getInputValue('#readarr_sleep_duration', 900); + } + else if (appType === 'whisparr') { + settings.hunt_missing_items = getInputValue('#whisparr_hunt_missing_items', 1); + settings.hunt_upgrade_items = getInputValue('#whisparr_hunt_upgrade_items', 0); + settings.monitored_only = getInputValue('#whisparr_monitored_only', true); + settings.whisparr_version = getInputValue('#whisparr-api-version', 'v3'); + settings.skip_future_releases = getInputValue('#whisparr_skip_future_releases', true); + settings.sleep_duration = getInputValue('#whisparr_sleep_duration', 900); + } + else if (appType === 'eros') { + settings.search_mode = 
getInputValue('#eros_search_mode', 'movie'); + settings.hunt_missing_items = getInputValue('#eros_hunt_missing_items', 1); + settings.hunt_upgrade_items = getInputValue('#eros_hunt_upgrade_items', 0); + settings.monitored_only = getInputValue('#eros_monitored_only', true); + settings.skip_future_releases = getInputValue('#eros_skip_future_releases', true); + settings.skip_item_refresh = getInputValue('#eros_skip_item_refresh', false); + settings.sleep_duration = getInputValue('#eros_sleep_duration', 900); + } + else if (appType === 'swaparr') { + settings.enabled = getInputValue('#swaparr_enabled', false); + settings.max_strikes = getInputValue('#swaparr_max_strikes', 3); + settings.max_download_time = getInputValue('#swaparr_max_download_time', '2h'); + settings.ignore_above_size = getInputValue('#swaparr_ignore_above_size', '25GB'); + settings.remove_from_client = getInputValue('#swaparr_remove_from_client', true); + settings.dry_run = getInputValue('#swaparr_dry_run', false); + } + } + + console.log('Collected settings for', appType, settings); + return settings; + }, + + // Generate General settings form + generateGeneralForm: function(container, settings = {}) { + // Add data-app-type attribute to container + container.setAttribute('data-app-type', 'general'); + + container.innerHTML = ` +
+

System Settings

+
+ + +

Automatically check for Huntarr updates

+
+
+ + +

Enable verbose logging for troubleshooting (applies to all apps)

+
+
+ + +

Interval in seconds to refresh log display (applies to all apps)

+
+
+ +
+
+

Stateful Management

+ +
+
+ +
+
+
Initial State Created:
+
Loading...
+
+
+
State Reset Date:
+
Loading...
+
+
+
+
+ + +

Hours before resetting processed media state (${((settings.stateful_management_hours || 168) / 24).toFixed(1)} days)

+

Reset clears all processed media IDs to allow reprocessing

+
+
+ +
+

Security

+
+ + +

Allow access without login when connecting from local network IP addresses (e.g., 192.168.x.x, 10.x.x.x)

+
+
+ +
+

Advanced Settings

+
+ + +

API request timeout in seconds

+
+
+ + +

Delay between command status checks in seconds

+
+
+ + +

Maximum number of attempts to check command status

+
+
+ + + If the current download queue for an app instance exceeds this value, downloads will be skipped until the queue reduces. Set to -1 to disable this limit. This setting applies per app instance. +
+
+ + +

How often Huntarr refreshes logs from apps (seconds)

+
+
+ `; + + // Get hours input and days span elements once + const statefulHoursInput = container.querySelector('#stateful_management_hours'); + const statefulDaysSpan = container.querySelector('#stateful_management_days'); + + if (statefulHoursInput && statefulDaysSpan) { + statefulHoursInput.addEventListener('input', function() { + const hours = parseInt(this.value); + const days = (hours / 24).toFixed(1); + statefulDaysSpan.textContent = `${days} days`; + }); + } + + // Load stateful management info + const createdDateEl = document.getElementById('stateful_initial_state'); + const expiresDateEl = document.getElementById('stateful_expires_date'); + + // Skip loading if huntarrUI has already loaded this data to prevent flashing + if (window.huntarrUI && window.huntarrUI._cachedStatefulData) { + console.log('[SettingsForms] Using existing huntarrUI cached stateful data'); + return; // Exit early - main.js already has this covered + } + + // Only set to Loading if not already populated + if (createdDateEl && (!createdDateEl.textContent || createdDateEl.textContent === 'N/A')) { + createdDateEl.textContent = 'Loading...'; + } + if (expiresDateEl && (!expiresDateEl.textContent || expiresDateEl.textContent === 'N/A')) { + expiresDateEl.textContent = 'Loading...'; + } + + // Check if data is already cached in localStorage + const cachedStatefulData = localStorage.getItem('huntarr-stateful-data'); + if (cachedStatefulData) { + try { + const parsedData = JSON.parse(cachedStatefulData); + const cacheAge = Date.now() - parsedData.timestamp; + + // Use cache if it's less than 5 minutes old + if (cacheAge < 300000) { + console.log('[SettingsForms] Using cached stateful data'); + + if (createdDateEl && parsedData.created_at_ts) { + const createdDate = new Date(parsedData.created_at_ts * 1000); + createdDateEl.textContent = this.formatDate(createdDate); + } + + if (expiresDateEl && parsedData.expires_at_ts) { + const expiresDate = new Date(parsedData.expires_at_ts * 1000); + 
expiresDateEl.textContent = this.formatDate(expiresDate); + } + + // Still fetch fresh data in the background, but don't update UI + fetchStatefulInfoSilently(); + return; + } + } catch (e) { + console.warn('[SettingsForms] Error parsing cached stateful data:', e); + } + } + + fetch('/api/stateful/info', { + cache: 'no-cache', + headers: { + 'Cache-Control': 'no-cache, no-store, must-revalidate', + 'Pragma': 'no-cache', + 'Expires': '0' + } + }) + .then(response => { + if (!response.ok) { + throw new Error(`HTTP error! Status: ${response.status}`); + } + return response.json(); + }) + .then(data => { + // Cache the response with a timestamp for future use + localStorage.setItem('huntarr-stateful-data', JSON.stringify({ + ...data, + timestamp: Date.now() + })); + + if (createdDateEl) { + if (data.created_at_ts) { + const createdDate = new Date(data.created_at_ts * 1000); + createdDateEl.textContent = this.formatDate(createdDate); + } else { + createdDateEl.textContent = 'Not yet created'; + } + } + + if (expiresDateEl) { + if (data.expires_at_ts) { + const expiresDate = new Date(data.expires_at_ts * 1000); + expiresDateEl.textContent = this.formatDate(expiresDate); + } else { + expiresDateEl.textContent = 'Not set'; + } + } + + // Store data for other components to use + if (window.huntarrUI) { + window.huntarrUI._cachedStatefulData = data; + } + }) + .catch(error => { + console.error('Error loading stateful info:', error); + + // Try using cached data as fallback + if (cachedStatefulData) { + try { + const parsedData = JSON.parse(cachedStatefulData); + + if (createdDateEl && parsedData.created_at_ts) { + const createdDate = new Date(parsedData.created_at_ts * 1000); + createdDateEl.textContent = this.formatDate(createdDate) + ' (cached)'; + } else if (createdDateEl) { + createdDateEl.textContent = 'Not available'; + } + + if (expiresDateEl && parsedData.expires_at_ts) { + const expiresDate = new Date(parsedData.expires_at_ts * 1000); + expiresDateEl.textContent = 
this.formatDate(expiresDate) + ' (cached)'; + } else if (expiresDateEl) { + expiresDateEl.textContent = 'Not available'; + } + } catch (e) { + if (createdDateEl) createdDateEl.textContent = 'Not available'; + if (expiresDateEl) expiresDateEl.textContent = 'Not available'; + } + } else { + if (createdDateEl) createdDateEl.textContent = 'Not available'; + if (expiresDateEl) expiresDateEl.textContent = 'Not available'; + } + }); + + // Helper function to fetch data silently without updating UI + function fetchStatefulInfoSilently() { + fetch('/api/stateful/info', { + cache: 'no-cache', + headers: { + 'Cache-Control': 'no-cache, no-store, must-revalidate', + 'Pragma': 'no-cache', + 'Expires': '0' + } + }) + .then(response => response.ok ? response.json() : null) + .then(data => { + if (data && data.success) { + localStorage.setItem('huntarr-stateful-data', JSON.stringify({ + ...data, + timestamp: Date.now() + })); + + if (window.huntarrUI) { + window.huntarrUI._cachedStatefulData = data; + } + } + }) + .catch(error => console.warn('Silent stateful info fetch failed:', error)); + } + }, + + // Update duration display - e.g., convert seconds to hours + updateDurationDisplay: function() { + // Function to update a specific sleep duration display + const updateSleepDisplay = function(inputId, spanId) { + const input = document.getElementById(inputId); + const span = document.getElementById(spanId); + if (!input || !span) return; + + const seconds = parseInt(input.value); + if (isNaN(seconds)) return; + + const hours = (seconds / 3600).toFixed(1); + if (hours < 1) { + const minutes = Math.round(seconds / 60); + span.textContent = `${minutes} minutes`; + } else { + span.textContent = `${hours} hours`; + } + }; + + // Update for each app + updateSleepDisplay('sleep_duration', 'sleep_duration_hours'); + updateSleepDisplay('radarr_sleep_duration', 'radarr_sleep_duration_hours'); + updateSleepDisplay('lidarr_sleep_duration', 'lidarr_sleep_duration_hours'); + 
updateSleepDisplay('readarr_sleep_duration', 'readarr_sleep_duration_hours'); + updateSleepDisplay('whisparr_sleep_duration', 'whisparr_sleep_duration_hours'); // Added Whisparr + }, + + // Setup instance management - test connection buttons and add/remove instance buttons + setupInstanceManagement: function(container, appType, initialCount) { + console.log(`Setting up instance management for ${appType} with ${initialCount} instances`); + + // Make sure container has the app type set + const form = container.closest('.settings-form'); + if (form && !form.hasAttribute('data-app-type')) { + form.setAttribute('data-app-type', appType); + } + + // Add listeners for test connection buttons + const testButtons = container.querySelectorAll('.test-connection-btn'); + testButtons.forEach(button => { + button.addEventListener('click', (e) => { + // Prevent any default form submission + e.preventDefault(); + + console.log('Test connection button clicked'); + + // Get the instance panel containing this button - look for both old and new class names + const instancePanel = button.closest('.instance-item') || button.closest('.instance-panel'); + if (!instancePanel) { + console.error('Could not find instance panel for test button', button); + alert('Error: Could not find instance panel'); + return; + } + + // Get the URL and API key inputs directly within this instance panel + const urlInput = instancePanel.querySelector('input[name="api_url"]'); + const keyInput = instancePanel.querySelector('input[name="api_key"]'); + + console.log('Found inputs:', urlInput, keyInput); + + if (!urlInput || !keyInput) { + console.error('Could not find URL or API key inputs in panel', instancePanel); + alert('Error: Could not find URL or API key inputs'); + return; + } + + const url = urlInput.value.trim(); + const apiKey = keyInput.value.trim(); + + console.log(`Testing connection for ${appType} - URL: ${url}, API Key: ${apiKey.substring(0, 5)}...`); + + if (!url) { + alert('Please enter a valid 
URL'); + urlInput.focus(); + return; + } + + if (!apiKey) { + alert('Please enter a valid API key'); + keyInput.focus(); + return; + } + + // Show testing status + const originalButtonHTML = button.innerHTML; + button.innerHTML = ' Testing...'; + button.disabled = true; + + // Make the API request + fetch(`/api/${appType}/test-connection`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + api_url: url, + api_key: apiKey + }) + }) + .then(response => { + if (!response.ok) { + throw new Error(`HTTP error ${response.status}: ${response.statusText}`); + } + return response.json(); + }) + .then(data => { + console.log(`Test connection response:`, data); + + // Reset button + button.disabled = false; + + if (data.success) { + // Success + button.innerHTML = ' Connected!'; + button.classList.add('test-success'); + + let successMessage = `Successfully connected to ${appType.charAt(0).toUpperCase() + appType.slice(1)}`; + if (data.version) { + successMessage += ` (version ${data.version})`; + } + + // Alert the user of success + alert(successMessage); + + // Reset button after delay + setTimeout(() => { + button.innerHTML = originalButtonHTML; + button.classList.remove('test-success'); + }, 3000); + } else { + // Failure + button.innerHTML = ' Failed'; + button.classList.add('test-failed'); + + alert(`Connection failed: ${data.message || 'Unknown error'}`); + + setTimeout(() => { + button.innerHTML = originalButtonHTML; + button.classList.remove('test-failed'); + }, 3000); + } + }) + .catch(error => { + console.error(`Test connection error:`, error); + + button.disabled = false; + button.innerHTML = ' Error'; + button.classList.add('test-failed'); + + alert(`Connection test failed: ${error.message}`); + + setTimeout(() => { + button.innerHTML = originalButtonHTML; + button.classList.remove('test-failed'); + }, 3000); + }); + }); + }); + + // Add a button to add new instances (limit to 9 total) + const addBtn = 
container.querySelector(`.add-${appType}-instance-btn`); + if (addBtn) { + // Function to update the button text with current instance count + const updateAddButtonText = () => { + const instancesContainer = container.querySelector('.instances-container'); + if (!instancesContainer) return; + const currentCount = instancesContainer.querySelectorAll('.instance-item').length; + addBtn.innerHTML = ` Add ${appType.charAt(0).toUpperCase() + appType.slice(1)} Instance (${currentCount}/9)`; + + // Disable button if we've reached the maximum + if (currentCount >= 9) { + addBtn.disabled = true; + addBtn.title = "Maximum number of instances reached"; + } else { + addBtn.disabled = false; + addBtn.title = ""; + } + }; + + // Initialize button text + updateAddButtonText(); + + addBtn.addEventListener('click', function() { + const instancesContainer = container.querySelector('.instances-container'); + if (!instancesContainer) return; + + // Count current instances + const currentCount = instancesContainer.querySelectorAll('.instance-item').length; + + // Don't add more if we have 9 already + if (currentCount >= 9) { + alert("Maximum of 9 instances allowed"); + return; + } + + // Create new instance div + const newInstanceDiv = document.createElement('div'); + newInstanceDiv.className = 'instance-item'; // Use instance-item + newInstanceDiv.dataset.instanceId = currentCount; + + // Set content for the new instance using the updated structure + newInstanceDiv.innerHTML = ` +
+

Instance ${currentCount + 1}: Instance ${currentCount + 1}

+
+ + +
+
+
+
+ + +

Friendly name for this ${appType} instance

+
+
+ + +

Base URL for ${appType} (e.g., http://localhost:8989)

+
+
+ + +

API key for ${appType}

+
+
+ + +
+
+ `; + + // Add the new instance to the container + instancesContainer.appendChild(newInstanceDiv); + + // Update the button text with new count + updateAddButtonText(); + + // Add event listener for the remove button + const removeBtn = newInstanceDiv.querySelector('.remove-instance-btn'); + if (removeBtn) { + removeBtn.addEventListener('click', function() { + instancesContainer.removeChild(newInstanceDiv); + + // Update the add button text after removing + updateAddButtonText(); + + // Trigger change event to update save button state + const changeEvent = new Event('change'); + container.dispatchEvent(changeEvent); + }); + } + + // Add event listener for test connection button + const testBtn = newInstanceDiv.querySelector('.test-connection-btn'); + if (testBtn) { + testBtn.addEventListener('click', function() { + // Get the URL and API key inputs from the parent instance item + const instanceContainer = testBtn.closest('.instance-item') || testBtn.closest('.instance-panel'); + if (!instanceContainer) { + alert('Error: Could not find instance container'); + return; + } + + const urlInput = instanceContainer.querySelector('input[name="api_url"]'); + const keyInput = instanceContainer.querySelector('input[name="api_key"]'); + + if (!urlInput || !keyInput) { + alert('Error: Could not find URL or API key inputs'); + return; + } + + const url = urlInput.value.trim(); + const apiKey = keyInput.value.trim(); + + if (!url) { + alert('Please enter a valid URL'); + urlInput.focus(); + return; + } + + if (!apiKey) { + alert('Please enter a valid API key'); + keyInput.focus(); + return; + } + + // Call the test connection function + SettingsForms.testConnection(appType, url, apiKey, testBtn); + }); + } + + // Trigger change event to update save button state + const changeEvent = new Event('change'); + container.dispatchEvent(changeEvent); + }); + } + + // Set up remove buttons for existing instances + const removeButtons = container.querySelectorAll('.remove-instance-btn'); 
+ removeButtons.forEach(btn => { + btn.addEventListener('click', function() { + const instancePanel = btn.closest('.instance-item') || btn.closest('.instance-panel'); + if (instancePanel && instancePanel.parentNode) { + instancePanel.parentNode.removeChild(instancePanel); + + // Update the button text with new count if updateAddButtonText exists + if (typeof updateAddButtonText === 'function') { + updateAddButtonText(); + } + + // Trigger change event to update save button state + const changeEvent = new Event('change'); + container.dispatchEvent(changeEvent); + } + }); + }); + }, + + // Test connection to an *arr API + testConnection: function(app, url, apiKey, buttonElement) { + // Temporarily suppress change detection to prevent the unsaved changes dialog + if (window.huntarrUI && window.huntarrUI.suppressUnsavedChangesCheck) { + window.huntarrUI.suppressUnsavedChangesCheck = true; + } + + // Also set a global flag used by the apps module + window._suppressUnsavedChangesDialog = true; + + // Find or create a status message element next to the button + let statusElement = buttonElement.closest('.instance-actions').querySelector('.connection-message'); + if (!statusElement) { + statusElement = document.createElement('span'); + statusElement.className = 'connection-message'; + statusElement.style.marginLeft = '10px'; + statusElement.style.fontWeight = 'bold'; + buttonElement.closest('.instance-actions').insertBefore(statusElement, buttonElement); + } + + // Show testing status + const originalButtonHTML = buttonElement.innerHTML; + buttonElement.innerHTML = ' Testing...'; + buttonElement.disabled = true; + statusElement.textContent = 'Testing connection...'; + statusElement.style.color = '#888'; + + console.log(`Testing connection for ${app} - URL: ${url}, API Key: ${apiKey.substring(0, 5)}...`); + + if (!url) { + statusElement.textContent = 'Please enter a valid URL'; + statusElement.style.color = 'red'; + buttonElement.innerHTML = originalButtonHTML; + 
buttonElement.disabled = false; + // Reset suppression flags + this._resetSuppressionFlags(); + return; + } + + if (!apiKey) { + statusElement.textContent = 'Please enter a valid API key'; + statusElement.style.color = 'red'; + buttonElement.innerHTML = originalButtonHTML; + buttonElement.disabled = false; + // Reset suppression flags + this._resetSuppressionFlags(); + return; + } + + // Make the API request + fetch(`/api/${app}/test-connection`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + api_url: url, + api_key: apiKey + }) + }) + .then(response => { + if (!response.ok) { + throw new Error(`HTTP error ${response.status}: ${response.statusText}`); + } + return response.json(); + }) + .then(data => { + console.log(`Test connection response:`, data); + + // Reset button + buttonElement.disabled = false; + + if (data.success) { + // Success + buttonElement.innerHTML = ' Test Connection'; + + let successMessage = `Connected successfully`; + if (data.version) { + successMessage += ` (v${data.version})`; + } + + // Show success message + statusElement.textContent = successMessage; + statusElement.style.color = 'green'; + } else { + // Failure + buttonElement.innerHTML = ' Test Connection'; + + // Show error message + const errorMsg = data.message || 'Connection failed'; + statusElement.textContent = errorMsg; + statusElement.style.color = 'red'; + } + + // Reset suppression flags after a short delay to handle any potential redirects + setTimeout(() => { + this._resetSuppressionFlags(); + }, 500); + }) + .catch(error => { + console.error(`Connection test error:`, error); + + // Reset button + buttonElement.innerHTML = originalButtonHTML; + buttonElement.disabled = false; + + // Show error message + statusElement.textContent = `Error: ${error.message}`; + statusElement.style.color = 'red'; + + // Reset suppression flags + this._resetSuppressionFlags(); + }); + }, + + // Helper method to reset unsaved changes 
suppression flags + _resetSuppressionFlags: function() { + // Reset all suppression flags + if (window.huntarrUI) { + window.huntarrUI.suppressUnsavedChangesCheck = false; + } + window._suppressUnsavedChangesDialog = false; + }, +}; diff --git a/Huntarr.io-6.3.6/frontend/static/js/stats-reset.js b/Huntarr.io-6.3.6/frontend/static/js/stats-reset.js new file mode 100644 index 0000000..21f9907 --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/static/js/stats-reset.js @@ -0,0 +1,80 @@ +/** + * Stats Reset Handler + * Provides a unified way to handle stats reset operations + */ + +document.addEventListener('DOMContentLoaded', function() { + // Find the reset button on the home page + const resetButton = document.getElementById('reset-stats'); + + if (resetButton) { + console.log('Stats reset button found, attaching handler'); + + resetButton.addEventListener('click', function(e) { + e.preventDefault(); + + // Prevent double-clicks + if (this.disabled) return; + + // First update the UI immediately for responsive feedback + resetStatsUI(); + + // Then make the API call to persist the changes + resetStatsAPI() + .then(response => { + console.log('Stats reset response:', response); + if (!response.success) { + console.warn('Server reported an error with stats reset:', response.error); + } + }) + .catch(error => { + console.error('Error during stats reset:', error); + }); + }); + } +}); + +/** + * Reset the stats UI immediately for responsive feedback + */ +function resetStatsUI() { + // Find all stat counters and reset them to 0 + const statCounters = document.querySelectorAll('.stat-number'); + statCounters.forEach(counter => { + if (counter && counter.textContent) { + counter.textContent = '0'; + } + }); + + // Show success notification if available + if (window.huntarrUI && typeof window.huntarrUI.showNotification === 'function') { + window.huntarrUI.showNotification('Statistics reset successfully', 'success'); + } +} + +/** + * Make the API call to reset stats on the 
server + * @param {string|null} appType - Optional specific app to reset + * @returns {Promise} - Promise resolving to the API response + */ +function resetStatsAPI(appType = null) { + const requestBody = appType ? { app_type: appType } : {}; + + // Use the public endpoint that doesn't require authentication + return fetch('/api/stats/reset_public', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify(requestBody) + }) + .then(response => { + if (!response.ok) { + throw new Error('Server responded with status: ' + response.status); + } + return response.json(); + }); +} + +// Make resetStatsAPI available globally so other scripts can use it +window.resetStatsAPI = resetStatsAPI; diff --git a/Huntarr.io-6.3.6/frontend/static/js/theme-preload.js b/Huntarr.io-6.3.6/frontend/static/js/theme-preload.js new file mode 100644 index 0000000..92befe1 --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/static/js/theme-preload.js @@ -0,0 +1,108 @@ +(function() { + // Store logo URL consistently across the app - use local path instead of GitHub + const LOGO_URL = '/static/logo/256.png'; + + // Create and preload image with local path + const preloadImg = new Image(); + preloadImg.src = LOGO_URL; + + // Always enforce dark theme + document.documentElement.classList.add('dark-theme'); + localStorage.setItem('huntarr-dark-mode', 'true'); + + // Add inline style to immediately set background color + // This prevents flash before the CSS files load + const style = document.createElement('style'); + style.textContent = ` + body, html { + background-color: #1a1d24 !important; + color: #f8f9fa !important; + } + .sidebar { + background-color: #121212 !important; + } + .top-bar { + background-color: #252a34 !important; + } + .login-container { + background-color: #252a34 !important; + } + .login-header { + background-color: #121212 !important; + } + `; + document.head.appendChild(style); + + // Store the logo URL in localStorage for persistence 
across page loads + localStorage.setItem('huntarr-logo-url', LOGO_URL); + + // Create a global function to apply the logo to all logo elements + window.applyLogoToAllElements = function() { + const logoUrl = localStorage.getItem('huntarr-logo-url') || LOGO_URL; + const logoElements = document.querySelectorAll('.logo, .login-logo'); + + logoElements.forEach(img => { + if (!img.src || img.src !== logoUrl) { + img.src = logoUrl; + } + + // Handle image load event properly + if (img.complete) { + img.classList.add('loaded'); + } else { + img.onload = function() { + this.classList.add('loaded'); + }; + img.onerror = function() { + // Fallback if local path fails + console.warn('Logo failed to load, trying alternate source'); + if (this.src !== '/logo/256.png') { + this.src = '/logo/256.png'; + } + }; + } + }); + + // Check if the logo source needs updating + document.querySelectorAll('img[alt*="Logo"]').forEach(img => { + // Check if the src is not the correct static path + const currentSrc = new URL(img.src, window.location.origin).pathname; + if (currentSrc !== LOGO_URL) { + // Check against the old incorrect path as well, just in case + if (currentSrc === '/logo/64.png') { + img.src = LOGO_URL; + } + // You might want to add more specific checks or broader updates here + // For now, we only correct the specific incorrect path found + } + }); + }; + + // Apply logo as soon as DOM is interactive + if (document.readyState === 'loading') { + document.addEventListener('DOMContentLoaded', window.applyLogoToAllElements); + } else { + // DOMContentLoaded already fired + window.applyLogoToAllElements(); + } + + // Set up MutationObserver to catch any dynamically added logo elements + document.addEventListener('DOMContentLoaded', function() { + const observer = new MutationObserver(function(mutations) { + let shouldApplyLogos = false; + mutations.forEach(function(mutation) { + if (mutation.addedNodes.length) { + shouldApplyLogos = true; + } + }); + if (shouldApplyLogos) { + 
window.applyLogoToAllElements(); + } + }); + + observer.observe(document.body, { childList: true, subtree: true }); + }); + + // Ensure logo is loaded when navigating with AJAX + window.addEventListener('load', window.applyLogoToAllElements); +})(); diff --git a/Huntarr.io-6.3.6/frontend/static/js/user.js b/Huntarr.io-6.3.6/frontend/static/js/user.js new file mode 100644 index 0000000..8670b97 --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/static/js/user.js @@ -0,0 +1,53 @@ +/** + * Huntarr - User Settings Page + * Handles user profile management functionality + */ + +document.addEventListener('DOMContentLoaded', function() { + // This file serves as a placeholder for any additional user management + // functionality that might be needed in the future + + console.log('User settings page loaded'); + + // Most of the user functionality is implemented inline in the HTML page + // The following functions could be moved here in the future: + + // Function to load user information + function loadUserInfo() { + fetch('/api/user/info') + .then(response => response.json()) + .then(data => { + if (data.username) { + document.getElementById('username').textContent = data.username; + document.getElementById('currentUsername').value = data.username; + } + }) + .catch(error => console.error('Error loading user info:', error)); + } + + // Function to check 2FA status + function check2FAStatus() { + fetch('/api/user/2fa-status') + .then(response => response.json()) + .then(data => { + const enable2FACheckbox = document.getElementById('enable2FA'); + const setup2FAContainer = document.getElementById('setup2FAContainer'); + const remove2FAContainer = document.getElementById('remove2FAContainer'); + + if (data.enabled) { + enable2FACheckbox.checked = true; + setup2FAContainer.style.display = 'none'; + remove2FAContainer.style.display = 'block'; + } else { + enable2FACheckbox.checked = false; + setup2FAContainer.style.display = 'none'; + remove2FAContainer.style.display = 'none'; + } + 
}) + .catch(error => console.error('Error checking 2FA status:', error)); + } + + // Call these functions if needed + // loadUserInfo(); + // check2FAStatus(); +}); diff --git a/Huntarr.io-6.3.6/frontend/static/js/utils.js b/Huntarr.io-6.3.6/frontend/static/js/utils.js new file mode 100644 index 0000000..040cbab --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/static/js/utils.js @@ -0,0 +1,71 @@ +/** + * Huntarr - Utility Functions + * Shared functions for use across the application + */ + +const HuntarrUtils = { + /** + * Fetch with timeout using the global settings + * @param {string} url - The URL to fetch + * @param {Object} options - Fetch options + * @returns {Promise} - Fetch promise with timeout handling + */ + fetchWithTimeout: function(url, options = {}) { + // Get the API timeout from global settings, default to 120 seconds if not set + let apiTimeout = 120000; // Default 120 seconds in milliseconds + + // Try to get timeout from huntarrUI if available + if (window.huntarrUI && window.huntarrUI.originalSettings && + window.huntarrUI.originalSettings.general && + window.huntarrUI.originalSettings.general.api_timeout) { + apiTimeout = window.huntarrUI.originalSettings.general.api_timeout * 1000; + } + + // Create abort controller for timeout + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), apiTimeout); + + // Merge options with signal from AbortController + const fetchOptions = { + ...options, + signal: controller.signal + }; + + return fetch(url, fetchOptions) + .then(response => { + clearTimeout(timeoutId); + return response; + }) + .catch(error => { + clearTimeout(timeoutId); + // Customize the error if it was a timeout + if (error.name === 'AbortError') { + throw new Error(`Request timeout after ${apiTimeout / 1000} seconds`); + } + throw error; + }); + }, + + /** + * Get the global API timeout value in seconds + * @returns {number} - API timeout in seconds + */ + getApiTimeout: function() { + // Default 
value + let timeout = 120; + + // Try to get from global settings + if (window.huntarrUI && window.huntarrUI.originalSettings && + window.huntarrUI.originalSettings.general && + window.huntarrUI.originalSettings.general.api_timeout) { + timeout = window.huntarrUI.originalSettings.general.api_timeout; + } + + return timeout; + } +}; + +// If running in Node.js environment +if (typeof module !== 'undefined' && module.exports) { + module.exports = HuntarrUtils; +} diff --git a/Huntarr.io-6.3.6/frontend/static/logo/128.png b/Huntarr.io-6.3.6/frontend/static/logo/128.png new file mode 100644 index 0000000..aeb4179 Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/128.png differ diff --git a/Huntarr.io-6.3.6/frontend/static/logo/16.png b/Huntarr.io-6.3.6/frontend/static/logo/16.png new file mode 100644 index 0000000..987f868 Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/16.png differ diff --git a/Huntarr.io-6.3.6/frontend/static/logo/256.png b/Huntarr.io-6.3.6/frontend/static/logo/256.png new file mode 100644 index 0000000..b1d4977 Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/256.png differ diff --git a/Huntarr.io-6.3.6/frontend/static/logo/32.png b/Huntarr.io-6.3.6/frontend/static/logo/32.png new file mode 100644 index 0000000..c3f4e1b Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/32.png differ diff --git a/Huntarr.io-6.3.6/frontend/static/logo/40.png b/Huntarr.io-6.3.6/frontend/static/logo/40.png new file mode 100644 index 0000000..30c260c Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/40.png differ diff --git a/Huntarr.io-6.3.6/frontend/static/logo/400.png b/Huntarr.io-6.3.6/frontend/static/logo/400.png new file mode 100644 index 0000000..ce1d294 Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/400.png differ diff --git a/Huntarr.io-6.3.6/frontend/static/logo/48.png b/Huntarr.io-6.3.6/frontend/static/logo/48.png new file mode 100644 index 
0000000..1aad9a7 Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/48.png differ diff --git a/Huntarr.io-6.3.6/frontend/static/logo/512.png b/Huntarr.io-6.3.6/frontend/static/logo/512.png new file mode 100644 index 0000000..3740908 Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/512.png differ diff --git a/Huntarr.io-6.3.6/frontend/static/logo/64.png b/Huntarr.io-6.3.6/frontend/static/logo/64.png new file mode 100644 index 0000000..8544ec0 Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/64.png differ diff --git a/Huntarr.io-6.3.6/frontend/static/logo/800.png b/Huntarr.io-6.3.6/frontend/static/logo/800.png new file mode 100644 index 0000000..69e4fd4 Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/800.png differ diff --git a/Huntarr.io-6.3.6/frontend/static/logo/864.png b/Huntarr.io-6.3.6/frontend/static/logo/864.png new file mode 100644 index 0000000..7914fd8 Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/864.png differ diff --git a/Huntarr.io-6.3.6/frontend/static/logo/Huntarr.svg b/Huntarr.io-6.3.6/frontend/static/logo/Huntarr.svg new file mode 100644 index 0000000..e79f9a6 --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/static/logo/Huntarr.svg @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/Huntarr.io-6.3.6/frontend/static/logo/apps/cleanuperr.png b/Huntarr.io-6.3.6/frontend/static/logo/apps/cleanuperr.png new file mode 100644 index 0000000..4bf2f32 Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/apps/cleanuperr.png differ diff --git a/Huntarr.io-6.3.6/frontend/static/logo/huntarr.ico b/Huntarr.io-6.3.6/frontend/static/logo/huntarr.ico new file mode 100644 index 0000000..45c70ed Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/huntarr.ico differ diff --git a/Huntarr.io-6.3.6/frontend/templates/base.html b/Huntarr.io-6.3.6/frontend/templates/base.html new file mode 100644 index 0000000..c971f1a --- /dev/null +++ 
b/Huntarr.io-6.3.6/frontend/templates/base.html @@ -0,0 +1,27 @@ + + + + + + My App + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/Huntarr.io-6.3.6/frontend/templates/components/apps_section.html b/Huntarr.io-6.3.6/frontend/templates/components/apps_section.html new file mode 100644 index 0000000..3794a1b --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/templates/components/apps_section.html @@ -0,0 +1,233 @@ +
+
+
+ +
+ +
+ + +
+
+ +
+ +
+ + +
+
+
+
+
+
+
+
+
+
+
+
+ + diff --git a/Huntarr.io-6.3.6/frontend/templates/components/cleanuperr_section.html b/Huntarr.io-6.3.6/frontend/templates/components/cleanuperr_section.html new file mode 100644 index 0000000..d2e7643 --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/templates/components/cleanuperr_section.html @@ -0,0 +1,426 @@ +
+
+
+ +
+

Cleanuperr

+
+ +
+
+
+ +
+

Cleanuperr by Flaminel

+
+
+ +
+ + View on GitHub + +
+ + Loading... +
+
+ +
+

Cleanuperr is a tool for automating the cleanup of unwanted or blocked files in Sonarr, Radarr, and supported download clients like qBittorrent. It removes incomplete or blocked downloads, updates queues, and enforces blacklists or whitelists to manage file selection. After removing blocked content, Cleanuperr can also trigger a search to replace the deleted shows/movies.

+ +

Key Features:

+
    +
  • Strike system to mark stalled or downloads stuck in metadata downloading
  • +
  • Remove and block downloads that reached a maximum number of strikes
  • +
  • Remove and block downloads that have a low download speed or high estimated completion time
  • +
  • Remove downloads blocked by qBittorrent or by Cleanuperr's content blocker
  • +
  • Trigger a search for downloads removed from the *arrs
  • +
  • Clean up downloads that have been seeding for a certain amount of time
  • +
  • Notify on strike or download removal
  • +
  • Ignore certain torrent hashes, categories, tags or trackers from being processed
  • +
+ +
+

Cleanuperr was created primarily to address malicious files, such as *.lnk or *.zipx, that were getting stuck in Sonarr/Radarr and required manual intervention. It supports both qBittorrent's built-in exclusion features and its own blocklist-based system.

+
+ +
+
+ Flaminel +
+

About the Author

+

Cleanuperr is developed by Flaminel, a passionate developer focused on creating tools that enhance the media server experience.

+
+
+
+ +
+

Huntarr is proud to feature Cleanuperr as part of our commitment to helping other projects grow. We believe in collaboration across the media server community to create better tools for everyone.

+
+ + +
+
+
+
+ + + + diff --git a/Huntarr.io-6.3.6/frontend/templates/components/footer.html b/Huntarr.io-6.3.6/frontend/templates/components/footer.html new file mode 100644 index 0000000..f32c515 --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/templates/components/footer.html @@ -0,0 +1,68 @@ + + + diff --git a/Huntarr.io-6.3.6/frontend/templates/components/head.html b/Huntarr.io-6.3.6/frontend/templates/components/head.html new file mode 100644 index 0000000..9caa105 --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/templates/components/head.html @@ -0,0 +1,19 @@ + + + + + + + + + + + diff --git a/Huntarr.io-6.3.6/frontend/templates/components/history_section.html b/Huntarr.io-6.3.6/frontend/templates/components/history_section.html new file mode 100644 index 0000000..f963ab0 --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/templates/components/history_section.html @@ -0,0 +1,703 @@ +
+
+ +
+ +
+ + +
+ + +
+ + +
+ + +
+
+ +
+
+ + + + + + + + + + + + + + +
Date and TimeProcessed InformationOperationID NumberName of InstanceHow Long Ago
+
+ + +
+ +

No history found. Items will appear here when media is processed.

+
+ + +
+ +

Loading history...

+
+
+ + +
+ + Page 1 of 1 + +
+
+ + + + diff --git a/Huntarr.io-6.3.6/frontend/templates/components/home_section.html b/Huntarr.io-6.3.6/frontend/templates/components/home_section.html new file mode 100644 index 0000000..ade5801 --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/templates/components/home_section.html @@ -0,0 +1,959 @@ +
+
+ + + + + + + +
+
+

Live Hunts Executed

+ +
+
+
+ +
+
+ Loading... + +
+
+
+ +
+

Sonarr

+
+
+
+
0
+
Searches Triggered
+
+
+
0
+
Upgrades Triggered
+
+
+
+ + +
+
+ Loading... + +
+
+
+ +
+

Radarr

+
+
+
+ 0 + Searches Triggered +
+
+ 0 + Upgrades Triggered +
+
+
+ + +
+
+ Loading... + +
+
+
+ +
+

Lidarr

+
+
+
+ 0 + Searches Triggered +
+
+ 0 + Upgrades Triggered +
+
+
+ + +
+
+ Loading... + +
+
+
+ +
+

Readarr

+
+
+
+ 0 + Searches Triggered +
+
+ 0 + Upgrades Triggered +
+
+
+ + +
+
+ Loading... + +
+
+
+ +
+

Whisparr V2

+
+
+
+ 0 + Searches Triggered +
+
+ 0 + Upgrades Triggered +
+
+
+ + +
+
+ Loading... + +
+
+
+ +
+

Whisparr V3

+
+
+
+ 0 + Searches Triggered +
+
+ 0 + Upgrades Triggered +
+
+
+
+
+
+ +
+
+ + diff --git a/Huntarr.io-6.3.6/frontend/templates/components/logs_section.html b/Huntarr.io-6.3.6/frontend/templates/components/logs_section.html new file mode 100644 index 0000000..4602768 --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/templates/components/logs_section.html @@ -0,0 +1,426 @@ +
+
+ +
+ +
+
+
+ Status: Disconnected +
+
+ + +
+
+
+ +
+
+ + diff --git a/Huntarr.io-6.3.6/frontend/templates/components/scripts.html b/Huntarr.io-6.3.6/frontend/templates/components/scripts.html new file mode 100644 index 0000000..447707d --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/templates/components/scripts.html @@ -0,0 +1,3 @@ + + + diff --git a/Huntarr.io-6.3.6/frontend/templates/components/settings_section.html b/Huntarr.io-6.3.6/frontend/templates/components/settings_section.html new file mode 100644 index 0000000..f86f96e --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/templates/components/settings_section.html @@ -0,0 +1,69 @@ +
+
+

General Settings

+ +
+ +
+
+ +
+
+
+
+ + + + diff --git a/Huntarr.io-6.3.6/frontend/templates/components/sidebar.html b/Huntarr.io-6.3.6/frontend/templates/components/sidebar.html new file mode 100644 index 0000000..8ad1e29 --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/templates/components/sidebar.html @@ -0,0 +1,245 @@ + + + + + diff --git a/Huntarr.io-6.3.6/frontend/templates/components/topbar.html b/Huntarr.io-6.3.6/frontend/templates/components/topbar.html new file mode 100644 index 0000000..169fef2 --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/templates/components/topbar.html @@ -0,0 +1,267 @@ +
+
+
+
+
+
+
+ + Version 6.2.1 +
+
+
+ + Latest: 6.2.1 +
+
+
+ + Beta: Info +
+
+
+ + 764 +
+
+
+ Thanks 4 Using Huntarr - Admin9705 +
+
+
+
+ +
+
+ + + + diff --git a/Huntarr.io-6.3.6/frontend/templates/components/user_profile.html b/Huntarr.io-6.3.6/frontend/templates/components/user_profile.html new file mode 100644 index 0000000..78110a8 --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/templates/components/user_profile.html @@ -0,0 +1,137 @@ +
+
+

Change Username

+
+ +
Loading...
+
+
+ + +
+
+ +
+ +
+ +
+

Change Password

+
+ +
+ + +
+
+
+ +
+ + +
+
+
+ +
+ + +
+
+
+ +
+ +
+ +
+

Two-Factor Authentication

+
+ +
Loading...
+
+ + + + + + +
+
+ + diff --git a/Huntarr.io-6.3.6/frontend/templates/index.html b/Huntarr.io-6.3.6/frontend/templates/index.html new file mode 100644 index 0000000..685f277 --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/templates/index.html @@ -0,0 +1,50 @@ + + + + {% include 'components/head.html' %} + Huntarr - Home + + +
+ {% include 'components/sidebar.html' %} + +
+ {% include 'components/topbar.html' %} + + + {% include 'components/home_section.html' %} + + + {% include 'components/logs_section.html' %} + + + {% include 'components/history_section.html' %} + + + {% include 'components/apps_section.html' %} + + + {% include 'components/cleanuperr_section.html' %} + + + {% include 'components/settings_section.html' %} + +
+
+ + {% include 'components/footer.html' %} + + {% include 'components/scripts.html' %} + + + + + + + + + + + + + \ No newline at end of file diff --git a/Huntarr.io-6.3.6/frontend/templates/login.html b/Huntarr.io-6.3.6/frontend/templates/login.html new file mode 100644 index 0000000..576b965 --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/templates/login.html @@ -0,0 +1,400 @@ + + + + + + Huntarr Login + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/Huntarr.io-6.3.6/frontend/templates/setup.html b/Huntarr.io-6.3.6/frontend/templates/setup.html new file mode 100644 index 0000000..7c765ca --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/templates/setup.html @@ -0,0 +1,768 @@ + + + + + + Setup - Huntarr + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/Huntarr.io-6.3.6/frontend/templates/user.html b/Huntarr.io-6.3.6/frontend/templates/user.html new file mode 100644 index 0000000..34e0736 --- /dev/null +++ b/Huntarr.io-6.3.6/frontend/templates/user.html @@ -0,0 +1,439 @@ + + + + User Settings - Huntarr + {% include 'components/head.html' %} + + + +
+ {% include 'components/sidebar.html' %} + +
+ {% include 'components/topbar.html' %} + +
+
+

Change Username

+
+ + Loading... +
+
+ + +
+
+ + +
+
+ +
+ +
+ +
+

Change Password

+
+ + +
+
+ + +
+
+ + +
+
+ +
+ +
+ +
+

Two-Factor Authentication

+
+ + +
+ + + + + + +
+
+
+
+ + {% include 'components/scripts.html' %} + + + + + \ No newline at end of file diff --git a/Huntarr.io-6.3.6/main.py b/Huntarr.io-6.3.6/main.py new file mode 100644 index 0000000..654484a --- /dev/null +++ b/Huntarr.io-6.3.6/main.py @@ -0,0 +1,217 @@ +#!/usr/bin/env python3 +""" +Main entry point for Huntarr +Starts both the web server and the background processing tasks. +""" + +import os +import threading +import sys +import signal +import logging # Use standard logging for initial setup + +# Ensure the 'src' directory is in the Python path +# This allows importing modules from 'src.primary' etc. +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), 'src'))) + +# --- Early Logging Setup (Before importing app components) --- +# Basic logging to capture early errors during import or setup +log_level = logging.DEBUG if os.environ.get('DEBUG', 'false').lower() == 'true' else logging.INFO +logging.basicConfig(level=log_level, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S') +root_logger = logging.getLogger("HuntarrRoot") # Specific logger for this entry point +root_logger.info("--- Huntarr Main Process Starting ---") +root_logger.info(f"Python sys.path: {sys.path}") + +# Check for Windows service commands +if sys.platform == 'win32' and len(sys.argv) > 1: + if sys.argv[1] == '--install-service': + try: + from src.primary.windows_service import install_service + success = install_service() + sys.exit(0 if success else 1) + except ImportError: + root_logger.error("Failed to import Windows service module. Make sure pywin32 is installed.") + sys.exit(1) + except Exception as e: + root_logger.exception(f"Error installing Windows service: {e}") + sys.exit(1) + elif sys.argv[1] == '--remove-service': + try: + from src.primary.windows_service import remove_service + success = remove_service() + sys.exit(0 if success else 1) + except ImportError: + root_logger.error("Failed to import Windows service module. 
Make sure pywin32 is installed.") + sys.exit(1) + except Exception as e: + root_logger.exception(f"Error removing Windows service: {e}") + sys.exit(1) + elif sys.argv[1] in ['--start', '--stop', '--restart', '--debug', '--update']: + try: + import win32serviceutil + service_name = "Huntarr" + if sys.argv[1] == '--start': + win32serviceutil.StartService(service_name) + print(f"Started {service_name} service") + elif sys.argv[1] == '--stop': + win32serviceutil.StopService(service_name) + print(f"Stopped {service_name} service") + elif sys.argv[1] == '--restart': + win32serviceutil.RestartService(service_name) + print(f"Restarted {service_name} service") + elif sys.argv[1] == '--debug': + # Run the service in debug mode directly + from src.primary.windows_service import HuntarrService + win32serviceutil.HandleCommandLine(HuntarrService) + elif sys.argv[1] == '--update': + # Update the service + win32serviceutil.StopService(service_name) + from src.primary.windows_service import install_service + install_service() + win32serviceutil.StartService(service_name) + print(f"Updated {service_name} service") + sys.exit(0) + except ImportError: + root_logger.error("Failed to import Windows service module. 
Make sure pywin32 is installed.") + sys.exit(1) + except Exception as e: + root_logger.exception(f"Error managing Windows service: {e}") + sys.exit(1) + +try: + # Import the Flask app instance + from primary.web_server import app + # Import the background task starter function and shutdown helpers from the renamed file + from primary.background import start_huntarr, stop_event, shutdown_threads + # Configure logging first + import logging + sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src")) + from primary.utils.logger import setup_main_logger, get_logger + + # Initialize main logger + huntarr_logger = setup_main_logger() + huntarr_logger.info("Successfully imported application components.") +except ImportError as e: + root_logger.critical(f"Fatal Error: Failed to import application components: {e}", exc_info=True) + root_logger.critical("Please ensure the application structure is correct, dependencies are installed (`pip install -r requirements.txt`), and the script is run from the project root.") + sys.exit(1) +except Exception as e: + root_logger.critical(f"Fatal Error: An unexpected error occurred during initial imports: {e}", exc_info=True) + sys.exit(1) + + +def run_background_tasks(): + """Runs the Huntarr background processing.""" + bg_logger = get_logger("HuntarrBackground") # Use app's logger + try: + bg_logger.info("Starting Huntarr background tasks...") + start_huntarr() # This function contains the main loop and shutdown logic + except Exception as e: + bg_logger.exception(f"Critical error in Huntarr background tasks: {e}") + finally: + bg_logger.info("Huntarr background tasks stopped.") + +def run_web_server(): + """Runs the Flask web server using Waitress in production.""" + web_logger = get_logger("WebServer") # Use app's logger + debug_mode = os.environ.get('DEBUG', 'false').lower() == 'true' + host = os.environ.get('FLASK_HOST', '0.0.0.0') + port = int(os.environ.get('PORT', 9705)) # Use PORT for consistency + + 
web_logger.info(f"Starting web server on {host}:{port} (Debug: {debug_mode})...") + + if debug_mode: + # Use Flask's development server for debugging (less efficient, auto-reloads) + # Note: use_reloader=True can cause issues with threads starting twice. + web_logger.warning("Running in DEBUG mode with Flask development server.") + try: + app.run(host=host, port=port, debug=True, use_reloader=False) + except Exception as e: + web_logger.exception(f"Flask development server failed: {e}") + # Signal background thread to stop if server fails critically + if not stop_event.is_set(): + stop_event.set() + else: + # Use Waitress for production + try: + from waitress import serve + web_logger.info("Running with Waitress production server.") + # Adjust threads as needed, default is 4 + serve(app, host=host, port=port, threads=8) + except ImportError: + web_logger.error("Waitress not found. Falling back to Flask development server (NOT recommended for production).") + web_logger.error("Install waitress ('pip install waitress') for production use.") + try: + app.run(host=host, port=port, debug=False, use_reloader=False) + except Exception as e: + web_logger.exception(f"Flask development server (fallback) failed: {e}") + # Signal background thread to stop if server fails critically + if not stop_event.is_set(): + stop_event.set() + except Exception as e: + web_logger.exception(f"Waitress server failed: {e}") + # Signal background thread to stop if server fails critically + if not stop_event.is_set(): + stop_event.set() + +def main_shutdown_handler(signum, frame): + """Gracefully shut down the application.""" + huntarr_logger.warning(f"Received signal {signal.Signals(signum).name}. Initiating shutdown...") + if not stop_event.is_set(): + stop_event.set() + # The rest of the cleanup happens after run_web_server() returns or in the finally block. 
+ +if __name__ == '__main__': + # Register signal handlers for graceful shutdown in the main process + signal.signal(signal.SIGINT, main_shutdown_handler) + signal.signal(signal.SIGTERM, main_shutdown_handler) + + background_thread = None + try: + # Start background tasks in a daemon thread + # Daemon threads exit automatically if the main thread exits unexpectedly, + # but we'll try to join() them for a graceful shutdown. + background_thread = threading.Thread(target=run_background_tasks, name="HuntarrBackground", daemon=True) + background_thread.start() + + # Start the web server in the main thread (blocking) + # This will run until the server is stopped (e.g., by Ctrl+C) + run_web_server() + + except KeyboardInterrupt: + huntarr_logger.info("KeyboardInterrupt received in main thread. Shutting down...") + if not stop_event.is_set(): + stop_event.set() + except Exception as e: + huntarr_logger.exception(f"An unexpected error occurred in the main execution block: {e}") + if not stop_event.is_set(): + stop_event.set() # Ensure shutdown is triggered on unexpected errors + finally: + # --- Cleanup --- + huntarr_logger.info("Web server has stopped. Initiating final shutdown sequence...") + + # Ensure the stop event is set (might already be set by signal handler or error) + if not stop_event.is_set(): + huntarr_logger.warning("Stop event was not set before final cleanup. 
Setting now.") + stop_event.set() + + # Wait for the background thread to finish cleanly + if background_thread and background_thread.is_alive(): + huntarr_logger.info("Waiting for background tasks to complete...") + background_thread.join(timeout=30) # Wait up to 30 seconds + + if background_thread.is_alive(): + huntarr_logger.warning("Background thread did not stop gracefully within the timeout.") + elif background_thread: + huntarr_logger.info("Background thread already stopped.") + else: + huntarr_logger.info("Background thread was not started.") + + # Call the shutdown_threads function from primary.main (if it does more than just join) + # This might be redundant if start_huntarr handles its own cleanup via stop_event + # huntarr_logger.info("Calling shutdown_threads()...") + # shutdown_threads() # Uncomment if primary.main.shutdown_threads() does more cleanup + + huntarr_logger.info("--- Huntarr Main Process Exiting ---") + # Use os._exit(0) for a more forceful exit if necessary, but sys.exit(0) is generally preferred + sys.exit(0) \ No newline at end of file diff --git a/Huntarr.io-6.3.6/requirements.txt b/Huntarr.io-6.3.6/requirements.txt new file mode 100644 index 0000000..419b6de --- /dev/null +++ b/Huntarr.io-6.3.6/requirements.txt @@ -0,0 +1,7 @@ +Flask==3.0.0 +requests==2.31.0 +waitress==2.1.2 +bcrypt==4.1.2 +qrcode[pil]==7.4.2 # Added qrcode with PIL support +pyotp==2.9.0 # Added pyotp +pywin32==306; sys_platform == 'win32' # For Windows service support \ No newline at end of file diff --git a/Huntarr.io-6.3.6/routes.py b/Huntarr.io-6.3.6/routes.py new file mode 100644 index 0000000..fe3f483 --- /dev/null +++ b/Huntarr.io-6.3.6/routes.py @@ -0,0 +1,62 @@ +from flask import Flask, render_template, request, redirect, send_file + +app = Flask(__name__) + +import os +import json + +def get_ui_preference(): + """Determine which UI to use based on config and user preference""" + # Check if ui_settings.json exists + config_file = 
os.path.join(os.path.dirname(__file__), 'config/ui_settings.json') + + use_new_ui = False + + if os.path.exists(config_file): + try: + with open(config_file, 'r') as f: + settings = json.load(f) + use_new_ui = settings.get('use_new_ui', False) + except Exception as e: + print(f"Error loading UI settings: {e}") + + # Allow URL parameter to override + ui_param = request.args.get('ui', None) + if ui_param == 'new': + use_new_ui = True + elif ui_param == 'classic': + use_new_ui = False + + return use_new_ui + +@app.route('/') +def index(): + """Root route with UI switching capability""" + if get_ui_preference(): + return redirect('/new') + else: + return render_template('index.html') + +@app.route('/user') +def user_page(): + """User settings page with UI switching capability""" + return render_template('user.html') + +@app.route('/user/new') +def user_new_page(): + """User settings page for new UI""" + return render_template('user.html') + +@app.route('/version.txt') +def version_txt(): + """Serve version.txt file directly""" + version_path = os.path.join(os.path.dirname(__file__), 'version.txt') + print(f"Serving version.txt from path: {version_path}") # Debug log + try: + return send_file(version_path, mimetype='text/plain') + except Exception as e: + print(f"Error serving version.txt: {e}") # Log any errors + return str(e), 500 # Return error message and 500 status code + +if __name__ == '__main__': + app.run(debug=True) \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/__init__.py b/Huntarr.io-6.3.6/src/primary/__init__.py new file mode 100644 index 0000000..851281f --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/__init__.py @@ -0,0 +1,6 @@ +""" +Huntarr - Find Missing & Upgrade Media Items +A unified tool for Sonarr, Radarr, Lidarr, and Readarr +""" + +__version__ = "4.0.0" \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/api.py b/Huntarr.io-6.3.6/src/primary/api.py new file mode 100644 index 0000000..50d0e5b --- 
/dev/null +++ b/Huntarr.io-6.3.6/src/primary/api.py @@ -0,0 +1,389 @@ +#!/usr/bin/env python3 +""" +Arr API Helper Functions +Handles all communication with the Arr API +""" + +import requests +import time +from typing import List, Dict, Any, Optional, Union +from primary.utils.logger import logger, debug_log +from primary.config import API_KEY, API_URL, API_TIMEOUT, COMMAND_WAIT_DELAY, COMMAND_WAIT_ATTEMPTS, APP_TYPE +from src.primary.stats_manager import get_stats, reset_stats + +# Create a session for reuse +session = requests.Session() + +def arr_request(endpoint: str, method: str = "GET", data: Dict = None) -> Optional[Union[Dict, List]]: + """ + Make a request to the Arr API. + `endpoint` should be something like 'series', 'command', 'wanted/cutoff', etc. + """ + # Determine the API version based on app type + if APP_TYPE == "sonarr": + api_base = "api/v3" + elif APP_TYPE == "radarr": + api_base = "api/v3" + elif APP_TYPE == "lidarr": + api_base = "api/v1" + elif APP_TYPE == "readarr": + api_base = "api/v1" + else: + # Default to v3 for unknown app types + api_base = "api/v3" + + url = f"{API_URL}/{api_base}/{endpoint}" + headers = { + "X-Api-Key": API_KEY, + "Content-Type": "application/json" + } + + try: + if method.upper() == "GET": + response = session.get(url, headers=headers, timeout=API_TIMEOUT) + elif method.upper() == "POST": + response = session.post(url, headers=headers, json=data, timeout=API_TIMEOUT) + else: + logger.error(f"Unsupported HTTP method: {method}") + return None + + # Check for 401 Unauthorized or other error status codes + if response.status_code == 401: + logger.error(f"API request error: 401 Client Error: Unauthorized for url: {url}") + return None + + response.raise_for_status() + return response.json() + except requests.exceptions.RequestException as e: + logger.error(f"API request error: {e}") + return None + +def check_connection(app_type: str = None) -> bool: + """ + Check if we can connect to the Arr API. 
+ Returns True if connection is successful, False otherwise. + + Args: + app_type: Optional app type to check connection for (sonarr, radarr, etc.). + If None, uses the global APP_TYPE. + """ + # Determine which app type to use + current_app_type = app_type or APP_TYPE + + # Get API credentials for the specified app type + from primary import keys_manager + api_url, api_key = keys_manager.get_api_keys(current_app_type) + + # First explicitly check if API URL and Key are configured + if not api_url: + logger.error(f"API URL is not configured for {current_app_type} in settings. Please set it up in the Settings page.") + return False + + if not api_key: + logger.error(f"API Key is not configured for {current_app_type} in settings. Please set it up in the Settings page.") + return False + + # Log what we're attempting to connect to + logger.debug(f"Attempting to connect to {current_app_type.title()} at {api_url}") + + # Try to access the system/status endpoint which should be available on all Arr applications + try: + endpoint = "system/status" + + # Determine the API version based on app type + if current_app_type == "sonarr": + api_base = "api/v3" + elif current_app_type == "radarr": + api_base = "api/v3" + elif current_app_type == "lidarr": + api_base = "api/v1" + elif current_app_type == "readarr": + api_base = "api/v1" + else: + # Default to v3 for unknown app types + api_base = "api/v3" + + url = f"{api_url}/{api_base}/{endpoint}" + headers = { + "X-Api-Key": api_key, + "Content-Type": "application/json" + } + + logger.debug(f"Testing connection with URL: {url}") + response = session.get(url, headers=headers, timeout=API_TIMEOUT) + + if response.status_code == 401: + logger.error(f"Connection test failed: 401 Client Error: Unauthorized - Invalid API key for {current_app_type.title()}") + return False + + response.raise_for_status() + logger.info(f"Connection to {current_app_type.title()} at {api_url} successful") + return True + except 
requests.exceptions.RequestException as e: + logger.error(f"Connection test failed for {current_app_type}: {e}") + return False + +def wait_for_command(command_id: int): + logger.debug(f"Waiting for command {command_id} to complete...") + attempts = 0 + while True: + try: + time.sleep(COMMAND_WAIT_DELAY) + response = arr_request(f"command/{command_id}") + logger.debug(f"Command {command_id} Status: {response['status']}") + except Exception as error: + logger.error(f"Error fetching command status on attempt {attempts + 1}: {error}") + return False + + attempts += 1 + + if response['status'].lower() in ['complete', 'completed'] or attempts >= COMMAND_WAIT_ATTEMPTS: + break + + if response['status'].lower() not in ['complete', 'completed']: + logger.warning(f"Command {command_id} did not complete within the allowed attempts.") + return False + + time.sleep(0.5) + + return response['status'].lower() in ['complete', 'completed'] + +# Sonarr-specific functions +def get_series() -> List[Dict]: + """Get all series from Sonarr.""" + if APP_TYPE != "sonarr": + logger.error("get_series() called but APP_TYPE is not sonarr") + return [] + + series_list = arr_request("series") + if series_list: + debug_log("Raw series API response sample:", series_list[:2] if len(series_list) > 2 else series_list) + return series_list or [] + +def refresh_series(series_id: int) -> bool: + """ + POST /api/v5/command + { + "name": "RefreshSeries", + "seriesId": + } + """ + if APP_TYPE != "sonarr": + logger.error("refresh_series() called but APP_TYPE is not sonarr") + return False + + data = { + "name": "RefreshSeries", + "seriesId": series_id + } + response = arr_request("command", method="POST", data=data) + if not response or 'id' not in response: + return False + return wait_for_command(response['id']) + +def episode_search_episodes(episode_ids: List[int]) -> bool: + """ + POST /api/v5/command + { + "name": "EpisodeSearch", + "episodeIds": [...] 
+ } + """ + if APP_TYPE != "sonarr": + logger.error("episode_search_episodes() called but APP_TYPE is not sonarr") + return False + + data = { + "name": "EpisodeSearch", + "episodeIds": episode_ids + } + response = arr_request("command", method="POST", data=data) + if not response or 'id' not in response: + return False + return wait_for_command(response['id']) + +def get_download_queue_size() -> int: + """ + GET /api/v5/queue + Returns total number of items in the queue with the status 'downloading'. + """ + # Endpoint is the same for all apps + response = arr_request("queue?status=downloading") + if not response: + return 0 + + total_records = response.get("totalRecords", 0) + if not isinstance(total_records, int): + total_records = 0 + logger.debug(f"Download Queue Size: {total_records}") + + return total_records + +def get_cutoff_unmet(page: int = 1) -> Optional[Dict]: + """ + GET /api/v5/wanted/cutoff?sortKey=airDateUtc&sortDirection=descending&includeSeriesInformation=true + &page=&pageSize=200 + Returns JSON with a "records" array and "totalRecords". + """ + if APP_TYPE != "sonarr": + logger.error("get_cutoff_unmet() called but APP_TYPE is not sonarr") + return None + + endpoint = ( + "wanted/cutoff?" + "sortKey=airDateUtc&sortDirection=descending&includeSeriesInformation=true" + f"&page={page}&pageSize=200" + ) + return arr_request(endpoint, method="GET") + +def get_cutoff_unmet_total_pages() -> int: + """ + To find total pages, call the endpoint with page=1&pageSize=1, read totalRecords, + then compute how many pages if each pageSize=200. 
+ """ + if APP_TYPE != "sonarr": + logger.error("get_cutoff_unmet_total_pages() called but APP_TYPE is not sonarr") + return 0 + + response = arr_request("wanted/cutoff?page=1&pageSize=1") + if not response or "totalRecords" not in response: + return 0 + + total_records = response.get("totalRecords", 0) + if not isinstance(total_records, int) or total_records < 1: + return 0 + + # Each page has up to 200 episodes + total_pages = (total_records + 200 - 1) // 200 + return max(total_pages, 1) + +def get_episodes_for_series(series_id: int) -> Optional[List[Dict]]: + """Get all episodes for a specific series""" + if APP_TYPE != "sonarr": + logger.error("get_episodes_for_series() called but APP_TYPE is not sonarr") + return None + + return arr_request(f"episode?seriesId={series_id}", method="GET") + +def get_missing_episodes(pageSize: int = 1000) -> Optional[Dict]: + """ + GET /api/v5/wanted/missing?pageSize=&includeSeriesInformation=true + Returns JSON with a "records" array of missing episodes and "totalRecords". + """ + if APP_TYPE != "sonarr": + logger.error("get_missing_episodes() called but APP_TYPE is not sonarr") + return None + + endpoint = f"wanted/missing?pageSize={pageSize}&includeSeriesInformation=true" + result = arr_request(endpoint, method="GET") + + # Better debugging for missing episodes query + if result: + logger.debug(f"Found {result.get('totalRecords', 0)} total missing episodes") + if result.get('records'): + logger.debug(f"First few missing episodes: {result['records'][:2] if len(result['records']) > 2 else result['records']}") + else: + logger.warning("Missing episodes query returned no data") + + return result + +def get_series_with_missing_episodes() -> List[Dict]: + """ + Fetch all shows that have missing episodes using the wanted/missing endpoint. + Returns a list of series objects with an additional 'missingEpisodes' field + containing the list of missing episodes for that series. 
+ """ + if APP_TYPE != "sonarr": + logger.error("get_series_with_missing_episodes() called but APP_TYPE is not sonarr") + return [] + + # Log request attempt + logger.debug("Requesting missing episodes from Sonarr API") + + missing_data = get_missing_episodes() + if not missing_data or "records" not in missing_data: + logger.error("Failed to get missing episodes data or no 'records' field in response") + return [] + + # Group missing episodes by series ID + series_with_missing = {} + for episode in missing_data.get("records", []): + series_id = episode.get("seriesId") + if not series_id: + logger.warning(f"Found episode without seriesId: {episode}") + continue + + series_title = None + + # Try to get series info from the episode record + if "series" in episode and isinstance(episode["series"], dict): + series_info = episode["series"] + series_title = series_info.get("title") + + # Initialize the series entry if it doesn't exist + if series_id not in series_with_missing: + series_with_missing[series_id] = { + "id": series_id, + "title": series_title or "Unknown Show", + "monitored": series_info.get("monitored", False), + "missingEpisodes": [] + } + else: + # If we don't have series info, need to fetch it + if series_id not in series_with_missing: + # Get series info directly + series_info = arr_request(f"series/{series_id}", method="GET") + if series_info: + series_with_missing[series_id] = { + "id": series_id, + "title": series_info.get("title", "Unknown Show"), + "monitored": series_info.get("monitored", False), + "missingEpisodes": [] + } + else: + logger.warning(f"Could not get series info for ID {series_id}, skipping episode") + continue + + # Add the episode to the series record + if series_id in series_with_missing: + series_with_missing[series_id]["missingEpisodes"].append(episode) + + # Convert to list and add count for convenience + result = [] + for series_id, series_data in series_with_missing.items(): + series_data["missingEpisodeCount"] = 
len(series_data["missingEpisodes"]) + result.append(series_data) + + logger.debug(f"Processed missing episodes data into {len(result)} series with missing episodes") + return result + +def get_media_stats(): + """Get statistics for hunted and upgraded media""" + try: + stats = get_stats() + return jsonify({ + "success": True, + "stats": stats + }) + except Exception as e: + logger.error(f"Error retrieving media statistics: {e}") + return jsonify({ + "success": False, + "message": "Error retrieving media statistics." + }), 500 + +def reset_media_stats(): + """Reset statistics for hunted and upgraded media""" + try: + app_type = request.json.get('app_type') if request.json else None + reset_stats(app_type) + return jsonify({ + "success": True, + "message": f"Successfully reset statistics for {'all apps' if app_type is None else app_type}." + }) + except Exception as e: + logger.error(f"Error resetting media statistics: {e}") + return jsonify({ + "success": False, + "message": "Error resetting media statistics." 
+ }), 500 \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/app.py b/Huntarr.io-6.3.6/src/primary/app.py new file mode 100644 index 0000000..5e78d06 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/app.py @@ -0,0 +1,129 @@ +import logging +import json +import pathlib +from datetime import datetime +import time + +class WebAddressFilter(logging.Filter): + """Filter out web interface availability messages""" + def filter(self, record): + if "Web interface available at http://" in record.getMessage(): + return False + return True + +def configure_logging(): + # Get timezone set in the environment (this will be updated when user changes the timezone in UI) + try: + # Create a custom formatter that includes timezone information + class TimezoneFormatter(logging.Formatter): + def formatTime(self, record, datefmt=None): + ct = self.converter(record.created) + if datefmt: + return time.strftime(datefmt, ct) + else: + # Include timezone in the timestamp + return time.strftime("%Y-%m-%d %H:%M:%S %z", ct) + + # Configure the formatter for all handlers + formatter = TimezoneFormatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + + # Reset the root logger and reconfigure with proper timezone handling + for handler in logging.root.handlers[:]: + logging.root.removeHandler(handler) + + logging.basicConfig(level=logging.INFO) + + # Apply the formatter to all handlers + for handler in logging.root.handlers: + handler.setFormatter(formatter) + + except Exception as e: + # Fallback to basic logging if any issues + logging.basicConfig(level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') + logging.error(f"Error setting up timezone-aware logging: {e}") + + # Add filter to remove web interface URL logs + for handler in logging.root.handlers: + handler.addFilter(WebAddressFilter()) + + logging.info("Logging is configured.") + +def migrate_settings(): + """Migrate settings from nested to flat structure""" + # Settings file 
path + SETTINGS_DIR = pathlib.Path("/config") + SETTINGS_FILE = SETTINGS_DIR / "huntarr.json" + + if not SETTINGS_FILE.exists(): + logging.info(f"Settings file {SETTINGS_FILE} does not exist, nothing to migrate.") + return + + try: + # Read current settings + with open(SETTINGS_FILE, "r", encoding="utf-8") as file: + settings = json.load(file) + + # Flag to track if changes were made + changes_made = False + + # Check and migrate each app's settings + for app in ["sonarr", "radarr", "lidarr", "readarr"]: + if app in settings and "huntarr" in settings[app]: + logging.info(f"Found nested huntarr section in {app}, migrating...") + + # Move all settings from app.huntarr to app level + for key, value in settings[app]["huntarr"].items(): + if key not in settings[app]: + settings[app][key] = value + + # Remove the huntarr section + del settings[app]["huntarr"] + changes_made = True + + # Check for advanced section + if app in settings and "advanced" in settings[app]: + logging.info(f"Found advanced section in {app}, migrating...") + + # Move all settings from app.advanced to app level + for key, value in settings[app]["advanced"].items(): + if key not in settings[app]: + settings[app][key] = value + + # Remove the advanced section + del settings[app]["advanced"] + changes_made = True + + # Remove global section if present + if "global" in settings: + logging.info("Removing global section...") + del settings["global"] + changes_made = True + + # Remove UI section if present + if "ui" in settings: + logging.info("Removing UI section...") + del settings["ui"] + changes_made = True + + # Save changes if needed + if changes_made: + with open(SETTINGS_FILE, "w", encoding="utf-8") as file: + json.dump(settings, file, indent=2) + logging.info("Settings migration completed successfully.") + else: + logging.info("No changes needed, settings are already in the correct format.") + + except Exception as e: + logging.error(f"Error migrating settings: {e}") + +if __name__ == "__main__": 
+ configure_logging() + logging.info("Starting Huntarr application") + + # Migrate settings to flat structure + migrate_settings() + + # Using filtered logging + logging.info("Web interface available at http://localhost:8080") + logging.info("Application started") \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/app_manager.py b/Huntarr.io-6.3.6/src/primary/app_manager.py new file mode 100644 index 0000000..7eb80d4 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/app_manager.py @@ -0,0 +1,41 @@ +# If this file doesn't exist, we'll create it + +import os +from src.primary.utils.logger import get_logger +from src.primary.settings_manager import load_settings + +logger = get_logger("app_manager") + +# List of supported app types +SUPPORTED_APP_TYPES = ["sonarr", "radarr", "lidarr", "readarr", "whisparr", "eros"] + +def initialize_apps(): + """Initialize all supported applications""" + for app_type in SUPPORTED_APP_TYPES: + initialize_app(app_type) + + # Also load general settings but don't treat it as a regular app + load_general_settings() + +def initialize_app(app_type): + """Initialize a specific application""" + if app_type not in SUPPORTED_APP_TYPES: + logger.warning(f"Attempted to initialize unsupported app type: {app_type}") + return False + + # Load settings for this app + settings = load_settings(app_type) + + # Additional initialization as needed + # ... + + return True + +def load_general_settings(): + """Load general settings without treating it as a regular app""" + settings = load_settings("general") + logger.info("--- Configuration for general ---") + # Log the settings as needed + # ... 
+ logger.info("--- End Configuration for general ---") + return settings diff --git a/Huntarr.io-6.3.6/src/primary/apps/blueprints.py b/Huntarr.io-6.3.6/src/primary/apps/blueprints.py new file mode 100644 index 0000000..786e29e --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/blueprints.py @@ -0,0 +1,24 @@ +""" +Centralized blueprint imports +This module provides a single location to import all app blueprints +to avoid circular import issues +""" + +# Import blueprints from the renamed route files +from src.primary.apps.sonarr_routes import sonarr_bp +from src.primary.apps.radarr_routes import radarr_bp +from src.primary.apps.lidarr_routes import lidarr_bp +from src.primary.apps.readarr_routes import readarr_bp +from src.primary.apps.whisparr_routes import whisparr_bp +from src.primary.apps.swaparr_routes import swaparr_bp +from src.primary.apps.eros_routes import eros_bp + +__all__ = [ + "sonarr_bp", + "radarr_bp", + "lidarr_bp", + "readarr_bp", + "whisparr_bp", + "swaparr_bp", + "eros_bp" +] \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/apps/eros.py b/Huntarr.io-6.3.6/src/primary/apps/eros.py new file mode 100644 index 0000000..9bbd866 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/eros.py @@ -0,0 +1,171 @@ +from flask import Blueprint, request, jsonify +import datetime, os, requests +from primary import keys_manager +from src.primary.utils.logger import get_logger +from src.primary.state import get_state_file_path +from src.primary.settings_manager import load_settings, settings_manager + +eros_bp = Blueprint('eros', __name__) +eros_logger = get_logger("eros") + +# Make sure we're using the correct state files +PROCESSED_MISSING_FILE = get_state_file_path("eros", "processed_missing") +PROCESSED_UPGRADES_FILE = get_state_file_path("eros", "processed_upgrades") + +@eros_bp.route('/test-connection', methods=['POST']) +def test_connection(): + """Test connection to an Eros API instance with comprehensive diagnostics""" + data = 
request.json + api_url = data.get('api_url') + api_key = data.get('api_key') + api_timeout = data.get('api_timeout', 30) # Use longer timeout for connection test + + if not api_url or not api_key: + return jsonify({"success": False, "message": "API URL and API Key are required"}), 400 + + # Log the test attempt + eros_logger.info(f"Testing connection to Eros API at {api_url}") + + # First check if URL is properly formatted + if not (api_url.startswith('http://') or api_url.startswith('https://')): + error_msg = "API URL must start with http:// or https://" + eros_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 400 + + # Try multiple API path combinations to handle different Whisparr V3/Eros setups + api_paths = [ + "/api/v3/system/status", # Standard V3 path + "/api/system/status", # Standard V2 path that might still work + "/system/status" # Direct path without /api prefix + ] + + success = False + last_error = None + response_data = None + + for api_path in api_paths: + test_url = f"{api_url.rstrip('/')}{api_path}" + headers = {'X-Api-Key': api_key} + eros_logger.debug(f"Trying Eros API path: {test_url}") + + try: + # Use a connection timeout separate from read timeout + response = requests.get(test_url, headers=headers, timeout=(10, api_timeout)) + + # Log HTTP status code for diagnostic purposes + eros_logger.debug(f"Eros API status code: {response.status_code} for path {api_path}") + + # Check HTTP status code + if response.status_code == 404: + # Try next path if 404 + continue + + response.raise_for_status() + + # Ensure the response is valid JSON + try: + response_data = response.json() + eros_logger.debug(f"Eros API response: {response_data}") + + # Verify this is actually an Eros API by checking for version + version = response_data.get('version', None) + if not version: + # No version info, try next path + last_error = "API response doesn't contain version information" + continue + + # The version number should start 
with 3 for Eros + if version.startswith('3'): + eros_logger.info(f"Successfully connected to Eros API version {version} using path {api_path}") + success = True + break + elif version.startswith('2'): + error_msg = f"Connected to Whisparr V2 (version {version}). Use the Whisparr integration for V2." + eros_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 400 + else: + # Connected to some other version, try next path + last_error = f"Connected to unknown version {version}, but Huntarr requires Eros V3" + continue + + except ValueError: + last_error = "Invalid JSON response from API" + continue + + except requests.exceptions.Timeout: + last_error = f"Connection timed out after {api_timeout} seconds" + continue + + except requests.exceptions.ConnectionError: + last_error = "Failed to connect. Check that the URL is correct and that Eros is running." + continue + + except requests.exceptions.HTTPError as e: + last_error = f"HTTP error: {str(e)}" + continue + + except Exception as e: + last_error = f"Unexpected error: {str(e)}" + continue + + # After trying all paths + if success: + return jsonify({ + "success": True, + "message": f"Successfully connected to Eros (version {response_data.get('version')})", + "version": response_data.get('version') + }) + else: + error_msg = last_error or "Failed to connect to Eros API. Please check your URL and API key." 
+ eros_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 400 + +# Function to check if Eros is configured +def is_configured(): + """Check if Eros API credentials are configured""" + try: + settings = load_settings("eros") + instances = settings.get("instances", []) + + for instance in instances: + if instance.get("enabled", True): + return True + + return False + except Exception as e: + eros_logger.error(f"Error checking if Eros is configured: {str(e)}") + return False + +# Get all valid instances from settings +def get_configured_instances(): + """Get all configured and enabled Eros instances""" + try: + settings = load_settings("eros") + instances = settings.get("instances", []) + + enabled_instances = [] + for instance in instances: + if not instance.get("enabled", True): + continue + + api_url = instance.get("api_url") + api_key = instance.get("api_key") + + if not api_url or not api_key: + continue + + # Add name and timeout + instance_name = instance.get("name", "Default") + api_timeout = instance.get("api_timeout", 90) + + enabled_instances.append({ + "api_url": api_url, + "api_key": api_key, + "instance_name": instance_name, + "api_timeout": api_timeout + }) + + return enabled_instances + except Exception as e: + eros_logger.error(f"Error getting configured Eros instances: {str(e)}") + return [] diff --git a/Huntarr.io-6.3.6/src/primary/apps/eros/__init__.py b/Huntarr.io-6.3.6/src/primary/apps/eros/__init__.py new file mode 100644 index 0000000..896c14a --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/eros/__init__.py @@ -0,0 +1,95 @@ +""" +Eros app module for Huntarr +Contains functionality for missing items and quality upgrades in Eros + +Exclusively supports the v3 API. 
+""" + +# Module exports +from src.primary.apps.eros.missing import process_missing_items +from src.primary.apps.eros.upgrade import process_cutoff_upgrades +from src.primary.settings_manager import load_settings +from src.primary.utils.logger import get_logger + +# Define logger for this module +eros_logger = get_logger("eros") + +# For backward compatibility +process_missing_scenes = process_missing_items + +def get_configured_instances(): + """Get all configured and enabled Eros instances""" + settings = load_settings("eros") + instances = [] + # Use debug level to avoid log spam on new installations + eros_logger.debug(f"Loaded Eros settings for instance check: {settings}") + + if not settings: + eros_logger.debug("No settings found for Eros") + return instances + + # Always use Eros V3 API + # Use debug level to avoid log spam on new installations + eros_logger.debug("Using Eros API v3 exclusively") + + # Check if instances are configured + if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]: + # Use debug level to avoid log spam on new installations + eros_logger.debug(f"Found 'instances' list with {len(settings['instances'])} items. 
Processing...") + for idx, instance in enumerate(settings["instances"]): + eros_logger.debug(f"Checking instance #{idx}: {instance}") + # Enhanced validation + api_url = instance.get("api_url", "").strip() + api_key = instance.get("api_key", "").strip() + + # Enhanced URL validation - ensure URL has proper scheme + if api_url and not (api_url.startswith('http://') or api_url.startswith('https://')): + eros_logger.warning(f"Instance '{instance.get('name', 'Unnamed')}' has URL without http(s) scheme: {api_url}") + api_url = f"http://{api_url}" + eros_logger.warning(f"Auto-correcting URL to: {api_url}") + + is_enabled = instance.get("enabled", True) + + # Only include properly configured instances + if is_enabled and api_url and api_key: + instance_name = instance.get("name", "Default") + + # Create a settings object for this instance by combining global settings with instance-specific ones + instance_settings = settings.copy() + + # Remove instances list to avoid confusion + if "instances" in instance_settings: + del instance_settings["instances"] + + # Override with instance-specific settings + instance_settings["api_url"] = api_url + instance_settings["api_key"] = api_key + instance_settings["instance_name"] = instance_name + + # Add timeout setting with default if not present + if "api_timeout" not in instance_settings: + instance_settings["api_timeout"] = 30 + + # Use debug level to prevent log spam + eros_logger.debug(f"Adding configured Eros instance: {instance_name}") + instances.append(instance_settings) + else: + name = instance.get("name", "Unnamed") + if not is_enabled: + eros_logger.debug(f"Skipping disabled instance: {name}") + else: + # For brand new installations, don't spam logs with warnings about default instances + if name == 'Default': + # Use debug level for default instances to avoid log spam on new installations + eros_logger.debug(f"Skipping instance {name} due to missing API URL or API Key") + else: + # Still log warnings for non-default 
instances + eros_logger.warning(f"Skipping instance {name} due to missing API URL or API Key") + else: + eros_logger.debug("No instances array found in settings or it's empty") + + # Use debug level to avoid spamming logs, especially with 0 instances + eros_logger.debug(f"Found {len(instances)} configured and enabled Eros instances") + return instances + +__all__ = ["process_missing_items", "process_missing_scenes", "process_cutoff_upgrades", "get_configured_instances"] diff --git a/Huntarr.io-6.3.6/src/primary/apps/eros/api.py b/Huntarr.io-6.3.6/src/primary/apps/eros/api.py new file mode 100644 index 0000000..b403bda --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/eros/api.py @@ -0,0 +1,517 @@ +#!/usr/bin/env python3 +""" +Eros-specific API functions +Handles all communication with the Eros API + +Exclusively uses the Eros API v3 +""" + +import requests +import json +import time +import datetime +import traceback +import sys +from typing import List, Dict, Any, Optional, Union +from src.primary.utils.logger import get_logger + +# Get logger for the Eros app +eros_logger = get_logger("eros") + +# Use a session for better performance +session = requests.Session() + +def arr_request(api_url: str, api_key: str, api_timeout: int, endpoint: str, method: str = "GET", data: Dict = None) -> Any: + """ + Make a request to the Eros API. + + Args: + api_url: The base URL of the Eros API + api_key: The API key for authentication + api_timeout: Timeout for the API request + endpoint: The API endpoint to call + method: HTTP method (GET, POST, PUT, DELETE) + data: Optional data to send with the request + + Returns: + The JSON response from the API, or None if the request failed + """ + if not api_url or not api_key: + eros_logger.error("API URL or API key is missing. 
Check your settings.") + return None + + # Always use v3 API path + api_base = "api/v3" + eros_logger.debug(f"Using Eros API path: {api_base}") + + # Full URL - ensure no double slashes + url = f"{api_url.rstrip('/')}/{api_base}/{endpoint.lstrip('/')}" + + # Add debug logging for the exact URL being called + eros_logger.debug(f"Making {method} request to: {url}") + + # Headers + headers = { + "X-Api-Key": api_key, + "Content-Type": "application/json" + } + + try: + if method == "GET": + response = session.get(url, headers=headers, timeout=api_timeout) + elif method == "POST": + response = session.post(url, headers=headers, json=data, timeout=api_timeout) + elif method == "PUT": + response = session.put(url, headers=headers, json=data, timeout=api_timeout) + elif method == "DELETE": + response = session.delete(url, headers=headers, timeout=api_timeout) + else: + eros_logger.error(f"Unsupported HTTP method: {method}") + return None + + # Check if the request was successful + try: + response.raise_for_status() + except requests.exceptions.HTTPError as e: + eros_logger.error(f"Error during {method} request to {endpoint}: {e}, Status Code: {response.status_code}") + eros_logger.debug(f"Response content: {response.text[:200]}") + return None + + # Try to parse JSON response + try: + if response.text: + result = response.json() + eros_logger.debug(f"Response from {response.url}: Status {response.status_code}, JSON parsed successfully") + return result + else: + eros_logger.debug(f"Response from {response.url}: Status {response.status_code}, Empty response") + return {} + except json.JSONDecodeError: + eros_logger.error(f"Invalid JSON response from API: {response.text[:200]}") + return None + + except requests.exceptions.RequestException as e: + eros_logger.error(f"Request failed: {e}") + return None + except Exception as e: + eros_logger.error(f"Unexpected error during API request: {e}") + return None + +def get_download_queue_size(api_url: str, api_key: str, api_timeout: 
int) -> int: + """ + Get the current size of the download queue. + + Args: + api_url: The base URL of the Eros API + api_key: The API key for authentication + api_timeout: Timeout for the API request + + Returns: + The number of items in the download queue, or -1 if the request failed + """ + response = arr_request(api_url, api_key, api_timeout, "queue") + + if response is None: + return -1 + + # V3 API returns a list directly + if isinstance(response, list): + return len(response) + # Fallback to records format if needed + elif isinstance(response, dict) and "records" in response: + return len(response["records"]) + else: + return -1 + +def get_items_with_missing(api_url: str, api_key: str, api_timeout: int, monitored_only: bool, search_mode: str = "movie") -> List[Dict[str, Any]]: + """ + Get a list of items with missing files (not downloaded/available). + + Args: + api_url: The base URL of the Eros API + api_key: The API key for authentication + api_timeout: Timeout for the API request + monitored_only: If True, only return monitored items. + search_mode: The search mode to use - 'movie' for movie-based or 'scene' for scene-based + + Returns: + A list of item objects with missing files, or None if the request failed. 
+ """ + try: + eros_logger.debug(f"Retrieving missing items using search mode: {search_mode}...") + + if search_mode == "movie": + # In movie mode, we get all movies and filter for ones without files + endpoint = "movie" + + response = arr_request(api_url, api_key, api_timeout, endpoint) + + if response is None: + return None + + # Extract the movies with missing files + items = [] + if isinstance(response, list): + # Filter for movies that don't have files (hasFile = false) + items = [item for item in response if not item.get("hasFile", True)] + elif isinstance(response, dict) and "records" in response: + # Fallback to old format if somehow it returns in this format + items = [item for item in response["records"] if not item.get("hasFile", True)] + + elif search_mode == "scene": + # In scene mode, we try to use scene-specific endpoints + # First check if the movie-scene endpoint exists + endpoint = "scene/missing?pageSize=1000" + + response = arr_request(api_url, api_key, api_timeout, endpoint) + + if response is None: + # Fallback to regular movie filtering if scene endpoint doesn't exist + eros_logger.warning("Scene endpoint not available, falling back to movie mode") + return get_items_with_missing(api_url, api_key, api_timeout, monitored_only, "movie") + + # Extract the scenes + items = [] + if isinstance(response, dict) and "records" in response: + items = response["records"] + elif isinstance(response, list): + items = response + + else: + # Invalid search mode + eros_logger.error(f"Invalid search mode: {search_mode}. 
Must be 'movie' or 'scene'") + return None + + # Filter monitored if needed + if monitored_only: + items = [item for item in items if item.get("monitored", False)] + + eros_logger.debug(f"Found {len(items)} missing items using {search_mode} mode") + + return items + + except Exception as e: + eros_logger.error(f"Error retrieving missing items: {str(e)}") + return None + +def get_cutoff_unmet_items(api_url: str, api_key: str, api_timeout: int, monitored_only: bool) -> List[Dict[str, Any]]: + """ + Get a list of items that don't meet their quality profile cutoff. + + Args: + api_url: The base URL of the Eros API + api_key: The API key for authentication + api_timeout: Timeout for the API request + monitored_only: If True, only return monitored items. + + Returns: + A list of item objects that need quality upgrades, or None if the request failed. + """ + try: + eros_logger.debug(f"Retrieving cutoff unmet items...") + + # Endpoint + endpoint = "wanted/cutoff?pageSize=1000&sortKey=airDateUtc&sortDirection=descending" + + response = arr_request(api_url, api_key, api_timeout, endpoint) + + if response is None: + return None + + # Extract the episodes/items + items = [] + if isinstance(response, dict) and "records" in response: + items = response["records"] + elif isinstance(response, list): + items = response + + eros_logger.debug(f"Found {len(items)} cutoff unmet items") + + # Just filter monitored if needed + if monitored_only: + items = [item for item in items if item.get("monitored", False)] + eros_logger.debug(f"Found {len(items)} cutoff unmet items after filtering monitored") + + return items + + except Exception as e: + eros_logger.error(f"Error retrieving cutoff unmet items: {str(e)}") + return None + +def get_quality_upgrades(api_url: str, api_key: str, api_timeout: int, monitored_only: bool, search_mode: str = "movie") -> List[Dict[str, Any]]: + """ + Get a list of items that can be upgraded to better quality. 
+ + Args: + api_url: The base URL of the Eros API + api_key: The API key for authentication + api_timeout: Timeout for the API request + monitored_only: If True, only return monitored items. + search_mode: The search mode to use - 'movie' for movie-based or 'scene' for scene-based + + Returns: + A list of item objects that need quality upgrades, or None if the request failed. + """ + try: + eros_logger.debug(f"Retrieving quality upgrade items using search mode: {search_mode}...") + + if search_mode == "movie": + # In movie mode, we get all movies and filter for ones that have files but need quality upgrades + endpoint = "movie" + + response = arr_request(api_url, api_key, api_timeout, endpoint) + + if response is None: + return None + + # Extract movies that have files but need quality upgrades + items = [] + if isinstance(response, list): + # Filter for movies that have files but haven't met quality cutoff + items = [item for item in response if item.get("hasFile", False) and item.get("qualityCutoffNotMet", False)] + elif isinstance(response, dict) and "records" in response: + # Fallback to old format if somehow it returns in this format + items = [item for item in response["records"] if item.get("hasFile", False) and item.get("qualityCutoffNotMet", False)] + + elif search_mode == "scene": + # In scene mode, try to use scene-specific endpoints + endpoint = "scene/cutoff?pageSize=1000" + + response = arr_request(api_url, api_key, api_timeout, endpoint) + + if response is None: + # Fallback to regular movie filtering if scene endpoint doesn't exist + eros_logger.warning("Scene cutoff endpoint not available, falling back to movie mode") + return get_quality_upgrades(api_url, api_key, api_timeout, monitored_only, "movie") + + # Extract the scenes + items = [] + if isinstance(response, dict) and "records" in response: + items = response["records"] + elif isinstance(response, list): + items = response + + else: + # Invalid search mode + eros_logger.error(f"Invalid 
search mode: {search_mode}. Must be 'movie' or 'scene'") + return None + + # Filter monitored if needed + if monitored_only: + items = [item for item in items if item.get("monitored", False)] + + eros_logger.debug(f"Found {len(items)} quality upgrade items using {search_mode} mode") + + return items + + except Exception as e: + eros_logger.error(f"Error retrieving quality upgrade items: {str(e)}") + return None + +def refresh_item(api_url: str, api_key: str, api_timeout: int, item_id: int) -> int: + """ + Refresh a movie in Whisparr V3. + + Args: + api_url: The base URL of the Whisparr V3 API + api_key: The API key for authentication + api_timeout: Timeout for the API request + item_id: The ID of the movie to refresh + + Returns: + The command ID if the refresh was triggered successfully, None otherwise + """ + try: + eros_logger.info(f"Explicitly refreshing movie with ID {item_id} via API call") + + # In Whisparr V3, we use RefreshMovie command directly with the movieId + payload = { + "name": "RefreshMovie", + "movieId": item_id + } + + # Command endpoint + command_endpoint = "command" + + # Make the API request + response = arr_request(api_url, api_key, api_timeout, command_endpoint, "POST", payload) + + if response and "id" in response: + command_id = response["id"] + eros_logger.info(f"Refresh movie command triggered with ID {command_id} for movie {item_id}") + return command_id + else: + eros_logger.error(f"Failed to trigger refresh command for movie {item_id} - no command ID returned") + return None + + except Exception as e: + eros_logger.error(f"Error refreshing movie {item_id}: {str(e)}") + return None + +def item_search(api_url: str, api_key: str, api_timeout: int, item_ids: List[int]) -> int: + """ + Trigger a search for one or more movies in Whisparr V3. 
+ + Args: + api_url: The base URL of the Whisparr V3 API + api_key: The API key for authentication + api_timeout: Timeout for the API request + item_ids: A list of movie IDs to search for + + Returns: + The command ID if the search command was triggered successfully, None otherwise + """ + try: + if not item_ids: + eros_logger.warning("No movie IDs provided for search.") + return None + + eros_logger.debug(f"Searching for movies with IDs: {item_ids}") + + # Try several possible command formats, as the API might be in flux + possible_commands = [ + # Format 1: MoviesSearch with integer IDs (Radarr-like) and no auto-refresh + { + "name": "MoviesSearch", + "movieIds": item_ids, + "updateScheduledTask": False, + "runRefreshAfterSearch": False, + "sendUpdatesToClient": False + }, + # Format 2: MovieSearch with integer IDs and no auto-refresh + { + "name": "MovieSearch", + "movieIds": item_ids, + "updateScheduledTask": False, + "runRefreshAfterSearch": False, + "sendUpdatesToClient": False + }, + # Format 3: MoviesSearch with string IDs and no auto-refresh + { + "name": "MoviesSearch", + "movieIds": [str(id) for id in item_ids], + "updateScheduledTask": False, + "runRefreshAfterSearch": False, + "sendUpdatesToClient": False + }, + # Format 4: MovieSearch with string IDs and no auto-refresh + { + "name": "MovieSearch", + "movieIds": [str(id) for id in item_ids], + "updateScheduledTask": False, + "runRefreshAfterSearch": False, + "sendUpdatesToClient": False + }, + # Fallback to original formats if the above don't work + { + "name": "MoviesSearch", + "movieIds": item_ids + }, + { + "name": "MovieSearch", + "movieIds": item_ids + }, + { + "name": "MoviesSearch", + "movieIds": [str(id) for id in item_ids] + }, + { + "name": "MovieSearch", + "movieIds": [str(id) for id in item_ids] + } + ] + + # Command endpoint + command_endpoint = "command" + + # Try each command format until one works + for i, payload in enumerate(possible_commands): + eros_logger.debug(f"Trying search 
command format {i+1}: {payload}") + + # Make the API request + response = arr_request(api_url, api_key, api_timeout, command_endpoint, "POST", payload) + + if response and "id" in response: + command_id = response["id"] + eros_logger.debug(f"Search command format {i+1} succeeded with ID {command_id}") + return command_id + + # If we've tried all formats and none worked: + eros_logger.error("All search command formats failed - no command ID returned") + return None + + except Exception as e: + eros_logger.error(f"Error searching for movies: {str(e)}") + return None + +def get_command_status(api_url: str, api_key: str, api_timeout: int, command_id: int) -> Optional[Dict]: + """ + Get the status of a specific command. + + Args: + api_url: The base URL of the Eros API + api_key: The API key for authentication + api_timeout: Timeout for the API request + command_id: The ID of the command to check + + Returns: + A dictionary containing the command status, or None if the request failed. + """ + if not command_id: + eros_logger.error("No command ID provided for status check.") + return None + + try: + command_endpoint = f"command/{command_id}" + + # Make the API request + result = arr_request(api_url, api_key, api_timeout, command_endpoint) + + if result: + eros_logger.debug(f"Command {command_id} status: {result.get('status', 'unknown')}") + return result + else: + eros_logger.error(f"Failed to get command status for ID {command_id}") + return None + + except Exception as e: + eros_logger.error(f"Error getting command status for ID {command_id}: {e}") + return None + +def check_connection(api_url: str, api_key: str, api_timeout: int) -> bool: + """ + Check the connection to Whisparr V3 API. 
+ + Args: + api_url: The base URL of the Whisparr V3 API + api_key: The API key for authentication + api_timeout: Timeout for the API request + + Returns: + True if the connection is successful, False otherwise + """ + try: + eros_logger.debug(f"Checking connection to Whisparr V3 instance at {api_url}") + + endpoint = "system/status" + response = arr_request(api_url, api_key, api_timeout, endpoint) + + if response is not None: + # Get the version information if available + version = response.get("version", "unknown") + + # Simply check if we received a valid response - Whisparr V3 is in development + # so the version number might be in various formats + if version and isinstance(version, str): + eros_logger.info(f"Successfully connected to Whisparr V3 API, reported version: {version}") + return True + else: + eros_logger.warning(f"Connected to server but found unexpected version format: {version}") + return False + else: + eros_logger.error("Failed to connect to Whisparr V3 API") + return False + + except Exception as e: + eros_logger.error(f"Error checking connection to Whisparr V3 API: {str(e)}") + return False diff --git a/Huntarr.io-6.3.6/src/primary/apps/eros/missing.py b/Huntarr.io-6.3.6/src/primary/apps/eros/missing.py new file mode 100644 index 0000000..cb87dcc --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/eros/missing.py @@ -0,0 +1,245 @@ +#!/usr/bin/env python3 +""" +Missing Items Processing for Eros +Handles searching for missing items in Eros + +Exclusively supports the v3 API. 
+""" + +import time +import random +import datetime +from typing import List, Dict, Any, Set, Callable +from src.primary.utils.logger import get_logger +from src.primary.apps.eros import api as eros_api +from src.primary.settings_manager import load_settings, get_advanced_setting +from src.primary.stateful_manager import is_processed, add_processed_id +from src.primary.stats_manager import increment_stat +from src.primary.utils.history_utils import log_processed_media +from src.primary.state import check_state_reset + +# Get logger for the app +eros_logger = get_logger("eros") + +def process_missing_items( + app_settings: Dict[str, Any], + stop_check: Callable[[], bool] # Function to check if stop is requested +) -> bool: + """ + Process missing items in Eros based on provided settings. + + Args: + app_settings: Dictionary containing all settings for Eros + stop_check: A function that returns True if the process should stop + + Returns: + True if any items were processed, False otherwise. 
+ """ + eros_logger.info("Starting missing items processing cycle for Eros.") + processed_any = False + + # Reset state files if enough time has passed + check_state_reset("eros") + + # Extract necessary settings + api_url = app_settings.get("api_url", "").strip() + api_key = app_settings.get("api_key", "").strip() + api_timeout = get_advanced_setting("api_timeout", 120) # Use general.json value + instance_name = app_settings.get("instance_name", "Eros Default") + + # Load general settings to get centralized timeout + general_settings = load_settings('general') + + monitored_only = app_settings.get("monitored_only", True) + skip_future_releases = app_settings.get("skip_future_releases", True) + skip_item_refresh = app_settings.get("skip_item_refresh", False) + eros_logger.info(f"Skip item refresh setting: {skip_item_refresh}") + search_mode = app_settings.get("search_mode", "movie") # Default to movie mode if not specified + + eros_logger.info(f"Using search mode: {search_mode} for missing items") + + # Use the new hunt_missing_items parameter name, falling back to hunt_missing_scenes for backwards compatibility + hunt_missing_items = app_settings.get("hunt_missing_items", app_settings.get("hunt_missing_scenes", 0)) + + # Use advanced settings from general.json for command operations + command_wait_delay = get_advanced_setting("command_wait_delay", 1) + command_wait_attempts = get_advanced_setting("command_wait_attempts", 600) + + # Use the centralized advanced setting for stateful management hours + stateful_management_hours = get_advanced_setting("stateful_management_hours", 168) + + # Log that we're using Eros v3 API + eros_logger.info(f"Using Eros API v3 for instance: {instance_name}") + + # Skip if hunt_missing_items is set to a negative value or 0 + if hunt_missing_items <= 0: + eros_logger.info("'hunt_missing_items' setting is 0 or less. 
Skipping missing item processing.") + return False + + # Check for stop signal + if stop_check(): + eros_logger.info("Stop requested before starting missing items. Aborting...") + return False + + # Get missing items + eros_logger.info(f"Retrieving items with missing files...") + missing_items = eros_api.get_items_with_missing(api_url, api_key, api_timeout, monitored_only, search_mode) + + if missing_items is None: # API call failed + eros_logger.error("Failed to retrieve missing items from Eros API.") + return False + + if not missing_items: + eros_logger.info("No missing items found.") + return False + + # Check for stop signal after retrieving items + if stop_check(): + eros_logger.info("Stop requested after retrieving missing items. Aborting...") + return False + + eros_logger.info(f"Found {len(missing_items)} items with missing files.") + + # Filter out future releases if configured + if skip_future_releases: + now = datetime.datetime.now(datetime.timezone.utc) + original_count = len(missing_items) + # Eros item object has 'airDateUtc' for release dates + missing_items = [ + item for item in missing_items + if not item.get('airDateUtc') or ( + item.get('airDateUtc') and + datetime.datetime.fromisoformat(item['airDateUtc'].replace('Z', '+00:00')) < now + ) + ] + skipped_count = original_count - len(missing_items) + if skipped_count > 0: + eros_logger.info(f"Skipped {skipped_count} future item releases based on air date.") + + if not missing_items: + eros_logger.info("No missing items left to process after filtering future releases.") + return False + + # Filter out already processed items using stateful management + unprocessed_items = [] + for item in missing_items: + item_id = str(item.get("id")) + if not is_processed("eros", instance_name, item_id): + unprocessed_items.append(item) + else: + eros_logger.debug(f"Skipping already processed item ID: {item_id}") + + eros_logger.info(f"Found {len(unprocessed_items)} unprocessed items out of {len(missing_items)} 
total items with missing files.") + + if not unprocessed_items: + eros_logger.info(f"No unprocessed items found for {instance_name}. All available items have been processed.") + return False + + items_processed = 0 + processing_done = False + + # Select items to search based on configuration + eros_logger.info(f"Randomly selecting up to {hunt_missing_items} missing items.") + items_to_search = random.sample(unprocessed_items, min(len(unprocessed_items), hunt_missing_items)) + + eros_logger.info(f"Selected {len(items_to_search)} missing items to search.") + + # Process selected items + for item in items_to_search: + # Check for stop signal before each item + if stop_check(): + eros_logger.info("Stop requested during item processing. Aborting...") + break + + # Re-check limit in case it changed + current_limit = app_settings.get("hunt_missing_items", app_settings.get("hunt_missing_scenes", 1)) + if items_processed >= current_limit: + eros_logger.info(f"Reached HUNT_MISSING_ITEMS limit ({current_limit}) for this cycle.") + break + + item_id = item.get("id") + title = item.get("title", "Unknown Title") + + # For movies, we don't use season/episode format + if search_mode == "movie": + item_info = title + else: + # If somehow using scene mode, try to format as S/E if available + season_number = item.get('seasonNumber') + episode_number = item.get('episodeNumber') + if season_number is not None and episode_number is not None: + season_episode = f"S{season_number:02d}E{episode_number:02d}" + item_info = f"{title} - {season_episode}" + else: + item_info = title + + eros_logger.info(f"Processing missing item: \"{item_info}\" (Item ID: {item_id})") + + # Mark the item as processed BEFORE triggering any searches + add_processed_id("eros", instance_name, str(item_id)) + eros_logger.debug(f"Added item ID {item_id} to processed list for {instance_name}") + + # Refresh the item information if not skipped + refresh_command_id = None + if not skip_item_refresh: + eros_logger.info(" 
- Refreshing item information...") + refresh_command_id = eros_api.refresh_item(api_url, api_key, api_timeout, item_id) + if refresh_command_id: + eros_logger.info(f"Triggered refresh command {refresh_command_id}. Waiting a few seconds...") + time.sleep(5) # Basic wait + else: + eros_logger.warning(f"Failed to trigger refresh command for item ID: {item_id}. Proceeding without refresh.") + else: + eros_logger.info(" - Skipping item refresh (skip_item_refresh=true)") + + # Check for stop signal before searching + if stop_check(): + eros_logger.info(f"Stop requested before searching for {title}. Aborting...") + break + + # Search for the item + eros_logger.info(" - Searching for missing item...") + search_command_id = eros_api.item_search(api_url, api_key, api_timeout, [item_id]) + if search_command_id: + eros_logger.info(f"Triggered search command {search_command_id}. Assuming success for now.") + + # Log to history system + log_processed_media("eros", item_info, item_id, instance_name, "missing") + eros_logger.debug(f"Logged history entry for item: {item_info}") + + items_processed += 1 + processing_done = True + + # Increment the hunted statistics for Eros + increment_stat("eros", "hunted", 1) + eros_logger.debug(f"Incremented eros hunted statistics by 1") + + # Log progress + current_limit = app_settings.get("hunt_missing_items", app_settings.get("hunt_missing_scenes", 1)) + eros_logger.info(f"Processed {items_processed}/{current_limit} missing items this cycle.") + else: + eros_logger.warning(f"Failed to trigger search command for item ID {item_id}.") + # Do not mark as processed if search couldn't be triggered + continue + + # Log final status + if items_processed > 0: + eros_logger.info(f"Completed processing {items_processed} missing items for this cycle.") + else: + eros_logger.info("No new missing items were processed in this run.") + + return processing_done + +# For backward compatibility with the background processing system +def 
process_missing_scenes(app_settings, stop_check): + """ + Backwards compatibility function that calls process_missing_items. + + Args: + app_settings: Dictionary containing all settings for Eros + stop_check: A function that returns True if the process should stop + + Returns: + Result from process_missing_items + """ + return process_missing_items(app_settings, stop_check) diff --git a/Huntarr.io-6.3.6/src/primary/apps/eros/upgrade.py b/Huntarr.io-6.3.6/src/primary/apps/eros/upgrade.py new file mode 100644 index 0000000..df1be22 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/eros/upgrade.py @@ -0,0 +1,209 @@ +#!/usr/bin/env python3 +""" +Quality Upgrade Processing for Eros +Handles searching for items that need quality upgrades in Eros + +Exclusively supports the v3 API. +""" + +import time +import random +import datetime +from typing import List, Dict, Any, Set, Callable +from src.primary.utils.logger import get_logger +from src.primary.apps.eros import api as eros_api +from src.primary.settings_manager import load_settings, get_advanced_setting +from src.primary.stateful_manager import is_processed, add_processed_id +from src.primary.stats_manager import increment_stat +from src.primary.utils.history_utils import log_processed_media +from src.primary.state import check_state_reset + +# Get logger for the app +eros_logger = get_logger("eros") + +def process_cutoff_upgrades( + app_settings: Dict[str, Any], + stop_check: Callable[[], bool] # Function to check if stop is requested +) -> bool: + """ + Process quality cutoff upgrades for Eros based on settings. + + Args: + app_settings: Dictionary containing all settings for Eros + stop_check: A function that returns True if the process should stop + + Returns: + True if any items were processed for upgrades, False otherwise. 
+ """ + eros_logger.info("Starting quality cutoff upgrades processing cycle for Eros.") + processed_any = False + + # Reset state files if enough time has passed + check_state_reset("eros") + + # Extract necessary settings + api_url = app_settings.get("api_url", "").strip() + api_key = app_settings.get("api_key", "").strip() + api_timeout = get_advanced_setting("api_timeout", 120) # Use general.json value + instance_name = app_settings.get("instance_name", "Eros Default") + + # Load general settings to get centralized timeout + general_settings = load_settings('general') + + monitored_only = app_settings.get("monitored_only", True) + skip_item_refresh = app_settings.get("skip_item_refresh", False) + eros_logger.info(f"Skip item refresh setting: {skip_item_refresh}") + search_mode = app_settings.get("search_mode", "movie") # Default to movie mode if not specified + + eros_logger.info(f"Using search mode: {search_mode} for quality upgrades") + + # Use the new hunt_upgrade_items parameter name, falling back to hunt_upgrade_scenes for backwards compatibility + hunt_upgrade_items = app_settings.get("hunt_upgrade_items", app_settings.get("hunt_upgrade_scenes", 0)) + + # Use advanced settings from general.json for command operations + command_wait_delay = get_advanced_setting("command_wait_delay", 1) + command_wait_attempts = get_advanced_setting("command_wait_attempts", 600) + state_reset_interval_hours = get_advanced_setting("stateful_management_hours", 168) + + # Log that we're using Eros API v3 + eros_logger.info(f"Using Eros API v3 for instance: {instance_name}") + + # Skip if hunt_upgrade_items is set to 0 + if hunt_upgrade_items <= 0: + eros_logger.info("'hunt_upgrade_items' setting is 0 or less. Skipping quality upgrade processing.") + return False + + # Check for stop signal + if stop_check(): + eros_logger.info("Stop requested before starting quality upgrades. 
Aborting...") + return False + + # Get items eligible for upgrade + eros_logger.info(f"Retrieving items eligible for cutoff upgrade...") + upgrade_eligible_data = eros_api.get_quality_upgrades(api_url, api_key, api_timeout, monitored_only, search_mode) + + if not upgrade_eligible_data: + eros_logger.info("No items found eligible for upgrade or error retrieving them.") + return False + + # Check for stop signal after retrieving eligible items + if stop_check(): + eros_logger.info("Stop requested after retrieving upgrade eligible items. Aborting...") + return False + + eros_logger.info(f"Found {len(upgrade_eligible_data)} items eligible for quality upgrade.") + + # Filter out already processed items using stateful management + unprocessed_items = [] + for item in upgrade_eligible_data: + item_id = str(item.get("id")) + if not is_processed("eros", instance_name, item_id): + unprocessed_items.append(item) + else: + eros_logger.debug(f"Skipping already processed item ID: {item_id}") + + eros_logger.info(f"Found {len(unprocessed_items)} unprocessed items out of {len(upgrade_eligible_data)} total items eligible for quality upgrade.") + + if not unprocessed_items: + eros_logger.info(f"No unprocessed items found for {instance_name}. All available items have been processed.") + return False + + items_processed = 0 + processing_done = False + + # Always use random selection for upgrades + eros_logger.info(f"Randomly selecting up to {hunt_upgrade_items} items for quality upgrade.") + items_to_upgrade = random.sample(unprocessed_items, min(len(unprocessed_items), hunt_upgrade_items)) + + eros_logger.info(f"Selected {len(items_to_upgrade)} items for quality upgrade.") + + # Process selected items + for item in items_to_upgrade: + # Check for stop signal before each item + if stop_check(): + eros_logger.info("Stop requested during item processing. 
Aborting...") + break + + # Re-check limit in case it changed + current_limit = app_settings.get("hunt_upgrade_items", app_settings.get("hunt_upgrade_scenes", 1)) + if items_processed >= current_limit: + eros_logger.info(f"Reached HUNT_UPGRADE_ITEMS limit ({current_limit}) for this cycle.") + break + + item_id = item.get("id") + title = item.get("title", "Unknown Title") + + # For movies, we don't use season/episode format + if search_mode == "movie": + item_info = title + # In Whisparr, movie quality is stored differently than TV shows + current_quality = item.get("movieFile", {}).get("quality", {}).get("quality", {}).get("name", "Unknown") + else: + # If somehow using scene mode, try to format as S/E if available + season_number = item.get('seasonNumber') + episode_number = item.get('episodeNumber') + if season_number is not None and episode_number is not None: + season_episode = f"S{season_number:02d}E{episode_number:02d}" + item_info = f"{title} - {season_episode}" + else: + item_info = title + # Legacy episode quality path + current_quality = item.get("episodeFile", {}).get("quality", {}).get("quality", {}).get("name", "Unknown") + + eros_logger.info(f"Processing item for quality upgrade: \"{item_info}\" (Item ID: {item_id})") + eros_logger.info(f" - Current quality: {current_quality}") + + # Mark the item as processed BEFORE triggering any searches + add_processed_id("eros", instance_name, str(item_id)) + eros_logger.debug(f"Added item ID {item_id} to processed list for {instance_name}") + + # Refresh the item information if not skipped + refresh_command_id = None + if not skip_item_refresh: + eros_logger.info(" - Refreshing item information...") + refresh_command_id = eros_api.refresh_item(api_url, api_key, api_timeout, item_id) + if refresh_command_id: + eros_logger.info(f"Triggered refresh command {refresh_command_id}. 
Waiting a few seconds...") + time.sleep(5) # Basic wait + else: + eros_logger.warning(f"Failed to trigger refresh command for item ID: {item_id}. Proceeding without refresh.") + else: + eros_logger.info(" - Skipping item refresh (skip_item_refresh=true)") + + # Check for stop signal before searching + if stop_check(): + eros_logger.info(f"Stop requested before searching for {title}. Aborting...") + break + + # Search for the item + eros_logger.info(" - Searching for quality upgrade...") + search_command_id = eros_api.item_search(api_url, api_key, api_timeout, [item_id]) + if search_command_id: + eros_logger.info(f"Triggered search command {search_command_id}. Assuming success for now.") + + # Log to history so the upgrade appears in the history UI + log_processed_media("eros", item_info, item_id, instance_name, "upgrade") + eros_logger.debug(f"Logged quality upgrade to history for item ID {item_id}") + + items_processed += 1 + processing_done = True + + # Increment the upgraded statistics for Eros + increment_stat("eros", "upgraded", 1) + eros_logger.debug(f"Incremented eros upgraded statistics by 1") + + # Log progress + current_limit = app_settings.get("hunt_upgrade_items", app_settings.get("hunt_upgrade_scenes", 1)) + eros_logger.info(f"Processed {items_processed}/{current_limit} items for quality upgrade this cycle.") + else: + eros_logger.warning(f"Failed to trigger search command for item ID {item_id}.") + # Do not mark as processed if search couldn't be triggered + continue + + # Log final status + if items_processed > 0: + eros_logger.info(f"Completed processing {items_processed} items for quality upgrade for this cycle.") + else: + eros_logger.info("No new items were processed for quality upgrade in this run.") + + return processing_done diff --git a/Huntarr.io-6.3.6/src/primary/apps/eros_routes.py b/Huntarr.io-6.3.6/src/primary/apps/eros_routes.py new file mode 100644 index 0000000..9d6b8c7 --- /dev/null +++ 
b/Huntarr.io-6.3.6/src/primary/apps/eros_routes.py @@ -0,0 +1,229 @@ +#!/usr/bin/env python3 + +from flask import Blueprint, request, jsonify +import datetime, os, requests +from src.primary import keys_manager +from src.primary.state import get_state_file_path, reset_state_file +from src.primary.utils.logger import get_logger, APP_LOG_FILES +from src.primary.settings_manager import load_settings +import traceback +import socket +from urllib.parse import urlparse +from src.primary.apps.eros import api as eros_api + +eros_bp = Blueprint('eros', __name__) +eros_logger = get_logger("eros") + +# Make sure we're using the correct state files +PROCESSED_MISSING_FILE = get_state_file_path("eros", "processed_missing") +PROCESSED_UPGRADES_FILE = get_state_file_path("eros", "processed_upgrades") + +def get_configured_instances(): + # Load Eros settings + settings = load_settings("eros") + instances = settings.get("instances", []) + return instances + +def test_connection(url, api_key): + # Validate URL format + if not (url.startswith('http://') or url.startswith('https://')): + error_msg = "API URL must start with http:// or https://" + eros_logger.error(error_msg) + return {"success": False, "message": error_msg} + + # Try to establish a socket connection first to check basic connectivity + parsed_url = urlparse(url) + hostname = parsed_url.hostname + port = parsed_url.port or (443 if parsed_url.scheme == 'https' else 80) + + try: + # Try socket connection for quick feedback on connectivity issues + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(3) # Short timeout for quick feedback + result = sock.connect_ex((hostname, port)) + sock.close() + + if result != 0: + error_msg = f"Connection refused - Unable to connect to {hostname}:{port}. Please check if the server is running and the port is correct." 
+ eros_logger.error(error_msg) + return {"success": False, "message": error_msg} + except socket.gaierror: + error_msg = f"DNS resolution failed - Cannot resolve hostname: {hostname}. Please check your URL." + eros_logger.error(error_msg) + return {"success": False, "message": error_msg} + except Exception as e: + # Log the socket testing error but continue with the full request + eros_logger.debug(f"Socket test error, continuing with full request: {str(e)}") + + # For Eros, we only use v3 API path + api_url = f"{url.rstrip('/')}/api/v3/system/status" + headers = {'X-Api-Key': api_key} + + try: + # Make the request with appropriate timeouts + eros_logger.debug(f"Trying API path: {api_url}") + response = requests.get(api_url, headers=headers, timeout=(5, 30)) + + try: + response.raise_for_status() + + # Check if we got a valid JSON response + try: + response_data = response.json() + + # Verify this is actually an Eros server by checking for version + version = response_data.get('version') + if not version: + error_msg = "API response doesn't contain version information. This doesn't appear to be a valid Eros server." + eros_logger.error(error_msg) + return {"success": False, "message": error_msg} + + # Version check - should be v3.x for Eros + if version.startswith('3'): + detected_version = "v3" + eros_logger.info(f"Successfully connected to Eros API version: {version} (API {detected_version})") + + # Success! + return {"success": True, "message": "Successfully connected to Eros API", "version": version, "api_version": detected_version} + elif version.startswith('2'): + error_msg = f"Incompatible version detected: {version}. This appears to be Whisparr V2, not Eros." + eros_logger.error(error_msg) + return {"success": False, "message": error_msg} + else: + error_msg = f"Unexpected version {version} detected. Eros requires API v3." 
+ eros_logger.error(error_msg) + return {"success": False, "message": error_msg} + except ValueError: + error_msg = "Invalid JSON response from Eros API - This doesn't appear to be a valid Eros server" + eros_logger.error(f"{error_msg}. Response content: {response.text[:200]}") + return {"success": False, "message": error_msg} + + except requests.exceptions.HTTPError: + # Handle specific HTTP errors + if response.status_code == 401: + error_msg = "Invalid API key - Authentication failed" + eros_logger.error(error_msg) + return {"success": False, "message": error_msg} + elif response.status_code == 404: + error_msg = "API endpoint not found: This doesn't appear to be a valid Eros server. Check your URL." + eros_logger.error(error_msg) + return {"success": False, "message": error_msg} + else: + error_msg = f"Eros server error (HTTP {response.status_code}): The Eros server is experiencing issues" + eros_logger.error(error_msg) + return {"success": False, "message": error_msg} + + except requests.exceptions.ConnectionError as e: + # Connection error - server might be down or unreachable + error_details = str(e) + + if "Connection refused" in error_details: + error_msg = f"Connection refused - Eros is not running on {url} or the port is incorrect" + else: + error_msg = f"Connection error - Check if Eros is running: {error_details}" + + eros_logger.error(error_msg) + return {"success": False, "message": error_msg} + + except requests.exceptions.Timeout: + error_msg = f"Connection timed out - Eros took too long to respond" + eros_logger.error(error_msg) + return {"success": False, "message": error_msg} + + except Exception as e: + error_msg = f"Unexpected error: {str(e)}" + eros_logger.error(f"{error_msg}\n{traceback.format_exc()}") + return {"success": False, "message": error_msg} + +@eros_bp.route('/status', methods=['GET']) +def get_status(): + """Get the status of all configured Eros instances""" + try: + instances = get_configured_instances() + 
eros_logger.debug(f"Eros configured instances: {instances}") + if instances: + connected_count = 0 + for instance in instances: + if test_connection(instance['url'], instance['api_key'])['success']: + connected_count += 1 + return jsonify({ + "configured": True, + "connected": connected_count > 0, + "connected_count": connected_count, + "total_configured": len(instances) + }) + else: + eros_logger.debug("No Eros instances configured") + return jsonify({"configured": False, "connected": False}) + except Exception as e: + eros_logger.error(f"Error getting Eros status: {str(e)}") + return jsonify({"configured": False, "connected": False, "error": str(e)}) + +@eros_bp.route('/test-connection', methods=['POST']) +def test_connection_endpoint(): + """Test connection to an Eros API instance""" + data = request.json + api_url = data.get('api_url') + api_key = data.get('api_key') + api_timeout = data.get('api_timeout', 30) # Use longer timeout for connection test + + if not api_url or not api_key: + return jsonify({"success": False, "message": "API URL and API Key are required"}), 400 + + eros_logger.info(f"Testing connection to Eros API at {api_url}") + + return test_connection(api_url, api_key) + +@eros_bp.route('/test-settings', methods=['GET']) +def test_eros_settings(): + """Debug endpoint to test Eros settings loading""" + try: + # Directly read the settings file to bypass any potential caching + import json + import os + + # Check all possible settings locations + possible_locations = [ + "/config/eros.json", # Main Docker mount + "/app/config/eros.json", # Alternate location + os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "config", "eros.json") # Relative path + ] + + results = {} + + # Try all locations + for location in possible_locations: + results[location] = {"exists": os.path.exists(location)} + if os.path.exists(location): + try: + with open(location, 'r') as f: + results[location]["content"] = json.load(f) + except Exception as e: 
+ results[location]["error"] = str(e) + + # Also try loading via settings_manager + try: + from src.primary.settings_manager import load_settings + settings = load_settings("eros") + results["settings_manager"] = settings + except Exception as e: + results["settings_manager_error"] = str(e) + + return jsonify(results) + except Exception as e: + return jsonify({"error": str(e)}) + +@eros_bp.route('/reset-processed', methods=['POST']) +def reset_processed_state(): + """Reset the processed state files for Eros""" + try: + # Reset the state files for missing and upgrades + reset_state_file("eros", "processed_missing") + reset_state_file("eros", "processed_upgrades") + + eros_logger.info("Successfully reset Eros processed state files") + return jsonify({"success": True, "message": "Successfully reset processed state"}) + except Exception as e: + error_msg = f"Error resetting Eros state: {str(e)}" + eros_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 500 diff --git a/Huntarr.io-6.3.6/src/primary/apps/lidarr.py b/Huntarr.io-6.3.6/src/primary/apps/lidarr.py new file mode 100644 index 0000000..aceb3e8 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/lidarr.py @@ -0,0 +1,242 @@ +#!/usr/bin/env python3 +""" +Lidarr Blueprint for Huntarr +Defines Flask routes for interacting with Lidarr +""" + +import json +import traceback +import requests +from flask import Blueprint, jsonify, request +from src.primary.utils.logger import get_logger +from src.primary.apps.lidarr import api as lidarr_api +from src.primary.state import reset_state_file, get_state_file_path +from src.primary.settings_manager import load_settings +import src.primary.config as config + +# Create a logger for this module +lidarr_logger = get_logger("lidarr") + +# Create Blueprint for Lidarr routes +lidarr_bp = Blueprint('lidarr', __name__) + +# Make sure we're using the correct state files +PROCESSED_MISSING_FILE = get_state_file_path("lidarr", "processed_missing") 
+PROCESSED_UPGRADES_FILE = get_state_file_path("lidarr", "processed_upgrades") + +# Function to check if Lidarr is configured +def is_configured(): + """Check if Lidarr API credentials are configured by checking if at least one instance is enabled""" + settings = load_settings("lidarr") + + if not settings: + lidarr_logger.debug("No settings found for Lidarr") + return False + + # Check if instances are configured + if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]: + for instance in settings["instances"]: + if instance.get("enabled", True) and instance.get("api_url") and instance.get("api_key"): + lidarr_logger.debug(f"Found configured Lidarr instance: {instance.get('name', 'Unnamed')}") + return True + + lidarr_logger.debug("No enabled Lidarr instances found with valid API URL and key") + return False + + # Fallback to legacy single-instance config + api_url = settings.get("api_url") + api_key = settings.get("api_key") + return bool(api_url and api_key) + +# Get all valid instances from settings +def get_configured_instances(): + """Get all configured and enabled Lidarr instances""" + settings = load_settings("lidarr") + instances = [] + + if not settings: + lidarr_logger.debug("No settings found for Lidarr") + return instances + + # Check if instances are configured + if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]: + for instance in settings["instances"]: + if instance.get("enabled", True) and instance.get("api_url") and instance.get("api_key"): + # Create a settings object for this instance by combining global settings with instance-specific ones + instance_settings = settings.copy() + # Remove instances list to avoid confusion + if "instances" in instance_settings: + del instance_settings["instances"] + + # Override with instance-specific connection settings + instance_settings["api_url"] = instance.get("api_url") + instance_settings["api_key"] = 
instance.get("api_key") + instance_settings["instance_name"] = instance.get("name", "Default") + + instances.append(instance_settings) + else: + # Fallback to legacy single-instance config + api_url = settings.get("api_url") + api_key = settings.get("api_key") + if api_url and api_key: + settings["instance_name"] = "Default" + instances.append(settings) + + lidarr_logger.info(f"Found {len(instances)} configured and enabled Lidarr instances") + return instances + +@lidarr_bp.route('/status', methods=['GET']) +def status(): + """Get Lidarr connection status and version.""" + try: + # Get API settings from config + settings = config.get_app_settings("lidarr") + + if not settings or not settings.get("api_url") or not settings.get("api_key"): + return jsonify({"connected": False, "message": "Lidarr is not configured"}), 200 + + api_url = settings["api_url"] + api_key = settings["api_key"] + api_timeout = settings.get("api_timeout", 30) + + # Check connection and get system status + system_status = lidarr_api.get_system_status(api_url, api_key, api_timeout) + + if system_status is not None: + version = system_status.get("version", "Unknown") + return jsonify({ + "connected": True, + "version": version, + "message": f"Connected to Lidarr {version}" + }), 200 + else: + return jsonify({ + "connected": False, + "message": "Failed to connect to Lidarr" + }), 200 + + except Exception as e: + error_message = f"Error checking Lidarr status: {str(e)}" + lidarr_logger.error(error_message) + lidarr_logger.error(traceback.format_exc()) + return jsonify({"connected": False, "message": error_message}), 500 + +@lidarr_bp.route('/test-connection', methods=['POST']) +def test_connection(): + """Test connection to Lidarr with provided API settings.""" + try: + # Extract API settings from request + data = request.json + api_url = data.get("api_url", "").rstrip('/') + api_key = data.get("api_key", "") + api_timeout = int(data.get("api_timeout", 30)) + + if not api_url or not api_key: + 
return jsonify({"success": False, "message": "API URL and API Key are required"}), 400 + + # Test connection to Lidarr + system_status = lidarr_api.get_system_status(api_url, api_key, api_timeout) + + if system_status is not None: + version = system_status.get("version", "Unknown") + return jsonify({ + "success": True, + "version": version, + "message": f"Successfully connected to Lidarr {version}" + }), 200 + else: + return jsonify({ + "success": False, + "message": "Failed to connect to Lidarr. Check URL and API Key." + }), 400 + + except requests.exceptions.RequestException as e: + error_message = f"Connection error: {str(e)}" + if hasattr(e, 'response'): + if e.response is not None: + error_message += f" - Status Code: {e.response.status_code}, Response: {e.response.text[:200]}" + lidarr_logger.error(f"Lidarr connection error: {error_message}") + return jsonify({"success": False, "message": error_message}), 500 + except Exception as e: # Catch any other unexpected errors + lidarr_logger.error(f"An unexpected error occurred during Lidarr connection test: {str(e)}", exc_info=True) + return jsonify({"success": False, "message": f"An unexpected error occurred: {str(e)}"}), 500 + +@lidarr_bp.route('/stats', methods=['GET']) +def get_stats(): + """Get statistics about Lidarr library.""" + try: + # Get API settings from config + settings = config.get_app_settings("lidarr") + + if not settings or not settings.get("api_url") or not settings.get("api_key"): + return jsonify({"error": "Lidarr is not configured"}), 400 + + api_url = settings["api_url"] + api_key = settings["api_key"] + api_timeout = settings.get("api_timeout", 30) + monitored_only = settings.get("monitored_only", True) + + # Get all artists from Lidarr + all_artists = lidarr_api.get_artists(api_url, api_key, api_timeout) + if all_artists is None: + return jsonify({"error": "Failed to get artists from Lidarr"}), 500 + + # Count total artists and monitored artists + total_artists = len(all_artists) + 
monitored_artists = sum(1 for artist in all_artists if artist.get("monitored", False)) + + # Get missing albums + missing_albums = lidarr_api.get_missing_albums(api_url, api_key, api_timeout, monitored_only) + total_missing = len(missing_albums) if missing_albums is not None else 0 + + # Get cutoff unmet albums + cutoff_unmet = lidarr_api.get_cutoff_unmet_albums(api_url, api_key, api_timeout, monitored_only) + total_upgradable = len(cutoff_unmet) if cutoff_unmet is not None else 0 + + # Get download queue + queue_size = lidarr_api.get_download_queue_size(api_url, api_key, api_timeout) + + # Return stats + return jsonify({ + "total_artists": total_artists, + "monitored_artists": monitored_artists, + "missing_albums": total_missing, + "upgradable_albums": total_upgradable, + "queue_size": queue_size + }), 200 + + except Exception as e: + error_message = f"Error getting Lidarr stats: {str(e)}" + lidarr_logger.error(error_message) + lidarr_logger.error(traceback.format_exc()) + return jsonify({"error": error_message}), 500 + +@lidarr_bp.route('/reset-state', methods=['POST']) +def reset_state(): + """Reset the Lidarr state files to clear processed IDs.""" + try: + # JSON object with flags for which states to reset + data = request.json or {} + reset_missing = data.get('reset_missing', True) + reset_upgrades = data.get('reset_upgrades', True) + + # Reset missing state if requested + if reset_missing: + reset_state_file("lidarr", "processed_missing") + lidarr_logger.info("Reset Lidarr missing albums state") + + # Reset upgrades state if requested + if reset_upgrades: + reset_state_file("lidarr", "processed_upgrades") + lidarr_logger.info("Reset Lidarr upgrades state") + + return jsonify({ + "success": True, + "message": "Lidarr state reset successfully" + }), 200 + + except Exception as e: + error_message = f"Error resetting Lidarr state: {str(e)}" + lidarr_logger.error(error_message) + lidarr_logger.error(traceback.format_exc()) + return jsonify({"error": 
error_message}), 500 \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/apps/lidarr/__init__.py b/Huntarr.io-6.3.6/src/primary/apps/lidarr/__init__.py new file mode 100644 index 0000000..d003f14 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/lidarr/__init__.py @@ -0,0 +1,91 @@ +""" +Lidarr app module for Huntarr +Contains functionality for missing albums and quality upgrades in Lidarr +""" + +# Module exports +from src.primary.apps.lidarr.missing import process_missing_albums +from src.primary.apps.lidarr.upgrade import process_cutoff_upgrades +from src.primary.settings_manager import load_settings +from src.primary.utils.logger import get_logger + +# Define logger for this module +lidarr_logger = get_logger("lidarr") + +def get_configured_instances(): + """Get all configured and enabled Lidarr instances""" + settings = load_settings("lidarr") + instances = [] + # lidarr_logger.info(f"Loaded Lidarr settings for instance check: {settings}") # Removed verbose log + + if not settings: + lidarr_logger.debug("No settings found for Lidarr") + return instances + + # Check if instances are configured + if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]: + # lidarr_logger.info(f"Found 'instances' list with {len(settings['instances'])} items. 
Processing...") # Removed verbose log + for idx, instance in enumerate(settings["instances"]): + lidarr_logger.debug(f"Checking instance #{idx}: {instance}") + # Enhanced validation + api_url = instance.get("api_url", "").strip() + api_key = instance.get("api_key", "").strip() + + # Enhanced URL validation - ensure URL has proper scheme + if api_url and not (api_url.startswith('http://') or api_url.startswith('https://')): + lidarr_logger.warning(f"Instance '{instance.get('name', 'Unnamed')}' has URL without http(s) scheme: {api_url}") + api_url = f"http://{api_url}" + lidarr_logger.warning(f"Auto-correcting URL to: {api_url}") + + is_enabled = instance.get("enabled", True) + + # Only include properly configured instances + if is_enabled and api_url and api_key: + # Return only essential instance details + instance_data = { + "instance_name": instance.get("name", "Default"), + "api_url": api_url, + "api_key": api_key, + } + instances.append(instance_data) + # lidarr_logger.info(f"Added valid instance: {instance_data}") # Removed verbose log + elif not is_enabled: + lidarr_logger.debug(f"Skipping disabled instance: {instance.get('name', 'Unnamed')}") + else: + # For brand new installations, don't spam logs with warnings about default instances + instance_name = instance.get('name', 'Unnamed') + if instance_name == 'Default': + # Use debug level for default instances to avoid log spam on new installations + lidarr_logger.debug(f"Skipping instance '{instance_name}' due to missing API URL or key (URL: '{api_url}', Key Set: {bool(api_key)})") + else: + # Still log warnings for non-default instances + lidarr_logger.warning(f"Skipping instance '{instance_name}' due to missing API URL or key (URL: '{api_url}', Key Set: {bool(api_key)})") + else: + # lidarr_logger.info("No 'instances' list found or list is empty. 
Checking legacy config.") # Removed verbose log + # Fallback to legacy single-instance config + api_url = settings.get("api_url", "").strip() + api_key = settings.get("api_key", "").strip() + + # Ensure URL has proper scheme + if api_url and not (api_url.startswith('http://') or api_url.startswith('https://')): + lidarr_logger.warning(f"API URL missing http(s) scheme: {api_url}") + api_url = f"http://{api_url}" + lidarr_logger.warning(f"Auto-correcting URL to: {api_url}") + + if api_url and api_key: + # Create a clean instance_data dict for the legacy instance + instance_data = { + "instance_name": "Default", + "api_url": api_url, + "api_key": api_key, + } + instances.append(instance_data) + # lidarr_logger.info(f"Added valid legacy instance: {instance_data}") # Removed verbose log + else: + lidarr_logger.warning("No API URL or key found in legacy configuration") + + # Use debug level to avoid spamming logs, especially with 0 instances + lidarr_logger.debug(f"Found {len(instances)} configured and enabled Lidarr instances") + return instances + +__all__ = ["process_missing_albums", "process_cutoff_upgrades", "get_configured_instances"] \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/apps/lidarr/api.py b/Huntarr.io-6.3.6/src/primary/apps/lidarr/api.py new file mode 100644 index 0000000..5baae67 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/lidarr/api.py @@ -0,0 +1,419 @@ +#!/usr/bin/env python3 +""" +Lidarr-specific API functions +Handles all communication with the Lidarr API (v1) +""" + +import requests +import json +import sys +import time +import datetime +import traceback +import logging +from typing import List, Dict, Any, Optional, Union +from src.primary.utils.logger import get_logger + +# Get logger for the Lidarr app +lidarr_logger = get_logger("lidarr") + +# Use a session for better performance +session = requests.Session() + +def arr_request(api_url: str, api_key: str, api_timeout: int, endpoint: str, method: str = "GET", 
data: Dict = None, params: Dict = None) -> Any: + """ + Make a request to the Lidarr API. + + Args: + api_url: The base URL of the Lidarr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + endpoint: The API endpoint to call + method: HTTP method (GET, POST, PUT, DELETE) + data: Optional data to send with the request + params: Optional query parameters + + Returns: + The JSON response from the API, or None if the request failed + """ + if not api_url or not api_key: + lidarr_logger.error("API URL or API key is missing. Check your settings.") + return None + + # Ensure api_url has a scheme + if not (api_url.startswith('http://') or api_url.startswith('https://')): + lidarr_logger.error(f"Invalid URL format: {api_url} - URL must start with http:// or https://") + return None + + # Make sure URL is properly formed + full_url = f"{api_url.rstrip('/')}/api/v1/{endpoint.lstrip('/')}" + + # Set up headers + headers = { + "X-Api-Key": api_key, + "Content-Type": "application/json" + } + + lidarr_logger.debug(f"Lidarr API Request: {method} {full_url} Params: {params} Data: {data}") + + try: + response = session.request( + method=method.upper(), + url=full_url, + headers=headers, + json=data if method.upper() in ["POST", "PUT"] else None, + params=params if method.upper() == "GET" else None, + timeout=api_timeout + ) + + lidarr_logger.debug(f"Lidarr API Response Status: {response.status_code}") + # Log response body only in debug mode and if small enough + if lidarr_logger.level == logging.DEBUG and len(response.content) < 1000: + lidarr_logger.debug(f"Lidarr API Response Body: {response.text}") + elif lidarr_logger.level == logging.DEBUG: + lidarr_logger.debug(f"Lidarr API Response Body (truncated): {response.text[:500]}...") + + # Check for successful response + response.raise_for_status() + + # Parse response if there is content + if response.content and response.headers.get('Content-Type', '').startswith('application/json'): + return 
response.json() + elif response.status_code in [200, 201, 202]: # Success codes that might not return JSON + return True + else: # Should have been caught by raise_for_status, but as a fallback + lidarr_logger.warning(f"Request successful (status {response.status_code}) but no JSON content returned from {endpoint}") + return True # Indicate success even without content + + except requests.exceptions.RequestException as e: + error_msg = f"Error during {method} request to Lidarr endpoint '{endpoint}': {str(e)}" + if e.response is not None: + error_msg += f" | Status: {e.response.status_code} | Response: {e.response.text[:500]}" + lidarr_logger.error(error_msg) + return None + except json.JSONDecodeError: + lidarr_logger.error(f"Error decoding JSON response from Lidarr endpoint '{endpoint}'. Response: {response.text[:500]}") + return None + + except Exception as e: + # Catch all exceptions and log them with traceback + error_msg = f"CRITICAL ERROR in Lidarr arr_request: {str(e)}" + lidarr_logger.error(error_msg) + lidarr_logger.error(f"Full traceback: {traceback.format_exc()}") + print(error_msg, file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + return None + +# --- Specific API Functions --- + +def get_system_status(api_url: str, api_key: str, api_timeout: int) -> Optional[Dict]: + """Get Lidarr system status.""" + return arr_request(api_url, api_key, api_timeout, "system/status") + +def check_connection(api_url: str, api_key: str, api_timeout: int) -> bool: + """Check the connection to Lidarr API.""" + try: + # Ensure api_url is properly formatted + if not api_url: + lidarr_logger.error("API URL is empty or not set") + return False + + # Make sure api_url has a scheme + if not (api_url.startswith('http://') or api_url.startswith('https://')): + lidarr_logger.error(f"Invalid URL format: {api_url} - URL must start with http:// or https://") + return False + + # Ensure URL doesn't end with a slash before adding the endpoint + base_url = 
api_url.rstrip('/')
        full_url = f"{base_url}/api/v1/system/status"
        
        # Uses a bare requests.get (not the shared session) so a broken
        # session state cannot mask a connectivity problem.
        response = requests.get(full_url, headers={"X-Api-Key": api_key}, timeout=api_timeout)
        response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
        lidarr_logger.info("Successfully connected to Lidarr.")
        return True
    except requests.exceptions.RequestException as e:
        lidarr_logger.error(f"Error connecting to Lidarr: {e}")
        return False
    except Exception as e:
        lidarr_logger.error(f"An unexpected error occurred during Lidarr connection check: {e}")
        return False

def get_artists(api_url: str, api_key: str, api_timeout: int, artist_id: Optional[int] = None) -> Union[List, Dict, None]:
    """Get artist information from Lidarr.

    Returns a list of all artists when artist_id is None, a single artist
    dict when artist_id is given, or None on API failure.
    """
    endpoint = f"artist/{artist_id}" if artist_id else "artist"
    return arr_request(api_url, api_key, api_timeout, endpoint)

def get_albums(api_url: str, api_key: str, api_timeout: int, album_id: Optional[int] = None, artist_id: Optional[int] = None) -> Union[List, Dict, None]:
    """Get album information from Lidarr.

    album_id selects a single album; artist_id filters the album list by
    artist. Returns a dict (single album), a list (album query), or None
    on API failure.
    """
    params = {}
    if artist_id:
        params['artistId'] = artist_id

    if album_id:
        endpoint = f"album/{album_id}"
    else:
        endpoint = "album"

    return arr_request(api_url, api_key, api_timeout, endpoint, params=params if params else None)

def get_tracks(api_url: str, api_key: str, api_timeout: int, album_id: Optional[int] = None) -> Union[List, None]:
    """Get track information for a specific album.

    album_id is required despite the Optional annotation; a missing id is
    logged as a warning and None is returned without calling the API.
    """
    if not album_id:
        lidarr_logger.warning("get_tracks requires an album_id.")
        return None
    params = {'albumId': album_id}
    return arr_request(api_url, api_key, api_timeout, "track", params=params)

def get_queue(api_url: str, api_key: str, api_timeout: int) -> List:
    """Get the current queue from Lidarr (handles pagination).

    Accumulates 'records' across pages until totalRecords is reached or a
    page fails; on a mid-pagination failure the partial list gathered so
    far is returned rather than raising.
    """
    # Lidarr v1 queue endpoint supports pagination, unlike Sonarr v3's simple list
    all_records = []
    page = 1
    page_size = 1000 # Request large page size

    while True:
        params = {
"page": page, + "pageSize": page_size, + "sortKey": "timeleft", # Example sort key + "sortDir": "asc" + } + response = arr_request(api_url, api_key, api_timeout, "queue", params=params) + + if response and isinstance(response, dict) and 'records' in response: + records = response.get('records', []) + if not records: + break # No more records + all_records.extend(records) + + # Check if this was the last page + total_records = response.get('totalRecords', 0) + if len(all_records) >= total_records: + break + + page += 1 + else: + lidarr_logger.error(f"Failed to get queue page {page} or invalid response format.") + break # Return what we have so far + + return all_records + +def get_download_queue_size(api_url: str, api_key: str, api_timeout: int) -> int: + """Get the current size of the Lidarr download queue.""" + params = {"pageSize": 1} # Only need 1 record to get totalRecords + response = arr_request(api_url, api_key, api_timeout, "queue", params=params) + + if response and isinstance(response, dict) and 'totalRecords' in response: + queue_size = response.get('totalRecords', 0) + lidarr_logger.debug(f"Lidarr download queue size: {queue_size}") + return queue_size + else: + lidarr_logger.error("Error getting Lidarr download queue size.") + return -1 # Indicate error + +def get_missing_albums(api_url: str, api_key: str, api_timeout: int, monitored_only: bool) -> List[Dict[str, Any]]: + """Get missing albums from Lidarr, handling pagination.""" + endpoint = "wanted/missing" + page = 1 + page_size = 1000 + all_missing_albums = [] + total_records_reported = -1 + + lidarr_logger.debug(f"Starting fetch for missing albums (monitored_only={monitored_only}).") + + while True: + params = { + "page": page, + "pageSize": page_size, + "includeArtist": "true" # Include artist info for filtering + # Removed sortKey and sortDir + } + + lidarr_logger.debug(f"Requesting missing albums page {page} with params: {params}") + response = arr_request(api_url, api_key, api_timeout, 
endpoint, params=params) + + if response and isinstance(response, dict) and 'records' in response: + records = response.get('records', []) + total_records_on_page = len(records) + + if page == 1: + total_records_reported = response.get('totalRecords', 0) + lidarr_logger.debug(f"Lidarr API reports {total_records_reported} total missing albums.") + + lidarr_logger.debug(f"Parsed {total_records_on_page} missing album records from Lidarr API JSON (page {page}).") + + if not records: + lidarr_logger.debug(f"No more missing records found on page {page}. Stopping pagination.") + break + + all_missing_albums.extend(records) + + if total_records_reported >= 0 and len(all_missing_albums) >= total_records_reported: + lidarr_logger.debug(f"Fetched {len(all_missing_albums)} records, matching or exceeding total reported ({total_records_reported}). Assuming last page.") + break + + if total_records_on_page < page_size: + lidarr_logger.debug(f"Received {total_records_on_page} records (less than page size {page_size}). 
Assuming last page.") + break + + page += 1 + # time.sleep(0.1) # Optional delay + + else: + lidarr_logger.error(f"Failed to get missing albums page {page} or invalid response format.") + break # Return what we have so far + + lidarr_logger.info(f"Total missing albums fetched across all pages: {len(all_missing_albums)}") + + # Apply monitored filter after fetching + if monitored_only: + original_count = len(all_missing_albums) + # Check both album and artist monitored status + filtered_missing = [ + album for album in all_missing_albums + if album.get('monitored', False) and album.get('artist', {}).get('monitored', False) + ] + lidarr_logger.debug(f"Filtered for monitored_only=True: {len(filtered_missing)} monitored missing albums remain (out of {original_count} total).") + return filtered_missing + else: + lidarr_logger.debug(f"Returning {len(all_missing_albums)} missing albums (monitored_only=False).") + return all_missing_albums + +def get_cutoff_unmet_albums(api_url: str, api_key: str, api_timeout: int, monitored_only: bool) -> List[Dict[str, Any]]: + """Get cutoff unmet albums from Lidarr, handling pagination.""" + # Note: Lidarr API returns ALBUMS for cutoff unmet, not tracks. 
+ endpoint = "wanted/cutoff" + page = 1 + page_size = 1000 # Adjust page size if needed, Lidarr default might be smaller + all_cutoff_unmet = [] + total_records_reported = -1 + + lidarr_logger.debug(f"Starting fetch for cutoff unmet albums (monitored_only={monitored_only}).") + + while True: + params = { + "page": page, + "pageSize": page_size, + "includeArtist": "true" # Include artist info for filtering + # Removed sortKey and sortDir + } + + lidarr_logger.debug(f"Requesting cutoff unmet albums page {page} with params: {params}") + response = arr_request(api_url, api_key, api_timeout, endpoint, params=params) + + if response and isinstance(response, dict) and 'records' in response: + records = response.get('records', []) + total_records_on_page = len(records) + + if page == 1: + total_records_reported = response.get('totalRecords', 0) + lidarr_logger.debug(f"Lidarr API reports {total_records_reported} total cutoff unmet albums.") + + lidarr_logger.debug(f"Parsed {total_records_on_page} cutoff unmet album records from Lidarr API JSON (page {page}).") + + if not records: + lidarr_logger.debug(f"No more cutoff unmet records found on page {page}. Stopping pagination.") + break + + all_cutoff_unmet.extend(records) + + # Check if we have fetched all reported records + if total_records_reported >= 0 and len(all_cutoff_unmet) >= total_records_reported: + lidarr_logger.debug(f"Fetched {len(all_cutoff_unmet)} records, matching or exceeding total reported ({total_records_reported}). Assuming last page.") + break + + # Check if the number of records received is less than the page size + if total_records_on_page < page_size: + lidarr_logger.debug(f"Received {total_records_on_page} records (less than page size {page_size}). 
Assuming last page.") + break + + page += 1 + # time.sleep(0.1) # Optional small delay between pages + + else: + # Log the error based on the response received (handled in arr_request) + lidarr_logger.error(f"Error getting cutoff unmet albums from Lidarr (page {page}) or invalid response format. Stopping pagination.") + # Return what we have so far, or indicate complete failure? Let's return what we have. + break + + lidarr_logger.info(f"Total cutoff unmet albums fetched across all pages: {len(all_cutoff_unmet)}") + + # Apply monitored filter after fetching all pages + if monitored_only: + original_count = len(all_cutoff_unmet) + # Check both album and artist monitored status + filtered_cutoff_unmet = [ + album for album in all_cutoff_unmet + if album.get('monitored', False) and album.get('artist', {}).get('monitored', False) + ] + lidarr_logger.debug(f"Filtered for monitored_only=True: {len(filtered_cutoff_unmet)} monitored cutoff unmet albums remain (out of {original_count} total).") + return filtered_cutoff_unmet + else: + lidarr_logger.debug(f"Returning {len(all_cutoff_unmet)} cutoff unmet albums (monitored_only=False).") + return all_cutoff_unmet + +def search_albums(api_url: str, api_key: str, api_timeout: int, album_ids: List[int]) -> Optional[Dict]: + """Trigger a search for specific albums in Lidarr.""" + if not album_ids: + lidarr_logger.warning("No album IDs provided for search.") + return None + + payload = { + "name": "AlbumSearch", + "albumIds": album_ids + } + response = arr_request(api_url, api_key, api_timeout, "command", method="POST", data=payload) + + if response and isinstance(response, dict) and 'id' in response: + command_id = response.get('id') + lidarr_logger.info(f"Triggered Lidarr AlbumSearch for album IDs: {album_ids}. Command ID: {command_id}") + return response # Return the full command object including ID + else: + lidarr_logger.error(f"Failed to trigger Lidarr AlbumSearch for album IDs {album_ids}. 
Response: {response}")
        return None

def search_artist(api_url: str, api_key: str, api_timeout: int, artist_id: int) -> Optional[Dict]:
    """Trigger a search for a specific artist in Lidarr.

    POSTs an ArtistSearch command (note the plural 'artistIds' payload key,
    even for a single artist). Returns the full command object on success
    or None if the command could not be queued.
    """
    payload = {
        "name": "ArtistSearch",
        "artistIds": [artist_id]
    }
    response = arr_request(api_url, api_key, api_timeout, "command", method="POST", data=payload)

    if response and isinstance(response, dict) and 'id' in response:
        command_id = response.get('id')
        lidarr_logger.info(f"Triggered Lidarr ArtistSearch for artist ID: {artist_id}. Command ID: {command_id}")
        return response # Return the full command object
    else:
        lidarr_logger.error(f"Failed to trigger Lidarr ArtistSearch for artist ID {artist_id}. Response: {response}")
        return None

def refresh_artist(api_url: str, api_key: str, api_timeout: int, artist_id: int) -> Optional[Dict]:
    """Trigger a refresh for a specific artist in Lidarr.

    POSTs a RefreshArtist command (singular 'artistId' payload key, unlike
    ArtistSearch). Returns the full command object on success or None on
    failure.
    """
    payload = {
        "name": "RefreshArtist",
        "artistId": artist_id
    }
    response = arr_request(api_url, api_key, api_timeout, "command", method="POST", data=payload)

    if response and isinstance(response, dict) and 'id' in response:
        command_id = response.get('id')
        lidarr_logger.info(f"Triggered Lidarr RefreshArtist for artist ID: {artist_id}. Command ID: {command_id}")
        return response # Return the full command object
    else:
        lidarr_logger.error(f"Failed to trigger Lidarr RefreshArtist for artist ID {artist_id}. Response: {response}")
        return None

def get_command_status(api_url: str, api_key: str, api_timeout: int, command_id: int) -> Optional[Dict[str, Any]]:
    """Get the status of a Lidarr command.

    Returns the command object (its 'status' field carries the state) or
    None if the lookup failed.
    """
    response = arr_request(api_url, api_key, api_timeout, f"command/{command_id}")
    if response and isinstance(response, dict):
        lidarr_logger.debug(f"Checked Lidarr command status for ID {command_id}: {response.get('status')}")
        return response
    else:
        lidarr_logger.error(f"Error getting Lidarr command status for ID {command_id}. 
Response: {response}") + return None + +def get_artist_by_id(api_url: str, api_key: str, api_timeout: int, artist_id: int) -> Optional[Dict[str, Any]]: + """Get artist details by ID from Lidarr.""" + return arr_request(api_url, api_key, api_timeout, f"artist/{artist_id}") \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/apps/lidarr/missing.py b/Huntarr.io-6.3.6/src/primary/apps/lidarr/missing.py new file mode 100644 index 0000000..3b0af46 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/lidarr/missing.py @@ -0,0 +1,355 @@ +#!/usr/bin/env python3 +""" +Lidarr missing content processing module for Huntarr +Handles missing albums or artists based on configuration. +""" + +import time +import random +import datetime +import os +import json +from typing import Dict, Any, Callable +from src.primary.utils.logger import get_logger +from src.primary.apps.lidarr import api as lidarr_api +from src.primary.stats_manager import increment_stat +from src.primary.stateful_manager import is_processed, add_processed_id +from src.primary.utils.history_utils import log_processed_media +from src.primary.settings_manager import load_settings, get_advanced_setting +from src.primary.state import get_state_file_path, check_state_reset +import json +import os + +# Get the logger for the Lidarr module +lidarr_logger = get_logger(__name__) # Use __name__ for correct logger hierarchy + + +def process_missing_albums( + app_settings: Dict[str, Any], # Combined settings dictionary + stop_check: Callable[[], bool] = None # Function to check for stop signal +) -> bool: + """ + Processes missing albums for a specific Lidarr instance based on settings. + + Args: + app_settings (dict): Dictionary containing combined instance and general settings. + stop_check (Callable[[], bool]): Function to check if shutdown is requested. + + Returns: + bool: True if any items were processed, False otherwise. 
+ """ + + # Copy instance-specific information + instance_name = app_settings.get("instance_name", "Default") + api_url = app_settings.get("api_url", "").strip() + api_key = app_settings.get("api_key", "").strip() + api_timeout = get_advanced_setting("api_timeout", 120) # Use general.json value + monitored_only = app_settings.get("monitored_only", True) + skip_future_releases = app_settings.get("skip_future_releases", False) + hunt_missing_items = app_settings.get("hunt_missing_items", 0) + hunt_missing_mode = app_settings.get("hunt_missing_mode", "album") + command_wait_delay = get_advanced_setting("command_wait_delay", 1) + command_wait_attempts = get_advanced_setting("command_wait_attempts", 600) + + # Early exit for disabled features + if not api_url or not api_key: + lidarr_logger.warning(f"Missing API URL or API key, skipping missing processing for {instance_name}") + return False + + if hunt_missing_items <= 0: + lidarr_logger.debug(f"Hunting for missing items is disabled (hunt_missing_items={hunt_missing_items}) for {instance_name}") + return False + + # Make sure any requested stop function is executable + stop_check = stop_check if callable(stop_check) else lambda: False + + lidarr_logger.info(f"Looking for missing albums for {instance_name}") + lidarr_logger.debug(f"Processing up to {hunt_missing_items} missing items in {hunt_missing_mode} mode") + + # Reset state files if enough time has passed + check_state_reset("lidarr") + + # Initialize processed counter and tracking containers + processed_count = 0 + processed_any = False + processed_artists_or_albums = set() + total_items_to_process = hunt_missing_items + + try: + # Fetch all missing albums first + lidarr_logger.info(f"Fetching all missing albums for {instance_name}...") + missing_items = lidarr_api.get_missing_albums( + api_url, + api_key, + monitored_only=monitored_only, + api_timeout=api_timeout + ) + + if missing_items is None: # API call failed or returned None + lidarr_logger.error(f"Failed 
to get missing items from Lidarr API for {instance_name}.") + return False + + if not missing_items: + lidarr_logger.info(f"No missing albums found for {instance_name} after initial fetch and filtering.") + return False + + lidarr_logger.info(f"Found {len(missing_items)} potentially missing albums for {instance_name} after initial fetch.") + + # --- Filter Future Releases --- # + original_count = len(missing_items) + if skip_future_releases: + now = datetime.datetime.now(datetime.timezone.utc) + valid_missing_items = [] + skipped_count = 0 + for item in missing_items: + release_date_str = item.get('releaseDate') + if release_date_str: + try: + # Lidarr dates often include 'Z' for UTC + release_date = datetime.datetime.fromisoformat(release_date_str.replace('Z', '+00:00')) + if release_date <= now: + valid_missing_items.append(item) + else: + # lidarr_logger.debug(f"Skipping future album ID {item.get('id')} ('{item.get('title')}') release: {release_date_str}") + skipped_count += 1 + except ValueError as e: + lidarr_logger.warning(f"Could not parse release date '{release_date_str}' for album ID {item.get('id')}. Error: {e}. Including it.") + valid_missing_items.append(item) # Keep if date is invalid + else: + valid_missing_items.append(item) # Keep if no release date + + missing_items = valid_missing_items # Replace with filtered list + if skipped_count > 0: + lidarr_logger.info(f"Skipped {skipped_count} future albums based on release date. 
{len(missing_items)} remaining.") + else: + lidarr_logger.debug("Skipping future release filtering as 'skip_future_releases' is False.") + + # Check if any items remain after filtering + if not missing_items: + lidarr_logger.info(f"No missing albums left after filtering future releases for {instance_name}.") + return False + + # Process based on mode + lidarr_logger.info(f"Processing missing items in '{hunt_missing_mode}' mode.") + + target_entities = [] + search_entity_type = "album" # Default to album + + if hunt_missing_mode == "artist": + search_entity_type = "artist" + # Group by artist ID + items_by_artist = {} + for item in missing_items: # Use the potentially filtered missing_items list + artist_id = item.get('artistId') + lidarr_logger.debug(f"Missing album item: {item.get('title')} by artistId: {artist_id}") + if artist_id: + if artist_id not in items_by_artist: + items_by_artist[artist_id] = [] + items_by_artist[artist_id].append(item) + + # In artist mode, map from artists to their albums + # First, get all artist IDs + target_entities = list(items_by_artist.keys()) + + # Filter out already processed artists + lidarr_logger.info(f"Found {len(target_entities)} artists with missing albums before filtering") + unprocessed_entities = [eid for eid in target_entities + if not is_processed("lidarr", instance_name, str(eid))] + + lidarr_logger.info(f"Found {len(unprocessed_entities)} unprocessed artists out of {len(target_entities)} total") + else: + # In album mode, directly track album IDs + target_entities = [item['id'] for item in missing_items] + + # Filter out processed albums + lidarr_logger.info(f"Found {len(target_entities)} missing albums before filtering") + unprocessed_entities = [eid for eid in target_entities + if not is_processed("lidarr", instance_name, str(eid))] + + lidarr_logger.info(f"Found {len(unprocessed_entities)} unprocessed albums out of {len(target_entities)} total") + + if not unprocessed_entities: + lidarr_logger.info(f"No 
unprocessed {search_entity_type}s found for {instance_name}. All available {search_entity_type}s have been processed.") + return False + + # Select entities to search + if not unprocessed_entities: + lidarr_logger.info(f"No {search_entity_type}s found to process after grouping/filtering.") + return False + + entities_to_search_ids = random.sample(unprocessed_entities, min(len(unprocessed_entities), total_items_to_process)) + lidarr_logger.info(f"Randomly selected {len(entities_to_search_ids)} {search_entity_type}s to search.") + lidarr_logger.debug(f"Unprocessed entities: {unprocessed_entities}") + lidarr_logger.debug(f"Entities to search: {entities_to_search_ids}") + + # --- Trigger Search (Artist or Album) --- + if hunt_missing_mode == "artist": + lidarr_logger.info(f"Artist-based missing mode selected") + lidarr_logger.info(f"Found {len(entities_to_search_ids)} unprocessed artists to search.") + + # Prepare a list for artist details log + artist_details_log = [] + + # First, fetch detailed artist info for each artist ID to enhance logs + artist_details = {} + for artist_id in entities_to_search_ids: + # Get artist details from API for better logging + artist_data = lidarr_api.get_artist_by_id(api_url, api_key, api_timeout, artist_id) + if artist_data: + artist_details[artist_id] = artist_data + + lidarr_logger.info(f"Artists selected for processing in this cycle:") + for i, artist_id in enumerate(entities_to_search_ids): + # Get artist name and any additional details + artist_name = f"Artist ID {artist_id}" # Default if name not found + artist_metadata = "" + + if artist_id in artist_details: + artist_data = artist_details[artist_id] + artist_name = artist_data.get('artistName', artist_name) + # Add year active or debut year if available + if 'statistics' in artist_data and 'albumCount' in artist_data['statistics']: + album_count = artist_data['statistics']['albumCount'] + artist_metadata = f"({album_count} albums)" + # Get genre info if available + if 'genres' 
in artist_data and artist_data['genres']: + genres = ", ".join(artist_data['genres'][:2]) # Limit to first 2 genres + if artist_metadata: + artist_metadata = f"{artist_metadata} - {genres}" + else: + artist_metadata = f"({genres})" + + detail_line = f"{i+1}. {artist_name} {artist_metadata} - ID: {artist_id}" + artist_details_log.append(detail_line) + lidarr_logger.info(f" {detail_line}") + + lidarr_logger.info(f"Triggering Artist Search for {len(entities_to_search_ids)} artists on {instance_name}...") + for i, artist_id in enumerate(entities_to_search_ids): + if stop_check(): # Use the new stop_check function + lidarr_logger.warning("Shutdown requested during artist search trigger.") + break + + # Get artist name from cached details or first album + artist_name = f"Artist ID {artist_id}" # Default if name not found + if artist_id in artist_details: + artist_data = artist_details[artist_id] + artist_name = artist_data.get('artistName', artist_name) + elif artist_id in items_by_artist and items_by_artist[artist_id]: + # Fallback to album info if direct artist details not available + first_album = items_by_artist[artist_id][0] + artist_info = first_album.get('artist') + if artist_info and isinstance(artist_info, dict): + artist_name = artist_info.get('artistName', artist_name) + + # Mark the artist as processed right away - BEFORE triggering the search + success = add_processed_id("lidarr", instance_name, str(artist_id)) + lidarr_logger.debug(f"Added artist ID {artist_id} to processed list for {instance_name}, success: {success}") + + # Trigger the search AFTER marking as processed + command_result = lidarr_api.search_artist(api_url, api_key, api_timeout, artist_id) + command_id = command_result.get('id', 'unknown') if command_result else 'failed' + lidarr_logger.info(f"Triggered Lidarr ArtistSearch for artist ID: {artist_id}, Command ID: {command_id}") + + # Increment stats for UI tracking + if command_result: + increment_stat("lidarr", "hunted") + processed_count += 
1 # Count successful searches + processed_artists_or_albums.add(artist_id) + + # Also mark all albums from this artist as processed + if artist_id in items_by_artist: + for album in items_by_artist[artist_id]: + album_id = album.get('id') + if album_id: + album_success = add_processed_id("lidarr", instance_name, str(album_id)) + lidarr_logger.debug(f"Added album ID {album_id} to processed list for {instance_name}, success: {album_success}") + + # Log to history system + log_processed_media("lidarr", f"{artist_name}", artist_id, instance_name, "missing") + lidarr_logger.debug(f"Logged history entry for artist: {artist_name}") + + time.sleep(0.1) # Small delay between triggers + else: # Album mode + album_ids_to_search = list(entities_to_search_ids) + if stop_check(): # Use the new stop_check function + lidarr_logger.warning("Shutdown requested before album search trigger.") + return False + + # Prepare descriptive list for logging + album_details_log = [] + # Create a dict for quick lookup based on album ID + missing_items_dict = {item['id']: item for item in missing_items if 'id' in item} + + # First, fetch additional album details for better logging if needed + album_details = {} + for album_id in album_ids_to_search: + album_details[album_id] = lidarr_api.get_albums(api_url, api_key, api_timeout, album_id) + + lidarr_logger.info(f"Albums selected for processing in this cycle:") + for idx, album_id in enumerate(album_ids_to_search): + album_info = missing_items_dict.get(album_id) + if album_info: + # Safely get title and artist name, provide defaults + title = album_info.get('title', f'Album ID {album_id}') + artist_name = album_info.get('artist', {}).get('artistName', 'Unknown Artist') + + # Get additional metadata if available + release_year = "" + if 'releaseDate' in album_info and album_info['releaseDate']: + try: + release_date = album_info['releaseDate'].split('T')[0] + release_year = f"({release_date[:4]})" + except (IndexError, ValueError): + pass + + # 
Get quality if available + quality_info = "" + if album_details.get(album_id) and 'quality' in album_details[album_id]: + quality = album_details[album_id]['quality'].get('quality', {}).get('name', '') + if quality: + quality_info = f"[{quality}]" + + detail_line = f"{idx+1}. {artist_name} - {title} {release_year} {quality_info} - ID: {album_id}" + album_details_log.append(detail_line) + lidarr_logger.info(f" {detail_line}") + else: + # Fallback if album ID wasn't found in the fetched missing items (should be rare) + detail_line = f"{idx+1}. Album ID {album_id} (Details not found)" + album_details_log.append(detail_line) + lidarr_logger.info(f" {detail_line}") + + # Mark the albums as processed BEFORE triggering the search + for album_id in album_ids_to_search: + success = add_processed_id("lidarr", instance_name, str(album_id)) + lidarr_logger.debug(f"Added album ID {album_id} to processed list for {instance_name}, success: {success}") + + # Now trigger the search + command_id = lidarr_api.search_albums(api_url, api_key, api_timeout, album_ids_to_search) + if command_id: + # Log after successful search + lidarr_logger.debug(f"Album search command triggered with ID: {command_id} for albums: [{', '.join(album_details_log)}]") + increment_stat("lidarr", "hunted") # Changed from "missing" to "hunted" + processed_count += len(album_ids_to_search) # Count albums searched + processed_artists_or_albums.update(album_ids_to_search) + + # Log to history system + for album_id in album_ids_to_search: + album_info = missing_items_dict.get(album_id) + if album_info: + # Get title and artist name for the history entry + title = album_info.get('title', f'Album ID {album_id}') + artist_name = album_info.get('artist', {}).get('artistName', 'Unknown Artist') + media_name = f"{artist_name} - {title}" + log_processed_media("lidarr", media_name, album_id, instance_name, "missing") + lidarr_logger.debug(f"Logged history entry for album: {media_name}") + + time.sleep(command_wait_delay) # 
Basic delay after the single command + else: + lidarr_logger.warning(f"Failed to trigger album search for IDs {album_ids_to_search} on {instance_name}.") + + except Exception as e: + lidarr_logger.error(f"An error occurred during missing album processing for {instance_name}: {e}", exc_info=True) + return False + + lidarr_logger.info(f"Missing album processing finished for {instance_name}. Processed {processed_count} items/searches ({len(processed_artists_or_albums)} unique {search_entity_type}s).") + return processed_count > 0 \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/apps/lidarr/upgrade.py b/Huntarr.io-6.3.6/src/primary/apps/lidarr/upgrade.py new file mode 100644 index 0000000..a85fd19 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/lidarr/upgrade.py @@ -0,0 +1,184 @@ +#!/usr/bin/env python3 +""" +Lidarr cutoff upgrade processing module for Huntarr +Handles albums that do not meet the configured quality cutoff. +""" + +import time +import random +from typing import Dict, Any, Optional, Callable, List, Union, Set # Added List, Union and Set +from src.primary.utils.logger import get_logger +from src.primary.apps.lidarr import api as lidarr_api +from src.primary.utils.history_utils import log_processed_media +from src.primary.stateful_manager import is_processed, add_processed_id +from src.primary.stats_manager import increment_stat +from src.primary.settings_manager import load_settings, get_advanced_setting +from src.primary.state import check_state_reset # Add the missing import + +# Get logger for the app +lidarr_logger = get_logger(__name__) # Use __name__ for correct logger hierarchy + +def process_cutoff_upgrades( + app_settings: Dict[str, Any], # Changed signature: Use app_settings + stop_check: Callable[[], bool] # Changed signature: Use stop_check +) -> bool: + """ + Processes cutoff upgrades for albums in a specific Lidarr instance. 
+ + Args: + app_settings (dict): Dictionary containing combined instance and general Lidarr settings. + stop_check (Callable[[], bool]): Function to check if shutdown is requested. + + Returns: + bool: True if any items were processed, False otherwise. + """ + lidarr_logger.info("Starting quality cutoff upgrades processing cycle for Lidarr.") + processed_any = False + + # --- Extract Settings --- # + # Instance details are now part of app_settings passed from background loop + instance_name = app_settings.get("instance_name", "Lidarr Default") + + # Extract necessary settings + api_url = app_settings.get("api_url", "").strip() + api_key = app_settings.get("api_key", "").strip() + api_timeout = get_advanced_setting("api_timeout", 120) # Use general.json value + + # Get command wait settings from general.json + command_wait_delay = get_advanced_setting("command_wait_delay", 1) + command_wait_attempts = get_advanced_setting("command_wait_attempts", 600) + + # General Lidarr settings (also from app_settings) + hunt_upgrade_items = app_settings.get("hunt_upgrade_items", 0) + monitored_only = app_settings.get("monitored_only", True) + + lidarr_logger.info(f"Using API timeout of {api_timeout} seconds for Lidarr upgrades") + + lidarr_logger.debug(f"Processing upgrades for instance: {instance_name}") + # lidarr_logger.debug(f"Instance Config (extracted): {{ 'api_url': '{api_url}', 'api_key': '***' }}") + # lidarr_logger.debug(f"General Settings (from app_settings): {app_settings}") # Avoid logging full settings potentially containing sensitive info + + # Check if API URL or Key are missing + if not api_url or not api_key: + lidarr_logger.error(f"Missing API URL or Key for instance '{instance_name}'. Cannot process upgrades.") + return False + + # Check if upgrade hunting is enabled + if hunt_upgrade_items <= 0: + lidarr_logger.info(f"'hunt_upgrade_items' is {hunt_upgrade_items} or less. 
Skipping upgrade processing for {instance_name}.") + return False + + lidarr_logger.info(f"Looking for quality upgrades for {instance_name}") + lidarr_logger.debug(f"Processing up to {hunt_upgrade_items} items for quality upgrade") + + # Reset state files if enough time has passed + check_state_reset("lidarr") + + processed_count = 0 + processed_any = False + + try: + lidarr_logger.info(f"Fetching cutoff unmet albums for {instance_name}...") + # Pass necessary details extracted above to the API function + # Corrected function name from get_cutoff_unmet to get_cutoff_unmet_albums + cutoff_unmet_albums = lidarr_api.get_cutoff_unmet_albums( + api_url, + api_key, + monitored_only=monitored_only, + api_timeout=api_timeout + ) + + if not cutoff_unmet_albums: + lidarr_logger.info(f"No cutoff unmet albums found for {instance_name}.") + return False + + lidarr_logger.info(f"Found {len(cutoff_unmet_albums)} cutoff unmet albums for {instance_name}.") + + # Filter out already processed items + unprocessed_albums = [] + for album in cutoff_unmet_albums: + album_id = str(album.get('id')) + if not is_processed("lidarr", instance_name, album_id): + unprocessed_albums.append(album) + else: + lidarr_logger.debug(f"Skipping already processed album ID: {album_id}") + + lidarr_logger.info(f"Found {len(unprocessed_albums)} unprocessed albums out of {len(cutoff_unmet_albums)} total albums eligible for quality upgrade.") + + if not unprocessed_albums: + lidarr_logger.info("No unprocessed albums found for quality upgrade. Skipping cycle.") + return False + + # Always select albums randomly + albums_to_search = random.sample(unprocessed_albums, min(len(unprocessed_albums), hunt_upgrade_items)) + lidarr_logger.info(f"Randomly selected {len(albums_to_search)} albums for upgrade search.") + + album_ids_to_search = [album['id'] for album in albums_to_search] + + if not album_ids_to_search: + lidarr_logger.info("No album IDs selected for upgrade search. 
Skipping trigger.") + return False + + # Prepare detailed album information for logging + album_details_log = [] + for i, album in enumerate(albums_to_search): + # Extract useful information for logging + album_title = album.get('title', f'Album ID {album["id"]}') + artist_name = album.get('artist', {}).get('artistName', 'Unknown Artist') + quality = album.get('quality', {}).get('quality', {}).get('name', 'Unknown Quality') + album_details_log.append(f"{i+1}. {artist_name} - {album_title} (ID: {album['id']}, Current Quality: {quality})") + + # Log each album on a separate line for better readability + if album_details_log: + lidarr_logger.info(f"Albums selected for quality upgrade in this cycle:") + for album_detail in album_details_log: + lidarr_logger.info(f" {album_detail}") + + # Check stop event before triggering search + if stop_check and stop_check(): # Use the passed stop_check function + lidarr_logger.warning("Shutdown requested, stopping upgrade album search.") + return False # Return False as no search was triggered in this case + + # Mark albums as processed BEFORE triggering search + for album_id in album_ids_to_search: + add_processed_id("lidarr", instance_name, str(album_id)) + lidarr_logger.debug(f"Added album ID {album_id} to processed list for {instance_name}") + + lidarr_logger.info(f"Triggering Album Search for {len(album_ids_to_search)} albums for upgrade on instance {instance_name}: {album_ids_to_search}") + # Pass necessary details extracted above to the API function + command_id = lidarr_api.search_albums( + api_url, + api_key, + api_timeout, + album_ids_to_search + ) + if command_id: + lidarr_logger.debug(f"Upgrade album search command triggered with ID: {command_id} for albums: {album_ids_to_search}") + increment_stat("lidarr", "upgraded") # Use appropriate stat key + + # Log to history + for album_id in album_ids_to_search: + # Find the album info for this ID to log to history + for album in albums_to_search: + if album['id'] == album_id: 
+ album_title = album.get('title', f'Album ID {album_id}') + artist_name = album.get('artist', {}).get('artistName', 'Unknown Artist') + media_name = f"{artist_name} - {album_title}" + log_processed_media("lidarr", media_name, album_id, instance_name, "upgrade") + lidarr_logger.debug(f"Logged quality upgrade to history for album ID {album_id}") + break + + time.sleep(command_wait_delay) # Basic delay + processed_count += len(album_ids_to_search) + processed_any = True # Mark that we processed something + # Consider adding wait_for_command logic if needed + # wait_for_command(api_url, api_key, command_id, command_wait_delay, command_wait_attempts) + else: + lidarr_logger.warning(f"Failed to trigger upgrade album search for IDs {album_ids_to_search} on {instance_name}.") + + except Exception as e: + lidarr_logger.error(f"An error occurred during upgrade album processing for {instance_name}: {e}", exc_info=True) + return False # Indicate failure + + lidarr_logger.info(f"Upgrade album processing finished for {instance_name}. 
Triggered searches for {processed_count} items.") + return processed_any # Return True if anything was processed \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/apps/lidarr_routes.py b/Huntarr.io-6.3.6/src/primary/apps/lidarr_routes.py new file mode 100644 index 0000000..bd78fda --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/lidarr_routes.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python3 + +from flask import Blueprint, request, jsonify +import datetime, os, requests +from src.primary import keys_manager +from src.primary.state import get_state_file_path, reset_state_file +from src.primary.utils.logger import get_logger +import traceback +import socket +from urllib.parse import urlparse + +lidarr_bp = Blueprint('lidarr', __name__) +lidarr_logger = get_logger("lidarr") + +# Make sure we're using the correct state files +PROCESSED_MISSING_FILE = get_state_file_path("lidarr", "processed_missing") +PROCESSED_UPGRADES_FILE = get_state_file_path("lidarr", "processed_upgrades") + +@lidarr_bp.route('/test-connection', methods=['POST']) +def test_connection(): + """Test connection to a Lidarr API instance""" + data = request.json + api_url = data.get('api_url') + api_key = data.get('api_key') + api_timeout = data.get('api_timeout', 30) # Use longer timeout for connection test + + if not api_url or not api_key: + return jsonify({"success": False, "message": "API URL and API Key are required"}), 400 + + lidarr_logger.info(f"Testing connection to Lidarr API at {api_url}") + + # Validate URL format + if not (api_url.startswith('http://') or api_url.startswith('https://')): + error_msg = "API URL must start with http:// or https://" + lidarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 400 + + # Try to establish a socket connection first to check basic connectivity + parsed_url = urlparse(api_url) + hostname = parsed_url.hostname + port = parsed_url.port or (443 if parsed_url.scheme == 'https' else 80) + + try: + # 
Try socket connection for quick feedback on connectivity issues + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(3) # Short timeout for quick feedback + result = sock.connect_ex((hostname, port)) + sock.close() + + if result != 0: + error_msg = f"Connection refused - Unable to connect to {hostname}:{port}. Please check if the server is running and the port is correct." + lidarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 404 + except socket.gaierror: + error_msg = f"DNS resolution failed - Cannot resolve hostname: {hostname}. Please check your URL." + lidarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 404 + except Exception as e: + # Log the socket testing error but continue with the full request + lidarr_logger.debug(f"Socket test error, continuing with full request: {str(e)}") + + # For Lidarr, use api/v1 + url = f"{api_url.rstrip('/')}/api/v1/system/status" + headers = { + "X-Api-Key": api_key, + "Content-Type": "application/json" + } + + try: + response = requests.get(url, headers=headers, timeout=(10, api_timeout)) + + # For HTTP errors, provide more specific feedback + if response.status_code == 401: + error_msg = "Authentication failed: Invalid API key" + lidarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 401 + elif response.status_code == 403: + error_msg = "Access forbidden: Check API key permissions" + lidarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 403 + elif response.status_code == 404: + error_msg = "API endpoint not found: This doesn't appear to be a valid Lidarr server. Check your URL." 
+ lidarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 404 + elif response.status_code >= 500: + error_msg = f"Lidarr server error (HTTP {response.status_code}): The Lidarr server is experiencing issues" + lidarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), response.status_code + + # Raise for other HTTP errors + response.raise_for_status() + + try: + response_data = response.json() + version = response_data.get('version', 'unknown') + lidarr_logger.info(f"Successfully connected to Lidarr API version: {version}") + + return jsonify({ + "success": True, + "message": "Successfully connected to Lidarr API", + "version": version + }) + except ValueError: + error_msg = "Invalid JSON response from Lidarr API - This doesn't appear to be a valid Lidarr server" + lidarr_logger.error(f"{error_msg}. Response content: {response.text[:200]}") + return jsonify({"success": False, "message": error_msg}), 500 + + except requests.exceptions.ConnectionError as e: + # Handle different types of connection errors + error_details = str(e) + if "Connection refused" in error_details: + error_msg = f"Connection refused - Lidarr is not running on {api_url} or the port is incorrect" + elif "Name or service not known" in error_details or "getaddrinfo failed" in error_details: + error_msg = f"DNS resolution failed - Cannot find host '{urlparse(api_url).hostname}'. Check your URL." 
+ else: + error_msg = f"Connection error - Check if Lidarr is running: {error_details}" + + lidarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 404 + except requests.exceptions.Timeout: + error_msg = f"Connection timed out - Lidarr took too long to respond" + lidarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 504 + except requests.exceptions.RequestException as e: + error_msg = f"Connection test failed: {str(e)}" + lidarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 500 diff --git a/Huntarr.io-6.3.6/src/primary/apps/radarr.py b/Huntarr.io-6.3.6/src/primary/apps/radarr.py new file mode 100644 index 0000000..14502ed --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/radarr.py @@ -0,0 +1,147 @@ +from flask import Blueprint, request, jsonify +import datetime, os, requests +from src.primary import keys_manager +from src.primary.utils.logger import get_logger +from src.primary.state import get_state_file_path +from src.primary.settings_manager import load_settings + +radarr_bp = Blueprint('radarr', __name__) +radarr_logger = get_logger("radarr") + +# Make sure we're using the correct state files +PROCESSED_MISSING_FILE = get_state_file_path("radarr", "processed_missing") +PROCESSED_UPGRADES_FILE = get_state_file_path("radarr", "processed_upgrades") + +@radarr_bp.route('/test-connection', methods=['POST']) +def test_connection(): + """Test connection to a Radarr API instance with comprehensive diagnostics""" + data = request.json + api_url = data.get('api_url') + api_key = data.get('api_key') + api_timeout = data.get('api_timeout', 30) # Use longer timeout for connection test + + if not api_url or not api_key: + return jsonify({"success": False, "message": "API URL and API Key are required"}), 400 + + # Log the test attempt + radarr_logger.info(f"Testing connection to Radarr API at {api_url}") + + # First check if URL is properly formatted + if not 
(api_url.startswith('http://') or api_url.startswith('https://')): + error_msg = "API URL must start with http:// or https://" + radarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 400 + + # For Radarr, use api/v3 + api_base = "api/v3" + test_url = f"{api_url.rstrip('/')}/{api_base}/system/status" + headers = {'X-Api-Key': api_key} + + try: + # Use a connection timeout separate from read timeout + response = requests.get(test_url, headers=headers, timeout=(10, api_timeout)) + + # Log HTTP status code for diagnostic purposes + radarr_logger.debug(f"Radarr API status code: {response.status_code}") + + # Check HTTP status code + response.raise_for_status() + + # Ensure the response is valid JSON + try: + response_data = response.json() + + # We no longer save keys here since we use instances + # keys_manager.save_api_keys("radarr", api_url, api_key) + + radarr_logger.info(f"Successfully connected to Radarr API version: {response_data.get('version', 'unknown')}") + + # Return success with some useful information + return jsonify({ + "success": True, + "message": "Successfully connected to Radarr API", + "version": response_data.get('version', 'unknown') + }) + except ValueError: + error_msg = "Invalid JSON response from Radarr API" + radarr_logger.error(f"{error_msg}. 
Response content: {response.text[:200]}") + return jsonify({"success": False, "message": error_msg}), 500 + + except requests.exceptions.Timeout as e: + error_msg = f"Connection timed out after {api_timeout} seconds" + radarr_logger.error(f"{error_msg}: {str(e)}") + return jsonify({"success": False, "message": error_msg}), 504 + + except requests.exceptions.ConnectionError as e: + error_msg = "Connection error - check hostname and port" + details = str(e) + # Check for common DNS resolution errors + if "Name or service not known" in details or "getaddrinfo failed" in details: + error_msg = "DNS resolution failed - check hostname" + # Check for common connection refused errors + elif "Connection refused" in details: + error_msg = "Connection refused - check if Radarr is running and the port is correct" + + radarr_logger.error(f"{error_msg}: {details}") + return jsonify({"success": False, "message": f"{error_msg}: {details}"}), 502 + + except requests.exceptions.RequestException as e: + error_message = f"Connection failed: {str(e)}" + + if hasattr(e, 'response') and e.response is not None: + status_code = e.response.status_code + + # Add specific messages based on common status codes + if status_code == 401: + error_message = "Authentication failed: Invalid API key" + elif status_code == 403: + error_message = "Access forbidden: Check API key permissions" + elif status_code == 404: + error_message = "API endpoint not found: Check API URL" + elif status_code >= 500: + error_message = f"Radarr server error (HTTP {status_code}): The Radarr server is experiencing issues" + + # Try to extract more error details if available + try: + error_details = e.response.json() + error_message += f" - {error_details.get('message', 'No details')}" + except ValueError: + if e.response.text: + error_message += f" - Response: {e.response.text[:200]}" + + radarr_logger.error(error_message) + return jsonify({"success": False, "message": error_message}), 500 + + except Exception as e: + 
error_msg = f"An unexpected error occurred: {str(e)}" + radarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 500 + +# Function to check if Radarr is configured +def is_configured(): + """Check if Radarr API credentials are configured by checking if at least one instance is enabled""" + settings = load_settings("radarr") + + if not settings: + radarr_logger.debug("No settings found for Radarr") + return False + + # Check if instances are configured + if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]: + for instance in settings["instances"]: + if instance.get("enabled", True) and instance.get("api_url") and instance.get("api_key"): + radarr_logger.debug(f"Found configured Radarr instance: {instance.get('name', 'Unnamed')}") + return True + + radarr_logger.debug("No enabled Radarr instances found with valid API URL and key") + return False + + # Fallback to legacy single-instance config + api_url = settings.get("api_url") + api_key = settings.get("api_key") + return bool(api_url and api_key) + +# Get all valid instances from settings +# get_configured_instances function has been moved to src/primary/apps/radarr/__init__.py + +# Function to reset the processed IDs files diff --git a/Huntarr.io-6.3.6/src/primary/apps/radarr/__init__.py b/Huntarr.io-6.3.6/src/primary/apps/radarr/__init__.py new file mode 100644 index 0000000..facdd9d --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/radarr/__init__.py @@ -0,0 +1,53 @@ +""" +Radarr app module for Huntarr +Contains functionality for missing movies and quality upgrades in Radarr +""" + +# Module exports +from src.primary.apps.radarr.missing import process_missing_movies +from src.primary.apps.radarr.upgrade import process_cutoff_upgrades + +# Add necessary imports for get_configured_instances +from src.primary.settings_manager import load_settings +from src.primary.utils.logger import get_logger + +radarr_logger = get_logger("radarr") # 
Get the logger instance + +def get_configured_instances(): + """Get all configured and enabled Radarr instances""" + settings = load_settings("radarr") + instances = [] + + if not settings: + radarr_logger.debug("No settings found for Radarr") + return instances + + # Check if instances are configured + if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]: + for instance in settings["instances"]: + if instance.get("enabled", True) and instance.get("api_url") and instance.get("api_key"): + # Create a settings object for this instance by combining global settings with instance-specific ones + instance_settings = settings.copy() + # Remove instances list to avoid confusion + if "instances" in instance_settings: + del instance_settings["instances"] + + # Override with instance-specific connection settings + instance_settings["api_url"] = instance.get("api_url") + instance_settings["api_key"] = instance.get("api_key") + instance_settings["instance_name"] = instance.get("name", "Default") + + instances.append(instance_settings) + else: + # Fallback to legacy single-instance config + api_url = settings.get("api_url") + api_key = settings.get("api_key") + if api_url and api_key: + settings["instance_name"] = "Default" + instances.append(settings) + + # Use debug level to avoid spamming logs, especially with 0 instances + radarr_logger.debug(f"Found {len(instances)} configured and enabled Radarr instances") + return instances + +__all__ = ["process_missing_movies", "process_cutoff_upgrades", "get_configured_instances"] \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/apps/radarr/api.py b/Huntarr.io-6.3.6/src/primary/apps/radarr/api.py new file mode 100644 index 0000000..c2a0f9b --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/radarr/api.py @@ -0,0 +1,337 @@ +#!/usr/bin/env python3 +""" +Radarr-specific API functions +Handles all communication with the Radarr API +""" + +import requests +import json 
+import time +import datetime +from typing import List, Dict, Any, Optional, Union +# Correct the import path +from src.primary.utils.logger import get_logger + +# Get logger for the Radarr app +radarr_logger = get_logger("radarr") + +# Use a session for better performance +session = requests.Session() + +def arr_request(api_url: str, api_key: str, api_timeout: int, endpoint: str, method: str = "GET", data: Dict = None) -> Any: + """ + Make a request to the Radarr API. + + Args: + api_url: The base URL of the Radarr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + endpoint: The API endpoint to call + method: HTTP method (GET, POST, PUT, DELETE) + data: Optional data to send with the request + + Returns: + The JSON response from the API, or None if the request failed + """ + if not api_url or not api_key: + radarr_logger.error("API URL or API key is missing. Check your settings.") + return None + + # Ensure api_url has a scheme + if not (api_url.startswith('http://') or api_url.startswith('https://')): + radarr_logger.error(f"Invalid URL format: {api_url} - URL must start with http:// or https://") + return None + + # Full URL - ensure no double slashes + url = f"{api_url.rstrip('/')}/api/v3/{endpoint.lstrip('/')}" + + # Headers + headers = { + "X-Api-Key": api_key, + "Content-Type": "application/json" + } + + try: + if method == "GET": + response = session.get(url, headers=headers, timeout=api_timeout) + elif method == "POST": + response = session.post(url, headers=headers, json=data, timeout=api_timeout) + elif method == "PUT": + response = session.put(url, headers=headers, json=data, timeout=api_timeout) + elif method == "DELETE": + response = session.delete(url, headers=headers, timeout=api_timeout) + else: + radarr_logger.error(f"Unsupported HTTP method: {method}") + return None + + # Check for errors + response.raise_for_status() + + # Parse JSON response + if response.text: + return response.json() + return {} + + 
except requests.exceptions.RequestException as e: + radarr_logger.error(f"API request failed: {e}") + return None + +def get_download_queue_size(api_url: str, api_key: str, api_timeout: int) -> int: + """ + Get the current size of the download queue. + + Args: + api_url: The base URL of the Radarr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + + Returns: + The number of items in the download queue, or -1 if the request failed + """ + if not api_url or not api_key: + radarr_logger.error("Radarr API URL or API Key not provided for queue size check.") + return -1 + try: + # Radarr uses /api/v3/queue + endpoint = f"{api_url.rstrip('/')}/api/v3/queue?page=1&pageSize=1000" # Fetch a large page size + headers = {"X-Api-Key": api_key} + response = session.get(endpoint, headers=headers, timeout=api_timeout) + response.raise_for_status() + queue_data = response.json() + queue_size = queue_data.get('totalRecords', 0) + radarr_logger.debug(f"Radarr download queue size: {queue_size}") + return queue_size + except requests.exceptions.RequestException as e: + radarr_logger.error(f"Error getting Radarr download queue size: {e}") + return -1 # Return -1 to indicate an error + except Exception as e: + radarr_logger.error(f"An unexpected error occurred while getting Radarr queue size: {e}") + return -1 + +def get_movies_with_missing(api_url: str, api_key: str, api_timeout: int, monitored_only: bool) -> Optional[List[Dict]]: + """ + Get a list of movies with missing files (not downloaded/available). + + Args: + api_url: The base URL of the Radarr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + monitored_only: If True, only return monitored movies. + + Returns: + A list of movie objects with missing files, or None if the request failed. 
+ """ + # Use the updated arr_request with passed arguments + movies = arr_request(api_url, api_key, api_timeout, "movie") + if movies is None: # Check for None explicitly, as an empty list is valid + radarr_logger.error("Failed to retrieve movies from Radarr API.") + return None + + missing_movies = [] + for movie in movies: + is_monitored = movie.get("monitored", False) + has_file = movie.get("hasFile", False) + # Apply monitored_only filter if requested + if not has_file and (not monitored_only or is_monitored): + missing_movies.append(movie) + + radarr_logger.debug(f"Found {len(missing_movies)} missing movies (monitored_only={monitored_only}).") + return missing_movies + +def get_cutoff_unmet_movies(api_url: str, api_key: str, api_timeout: int, monitored_only: bool) -> Optional[List[Dict]]: + """ + Get a list of movies that don't meet their quality profile cutoff. + + Args: + api_url: The base URL of the Radarr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + monitored_only: If True, only return monitored movies. + + Returns: + A list of movie objects that need quality upgrades, or None if the request failed. + """ + # Radarr API endpoint for cutoff unmet movies + # Note: Radarr's /api/v3/movie endpoint doesn't directly support a simple 'cutoffUnmet=true' like Sonarr's wanted/cutoff. + # We need to fetch all movies and filter locally, or use the /api/v3/movie/lookup endpoint if searching by TMDB/IMDB ID. + # Fetching all movies is simpler for now. + radarr_logger.debug("Fetching all movies to determine cutoff unmet status...") + movies = arr_request(api_url, api_key, api_timeout, "movie") + if movies is None: + radarr_logger.error("Failed to retrieve movies from Radarr API for cutoff check.") + return None + + # Need quality profile information to determine cutoff unmet status. + # Fetch quality profiles first. 
+ profiles = arr_request(api_url, api_key, api_timeout, "qualityprofile") + if profiles is None: + radarr_logger.error("Failed to retrieve quality profiles from Radarr API.") + return None + + # Create a map for easy lookup: profile_id -> cutoff_format_score (or cutoff quality ID) + # Radarr profiles have 'cutoff' (quality ID) and potentially 'cutoffFormatScore' + profile_cutoff_map = {p['id']: p.get('cutoff') for p in profiles} + # TODO: Potentially incorporate cutoffFormatScore if needed for more complex logic + + unmet_movies = [] + for movie in movies: + is_monitored = movie.get("monitored", False) + has_file = movie.get("hasFile", False) + profile_id = movie.get("qualityProfileId") + movie_file = movie.get("movieFile") + + # Apply monitored_only filter if requested + if not monitored_only or is_monitored: + if has_file and movie_file and profile_id in profile_cutoff_map: + cutoff_quality_id = profile_cutoff_map[profile_id] + current_quality_id = movie_file.get("quality", {}).get("quality", {}).get("id") + + # Simple check: if current quality ID is less than cutoff quality ID + # This assumes quality IDs are ordered correctly (lower ID = lower quality) + # A more robust check might involve comparing quality *names* or *scores* if IDs aren't reliable order indicators. 
+ if current_quality_id is not None and cutoff_quality_id is not None and current_quality_id < cutoff_quality_id: + # TODO: Add check for cutoffFormatScore if necessary + unmet_movies.append(movie) + # else: # Log why a movie wasn't considered unmet (optional) + # if not has_file: radarr_logger.debug(f"Skipping {movie.get('title')} - no file.") + # elif not movie_file: radarr_logger.debug(f"Skipping {movie.get('title')} - no movieFile info.") + # elif profile_id not in profile_cutoff_map: radarr_logger.debug(f"Skipping {movie.get('title')} - profile ID {profile_id} not found.") + + radarr_logger.debug(f"Found {len(unmet_movies)} cutoff unmet movies (monitored_only={monitored_only}).") + return unmet_movies + +def refresh_movie(api_url: str, api_key: str, api_timeout: int, movie_id: int, + command_wait_delay: int = 1, command_wait_attempts: int = 600) -> Optional[int]: + """ + Refresh a movie in Radarr. + + Args: + api_url: The base URL of the Radarr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + movie_id: The ID of the movie to refresh + command_wait_delay: Seconds to wait between command status checks + command_wait_attempts: Maximum number of status check attempts + + Returns: + The command ID if the refresh was triggered successfully, None otherwise + """ + endpoint = "command" + data = { + "name": "RefreshMovie", + "movieIds": [movie_id] + } + + # Use the updated arr_request + response = arr_request(api_url, api_key, api_timeout, endpoint, method="POST", data=data) + if response and 'id' in response: + command_id = response['id'] + radarr_logger.debug(f"Triggered refresh for movie ID {movie_id}. 
Command ID: {command_id}") + + # Wait for command to complete if requested + if command_wait_delay > 0 and command_wait_attempts > 0: + radarr_logger.debug(f"Waiting for refresh command {command_id} to complete...") + success = wait_for_command(api_url, api_key, api_timeout, command_id, + delay_seconds=command_wait_delay, + max_attempts=command_wait_attempts) + if success: + radarr_logger.debug(f"Refresh command {command_id} completed successfully") + else: + radarr_logger.warning(f"Timed out waiting for refresh command {command_id} to complete") + + return command_id + else: + radarr_logger.error(f"Failed to trigger refresh command for movie ID {movie_id}. Response: {response}") + return None + +def movie_search(api_url: str, api_key: str, api_timeout: int, movie_ids: List[int]) -> Optional[int]: + """ + Trigger a search for one or more movies. + + Args: + api_url: The base URL of the Radarr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + movie_ids: A list of movie IDs to search for + + Returns: + The command ID if the search command was triggered successfully, None otherwise + """ + if not movie_ids: + radarr_logger.warning("No movie IDs provided for search.") + return None + + endpoint = "command" + data = { + "name": "MoviesSearch", + "movieIds": movie_ids + } + + # Use the updated arr_request + response = arr_request(api_url, api_key, api_timeout, endpoint, method="POST", data=data) + if response and 'id' in response: + command_id = response['id'] + radarr_logger.debug(f"Triggered search for movie IDs: {movie_ids}. Command ID: {command_id}") + return command_id + else: + radarr_logger.error(f"Failed to trigger search command for movie IDs {movie_ids}. 
Response: {response}") + return None + +def check_connection(api_url: str, api_key: str, api_timeout: int) -> bool: + """Check the connection to Radarr API.""" + try: + # Ensure api_url is properly formatted + if not api_url: + radarr_logger.error("API URL is empty or not set") + return False + + # Make sure api_url has a scheme + if not (api_url.startswith('http://') or api_url.startswith('https://')): + radarr_logger.error(f"Invalid URL format: {api_url} - URL must start with http:// or https://") + return False + + # Ensure URL doesn't end with a slash before adding the endpoint + base_url = api_url.rstrip('/') + full_url = f"{base_url}/api/v3/system/status" + + response = requests.get(full_url, headers={"X-Api-Key": api_key}, timeout=api_timeout) + response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx) + radarr_logger.info("Successfully connected to Radarr.") + return True + except requests.exceptions.RequestException as e: + radarr_logger.error(f"Error connecting to Radarr: {e}") + return False + except Exception as e: + radarr_logger.error(f"An unexpected error occurred during Radarr connection check: {e}") + return False + +def wait_for_command(api_url: str, api_key: str, api_timeout: int, command_id: int, + delay_seconds: int = 1, max_attempts: int = 600) -> bool: + """ + Wait for a command to complete. 
+ + Args: + api_url: The base URL of the Radarr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + command_id: The ID of the command to wait for + delay_seconds: Seconds to wait between command status checks + max_attempts: Maximum number of status check attempts + + Returns: + True if the command completed successfully, False if timed out + """ + attempts = 0 + while attempts < max_attempts: + response = arr_request(api_url, api_key, api_timeout, f"command/{command_id}") + if response and 'state' in response: + state = response['state'] + if state == "completed": + return True + elif state == "failed": + radarr_logger.error(f"Command {command_id} failed") + return False + time.sleep(delay_seconds) + attempts += 1 + radarr_logger.warning(f"Timed out waiting for command {command_id} to complete") + return False \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/apps/radarr/missing.py b/Huntarr.io-6.3.6/src/primary/apps/radarr/missing.py new file mode 100644 index 0000000..4fb9249 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/radarr/missing.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python3 +""" +Missing Movies Processing for Radarr +Handles searching for missing movies in Radarr +""" + +import time +import random +import datetime +from typing import List, Dict, Any, Set, Callable +from src.primary.utils.logger import get_logger +from src.primary.apps.radarr import api as radarr_api +from src.primary.stats_manager import increment_stat +from src.primary.stateful_manager import is_processed, add_processed_id +from src.primary.utils.history_utils import log_processed_media +from src.primary.settings_manager import load_settings, get_advanced_setting + +# Get logger for the app +radarr_logger = get_logger("radarr") + +def process_missing_movies( + app_settings: Dict[str, Any], + stop_check: Callable[[], bool] # Function to check if stop is requested +) -> bool: + """ + Process missing movies in Radarr based on 
provided settings. + + Args: + app_settings: Dictionary containing all settings for Radarr + stop_check: A function that returns True if the process should stop + + Returns: + True if any movies were processed, False otherwise. + """ + processed_any = False + + # Get instance name - check for instance_name first, fall back to legacy "name" key if needed + instance_name = app_settings.get("instance_name", app_settings.get("name", "Radarr Default")) + + # Log important settings + radarr_logger.info("=== Radarr Missing Movies Settings ===") + radarr_logger.info(f"Instance Name: {instance_name}") + + # Extract necessary settings + api_url = app_settings.get("api_url", "").strip() + api_key = app_settings.get("api_key", "").strip() + api_timeout = get_advanced_setting("api_timeout", 120) # Use general.json value + monitored_only = app_settings.get("monitored_only", True) + skip_future_releases = app_settings.get("skip_future_releases", True) + skip_movie_refresh = app_settings.get("skip_movie_refresh", False) + hunt_missing_movies = app_settings.get("hunt_missing_movies", 0) + + # Use advanced settings from general.json for command operations + command_wait_delay = get_advanced_setting("command_wait_delay", 1) + command_wait_attempts = get_advanced_setting("command_wait_attempts", 600) + release_type = app_settings.get("release_type", "physical") + + radarr_logger.info(f"Hunt Missing Movies: {hunt_missing_movies}") + radarr_logger.info(f"Monitored Only: {monitored_only}") + radarr_logger.info(f"Skip Future Releases: {skip_future_releases}") + radarr_logger.info(f"Skip Movie Refresh: {skip_movie_refresh}") + radarr_logger.info(f"Release Type for Future Status: {release_type}") + + release_type_field = 'physicalRelease' + if release_type == 'digital': + release_type_field = 'digitalRelease' + elif release_type == 'cinema': + release_type_field = 'inCinemas' + + radarr_logger.info(f"Using {release_type_field} date to determine future releases") + 
radarr_logger.info("=======================================") + + radarr_logger.info("Starting missing movies processing cycle for Radarr.") + + if not api_url or not api_key: + radarr_logger.error("API URL or Key not configured in settings. Cannot process missing movies.") + return False + + # Skip if hunt_missing_movies is set to 0 + if hunt_missing_movies <= 0: + radarr_logger.info("'hunt_missing_movies' setting is 0 or less. Skipping missing movie processing.") + return False + + # Check for stop signal + if stop_check(): + radarr_logger.info("Stop requested before starting missing movies. Aborting...") + return False + + # Get missing movies + radarr_logger.info("Retrieving movies with missing files...") + missing_movies = radarr_api.get_movies_with_missing(api_url, api_key, api_timeout, monitored_only) + + if missing_movies is None: # API call failed + radarr_logger.error("Failed to retrieve missing movies from Radarr API.") + return False + + if not missing_movies: + radarr_logger.info("No missing movies found.") + return False + + # Check for stop signal after retrieving movies + if stop_check(): + radarr_logger.info("Stop requested after retrieving missing movies. 
Aborting...") + return False + + radarr_logger.info(f"Found {len(missing_movies)} movies with missing files.") + + # Filter out future releases if configured + if skip_future_releases: + now = datetime.datetime.now(datetime.timezone.utc) + original_count = len(missing_movies) + + missing_movies = [ + movie for movie in missing_movies + if movie.get(release_type_field) and datetime.datetime.fromisoformat(movie[release_type_field].replace('Z', '+00:00')) < now + ] + skipped_count = original_count - len(missing_movies) + if skipped_count > 0: + radarr_logger.info(f"Skipped {skipped_count} future movie releases based on {release_type} release date.") + + if not missing_movies: + radarr_logger.info("No missing movies left to process after filtering future releases.") + return False + + movies_processed = 0 + processing_done = False + + # Filter out already processed movies using stateful management + unprocessed_movies = [] + for movie in missing_movies: + movie_id = str(movie.get("id")) + if not is_processed("radarr", instance_name, movie_id): + unprocessed_movies.append(movie) + else: + radarr_logger.debug(f"Skipping already processed movie ID: {movie_id}") + + radarr_logger.info(f"Found {len(unprocessed_movies)} unprocessed missing movies out of {len(missing_movies)} total.") + + if not unprocessed_movies: + radarr_logger.info("No unprocessed missing movies found. 
All available movies have been processed.") + return False + + # Always use random selection for missing movies + radarr_logger.info(f"Using random selection for missing movies") + if len(unprocessed_movies) > hunt_missing_movies: + movies_to_process = random.sample(unprocessed_movies, hunt_missing_movies) + else: + movies_to_process = unprocessed_movies + + radarr_logger.info(f"Selected {len(movies_to_process)} movies to process.") + + # Add detailed logging for selected movies + if movies_to_process: + radarr_logger.info(f"Movies selected for processing in this cycle:") + for idx, movie in enumerate(movies_to_process): + movie_id = movie.get("id") + movie_title = movie.get("title", "Unknown Title") + year = movie.get("year", "Unknown Year") + radarr_logger.info(f" {idx+1}. {movie_title} ({year}) - ID: {movie_id}") + + # Process each movie + for movie in movies_to_process: + if stop_check(): + radarr_logger.info("Stop requested during processing. Aborting...") + break + + movie_id = movie.get("id") + movie_title = movie.get("title", "Unknown Title") + + # Optional: Refresh the movie before searching + if not skip_movie_refresh: + radarr_logger.info(f"Refreshing movie metadata for '{movie_title}' (ID: {movie_id})...") + refresh_success = radarr_api.refresh_movie(api_url, api_key, api_timeout, movie_id, command_wait_delay, command_wait_attempts) + + if not refresh_success: + radarr_logger.warning(f"Failed to refresh movie metadata for '{movie_title}'. 
Continuing anyway...") + + # Search for the movie + radarr_logger.info(f"Searching for movie '{movie_title}' (ID: {movie_id})...") + search_success = radarr_api.movie_search(api_url, api_key, api_timeout, [movie_id]) + + if search_success: + radarr_logger.info(f"Successfully triggered search for movie '{movie_title}'") + # Immediately add to processed IDs to prevent duplicate processing + success = add_processed_id("radarr", instance_name, str(movie_id)) + radarr_logger.debug(f"Added processed ID: {movie_id}, success: {success}") + + # Log to history system + year = movie.get("year", "Unknown Year") + media_name = f"{movie_title} ({year})" + log_processed_media("radarr", media_name, movie_id, instance_name, "missing") + radarr_logger.debug(f"Logged history entry for movie: {media_name}") + + increment_stat("radarr", "hunted") + movies_processed += 1 + processed_any = True + else: + radarr_logger.warning(f"Failed to trigger search for movie '{movie_title}'") + + radarr_logger.info(f"Finished processing missing movies. 
Processed {movies_processed} of {len(movies_to_process)} selected movies.") + return processed_any \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/apps/radarr/upgrade.py b/Huntarr.io-6.3.6/src/primary/apps/radarr/upgrade.py new file mode 100644 index 0000000..2980276 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/radarr/upgrade.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 +""" +Quality Upgrade Processing for Radarr +Handles searching for movies that need quality upgrades in Radarr +""" + +import time +import random +from typing import List, Dict, Any, Set, Callable +from src.primary.utils.logger import get_logger +from src.primary.apps.radarr import api as radarr_api +from src.primary.stats_manager import increment_stat +from src.primary.stateful_manager import is_processed, add_processed_id +from src.primary.utils.history_utils import log_processed_media +from src.primary.settings_manager import get_advanced_setting + +# Get logger for the app +radarr_logger = get_logger("radarr") + +def process_cutoff_upgrades( + app_settings: Dict[str, Any], + stop_check: Callable[[], bool] # Function to check if stop is requested +) -> bool: + """ + Process quality cutoff upgrades for Radarr based on settings. + + Args: + app_settings: Dictionary containing all settings for Radarr + stop_check: A function that returns True if the process should stop + + Returns: + True if any movies were processed for upgrades, False otherwise. 
+ """ + radarr_logger.info("Starting quality cutoff upgrades processing cycle for Radarr.") + processed_any = False + + # Extract necessary settings + api_url = app_settings.get("api_url", "").strip() + api_key = app_settings.get("api_key", "").strip() + api_timeout = get_advanced_setting("api_timeout", 120) # Use general.json value + monitored_only = app_settings.get("monitored_only", True) + skip_movie_refresh = app_settings.get("skip_movie_refresh", False) + hunt_upgrade_movies = app_settings.get("hunt_upgrade_movies", 0) + + # Use advanced settings from general.json for command operations + command_wait_delay = get_advanced_setting("command_wait_delay", 1) + command_wait_attempts = get_advanced_setting("command_wait_attempts", 600) + + # Get instance name - check for instance_name first, fall back to legacy "name" key if needed + instance_name = app_settings.get("instance_name", app_settings.get("name", "Radarr Default")) + + # Get movies eligible for upgrade + radarr_logger.info("Retrieving movies eligible for cutoff upgrade...") + upgrade_eligible_data = radarr_api.get_cutoff_unmet_movies(api_url, api_key, api_timeout, monitored_only) + + if not upgrade_eligible_data: + radarr_logger.info("No movies found eligible for upgrade or error retrieving them.") + return False + + radarr_logger.info(f"Found {len(upgrade_eligible_data)} movies eligible for upgrade.") + + # Filter out already processed movies using stateful management + unprocessed_movies = [] + for movie in upgrade_eligible_data: + movie_id = str(movie.get("id")) + if not is_processed("radarr", instance_name, movie_id): + unprocessed_movies.append(movie) + else: + radarr_logger.debug(f"Skipping already processed movie ID: {movie_id}") + + radarr_logger.info(f"Found {len(unprocessed_movies)} unprocessed movies for upgrade out of {len(upgrade_eligible_data)} total.") + + if not unprocessed_movies: + radarr_logger.info("No upgradeable movies found to process (after filtering already processed). 
Skipping.") + return False + + radarr_logger.info(f"Randomly selecting up to {hunt_upgrade_movies} movies for upgrade search.") + movies_to_process = random.sample(unprocessed_movies, min(hunt_upgrade_movies, len(unprocessed_movies))) + + radarr_logger.info(f"Selected {len(movies_to_process)} movies to search for upgrades.") + processed_count = 0 + processed_something = False + + for movie in movies_to_process: + if stop_check(): + radarr_logger.info("Stop signal received, aborting Radarr upgrade cycle.") + break + + movie_id = movie.get("id") + movie_title = movie.get("title") + movie_year = movie.get("year") + + radarr_logger.info(f"Processing upgrade for movie: \"{movie_title}\" ({movie_year}) (Movie ID: {movie_id})") + + # Refresh movie (optional) + if not skip_movie_refresh: + radarr_logger.info(f" - Refreshing movie info...") + refresh_result = radarr_api.refresh_movie(api_url, api_key, api_timeout, movie_id) + if not refresh_result: + radarr_logger.warning(f" - Failed to trigger movie refresh. 
Continuing search anyway.") + else: + radarr_logger.debug(f" - Skipping movie refresh (skip_movie_refresh=true)") + + # Search for cutoff upgrade + radarr_logger.info(f" - Searching for quality upgrade...") + search_result = radarr_api.movie_search(api_url, api_key, api_timeout, [movie_id]) + + if search_result: + radarr_logger.info(f" - Successfully triggered search for quality upgrade.") + add_processed_id("radarr", instance_name, str(movie_id)) + increment_stat("radarr", "upgraded") + + # Log to history so the upgrade appears in the history UI + media_name = f"{movie_title} ({movie_year})" + log_processed_media("radarr", media_name, movie_id, instance_name, "upgrade") + radarr_logger.debug(f"Logged quality upgrade to history for movie ID {movie_id}") + + processed_count += 1 + processed_something = True + else: + radarr_logger.warning(f" - Failed to trigger search for quality upgrade.") + + # Log final status + radarr_logger.info(f"Completed processing {processed_count} movies for quality upgrades.") + + return processed_something \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/apps/radarr_routes.py b/Huntarr.io-6.3.6/src/primary/apps/radarr_routes.py new file mode 100644 index 0000000..eb6caf6 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/radarr_routes.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python3 + +from flask import Blueprint, request, jsonify +import datetime, os, requests +from src.primary import keys_manager +from src.primary.state import get_state_file_path, reset_state_file +from src.primary.utils.logger import get_logger +import traceback +import socket +from urllib.parse import urlparse + +radarr_bp = Blueprint('radarr', __name__) +radarr_logger = get_logger("radarr") + +# Make sure we're using the correct state files +PROCESSED_MISSING_FILE = get_state_file_path("radarr", "processed_missing") +PROCESSED_UPGRADES_FILE = get_state_file_path("radarr", "processed_upgrades") + +@radarr_bp.route('/test-connection', 
methods=['POST']) +def test_connection(): + """Test connection to a Radarr API instance""" + data = request.json + api_url = data.get('api_url') + api_key = data.get('api_key') + api_timeout = data.get('api_timeout', 30) # Use longer timeout for connection test + + if not api_url or not api_key: + return jsonify({"success": False, "message": "API URL and API Key are required"}), 400 + + radarr_logger.info(f"Testing connection to Radarr API at {api_url}") + + # Validate URL format + if not (api_url.startswith('http://') or api_url.startswith('https://')): + error_msg = "API URL must start with http:// or https://" + radarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 400 + + # Try to establish a socket connection first to check basic connectivity + parsed_url = urlparse(api_url) + hostname = parsed_url.hostname + port = parsed_url.port or (443 if parsed_url.scheme == 'https' else 80) + + try: + # Try socket connection for quick feedback on connectivity issues + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(3) # Short timeout for quick feedback + result = sock.connect_ex((hostname, port)) + sock.close() + + if result != 0: + error_msg = f"Connection refused - Unable to connect to {hostname}:{port}. Please check if the server is running and the port is correct." + radarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 404 + except socket.gaierror: + error_msg = f"DNS resolution failed - Cannot resolve hostname: {hostname}. Please check your URL." 
+ radarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 404 + except Exception as e: + # Log the socket testing error but continue with the full request + radarr_logger.debug(f"Socket test error, continuing with full request: {str(e)}") + + # For Radarr, use api/v3 + url = f"{api_url.rstrip('/')}/api/v3/system/status" + headers = { + "X-Api-Key": api_key, + "Content-Type": "application/json" + } + + try: + response = requests.get(url, headers=headers, timeout=(10, api_timeout)) + + # For HTTP errors, provide more specific feedback + if response.status_code == 401: + error_msg = "Authentication failed: Invalid API key" + radarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 401 + elif response.status_code == 403: + error_msg = "Access forbidden: Check API key permissions" + radarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 403 + elif response.status_code == 404: + error_msg = "API endpoint not found: This doesn't appear to be a valid Radarr server. Check your URL." + radarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 404 + elif response.status_code >= 500: + error_msg = f"Radarr server error (HTTP {response.status_code}): The Radarr server is experiencing issues" + radarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), response.status_code + + # Raise for other HTTP errors + response.raise_for_status() + + try: + response_data = response.json() + version = response_data.get('version', 'unknown') + radarr_logger.info(f"Successfully connected to Radarr API version: {version}") + + return jsonify({ + "success": True, + "message": "Successfully connected to Radarr API", + "version": version + }) + except ValueError: + error_msg = "Invalid JSON response from Radarr API - This doesn't appear to be a valid Radarr server" + radarr_logger.error(f"{error_msg}. 
Response content: {response.text[:200]}") + return jsonify({"success": False, "message": error_msg}), 500 + + except requests.exceptions.ConnectionError as e: + # Handle different types of connection errors + error_details = str(e) + if "Connection refused" in error_details: + error_msg = f"Connection refused - Radarr is not running on {api_url} or the port is incorrect" + elif "Name or service not known" in error_details or "getaddrinfo failed" in error_details: + error_msg = f"DNS resolution failed - Cannot find host '{urlparse(api_url).hostname}'. Check your URL." + else: + error_msg = f"Connection error - Check if Radarr is running: {error_details}" + + radarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 404 + except requests.exceptions.Timeout: + error_msg = f"Connection timed out - Radarr took too long to respond" + radarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 504 + except requests.exceptions.RequestException as e: + error_msg = f"Connection test failed: {str(e)}" + radarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 500 diff --git a/Huntarr.io-6.3.6/src/primary/apps/readarr.py b/Huntarr.io-6.3.6/src/primary/apps/readarr.py new file mode 100644 index 0000000..2cb866e --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/readarr.py @@ -0,0 +1,179 @@ +from flask import Blueprint, request, jsonify +import datetime, os, requests +from primary import keys_manager +from src.primary.utils.logger import get_logger +from src.primary.state import get_state_file_path +from src.primary.settings_manager import load_settings + +readarr_bp = Blueprint('readarr', __name__) +readarr_logger = get_logger("readarr") + +# Make sure we're using the correct state files +PROCESSED_MISSING_FILE = get_state_file_path("readarr", "processed_missing") +PROCESSED_UPGRADES_FILE = get_state_file_path("readarr", "processed_upgrades") + 
+@readarr_bp.route('/test-connection', methods=['POST']) +def test_connection(): + """Test connection to a Readarr API instance with comprehensive diagnostics""" + data = request.json + api_url = data.get('api_url') + api_key = data.get('api_key') + api_timeout = data.get('api_timeout', 30) # Use longer timeout for connection test + + if not api_url or not api_key: + return jsonify({"success": False, "message": "API URL and API Key are required"}), 400 + + # Log the test attempt + readarr_logger.info(f"Testing connection to Readarr API at {api_url}") + + # First check if URL is properly formatted + if not (api_url.startswith('http://') or api_url.startswith('https://')): + error_msg = "API URL must start with http:// or https://" + readarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 400 + + # For Readarr, use api/v1 + api_base = "api/v1" + test_url = f"{api_url.rstrip('/')}/{api_base}/system/status" + headers = {'X-Api-Key': api_key} + + try: + # Use a connection timeout separate from read timeout + response = requests.get(test_url, headers=headers, timeout=(10, api_timeout)) + + # Log HTTP status code for diagnostic purposes + readarr_logger.debug(f"Readarr API status code: {response.status_code}") + + # Check HTTP status code + response.raise_for_status() + + # Ensure the response is valid JSON + try: + response_data = response.json() + + # We no longer save keys here since we use instances + # keys_manager.save_api_keys("readarr", api_url, api_key) + + readarr_logger.info(f"Successfully connected to Readarr API version: {response_data.get('version', 'unknown')}") + + # Return success with some useful information + return jsonify({ + "success": True, + "message": "Successfully connected to Readarr API", + "version": response_data.get('version', 'unknown') + }) + except ValueError: + error_msg = "Invalid JSON response from Readarr API" + readarr_logger.error(f"{error_msg}. 
Response content: {response.text[:200]}") + return jsonify({"success": False, "message": error_msg}), 500 + + except requests.exceptions.Timeout as e: + error_msg = f"Connection timed out after {api_timeout} seconds" + readarr_logger.error(f"{error_msg}: {str(e)}") + return jsonify({"success": False, "message": error_msg}), 504 + + except requests.exceptions.ConnectionError as e: + error_msg = "Connection error - check hostname and port" + details = str(e) + # Check for common DNS resolution errors + if "Name or service not known" in details or "getaddrinfo failed" in details: + error_msg = "DNS resolution failed - check hostname" + # Check for common connection refused errors + elif "Connection refused" in details: + error_msg = "Connection refused - check if Readarr is running and the port is correct" + + readarr_logger.error(f"{error_msg}: {details}") + return jsonify({"success": False, "message": f"{error_msg}: {details}"}), 502 + + except requests.exceptions.RequestException as e: + error_message = f"Connection failed: {str(e)}" + + if hasattr(e, 'response') and e.response is not None: + status_code = e.response.status_code + + # Add specific messages based on common status codes + if status_code == 401: + error_message = "Authentication failed: Invalid API key" + elif status_code == 403: + error_message = "Access forbidden: Check API key permissions" + elif status_code == 404: + error_message = "API endpoint not found: Check API URL" + elif status_code >= 500: + error_message = f"Readarr server error (HTTP {status_code}): The Readarr server is experiencing issues" + + # Try to extract more error details if available + try: + error_details = e.response.json() + error_message += f" - {error_details.get('message', 'No details')}" + except ValueError: + if e.response.text: + error_message += f" - Response: {e.response.text[:200]}" + + readarr_logger.error(error_message) + return jsonify({"success": False, "message": error_message}), 500 + + except Exception as e: 
+ error_msg = f"An unexpected error occurred: {str(e)}" + readarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 500 + +# Function to check if Readarr is configured +def is_configured(): + """Check if Readarr API credentials are configured by checking if at least one instance is enabled""" + settings = load_settings("readarr") + + if not settings: + readarr_logger.debug("No settings found for Readarr") + return False + + # Check if instances are configured + if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]: + for instance in settings["instances"]: + if instance.get("enabled", True) and instance.get("api_url") and instance.get("api_key"): + readarr_logger.debug(f"Found configured Readarr instance: {instance.get('name', 'Unnamed')}") + return True + + readarr_logger.debug("No enabled Readarr instances found with valid API URL and key") + return False + + # Fallback to legacy single-instance config + api_url = settings.get("api_url") + api_key = settings.get("api_key") + return bool(api_url and api_key) + +# Get all valid instances from settings +def get_configured_instances(): + """Get all configured and enabled Readarr instances""" + settings = load_settings("readarr") + instances = [] + + if not settings: + readarr_logger.debug("No settings found for Readarr") + return instances + + # Check if instances are configured + if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]: + for instance in settings["instances"]: + if instance.get("enabled", True) and instance.get("api_url") and instance.get("api_key"): + # Create a settings object for this instance by combining global settings with instance-specific ones + instance_settings = settings.copy() + # Remove instances list to avoid confusion + if "instances" in instance_settings: + del instance_settings["instances"] + + # Override with instance-specific connection settings + 
instance_settings["api_url"] = instance.get("api_url") + instance_settings["api_key"] = instance.get("api_key") + instance_settings["instance_name"] = instance.get("name", "Default") + + instances.append(instance_settings) + else: + # Fallback to legacy single-instance config + api_url = settings.get("api_url") + api_key = settings.get("api_key") + if api_url and api_key: + settings["instance_name"] = "Default" + instances.append(settings) + + readarr_logger.info(f"Found {len(instances)} configured and enabled Readarr instances") + return instances diff --git a/Huntarr.io-6.3.6/src/primary/apps/readarr/__init__.py b/Huntarr.io-6.3.6/src/primary/apps/readarr/__init__.py new file mode 100644 index 0000000..7a2516a --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/readarr/__init__.py @@ -0,0 +1,91 @@ +""" +Readarr module initialization +""" + +# Use src.primary imports +from src.primary.apps.readarr.missing import process_missing_books +from src.primary.apps.readarr.upgrade import process_cutoff_upgrades +# Add necessary imports +from src.primary.settings_manager import load_settings +from src.primary.utils.logger import get_logger + +# Define logger for this module +readarr_logger = get_logger("readarr") + +def get_configured_instances(): + """Get all configured and enabled Readarr instances""" + settings = load_settings("readarr") + instances = [] + # readarr_logger.info(f"Loaded Readarr settings for instance check: {settings}") # Removed verbose log + + if not settings: + readarr_logger.debug("No settings found for Readarr") + return instances + + # Check if instances are configured + if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]: + # readarr_logger.info(f"Found 'instances' list with {len(settings['instances'])} items. 
Processing...") # Removed verbose log + for idx, instance in enumerate(settings["instances"]): + readarr_logger.debug(f"Checking instance #{idx}: {instance}") + # Enhanced validation + api_url = instance.get("api_url", "").strip() + api_key = instance.get("api_key", "").strip() + + # Enhanced URL validation - ensure URL has proper scheme + if api_url and not (api_url.startswith('http://') or api_url.startswith('https://')): + readarr_logger.warning(f"Instance '{instance.get('name', 'Unnamed')}' has URL without http(s) scheme: {api_url}") + api_url = f"http://{api_url}" + readarr_logger.warning(f"Auto-correcting URL to: {api_url}") + + is_enabled = instance.get("enabled", True) + + # Only include properly configured instances + if is_enabled and api_url and api_key: + # Return only essential instance details + instance_data = { + "instance_name": instance.get("name", "Default"), + "api_url": api_url, + "api_key": api_key, + } + instances.append(instance_data) + # readarr_logger.info(f"Added valid instance: {instance_data}") # Removed verbose log + elif not is_enabled: + readarr_logger.debug(f"Skipping disabled instance: {instance.get('name', 'Unnamed')}") + else: + # For brand new installations, don't spam logs with warnings about default instances + instance_name = instance.get('name', 'Unnamed') + if instance_name == 'Default': + # Use debug level for default instances to avoid log spam on new installations + readarr_logger.debug(f"Skipping instance '{instance_name}' due to missing API URL or key (URL: '{api_url}', Key Set: {bool(api_key)})") + else: + # Still log warnings for non-default instances + readarr_logger.warning(f"Skipping instance '{instance_name}' due to missing API URL or key (URL: '{api_url}', Key Set: {bool(api_key)})") + else: + # readarr_logger.info("No 'instances' list found or list is empty. 
Checking legacy config.") # Removed verbose log + # Fallback to legacy single-instance config + api_url = settings.get("api_url", "").strip() + api_key = settings.get("api_key", "").strip() + + # Ensure URL has proper scheme + if api_url and not (api_url.startswith('http://') or api_url.startswith('https://')): + readarr_logger.warning(f"API URL missing http(s) scheme: {api_url}") + api_url = f"http://{api_url}" + readarr_logger.warning(f"Auto-correcting URL to: {api_url}") + + if api_url and api_key: + # Create a clean instance_data dict for the legacy instance + instance_data = { + "instance_name": "Default", + "api_url": api_url, + "api_key": api_key, + } + instances.append(instance_data) + # readarr_logger.info(f"Added valid legacy instance: {instance_data}") # Removed verbose log + else: + readarr_logger.warning("No API URL or key found in legacy configuration") + + # Use debug level to avoid spamming logs, especially with 0 instances + readarr_logger.debug(f"Found {len(instances)} configured and enabled Readarr instances") + return instances + +__all__ = ["process_missing_books", "process_cutoff_upgrades", "get_configured_instances"] \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/apps/readarr/api.py b/Huntarr.io-6.3.6/src/primary/apps/readarr/api.py new file mode 100644 index 0000000..6137893 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/readarr/api.py @@ -0,0 +1,372 @@ +#!/usr/bin/env python3 +""" +Readarr-specific API functions +Handles all communication with the Readarr API +""" + +import requests +import json +import time +import datetime +from typing import List, Dict, Any, Optional, Union +# Correct the import path +from src.primary.utils.logger import get_logger +# Import load_settings +from src.primary.settings_manager import load_settings + +# Get app-specific logger +logger = get_logger("readarr") + +# Use a session for better performance +session = requests.Session() + +# Default API timeout in seconds - used as 
# Default API timeout in seconds - used as a fallback only
API_TIMEOUT = 30


def check_connection(api_url: str, api_key: str, api_timeout: int) -> bool:
    """Check connectivity to the Readarr API via /api/v1/system/status.

    Args:
        api_url: Base URL of the Readarr instance; must include an http(s) scheme.
        api_key: Readarr API key.
        api_timeout: Request timeout in seconds.

    Returns:
        True if the status endpoint responds with a 2xx, False otherwise.
    """
    try:
        if not api_url:
            logger.error("API URL is empty or not set")
            return False

        # Require an explicit scheme here; auto-correcting during a connection
        # *check* would mask configuration errors.
        if not api_url.startswith(("http://", "https://")):
            logger.error(f"Invalid URL format: {api_url} - URL must start with http:// or https://")
            return False

        # Strip any trailing slash before appending the endpoint.
        full_url = f"{api_url.rstrip('/')}/api/v1/system/status"

        # Use the shared session for connection pooling (was a bare requests.get,
        # inconsistent with the rest of this module).
        response = session.get(full_url, headers={"X-Api-Key": api_key}, timeout=api_timeout)
        response.raise_for_status()  # Raise HTTPError for bad responses (4xx or 5xx)
        logger.info("Successfully connected to Readarr.")
        return True
    except requests.exceptions.RequestException as e:
        logger.error(f"Error connecting to Readarr: {e}")
        return False
    except Exception as e:
        logger.error(f"An unexpected error occurred during Readarr connection check: {e}")
        return False


def get_download_queue_size(api_url: str = None, api_key: str = None, timeout: int = 30) -> int:
    """Get the current size of the Readarr download queue.

    Args:
        api_url: Optional API URL (falls back to saved settings when omitted).
        api_key: Optional API key (falls back to saved settings when omitted).
        timeout: Timeout in seconds for the request.

    Returns:
        The number of items in the download queue ("totalRecords"),
        or 0 if the request failed.
    """
    try:
        # Delegate to arr_request instead of duplicating URL/header/session
        # handling here; arr_request accepts explicit credentials.
        if api_url and api_key:
            data = arr_request("queue", api_url=api_url, api_key=api_key, api_timeout=timeout)
        else:
            data = arr_request("queue")
        if data and "totalRecords" in data:
            return data["totalRecords"]
        return 0
    except Exception as e:
        logger.error(f"Error getting download queue size: {e}")
        return 0


def arr_request(endpoint: str, method: str = "GET", data: Dict = None, app_type: str = "readarr",
                api_url: Optional[str] = None, api_key: Optional[str] = None,
                api_timeout: Optional[int] = None) -> Any:
    """Make a request to the Readarr API.

    Credentials may be supplied directly; any that are omitted are loaded
    from the saved settings for ``app_type``.

    Args:
        endpoint: The API endpoint to call (without the /api/v1 prefix).
        method: HTTP method (GET, POST, PUT, DELETE; case-insensitive).
        data: Optional JSON body for POST/PUT requests.
        app_type: The app whose settings to load (readarr by default).
        api_url: Optional API URL (overrides loaded settings).
        api_key: Optional API key (overrides loaded settings).
        api_timeout: Optional timeout in seconds (overrides loaded settings).

    Returns:
        The parsed JSON response ({} for an empty body), or None on failure.
    """
    # Load settings only for the values not provided directly.
    if api_url is None or api_key is None or api_timeout is None:
        settings = load_settings(app_type)
        if api_url is None:
            api_url = settings.get('api_url', '')
        if api_key is None:
            api_key = settings.get('api_key', '')
        if api_timeout is None:
            api_timeout = settings.get('api_timeout', 60)

    if not api_url or not api_key:
        logger.error("API URL or API key is missing. Check your settings.")
        return None

    if not api_url.startswith(("http://", "https://")):
        logger.error(f"Invalid URL format: {api_url} - URL must start with http:// or https://")
        return None

    # Readarr uses API v1.
    url = f"{api_url.rstrip('/')}/api/v1/{endpoint.lstrip('/')}"
    headers = {
        "X-Api-Key": api_key,
        "Content-Type": "application/json",
    }

    # Dispatch table instead of an if/elif chain; .upper() makes the
    # method check case-insensitive (backward compatible).
    handlers = {
        "GET": lambda: session.get(url, headers=headers, timeout=api_timeout),
        "POST": lambda: session.post(url, headers=headers, json=data, timeout=api_timeout),
        "PUT": lambda: session.put(url, headers=headers, json=data, timeout=api_timeout),
        "DELETE": lambda: session.delete(url, headers=headers, timeout=api_timeout),
    }
    handler = handlers.get(method.upper())
    if handler is None:
        logger.error(f"Unsupported HTTP method: {method}")
        return None

    try:
        response = handler()
        response.raise_for_status()
        return response.json() if response.text else {}
    except requests.exceptions.RequestException as e:
        logger.error(f"API request failed: {e}")
        return None


def get_books_with_missing_files() -> List[Dict]:
    """Return monitored books that have no downloaded file.

    Returns:
        A list of Readarr book objects with no associated bookFile.
    """
    books = arr_request("book")
    if not books:
        return []
    # Monitored and no file attached -> missing.
    return [book for book in books
            if book.get("monitored", False) and not book.get("bookFile", None)]


def get_cutoff_unmet_books(api_url: Optional[str] = None, api_key: Optional[str] = None,
                           api_timeout: Optional[int] = None) -> List[Dict]:
    """Get books that don't meet their quality profile cutoff.

    Args:
        api_url: Optional API URL.
        api_key: Optional API key.
        api_timeout: Optional API timeout.

    Returns:
        A list of book objects that need quality upgrades (may be empty).
    """
    # The cutoffUnmet endpoint in Readarr; credentials are forwarded.
    books = arr_request("wanted/cutoff?cutoffUnmet=true",
                        api_url=api_url, api_key=api_key, api_timeout=api_timeout)
    if not books or "records" not in books:
        return []
    return books.get("records", [])


def get_wanted_missing_books(api_url: str, api_key: str, api_timeout: int,
                             monitored_only: bool = True) -> List[Dict]:
    """Get wanted/missing books from Readarr, following pagination.

    Args:
        api_url: The base URL of the Readarr API.
        api_key: The API key for authentication.
        api_timeout: Timeout for each API request.
        monitored_only: Currently unused here; the wanted/missing endpoint
            appears to return monitored items by default — TODO confirm
            against the Readarr API before removing the parameter.

    Returns:
        A list of missing-book dictionaries, or an empty list on error.
    """
    if not api_url.startswith(("http://", "https://")):
        logger.error(f"Invalid URL format: {api_url}")
        return []

    url = f"{api_url.rstrip('/')}/api/v1/wanted/missing"
    headers = {"X-Api-Key": api_key}
    page_size = 100  # adjust as needed; check Readarr API limits
    page = 1
    all_missing_books: List[Dict] = []

    while True:
        # Sorting parameters intentionally omitted due to past API issues.
        params = {'page': page, 'pageSize': page_size}
        try:
            # Shared session for connection pooling across pages.
            response = session.get(url, headers=headers, params=params, timeout=api_timeout)
            response.raise_for_status()
            data = response.json()

            records = data.get('records') if data else None
            if not records:
                break  # no more data or unexpected format

            all_missing_books.extend(records)
            if len(all_missing_books) >= data.get('totalRecords', 0):
                break  # all records fetched
            page += 1
        except requests.exceptions.RequestException as e:
            logger.error(f"Error fetching missing books (page {page}) from {url}: {e}")
            return []
        except json.JSONDecodeError:
            logger.error(f"Error decoding JSON response from {url} (page {page}). Response: {response.text[:200]}")
            return []
        except Exception as e:
            logger.error(f"Unexpected error fetching missing books (page {page}): {e}", exc_info=True)
            return []

    logger.info(f"Successfully fetched {len(all_missing_books)} missing books from Readarr.")
    return all_missing_books


def refresh_author(author_id: int, api_url: Optional[str] = None, api_key: Optional[str] = None,
                   api_timeout: Optional[int] = None) -> bool:
    """Trigger a RefreshAuthor command in Readarr.

    Args:
        author_id: The ID of the author to refresh.
        api_url: Optional API URL.
        api_key: Optional API key.
        api_timeout: Optional API timeout.

    Returns:
        True if the command was accepted, False otherwise.
    """
    data = {
        "name": "RefreshAuthor",
        "authorId": author_id,
    }
    response = arr_request("command", method="POST", data=data,
                           api_url=api_url, api_key=api_key, api_timeout=api_timeout)
    if response:
        logger.debug(f"Refreshed author ID {author_id}")
        return True
    return False


def book_search(book_ids: List[int], api_url: Optional[str] = None, api_key: Optional[str] = None,
                api_timeout: Optional[int] = None) -> Optional[Dict]:
    """Trigger a BookSearch command for one or more books.

    Args:
        book_ids: A list of book IDs to search for.
        api_url: Optional API URL.
        api_key: Optional API key.
        api_timeout: Optional API timeout.

    Returns:
        The command object returned by Readarr (contains the command ID),
        or None on failure. Note: the annotation was previously ``-> bool``
        but callers rely on receiving the command object.
    """
    data = {
        "name": "BookSearch",
        "bookIds": book_ids,
    }
    return arr_request("command", method="POST", data=data,
                       api_url=api_url, api_key=api_key, api_timeout=api_timeout)


def get_author_details(api_url: str, api_key: str, author_id: int,
                       api_timeout: int = 120) -> Optional[Dict]:
    """Fetch details for a specific author from the Readarr API.

    Args:
        api_url: Base URL of the Readarr instance.
        api_key: Readarr API key.
        author_id: The author to look up.
        api_timeout: Request timeout in seconds.

    Returns:
        The author object, or None on any error.
    """
    # rstrip guards against a double slash when api_url ends with '/'.
    endpoint = f"{api_url.rstrip('/')}/api/v1/author/{author_id}"
    headers = {'X-Api-Key': api_key}
    try:
        response = requests.get(endpoint, headers=headers, timeout=api_timeout)
        response.raise_for_status()  # Raise HTTPError for bad responses (4xx or 5xx)
        author_data = response.json()
        logger.debug(f"Successfully fetched details for author ID {author_id}.")
        return author_data
    except requests.exceptions.RequestException as e:
        logger.error(f"Error fetching author details for ID {author_id} from {endpoint}: {e}")
        return None
    except Exception as e:
        logger.error(f"An unexpected error occurred fetching author details for ID {author_id}: {e}")
        return None


def search_books(api_url: str, api_key: str, book_ids: List[int],
                 api_timeout: int = 120) -> Optional[Dict]:
    """Trigger a search for specific book IDs in Readarr.

    Uses requests directly (not arr_request) by design, so explicit
    credentials are always required.

    Args:
        api_url: Base URL of the Readarr instance.
        api_key: Readarr API key.
        book_ids: Book IDs to search for.
        api_timeout: Request timeout in seconds.

    Returns:
        The full command object (includes the command ID), or None on error.
    """
    endpoint = f"{api_url.rstrip('/')}/api/v1/command"
    headers = {'X-Api-Key': api_key}
    payload = {
        'name': 'BookSearch',
        'bookIds': book_ids,
    }
    try:
        response = requests.post(endpoint, headers=headers, json=payload, timeout=api_timeout)
        response.raise_for_status()
        command_data = response.json()
        command_id = command_data.get('id')
        logger.info(f"Successfully triggered BookSearch command for book IDs: {book_ids}. Command ID: {command_id}")
        return command_data
    except requests.exceptions.RequestException as e:
        logger.error(f"Error triggering BookSearch command for book IDs {book_ids} via {endpoint}: {e}")
        return None
    except Exception as e:
        logger.error(f"An unexpected error occurred triggering BookSearch for book IDs {book_ids}: {e}")
        return None
#!/usr/bin/env python3
"""
Missing Books Processing for Readarr
Handles searching for missing books in Readarr
"""

import time
import random
from typing import List, Dict, Any, Set, Callable
from src.primary.utils.logger import get_logger
from src.primary.apps.readarr import api as readarr_api
from src.primary.stats_manager import increment_stat
from src.primary.stateful_manager import is_processed, add_processed_id
from src.primary.utils.history_utils import log_processed_media
from src.primary.settings_manager import load_settings, get_advanced_setting
from src.primary.state import check_state_reset

# Get logger for the app
readarr_logger = get_logger("readarr")

def process_missing_books(
    app_settings: Dict[str, Any],
    stop_check: Callable[[], bool]  # Function to check if stop is requested
) -> bool:
    """
    Process missing books in Readarr based on provided settings.

    Missing books are grouped by author; a random subset of unprocessed
    authors (up to hunt_missing_books) gets a BookSearch triggered.

    Args:
        app_settings: Dictionary containing all settings for Readarr
        stop_check: A function that returns True if the process should stop

    Returns:
        True if any books were processed, False otherwise.
    """
    readarr_logger.info("Starting missing books processing cycle for Readarr.")

    # Reset state files if enough time has passed
    check_state_reset("readarr")

    # Extract necessary settings. The API timeout comes from the centralized
    # advanced settings (general.json), not the per-instance settings.
    api_url = app_settings.get("api_url", "").strip()
    api_key = app_settings.get("api_key", "").strip()
    api_timeout = get_advanced_setting("api_timeout", 120)
    instance_name = app_settings.get("instance_name", "Readarr Default")

    readarr_logger.info(f"Using API timeout of {api_timeout} seconds for Readarr")

    monitored_only = app_settings.get("monitored_only", True)
    skip_author_refresh = app_settings.get("skip_author_refresh", False)
    hunt_missing_books = app_settings.get("hunt_missing_books", 0)
    # NOTE(review): the original also read general settings, skip_future_releases
    # and command_wait_* here but never used them in this function; those unused
    # reads were removed.

    # Get missing books (the original logged this line twice).
    readarr_logger.info("Retrieving wanted/missing books...")
    missing_books_data = readarr_api.get_wanted_missing_books(api_url, api_key, api_timeout, monitored_only)

    if missing_books_data is None:  # None signals an API error
        readarr_logger.error("Failed to retrieve missing books data. Skipping processing.")
        return False

    readarr_logger.info(f"Found {len(missing_books_data)} missing books.")

    # Group missing books by author so searches are batched per author.
    books_by_author: Dict[Any, List[Dict]] = {}
    for book in missing_books_data:
        author_id = book.get("authorId")
        if author_id:
            books_by_author.setdefault(author_id, []).append(book)

    author_ids = list(books_by_author.keys())

    # Filter out authors already handled, using stateful management.
    unprocessed_authors = []
    for author_id in author_ids:
        if is_processed("readarr", instance_name, str(author_id)):
            readarr_logger.debug(f"Skipping already processed author ID: {author_id}")
        else:
            unprocessed_authors.append(author_id)

    readarr_logger.info(f"Found {len(unprocessed_authors)} unprocessed authors out of {len(author_ids)} total authors with missing books.")

    if not unprocessed_authors:
        readarr_logger.info(f"No unprocessed authors found for {instance_name}. All available authors have been processed.")
        return False

    # Always randomly select authors/books to process.
    readarr_logger.info(f"Randomly selecting up to {hunt_missing_books} authors with missing books.")
    authors_to_process = random.sample(unprocessed_authors, min(hunt_missing_books, len(unprocessed_authors)))

    readarr_logger.info(f"Selected {len(authors_to_process)} authors to search for missing books.")
    processed_count = 0
    processed_something = False
    processed_authors = []  # Track author names processed

    for author_id in authors_to_process:
        if stop_check():
            readarr_logger.info("Stop signal received, aborting Readarr missing cycle.")
            break

        # Resolve a display name; fall back to the raw ID when lookup fails.
        author_info = readarr_api.get_author_details(api_url, api_key, author_id, api_timeout)
        author_name = author_info.get("authorName", f"Author ID {author_id}") if author_info else f"Author ID {author_id}"

        readarr_logger.info(f"Processing missing books for author: \"{author_name}\" (Author ID: {author_id})")

        # Optionally refresh the author's metadata before searching.
        if not skip_author_refresh:
            readarr_logger.info(" - Refreshing author info...")
            refresh_result = readarr_api.refresh_author(author_id, api_url, api_key, api_timeout)
            time.sleep(5)  # basic wait for the refresh to start
            if not refresh_result:
                readarr_logger.warning(" - Failed to trigger author refresh. Continuing search anyway.")
        else:
            readarr_logger.info(" - Skipping author refresh (skip_author_refresh=true)")

        # Search for missing books associated with the author.
        readarr_logger.info(" - Searching for missing books...")
        author_books = books_by_author[author_id]
        book_ids_for_author = [book['id'] for book in author_books]  # 'id' is bookId

        # Detailed (debug-level) log listing each book title and ID.
        book_details = []
        for book in author_books:
            book_title = book.get('title', f"Book ID {book['id']}")
            book_details.append(f"'{book_title}' (ID: {book['id']})")
        readarr_logger.debug(
            f"Triggering Book Search for {len(book_details)} books by author '{author_name}': [{', '.join(book_details)}]"
        )

        # Mark author as processed BEFORE triggering any searches, so a
        # failure mid-search does not cause endless retries of this author.
        add_processed_id("readarr", instance_name, str(author_id))
        readarr_logger.debug(f"Added author ID {author_id} to processed list for {instance_name}")

        # Now trigger the search.
        search_command_result = readarr_api.search_books(api_url, api_key, book_ids_for_author, api_timeout)

        if search_command_result:
            # Extract command ID if the result is a dict, else use it directly.
            command_id = search_command_result.get('id') if isinstance(search_command_result, dict) else search_command_result
            readarr_logger.info(f"Triggered book search command {command_id} for author {author_name}. Assuming success for now.")
            increment_stat("readarr", "hunted")

            # Log to history system.
            log_processed_media("readarr", author_name, author_id, instance_name, "missing")
            readarr_logger.debug(f"Logged history entry for author: {author_name}")

            processed_count += 1  # count processed authors/groups
            processed_authors.append(author_name)
            processed_something = True
            readarr_logger.info(f"Processed {processed_count}/{len(authors_to_process)} authors/groups for missing books this cycle.")
        else:
            readarr_logger.error(f"Failed to trigger search for author {author_name}.")

        if processed_count >= hunt_missing_books:
            readarr_logger.info(f"Reached target of {hunt_missing_books} authors/groups processed for this cycle.")
            break

    if processed_authors:
        authors_list = '", "'.join(processed_authors)
        readarr_logger.info(f'Completed processing {processed_count} authors/groups for missing books this cycle: "{authors_list}"')
    else:
        readarr_logger.info(f"Completed processing {processed_count} authors/groups for missing books this cycle.")

    return processed_something
import log_processed_media +from src.primary.state import check_state_reset +from src.primary.settings_manager import load_settings # Import load_settings function + +# Get logger for the app +readarr_logger = get_logger("readarr") + +def process_cutoff_upgrades( + app_settings: Dict[str, Any], + stop_check: Callable[[], bool] # Function to check if stop is requested +) -> bool: + """ + Process quality cutoff upgrades for Readarr based on settings. + + Args: + app_settings: Dictionary containing all settings for Readarr + stop_check: A function that returns True if the process should stop + + Returns: + True if any books were processed for upgrades, False otherwise. + """ + readarr_logger.info("Starting quality cutoff upgrades processing cycle for Readarr.") + + # Reset state files if enough time has passed + check_state_reset("readarr") + + processed_any = False + + # Load general settings to get centralized timeout + general_settings = load_settings('general') + + # Get the API credentials for this instance + api_url = app_settings.get('api_url', '') + api_key = app_settings.get('api_key', '') + + # Use the centralized timeout from general settings with app-specific as fallback + api_timeout = general_settings.get("api_timeout", app_settings.get("api_timeout", 90)) # Use centralized timeout + + readarr_logger.info(f"Using API timeout of {api_timeout} seconds for Readarr") + + # Extract necessary settings + instance_name = app_settings.get("instance_name", "Readarr Default") + monitored_only = app_settings.get("monitored_only", True) + skip_author_refresh = app_settings.get("skip_author_refresh", False) + hunt_upgrade_books = app_settings.get("hunt_upgrade_books", 0) + command_wait_delay = app_settings.get("command_wait_delay", 5) + command_wait_attempts = app_settings.get("command_wait_attempts", 12) + + # Get books eligible for upgrade + readarr_logger.info("Retrieving books eligible for quality upgrade...") + # Pass API credentials explicitly + 
upgrade_eligible_data = readarr_api.get_cutoff_unmet_books(api_url=api_url, api_key=api_key, api_timeout=api_timeout) + + if upgrade_eligible_data is None: # Check if the API call failed (assuming it returns None on error) + readarr_logger.error("Error retrieving books eligible for upgrade from Readarr API.") + return False + elif not upgrade_eligible_data: # Check if the list is empty + readarr_logger.info("No books found eligible for upgrade.") + return False + + readarr_logger.info(f"Found {len(upgrade_eligible_data)} books eligible for quality upgrade.") + + # Filter out future releases if configured + skip_future_releases = app_settings.get("skip_future_releases", True) + if skip_future_releases: + now = datetime.datetime.now(datetime.timezone.utc) + original_count = len(upgrade_eligible_data) + filtered_books = [] + for book in upgrade_eligible_data: + release_date_str = book.get('releaseDate') + if release_date_str: + try: + # Try to parse ISO format first (with time component) + try: + # Handle ISO format date strings like '2023-10-17T04:00:00Z' + # fromisoformat doesn't handle 'Z' timezone, so we replace it + release_date_str_fixed = release_date_str.replace('Z', '+00:00') + release_date = datetime.datetime.fromisoformat(release_date_str_fixed) + except ValueError: + # Fall back to simple YYYY-MM-DD format + release_date = datetime.datetime.strptime(release_date_str, '%Y-%m-%d') + # Add UTC timezone for consistent comparison + release_date = release_date.replace(tzinfo=datetime.timezone.utc) + + if release_date <= now: + filtered_books.append(book) + else: + readarr_logger.debug(f"Skipping future book ID {book.get('id')} with release date {release_date_str}") + except ValueError: + readarr_logger.warning(f"Could not parse release date '{release_date_str}' for book ID {book.get('id')}. 
Including anyway.") + filtered_books.append(book) + else: + filtered_books.append(book) # Include books without a release date + + upgrade_eligible_data = filtered_books + skipped_count = original_count - len(upgrade_eligible_data) + if skipped_count > 0: + readarr_logger.info(f"Skipped {skipped_count} future books based on release date for upgrades.") + + if not upgrade_eligible_data: + readarr_logger.info("No upgradeable books found to process (after potential filtering). Skipping.") + return False + + # Filter out already processed books using stateful management + unprocessed_books = [] + for book in upgrade_eligible_data: + book_id = str(book.get("id")) + if not is_processed("readarr", instance_name, book_id): + unprocessed_books.append(book) + else: + readarr_logger.debug(f"Skipping already processed book ID: {book_id}") + + readarr_logger.info(f"Found {len(unprocessed_books)} unprocessed books out of {len(upgrade_eligible_data)} total books eligible for upgrade.") + + if not unprocessed_books: + readarr_logger.info(f"No unprocessed books found for {instance_name}. 
All available books have been processed.") + return False + + # Always randomly select books to process + readarr_logger.info(f"Randomly selecting up to {hunt_upgrade_books} books for upgrade search.") + books_to_process = random.sample(unprocessed_books, min(hunt_upgrade_books, len(unprocessed_books))) + + readarr_logger.info(f"Selected {len(books_to_process)} books to search for upgrades.") + processed_count = 0 + processed_something = False + + book_ids_to_search = [book.get("id") for book in books_to_process] + + # Mark books as processed BEFORE triggering any searches + for book_id in book_ids_to_search: + add_processed_id("readarr", instance_name, str(book_id)) + readarr_logger.debug(f"Added book ID {book_id} to processed list for {instance_name}") + + # Now trigger the search + search_command_result = readarr_api.search_books(api_url, api_key, book_ids_to_search, api_timeout) + + if search_command_result: + command_id = search_command_result + readarr_logger.info(f"Triggered upgrade search command {command_id} for {len(book_ids_to_search)} books.") + increment_stat("readarr", "upgraded") + + # Log to history system for each book + for book in books_to_process: + author_name = book.get("authorName") + book_title = book.get("title") + media_name = f"{author_name} - {book_title}" + log_processed_media("readarr", media_name, book.get("id"), instance_name, "upgrade") + readarr_logger.debug(f"Logged quality upgrade to history for book ID {book.get('id')}") + + processed_count += len(book_ids_to_search) + processed_something = True + readarr_logger.info(f"Processed {processed_count} book upgrades this cycle.") + else: + readarr_logger.error(f"Failed to trigger search for book upgrades.") + + readarr_logger.info(f"Completed processing {processed_count} books for upgrade this cycle.") + + return processed_something \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/apps/readarr_routes.py b/Huntarr.io-6.3.6/src/primary/apps/readarr_routes.py new 
file mode 100644 index 0000000..d729aae --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/readarr_routes.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python3 + +from flask import Blueprint, request, jsonify +import datetime, os, requests +from src.primary import keys_manager +from src.primary.state import get_state_file_path, reset_state_file +from src.primary.utils.logger import get_logger +import traceback +import socket +from urllib.parse import urlparse + +readarr_bp = Blueprint('readarr', __name__) +readarr_logger = get_logger("readarr") + +# Make sure we're using the correct state files +PROCESSED_MISSING_FILE = get_state_file_path("readarr", "processed_missing") +PROCESSED_UPGRADES_FILE = get_state_file_path("readarr", "processed_upgrades") + +@readarr_bp.route('/test-connection', methods=['POST']) +def test_connection(): + """Test connection to a Readarr API instance""" + data = request.json + api_url = data.get('api_url') + api_key = data.get('api_key') + + if not api_url or not api_key: + return jsonify({"success": False, "message": "API URL and API Key are required"}), 400 + + readarr_logger.info(f"Testing connection to Readarr API at {api_url}") + + # Validate URL format + if not (api_url.startswith('http://') or api_url.startswith('https://')): + error_msg = "API URL must start with http:// or https://" + readarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 400 + + # For Readarr, use api/v1 + url = f"{api_url}/api/v1/system/status" + headers = { + "X-Api-Key": api_key, + "Content-Type": "application/json" + } + + try: + # First check if the host is reachable at all + parsed_url = urlparse(api_url) + hostname = parsed_url.hostname + port = parsed_url.port or (443 if parsed_url.scheme == 'https' else 80) + + # Try to establish a socket connection first to provide a better error message for connection issues + try: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(3) # Short timeout for quick 
feedback + result = sock.connect_ex((hostname, port)) + sock.close() + + if result != 0: + error_msg = f"Connection refused - Unable to connect to {hostname}:{port}. Please check if the server is running and the port is correct." + readarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 404 + except socket.gaierror: + error_msg = f"DNS resolution failed - Cannot resolve hostname: {hostname}. Please check your URL." + readarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 404 + except Exception as e: + # Log the socket testing error but continue with the full request + readarr_logger.debug(f"Socket test error, continuing with full request: {str(e)}") + + # Now proceed with the actual API request + response = requests.get(url, headers=headers, timeout=10) + + # For HTTP errors, provide more specific feedback + if response.status_code == 401: + error_msg = "Authentication failed: Invalid API key" + readarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 401 + elif response.status_code == 403: + error_msg = "Access forbidden: Check API key permissions" + readarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 403 + elif response.status_code == 404: + error_msg = "API endpoint not found: This doesn't appear to be a valid Readarr server. Check your URL." 
+ readarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 404 + elif response.status_code >= 500: + error_msg = f"Readarr server error (HTTP {response.status_code}): The Readarr server is experiencing issues" + readarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), response.status_code + + # Raise for other HTTP errors + response.raise_for_status() + + try: + response_data = response.json() + version = response_data.get('version', 'unknown') + readarr_logger.info(f"Successfully connected to Readarr API version: {version}") + + return jsonify({ + "success": True, + "message": "Successfully connected to Readarr API", + "version": version + }) + except ValueError: + error_msg = "Invalid JSON response from Readarr API - This doesn't appear to be a valid Readarr server" + readarr_logger.error(f"{error_msg}. Response content: {response.text[:200]}") + return jsonify({"success": False, "message": error_msg}), 500 + + except requests.exceptions.ConnectionError as e: + # Handle different types of connection errors + error_details = str(e) + if "Connection refused" in error_details: + error_msg = f"Connection refused - Readarr is not running on {api_url} or the port is incorrect" + elif "Name or service not known" in error_details or "getaddrinfo failed" in error_details: + error_msg = f"DNS resolution failed - Cannot find host '{urlparse(api_url).hostname}'. Check your URL." 
+ else: + error_msg = f"Connection error - Check if Readarr is running: {error_details}" + + readarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 404 + except requests.exceptions.Timeout: + error_msg = f"Connection timed out - Readarr took too long to respond" + readarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 504 + except requests.exceptions.RequestException as e: + error_msg = f"Connection test failed: {str(e)}" + readarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 500 diff --git a/Huntarr.io-6.3.6/src/primary/apps/sonarr.py b/Huntarr.io-6.3.6/src/primary/apps/sonarr.py new file mode 100644 index 0000000..18a3f28 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/sonarr.py @@ -0,0 +1,152 @@ +#!/usr/bin/env python3 + +from flask import Blueprint, request, jsonify +import datetime, os, requests +from src.primary import keys_manager +from src.primary.state import get_state_file_path +from src.primary.settings_manager import load_settings +import logging +from src.primary.utils.logger import get_logger + +sonarr_bp = Blueprint('sonarr', __name__) +sonarr_logger = get_logger("sonarr") + +# Make sure we're using the correct state files +PROCESSED_MISSING_FILE = get_state_file_path("sonarr", "processed_missing") +PROCESSED_UPGRADES_FILE = get_state_file_path("sonarr", "processed_upgrades") + +@sonarr_bp.route('/test-connection', methods=['POST']) +def test_connection(): + """Test connection to a Sonarr API instance with comprehensive diagnostics""" + data = request.json + api_url = data.get('api_url') + api_key = data.get('api_key') + api_timeout = data.get('api_timeout', 30) # Use longer timeout for connection test + + if not api_url or not api_key: + return jsonify({"success": False, "message": "API URL and API Key are required"}), 400 + + # Log the test attempt + sonarr_logger.info(f"Testing connection to Sonarr API at {api_url}") + + # First 
check if URL is properly formatted + if not (api_url.startswith('http://') or api_url.startswith('https://')): + error_msg = "API URL must start with http:// or https://" + sonarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 400 + + # Create the test URL and set headers + test_url = f"{api_url.rstrip('/')}/api/v3/system/status" + headers = {'X-Api-Key': api_key} + + try: + # Use a connection timeout separate from read timeout + response = requests.get(test_url, headers=headers, timeout=(10, api_timeout)) + + # Log HTTP status code for diagnostic purposes + sonarr_logger.debug(f"Sonarr API status code: {response.status_code}") + + # Check HTTP status code + response.raise_for_status() + + # Ensure the response is valid JSON + try: + response_data = response.json() + + # Save keys if connection is successful - Not saving here anymore since we use instances + # keys_manager.save_api_keys("sonarr", api_url, api_key) + + sonarr_logger.info(f"Successfully connected to Sonarr API version: {response_data.get('version', 'unknown')}") + + # Return success with some useful information + return jsonify({ + "success": True, + "message": "Successfully connected to Sonarr API", + "version": response_data.get('version', 'unknown') + }) + except ValueError: + error_msg = "Invalid JSON response from Sonarr API" + sonarr_logger.error(f"{error_msg}. 
Response content: {response.text[:200]}") + return jsonify({"success": False, "message": error_msg}), 500 + + except requests.exceptions.Timeout as e: + error_msg = f"Connection timed out after {api_timeout} seconds" + sonarr_logger.error(f"{error_msg}: {str(e)}") + return jsonify({"success": False, "message": error_msg}), 504 + + except requests.exceptions.ConnectionError as e: + error_msg = "Connection error - check hostname and port" + details = str(e) + # Check for common DNS resolution errors + if "Name or service not known" in details or "getaddrinfo failed" in details: + error_msg = "DNS resolution failed - check hostname" + # Check for common connection refused errors + elif "Connection refused" in details: + error_msg = "Connection refused - check if Sonarr is running and the port is correct" + + sonarr_logger.error(f"{error_msg}: {details}") + return jsonify({"success": False, "message": f"{error_msg}: {details}"}), 502 + + except requests.exceptions.RequestException as e: + error_message = f"Connection failed: {str(e)}" + + if hasattr(e, 'response') and e.response is not None: + status_code = e.response.status_code + + # Add specific messages based on common status codes + if status_code == 401: + error_message = "Authentication failed: Invalid API key" + elif status_code == 403: + error_message = "Access forbidden: Check API key permissions" + elif status_code == 404: + error_message = "API endpoint not found: Check API URL" + elif status_code >= 500: + error_message = f"Sonarr server error (HTTP {status_code}): The Sonarr server is experiencing issues" + + # Try to extract more error details if available + try: + error_details = e.response.json() + error_message += f" - {error_details.get('message', 'No details')}" + except ValueError: + if e.response.text: + error_message += f" - Response: {e.response.text[:200]}" + + sonarr_logger.error(error_message) + return jsonify({"success": False, "message": error_message}), 500 + + except Exception as e: + 
error_msg = f"An unexpected error occurred: {str(e)}" + sonarr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 500 + +# --- Configuration and State --- # + +# --- Multi-Instance Support --- # + +# get_configured_instances function has been moved to src/primary/apps/sonarr/__init__.py + +# --- Reset State --- # + +# Function to check if Sonarr is configured +def is_configured(): + """Check if Sonarr API credentials are configured by checking if at least one instance is enabled""" + settings = load_settings("sonarr") + + if not settings: + sonarr_logger.debug("No settings found for Sonarr") + return False + + # Check if instances are configured + if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]: + for instance in settings["instances"]: + if instance.get("enabled", True) and instance.get("api_url") and instance.get("api_key"): + sonarr_logger.debug(f"Found configured Sonarr instance: {instance.get('name', 'Unnamed')}") + return True + + sonarr_logger.debug("No enabled Sonarr instances found with valid API URL and key") + return False + + # Fallback to legacy single-instance config + api_url = settings.get("api_url") + api_key = settings.get("api_key") + return bool(api_url and api_key) diff --git a/Huntarr.io-6.3.6/src/primary/apps/sonarr/__init__.py b/Huntarr.io-6.3.6/src/primary/apps/sonarr/__init__.py new file mode 100644 index 0000000..6c6e3c7 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/sonarr/__init__.py @@ -0,0 +1,94 @@ +""" +Sonarr module initialization +""" + +# Use src.primary imports +from src.primary.apps.sonarr.missing import process_missing_episodes +from src.primary.apps.sonarr.upgrade import process_cutoff_upgrades +from src.primary.settings_manager import load_settings +from src.primary.utils.logger import get_logger + +# Define logger for this module +sonarr_logger = get_logger("sonarr") + +def get_configured_instances(): + """Get all configured and enabled 
Sonarr instances""" + settings = load_settings("sonarr") + instances = [] + # sonarr_logger.info(f"Loaded Sonarr settings for instance check: {settings}") # Removed verbose log + + if not settings: + sonarr_logger.debug("No settings found for Sonarr") + return instances + + # Check if instances are configured + if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]: + # sonarr_logger.info(f"Found 'instances' list with {len(settings['instances'])} items. Processing...") # Removed verbose log + for idx, instance in enumerate(settings["instances"]): + sonarr_logger.debug(f"Checking instance #{idx}: {instance}") + # Enhanced validation + api_url = instance.get("api_url", "").strip() + api_key = instance.get("api_key", "").strip() + + # Enhanced URL validation - ensure URL has proper scheme + if api_url and not (api_url.startswith('http://') or api_url.startswith('https://')): + sonarr_logger.warning(f"Instance '{instance.get('name', 'Unnamed')}' has URL without http(s) scheme: {api_url}") + api_url = f"http://{api_url}" + sonarr_logger.warning(f"Auto-correcting URL to: {api_url}") + + is_enabled = instance.get("enabled", True) + + # Only include properly configured instances + if is_enabled and api_url and api_key: + # Get the exact instance name as configured in the UI + instance_name = instance.get("name", "Default") + sonarr_logger.info(f"Using configured instance name: '{instance_name}' for Sonarr instance") + + # Return only essential instance details + instance_data = { + "instance_name": instance_name, + "api_url": api_url, + "api_key": api_key, + } + instances.append(instance_data) + # sonarr_logger.info(f"Added valid instance: {instance_data}") # Removed verbose log + elif not is_enabled: + sonarr_logger.debug(f"Skipping disabled instance: {instance.get('name', 'Unnamed')}") + else: + # For brand new installations, don't spam logs with warnings about default instances + instance_name = instance.get('name', 'Unnamed') + 
if instance_name == 'Default': + # Use debug level for default instances to avoid log spam on new installations + sonarr_logger.debug(f"Skipping instance '{instance_name}' due to missing API URL or key (URL: '{api_url}', Key Set: {bool(api_key)})") + else: + # Still log warnings for non-default instances + sonarr_logger.warning(f"Skipping instance '{instance_name}' due to missing API URL or key (URL: '{api_url}', Key Set: {bool(api_key)})") + else: + # sonarr_logger.info("No 'instances' list found or list is empty. Checking legacy config.") # Removed verbose log + # Fallback to legacy single-instance config + api_url = settings.get("api_url", "").strip() + api_key = settings.get("api_key", "").strip() + + # Ensure URL has proper scheme + if api_url and not (api_url.startswith('http://') or api_url.startswith('https://')): + sonarr_logger.warning(f"API URL missing http(s) scheme: {api_url}") + api_url = f"http://{api_url}" + sonarr_logger.warning(f"Auto-correcting URL to: {api_url}") + + if api_url and api_key: + # Create a clean instance_data dict for the legacy instance + instance_data = { + "instance_name": "Default", + "api_url": api_url, + "api_key": api_key, + } + instances.append(instance_data) + sonarr_logger.info(f"Using legacy configuration with instance name: 'Default'") + else: + sonarr_logger.warning("No API URL or key found in legacy configuration") + + # Use debug level to avoid spamming logs, especially with 0 instances + sonarr_logger.debug(f"Found {len(instances)} configured and enabled Sonarr instances") + return instances + +__all__ = ["process_missing_episodes", "process_cutoff_upgrades", "get_configured_instances"] \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/apps/sonarr/api.py b/Huntarr.io-6.3.6/src/primary/apps/sonarr/api.py new file mode 100644 index 0000000..07472fd --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/sonarr/api.py @@ -0,0 +1,993 @@ +#!/usr/bin/env python3 +""" +Sonarr-specific API functions 
+Handles all communication with the Sonarr API +""" + +import requests +import json +import sys +import time +import datetime +import traceback +from typing import List, Dict, Any, Optional, Union, Callable +# Correct the import path +from src.primary.utils.logger import get_logger + +# Get logger for the Sonarr app +sonarr_logger = get_logger("sonarr") + +# Use a session for better performance +session = requests.Session() + +def arr_request(api_url: str, api_key: str, api_timeout: int, endpoint: str, method: str = "GET", data: Dict = None) -> Any: + """ + Make a request to the Sonarr API. + + Args: + api_url: The base URL of the Sonarr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + endpoint: The API endpoint to call + method: HTTP method (GET, POST, PUT, DELETE) + data: Optional data payload for POST/PUT requests + + Returns: + The parsed JSON response or None if the request failed + """ + try: + if not api_url or not api_key: + sonarr_logger.error("No URL or API key provided") + return None + + # Ensure api_url has a scheme + if not (api_url.startswith('http://') or api_url.startswith('https://')): + sonarr_logger.error(f"Invalid URL format: {api_url} - URL must start with http:// or https://") + return None + + # Construct the full URL properly + full_url = f"{api_url.rstrip('/')}/api/v3/{endpoint.lstrip('/')}" + + sonarr_logger.debug(f"Making {method} request to: {full_url}") + + # Set up headers + headers = { + "X-Api-Key": api_key, + "Content-Type": "application/json" + } + + try: + if method.upper() == "GET": + response = session.get(full_url, headers=headers, timeout=api_timeout) + elif method.upper() == "POST": + response = session.post(full_url, headers=headers, json=data, timeout=api_timeout) + elif method.upper() == "PUT": + response = session.put(full_url, headers=headers, json=data, timeout=api_timeout) + elif method.upper() == "DELETE": + response = session.delete(full_url, headers=headers, 
timeout=api_timeout) + else: + sonarr_logger.error(f"Unsupported HTTP method: {method}") + return None + + # Check for successful response + response.raise_for_status() + + # Check if there's any content before trying to parse JSON + if response.content: + try: + return response.json() + except json.JSONDecodeError as jde: + # Log detailed information about the malformed response + sonarr_logger.error(f"Error decoding JSON response from {endpoint}: {str(jde)}") + sonarr_logger.error(f"Response status code: {response.status_code}") + sonarr_logger.error(f"Response content (first 200 chars): {response.content[:200]}") + return None + else: + sonarr_logger.debug(f"Empty response content from {endpoint}, returning empty dict") + return {} + + except requests.exceptions.RequestException as e: + # Add detailed error logging + error_details = str(e) + if hasattr(e, 'response') and e.response is not None: + error_details += f", Status Code: {e.response.status_code}" + if e.response.content: + error_details += f", Content: {e.response.content[:200]}" + + sonarr_logger.error(f"Error during {method} request to {endpoint}: {error_details}") + return None + except Exception as e: + # Catch all exceptions and log them with traceback + error_msg = f"CRITICAL ERROR in arr_request: {str(e)}" + sonarr_logger.error(error_msg) + sonarr_logger.error(f"Full traceback: {traceback.format_exc()}") + print(error_msg, file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + return None + +def check_connection(api_url: str, api_key: str, api_timeout: int) -> bool: + """Checks connection by fetching system status.""" + if not api_url: + sonarr_logger.error("API URL is empty or not set") + return False + if not api_key: + sonarr_logger.error("API Key is empty or not set") + return False + + try: + # Use a shorter timeout for a quick connection check + quick_timeout = min(api_timeout, 15) + status = get_system_status(api_url, api_key, quick_timeout) + if status and isinstance(status, 
dict) and 'version' in status: + # Log success only if debug is enabled to avoid clutter + sonarr_logger.debug(f"Connection check successful for {api_url}. Version: {status.get('version')}") + return True + else: + # Log details if the status response was unexpected + sonarr_logger.warning(f"Connection check for {api_url} returned unexpected status: {str(status)[:200]}") + return False + except Exception as e: + # Error should have been logged by arr_request, just indicate failure + sonarr_logger.error(f"Connection check failed for {api_url}") + return False + +def get_system_status(api_url: str, api_key: str, api_timeout: int) -> Dict: + """ + Get Sonarr system status. + + Args: + api_url: The base URL of the Sonarr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + + Returns: + System status information or empty dict if request failed + """ + response = arr_request(api_url, api_key, api_timeout, "system/status") + if response: + return response + return {} + +def get_series(api_url: str, api_key: str, api_timeout: int, series_id: Optional[int] = None) -> Union[List, Dict, None]: + """ + Get series information from Sonarr. + + Args: + api_url: The base URL of the Sonarr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + series_id: Optional series ID to get a specific series + + Returns: + List of all series, a specific series, or None if request failed + """ + if series_id: + endpoint = f"series/{series_id}" + else: + endpoint = "series" + + return arr_request(api_url, api_key, api_timeout, endpoint) + +def get_episode(api_url: str, api_key: str, api_timeout: int, episode_id: int) -> Dict: + """ + Get episode information by ID. 
+ + Args: + api_url: The base URL of the Sonarr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + episode_id: The episode ID + + Returns: + Episode information or empty dict if request failed + """ + response = arr_request(api_url, api_key, api_timeout, f"episode/{episode_id}") + if response: + return response + return {} + +def get_queue(api_url: str, api_key: str, api_timeout: int) -> List: + """ + Get the current queue from Sonarr. + + Args: + api_url: The base URL of the Sonarr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + + Returns: + Queue information or empty list if request failed + """ + response = arr_request(api_url, api_key, api_timeout, "queue") + if not response or "records" not in response: + return [] + + return response.get("records", []) + +def get_calendar(api_url: str, api_key: str, api_timeout: int, start_date: Optional[str] = None, end_date: Optional[str] = None) -> List: + """ + Get calendar information for a date range. + + Args: + api_url: The base URL of the Sonarr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + start_date: Optional start date (ISO format) + end_date: Optional end date (ISO format) + + Returns: + Calendar information or empty list if request failed + """ + params = [] + + if start_date: + params.append(f"start={start_date}") + + if end_date: + params.append(f"end={end_date}") + + endpoint = "calendar" + if params: + endpoint = f"{endpoint}?{'&'.join(params)}" + + response = arr_request(api_url, api_key, api_timeout, endpoint) + if response: + return response + return [] + +def command_status(api_url: str, api_key: str, api_timeout: int, command_id: Union[int, str]) -> Dict: + """ + Get the status of a command by ID. 
+ + Args: + api_url: The base URL of the Sonarr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + command_id: The command ID + + Returns: + Command status information or empty dict if request failed + """ + response = arr_request(api_url, api_key, api_timeout, f"command/{command_id}") + if response: + return response + return {} + +def get_missing_episodes(api_url: str, api_key: str, api_timeout: int, monitored_only: bool, series_id: Optional[int] = None) -> List[Dict[str, Any]]: + """Get missing episodes from Sonarr, handling pagination.""" + endpoint = "wanted/missing" + page = 1 + page_size = 1000 # Adjust page size if needed, but 1000 is usually good + all_missing_episodes = [] + retries_per_page = 2 + retry_delay = 3 + + while True: + retry_count = 0 + success = False + + while retry_count <= retries_per_page and not success: + # Parameters for the request + params = { + "page": page, + "pageSize": page_size, + "includeSeries": "true" + } + + # Add series ID filter if provided + if series_id is not None: + params["seriesId"] = series_id + + # Ensure proper URL construction with scheme + base_url = api_url.rstrip('/') + url = f"{base_url}/api/v3/{endpoint.lstrip('/')}" + sonarr_logger.debug(f"Requesting missing episodes page {page} (attempt {retry_count+1}/{retries_per_page+1})") + + try: + response = requests.get(url, headers={"X-Api-Key": api_key}, params=params, timeout=api_timeout) + response.raise_for_status() # Check for HTTP errors (4xx or 5xx) + + if not response.content: + sonarr_logger.warning(f"Empty response for missing episodes page {page} (attempt {retry_count+1})") + if retry_count < retries_per_page: + retry_count += 1 + time.sleep(retry_delay) + continue + else: + sonarr_logger.error(f"Giving up on empty response after {retries_per_page+1} attempts") + break # Exit the retry loop, continuing to next page or ending + + try: + data = response.json() + records = data.get('records', []) + 
total_records_on_page = len(records) + sonarr_logger.debug(f"Parsed {total_records_on_page} missing episode records from page {page}") + + if not records: # No more records found + sonarr_logger.debug(f"No more records found on page {page}. Stopping pagination.") + success = True # Mark as successful even though no records (might be legitimate) + break # Exit retry loop, then also exit pagination loop + + all_missing_episodes.extend(records) + + # Check if this was the last page + if total_records_on_page < page_size: + sonarr_logger.debug(f"Received {total_records_on_page} records (less than page size {page_size}). Last page.") + success = True + break # Exit retry loop, then also exit pagination loop + + # We got records and need to continue - mark success for this page + success = True + break # Exit retry loop, continue to next page + + except json.JSONDecodeError as e: + sonarr_logger.error(f"Failed to decode JSON response for missing episodes page {page} (attempt {retry_count+1}): {e}") + if retry_count < retries_per_page: + retry_count += 1 + time.sleep(retry_delay) + continue + else: + sonarr_logger.error(f"Giving up after {retries_per_page+1} failed JSON decode attempts") + break # Exit retry loop, moving to next page or ending + + except requests.exceptions.RequestException as e: + sonarr_logger.error(f"Request error for missing episodes page {page} (attempt {retry_count+1}): {e}") + if retry_count < retries_per_page: + retry_count += 1 + time.sleep(retry_delay) + continue + else: + sonarr_logger.error(f"Giving up on request after {retries_per_page+1} failed attempts") + break # Exit retry loop + except Exception as e: + sonarr_logger.error(f"Unexpected error for missing episodes page {page} (attempt {retry_count+1}): {e}") + if retry_count < retries_per_page: + retry_count += 1 + time.sleep(retry_delay) + continue + else: + sonarr_logger.error(f"Giving up after unexpected error and {retries_per_page+1} attempts") + break # Exit retry loop + + # If we 
didn't succeed after all retries or there are no more records, stop pagination + if not success or not records: + break + + # Prepare for the next page + page += 1 + + sonarr_logger.info(f"Total missing episodes fetched across all pages: {len(all_missing_episodes)}") + + # Apply monitored filter after fetching all pages + if monitored_only: + original_count = len(all_missing_episodes) + filtered_missing = [ + ep for ep in all_missing_episodes + if ep.get('series', {}).get('monitored', False) and ep.get('monitored', False) + ] + sonarr_logger.debug(f"Filtered for monitored_only=True: {len(filtered_missing)} monitored episodes (out of {original_count} total)") + return filtered_missing + else: + sonarr_logger.debug(f"Returning {len(all_missing_episodes)} episodes (monitored_only=False)") + return all_missing_episodes + +def get_cutoff_unmet_episodes(api_url: str, api_key: str, api_timeout: int, monitored_only: bool) -> List[Dict[str, Any]]: + """Get cutoff unmet episodes from Sonarr, handling pagination.""" + endpoint = "wanted/cutoff" + page = 1 + page_size = 1000 # Sonarr's max page size for this endpoint + all_cutoff_unmet = [] + retries_per_page = 2 + retry_delay = 3 + + sonarr_logger.debug(f"Starting fetch for cutoff unmet episodes (monitored_only={monitored_only}).") + + while True: + retry_count = 0 + success = False + records = [] + + while retry_count <= retries_per_page and not success: + # Parameters for the request + params = { + "page": page, + "pageSize": page_size, + "includeSeries": "true", # Include series info for filtering + "sortKey": "airDateUtc", + "sortDir": "asc" + } + url = f"{api_url}/api/v3/{endpoint}" + sonarr_logger.debug(f"Requesting cutoff unmet page {page} (attempt {retry_count+1}/{retries_per_page+1})") + + try: + response = requests.get(url, headers={"X-Api-Key": api_key}, params=params, timeout=api_timeout) + sonarr_logger.debug(f"Sonarr API response status code for cutoff unmet page {page}: {response.status_code}") + 
response.raise_for_status() # Check for HTTP errors + + if not response.content: + sonarr_logger.warning(f"Empty response for cutoff unmet episodes page {page} (attempt {retry_count+1})") + if retry_count < retries_per_page: + retry_count += 1 + time.sleep(retry_delay) + continue + else: + sonarr_logger.error(f"Giving up on empty response after {retries_per_page+1} attempts") + break + + try: + data = response.json() + records = data.get('records', []) + total_records_on_page = len(records) + total_records_reported = data.get('totalRecords', 0) + + if page == 1: + sonarr_logger.info(f"Sonarr API reports {total_records_reported} total cutoff unmet records.") + + sonarr_logger.debug(f"Parsed {total_records_on_page} cutoff unmet records from page {page}") + + if not records: # No more records found + sonarr_logger.debug(f"No more cutoff unmet records found on page {page}. Stopping pagination.") + success = True + break + + all_cutoff_unmet.extend(records) + + # Check if this was the last page + if total_records_on_page < page_size: + sonarr_logger.debug(f"Received {total_records_on_page} records (less than page size {page_size}). 
Last page.") + success = True + break + + # Success for this page + success = True + break + + except json.JSONDecodeError as e: + sonarr_logger.error(f"Failed to decode JSON for cutoff unmet page {page} (attempt {retry_count+1}): {e}") + if retry_count < retries_per_page: + retry_count += 1 + time.sleep(retry_delay) + continue + else: + sonarr_logger.error(f"Giving up after {retries_per_page+1} failed JSON decode attempts") + break + + except requests.exceptions.Timeout as e: + sonarr_logger.error(f"Timeout for cutoff unmet page {page} (attempt {retry_count+1}): {e}") + if retry_count < retries_per_page: + retry_count += 1 + # Use a slightly longer retry delay for timeouts + time.sleep(retry_delay * 2) + continue + else: + sonarr_logger.error(f"Giving up after {retries_per_page+1} timeout failures") + break + + except requests.exceptions.RequestException as e: + error_details = f"Error: {e}" + if hasattr(e, 'response') and e.response is not None: + error_details += f", Status Code: {e.response.status_code}" + if hasattr(e.response, 'text') and e.response.text: + error_details += f", Response: {e.response.text[:500]}" + + sonarr_logger.error(f"Request error for cutoff unmet page {page} (attempt {retry_count+1}): {error_details}") + if retry_count < retries_per_page: + retry_count += 1 + time.sleep(retry_delay) + continue + else: + sonarr_logger.error(f"Giving up on request after {retries_per_page+1} failed attempts") + break + + except Exception as e: + sonarr_logger.error(f"Unexpected error for cutoff unmet page {page} (attempt {retry_count+1}): {e}", exc_info=True) + if retry_count < retries_per_page: + retry_count += 1 + time.sleep(retry_delay) + continue + else: + sonarr_logger.error(f"Giving up after unexpected error and {retries_per_page+1} attempts") + break + + # If we didn't succeed after all retries or there are no more records, stop pagination + if not success or not records: + break + + # Prepare for the next page + page += 1 + + 
sonarr_logger.info(f"Total cutoff unmet episodes fetched across all pages: {len(all_cutoff_unmet)}") + + # Apply monitored filter after fetching all pages + if monitored_only: + original_count = len(all_cutoff_unmet) + # Ensure series and episode are monitored + filtered_cutoff_unmet = [ + ep for ep in all_cutoff_unmet + if ep.get('series', {}).get('monitored', False) and ep.get('monitored', False) + ] + sonarr_logger.debug(f"Filtered for monitored_only=True: {len(filtered_cutoff_unmet)} monitored cutoff unmet episodes remain (out of {original_count} total).") + return filtered_cutoff_unmet + else: + sonarr_logger.debug(f"Returning {len(all_cutoff_unmet)} cutoff unmet episodes (monitored_only=False).") + return all_cutoff_unmet + +def get_cutoff_unmet_episodes_random_page(api_url: str, api_key: str, api_timeout: int, monitored_only: bool, count: int) -> List[Dict[str, Any]]: + """ + Get a specified number of random cutoff unmet episodes by selecting a random page. + This is much more efficient for very large libraries. 
+ + Args: + api_url: The base URL of the Sonarr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + monitored_only: Whether to include only monitored episodes + count: How many episodes to return + + Returns: + A list of randomly selected cutoff unmet episodes + """ + endpoint = "wanted/cutoff" + page_size = 100 # Smaller page size to make the initial query faster + + # First, make a request to get just the total record count (page 1 with size=1) + params = { + "page": 1, + "pageSize": 1, + "includeSeries": "true" # Include series info for filtering + } + url = f"{api_url}/api/v3/{endpoint}" + + try: + # Get total record count from a minimal query + response = requests.get(url, headers={"X-Api-Key": api_key}, params=params, timeout=api_timeout) + response.raise_for_status() + data = response.json() + total_records = data.get('totalRecords', 0) + + if total_records == 0: + sonarr_logger.info("No cutoff unmet episodes found in Sonarr.") + return [] + + # Calculate total pages with our desired page size + total_pages = (total_records + page_size - 1) // page_size + sonarr_logger.info(f"Found {total_records} total cutoff unmet episodes across {total_pages} pages") + + if total_pages == 0: + return [] + + # Select a random page + import random + random_page = random.randint(1, total_pages) + sonarr_logger.info(f"Selected random page {random_page} of {total_pages} for quality upgrade selection") + + # Get episodes from the random page + params = { + "page": random_page, + "pageSize": page_size, + "includeSeries": "true" + } + + response = requests.get(url, headers={"X-Api-Key": api_key}, params=params, timeout=api_timeout) + response.raise_for_status() + + data = response.json() + records = data.get('records', []) + sonarr_logger.info(f"Retrieved {len(records)} episodes from page {random_page}") + + # Apply monitored filter if requested + if monitored_only: + filtered_records = [ + ep for ep in records + if ep.get('series', 
{}).get('monitored', False) and ep.get('monitored', False) + ] + sonarr_logger.debug(f"Filtered to {len(filtered_records)} monitored episodes") + records = filtered_records + + # Select random episodes from this page + if len(records) > count: + selected_records = random.sample(records, count) + sonarr_logger.debug(f"Randomly selected {len(selected_records)} episodes from page {random_page}") + return selected_records + else: + # If we have fewer episodes than requested, return all of them + sonarr_logger.debug(f"Returning all {len(records)} episodes from page {random_page} (fewer than requested {count})") + return records + + except requests.exceptions.RequestException as e: + sonarr_logger.error(f"Error getting random cutoff unmet episodes from Sonarr: {str(e)}") + return [] + except json.JSONDecodeError as e: + sonarr_logger.error(f"Failed to decode JSON response for random cutoff selection: {str(e)}") + return [] + except Exception as e: + sonarr_logger.error(f"Unexpected error in random cutoff selection: {str(e)}", exc_info=True) + return [] + +def get_missing_episodes_random_page(api_url: str, api_key: str, api_timeout: int, monitored_only: bool, count: int, series_id: Optional[int] = None) -> List[Dict[str, Any]]: + """ + Get a specified number of random missing episodes by selecting a random page. + This is more efficient for very large libraries. 
+ + Args: + api_url: The base URL of the Sonarr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + monitored_only: Whether to include only monitored episodes + count: How many episodes to return + series_id: Optional series ID to filter results for a specific series + + Returns: + A list of randomly selected missing episodes, up to the requested count + """ + endpoint = "wanted/missing" + page_size = 100 # Smaller page size for better performance + retries = 2 + retry_delay = 3 + + # First, make a request to get just the total record count (page 1 with size=1) + params = { + "page": 1, + "pageSize": 1, + "includeSeries": "true" # Include series info for filtering + } + url = f"{api_url}/api/v3/{endpoint}" + + for attempt in range(retries + 1): + try: + # Get total record count from a minimal query + sonarr_logger.debug(f"Getting missing episodes count (attempt {attempt+1}/{retries+1})") + response = requests.get(url, headers={"X-Api-Key": api_key}, params=params, timeout=api_timeout) + response.raise_for_status() + + if not response.content: + sonarr_logger.warning(f"Empty response when getting missing count (attempt {attempt+1})") + if attempt < retries: + time.sleep(retry_delay) + continue + return [] + + try: + data = response.json() + total_records = data.get('totalRecords', 0) + + if total_records == 0: + sonarr_logger.info("No missing episodes found in Sonarr.") + return [] + + # Calculate total pages with our desired page size + total_pages = (total_records + page_size - 1) // page_size + sonarr_logger.info(f"Found {total_records} total missing episodes across {total_pages} pages") + + if total_pages == 0: + return [] + + # Select a random page + import random + random_page = random.randint(1, total_pages) + sonarr_logger.info(f"Selected random page {random_page} of {total_pages} for missing episodes") + + # Get episodes from the random page + params = { + "page": random_page, + "pageSize": page_size, + 
"includeSeries": "true" + } + + if series_id is not None: + params["seriesId"] = series_id + + response = requests.get(url, headers={"X-Api-Key": api_key}, params=params, timeout=api_timeout) + response.raise_for_status() + + if not response.content: + sonarr_logger.warning(f"Empty response when getting missing episodes page {random_page}") + return [] + + try: + data = response.json() + records = data.get('records', []) + sonarr_logger.info(f"Retrieved {len(records)} missing episodes from page {random_page}") + + # Apply monitored filter if requested + if monitored_only: + filtered_records = [ + ep for ep in records + if ep.get('series', {}).get('monitored', False) and ep.get('monitored', False) + ] + sonarr_logger.debug(f"Filtered to {len(filtered_records)} monitored missing episodes") + records = filtered_records + + # Select random episodes from this page + if len(records) > count: + selected_records = random.sample(records, count) + sonarr_logger.debug(f"Randomly selected {len(selected_records)} missing episodes from page {random_page}") + return selected_records + else: + # If we have fewer episodes than requested, return all of them + sonarr_logger.debug(f"Returning all {len(records)} missing episodes from page {random_page} (fewer than requested {count})") + return records + + except json.JSONDecodeError as jde: + sonarr_logger.error(f"Failed to decode JSON response for missing episodes page {random_page}: {str(jde)}") + if attempt < retries: + time.sleep(retry_delay) + continue + return [] + + except json.JSONDecodeError as jde: + sonarr_logger.error(f"Failed to decode JSON response for missing episodes count: {str(jde)}") + if attempt < retries: + time.sleep(retry_delay) + continue + return [] + + except requests.exceptions.RequestException as e: + sonarr_logger.error(f"Error getting missing episodes from Sonarr (attempt {attempt+1}): {str(e)}") + if attempt < retries: + time.sleep(retry_delay) + continue + return [] + + except Exception as e: + 
sonarr_logger.error(f"Unexpected error getting missing episodes (attempt {attempt+1}): {str(e)}", exc_info=True) + if attempt < retries: + time.sleep(retry_delay) + continue + return [] + + # If we get here, all retries failed + sonarr_logger.error("All attempts to get missing episodes failed") + return [] + +def search_episode(api_url: str, api_key: str, api_timeout: int, episode_ids: List[int]) -> Optional[Union[int, str]]: + """Trigger a search for specific episodes in Sonarr.""" + if not episode_ids: + sonarr_logger.warning("No episode IDs provided for search.") + return None + try: + endpoint = f"{api_url}/api/v3/command" + payload = { + "name": "EpisodeSearch", + "episodeIds": episode_ids + } + response = requests.post(endpoint, headers={"X-Api-Key": api_key}, json=payload, timeout=api_timeout) + response.raise_for_status() + command_id = response.json().get('id') + sonarr_logger.info(f"Triggered Sonarr search for episode IDs: {episode_ids}. Command ID: {command_id}") + return command_id + except requests.exceptions.RequestException as e: + sonarr_logger.error(f"Error triggering Sonarr search for episode IDs {episode_ids}: {e}") + return None + except Exception as e: + sonarr_logger.error(f"An unexpected error occurred while triggering Sonarr search: {e}") + return None + +def get_command_status(api_url: str, api_key: str, api_timeout: int, command_id: Union[int, str]) -> Optional[Dict[str, Any]]: + """Get the status of a Sonarr command.""" + try: + endpoint = f"{api_url}/api/v3/command/{command_id}" + response = requests.get(endpoint, headers={"X-Api-Key": api_key}, timeout=api_timeout) + response.raise_for_status() + status = response.json() + sonarr_logger.debug(f"Checked Sonarr command status for ID {command_id}: {status.get('status')}") + return status + except requests.exceptions.RequestException as e: + sonarr_logger.error(f"Error getting Sonarr command status for ID {command_id}: {e}") + return None + except Exception as e: + sonarr_logger.error(f"An 
unexpected error occurred while getting Sonarr command status: {e}") + return None + +def get_download_queue_size(api_url: str, api_key: str, api_timeout: int) -> int: + """Get the current size of the Sonarr download queue.""" + retries = 2 # Number of retry attempts + retry_delay = 3 # Delay between retries in seconds + + for attempt in range(retries + 1): + try: + endpoint = f"{api_url}/api/v3/queue?page=1&pageSize=1" # Just get total count, don't need records + response = requests.get(endpoint, headers={"X-Api-Key": api_key}, params={"includeSeries": "false"}, timeout=api_timeout) + response.raise_for_status() + + if not response.content: + sonarr_logger.warning(f"Empty response when getting queue size (attempt {attempt+1}/{retries+1})") + if attempt < retries: + time.sleep(retry_delay) + continue + return -1 + + try: + queue_data = response.json() + queue_size = queue_data.get('totalRecords', 0) + sonarr_logger.debug(f"Sonarr download queue size: {queue_size}") + return queue_size + except json.JSONDecodeError as jde: + sonarr_logger.error(f"Failed to decode queue JSON (attempt {attempt+1}/{retries+1}): {jde}") + if attempt < retries: + time.sleep(retry_delay) + continue + return -1 + + except requests.exceptions.RequestException as e: + sonarr_logger.error(f"Error getting Sonarr download queue size (attempt {attempt+1}/{retries+1}): {e}") + if attempt < retries: + sonarr_logger.info(f"Retrying in {retry_delay} seconds...") + time.sleep(retry_delay) + continue + return -1 # Return -1 to indicate an error + except Exception as e: + sonarr_logger.error(f"Unexpected error getting queue size (attempt {attempt+1}/{retries+1}): {e}") + if attempt < retries: + time.sleep(retry_delay) + continue + return -1 + + # If we get here, all retries failed + sonarr_logger.error(f"All {retries+1} attempts to get download queue size failed") + return -1 + +def refresh_series(api_url: str, api_key: str, api_timeout: int, series_id: int) -> Optional[Union[int, str]]: + """Trigger a 
refresh for a specific series in Sonarr.""" + try: + endpoint = f"{api_url}/api/v3/command" + payload = { + "name": "RefreshSeries", + "seriesId": series_id + } + response = requests.post(endpoint, headers={"X-Api-Key": api_key}, json=payload, timeout=api_timeout) + response.raise_for_status() + command_id = response.json().get('id') + sonarr_logger.info(f"Triggered Sonarr refresh for series ID: {series_id}. Command ID: {command_id}") + return command_id + except requests.exceptions.RequestException as e: + sonarr_logger.error(f"Error triggering Sonarr refresh for series ID {series_id}: {e}") + return None + except Exception as e: + sonarr_logger.error(f"An unexpected error occurred while triggering Sonarr series refresh: {e}") + return None + +def get_series_by_id(api_url: str, api_key: str, api_timeout: int, series_id: int) -> Optional[Dict[str, Any]]: + """Get series details by ID from Sonarr.""" + try: + endpoint = f"{api_url}/api/v3/series/{series_id}" + response = requests.get(endpoint, headers={"X-Api-Key": api_key}, timeout=api_timeout) + response.raise_for_status() + series_data = response.json() + sonarr_logger.debug(f"Fetched details for Sonarr series ID: {series_id}") + return series_data + except requests.exceptions.RequestException as e: + sonarr_logger.error(f"Error getting Sonarr series details for ID {series_id}: {e}") + return None + except Exception as e: + sonarr_logger.error(f"An unexpected error occurred while getting Sonarr series details: {e}") + return None + +def search_season(api_url: str, api_key: str, api_timeout: int, series_id: int, season_number: int) -> Optional[Union[int, str]]: + """Trigger a search for a specific season in Sonarr.""" + try: + endpoint = f"{api_url}/api/v3/command" + payload = { + "name": "SeasonSearch", + "seriesId": series_id, + "seasonNumber": season_number + } + response = requests.post(endpoint, headers={"X-Api-Key": api_key}, json=payload, timeout=api_timeout) + response.raise_for_status() + command_id = 
response.json().get('id') + sonarr_logger.info(f"Triggered Sonarr season search for series ID: {series_id}, season: {season_number}. Command ID: {command_id}") + return command_id + except requests.exceptions.RequestException as e: + sonarr_logger.error(f"Error triggering Sonarr season search for series ID {series_id}, season {season_number}: {e}") + return None + except Exception as e: + sonarr_logger.error(f"An unexpected error occurred while triggering Sonarr season search: {e}") + return None + +def get_series_with_missing_episodes(api_url: str, api_key: str, api_timeout: int, monitored_only: bool = True, limit: int = 50, random_mode: bool = True) -> List[Dict[str, Any]]: + """ + Get a list of series that have missing episodes, along with missing episode counts per season. + This is much more efficient than fetching all missing episodes for large libraries. + + Args: + api_url: The base URL of the Sonarr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + monitored_only: Whether to only include monitored series + limit: Maximum number of series to return + random_mode: Whether to randomly select series + + Returns: + A list of series with missing episodes and counts per season + """ + result = [] + + # Step 1: Get all series + all_series = get_series(api_url, api_key, api_timeout) + if not all_series: + sonarr_logger.error("Failed to retrieve series list") + return [] + + # Step 2: Filter to monitored series if requested + if monitored_only: + filtered_series = [s for s in all_series if s.get('monitored', False)] + sonarr_logger.info(f"Filtered from {len(all_series)} total series to {len(filtered_series)} monitored series") + else: + filtered_series = all_series + + # Apply random selection if requested + if random_mode: + import random + sonarr_logger.info(f"Using RANDOM selection mode for missing episodes") + random.shuffle(filtered_series) + else: + sonarr_logger.info(f"Using SEQUENTIAL selection mode for missing 
episodes") + + # Step 3: For each series, check if it has missing episodes using series/id/episodes endpoint + # This is much more efficient than using the wanted/missing endpoint + series_with_missing = [] + examined_count = 0 + + for series in filtered_series[:limit]: + examined_count += 1 + series_id = series.get('id') + series_title = series.get('title', 'Unknown') + + if not series_id: + continue + + # Get all episodes for this series + try: + endpoint = f"{api_url}/api/v3/episode?seriesId={series_id}" + response = requests.get(endpoint, headers={"X-Api-Key": api_key}, timeout=api_timeout) + response.raise_for_status() + + if not response.content: + continue + + episodes = response.json() + + # Filter to missing episodes + missing_episodes = [ + e for e in episodes + if e.get('hasFile') is False and + (not monitored_only or e.get('monitored', False)) + ] + + if not missing_episodes: + continue + + # Group by season + seasons_dict = {} + for episode in missing_episodes: + season_number = episode.get('seasonNumber') + if season_number is not None: + if season_number not in seasons_dict: + seasons_dict[season_number] = [] + seasons_dict[season_number].append(episode) + + # If we have any seasons with missing episodes, add this series to our result + if seasons_dict: + missing_info = { + 'series_id': series_id, + 'series_title': series_title, + 'seasons': [ + { + 'season_number': season, + 'episode_count': len(episodes), + 'episodes': episodes + } + for season, episodes in seasons_dict.items() + ] + } + series_with_missing.append(missing_info) + + sonarr_logger.debug(f"Found series {series_title} with {len(missing_episodes)} missing episodes across {len(seasons_dict)} seasons") + + except Exception as e: + sonarr_logger.error(f"Error checking missing episodes for series {series_title} (ID: {series_id}): {str(e)}") + continue + + selection_mode = "RANDOM" if random_mode else "SEQUENTIAL" + sonarr_logger.info(f"Examined {examined_count} series ({selection_mode} 
mode) and found {len(series_with_missing)} with missing episodes") + return series_with_missing \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/apps/sonarr/missing.py b/Huntarr.io-6.3.6/src/primary/apps/sonarr/missing.py new file mode 100644 index 0000000..f4f26d0 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/sonarr/missing.py @@ -0,0 +1,612 @@ +#!/usr/bin/env python3 +""" +Sonarr missing episodes processing module for Huntarr +""" + +import time +import random +from typing import List, Dict, Any, Set, Callable +from src.primary.utils.logger import get_logger +from src.primary.apps.sonarr import api as sonarr_api +from src.primary.stats_manager import increment_stat +from src.primary.stateful_manager import is_processed, add_processed_id +from src.primary.utils.history_utils import log_processed_media +from src.primary.settings_manager import load_settings, get_advanced_setting + +# Get logger for the Sonarr app +sonarr_logger = get_logger("sonarr") + +def process_missing_episodes( + api_url: str, + api_key: str, + instance_name: str, + api_timeout: int = get_advanced_setting("api_timeout", 120), + monitored_only: bool = True, + skip_future_episodes: bool = True, + skip_series_refresh: bool = False, + hunt_missing_items: int = 5, + hunt_missing_mode: str = "episodes", + command_wait_delay: int = get_advanced_setting("command_wait_delay", 1), + command_wait_attempts: int = get_advanced_setting("command_wait_attempts", 600), + stop_check: Callable[[], bool] = lambda: False +) -> bool: + """ + Process missing episodes in Sonarr and trigger searches + Added support for multiple missing modes (episodes, seasons, shows) + """ + if hunt_missing_items <= 0: + sonarr_logger.info("'hunt_missing_items' setting is 0 or less. 
Skipping missing processing.") + return False + + sonarr_logger.info(f"Checking for {hunt_missing_items} missing episodes in {hunt_missing_mode} mode...") + + # Handle different modes + if hunt_missing_mode == "episodes": + # Handle episode-based missing items + sonarr_logger.info("Episode-based missing mode selected") + return process_missing_episodes_mode( + api_url, api_key, instance_name, api_timeout, monitored_only, + skip_future_episodes, skip_series_refresh, + hunt_missing_items, command_wait_delay, command_wait_attempts, + stop_check + ) + elif hunt_missing_mode == "seasons_packs": + # Handle season pack searches (using SeasonSearch command) + sonarr_logger.info("Season [Packs] mode selected - searching for complete season packs") + return process_missing_seasons_packs_mode( + api_url, api_key, instance_name, api_timeout, monitored_only, + skip_series_refresh, hunt_missing_items, + command_wait_delay, command_wait_attempts, stop_check + ) + elif hunt_missing_mode == "shows": + # Handle show-based missing items (all episodes from a show) + sonarr_logger.info("Show-based missing mode selected") + return process_missing_shows_mode( + api_url, api_key, instance_name, api_timeout, monitored_only, + skip_future_episodes, skip_series_refresh, hunt_missing_items, + command_wait_delay, command_wait_attempts, stop_check + ) + else: + sonarr_logger.error(f"Invalid hunt_missing_mode: {hunt_missing_mode}. 
Valid options are 'episodes', 'seasons_packs', or 'shows'.") + return False + +def process_missing_episodes_mode( + api_url: str, + api_key: str, + instance_name: str, + api_timeout: int, + monitored_only: bool, + skip_future_episodes: bool, + skip_series_refresh: bool, + hunt_missing_items: int, + command_wait_delay: int, + command_wait_attempts: int, + stop_check: Callable[[], bool] +) -> bool: + """Process missing episodes in episode mode (original implementation).""" + processed_any = False + + # Always use random selection for missing episodes + sonarr_logger.info(f"Using random selection for missing episodes") + episodes_to_search = sonarr_api.get_missing_episodes_random_page( + api_url, api_key, api_timeout, monitored_only, hunt_missing_items) + + if stop_check(): + sonarr_logger.info("Stop requested during missing episode processing.") + return processed_any + + # Filter out future episodes for random selection approach + if skip_future_episodes: + now_unix = time.time() + original_count = len(episodes_to_search) + episodes_to_search = [ + ep for ep in episodes_to_search + if ep.get('airDateUtc') and time.mktime(time.strptime(ep['airDateUtc'], '%Y-%m-%dT%H:%M:%SZ')) < now_unix + ] + skipped_count = original_count - len(episodes_to_search) + if skipped_count > 0: + sonarr_logger.info(f"Skipped {skipped_count} future episodes based on air date.") + + # Filter out already processed episodes for random selection approach + unprocessed_episodes = [] + for episode in episodes_to_search: + episode_id = str(episode.get("id")) + if not is_processed("sonarr", instance_name, episode_id): + unprocessed_episodes.append(episode) + else: + sonarr_logger.debug(f"Skipping already processed episode ID: {episode_id}") + + sonarr_logger.info(f"Found {len(unprocessed_episodes)} unprocessed missing episodes out of {len(episodes_to_search)} total.") + episodes_to_search = unprocessed_episodes + + if not episodes_to_search: + sonarr_logger.info("No missing episodes left to process 
after filtering.") + return False + + sonarr_logger.info(f"Selected {len(episodes_to_search)} missing episodes to search.") + + # Add detailed listing of episodes being processed + if episodes_to_search: + sonarr_logger.info(f"Episodes selected for processing in this cycle:") + for idx, episode in enumerate(episodes_to_search): + series_title = episode.get('series', {}).get('title', 'Unknown Series') + episode_title = episode.get('title', 'Unknown Episode') + season_number = episode.get('seasonNumber', 'Unknown Season') + episode_number = episode.get('episodeNumber', 'Unknown Episode') + + episode_id = episode.get("id") + try: + season_episode = f"S{season_number:02d}E{episode_number:02d}" + except (ValueError, TypeError): + season_episode = f"S{season_number}E{episode_number}" + + sonarr_logger.info(f" {idx+1}. {series_title} - {season_episode} - \"{episode_title}\" (ID: {episode_id})") + + # Group episodes by series for potential refresh + series_to_refresh: Dict[int, List[int]] = {} + series_titles: Dict[int, str] = {} # Store titles for logging + for episode in episodes_to_search: + series_id = episode.get('seriesId') + if series_id: + if series_id not in series_to_refresh: + series_to_refresh[series_id] = [] + # Store title when first encountering the series ID + series_titles[series_id] = episode.get('series', {}).get('title', f"Series ID {series_id}") + series_to_refresh[series_id].append(episode['id']) + + # Process each series + for series_id, episode_ids in series_to_refresh.items(): + if stop_check(): sonarr_logger.info("Stop requested before processing next series."); break + series_title = series_titles.get(series_id, f"Series ID {series_id}") + sonarr_logger.info(f"Processing series: {series_title} (ID: {series_id}) with {len(episode_ids)} missing episodes.") + + # Refresh series metadata if not skipped + refresh_command_id = None + if not skip_series_refresh: + sonarr_logger.debug(f"Attempting to refresh series ID: {series_id}") + refresh_command_id 
= sonarr_api.refresh_series(api_url, api_key, api_timeout, series_id) + if refresh_command_id: + # Wait for refresh command to complete + if not wait_for_command( + api_url, api_key, api_timeout, refresh_command_id, + command_wait_delay, command_wait_attempts, "Series Refresh", stop_check + ): + sonarr_logger.warning(f"Series refresh command (ID: {refresh_command_id}) for series {series_id} did not complete successfully or timed out. Proceeding with search anyway.") + else: + sonarr_logger.warning(f"Failed to trigger refresh command for series ID: {series_id}. Proceeding without refresh.") + else: + sonarr_logger.debug(f"Skipping series refresh for series ID: {series_id} as configured.") + + if stop_check(): sonarr_logger.info("Stop requested after series refresh attempt."); break + + # Trigger search for the selected episodes in this series + sonarr_logger.debug(f"Attempting to search for episode IDs: {episode_ids}") + search_command_id = sonarr_api.search_episode(api_url, api_key, api_timeout, episode_ids) + + if search_command_id: + # Add episode IDs to stateful manager IMMEDIATELY after processing each batch + for episode_id in episode_ids: + # Force flush to disk by calling add_processed_id immediately for each ID + success = add_processed_id("sonarr", instance_name, str(episode_id)) + sonarr_logger.debug(f"Added processed ID: {episode_id}, success: {success}") + + # Wait for search command to complete + if wait_for_command( + api_url, api_key, api_timeout, search_command_id, + command_wait_delay, command_wait_attempts, "Episode Search", stop_check + ): + # Mark episodes as processed if search command completed successfully + processed_any = True # Mark that we did something + sonarr_logger.info(f"Successfully processed and searched for {len(episode_ids)} episodes in series {series_id}.") + + # Add stats incrementing right here - this is the code path that's actually being executed + for episode_id in episode_ids: + # Increment stat for each episode 
individually, just like Radarr + increment_stat("sonarr", "hunted") + sonarr_logger.info(f"*** STATS INCREMENT *** sonarr hunted by 1 for episode ID {episode_id}") + + # Log to history system + # Find the corresponding episode data for this ID + for episode in episodes_to_search: + if episode.get('id') == episode_id: + series_title = episode.get('series', {}).get('title', 'Unknown Series') + episode_title = episode.get('title', 'Unknown Episode') + season_number = episode.get('seasonNumber', 'Unknown Season') + episode_number = episode.get('episodeNumber', 'Unknown Episode') + + try: + season_episode = f"S{season_number:02d}E{episode_number:02d}" + except (ValueError, TypeError): + season_episode = f"S{season_number}E{episode_number}" + + media_name = f"{series_title} - {season_episode} - {episode_title}" + process_id = f"{series_id}_{episode_id}" + add_processed_id("sonarr", instance_name, process_id) + log_processed_media("sonarr", media_name, episode_id, instance_name, "missing") + + # Increment the stat for each episode individually (like Radarr does for movies) + increment_stat("sonarr", "hunted") + sonarr_logger.debug(f"Incremented sonarr hunted statistic for episode {episode_id}") + break + + # The batch increment was causing issues - removing it + # increment_stat("sonarr", "hunted", len(episode_ids)) + # sonarr_logger.debug(f"Incremented sonarr hunted statistics by {len(episode_ids)}") + else: + sonarr_logger.warning(f"Episode search command (ID: {search_command_id}) for series {series_id} did not complete successfully or timed out. 
Episodes will not be marked as processed yet.") + else: + sonarr_logger.error(f"Failed to trigger search command for episodes {episode_ids} in series {series_id}.") + + sonarr_logger.info("Finished missing episodes processing cycle for Sonarr.") + return processed_any + +def process_missing_seasons_packs_mode( + api_url: str, + api_key: str, + instance_name: str, + api_timeout: int, + monitored_only: bool, + skip_series_refresh: bool, + hunt_missing_items: int, + command_wait_delay: int, + command_wait_attempts: int, + stop_check: Callable[[], bool] +) -> bool: + """ + Process missing seasons using the SeasonSearch command + This mode is optimized for torrent users who rely on season packs + Uses a direct episode lookup approach which is much more efficient + """ + processed_any = False + + # Get all missing episodes in one call instead of per-series + missing_episodes = sonarr_api.get_missing_episodes(api_url, api_key, api_timeout, monitored_only) + if not missing_episodes: + sonarr_logger.info("No missing episodes found") + return False + + # Group episodes by series and season + missing_seasons = {} + for episode in missing_episodes: + if monitored_only and not episode.get('monitored', False): + continue + + series_id = episode.get('seriesId') + if not series_id: + continue + + season_number = episode.get('seasonNumber') + series_title = episode.get('series', {}).get('title', 'Unknown Series') + + key = f"{series_id}:{season_number}" + if key not in missing_seasons: + missing_seasons[key] = { + 'series_id': series_id, + 'season_number': season_number, + 'series_title': series_title, + 'episode_count': 0 + } + missing_seasons[key]['episode_count'] += 1 + + # Convert to list and sort by episode count (most missing episodes first) + seasons_list = list(missing_seasons.values()) + seasons_list.sort(key=lambda x: x['episode_count'], reverse=True) + + # Filter out already processed seasons + unprocessed_seasons = [] + for season in seasons_list: + season_id = 
f"{season['series_id']}_{season['season_number']}" + if not is_processed("sonarr", instance_name, season_id): + unprocessed_seasons.append(season) + else: + sonarr_logger.debug(f"Skipping already processed season ID: {season_id}") + + sonarr_logger.info(f"Found {len(unprocessed_seasons)} unprocessed seasons with missing episodes out of {len(seasons_list)} total.") + + if not unprocessed_seasons: + sonarr_logger.info("All seasons with missing episodes have been processed.") + return False + + # Apply randomization if requested + random.shuffle(unprocessed_seasons) + + # Process up to hunt_missing_items seasons + processed_count = 0 + + # Add detailed logging for selected seasons + if unprocessed_seasons and hunt_missing_items > 0: + seasons_to_process = unprocessed_seasons[:hunt_missing_items] + sonarr_logger.info(f"Randomly selected {min(len(unprocessed_seasons), hunt_missing_items)} seasons with missing episodes:") + + for idx, season in enumerate(seasons_to_process): + sonarr_logger.info(f" {idx+1}. 
{season['series_title']} - Season {season['season_number']} ({season['episode_count']} missing episodes) (Series ID: {season['series_id']})") + + for season in unprocessed_seasons: + if processed_count >= hunt_missing_items: + break + + if stop_check(): + sonarr_logger.info("Stop signal received, halting processing.") + break + + series_id = season['series_id'] + season_number = season['season_number'] + series_title = season['series_title'] + episode_count = season['episode_count'] + + # Refresh series metadata if not skipped + if not skip_series_refresh: + sonarr_logger.debug(f"Refreshing metadata for {series_title} before season pack search") + refresh_command_id = sonarr_api.refresh_series(api_url, api_key, api_timeout, series_id) + if refresh_command_id: + wait_for_command( + api_url, api_key, api_timeout, refresh_command_id, + command_wait_delay, command_wait_attempts, "Series Refresh", stop_check + ) + + sonarr_logger.info(f"Searching for season pack: {series_title} - Season {season_number} (contains {episode_count} missing episodes)") + + # Trigger an API call to search for the entire season + command_id = sonarr_api.search_season(api_url, api_key, api_timeout, series_id, season_number) + + if command_id: + processed_any = True + processed_count += 1 + + # Add season to processed list + season_id = f"{series_id}_{season_number}" + success = add_processed_id("sonarr", instance_name, season_id) + sonarr_logger.debug(f"Added season ID {season_id} to processed list for {instance_name}, success: {success}") + + # Log to history system + media_name = f"{series_title} - Season {season_number} (contains {episode_count} missing episodes)" + log_processed_media("sonarr", media_name, season_id, instance_name, "missing") + sonarr_logger.debug(f"Logged history entry for season pack: {media_name}") + + # Increment stats one by one instead of in a batch + for i in range(episode_count): + increment_stat("sonarr", "hunted") + sonarr_logger.debug(f"Incremented sonarr hunted 
statistics for {episode_count} episodes in season pack") + + # Wait for command to complete if configured + if command_wait_delay > 0 and command_wait_attempts > 0: + if wait_for_command( + api_url, api_key, api_timeout, command_id, + command_wait_delay, command_wait_attempts, "Season Search", stop_check + ): + pass + else: + sonarr_logger.error(f"Failed to trigger search for {series_title}.") + + sonarr_logger.info(f"Processed {processed_count} missing season packs for Sonarr.") + return processed_any + +def process_missing_shows_mode( + api_url: str, + api_key: str, + instance_name: str, + api_timeout: int, + monitored_only: bool, + skip_future_episodes: bool, + skip_series_refresh: bool, + hunt_missing_items: int, + command_wait_delay: int, + command_wait_attempts: int, + stop_check: Callable[[], bool] +) -> bool: + """Process missing episodes in show mode - gets all missing episodes for entire shows.""" + processed_any = False + + # Get series with missing episodes + sonarr_logger.info("Retrieving series with missing episodes...") + series_with_missing = sonarr_api.get_series_with_missing_episodes( + api_url, api_key, api_timeout, monitored_only, random_mode=True) + + if not series_with_missing: + sonarr_logger.info("No series with missing episodes found.") + return False + + # Filter out shows that have been processed + unprocessed_series = [] + for series in series_with_missing: + series_id = str(series.get("series_id")) + if not is_processed("sonarr", instance_name, series_id): + unprocessed_series.append(series) + else: + sonarr_logger.debug(f"Skipping already processed series ID: {series_id}") + + sonarr_logger.info(f"Found {len(unprocessed_series)} unprocessed series with missing episodes out of {len(series_with_missing)} total.") + + if not unprocessed_series: + sonarr_logger.info("All series with missing episodes have been processed.") + return False + + # Select the shows to process (random or sequential) + shows_to_process = random.sample( + 
unprocessed_series, + min(len(unprocessed_series), hunt_missing_items) + ) + + # Add detailed logging for selected shows + if shows_to_process: + sonarr_logger.info("Shows selected for processing in this cycle:") + for idx, show in enumerate(shows_to_process): + show_id = show.get('series_id') + show_title = show.get('series_title', 'Unknown Show') + # Count total missing episodes across all seasons + episode_count = sum(season.get('episode_count', 0) for season in show.get('seasons', [])) + sonarr_logger.info(f" {idx+1}. {show_title} ({episode_count} missing episodes) (Show ID: {show_id})") + + # Process each show + for show in shows_to_process: + if stop_check(): + sonarr_logger.info("Stop requested. Aborting show processing.") + break + + show_id = show.get('series_id') + show_title = show.get('series_title', 'Unknown Show') + + # Get missing episodes for this show + missing_episodes = [] + for season in show.get('seasons', []): + missing_episodes.extend(season.get('episodes', [])) + + # Filter out future episodes if needed + if skip_future_episodes: + now_unix = time.time() + original_count = len(missing_episodes) + missing_episodes = [ + ep for ep in missing_episodes + if ep.get('airDateUtc') and time.mktime(time.strptime(ep['airDateUtc'], '%Y-%m-%dT%H:%M:%SZ')) < now_unix + ] + skipped_count = original_count - len(missing_episodes) + if skipped_count > 0: + sonarr_logger.info(f"Skipped {skipped_count} future episodes for {show_title} based on air date.") + + if not missing_episodes: + sonarr_logger.info(f"No eligible missing episodes found for {show_title} after filtering.") + continue + + # Log episodes to be processed + sonarr_logger.info(f"Processing {len(missing_episodes)} missing episodes for show: {show_title}") + for idx, episode in enumerate(missing_episodes[:5]): # Only log first 5 for brevity + season = episode.get('seasonNumber', 'Unknown') + ep_num = episode.get('episodeNumber', 'Unknown') + title = episode.get('title', 'Unknown Title') + 
sonarr_logger.debug(f" {idx+1}. S{season:02d}E{ep_num:02d} - {title}") + + if len(missing_episodes) > 5: + sonarr_logger.debug(f" ... and {len(missing_episodes)-5} more episodes.") + + # Refresh series if not skipped + if not skip_series_refresh: + sonarr_logger.info(f"Refreshing series info for {show_title}...") + refresh_command_id = sonarr_api.refresh_series(api_url, api_key, api_timeout, show_id) + if refresh_command_id: + wait_success = wait_for_command( + api_url, api_key, api_timeout, refresh_command_id, + command_wait_delay, command_wait_attempts, "Series Refresh", stop_check + ) + if not wait_success: + sonarr_logger.warning(f"Series refresh command timed out or failed for {show_title}. Proceeding with search anyway.") + else: + sonarr_logger.warning(f"Failed to trigger refresh command for {show_title}. Proceeding with search anyway.") + + # Extract episode IDs to search + episode_ids = [episode.get('id') for episode in missing_episodes if episode.get('id')] + + if not episode_ids: + sonarr_logger.warning(f"No valid episode IDs found for {show_title}.") + continue + + # Search for all episodes in the show + sonarr_logger.info(f"Searching for {len(episode_ids)} missing episodes for {show_title}...") + search_successful = sonarr_api.search_episode(api_url, api_key, api_timeout, episode_ids) + + if search_successful: + processed_any = True + sonarr_logger.info(f"Successfully processed {len(episode_ids)} missing episodes in {show_title}") + + # Add episode IDs to stateful manager IMMEDIATELY after processing each batch + for episode_id in episode_ids: + # Force flush to disk by calling add_processed_id immediately for each ID + success = add_processed_id("sonarr", instance_name, str(episode_id)) + sonarr_logger.debug(f"Added processed ID: {episode_id}, success: {success}") + + # Log each episode to history + # Find the corresponding episode data + for episode in missing_episodes: + if episode.get('id') == episode_id: + season = episode.get('seasonNumber', 
'Unknown') + ep_num = episode.get('episodeNumber', 'Unknown') + title = episode.get('title', 'Unknown Title') + + try: + season_episode = f"S{season:02d}E{ep_num:02d}" + except (ValueError, TypeError): + season_episode = f"S{season}E{ep_num}" + + media_name = f"{show_title} - {season_episode} - {title}" + log_processed_media("sonarr", media_name, str(episode_id), instance_name, "missing") + sonarr_logger.debug(f"Logged history entry for episode: {media_name}") + break + + # Add series ID to processed list + success = add_processed_id("sonarr", instance_name, str(show_id)) + sonarr_logger.debug(f"Added series ID {show_id} to processed list for {instance_name}, success: {success}") + + # Also log the entire show to history + media_name = f"{show_title} - Complete Series ({len(episode_ids)} episodes)" + log_processed_media("sonarr", media_name, str(show_id), instance_name, "missing") + sonarr_logger.debug(f"Logged history entry for complete series: {media_name}") + + # Increment the hunted statistics + increment_stat("sonarr", "hunted", len(episode_ids)) + sonarr_logger.debug(f"Incremented sonarr hunted statistics by {len(episode_ids)}") + else: + sonarr_logger.error(f"Failed to trigger search for {show_title}.") + + sonarr_logger.info("Show-based missing episode processing complete.") + return processed_any + +def wait_for_command( + api_url: str, + api_key: str, + api_timeout: int, + command_id: int, + wait_delay: int, + max_attempts: int, + command_name: str = "Command", + stop_check: Callable[[], bool] = lambda: False +) -> bool: + """ + Wait for a Sonarr command to complete or timeout. 
+ + Args: + api_url: The Sonarr API URL + api_key: The Sonarr API key + api_timeout: API request timeout + command_id: The ID of the command to monitor + wait_delay: Seconds to wait between status checks + max_attempts: Maximum number of status check attempts + command_name: Name of the command (for logging) + stop_check: Optional function to check if operation should be aborted + + Returns: + True if command completed successfully, False otherwise + """ + if wait_delay <= 0 or max_attempts <= 0: + sonarr_logger.debug(f"Not waiting for command to complete (wait_delay={wait_delay}, max_attempts={max_attempts})") + return True # Return as if successful since we're not checking + + sonarr_logger.debug(f"Waiting for {command_name} to complete (command ID: {command_id}). Checking every {wait_delay}s for up to {max_attempts} attempts") + + # Wait for command completion + attempts = 0 + while attempts < max_attempts: + if stop_check(): + sonarr_logger.info(f"Stopping wait for {command_name} due to stop request") + return False + + command_status = sonarr_api.get_command_status(api_url, api_key, api_timeout, command_id) + if not command_status: + sonarr_logger.warning(f"Failed to get status for {command_name} (ID: {command_id}), attempt {attempts+1}") + attempts += 1 + time.sleep(wait_delay) + continue + + status = command_status.get('status') + if status == 'completed': + sonarr_logger.debug(f"Sonarr {command_name} (ID: {command_id}) completed successfully") + return True + elif status in ['failed', 'aborted']: + sonarr_logger.warning(f"Sonarr {command_name} (ID: {command_id}) {status}") + return False + + sonarr_logger.debug(f"Sonarr {command_name} (ID: {command_id}) status: {status}, attempt {attempts+1}/{max_attempts}") + + attempts += 1 + time.sleep(wait_delay) + + sonarr_logger.error(f"Sonarr command '{command_name}' (ID: {command_id}) timed out after {max_attempts} attempts.") + return False \ No newline at end of file diff --git 
a/Huntarr.io-6.3.6/src/primary/apps/sonarr/upgrade.py b/Huntarr.io-6.3.6/src/primary/apps/sonarr/upgrade.py new file mode 100644 index 0000000..d6b5a2d --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/sonarr/upgrade.py @@ -0,0 +1,605 @@ +#!/usr/bin/env python3 +""" +Sonarr cutoff upgrade processing module for Huntarr +""" + +import time +import random +from typing import List, Dict, Any, Set, Callable, Union +from src.primary.utils.logger import get_logger +from src.primary.apps.sonarr import api as sonarr_api +from src.primary.stats_manager import increment_stat +from src.primary.stateful_manager import is_processed, add_processed_id +from src.primary.utils.history_utils import log_processed_media +from src.primary.settings_manager import get_advanced_setting + +# Get logger for the Sonarr app +sonarr_logger = get_logger("sonarr") + +def process_cutoff_upgrades( + api_url: str, + api_key: str, + instance_name: str, + api_timeout: int = get_advanced_setting("api_timeout", 120), + monitored_only: bool = True, + skip_series_refresh: bool = False, + hunt_upgrade_items: int = 5, + command_wait_delay: int = get_advanced_setting("command_wait_delay", 1), + command_wait_attempts: int = get_advanced_setting("command_wait_attempts", 600), + stop_check: Callable[[], bool] = lambda: False +) -> bool: + """ + Process quality cutoff upgrades for Sonarr. + This function only uses the episode mode for upgrades regardless of hunt_missing_mode. + """ + if hunt_upgrade_items <= 0: + sonarr_logger.info("'hunt_upgrade_items' setting is 0 or less. 
Skipping upgrade processing.") + return False + + sonarr_logger.info(f"Checking for {hunt_upgrade_items} quality upgrades...") + + sonarr_logger.info("Using RANDOM selection mode for quality upgrades") + + # Always use episode mode for upgrades, regardless of the hunt_missing_mode setting + return process_upgrade_episodes_mode( + api_url, api_key, instance_name, api_timeout, monitored_only, + skip_series_refresh, hunt_upgrade_items, + command_wait_delay, command_wait_attempts, stop_check + ) + +def process_upgrade_episodes_mode( + api_url: str, + api_key: str, + instance_name: str, + api_timeout: int, + monitored_only: bool, + skip_series_refresh: bool, + hunt_upgrade_items: int, + command_wait_delay: int, + command_wait_attempts: int, + stop_check: Callable[[], bool] +) -> bool: + """Process upgrades in episode mode (original implementation).""" + processed_any = False + + # Always use the efficient random page selection method + sonarr_logger.debug(f"Using random selection for cutoff unmet episodes") + episodes_to_search = sonarr_api.get_cutoff_unmet_episodes_random_page( + api_url, api_key, api_timeout, monitored_only, hunt_upgrade_items) + + # If we didn't get enough episodes, we might need to try another page + if len(episodes_to_search) < hunt_upgrade_items and len(episodes_to_search) > 0: + sonarr_logger.debug(f"Got {len(episodes_to_search)} episodes from random page, fewer than requested {hunt_upgrade_items}") + + if stop_check(): + sonarr_logger.info("Stop requested during upgrade processing.") + return processed_any + + # Filter out future episodes for random selection approach + if skip_series_refresh: + now_unix = time.time() + original_count = len(episodes_to_search) + episodes_to_search = [ + ep for ep in episodes_to_search + if ep.get('airDateUtc') and time.mktime(time.strptime(ep['airDateUtc'], '%Y-%m-%dT%H:%M:%SZ')) < now_unix + ] + skipped_count = original_count - len(episodes_to_search) + if skipped_count > 0: + sonarr_logger.info(f"Skipped 
{skipped_count} future episodes based on air date for upgrades.") + + # Filter out already processed episodes for random selection approach + unprocessed_episodes = [] + for episode in episodes_to_search: + episode_id = str(episode.get("id")) + if not is_processed("sonarr", instance_name, episode_id): + unprocessed_episodes.append(episode) + else: + sonarr_logger.debug(f"Skipping already processed episode ID for upgrade: {episode_id}") + + sonarr_logger.info(f"Found {len(unprocessed_episodes)} unprocessed cutoff unmet episodes out of {len(episodes_to_search)} total.") + episodes_to_search = unprocessed_episodes + + if not episodes_to_search: + sonarr_logger.info("No cutoff unmet episodes left to process for upgrades after filtering.") + return False + + sonarr_logger.info(f"Selected {len(episodes_to_search)} cutoff unmet episodes to search for upgrades.") + + # Add detailed listing of episodes being upgraded + if episodes_to_search: + sonarr_logger.info(f"Episodes selected for quality upgrades in this cycle:") + for idx, episode in enumerate(episodes_to_search): + series_title = episode.get('series', {}).get('title', 'Unknown Series') + episode_title = episode.get('title', 'Unknown Episode') + season_number = episode.get('seasonNumber', 'Unknown Season') + episode_number = episode.get('episodeNumber', 'Unknown Episode') + + # Get quality information + quality_name = "Unknown" + if "quality" in episode and episode["quality"]: + quality_name = episode["quality"].get("quality", {}).get("name", "Unknown") + + episode_id = episode.get("id") + try: + season_episode = f"S{season_number:02d}E{episode_number:02d}" + except (ValueError, TypeError): + season_episode = f"S{season_number}E{episode_number}" + + sonarr_logger.info(f" {idx+1}. 
{series_title} - {season_episode} - \"{episode_title}\" - Current quality: {quality_name} (ID: {episode_id})") + + # Group episodes by series for potential refresh + series_to_process: Dict[int, List[int]] = {} + series_titles: Dict[int, str] = {} # Store titles for logging + for episode in episodes_to_search: + series_id = episode.get('seriesId') + if series_id: + if series_id not in series_to_process: + series_to_process[series_id] = [] + # Store title when first encountering the series ID + series_titles[series_id] = episode.get('series', {}).get('title', f"Series ID {series_id}") + series_to_process[series_id].append(episode['id']) + + # Process each series + for series_id, episode_ids in series_to_process.items(): + if stop_check(): + sonarr_logger.info("Stop requested before processing next series for upgrades.") + break + + series_title = series_titles.get(series_id, f"Series ID {series_id}") + sonarr_logger.info(f"Processing series for upgrades: {series_title} (ID: {series_id}) with {len(episode_ids)} episodes.") + + # Refresh series metadata if not skipped + refresh_command_id = None + if not skip_series_refresh: + sonarr_logger.debug(f"Attempting to refresh series ID: {series_id} before upgrade search.") + refresh_command_id = sonarr_api.refresh_series(api_url, api_key, api_timeout, series_id) + if refresh_command_id: + # Wait for refresh command to complete + if not wait_for_command( + api_url, api_key, api_timeout, refresh_command_id, + command_wait_delay, command_wait_attempts, "Series Refresh (Upgrade)", stop_check + ): + sonarr_logger.warning(f"Series refresh command (ID: {refresh_command_id}) for series {series_id} did not complete successfully or timed out. Proceeding with upgrade search anyway.") + else: + sonarr_logger.warning(f"Failed to trigger refresh command for series ID: {series_id}. 
Proceeding without refresh.") + else: + sonarr_logger.debug(f"Skipping series refresh for series ID: {series_id} as configured.") + + if stop_check(): + sonarr_logger.info("Stop requested after series refresh attempt for upgrades.") + break + + # Trigger search for the selected episodes in this series + sonarr_logger.debug(f"Attempting upgrade search for episode IDs: {episode_ids}") + search_command_id = sonarr_api.search_episode(api_url, api_key, api_timeout, episode_ids) + + if search_command_id: + # Wait for search command to complete + if wait_for_command( + api_url, api_key, api_timeout, search_command_id, + command_wait_delay, command_wait_attempts, "Episode Upgrade Search", stop_check + ): + # Mark episodes as processed if search command completed successfully + processed_any = True # Mark that we did something + sonarr_logger.info(f"Successfully processed and searched for {len(episode_ids)} episodes in series {series_id}.") + + # Add stats incrementing right here - this is the code path that's actually being executed + for episode_id in episode_ids: + # Increment stat for each episode individually, just like Radarr + increment_stat("sonarr", "upgraded") + sonarr_logger.info(f"*** STATS INCREMENT *** sonarr upgraded by 1 for episode ID {episode_id}") + + # Mark episodes as processed using stateful management + for episode_id in episode_ids: + add_processed_id("sonarr", instance_name, str(episode_id)) + sonarr_logger.debug(f"Marked episode ID {episode_id} as processed for upgrades") + + # Find the episode information for history logging + # We need to get the episode details from the API to include proper info in history + try: + episode_details = sonarr_api.get_episode(api_url, api_key, api_timeout, episode_id) + if episode_details: + series_title = episode_details.get('series', {}).get('title', 'Unknown Series') + episode_title = episode_details.get('title', 'Unknown Episode') + season_number = episode_details.get('seasonNumber', 'Unknown Season') + 
episode_number = episode_details.get('episodeNumber', 'Unknown Episode') + + try: + season_episode = f"S{season_number:02d}E{episode_number:02d}" + except (ValueError, TypeError): + season_episode = f"S{season_number}E{episode_number}" + + # Record the upgrade in history with quality upgrade identifier + media_name = f"{series_title} - {season_episode} - {episode_title}" + log_processed_media("sonarr", media_name, episode_id, instance_name, "upgrade") + sonarr_logger.debug(f"Logged quality upgrade to history for episode ID {episode_id}") + except Exception as e: + sonarr_logger.error(f"Failed to log history for episode ID {episode_id}: {str(e)}") + else: + sonarr_logger.warning(f"Episode upgrade search command (ID: {search_command_id}) for series {series_id} did not complete successfully or timed out. Episodes will not be marked as processed yet.") + else: + sonarr_logger.error(f"Failed to trigger upgrade search command for episodes {episode_ids} in series {series_id}.") + + sonarr_logger.info("Finished quality cutoff upgrades processing cycle for Sonarr.") + return processed_any + +def process_upgrade_seasons_mode( + api_url: str, + api_key: str, + instance_name: str, + api_timeout: int, + monitored_only: bool, + skip_series_refresh: bool, + hunt_upgrade_items: int, + command_wait_delay: int, + command_wait_attempts: int, + stop_check: Callable[[], bool] +) -> bool: + """Process upgrades in season mode - groups episodes by season.""" + processed_any = False + + # Get all cutoff unmet episodes + cutoff_unmet_episodes = sonarr_api.get_cutoff_unmet_episodes(api_url, api_key, api_timeout, monitored_only) + sonarr_logger.info(f"Received {len(cutoff_unmet_episodes)} cutoff unmet episodes from Sonarr API (before filtering).") + + if not cutoff_unmet_episodes: + sonarr_logger.info("No cutoff unmet episodes found in Sonarr.") + return False + + # Filter out future episodes if configured + if skip_series_refresh: + now_unix = time.time() + original_count = 
len(cutoff_unmet_episodes) + # Ensure airDateUtc exists and is not None before parsing + cutoff_unmet_episodes = [ + ep for ep in cutoff_unmet_episodes + if ep.get('airDateUtc') and time.mktime(time.strptime(ep['airDateUtc'], '%Y-%m-%dT%H:%M:%SZ')) < now_unix + ] + skipped_count = original_count - len(cutoff_unmet_episodes) + if skipped_count > 0: + sonarr_logger.info(f"Skipped {skipped_count} future episodes based on air date for upgrades.") + + if stop_check(): + sonarr_logger.info("Stop requested during upgrade processing.") + return processed_any + + # Group episodes by series and season + series_season_episodes: Dict[int, Dict[int, List[Dict]]] = {} + for episode in cutoff_unmet_episodes: + series_id = episode.get('seriesId') + season_number = episode.get('seasonNumber') + + if series_id is not None and season_number is not None: + if series_id not in series_season_episodes: + series_season_episodes[series_id] = {} + + if season_number not in series_season_episodes[series_id]: + series_season_episodes[series_id][season_number] = [] + + series_season_episodes[series_id][season_number].append(episode) + + # Create a list of (series_id, season_number) tuples for selection + available_seasons = [] + for series_id, seasons in series_season_episodes.items(): + for season_number, episodes in seasons.items(): + # Get series title from the first episode for this season + series_title = episodes[0].get('series', {}).get('title', f"Series ID {series_id}") + available_seasons.append((series_id, season_number, len(episodes), series_title)) + + if not available_seasons: + sonarr_logger.info("No valid seasons with cutoff unmet episodes found.") + return False + + # Select seasons to process - always randomly + random.shuffle(available_seasons) + seasons_to_process = available_seasons[:hunt_upgrade_items] + + sonarr_logger.info(f"Selected {len(seasons_to_process)} seasons with cutoff unmet episodes to process") + + # Log selected seasons + for idx, (series_id, season_number, 
episode_count, series_title) in enumerate(seasons_to_process): + sonarr_logger.info(f" {idx+1}. {series_title} - Season {season_number} - {episode_count} cutoff unmet episodes") + + # Process each selected season + for series_id, season_number, _, series_title in seasons_to_process: + if stop_check(): + sonarr_logger.info("Stop requested before processing next season.") + break + + episodes = series_season_episodes[series_id][season_number] + episode_ids = [episode["id"] for episode in episodes] + + sonarr_logger.info(f"Processing {series_title} - Season {season_number} with {len(episode_ids)} cutoff unmet episodes") + + # Refresh series metadata if not skipped + if not skip_series_refresh: + sonarr_logger.debug(f"Attempting to refresh series ID: {series_id}") + refresh_command_id = sonarr_api.refresh_series(api_url, api_key, api_timeout, series_id) + if refresh_command_id: + # Wait for refresh command to complete + if not wait_for_command( + api_url, api_key, api_timeout, refresh_command_id, + command_wait_delay, command_wait_attempts, "Series Refresh (Upgrade)", stop_check + ): + sonarr_logger.warning(f"Series refresh command for {series_title} did not complete successfully or timed out.") + else: + sonarr_logger.warning(f"Failed to trigger refresh command for series {series_title}") + + if stop_check(): + sonarr_logger.info("Stop requested after series refresh attempt.") + break + + # Trigger search for the selected episodes in this season + sonarr_logger.debug(f"Attempting to search for {len(episode_ids)} episodes in {series_title} Season {season_number} for upgrades") + search_command_id = sonarr_api.search_episode(api_url, api_key, api_timeout, episode_ids) + + if search_command_id: + # Wait for search command to complete + if wait_for_command( + api_url, api_key, api_timeout, search_command_id, + command_wait_delay, command_wait_attempts, "Episode Upgrade Search", stop_check + ): + # Mark as processed if search command completed successfully + processed_any 
= True + sonarr_logger.info(f"Successfully processed {len(episode_ids)} cutoff unmet episodes in {series_title} Season {season_number}") + + # We'll increment stats individually for each episode instead of in batch + # increment_stat("sonarr", "upgraded", len(episode_ids)) + # sonarr_logger.debug(f"Incremented sonarr upgraded statistics by {len(episode_ids)}") + + # Mark episodes as processed using stateful management + for episode_id in episode_ids: + add_processed_id("sonarr", instance_name, str(episode_id)) + sonarr_logger.debug(f"Marked episode ID {episode_id} as processed for upgrades") + + # Increment stats for this episode (consistent with Radarr's approach) + increment_stat("sonarr", "upgraded") + sonarr_logger.debug(f"Incremented sonarr upgraded statistic for episode {episode_id}") + + # Find the episode information for history logging + # We need to get the episode details from the API to include proper info in history + try: + episode_details = sonarr_api.get_episode(api_url, api_key, api_timeout, episode_id) + if episode_details: + series_title = episode_details.get('series', {}).get('title', 'Unknown Series') + episode_title = episode_details.get('title', 'Unknown Episode') + season_number = episode_details.get('seasonNumber', 'Unknown Season') + episode_number = episode_details.get('episodeNumber', 'Unknown Episode') + + try: + season_episode = f"S{season_number:02d}E{episode_number:02d}" + except (ValueError, TypeError): + season_episode = f"S{season_number}E{episode_number}" + + # Record the upgrade in history with quality upgrade identifier + media_name = f"{series_title} - {season_episode} - {episode_title}" + log_processed_media("sonarr", media_name, episode_id, instance_name, "upgrade") + sonarr_logger.debug(f"Logged quality upgrade to history for episode ID {episode_id}") + except Exception as e: + sonarr_logger.error(f"Failed to log history for episode ID {episode_id}: {str(e)}") + else: + sonarr_logger.warning(f"Episode upgrade search command 
def process_upgrade_shows_mode(
    api_url: str,
    api_key: str,
    instance_name: str,
    api_timeout: int,
    monitored_only: bool,
    skip_series_refresh: bool,
    hunt_upgrade_items: int,
    command_wait_delay: int,
    command_wait_attempts: int,
    stop_check: Callable[[], bool]
) -> bool:
    """Process quality upgrades in show mode.

    Fetches every cutoff-unmet episode from Sonarr, groups them by series,
    randomly selects up to ``hunt_upgrade_items`` series, then optionally
    refreshes each selected series and triggers an episode search for all of
    its cutoff-unmet episodes.

    Args:
        api_url: Base URL of the Sonarr instance.
        api_key: API key for the Sonarr instance.
        instance_name: Instance label used for stateful tracking and history.
        api_timeout: Per-request API timeout in seconds.
        monitored_only: Only consider monitored episodes.
        skip_series_refresh: When True, skip the series-refresh command.
            NOTE(review): this same flag also gates the future-episode filter
            below; it looks like that should be a separate
            "skip future episodes" setting — confirm against the caller.
        hunt_upgrade_items: Maximum number of series to process per cycle.
        command_wait_delay: Seconds between command-status polls.
        command_wait_attempts: Maximum number of status polls per command.
        stop_check: Callable returning True when processing should abort.

    Returns:
        True if at least one episode search command completed successfully.
    """
    import calendar  # local import: needed only for UTC air-date parsing

    processed_any = False

    # Get all cutoff unmet episodes
    cutoff_unmet_episodes = sonarr_api.get_cutoff_unmet_episodes(api_url, api_key, api_timeout, monitored_only)
    sonarr_logger.info(f"Received {len(cutoff_unmet_episodes)} cutoff unmet episodes from Sonarr API (before filtering).")

    if not cutoff_unmet_episodes:
        sonarr_logger.info("No cutoff unmet episodes found in Sonarr.")
        return False

    # Filter out future episodes if configured
    if skip_series_refresh:
        now_unix = time.time()
        original_count = len(cutoff_unmet_episodes)
        # FIX: airDateUtc is UTC (trailing 'Z'), so convert with
        # calendar.timegm. The previous time.mktime() treated the parsed
        # struct_time as *local* time, skewing the cutoff by the host's
        # UTC offset.
        cutoff_unmet_episodes = [
            ep for ep in cutoff_unmet_episodes
            if ep.get('airDateUtc') and calendar.timegm(time.strptime(ep['airDateUtc'], '%Y-%m-%dT%H:%M:%SZ')) < now_unix
        ]
        skipped_count = original_count - len(cutoff_unmet_episodes)
        if skipped_count > 0:
            sonarr_logger.info(f"Skipped {skipped_count} future episodes based on air date for upgrades.")

    if stop_check():
        sonarr_logger.info("Stop requested during upgrade processing.")
        return processed_any

    # Group episodes by series, remembering each series' display title.
    series_episodes: Dict[int, List[Dict]] = {}
    series_titles: Dict[int, str] = {}

    for episode in cutoff_unmet_episodes:
        series_id = episode.get('seriesId')
        if series_id is not None:
            if series_id not in series_episodes:
                series_episodes[series_id] = []
                series_titles[series_id] = episode.get('series', {}).get('title', f"Series ID {series_id}")
            series_episodes[series_id].append(episode)

    # (series_id, episode_count, series_title) tuples for selection.
    available_series = [(sid, len(eps), series_titles[sid])
                        for sid, eps in series_episodes.items()]

    if not available_series:
        sonarr_logger.info("No series with cutoff unmet episodes found.")
        return False

    # Select series to process - always randomly
    random.shuffle(available_series)
    series_to_process = available_series[:hunt_upgrade_items]

    sonarr_logger.info(f"Selected {len(series_to_process)} series with cutoff unmet episodes to process")
    for idx, (series_id, episode_count, series_title) in enumerate(series_to_process):
        sonarr_logger.info(f" {idx+1}. {series_title} - {episode_count} cutoff unmet episodes")

    for series_id, _, series_title in series_to_process:
        if stop_check():
            sonarr_logger.info("Stop requested before processing next series.")
            break

        episodes = series_episodes[series_id]
        episode_ids = [episode["id"] for episode in episodes]

        sonarr_logger.info(f"Processing {series_title} with {len(episode_ids)} cutoff unmet episodes")

        # Refresh series metadata if not skipped
        if not skip_series_refresh:
            sonarr_logger.debug(f"Attempting to refresh series ID: {series_id}")
            refresh_command_id = sonarr_api.refresh_series(api_url, api_key, api_timeout, series_id)
            if refresh_command_id:
                if not wait_for_command(
                    api_url, api_key, api_timeout, refresh_command_id,
                    command_wait_delay, command_wait_attempts, "Series Refresh (Upgrade)", stop_check
                ):
                    sonarr_logger.warning(f"Series refresh command for {series_title} did not complete successfully or timed out.")
            else:
                sonarr_logger.warning(f"Failed to trigger refresh command for series {series_title}")

        if stop_check():
            sonarr_logger.info("Stop requested after series refresh attempt.")
            break

        # Trigger search for all cutoff unmet episodes in this series
        sonarr_logger.debug(f"Attempting to search for {len(episode_ids)} episodes in {series_title} for upgrades")
        search_command_id = sonarr_api.search_episode(api_url, api_key, api_timeout, episode_ids)

        if search_command_id:
            if wait_for_command(
                api_url, api_key, api_timeout, search_command_id,
                command_wait_delay, command_wait_attempts, "Episode Upgrade Search", stop_check
            ):
                processed_any = True
                sonarr_logger.info(f"Successfully processed {len(episode_ids)} cutoff unmet episodes in {series_title}")

                # Stats are incremented per episode (consistent with Radarr)
                # rather than in one batch.
                for episode_id in episode_ids:
                    add_processed_id("sonarr", instance_name, str(episode_id))
                    sonarr_logger.debug(f"Marked episode ID {episode_id} as processed for upgrades")

                    increment_stat("sonarr", "upgraded")
                    sonarr_logger.debug(f"Incremented sonarr upgraded statistic for episode {episode_id}")

                    # Fetch episode details so history entries carry a
                    # human-readable name. FIX: use local names here instead
                    # of clobbering the loop's series_title.
                    try:
                        episode_details = sonarr_api.get_episode(api_url, api_key, api_timeout, episode_id)
                        if episode_details:
                            detail_series = episode_details.get('series', {}).get('title', 'Unknown Series')
                            episode_title = episode_details.get('title', 'Unknown Episode')
                            season_number = episode_details.get('seasonNumber', 'Unknown Season')
                            episode_number = episode_details.get('episodeNumber', 'Unknown Episode')

                            try:
                                season_episode = f"S{season_number:02d}E{episode_number:02d}"
                            except (ValueError, TypeError):
                                # Non-numeric season/episode fall back to raw values.
                                season_episode = f"S{season_number}E{episode_number}"

                            media_name = f"{detail_series} - {season_episode} - {episode_title}"
                            log_processed_media("sonarr", media_name, episode_id, instance_name, "upgrade")
                            sonarr_logger.debug(f"Logged quality upgrade to history for episode ID {episode_id}")
                    except Exception as e:
                        sonarr_logger.error(f"Failed to log history for episode ID {episode_id}: {str(e)}")
            else:
                sonarr_logger.warning(f"Episode upgrade search command for {series_title} did not complete successfully")
        else:
            sonarr_logger.error(f"Failed to trigger upgrade search command for {series_title}")

    sonarr_logger.info("Finished quality cutoff upgrades processing cycle (show mode) for Sonarr.")
    return processed_any
def wait_for_command(
    api_url: str,
    api_key: str,
    api_timeout: int,
    command_id: Union[int, str],
    wait_delay: int,
    max_attempts: int,
    command_name: str = "Command",
    stop_check: Callable[[], bool] = lambda: False
) -> bool:
    """Poll a Sonarr command until it finishes, fails, or times out.

    Args:
        api_url: The Sonarr API URL.
        api_key: The Sonarr API key.
        api_timeout: API request timeout.
        command_id: ID of the command to monitor.
        wait_delay: Seconds between status checks.
        max_attempts: Maximum number of status checks.
        command_name: Label used in log messages.
        stop_check: Callable that aborts the wait when it returns True.

    Returns:
        True when the command reports 'completed' (or when waiting is
        disabled via non-positive wait_delay/max_attempts); False on
        failure, abort, stop request, or timeout.
    """
    # Waiting disabled — treat as success without polling.
    if wait_delay <= 0 or max_attempts <= 0:
        sonarr_logger.debug(f"Not waiting for command to complete (wait_delay={wait_delay}, max_attempts={max_attempts})")
        return True

    sonarr_logger.debug(f"Waiting for {command_name} to complete (command ID: {command_id}). Checking every {wait_delay}s for up to {max_attempts} attempts")

    for attempt in range(max_attempts):
        if stop_check():
            sonarr_logger.info(f"Stopping wait for {command_name} due to stop request")
            return False

        status_payload = sonarr_api.get_command_status(api_url, api_key, api_timeout, command_id)
        if not status_payload:
            sonarr_logger.warning(f"Failed to get status for {command_name} (ID: {command_id}), attempt {attempt + 1}")
            time.sleep(wait_delay)
            continue

        state = status_payload.get('status')
        if state == 'completed':
            sonarr_logger.debug(f"Sonarr {command_name} (ID: {command_id}) completed successfully")
            return True
        if state in ('failed', 'aborted'):
            sonarr_logger.warning(f"Sonarr {command_name} (ID: {command_id}) {state}")
            return False

        sonarr_logger.debug(f"Sonarr {command_name} (ID: {command_id}) status: {state}, attempt {attempt + 1}/{max_attempts}")
        time.sleep(wait_delay)

    sonarr_logger.error(f"Sonarr command '{command_name}' (ID: {command_id}) timed out after {max_attempts} attempts.")
    return False
@sonarr_bp.route('/test-connection', methods=['POST'])
def test_connection():
    """Test connection to a Sonarr API instance with comprehensive diagnostics.

    Expects a JSON body with 'api_url', 'api_key' and optional 'api_timeout'.
    Performs a quick TCP reachability probe first (fast feedback on dead
    hosts), then calls /api/v3/system/status and maps the outcome to a
    JSON response with an appropriate HTTP status code.
    """
    data = request.json
    api_url = data.get('api_url')
    api_key = data.get('api_key')
    api_timeout = data.get('api_timeout', 30)  # Use longer timeout for connection test

    if not api_url or not api_key:
        return jsonify({"success": False, "message": "API URL and API Key are required"}), 400

    sonarr_logger.info(f"Testing connection to Sonarr API at {api_url}")

    # First check if URL is properly formatted
    if not (api_url.startswith('http://') or api_url.startswith('https://')):
        error_msg = "API URL must start with http:// or https://"
        sonarr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 400

    parsed_url = urlparse(api_url)
    hostname = parsed_url.hostname
    port = parsed_url.port or (443 if parsed_url.scheme == 'https' else 80)

    # Quick TCP probe for fast feedback on basic connectivity issues.
    # FIX: use socket.create_connection instead of a hand-built AF_INET
    # socket so IPv6-only hosts are probed correctly too.
    try:
        probe = socket.create_connection((hostname, port), timeout=3)
        probe.close()
    except socket.gaierror:
        error_msg = f"DNS resolution failed - Cannot resolve hostname: {hostname}. Please check your URL."
        sonarr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 404
    except (ConnectionRefusedError, socket.timeout):
        error_msg = f"Connection refused - Unable to connect to {hostname}:{port}. Please check if the server is running and the port is correct."
        sonarr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 404
    except Exception as e:
        # Any other socket-level oddity: log and fall through to the full
        # HTTP request, which produces more specific diagnostics.
        sonarr_logger.debug(f"Socket test error, continuing with full request: {str(e)}")

    test_url = f"{api_url.rstrip('/')}/api/v3/system/status"
    headers = {'X-Api-Key': api_key}

    try:
        # (connect timeout, read timeout)
        response = requests.get(test_url, headers=headers, timeout=(10, api_timeout))

        # For HTTP errors, provide more specific feedback
        if response.status_code == 401:
            error_msg = "Authentication failed: Invalid API key"
            sonarr_logger.error(error_msg)
            return jsonify({"success": False, "message": error_msg}), 401
        elif response.status_code == 403:
            error_msg = "Access forbidden: Check API key permissions"
            sonarr_logger.error(error_msg)
            return jsonify({"success": False, "message": error_msg}), 403
        elif response.status_code == 404:
            error_msg = "API endpoint not found: This doesn't appear to be a valid Sonarr server. Check your URL."
            sonarr_logger.error(error_msg)
            return jsonify({"success": False, "message": error_msg}), 404
        elif response.status_code >= 500:
            error_msg = f"Sonarr server error (HTTP {response.status_code}): The Sonarr server is experiencing issues"
            sonarr_logger.error(error_msg)
            return jsonify({"success": False, "message": error_msg}), response.status_code

        # Raise for other HTTP errors
        response.raise_for_status()

        sonarr_logger.debug(f"Sonarr API status code: {response.status_code}")

        # Ensure the response is valid JSON
        try:
            response_data = response.json()

            # We no longer save keys here since we use instances
            sonarr_logger.info(f"Successfully connected to Sonarr API version: {response_data.get('version', 'unknown')}")

            return jsonify({
                "success": True,
                "message": "Successfully connected to Sonarr API",
                "version": response_data.get('version', 'unknown')
            })
        except ValueError:
            error_msg = "Invalid JSON response from Sonarr API - This doesn't appear to be a valid Sonarr server"
            sonarr_logger.error(f"{error_msg}. Response content: {response.text[:200]}")
            return jsonify({"success": False, "message": error_msg}), 500

    except requests.exceptions.Timeout as e:
        error_msg = f"Connection timed out after {api_timeout} seconds"
        sonarr_logger.error(f"{error_msg}: {str(e)}")
        return jsonify({"success": False, "message": error_msg}), 504

    except requests.exceptions.ConnectionError as e:
        # Handle different types of connection errors
        error_details = str(e)
        if "Connection refused" in error_details:
            error_msg = f"Connection refused - Sonarr is not running on {api_url} or the port is incorrect"
        elif "Name or service not known" in error_details or "getaddrinfo failed" in error_details:
            error_msg = f"DNS resolution failed - Cannot find host '{urlparse(api_url).hostname}'. Check your URL."
        else:
            error_msg = f"Connection error - Check if Sonarr is running: {error_details}"

        sonarr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 404

    except requests.exceptions.RequestException as e:
        error_msg = f"Connection test failed: {str(e)}"
        sonarr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 500
def get_configured_instances():
    """Get all configured Starr app instances from their respective settings"""
    # whisparr/eros modules may not exist in every deployment; degrade to
    # empty lists when the import fails.
    try:
        from src.primary.apps.whisparr import get_configured_instances as get_whisparr_instances
        whisparr_instances = get_whisparr_instances()
    except ImportError:
        whisparr_instances = []

    try:
        from src.primary.apps.eros import get_configured_instances as get_eros_instances
        eros_instances = get_eros_instances()
    except ImportError:
        eros_instances = []

    instances = {
        "radarr": get_radarr_instances(),
        "sonarr": get_sonarr_instances(),
        "lidarr": get_lidarr_instances(),
        "readarr": get_readarr_instances(),
        "whisparr": whisparr_instances,
        "eros": eros_instances
    }

    logger = get_logger("swaparr")
    logger.info(f"Found {sum(len(v) for v in instances.values())} configured Starr app instances")
    return instances

swaparr_bp = Blueprint('swaparr', __name__)
swaparr_logger = get_logger("swaparr")

@swaparr_bp.route('/status', methods=['GET'])
def get_status():
    """Get Swaparr status and statistics"""
    config = load_settings("swaparr")

    # Collect strike statistics from every app's state directory.
    statistics = {}
    state_root = os.path.join(os.getenv("CONFIG_DIR", "/config"), "swaparr")

    if os.path.exists(state_root):
        for service in os.listdir(state_root):
            service_dir = os.path.join(state_root, service)
            if not os.path.isdir(service_dir):
                continue
            strikes_path = os.path.join(service_dir, "strikes.json")
            if not os.path.exists(strikes_path):
                continue
            try:
                with open(strikes_path, 'r') as fh:
                    strike_data = json.load(fh)
            except (json.JSONDecodeError, IOError) as e:
                swaparr_logger.error(f"Error reading strike data for {service}: {str(e)}")
                statistics[service] = {"error": str(e)}
                continue

            removed_count = sum(1 for entry in strike_data.values() if entry.get("removed", False))
            striked_count = sum(1 for entry in strike_data.values()
                                if entry.get("strikes", 0) > 0 and not entry.get("removed", False))
            statistics[service] = {
                "total_tracked": len(strike_data),
                "currently_striked": striked_count,
                "removed": removed_count
            }

    return jsonify({
        "enabled": config.get("enabled", False),
        "settings": {
            "max_strikes": config.get("max_strikes", 3),
            "max_download_time": config.get("max_download_time", "2h"),
            "ignore_above_size": config.get("ignore_above_size", "25GB"),
            "remove_from_client": config.get("remove_from_client", True),
            "dry_run": config.get("dry_run", False)
        },
        "statistics": statistics
    })

@swaparr_bp.route('/settings', methods=['GET'])
def get_settings():
    """Get Swaparr settings"""
    return jsonify(load_settings("swaparr"))

@swaparr_bp.route('/settings', methods=['POST'])
def update_settings():
    """Update Swaparr settings"""
    data = request.json
    if not data:
        return jsonify({"success": False, "message": "No data provided"}), 400

    # Merge the provided keys over the stored settings.
    settings = load_settings("swaparr")
    settings.update(data)

    if save_settings("swaparr", settings):
        return jsonify({"success": True, "message": "Settings updated successfully"})
    return jsonify({"success": False, "message": "Failed to save settings"}), 500

@swaparr_bp.route('/reset', methods=['POST'])
def reset_strikes():
    """Reset all strikes for all apps or a specific app"""
    payload = request.json
    target = payload.get('app_name') if payload else None

    state_root = os.path.join(os.getenv("CONFIG_DIR", "/config"), "swaparr")
    if not os.path.exists(state_root):
        return jsonify({"success": True, "message": "No strike data to reset"})

    if target:
        # Reset strikes for a single app.
        target_dir = os.path.join(state_root, target)
        strikes_path = os.path.join(target_dir, "strikes.json")
        if os.path.exists(target_dir) and os.path.exists(strikes_path):
            try:
                os.remove(strikes_path)
            except IOError as e:
                swaparr_logger.error(f"Error resetting strikes for {target}: {str(e)}")
                return jsonify({"success": False, "message": f"Failed to reset strikes for {target}: {str(e)}"}), 500
            swaparr_logger.info(f"Reset strikes for {target}")
            return jsonify({"success": True, "message": f"Strikes reset for {target}"})
        return jsonify({"success": False, "message": f"No strike data found for {target}"}), 404

    # Reset strikes for every app.
    try:
        for service in os.listdir(state_root):
            service_dir = os.path.join(state_root, service)
            if os.path.isdir(service_dir):
                strikes_path = os.path.join(service_dir, "strikes.json")
                if os.path.exists(strikes_path):
                    os.remove(strikes_path)
    except IOError as e:
        swaparr_logger.error(f"Error resetting all strikes: {str(e)}")
        return jsonify({"success": False, "message": f"Failed to reset all strikes: {str(e)}"}), 500

    swaparr_logger.info("Reset all strikes")
    return jsonify({"success": True, "message": "All strikes reset"})

def is_configured():
    """Check if Swaparr has any configured Starr app instances"""
    # Non-empty instance lists are truthy.
    return any(get_configured_instances().values())

def run_swaparr():
    """Run Swaparr cycle to check for stalled downloads in all configured Starr app instances"""
    config = load_settings("swaparr")
    if not config or not config.get("enabled", False):
        swaparr_logger.debug("Swaparr is disabled, skipping run")
        return

    for service, service_instances in get_configured_instances().items():
        for instance_settings in service_instances:
            process_stalled_downloads(service, instance_settings, config)
# Root directory under which per-app Swaparr strike state is persisted.
SWAPARR_STATE_DIR = os.path.join(os.getenv("CONFIG_DIR", "/config"), "swaparr")

def ensure_state_directory(app_name):
    """Ensure the state directory exists for tracking strikes for a specific app"""
    target = os.path.join(SWAPARR_STATE_DIR, app_name)
    if not os.path.exists(target):
        os.makedirs(target, exist_ok=True)
        swaparr_logger.info(f"Created swaparr state directory for {app_name}: {target}")
    return target

def load_strike_data(app_name):
    """Load strike data for a specific app"""
    strikes_path = os.path.join(ensure_state_directory(app_name), "strikes.json")

    if not os.path.exists(strikes_path):
        return {}

    try:
        with open(strikes_path, 'r') as fh:
            return json.load(fh)
    except (json.JSONDecodeError, IOError) as e:
        # Corrupt or unreadable state is treated as empty rather than fatal.
        swaparr_logger.error(f"Error loading strike data for {app_name}: {str(e)}")
        return {}

def save_strike_data(app_name, strike_data):
    """Save strike data for a specific app"""
    strikes_path = os.path.join(ensure_state_directory(app_name), "strikes.json")

    try:
        with open(strikes_path, 'w') as fh:
            json.dump(strike_data, fh, indent=2)
    except IOError as e:
        # Best-effort persistence: log and continue.
        swaparr_logger.error(f"Error saving strike data for {app_name}: {str(e)}")
def load_removed_items(app_name):
    """Load the list of permanently removed items for an app.

    Returns an empty dict when the file is missing or unreadable.
    """
    app_state_dir = ensure_state_directory(app_name)
    removed_file = os.path.join(app_state_dir, "removed_items.json")

    if not os.path.exists(removed_file):
        return {}

    try:
        with open(removed_file, 'r') as f:
            return json.load(f)
    except (json.JSONDecodeError, IOError) as e:
        swaparr_logger.error(f"Error loading removed items for {app_name}: {str(e)}")
        return {}

def save_removed_items(app_name, removed_items):
    """Persist the list of permanently removed items for an app (best effort)."""
    app_state_dir = ensure_state_directory(app_name)
    removed_file = os.path.join(app_state_dir, "removed_items.json")

    try:
        with open(removed_file, 'w') as f:
            json.dump(removed_items, f, indent=2)
    except IOError as e:
        swaparr_logger.error(f"Error saving removed items for {app_name}: {str(e)}")

def generate_item_hash(item):
    """Generate a unique hash for an item based on its name and size.

    This helps track items across restarts even if their queue ID changes.
    """
    hash_input = f"{item['name']}_{item['size']}"
    return hashlib.md5(hash_input.encode('utf-8')).hexdigest()

def parse_time_string_to_seconds(time_string):
    """Parse a time string like '2h', '30m', '1d' to seconds.

    Falls back to the 2-hour default on empty/invalid input.
    """
    if not time_string:
        return 7200  # Default 2 hours

    unit = time_string[-1].lower()
    try:
        value = int(time_string[:-1])
    except ValueError:
        swaparr_logger.error(f"Invalid time string: {time_string}, using default 2 hours")
        return 7200

    if unit == 'd':
        return value * 86400  # Days to seconds
    if unit == 'h':
        return value * 3600   # Hours to seconds
    if unit == 'm':
        return value * 60     # Minutes to seconds

    swaparr_logger.error(f"Unknown time unit in: {time_string}, using default 2 hours")
    return 7200

def parse_size_string_to_bytes(size_string):
    """Parse a size string like '25GB', '1TB' to bytes.

    Falls back to the 25GB default on empty/invalid input.
    """
    default_bytes = 25 * 1024 * 1024 * 1024  # Default 25GB

    if not size_string:
        return default_bytes

    # Split into numeric prefix and alphabetic unit suffix.
    value = None
    unit = ""
    for i in range(len(size_string) - 1, -1, -1):
        if not size_string[i].isalpha():
            try:
                value = float(size_string[:i + 1])
            except ValueError:
                # FIX: previously a non-numeric prefix (e.g. 'x5GB') raised
                # an uncaught ValueError; fall back to the default instead.
                swaparr_logger.error(f"Invalid size string: {size_string}, using default 25GB")
                return default_bytes
            unit = size_string[i + 1:].upper()
            break
    else:
        # No numeric character found at all.
        swaparr_logger.error(f"Invalid size string: {size_string}, using default 25GB")
        return default_bytes

    multipliers = {'B': 1, 'KB': 1024, 'MB': 1024 ** 2, 'GB': 1024 ** 3, 'TB': 1024 ** 4}
    if unit in multipliers:
        return int(value * multipliers[unit])

    swaparr_logger.error(f"Unknown size unit in: {size_string}, using default 25GB")
    return default_bytes

def get_queue_items(app_name, api_url, api_key, api_timeout=120):
    """Get download queue items from a Starr app API with pagination support."""
    api_version_map = {
        "radarr": "v3",
        "sonarr": "v3",
        "lidarr": "v1",
        "readarr": "v1",
        "whisparr": "v3"
    }

    # Unknown apps (including eros) fall back to v3.
    api_version = api_version_map.get(app_name, "v3")

    all_records = []
    page = 1
    page_size = 100  # Request a large page size to reduce API calls

    while True:
        queue_url = f"{api_url.rstrip('/')}/api/{api_version}/queue?page={page}&pageSize={page_size}"
        headers = {'X-Api-Key': api_key}

        try:
            response = requests.get(queue_url, headers=headers, timeout=api_timeout)
            response.raise_for_status()
            queue_data = response.json()

            if api_version in ["v3"]:  # Radarr, Sonarr, Whisparr use v3 paged envelopes
                records = queue_data.get("records", [])
                total_records = queue_data.get("totalRecords", 0)
            else:  # Lidarr, Readarr (v1) return a bare list
                records = queue_data
                total_records = len(records)

            all_records.extend(records)

            # Stop once every record has been fetched (or a page came back empty).
            if len(all_records) >= total_records or len(records) == 0:
                break

            page += 1

        except requests.exceptions.RequestException as e:
            swaparr_logger.error(f"Error fetching queue for {app_name} (page {page}): {str(e)}")
            break

    swaparr_logger.info(f"Fetched {len(all_records)} queue items for {app_name}")

    # Normalize the response based on app type.
    if app_name in ["radarr", "whisparr", "eros"]:
        return parse_queue_items(all_records, "movie", app_name)
    elif app_name == "sonarr":
        return parse_queue_items(all_records, "series", app_name)
    elif app_name == "lidarr":
        return parse_queue_items(all_records, "album", app_name)
    elif app_name == "readarr":
        return parse_queue_items(all_records, "book", app_name)
    else:
        swaparr_logger.error(f"Unknown app type: {app_name}")
        return []

def _parse_timeleft_to_seconds(timeleft):
    """Convert a Starr 'timeleft' string to seconds.

    Handles both 'hh:mm:ss' and the C# TimeSpan day form 'd.hh:mm:ss'
    (optionally with fractional seconds). Returns 0 on anything unparsable.
    """
    try:
        clock = timeleft.split(':')
        if len(clock) != 3:
            return 0
        hours_field = clock[0]
        days = 0
        # FIX: 'd.hh:mm:ss' (e.g. '1.02:30:00') previously parsed to 0.
        if '.' in hours_field:
            day_field, hours_field = hours_field.split('.', 1)
            days = int(day_field)
        return (days * 86400
                + int(hours_field) * 3600
                + int(clock[1]) * 60
                + int(float(clock[2])))  # tolerate fractional seconds
    except (ValueError, IndexError):
        return 0

def parse_queue_items(records, item_type, app_name):
    """Parse queue items from API response into a standardized format.

    Each returned dict has: id, name, size, status (lowercased),
    eta (seconds), error_message.
    """
    queue_items = []

    for record in records:
        # Skip non-dictionary records
        if not isinstance(record, dict):
            swaparr_logger.warning(f"Skipping non-dictionary record in {app_name} queue: {record}")
            continue

        # Extract the display name based on the item type.
        name = None
        if item_type == "movie" and record.get("movie"):
            name = record["movie"].get("title", "Unknown Movie")
        elif item_type == "series" and record.get("series"):
            name = record["series"].get("title", "Unknown Series")
        elif item_type == "album" and record.get("album"):
            name = record["album"].get("title", "Unknown Album")
        elif item_type == "book" and record.get("book"):
            name = record["book"].get("title", "Unknown Book")

        # If no media name was found, fall back to the download title.
        if not name and record.get("title"):
            name = record.get("title", "Unknown Download")

        eta_seconds = 0
        if record.get("timeleft"):
            eta_seconds = _parse_timeleft_to_seconds(record.get("timeleft", ""))

        queue_items.append({
            "id": record.get("id"),
            "name": name,
            "size": record.get("size", 0),
            "status": record.get("status", "unknown").lower(),
            "eta": eta_seconds,
            "error_message": record.get("errorMessage", "")
        })

    return queue_items
def delete_download(app_name, api_url, api_key, download_id, remove_from_client=True, api_timeout=120):
    """Delete a download from a Starr app's queue, blocklisting the release.

    Returns True on success, False on any request error.
    """
    # Per-app API version prefix; unknown apps default to v3.
    api_version_map = {
        "radarr": "v3",
        "sonarr": "v3",
        "lidarr": "v1",
        "readarr": "v1",
        "whisparr": "v3"
    }
    api_version = api_version_map.get(app_name, "v3")

    client_flag = str(remove_from_client).lower()
    delete_url = (f"{api_url.rstrip('/')}/api/{api_version}/queue/{download_id}"
                  f"?removeFromClient={client_flag}&blocklist=true")

    try:
        reply = requests.delete(delete_url, headers={'X-Api-Key': api_key}, timeout=api_timeout)
        reply.raise_for_status()
    except requests.exceptions.RequestException as e:
        swaparr_logger.error(f"Error removing download {download_id} from {app_name}: {str(e)}")
        return False

    swaparr_logger.info(f"Successfully removed download {download_id} from {app_name}")
    return True
removed_date) > timedelta(days=30): + swaparr_logger.debug(f"Removing expired entry from removed items list: {removed_items[item_hash]['name']}") + del removed_items[item_hash] + + # Get current queue items + queue_items = get_queue_items(app_name, api_url, api_key, api_timeout) + + if not queue_items: + swaparr_logger.info(f"No queue items found for {app_name} instance: {app_settings.get('instance_name', 'Unknown')}") + return + + # Keep track of items still in queue for cleanup + current_item_ids = set(item["id"] for item in queue_items) + + # Clean up items that are no longer in the queue + for item_id in list(strike_data.keys()): + if int(item_id) not in current_item_ids: + swaparr_logger.debug(f"Removing item {item_id} from strike list as it's no longer in the queue") + del strike_data[item_id] + + # Process each queue item + for item in queue_items: + item_id = str(item["id"]) + item_state = "Normal" + item_hash = generate_item_hash(item) + + # Check if this item has been previously removed + if item_hash in removed_items: + last_removed_date = datetime.fromisoformat(removed_items[item_hash]["removed_time"].replace('Z', '+00:00')) + days_since_removal = (now - last_removed_date).days + + # Re-remove it automatically if it's been less than 7 days since last removal + if days_since_removal < 7: + swaparr_logger.warning(f"Found previously removed download that reappeared: {item['name']} (removed {days_since_removal} days ago)") + + if not dry_run: + if delete_download(app_name, api_url, api_key, item["id"], remove_from_client, api_timeout): + swaparr_logger.info(f"Re-removed previously removed download: {item['name']}") + # Update the removal time + removed_items[item_hash]["removed_time"] = datetime.utcnow().isoformat() + else: + swaparr_logger.info(f"DRY RUN: Would have re-removed previously removed download: {item['name']}") + + item_state = "Re-removed" if not dry_run else "Would Re-remove (Dry Run)" + continue + + # Skip large files if configured + if 
item["size"] >= ignore_above_size: + swaparr_logger.debug(f"Ignoring large download: {item['name']} ({item['size']} bytes > {ignore_above_size} bytes)") + item_state = "Ignored (Size)" + continue + + # Handle delayed items - we'll skip these + if item["status"] == "delay": + swaparr_logger.debug(f"Ignoring delayed download: {item['name']}") + item_state = "Ignored (Delayed)" + continue + + # Special handling for "queued" status + # We only skip truly queued items, not those with metadata issues + metadata_issue = "metadata" in item["status"].lower() or "metadata" in item["error_message"].lower() + + if item["status"] == "queued" and not metadata_issue: + # For regular queued items, check how long they've been in strike data + if item_id in strike_data and "first_strike_time" in strike_data[item_id]: + first_strike = datetime.fromisoformat(strike_data[item_id]["first_strike_time"].replace('Z', '+00:00')) + if (now - first_strike) < timedelta(hours=1): + # Skip if it's been less than 1 hour since first seeing it + swaparr_logger.debug(f"Ignoring recently queued download: {item['name']}") + item_state = "Ignored (Recently Queued)" + continue + else: + # Initialize with first strike time for queued items + if item_id not in strike_data: + strike_data[item_id] = { + "strikes": 0, + "name": item["name"], + "first_strike_time": datetime.utcnow().isoformat(), + "last_strike_time": None + } + swaparr_logger.debug(f"Monitoring new queued download: {item['name']}") + item_state = "Monitoring (Queued)" + continue + + # Initialize strike count if not already in strike data + if item_id not in strike_data: + strike_data[item_id] = { + "strikes": 0, + "name": item["name"], + "first_strike_time": datetime.utcnow().isoformat(), + "last_strike_time": None + } + + # Check if download should be striked + should_strike = False + strike_reason = "" + + # Strike if metadata issue, eta too long, or no progress (eta = 0 and not queued) + if metadata_issue: + should_strike = True + 
strike_reason = "Metadata" + elif item["eta"] >= max_download_time: + should_strike = True + strike_reason = "ETA too long" + elif item["eta"] == 0 and item["status"] not in ["queued", "delay"]: + should_strike = True + strike_reason = "No progress" + + # If we should strike this item, add a strike + if should_strike: + strike_data[item_id]["strikes"] += 1 + strike_data[item_id]["last_strike_time"] = datetime.utcnow().isoformat() + + if strike_data[item_id]["first_strike_time"] is None: + strike_data[item_id]["first_strike_time"] = datetime.utcnow().isoformat() + + current_strikes = strike_data[item_id]["strikes"] + swaparr_logger.info(f"Added strike ({current_strikes}/{max_strikes}) to {item['name']} - Reason: {strike_reason}") + + # If max strikes reached, remove the download + if current_strikes >= max_strikes: + swaparr_logger.warning(f"Max strikes reached for {item['name']}, removing download") + + if not dry_run: + if delete_download(app_name, api_url, api_key, item["id"], remove_from_client, api_timeout): + swaparr_logger.info(f"Successfully removed {item['name']} after {max_strikes} strikes") + + # Keep the item in strike data for reference but mark as removed + strike_data[item_id]["removed"] = True + strike_data[item_id]["removed_time"] = datetime.utcnow().isoformat() + + # Add to removed items list for persistent tracking + removed_items[item_hash] = { + "name": item["name"], + "size": item["size"], + "removed_time": datetime.utcnow().isoformat(), + "reason": strike_reason + } + else: + swaparr_logger.info(f"DRY RUN: Would have removed {item['name']} after {max_strikes} strikes") + + item_state = "Removed" if not dry_run else "Would Remove (Dry Run)" + else: + item_state = f"Striked ({current_strikes}/{max_strikes})" + + swaparr_logger.debug(f"Processed download: {item['name']} - State: {item_state}") + + # Save updated strike data + save_strike_data(app_name, strike_data) + + # Save updated removed items list + save_removed_items(app_name, 
removed_items) + + swaparr_logger.info(f"Finished processing stalled downloads for {app_name} instance: {app_settings.get('instance_name', 'Unknown')}") diff --git a/Huntarr.io-6.3.6/src/primary/apps/swaparr_routes.py b/Huntarr.io-6.3.6/src/primary/apps/swaparr_routes.py new file mode 100644 index 0000000..58f86be --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/swaparr_routes.py @@ -0,0 +1,134 @@ +""" +Route definitions for Swaparr API endpoints. +""" + +from flask import Blueprint, request, jsonify +import os +import json +from src.primary.utils.logger import get_logger +from src.primary.settings_manager import load_settings, save_settings +from src.primary.apps.swaparr.handler import process_stalled_downloads + +# Create the blueprint directly in this file +swaparr_bp = Blueprint('swaparr', __name__) +swaparr_logger = get_logger("swaparr") + +@swaparr_bp.route('/status', methods=['GET']) +def get_status(): + """Get Swaparr status and statistics""" + settings = load_settings("swaparr") + enabled = settings.get("enabled", False) + + # Get strike statistics from all app state directories + statistics = {} + state_dir = os.path.join(os.getenv("CONFIG_DIR", "/config"), "swaparr") + + if os.path.exists(state_dir): + for app_name in os.listdir(state_dir): + app_dir = os.path.join(state_dir, app_name) + if os.path.isdir(app_dir): + strike_file = os.path.join(app_dir, "strikes.json") + if os.path.exists(strike_file): + try: + with open(strike_file, 'r') as f: + strike_data = json.load(f) + + total_items = len(strike_data) + removed_items = sum(1 for item in strike_data.values() if item.get("removed", False)) + striked_items = sum(1 for item in strike_data.values() + if item.get("strikes", 0) > 0 and not item.get("removed", False)) + + statistics[app_name] = { + "total_tracked": total_items, + "currently_striked": striked_items, + "removed": removed_items + } + except (json.JSONDecodeError, IOError) as e: + swaparr_logger.error(f"Error reading strike data for 
{app_name}: {str(e)}") + statistics[app_name] = {"error": str(e)} + + return jsonify({ + "enabled": enabled, + "settings": { + "max_strikes": settings.get("max_strikes", 3), + "max_download_time": settings.get("max_download_time", "2h"), + "ignore_above_size": settings.get("ignore_above_size", "25GB"), + "remove_from_client": settings.get("remove_from_client", True), + "dry_run": settings.get("dry_run", False) + }, + "statistics": statistics + }) + +@swaparr_bp.route('/settings', methods=['GET']) +def get_settings(): + """Get Swaparr settings""" + settings = load_settings("swaparr") + return jsonify(settings) + +@swaparr_bp.route('/settings', methods=['POST']) +def update_settings(): + """Update Swaparr settings""" + data = request.json + + if not data: + return jsonify({"success": False, "message": "No data provided"}), 400 + + # Load current settings + settings = load_settings("swaparr") + + # Update settings with provided data + for key, value in data.items(): + settings[key] = value + + # Save updated settings + success = save_settings("swaparr", settings) + + if success: + return jsonify({"success": True, "message": "Settings updated successfully"}) + else: + return jsonify({"success": False, "message": "Failed to save settings"}), 500 + +@swaparr_bp.route('/reset', methods=['POST']) +def reset_strikes(): + """Reset all strikes for all apps or a specific app""" + data = request.json + app_name = data.get('app_name') if data else None + + state_dir = os.path.join(os.getenv("CONFIG_DIR", "/config"), "swaparr") + + if not os.path.exists(state_dir): + return jsonify({"success": True, "message": "No strike data to reset"}) + + if app_name: + # Reset strikes for a specific app + app_dir = os.path.join(state_dir, app_name) + if os.path.exists(app_dir): + strike_file = os.path.join(app_dir, "strikes.json") + if os.path.exists(strike_file): + try: + os.remove(strike_file) + swaparr_logger.info(f"Reset strikes for {app_name}") + return jsonify({"success": True, 
"message": f"Strikes reset for {app_name}"}) + except IOError as e: + swaparr_logger.error(f"Error resetting strikes for {app_name}: {str(e)}") + return jsonify({"success": False, "message": f"Failed to reset strikes for {app_name}: {str(e)}"}), 500 + return jsonify({"success": False, "message": f"No strike data found for {app_name}"}), 404 + else: + # Reset strikes for all apps + try: + for app_name in os.listdir(state_dir): + app_dir = os.path.join(state_dir, app_name) + if os.path.isdir(app_dir): + strike_file = os.path.join(app_dir, "strikes.json") + if os.path.exists(strike_file): + os.remove(strike_file) + + swaparr_logger.info("Reset all strikes") + return jsonify({"success": True, "message": "All strikes reset"}) + except IOError as e: + swaparr_logger.error(f"Error resetting all strikes: {str(e)}") + return jsonify({"success": False, "message": f"Failed to reset all strikes: {str(e)}"}), 500 + +def register_routes(app): + """Register Swaparr routes with the Flask app.""" + app.register_blueprint(swaparr_bp, url_prefix='/api/swaparr') diff --git a/Huntarr.io-6.3.6/src/primary/apps/whisparr.py b/Huntarr.io-6.3.6/src/primary/apps/whisparr.py new file mode 100644 index 0000000..33b2622 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/whisparr.py @@ -0,0 +1,171 @@ +from flask import Blueprint, request, jsonify +import datetime, os, requests +from primary import keys_manager +from src.primary.utils.logger import get_logger +from src.primary.state import get_state_file_path +from src.primary.settings_manager import load_settings + +whisparr_bp = Blueprint('whisparr', __name__) +whisparr_logger = get_logger("whisparr") + +# Make sure we're using the correct state files +PROCESSED_MISSING_FILE = get_state_file_path("whisparr", "processed_missing") +PROCESSED_UPGRADES_FILE = get_state_file_path("whisparr", "processed_upgrades") + +@whisparr_bp.route('/test-connection', methods=['POST']) +def test_connection(): + """Test connection to a Whisparr API instance 
with comprehensive diagnostics""" + data = request.json + api_url = data.get('api_url') + api_key = data.get('api_key') + api_timeout = data.get('api_timeout', 30) # Use longer timeout for connection test + + if not api_url or not api_key: + return jsonify({"success": False, "message": "API URL and API Key are required"}), 400 + + # Log the test attempt + whisparr_logger.info(f"Testing connection to Whisparr V2 API at {api_url}") + + # First check if URL is properly formatted + if not (api_url.startswith('http://') or api_url.startswith('https://')): + error_msg = "API URL must start with http:// or https://" + whisparr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 400 + + # Try multiple API path combinations to handle different Whisparr V2 setups + api_paths = [ + "/api/system/status", # Standard V2 path + "/api/v3/system/status", # Some V2 instances use V3 API + "/system/status" # Direct path without /api prefix + ] + + success = False + last_error = None + response_data = None + + for api_path in api_paths: + test_url = f"{api_url.rstrip('/')}{api_path}" + headers = {'X-Api-Key': api_key} + whisparr_logger.debug(f"Trying Whisparr API path: {test_url}") + + try: + # Use a connection timeout separate from read timeout + response = requests.get(test_url, headers=headers, timeout=(10, api_timeout)) + + # Log HTTP status code for diagnostic purposes + whisparr_logger.debug(f"Whisparr API status code: {response.status_code} for path {api_path}") + + # Check HTTP status code + if response.status_code == 404: + # Try next path if 404 + continue + + response.raise_for_status() + + # Ensure the response is valid JSON + try: + response_data = response.json() + whisparr_logger.debug(f"Whisparr API response: {response_data}") + + # Verify this is actually a Whisparr API by checking for version + version = response_data.get('version', None) + if not version: + # No version info, try next path + last_error = "API response doesn't contain 
version information" + continue + + # The version number should start with 2 for Whisparr + if version.startswith('2'): + whisparr_logger.info(f"Successfully connected to Whisparr V2 API version {version} using path {api_path}") + success = True + break + elif version.startswith('3'): + error_msg = f"Connected to Whisparr V3 (version {version}). Use the Eros integration for V3." + whisparr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 400 + else: + # Connected to some other version, try next path + last_error = f"Connected to unknown version {version}, but Huntarr requires Whisparr V2" + continue + + except ValueError: + last_error = "Invalid JSON response from API" + continue + + except requests.exceptions.Timeout: + last_error = f"Connection timed out after {api_timeout} seconds" + continue + + except requests.exceptions.ConnectionError: + last_error = "Failed to connect. Check that the URL is correct and that Whisparr is running." + continue + + except requests.exceptions.HTTPError as e: + last_error = f"HTTP error: {str(e)}" + continue + + except Exception as e: + last_error = f"Unexpected error: {str(e)}" + continue + + # After trying all paths + if success: + return jsonify({ + "success": True, + "message": f"Successfully connected to Whisparr V2 (version {response_data.get('version')})", + "version": response_data.get('version') + }) + else: + error_msg = last_error or "Failed to connect to Whisparr API. Please check your URL and API key." 
+ whisparr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 400 + +# Function to check if Whisparr is configured +def is_configured(): + """Check if Whisparr API credentials are configured""" + try: + api_keys = keys_manager.load_api_keys("whisparr") + instances = api_keys.get("instances", []) + + for instance in instances: + if instance.get("enabled", True): + return True + + return False + except Exception as e: + whisparr_logger.error(f"Error checking if Whisparr is configured: {str(e)}") + return False + +# Get all valid instances from settings +def get_configured_instances(): + """Get all configured and enabled Whisparr instances""" + try: + api_keys = keys_manager.load_api_keys("whisparr") + instances = api_keys.get("instances", []) + + enabled_instances = [] + for instance in instances: + if not instance.get("enabled", True): + continue + + api_url = instance.get("api_url") + api_key = instance.get("api_key") + + if not api_url or not api_key: + continue + + # Add name and timeout + instance_name = instance.get("name", "Default") + api_timeout = instance.get("api_timeout", 90) + + enabled_instances.append({ + "api_url": api_url, + "api_key": api_key, + "instance_name": instance_name, + "api_timeout": api_timeout + }) + + return enabled_instances + except Exception as e: + whisparr_logger.error(f"Error getting configured Whisparr instances: {str(e)}") + return [] \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/apps/whisparr/__init__.py b/Huntarr.io-6.3.6/src/primary/apps/whisparr/__init__.py new file mode 100644 index 0000000..2e2c844 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/whisparr/__init__.py @@ -0,0 +1,95 @@ +""" +Whisparr app module for Huntarr +Contains functionality for missing items and quality upgrades in Whisparr + +Exclusively supports the v2 API (legacy). 
+""" + +# Module exports +from src.primary.apps.whisparr.missing import process_missing_items +from src.primary.apps.whisparr.upgrade import process_cutoff_upgrades +from src.primary.settings_manager import load_settings +from src.primary.utils.logger import get_logger + +# Define logger for this module +whisparr_logger = get_logger("whisparr") + +# For backward compatibility +process_missing_scenes = process_missing_items + +def get_configured_instances(): + """Get all configured and enabled Whisparr instances""" + settings = load_settings("whisparr") + instances = [] + # Use debug level to avoid log spam on new installations + whisparr_logger.debug(f"Loaded Whisparr settings for instance check: {settings}") + + if not settings: + whisparr_logger.debug("No settings found for Whisparr") + return instances + + # Always use Whisparr V2 API + # Use debug level to avoid log spam on new installations + whisparr_logger.debug("Using Whisparr V2 API exclusively") + + # Check if instances are configured + if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]: + # Use debug level to avoid log spam on new installations + whisparr_logger.debug(f"Found 'instances' list with {len(settings['instances'])} items. 
Processing...") + for idx, instance in enumerate(settings["instances"]): + whisparr_logger.debug(f"Checking instance #{idx}: {instance}") + # Enhanced validation + api_url = instance.get("api_url", "").strip() + api_key = instance.get("api_key", "").strip() + + # Enhanced URL validation - ensure URL has proper scheme + if api_url and not (api_url.startswith('http://') or api_url.startswith('https://')): + whisparr_logger.warning(f"Instance '{instance.get('name', 'Unnamed')}' has URL without http(s) scheme: {api_url}") + api_url = f"http://{api_url}" + whisparr_logger.warning(f"Auto-correcting URL to: {api_url}") + + is_enabled = instance.get("enabled", True) + + # Only include properly configured instances + if is_enabled and api_url and api_key: + instance_name = instance.get("name", "Default") + + # Create a settings object for this instance by combining global settings with instance-specific ones + instance_settings = settings.copy() + + # Remove instances list to avoid confusion + if "instances" in instance_settings: + del instance_settings["instances"] + + # Override with instance-specific settings + instance_settings["api_url"] = api_url + instance_settings["api_key"] = api_key + instance_settings["instance_name"] = instance_name + + # Add timeout setting with default if not present + if "api_timeout" not in instance_settings: + instance_settings["api_timeout"] = 30 + + # Use debug level to prevent log spam + whisparr_logger.debug(f"Adding configured Whisparr instance: {instance_name}") + instances.append(instance_settings) + else: + name = instance.get("name", "Unnamed") + if not is_enabled: + whisparr_logger.debug(f"Skipping disabled instance: {name}") + else: + # For brand new installations, don't spam logs with warnings about default instances + if name == 'Default': + # Use debug level for default instances to avoid log spam on new installations + whisparr_logger.debug(f"Skipping instance {name} due to missing API URL or API Key") + else: + # Still log 
warnings for non-default instances + whisparr_logger.warning(f"Skipping instance {name} due to missing API URL or API Key") + else: + whisparr_logger.debug("No instances array found in settings or it's empty") + + # Use debug level to avoid spamming logs, especially with 0 instances + whisparr_logger.debug(f"Found {len(instances)} configured and enabled Whisparr instances") + return instances + +__all__ = ["process_missing_items", "process_missing_scenes", "process_cutoff_upgrades", "get_configured_instances"] \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/apps/whisparr/api.py b/Huntarr.io-6.3.6/src/primary/apps/whisparr/api.py new file mode 100644 index 0000000..c47b414 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/whisparr/api.py @@ -0,0 +1,475 @@ +#!/usr/bin/env python3 +""" +Whisparr-specific API functions +Handles all communication with the Whisparr API + +Exclusively uses the Whisparr V2 API +""" + +import requests +import json +import time +import datetime +import traceback +import sys +from typing import List, Dict, Any, Optional, Union +from src.primary.utils.logger import get_logger + +# Get logger for the Whisparr app +whisparr_logger = get_logger("whisparr") + +# Use a session for better performance +session = requests.Session() + +def arr_request(api_url: str, api_key: str, api_timeout: int, endpoint: str, method: str = "GET", data: Dict = None) -> Any: + """ + Make a request to the Whisparr V2 API. + + Args: + api_url: The base URL of the Whisparr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + endpoint: The API endpoint to call + method: HTTP method (GET, POST, PUT, DELETE) + data: Optional data to send with the request + + Returns: + The JSON response from the API, or None if the request failed + """ + if not api_url or not api_key: + whisparr_logger.error("API URL or API key is missing. 
Check your settings.") + return None + + # Always try standard path first + api_base = "api" + whisparr_logger.debug(f"Using Whisparr API path: {api_base}") + + # Full URL - ensure no double slashes + url = f"{api_url.rstrip('/')}/{api_base}/{endpoint.lstrip('/')}" + + # Add debug logging for the exact URL being called + whisparr_logger.debug(f"Making {method} request to: {url}") + + # Headers + headers = { + "X-Api-Key": api_key, + "Content-Type": "application/json" + } + + try: + if method == "GET": + response = session.get(url, headers=headers, timeout=api_timeout) + elif method == "POST": + response = session.post(url, headers=headers, json=data, timeout=api_timeout) + elif method == "PUT": + response = session.put(url, headers=headers, json=data, timeout=api_timeout) + elif method == "DELETE": + response = session.delete(url, headers=headers, timeout=api_timeout) + else: + whisparr_logger.error(f"Unsupported HTTP method: {method}") + return None + + # If we get a 404, try with v3 path instead + if response.status_code == 404: + api_base = "api/v3" + v3_url = f"{api_url.rstrip('/')}/{api_base}/{endpoint.lstrip('/')}" + whisparr_logger.debug(f"Standard path returned 404, trying with V3 path: {v3_url}") + + if method == "GET": + response = session.get(v3_url, headers=headers, timeout=api_timeout) + elif method == "POST": + response = session.post(v3_url, headers=headers, json=data, timeout=api_timeout) + elif method == "PUT": + response = session.put(v3_url, headers=headers, json=data, timeout=api_timeout) + elif method == "DELETE": + response = session.delete(v3_url, headers=headers, timeout=api_timeout) + + whisparr_logger.debug(f"V3 path request returned status code: {response.status_code}") + + # Check if the request was successful + try: + response.raise_for_status() + except requests.exceptions.HTTPError as e: + whisparr_logger.error(f"Error during {method} request to {endpoint}: {e}, Status Code: {response.status_code}") + whisparr_logger.debug(f"Response 
content: {response.text[:200]}") + return None + + # Try to parse JSON response + try: + if response.text: + result = response.json() + whisparr_logger.debug(f"Response from {response.url}: Status {response.status_code}, JSON parsed successfully") + return result + else: + whisparr_logger.debug(f"Response from {response.url}: Status {response.status_code}, Empty response") + return {} + except json.JSONDecodeError: + whisparr_logger.error(f"Invalid JSON response from API: {response.text[:200]}") + return None + + except requests.exceptions.RequestException as e: + whisparr_logger.error(f"Request failed: {e}") + return None + except Exception as e: + whisparr_logger.error(f"Unexpected error during API request: {e}") + return None + +def get_download_queue_size(api_url: str, api_key: str, api_timeout: int) -> int: + """ + Get the current size of the download queue. + + Args: + api_url: The base URL of the Whisparr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + + Returns: + The number of items in the download queue, or -1 if the request failed + """ + response = arr_request(api_url, api_key, api_timeout, "queue") + + if response is None: + return -1 + + # V2 API uses records in queue response + if isinstance(response, dict) and "records" in response: + return len(response["records"]) + elif isinstance(response, list): + return len(response) + else: + return -1 + +def get_items_with_missing(api_url: str, api_key: str, api_timeout: int, monitored_only: bool) -> List[Dict[str, Any]]: + """ + Get a list of items with missing files (not downloaded/available). + + Args: + api_url: The base URL of the Whisparr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + monitored_only: If True, only return monitored items. + + Returns: + A list of item objects with missing files, or None if the request failed. 
+ """ + try: + whisparr_logger.debug(f"Retrieving missing items...") + + # Endpoint parameters - always use v2 format + endpoint = "wanted/missing?pageSize=1000&sortKey=airDateUtc&sortDirection=descending" + + response = arr_request(api_url, api_key, api_timeout, endpoint) + + if response is None: + return None + + # Extract the episodes/items + items = [] + if isinstance(response, dict) and "records" in response: + items = response["records"] + + # Filter monitored if needed + if monitored_only: + items = [item for item in items if item.get("monitored", False)] + + whisparr_logger.debug(f"Found {len(items)} missing items") + return items + + except Exception as e: + whisparr_logger.error(f"Error retrieving missing items: {str(e)}") + return None + +def get_cutoff_unmet_items(api_url: str, api_key: str, api_timeout: int, monitored_only: bool) -> List[Dict[str, Any]]: + """ + Get a list of items that don't meet their quality profile cutoff. + + Args: + api_url: The base URL of the Whisparr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + monitored_only: If True, only return monitored items. + + Returns: + A list of item objects that need quality upgrades, or None if the request failed. 
+ """ + try: + whisparr_logger.debug(f"Retrieving cutoff unmet items...") + + # Endpoint - always use v2 format + endpoint = "wanted/cutoff?pageSize=1000&sortKey=airDateUtc&sortDirection=descending" + + response = arr_request(api_url, api_key, api_timeout, endpoint) + + if response is None: + return None + + # Extract the episodes/items + items = [] + if isinstance(response, dict) and "records" in response: + items = response["records"] + + whisparr_logger.debug(f"Found {len(items)} cutoff unmet items") + + # Just filter monitored if needed + if monitored_only: + items = [item for item in items if item.get("monitored", False)] + whisparr_logger.debug(f"Found {len(items)} cutoff unmet items after filtering monitored") + + return items + + except Exception as e: + whisparr_logger.error(f"Error retrieving cutoff unmet items: {str(e)}") + return None + +def refresh_item(api_url: str, api_key: str, api_timeout: int, item_id: int) -> int: + """ + Refresh an item in Whisparr. + + Args: + api_url: The base URL of the Whisparr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + item_id: The ID of the item to refresh + + Returns: + The command ID if the refresh was triggered successfully, None otherwise + """ + try: + whisparr_logger.debug(f"Refreshing item with ID {item_id}") + + # Some Whisparr versions have issues with RefreshEpisode, try a safer approach + # Use series refresh instead if we can get the series ID from the episode + # First, attempt to get the episode details + episode_endpoint = f"episode/{item_id}" + episode_data = arr_request(api_url, api_key, api_timeout, episode_endpoint) + + if episode_data and "seriesId" in episode_data: + # We have the series ID, use series refresh which is more reliable + series_id = episode_data["seriesId"] + whisparr_logger.debug(f"Retrieved series ID {series_id} for episode {item_id}, using series refresh") + + # RefreshSeries is generally more reliable + payload = { + "name": 
"RefreshSeries", + "seriesId": series_id + } + else: + # Fall back to episode refresh if we can't get the series ID + whisparr_logger.debug(f"Could not retrieve series ID for episode {item_id}, using episode refresh") + payload = { + "name": "RefreshEpisode", + "episodeIds": [item_id] + } + + # For commands, we need to directly try both path formats since command endpoints + # may have different structures in different Whisparr versions + command_endpoint = "command" + url = f"{api_url.rstrip('/')}/api/{command_endpoint}" + backup_url = f"{api_url.rstrip('/')}/api/v3/{command_endpoint}" + + headers = { + "X-Api-Key": api_key, + "Content-Type": "application/json" + } + + # Try standard API path first + whisparr_logger.debug(f"Attempting command with standard API path: {url}") + try: + response = session.post(url, headers=headers, json=payload, timeout=api_timeout) + # If we get a 404 or 405, try the v3 path + if response.status_code in [404, 405]: + whisparr_logger.debug(f"Standard path returned {response.status_code}, trying with V3 path: {backup_url}") + response = session.post(backup_url, headers=headers, json=payload, timeout=api_timeout) + + response.raise_for_status() + result = response.json() + + if result and "id" in result: + command_id = result["id"] + whisparr_logger.debug(f"Refresh command triggered with ID {command_id}") + return command_id + else: + whisparr_logger.error("Failed to trigger refresh command - no command ID returned") + return None + except requests.exceptions.HTTPError as e: + whisparr_logger.error(f"HTTP error during refresh command: {e}, Status Code: {response.status_code}") + whisparr_logger.debug(f"Response content: {response.text[:200]}") + return None + except Exception as e: + whisparr_logger.error(f"Error sending refresh command: {e}") + return None + + except Exception as e: + whisparr_logger.error(f"Error refreshing item: {str(e)}") + return None + +def item_search(api_url: str, api_key: str, api_timeout: int, item_ids: 
List[int]) -> int: + """ + Trigger a search for one or more items. + + Args: + api_url: The base URL of the Whisparr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + item_ids: A list of item IDs to search for + + Returns: + The command ID if the search command was triggered successfully, None otherwise + """ + try: + whisparr_logger.debug(f"Searching for items with IDs: {item_ids}") + + # Always use the same payload format since we're always using v2 API + payload = { + "name": "EpisodeSearch", + "episodeIds": item_ids + } + + # For commands, we need to directly try both path formats + command_endpoint = "command" + url = f"{api_url.rstrip('/')}/api/{command_endpoint}" + backup_url = f"{api_url.rstrip('/')}/api/v3/{command_endpoint}" + + headers = { + "X-Api-Key": api_key, + "Content-Type": "application/json" + } + + # Try standard API path first + whisparr_logger.debug(f"Attempting command with standard API path: {url}") + try: + response = session.post(url, headers=headers, json=payload, timeout=api_timeout) + # If we get a 404 or 405, try the v3 path + if response.status_code in [404, 405]: + whisparr_logger.debug(f"Standard path returned {response.status_code}, trying with V3 path: {backup_url}") + response = session.post(backup_url, headers=headers, json=payload, timeout=api_timeout) + + response.raise_for_status() + result = response.json() + + if result and "id" in result: + command_id = result["id"] + whisparr_logger.debug(f"Search command triggered with ID {command_id}") + return command_id + else: + whisparr_logger.error("Failed to trigger search command - no command ID returned") + return None + except requests.exceptions.HTTPError as e: + whisparr_logger.error(f"HTTP error during search command: {e}, Status Code: {response.status_code}") + whisparr_logger.debug(f"Response content: {response.text[:200]}") + return None + except Exception as e: + whisparr_logger.error(f"Error sending search command: {e}") + return 
None + + except Exception as e: + whisparr_logger.error(f"Error searching for items: {str(e)}") + return None + +def get_command_status(api_url: str, api_key: str, api_timeout: int, command_id: int) -> Optional[Dict]: + """ + Get the status of a specific command. + + Args: + api_url: The base URL of the Whisparr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + command_id: The ID of the command to check + + Returns: + A dictionary containing the command status, or None if the request failed. + """ + if not command_id: + whisparr_logger.error("No command ID provided for status check.") + return None + + try: + # For commands, we need to directly try both path formats + command_endpoint = f"command/{command_id}" + url = f"{api_url.rstrip('/')}/api/{command_endpoint}" + backup_url = f"{api_url.rstrip('/')}/api/v3/{command_endpoint}" + + headers = { + "X-Api-Key": api_key, + "Content-Type": "application/json" + } + + # Try standard API path first + whisparr_logger.debug(f"Checking command status with standard API path: {url}") + try: + response = session.get(url, headers=headers, timeout=api_timeout) + # If we get a 404, try the v3 path + if response.status_code == 404: + whisparr_logger.debug(f"Standard path returned 404, trying with V3 path: {backup_url}") + response = session.get(backup_url, headers=headers, timeout=api_timeout) + + response.raise_for_status() + result = response.json() + + whisparr_logger.debug(f"Command {command_id} status: {result.get('status', 'unknown')}") + return result + except requests.exceptions.HTTPError as e: + whisparr_logger.error(f"HTTP error getting command status: {e}, Status Code: {response.status_code}") + whisparr_logger.debug(f"Response content: {response.text[:200]}") + return None + except Exception as e: + whisparr_logger.error(f"Error getting command status: {e}") + return None + + except Exception as e: + whisparr_logger.error(f"Error getting command status for ID {command_id}: {e}") 
+ return None + +def check_connection(api_url: str, api_key: str, api_timeout: int) -> bool: + """ + Check the connection to Whisparr V2 API. + + Args: + api_url: The base URL of the Whisparr API + api_key: The API key for authentication + api_timeout: Timeout for the API request + + Returns: + True if the connection is successful, False otherwise + """ + try: + # For Whisparr V2, we need to handle both regular and v3 API formats + whisparr_logger.debug(f"Checking connection to Whisparr V2 instance at {api_url}") + + # First try with standard path + endpoint = "system/status" + response = arr_request(api_url, api_key, api_timeout, endpoint) + + # If that failed, try with v3 path format + if response is None: + whisparr_logger.debug("Standard API path failed, trying v3 format...") + # Try direct HTTP request to v3 endpoint without using arr_request + url = f"{api_url.rstrip('/')}/api/v3/system/status" + headers = {'X-Api-Key': api_key} + + try: + resp = session.get(url, headers=headers, timeout=api_timeout) + resp.raise_for_status() + response = resp.json() + except Exception as e: + whisparr_logger.debug(f"V3 API path also failed: {str(e)}") + return False + + if response is not None: + # Get the version information if available + version = response.get("version", "unknown") + + # Check if this is a v2.x version + if version and version.startswith('2'): + whisparr_logger.info(f"Successfully connected to Whisparr V2 API version: {version}") + return True + else: + whisparr_logger.warning(f"Connected to Whisparr but found unexpected version: {version}, expected 2.x") + return False + else: + whisparr_logger.error("Failed to connect to Whisparr V2 API") + return False + + except Exception as e: + whisparr_logger.error(f"Error checking connection to Whisparr V2 API: {str(e)}") + return False \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/apps/whisparr/missing.py b/Huntarr.io-6.3.6/src/primary/apps/whisparr/missing.py new file mode 100644 index 
0000000..0305c8b --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/whisparr/missing.py @@ -0,0 +1,227 @@ +#!/usr/bin/env python3 +""" +Missing Items Processing for Whisparr +Handles searching for missing items in Whisparr + +Supports both v2 (legacy) and v3 (Eros) API versions +""" + +import time +import random +import datetime +from typing import List, Dict, Any, Set, Callable +from src.primary.utils.logger import get_logger +from src.primary.apps.whisparr import api as whisparr_api +from src.primary.settings_manager import load_settings, get_advanced_setting +from src.primary.stateful_manager import is_processed, add_processed_id +from src.primary.stats_manager import increment_stat +from src.primary.utils.history_utils import log_processed_media +from src.primary.state import check_state_reset + +# Get logger for the app +whisparr_logger = get_logger("whisparr") + +def process_missing_items( + app_settings: Dict[str, Any], + stop_check: Callable[[], bool] # Function to check if stop is requested +) -> bool: + """ + Process missing items in Whisparr based on provided settings. + + Args: + app_settings: Dictionary containing all settings for Whisparr + stop_check: A function that returns True if the process should stop + + Returns: + True if any items were processed, False otherwise. 
+ """ + whisparr_logger.info("Starting missing items processing cycle for Whisparr.") + processed_any = False + + # Reset state files if enough time has passed + check_state_reset("whisparr") + + # Extract necessary settings + api_url = app_settings.get("api_url", "").strip() + api_key = app_settings.get("api_key", "").strip() + api_timeout = get_advanced_setting("api_timeout", 120) # Use general.json value + instance_name = app_settings.get("instance_name", "Whisparr Default") + + # Use the centralized advanced setting for stateful management hours + stateful_management_hours = get_advanced_setting("stateful_management_hours", 168) + + monitored_only = app_settings.get("monitored_only", True) + skip_future_releases = app_settings.get("skip_future_releases", True) + skip_item_refresh = app_settings.get("skip_item_refresh", False) + + # Use the new hunt_missing_items parameter name, falling back to hunt_missing_scenes for backwards compatibility + hunt_missing_items = app_settings.get("hunt_missing_items", app_settings.get("hunt_missing_scenes", 0)) + + # Use advanced settings from general.json for command operations + command_wait_delay = get_advanced_setting("command_wait_delay", 1) + command_wait_attempts = get_advanced_setting("command_wait_attempts", 600) + + # Log that we're using Whisparr V2 API + whisparr_logger.info(f"Using Whisparr V2 API for instance: {instance_name}") + + # Skip if hunt_missing_items is set to 0 + if hunt_missing_items <= 0: + whisparr_logger.info("'hunt_missing_items' setting is 0 or less. Skipping missing item processing.") + return False + + # Check for stop signal + if stop_check(): + whisparr_logger.info("Stop requested before starting missing items. 
Aborting...") + return False + + # Get missing items + whisparr_logger.info(f"Retrieving items with missing files...") + missing_items = whisparr_api.get_items_with_missing(api_url, api_key, api_timeout, monitored_only) + + if missing_items is None: # API call failed + whisparr_logger.error("Failed to retrieve missing items from Whisparr API.") + return False + + if not missing_items: + whisparr_logger.info("No missing items found.") + return False + + # Check for stop signal after retrieving items + if stop_check(): + whisparr_logger.info("Stop requested after retrieving missing items. Aborting...") + return False + + whisparr_logger.info(f"Found {len(missing_items)} items with missing files.") + + # Filter out future releases if configured + if skip_future_releases: + now = datetime.datetime.now(datetime.timezone.utc) + original_count = len(missing_items) + # Whisparr item object has 'airDateUtc' for release dates + missing_items = [ + item for item in missing_items + if not item.get('airDateUtc') or ( + item.get('airDateUtc') and + datetime.datetime.fromisoformat(item['airDateUtc'].replace('Z', '+00:00')) < now + ) + ] + skipped_count = original_count - len(missing_items) + if skipped_count > 0: + whisparr_logger.info(f"Skipped {skipped_count} future item releases based on air date.") + + if not missing_items: + whisparr_logger.info("No missing items left to process after filtering future releases.") + return False + + # Filter out already processed items using stateful management + unprocessed_items = [] + for item in missing_items: + item_id = str(item.get("id")) + if not is_processed("whisparr", instance_name, item_id): + unprocessed_items.append(item) + else: + whisparr_logger.debug(f"Skipping already processed item ID: {item_id}") + + whisparr_logger.info(f"Found {len(unprocessed_items)} unprocessed items out of {len(missing_items)} total items with missing files.") + + if not unprocessed_items: + whisparr_logger.info(f"No unprocessed items found for 
{instance_name}. All available items have been processed.") + return False + + items_processed = 0 + processing_done = False + + # Select items to search based on configuration + whisparr_logger.info(f"Randomly selecting up to {hunt_missing_items} missing items.") + items_to_search = random.sample(unprocessed_items, min(len(unprocessed_items), hunt_missing_items)) + + whisparr_logger.info(f"Selected {len(items_to_search)} missing items to search.") + + # Process selected items + for item in items_to_search: + # Check for stop signal before each item + if stop_check(): + whisparr_logger.info("Stop requested during item processing. Aborting...") + break + + # Re-check limit in case it changed + current_limit = app_settings.get("hunt_missing_items", app_settings.get("hunt_missing_scenes", 1)) + if items_processed >= current_limit: + whisparr_logger.info(f"Reached HUNT_MISSING_ITEMS limit ({current_limit}) for this cycle.") + break + + item_id = item.get("id") + title = item.get("title", "Unknown Title") + season_episode = f"S{item.get('seasonNumber', 0):02d}E{item.get('episodeNumber', 0):02d}" + + whisparr_logger.info(f"Processing missing item: \"{title}\" - {season_episode} (Item ID: {item_id})") + + # Refresh the item information if not skipped + refresh_command_id = None + if not skip_item_refresh: + whisparr_logger.info(" - Refreshing item information...") + refresh_command_id = whisparr_api.refresh_item(api_url, api_key, api_timeout, item_id) + if refresh_command_id: + whisparr_logger.info(f"Triggered refresh command {refresh_command_id}. Waiting a few seconds...") + time.sleep(5) # Basic wait + else: + whisparr_logger.warning(f"Failed to trigger refresh command for item ID: {item_id}. 
Proceeding without refresh.") + else: + whisparr_logger.info(" - Skipping item refresh (skip_item_refresh=true)") + + # Mark the item as processed BEFORE triggering any searches + add_processed_id("whisparr", instance_name, str(item_id)) + whisparr_logger.debug(f"Added item ID {item_id} to processed list for {instance_name}") + + # Check for stop signal before searching + if stop_check(): + whisparr_logger.info(f"Stop requested before searching for {title}. Aborting...") + break + + # Search for the item + whisparr_logger.info(" - Searching for missing item...") + search_command_id = whisparr_api.item_search(api_url, api_key, api_timeout, [item_id]) + if search_command_id: + whisparr_logger.info(f"Triggered search command {search_command_id}. Assuming success for now.") + + # Log to history system + media_name = f"{title} - {season_episode}" + log_processed_media("whisparr", media_name, item_id, instance_name, "missing") + whisparr_logger.debug(f"Logged history entry for item: {media_name}") + + items_processed += 1 + processing_done = True + + # Increment the hunted statistics for Whisparr + increment_stat("whisparr", "hunted", 1) + whisparr_logger.debug(f"Incremented whisparr hunted statistics by 1") + + # Log progress + current_limit = app_settings.get("hunt_missing_items", app_settings.get("hunt_missing_scenes", 1)) + whisparr_logger.info(f"Processed {items_processed}/{current_limit} missing items this cycle.") + else: + whisparr_logger.warning(f"Failed to trigger search command for item ID {item_id}.") + # Do not mark as processed if search couldn't be triggered + continue + + # Log final status + if items_processed > 0: + whisparr_logger.info(f"Completed processing {items_processed} missing items for this cycle.") + else: + whisparr_logger.info("No new missing items were processed in this run.") + + return processing_done + +# For backward compatibility with the background processing system +def process_missing_scenes(app_settings, stop_check): + """ + 
Backwards compatibility function that calls process_missing_items. + + Args: + app_settings: Dictionary containing all settings for Whisparr + stop_check: A function that returns True if the process should stop + + Returns: + Result from process_missing_items + """ + return process_missing_items(app_settings, stop_check) \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/apps/whisparr/upgrade.py b/Huntarr.io-6.3.6/src/primary/apps/whisparr/upgrade.py new file mode 100644 index 0000000..d04415c --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/apps/whisparr/upgrade.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python3 +""" +Quality Upgrade Processing for Whisparr +Handles searching for items that need quality upgrades in Whisparr + +Supports both v2 (legacy) and v3 (Eros) API versions +""" + +import time +import random +from typing import Dict, Any, List, Callable +from datetime import datetime, timedelta +from src.primary.utils.logger import get_logger +from src.primary.apps.whisparr import api as whisparr_api +from src.primary.settings_manager import load_settings, get_advanced_setting +from src.primary.stateful_manager import is_processed, add_processed_id +from src.primary.stats_manager import increment_stat +from src.primary.utils.history_utils import log_processed_media +from src.primary.state import check_state_reset + +# Get logger for the app +whisparr_logger = get_logger("whisparr") + +def process_cutoff_upgrades( + app_settings: Dict[str, Any], + stop_check: Callable[[], bool] # Function to check if stop is requested +) -> bool: + """ + Process quality cutoff upgrades for Whisparr based on settings. + + Args: + app_settings: Dictionary containing all settings for Whisparr + stop_check: A function that returns True if the process should stop + + Returns: + True if any items were processed for upgrades, False otherwise. 
+ """ + whisparr_logger.info("Starting quality cutoff upgrades processing cycle for Whisparr.") + processed_any = False + + # Reset state files if enough time has passed + check_state_reset("whisparr") + + # Extract necessary settings + api_url = app_settings.get("api_url", "").strip() + api_key = app_settings.get("api_key", "").strip() + api_timeout = get_advanced_setting("api_timeout", 120) # Use general.json value + instance_name = app_settings.get("instance_name", "Whisparr Default") + + # Use advanced settings from general.json for command operations + command_wait_delay = get_advanced_setting("command_wait_delay", 1) + command_wait_attempts = get_advanced_setting("command_wait_attempts", 600) + + monitored_only = app_settings.get("monitored_only", True) + skip_item_refresh = app_settings.get("skip_item_refresh", False) + + # Use the new hunt_upgrade_items parameter name, falling back to hunt_upgrade_scenes for backwards compatibility + hunt_upgrade_items = app_settings.get("hunt_upgrade_items", app_settings.get("hunt_upgrade_scenes", 0)) + + state_reset_interval_hours = get_advanced_setting("stateful_management_hours", 168) + + # Log that we're using Whisparr V2 API + whisparr_logger.info(f"Using Whisparr V2 API for instance: {instance_name}") + + # Skip if hunt_upgrade_items is set to 0 + if hunt_upgrade_items <= 0: + whisparr_logger.info("'hunt_upgrade_items' setting is 0 or less. Skipping quality upgrade processing.") + return False + + # Check for stop signal + if stop_check(): + whisparr_logger.info("Stop requested before starting quality upgrades. 
Aborting...") + return False + + # Get items eligible for upgrade + whisparr_logger.info(f"Retrieving items eligible for cutoff upgrade...") + upgrade_eligible_data = whisparr_api.get_cutoff_unmet_items(api_url, api_key, api_timeout, monitored_only) + + if not upgrade_eligible_data: + whisparr_logger.info("No items found eligible for upgrade or error retrieving them.") + return False + + # Check for stop signal after retrieving eligible items + if stop_check(): + whisparr_logger.info("Stop requested after retrieving upgrade eligible items. Aborting...") + return False + + whisparr_logger.info(f"Found {len(upgrade_eligible_data)} items eligible for quality upgrade.") + + # Filter out already processed items using stateful management + unprocessed_items = [] + for item in upgrade_eligible_data: + item_id = str(item.get("id")) + if not is_processed("whisparr", instance_name, item_id): + unprocessed_items.append(item) + else: + whisparr_logger.debug(f"Skipping already processed item ID: {item_id}") + + whisparr_logger.info(f"Found {len(unprocessed_items)} unprocessed items out of {len(upgrade_eligible_data)} total items eligible for quality upgrade.") + + if not unprocessed_items: + whisparr_logger.info(f"No unprocessed items found for {instance_name}. All available items have been processed.") + return False + + items_processed = 0 + processing_done = False + + # Always use random selection for upgrades + whisparr_logger.info(f"Randomly selecting up to {hunt_upgrade_items} items for quality upgrade.") + items_to_upgrade = random.sample(unprocessed_items, min(len(unprocessed_items), hunt_upgrade_items)) + + whisparr_logger.info(f"Selected {len(items_to_upgrade)} items for quality upgrade.") + + # Process selected items + for item in items_to_upgrade: + # Check for stop signal before each item + if stop_check(): + whisparr_logger.info("Stop requested during item processing. 
Aborting...") + break + + # Re-check limit in case it changed + current_limit = app_settings.get("hunt_upgrade_items", app_settings.get("hunt_upgrade_scenes", 1)) + if items_processed >= current_limit: + whisparr_logger.info(f"Reached HUNT_UPGRADE_ITEMS limit ({current_limit}) for this cycle.") + break + + item_id = item.get("id") + title = item.get("title", "Unknown Title") + season_episode = f"S{item.get('seasonNumber', 0):02d}E{item.get('episodeNumber', 0):02d}" + + current_quality = item.get("episodeFile", {}).get("quality", {}).get("quality", {}).get("name", "Unknown") + + whisparr_logger.info(f"Processing item for quality upgrade: \"{title}\" - {season_episode} (Item ID: {item_id})") + whisparr_logger.info(f" - Current quality: {current_quality}") + + # Refresh the item information if not skipped + refresh_command_id = None + if not skip_item_refresh: + whisparr_logger.info(" - Refreshing item information...") + refresh_command_id = whisparr_api.refresh_item(api_url, api_key, api_timeout, item_id) + if refresh_command_id: + whisparr_logger.info(f"Triggered refresh command {refresh_command_id}. Waiting a few seconds...") + time.sleep(5) # Basic wait + else: + whisparr_logger.warning(f"Failed to trigger refresh command for item ID: {item_id}. Proceeding without refresh.") + else: + whisparr_logger.info(" - Skipping item refresh (skip_item_refresh=true)") + + # Check for stop signal before searching + if stop_check(): + whisparr_logger.info(f"Stop requested before searching for {title}. 
Aborting...") + break + + # Mark the item as processed BEFORE triggering any searches + add_processed_id("whisparr", instance_name, str(item_id)) + whisparr_logger.debug(f"Added item ID {item_id} to processed list for {instance_name}") + + # Search for the item + whisparr_logger.info(" - Searching for quality upgrade...") + search_command_id = whisparr_api.item_search(api_url, api_key, api_timeout, [item_id]) + if search_command_id: + whisparr_logger.info(f"Triggered search command {search_command_id}. Assuming success for now.") + + # Log to history so the upgrade appears in the history UI + series_title = item.get("series", {}).get("title", "Unknown Series") + media_name = f"{series_title} - {season_episode} - {title}" + log_processed_media("whisparr", media_name, item_id, instance_name, "upgrade") + whisparr_logger.debug(f"Logged quality upgrade to history for item ID {item_id}") + + items_processed += 1 + processing_done = True + + # Increment the upgraded statistics for Whisparr + increment_stat("whisparr", "upgraded", 1) + whisparr_logger.debug(f"Incremented whisparr upgraded statistics by 1") + + # Log progress + current_limit = app_settings.get("hunt_upgrade_items", app_settings.get("hunt_upgrade_scenes", 1)) + whisparr_logger.info(f"Processed {items_processed}/{current_limit} items for quality upgrade this cycle.") + else: + whisparr_logger.warning(f"Failed to trigger search command for item ID {item_id}.") + # Do not mark as processed if search couldn't be triggered + continue + + # Log final status + if items_processed > 0: + whisparr_logger.info(f"Completed processing {items_processed} items for quality upgrade for this cycle.") + else: + whisparr_logger.info("No new items were processed for quality upgrade in this run.") + + return processing_done \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/apps/whisparr_routes.py b/Huntarr.io-6.3.6/src/primary/apps/whisparr_routes.py new file mode 100644 index 0000000..6c35943 --- /dev/null 
+++ b/Huntarr.io-6.3.6/src/primary/apps/whisparr_routes.py @@ -0,0 +1,337 @@ +#!/usr/bin/env python3 + +from flask import Blueprint, request, jsonify +import datetime, os, requests +from src.primary import keys_manager +from src.primary.state import get_state_file_path, reset_state_file +from src.primary.utils.logger import get_logger, APP_LOG_FILES +import traceback +import socket +from urllib.parse import urlparse +from src.primary.apps.whisparr import api as whisparr_api + +whisparr_bp = Blueprint('whisparr', __name__) +whisparr_logger = get_logger("whisparr") + +# Make sure we're using the correct state files +PROCESSED_MISSING_FILE = get_state_file_path("whisparr", "processed_missing") +PROCESSED_UPGRADES_FILE = get_state_file_path("whisparr", "processed_upgrades") + +@whisparr_bp.route('/status', methods=['GET']) +def get_status(): + """Get the status of all configured Whisparr instances""" + try: + # Get all configured instances + api_keys = keys_manager.load_api_keys("whisparr") + instances = api_keys.get("instances", []) + + connected_count = 0 + total_configured = len(instances) + + for instance in instances: + api_url = instance.get("api_url") + api_key = instance.get("api_key") + if api_url and api_key and instance.get("enabled", True): + # Use a short timeout for status checks + if whisparr_api.check_connection(api_url, api_key, 5): + connected_count += 1 + + return jsonify({ + "configured": total_configured > 0, + "connected": connected_count > 0, + "connected_count": connected_count, + "total_configured": total_configured + }) + except Exception as e: + whisparr_logger.error(f"Error getting Whisparr status: {str(e)}") + return jsonify({ + "configured": False, + "connected": False, + "error": str(e) + }), 500 + +@whisparr_bp.route('/test-connection', methods=['POST']) +def test_connection(): + """Test connection to a Whisparr API instance""" + data = request.json + api_url = data.get('api_url') + api_key = data.get('api_key') + api_timeout = 
data.get('api_timeout', 30) # Use longer timeout for connection test + + if not api_url or not api_key: + return jsonify({"success": False, "message": "API URL and API Key are required"}), 400 + + whisparr_logger.info(f"Testing connection to Whisparr API at {api_url}") + + # Validate URL format + if not (api_url.startswith('http://') or api_url.startswith('https://')): + error_msg = "API URL must start with http:// or https://" + whisparr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 400 + + # Try to establish a socket connection first to check basic connectivity + parsed_url = urlparse(api_url) + hostname = parsed_url.hostname + port = parsed_url.port or (443 if parsed_url.scheme == 'https' else 80) + + try: + # Try socket connection for quick feedback on connectivity issues + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(3) # Short timeout for quick feedback + result = sock.connect_ex((hostname, port)) + sock.close() + + if result != 0: + error_msg = f"Connection refused - Unable to connect to {hostname}:{port}. Please check if the server is running and the port is correct." + whisparr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 404 + except socket.gaierror: + error_msg = f"DNS resolution failed - Cannot resolve hostname: {hostname}. Please check your URL." 
+ whisparr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 404 + except Exception as e: + # Log the socket testing error but continue with the full request + whisparr_logger.debug(f"Socket test error, continuing with full request: {str(e)}") + + # First try standard API endpoint (Whisparr v2) + api_paths = [ + {"url": f"{api_url.rstrip('/')}/api/system/status", "version": "v2"}, + {"url": f"{api_url.rstrip('/')}/api/v3/system/status", "version": "v3"} + ] + + headers = { + "X-Api-Key": api_key, + "Content-Type": "application/json" + } + + response = None + detected_version = None + + # Try each API path in order + for api_path in api_paths: + try: + url = api_path["url"] + whisparr_logger.debug(f"Trying API path: {url}") + response = requests.get(url, headers=headers, timeout=(10, api_timeout)) + + if response.status_code == 200: + detected_version = api_path["version"] + break + + except requests.exceptions.RequestException: + continue + + # If no successful response was obtained + if not response or response.status_code != 200: + if response: + # For HTTP errors, provide more specific feedback + if response.status_code == 401: + error_msg = "Authentication failed: Invalid API key" + whisparr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 401 + elif response.status_code == 403: + error_msg = "Access forbidden: Check API key permissions" + whisparr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 403 + elif response.status_code == 404: + error_msg = "API endpoint not found: This doesn't appear to be a valid Whisparr server. Check your URL." 
+ whisparr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 404 + elif response.status_code >= 500: + error_msg = f"Whisparr server error (HTTP {response.status_code}): The Whisparr server is experiencing issues" + whisparr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), response.status_code + else: + error_msg = f"HTTP error {response.status_code} connecting to Whisparr" + whisparr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), response.status_code + else: + error_msg = "Could not connect to any Whisparr API endpoint" + whisparr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 404 + + # Successfully connected, now validate version + try: + response_data = response.json() + version = response_data.get('version', 'unknown') + whisparr_logger.info(f"Successfully connected to Whisparr API version: {version} (API {detected_version})") + + # Check if this is a v2 version + if version and version.startswith('2'): + # Detected v2 + return jsonify({ + "success": True, + "message": "Successfully connected to Whisparr API", + "version": version, + "is_v2": True + }) + elif version and version.startswith('3'): + # Detected Eros API (V3) + error_msg = f"Incompatible Whisparr version {version} detected. Huntarr requires Whisparr V2." + whisparr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 400 + else: + error_msg = f"Unexpected Whisparr version {version} detected. Huntarr requires Whisparr V2." + whisparr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 400 + except ValueError: + error_msg = "Invalid JSON response from Whisparr API - This doesn't appear to be a valid Whisparr server" + whisparr_logger.error(f"{error_msg}. 
Response content: {response.text[:200]}") + return jsonify({"success": False, "message": error_msg}), 500 + except requests.exceptions.ConnectionError as e: + # Handle different types of connection errors + error_details = str(e) + if "Connection refused" in error_details: + error_msg = f"Connection refused - Whisparr is not running on {api_url} or the port is incorrect" + elif "Name or service not known" in error_details or "getaddrinfo failed" in error_details: + error_msg = f"DNS resolution failed - Cannot find host '{urlparse(api_url).hostname}'. Check your URL." + else: + error_msg = f"Connection error - Check if Whisparr is running: {error_details}" + + whisparr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 404 + except requests.exceptions.Timeout: + error_msg = f"Connection timed out - Whisparr took too long to respond" + whisparr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 504 + except requests.exceptions.RequestException as e: + error_msg = f"Connection test failed: {str(e)}" + whisparr_logger.error(error_msg) + return jsonify({"success": False, "message": error_msg}), 500 + +# Function to check if Whisparr is configured +def is_configured(): + """Check if Whisparr API credentials are configured""" + api_keys = keys_manager.load_api_keys("whisparr") + return api_keys.get("api_url") and api_keys.get("api_key") + +@whisparr_bp.route('/versions', methods=['GET']) +def get_versions(): + """Get the version information from the Whisparr API""" + try: + # Get all configured instances + api_keys = keys_manager.load_api_keys("whisparr") + instances = api_keys.get("instances", []) + + if not instances: + return jsonify({"success": False, "message": "No Whisparr instances configured"}), 404 + + results = [] + for instance in instances: + if not instance.get("enabled", True): + continue + + api_url = instance.get("api_url") + api_key = instance.get("api_key") + instance_name = 
instance.get("name", "Default") + + if not api_url or not api_key: + results.append({ + "name": instance_name, + "success": False, + "message": "API URL or API Key missing" + }) + continue + + # First try standard API endpoint + version_url = f"{api_url.rstrip('/')}/api/system/status" + headers = {"X-Api-Key": api_key} + + try: + response = requests.get(version_url, headers=headers, timeout=10) + + # If we get a 404, try with the v3 path + if response.status_code == 404: + whisparr_logger.debug(f"Standard API path failed for {instance_name}, trying v3 path") + v3_url = f"{api_url.rstrip('/')}/api/v3/system/status" + response = requests.get(v3_url, headers=headers, timeout=10) + + if response.status_code == 200: + version_data = response.json() + version = version_data.get("version", "Unknown") + + # Validate that it's a V2 version + if version and version.startswith('2'): + results.append({ + "name": instance_name, + "success": True, + "version": version, + "is_v2": True + }) + elif version and version.startswith('3'): + # Reject Eros API version + results.append({ + "name": instance_name, + "success": False, + "message": f"Incompatible Whisparr version {version} detected. Huntarr requires Whisparr V2.", + "version": version + }) + else: + # Unexpected version + results.append({ + "name": instance_name, + "success": False, + "message": f"Unexpected Whisparr version {version} detected. 
Huntarr requires Whisparr V2.", + "version": version + }) + else: + # API call failed + results.append({ + "name": instance_name, + "success": False, + "message": f"Failed to get version information: HTTP {response.status_code}" + }) + except requests.exceptions.RequestException as e: + results.append({ + "name": instance_name, + "success": False, + "message": f"Connection error: {str(e)}" + }) + + return jsonify({"success": True, "results": results}) + except Exception as e: + whisparr_logger.error(f"Error getting Whisparr versions: {str(e)}") + return jsonify({"success": False, "message": str(e)}), 500 + +@whisparr_bp.route('/logs', methods=['GET']) +def get_logs(): + """Get the log file for Whisparr""" + try: + # Get the log file path + log_file = APP_LOG_FILES.get("whisparr") + + if not log_file or not os.path.exists(log_file): + return jsonify({"success": False, "message": "Log file not found"}), 404 + + # Read the log file (last 200 lines) + with open(log_file, 'r') as f: + lines = f.readlines() + log_content = ''.join(lines[-200:]) + + return jsonify({"success": True, "logs": log_content}) + except Exception as e: + error_message = f"Error fetching Whisparr logs: {str(e)}" + whisparr_logger.error(error_message) + traceback.print_exc() + return jsonify({"success": False, "message": error_message}), 500 + +@whisparr_bp.route('/clear-processed', methods=['POST']) +def clear_processed(): + """Clear the processed missing and upgrade files for Whisparr""" + try: + # Reset missing items state file + whisparr_logger.info("Clearing processed missing items state") + reset_state_file("whisparr", "processed_missing") + + # Reset upgrade state file + whisparr_logger.info("Clearing processed quality upgrade state") + reset_state_file("whisparr", "processed_upgrades") + + return jsonify({ + "success": True, + "message": "Successfully cleared Whisparr processed state" + }) + except Exception as e: + error_message = f"Error clearing Whisparr processed state: {str(e)}" + 
whisparr_logger.error(error_message) + return jsonify({"success": False, "message": error_message}), 500 \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/auth.py b/Huntarr.io-6.3.6/src/primary/auth.py new file mode 100644 index 0000000..01f320c --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/auth.py @@ -0,0 +1,554 @@ +#!/usr/bin/env python3 +""" +Authentication module for Huntarr +Handles user creation, verification, and session management +Including two-factor authentication +""" + +import os +import json +import hashlib +import secrets +import time +import pathlib +import base64 +import io +import qrcode +import pyotp # Ensure pyotp is imported +import re # Import the re module for regex +from typing import Dict, Any, Optional, Tuple +from flask import request, redirect, url_for, session +from .utils.logger import logger # Ensure logger is imported + +# User directory setup +USER_DIR = pathlib.Path("/config/user") +USER_DIR.mkdir(parents=True, exist_ok=True) +USER_FILE = USER_DIR / "credentials.json" + +# Session settings +SESSION_EXPIRY = 60 * 60 * 24 * 7 # 1 week in seconds +SESSION_COOKIE_NAME = "huntarr_session" + +# Store active sessions +active_sessions = {} + +# --- Add Helper functions for user data --- +def get_user_data() -> Dict[str, Any]: + """Load user data from the credentials file.""" + if not USER_FILE.exists(): + logger.warning(f"Attempted to get user data, but file not found: {USER_FILE}") + return {} + try: + with open(USER_FILE, 'r') as f: + return json.load(f) + except json.JSONDecodeError: + logger.error(f"Error decoding JSON from user file: {USER_FILE}") + return {} + except Exception as e: + logger.error(f"Error reading user file {USER_FILE}: {e}", exc_info=True) + return {} + +def save_user_data(user_data: Dict[str, Any]) -> bool: + """Save user data to the credentials file.""" + try: + logger.debug(f"Attempting to save user data to: {USER_FILE}") + # Ensure directory exists (though it should from startup) + 
USER_DIR.mkdir(parents=True, exist_ok=True) + + with open(USER_FILE, 'w') as f: + json.dump(user_data, f, indent=4) # Add indent for readability + + # Set permissions after writing + try: + os.chmod(USER_FILE, 0o644) + logger.debug(f"Set permissions 0o644 on {USER_FILE}") + except Exception as e_perm: + logger.warning(f"Could not set permissions on file {USER_FILE}: {e_perm}") + + logger.info(f"User data saved successfully to {USER_FILE}") + return True + except Exception as e: + logger.error(f"Error saving user file {USER_FILE}: {e}", exc_info=True) + return False +# --- End Helper functions --- + +def hash_password(password: str) -> str: + """Hash a password for storage""" + # Use SHA-256 with a salt + salt = secrets.token_hex(16) + pw_hash = hashlib.sha256((password + salt).encode()).hexdigest() + return f"{salt}:{pw_hash}" + +def verify_password(stored_password: str, provided_password: str) -> bool: + """Verify a password against its hash""" + try: + salt, pw_hash = stored_password.split(':', 1) + verify_hash = hashlib.sha256((provided_password + salt).encode()).hexdigest() + return secrets.compare_digest(verify_hash, pw_hash) + except Exception as e: + logger.error(f"Error verifying password hash: {e}", exc_info=True) + return False + +def hash_username(username: str) -> str: + """Create a normalized hash of the username""" + # Convert to lowercase and hash + return hashlib.sha256(username.lower().encode()).hexdigest() + +def validate_password_strength(password: str) -> Optional[str]: + """Validate password strength based on defined criteria. + + Args: + password: The password string to validate. + + Returns: + An error message string if validation fails, None otherwise. + """ + if len(password) < 8: + return "Password must be at least 8 characters long." 
+ + # If check passes + return None + +def user_exists() -> bool: + """Check if a user has been created""" + return USER_FILE.exists() and os.path.getsize(USER_FILE) > 0 + +def create_user(username: str, password: str) -> bool: + """Create a new user""" + if not username or not password: + logger.error("Attempted to create user with empty username or password") + return False + + # Ensure user directory exists with proper permissions + logger.info(f"Ensuring user directory exists: {USER_DIR}") + USER_DIR.mkdir(parents=True, exist_ok=True) + try: + # Set appropriate permissions if not running as root + logger.info(f"Setting permissions on directory: {USER_DIR}") + os.chmod(USER_DIR, 0o755) + except Exception as e: + logger.warning(f"Could not set permissions on directory {USER_DIR}: {e}") + + # Hash the username and password + username_hash = hash_username(username) + password_hash = hash_password(password) + + # Store the credentials + user_data = { + "username": username_hash, + "password": password_hash, + "created_at": time.time(), + "2fa_enabled": False, + "2fa_secret": None + } + + try: + logger.info(f"Writing user file: {USER_FILE}") + with open(USER_FILE, 'w') as f: + json.dump(user_data, f) + # Set appropriate permissions on the file + try: + logger.info(f"Setting permissions on file: {USER_FILE}") + os.chmod(USER_FILE, 0o644) + except Exception as e: + logger.warning(f"Could not set permissions on file {USER_FILE}: {e}") + logger.info("User creation successful") + return True + except Exception as e: + logger.error(f"Error creating user file {USER_FILE}: {e}", exc_info=True) + return False + +def verify_user(username: str, password: str, otp_code: str = None) -> Tuple[bool, bool]: + """ + Verify user credentials + + Returns: + Tuple[bool, bool]: (auth_success, needs_2fa) + """ + if not user_exists(): + logger.warning("Login attempt failed: User does not exist.") + return False, False + + try: + with open(USER_FILE, 'r') as f: + user_data = json.load(f) + + 
# Hash the provided username + username_hash = hash_username(username) + + # Compare username and verify password + if user_data.get("username") == username_hash: + if verify_password(user_data.get("password", ""), password): + # Check if 2FA is enabled + two_fa_enabled = user_data.get("2fa_enabled", False) + logger.debug(f"2FA enabled for user '{username}': {two_fa_enabled}") + logger.debug(f"2FA secret present: {bool(user_data.get('2fa_secret'))}") + logger.debug(f"OTP code provided: {bool(otp_code)}") + + if two_fa_enabled: + # If 2FA code was provided, verify it + if otp_code: + totp = pyotp.TOTP(user_data.get("2fa_secret")) + valid_code = totp.verify(otp_code) + logger.debug(f"OTP code validation result: {valid_code}") + if valid_code: + logger.info(f"User '{username}' authenticated successfully with 2FA.") + return True, False + else: + logger.warning(f"Login attempt failed for user '{username}': Invalid 2FA code.") + return False, True + else: + # No OTP code provided but 2FA is enabled + logger.warning(f"Login attempt failed for user '{username}': 2FA code required but not provided.") + logger.debug("Returning needs_2fa=True to trigger 2FA input display") + return False, True + else: + # 2FA not enabled, password is correct + logger.info(f"User '{username}' authenticated successfully (no 2FA).") + return True, False + else: + logger.warning(f"Login attempt failed for user '{username}': Invalid password.") + return False, False + except Exception as e: + logger.error(f"Error during user verification for '{username}': {e}", exc_info=True) + + logger.warning(f"Login attempt failed for user '{username}': Username not found or other error.") + return False, False + +def create_session(username: str) -> str: + """Create a new session for an authenticated user""" + session_id = secrets.token_hex(32) + # Store the actual username, not the hash + + # Store session data + active_sessions[session_id] = { + "username": username, # Store actual username + "created_at": 
time.time(), + "expires_at": time.time() + SESSION_EXPIRY + } + + return session_id + +def verify_session(session_id: str) -> bool: + """Verify if a session is valid""" + if not session_id or session_id not in active_sessions: + return False + + session_data = active_sessions[session_id] + + # Check if session has expired + if session_data.get("expires_at", 0) < time.time(): + # Clean up expired session + del active_sessions[session_id] + return False + + # Extend session expiry + active_sessions[session_id]["expires_at"] = time.time() + SESSION_EXPIRY + return True + +def get_username_from_session(session_id: str) -> Optional[str]: + """Get the username from a session""" + if not session_id or session_id not in active_sessions: + return None + + # Return the stored username + return active_sessions[session_id].get("username") + +def authenticate_request(): + """Flask route decorator to check if user is authenticated""" + # If no user exists, redirect to setup + if not user_exists(): + if request.path != "/setup" and not request.path.startswith(("/static/", "/api/setup")): + return redirect("/setup") + return None + + # Skip authentication for static files and the login/setup pages + if request.path.startswith(("/static/", "/login", "/api/login", "/setup", "/api/setup")) or request.path == "/favicon.ico": + return None + + # Check if the request is from a local network and bypass authentication if enabled + # Get configuration setting for local network bypass + local_access_bypass = False + try: + # Force reload settings from disk to ensure we have the latest + from src.primary.settings_manager import load_settings + from src.primary import settings_manager + + # Ensure we're getting fresh settings by clearing any cache + if hasattr(settings_manager, 'settings_cache'): + settings_manager.settings_cache = {} + + settings = load_settings("general") # Specify 'general' as the app_type + general_settings = settings + local_access_bypass = 
general_settings.get("local_access_bypass", False) + logger.info(f"Local access bypass setting: {local_access_bypass}") + + # Debug print all general settings + logger.debug(f"All general settings: {general_settings}") + except Exception as e: + logger.error(f"Error loading local access bypass setting: {e}", exc_info=True) + + remote_addr = request.remote_addr + logger.info(f"Request IP address: {remote_addr}") + + if local_access_bypass: + # Common local network IP ranges + local_networks = [ + '127.0.0.1', # localhost + '::1', # localhost IPv6 + '10.', # 10.0.0.0/8 + '172.16.', # 172.16.0.0/12 + '172.17.', + '172.18.', + '172.19.', + '172.20.', + '172.21.', + '172.22.', + '172.23.', + '172.24.', + '172.25.', + '172.26.', + '172.27.', + '172.28.', + '172.29.', + '172.30.', + '172.31.', + '192.168.' # 192.168.0.0/16 + ] + is_local = False + + # Check if request is coming through a proxy + forwarded_for = request.headers.get('X-Forwarded-For') + if forwarded_for: + logger.debug(f"X-Forwarded-For header detected: {forwarded_for}") + # Take the first IP in the chain which is typically the client's real IP + possible_client_ip = forwarded_for.split(',')[0].strip() + logger.debug(f"Checking if forwarded IP {possible_client_ip} is local") + + # Check if this forwarded IP is a local network IP + for network in local_networks: + if possible_client_ip == network or (network.endswith('.') and possible_client_ip.startswith(network)): + is_local = True + logger.info(f"Forwarded IP {possible_client_ip} is a local network IP (matches {network})") + break + + # Check if direct remote_addr is a local network IP if not already determined + if not is_local: + for network in local_networks: + if remote_addr == network or (network.endswith('.') and remote_addr.startswith(network)): + is_local = True + logger.info(f"Direct IP {remote_addr} is a local network IP (matches {network})") + break + + if is_local: + logger.info(f"Local network access from {remote_addr} - Authentication 
bypassed!") + return None + else: + logger.warning(f"Access from {remote_addr} is not recognized as local network - Authentication required") + else: + logger.info("Local access bypass is DISABLED - Authentication required") + + # Check for valid session + session_id = session.get(SESSION_COOKIE_NAME) + if session_id and verify_session(session_id): + return None + + # No valid session, redirect to login + if request.path != "/login" and not request.path.startswith("/api/"): + return redirect("/login") + + # For API calls, return 401 Unauthorized + if request.path.startswith("/api/"): + return {"error": "Unauthorized"}, 401 + + return None + +def logout(session_id: str): + """Log out the current user by invalidating their session""" + if session_id and session_id in active_sessions: + del active_sessions[session_id] + + # Clear the session cookie in Flask context (if available, otherwise handled by route) + # session.pop(SESSION_COOKIE_NAME, None) # This might be better handled solely in the route + +def is_2fa_enabled(username): + """Check if 2FA is enabled for a user.""" + user_data = get_user_data() + return user_data.get('2fa_enabled', False) + +def generate_2fa_secret(username: str) -> Tuple[str, str]: + """ + Generate a new 2FA secret and QR code + + Returns: + Tuple[str, str]: (secret, qr_code_data_uri) + """ + # Generate a random secret + secret = pyotp.random_base32() + + # Create a TOTP object + totp = pyotp.TOTP(secret) + + # Get the provisioning URI - Use the actual username here + uri = totp.provisioning_uri(name=username, issuer_name="Huntarr") + + # Generate QR code + qr = qrcode.QRCode( + version=1, + error_correction=qrcode.constants.ERROR_CORRECT_L, + box_size=10, + border=4, + ) + qr.add_data(uri) + qr.make(fit=True) + + try: + img = qr.make_image(fill_color="black", back_color="white") + + # Convert to base64 string + buffered = io.BytesIO() + img.save(buffered, format="PNG") + img_str = base64.b64encode(buffered.getvalue()).decode() + + # Store 
the secret temporarily associated with the user + user_data = get_user_data() + user_data["temp_2fa_secret"] = secret + if save_user_data(user_data): + logger.info(f"Generated temporary 2FA secret for user '{username}'.") + return secret, f"data:image/png;base64,{img_str}" + else: + logger.error(f"Failed to save temporary 2FA secret for user '{username}'.") + raise Exception("Failed to save user data with temporary 2FA secret.") + + except Exception as e: + logger.error(f"Error generating 2FA QR code for user '{username}': {e}", exc_info=True) + raise + +def verify_2fa_code(username: str, code: str, enable_on_verify: bool = False) -> bool: + """Verify a 2FA code against the temporary secret""" + user_data = get_user_data() + temp_secret = user_data.get("temp_2fa_secret") + + if not temp_secret: + logger.warning(f"2FA verification attempt for '{username}' failed: No temporary secret found.") + return False + + totp = pyotp.TOTP(temp_secret) + if totp.verify(code): + logger.info(f"2FA code verified successfully for user '{username}'.") + if enable_on_verify: + user_data["2fa_enabled"] = True + user_data["2fa_secret"] = temp_secret + user_data.pop("temp_2fa_secret", None) + if save_user_data(user_data): + logger.info(f"2FA enabled permanently for user '{username}'.") + else: + logger.error(f"Failed to save user data after enabling 2FA for '{username}'.") + return False + return True + else: + logger.warning(f"Invalid 2FA code provided by user '{username}'.") + return False + +def disable_2fa(password: str) -> bool: + """Disable 2FA for the current user (using only password - kept for potential other uses)""" + user_data = get_user_data() + + # Verify password + if verify_password(user_data.get("password", ""), password): + user_data["2fa_enabled"] = False + user_data["2fa_secret"] = None + if save_user_data(user_data): + logger.info("2FA disabled successfully (password only).") + return True + else: + logger.error("Failed to save user data after disabling 2FA 
(password only).") + return False + else: + logger.warning("Failed to disable 2FA (password only): Invalid password provided.") + return False + +def disable_2fa_with_password_and_otp(username: str, password: str, otp_code: str) -> bool: + """Disable 2FA for the specified user, requiring both password and OTP code.""" + user_data = get_user_data() # Assuming this gets data for the logged-in user implicitly + + # 1. Verify Password + if not verify_password(user_data.get("password", ""), password): + logger.warning(f"Failed to disable 2FA for '{username}': Invalid password provided.") + return False + + # 2. Verify OTP Code against permanent secret + perm_secret = user_data.get("2fa_secret") + if not user_data.get("2fa_enabled") or not perm_secret: + logger.error(f"Failed to disable 2FA for '{username}': 2FA is not enabled or secret missing.") + # Should ideally not happen if called from the correct UI state, but good to check + return False + + totp = pyotp.TOTP(perm_secret) + if not totp.verify(otp_code): + logger.warning(f"Failed to disable 2FA for '{username}': Invalid OTP code provided.") + return False + + # 3. 
Both verified, proceed to disable + user_data["2fa_enabled"] = False + user_data["2fa_secret"] = None + if save_user_data(user_data): + logger.info(f"2FA disabled successfully for '{username}' after verifying password and OTP.") + return True + else: + logger.error(f"Failed to save user data after disabling 2FA for '{username}'.") + return False + +def change_username(current_username: str, new_username: str, password: str) -> bool: + """Change the username for the current user""" + user_data = get_user_data() + + # Verify current username and password + current_username_hash = hash_username(current_username) + if user_data.get("username") != current_username_hash: + logger.warning(f"Username change failed: Current username '{current_username}' does not match stored hash.") + return False + + if not verify_password(user_data.get("password", ""), password): + logger.warning(f"Username change failed for '{current_username}': Invalid password provided.") + return False + + # Update username + user_data["username"] = hash_username(new_username) + if save_user_data(user_data): + logger.info(f"Username changed successfully from '{current_username}' to '{new_username}'.") + return True + else: + logger.error(f"Failed to save user data after changing username for '{current_username}'.") + return False + +def change_password(current_password: str, new_password: str) -> bool: + """Change the password for the current user""" + user_data = get_user_data() + + # Verify current password + if not verify_password(user_data.get("password", ""), current_password): + logger.warning("Password change failed: Invalid current password provided.") + return False + + # Update password + user_data["password"] = hash_password(new_password) + if save_user_data(user_data): + logger.info("Password changed successfully.") + return True + else: + logger.error("Failed to save user data after changing password.") + return False + +def get_app_url_and_key(app_type: str) -> Tuple[str, str]: + """ + 
Get the API URL and API key for a specific app type + + Args: + app_type: The app type (sonarr, radarr, lidarr, readarr) + + Returns: + Tuple[str, str]: (api_url, api_key) + """ + from primary import keys_manager + return keys_manager.get_api_keys(app_type) \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/background.py b/Huntarr.io-6.3.6/src/primary/background.py new file mode 100644 index 0000000..689771c --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/background.py @@ -0,0 +1,543 @@ +#!/usr/bin/env python3 +""" +Huntarr - Main entry point for the application +Supports multiple Arr applications running concurrently +""" + +import time +import sys +import os +# import socket # No longer used directly +import signal +import importlib +import logging +import threading +from typing import Dict, List, Optional, Callable, Union, Tuple + +# Define the version number +__version__ = "1.0.0" # Consider updating this based on changes + +# Set up logging first +from src.primary.utils.logger import setup_main_logger, get_logger # Import get_logger +logger = setup_main_logger() + +# Import necessary modules +from src.primary import config, settings_manager +# Removed keys_manager import as settings_manager handles API details +from src.primary.state import check_state_reset, calculate_reset_time +# from src.primary.utils.app_utils import get_ip_address # No longer used here + +# Track active threads and stop flag +app_threads: Dict[str, threading.Thread] = {} +stop_event = threading.Event() # Use an event for clearer stop signaling + +def app_specific_loop(app_type: str) -> None: + """ + Main processing loop for a specific Arr application. 
+ + Args: + app_type: The type of Arr application (sonarr, radarr, lidarr, readarr) + """ + app_logger = get_logger(app_type) + app_logger.info(f"=== [{app_type.upper()}] Thread starting ===") + + # Dynamically import app-specific modules + process_missing = None + process_upgrades = None + get_queue_size = None + check_connection = None + get_instances_func = None # Default: No multi-instance function found + hunt_missing_setting = "" + hunt_upgrade_setting = "" + + try: + # Import the main app module first to check for get_configured_instances + app_module = importlib.import_module(f'src.primary.apps.{app_type}') + app_logger.debug(f"Attributes found in {app_module.__name__}: {dir(app_module)}") + api_module = importlib.import_module(f'src.primary.apps.{app_type}.api') + missing_module = importlib.import_module(f'src.primary.apps.{app_type}.missing') + upgrade_module = importlib.import_module(f'src.primary.apps.{app_type}.upgrade') + + # Try to get the multi-instance function from the main app module + try: + get_instances_func = getattr(app_module, 'get_configured_instances') + app_logger.debug(f"Found 'get_configured_instances' in {app_module.__name__}") + except AttributeError: + app_logger.debug(f"'get_configured_instances' not found in {app_module.__name__}. 
Assuming single instance mode.") + get_instances_func = None # Explicitly set to None if not found + + check_connection = getattr(api_module, 'check_connection') + get_queue_size = getattr(api_module, 'get_download_queue_size', lambda api_url, api_key, api_timeout: 0) # Default if not found + + if app_type == "sonarr": + missing_module = importlib.import_module('src.primary.apps.sonarr.missing') + upgrade_module = importlib.import_module('src.primary.apps.sonarr.upgrade') + process_missing = getattr(missing_module, 'process_missing_episodes') + process_upgrades = getattr(upgrade_module, 'process_cutoff_upgrades') + hunt_missing_setting = "hunt_missing_items" + hunt_upgrade_setting = "hunt_upgrade_items" + elif app_type == "radarr": + missing_module = importlib.import_module('src.primary.apps.radarr.missing') + upgrade_module = importlib.import_module('src.primary.apps.radarr.upgrade') + process_missing = getattr(missing_module, 'process_missing_movies') + process_upgrades = getattr(upgrade_module, 'process_cutoff_upgrades') + hunt_missing_setting = "hunt_missing_movies" + hunt_upgrade_setting = "hunt_upgrade_movies" + elif app_type == "lidarr": + missing_module = importlib.import_module('src.primary.apps.lidarr.missing') + upgrade_module = importlib.import_module('src.primary.apps.lidarr.upgrade') + # Use process_missing_albums as the function name + process_missing = getattr(missing_module, 'process_missing_albums') + process_upgrades = getattr(upgrade_module, 'process_cutoff_upgrades') + hunt_missing_setting = "hunt_missing_items" + # Use hunt_upgrade_items + hunt_upgrade_setting = "hunt_upgrade_items" + elif app_type == "readarr": + missing_module = importlib.import_module('src.primary.apps.readarr.missing') + upgrade_module = importlib.import_module('src.primary.apps.readarr.upgrade') + process_missing = getattr(missing_module, 'process_missing_books') + process_upgrades = getattr(upgrade_module, 'process_cutoff_upgrades') + hunt_missing_setting = 
"hunt_missing_books" + hunt_upgrade_setting = "hunt_upgrade_books" + elif app_type == "whisparr": + missing_module = importlib.import_module('src.primary.apps.whisparr.missing') + upgrade_module = importlib.import_module('src.primary.apps.whisparr.upgrade') + process_missing = getattr(missing_module, 'process_missing_scenes') + process_upgrades = getattr(upgrade_module, 'process_cutoff_upgrades') + hunt_missing_setting = "hunt_missing_items" # Updated to new name + hunt_upgrade_setting = "hunt_upgrade_items" # Updated to new name + elif app_type == "eros": + missing_module = importlib.import_module('src.primary.apps.eros.missing') + upgrade_module = importlib.import_module('src.primary.apps.eros.upgrade') + process_missing = getattr(missing_module, 'process_missing_items') + process_upgrades = getattr(upgrade_module, 'process_cutoff_upgrades') + hunt_missing_setting = "hunt_missing_items" + hunt_upgrade_setting = "hunt_upgrade_items" + else: + app_logger.error(f"Unsupported app_type: {app_type}") + return # Exit thread if app type is invalid + + except (ImportError, AttributeError) as e: + app_logger.error(f"Failed to import modules or functions for {app_type}: {e}", exc_info=True) + return # Exit thread if essential modules fail to load + + # Create app-specific logger using provided function + app_logger = logging.getLogger(f"huntarr.{app_type}") + + while not stop_event.is_set(): + # --- Load Settings for this Cycle --- # + try: + # Load all settings for this app for the current cycle + app_settings = settings_manager.load_settings(app_type) # Corrected function name + if not app_settings: # Handle case where loading fails + app_logger.error("Failed to load settings. 
Skipping cycle.") + stop_event.wait(60) # Wait a minute before retrying + continue + + # Get global settings needed for cycle timing + sleep_duration = app_settings.get("sleep_duration", 900) + api_timeout = app_settings.get("api_timeout", 120) # Default to 120 seconds + + except Exception as e: + app_logger.error(f"Error loading settings for cycle: {e}", exc_info=True) + stop_event.wait(60) # Wait before retrying + continue + + # --- State Reset Check --- # + check_state_reset(app_type) + + app_logger.info(f"=== Starting {app_type.upper()} cycle ===") + + # Check if we need to use multi-instance mode + instances_to_process = [] + + # Use the dynamically loaded function (if found) + if get_instances_func: + # Multi-instance mode supported + try: + instances_to_process = get_instances_func() # Call the dynamically loaded function + if instances_to_process: + app_logger.info(f"Found {len(instances_to_process)} configured {app_type} instances to process") + else: + # No instances found via get_configured_instances + app_logger.warning(f"No configured {app_type} instances found. 
Skipping cycle.") + stop_event.wait(sleep_duration) + continue + except Exception as e: + app_logger.error(f"Error calling get_configured_instances function: {e}", exc_info=True) + stop_event.wait(60) + continue + else: + # get_instances_func is None (either not defined in app module or import failed earlier) + # Fallback to single instance mode using base settings if available + api_url = app_settings.get("api_url") + api_key = app_settings.get("api_key") + instance_name = app_settings.get("name", f"{app_type.capitalize()} Default") # Use 'name' or default + + if api_url and api_key: + app_logger.info(f"Processing {app_type} as single instance: {instance_name}") + # Create a list with a single dict matching the multi-instance structure + instances_to_process = [{ + "instance_name": instance_name, + "api_url": api_url, + "api_key": api_key + }] + else: + app_logger.warning(f"No 'get_configured_instances' function found and no valid single instance config (URL/Key) for {app_type}. Skipping cycle.") + stop_event.wait(sleep_duration) + continue + + # If after all checks, instances_to_process is still empty + if not instances_to_process: + app_logger.warning(f"No valid {app_type} instances to process this cycle (unexpected state). 
Skipping.") + stop_event.wait(sleep_duration) + continue + + # Process each instance dictionary returned by get_configured_instances + processed_any_items = False + for instance_details in instances_to_process: + if stop_event.is_set(): + break + + instance_name = instance_details.get("instance_name", "Default") # Use the dict from get_configured_instances + app_logger.info(f"Processing {app_type} instance: {instance_name}") + + # Get instance-specific settings from the instance_details dict + api_url = instance_details.get("api_url", "") + api_key = instance_details.get("api_key", "") + + # Get global/shared settings from app_settings loaded at the start of the loop + # Example: monitored_only = app_settings.get("monitored_only", True) + + # --- Connection Check --- # + if not api_url or not api_key: + app_logger.warning(f"Missing API URL or Key for instance '{instance_name}'. Skipping.") + continue + try: + # Use instance details for connection check + app_logger.debug(f"Checking connection to {app_type} instance '{instance_name}' at {api_url} with timeout {api_timeout}s") + connected = check_connection(api_url, api_key, api_timeout=api_timeout) + if not connected: + app_logger.warning(f"Failed to connect to {app_type} instance '{instance_name}' at {api_url}. 
Skipping.") + continue + app_logger.info(f"Successfully connected to {app_type} instance: {instance_name}") + except Exception as e: + app_logger.error(f"Error connecting to {app_type} instance '{instance_name}': {e}", exc_info=True) + continue # Skip this instance if connection fails + + # --- Check if Hunt Modes are Enabled --- # + # These checks use the hunt_missing_setting/hunt_upgrade_setting defined earlier + # which correspond to keys in the main app_settings dict (e.g., 'hunt_missing_items') + hunt_missing_value = app_settings.get(hunt_missing_setting, 0) + hunt_upgrade_value = app_settings.get(hunt_upgrade_setting, 0) + + hunt_missing_enabled = hunt_missing_value > 0 + hunt_upgrade_enabled = hunt_upgrade_value > 0 + + # --- Queue Size Check --- # Moved inside loop + # Get maximum_download_queue_size from general settings (still using minimum_download_queue_size key for backward compatibility) + general_settings = settings_manager.load_settings('general') + max_queue_size = general_settings.get("minimum_download_queue_size", -1) + app_logger.info(f"Using maximum download queue size: {max_queue_size} from general settings") + + if max_queue_size >= 0: + try: + # Use instance details for queue check + current_queue_size = get_queue_size(api_url, api_key, api_timeout) + if current_queue_size >= max_queue_size: + app_logger.info(f"Download queue size ({current_queue_size}) meets or exceeds maximum ({max_queue_size}) for {instance_name}. Skipping cycle for this instance.") + continue # Skip processing for this instance + else: + app_logger.info(f"Queue size ({current_queue_size}) is below maximum ({max_queue_size}). Proceeding.") + except Exception as e: + app_logger.warning(f"Could not get download queue size for {instance_name}. Proceeding anyway. 
Error: {e}", exc_info=False) # Log less verbosely + + # Prepare args dictionary for processing functions + # Combine instance details with general app settings for the processing functions + # Assuming app_settings already contains most general settings, add instance specifics + combined_settings = app_settings.copy() # Start with general settings + combined_settings.update(instance_details) # Add/overwrite with instance specifics (name, url, key) + + # Ensure settings from general.json are consistently used for all apps + combined_settings["api_timeout"] = settings_manager.get_advanced_setting("api_timeout", 120) + combined_settings["command_wait_delay"] = settings_manager.get_advanced_setting("command_wait_delay", 1) + combined_settings["command_wait_attempts"] = settings_manager.get_advanced_setting("command_wait_attempts", 600) + + # Define the stop check function + stop_check_func = stop_event.is_set + + # --- Process Missing --- # + if hunt_missing_enabled and process_missing: + try: + # Extract settings for direct function calls + api_url = combined_settings.get("api_url", "").strip() + api_key = combined_settings.get("api_key", "").strip() + api_timeout = combined_settings.get("api_timeout", 120) + monitored_only = combined_settings.get("monitored_only", True) + skip_future_episodes = combined_settings.get("skip_future_episodes", True) + skip_series_refresh = combined_settings.get("skip_series_refresh", False) + hunt_missing_items = combined_settings.get("hunt_missing_items", 0) + hunt_missing_mode = combined_settings.get("hunt_missing_mode", "episodes") + command_wait_delay = combined_settings.get("command_wait_delay", 1) + command_wait_attempts = combined_settings.get("command_wait_attempts", 600) + + if app_type == "sonarr": + processed_missing = process_missing( + api_url=api_url, + api_key=api_key, + instance_name=instance_name, # Added the required instance_name parameter + api_timeout=api_timeout, + monitored_only=monitored_only, + 
skip_future_episodes=skip_future_episodes, + skip_series_refresh=skip_series_refresh, + hunt_missing_items=hunt_missing_items, + hunt_missing_mode=hunt_missing_mode, + command_wait_delay=command_wait_delay, + command_wait_attempts=command_wait_attempts, + stop_check=stop_check_func + ) + else: + # For other apps that still use the old signature + processed_missing = process_missing(app_settings=combined_settings, stop_check=stop_check_func) + + if processed_missing: + processed_any_items = True + except Exception as e: + app_logger.error(f"Error during missing processing for {instance_name}: {e}", exc_info=True) + + # --- Process Upgrades --- # + if hunt_upgrade_enabled and process_upgrades: + try: + # Extract settings for direct function calls (only for Sonarr) + if app_type == "sonarr": + api_url = combined_settings.get("api_url", "").strip() + api_key = combined_settings.get("api_key", "").strip() + api_timeout = combined_settings.get("api_timeout", 120) + monitored_only = combined_settings.get("monitored_only", True) + skip_series_refresh = combined_settings.get("skip_series_refresh", False) + hunt_upgrade_items = combined_settings.get("hunt_upgrade_items", 0) + command_wait_delay = combined_settings.get("command_wait_delay", 1) + command_wait_attempts = combined_settings.get("command_wait_attempts", 600) + + processed_upgrades = process_upgrades( + api_url=api_url, + api_key=api_key, + instance_name=instance_name, # Added the required instance_name parameter + api_timeout=api_timeout, + monitored_only=monitored_only, + skip_series_refresh=skip_series_refresh, + hunt_upgrade_items=hunt_upgrade_items, + command_wait_delay=command_wait_delay, + command_wait_attempts=command_wait_attempts, + stop_check=stop_check_func + ) + else: + # For other apps that still use the old signature + processed_upgrades = process_upgrades(app_settings=combined_settings, stop_check=stop_check_func) + + if processed_upgrades: + processed_any_items = True + except Exception as e: + 
app_logger.error(f"Error during upgrade processing for {instance_name}: {e}", exc_info=True) + + # Small delay between instances if needed (optional) + if not stop_event.is_set(): + time.sleep(1) # Short pause + + # --- Process Swaparr (stalled downloads) --- # + try: + # Try to import Swaparr module + if not 'process_stalled_downloads' in locals(): + try: + # Import directly from handler module to avoid circular imports + from src.primary.apps.swaparr.handler import process_stalled_downloads + swaparr_logger = get_logger("swaparr") + swaparr_logger.debug(f"Successfully imported Swaparr module") + except (ImportError, AttributeError) as e: + app_logger.debug(f"Swaparr module not available or missing functions: {e}") + process_stalled_downloads = None + + # Check if Swaparr is enabled + swaparr_settings = settings_manager.load_settings("swaparr") + if swaparr_settings and swaparr_settings.get("enabled", False) and process_stalled_downloads: + app_logger.info(f"Running Swaparr on {app_type} instance: {instance_name}") + process_stalled_downloads(app_type, combined_settings, swaparr_settings) + app_logger.info(f"Completed Swaparr processing for {app_type} instance: {instance_name}") + except Exception as e: + app_logger.error(f"Error during Swaparr processing for {instance_name}: {e}", exc_info=True) + + # --- Cycle End & Sleep --- # + calculate_reset_time(app_type) # Pass app_type here if needed by the function + + # Log cycle completion + if processed_any_items: + app_logger.info(f"=== {app_type.upper()} cycle finished. Processed items across instances. ===") + else: + app_logger.info(f"=== {app_type.upper()} cycle finished. No items processed in any instance. 
===") + + # Calculate sleep duration (use configured or default value) + sleep_seconds = app_settings.get("sleep_duration", 900) # Default to 15 minutes + + # Sleep with periodic checks for reset file + app_logger.info(f"Sleeping for {sleep_seconds} seconds before next cycle...") + + # Use shorter sleep intervals and check for reset file + wait_interval = 1 # Check every second to be more responsive + elapsed = 0 + reset_file_path = f"/config/reset/{app_type}.reset" + + while elapsed < sleep_seconds: + # Check if stop event is set + if stop_event.is_set(): + app_logger.info("Stop event detected during sleep. Breaking out of sleep cycle.") + break + + # Check if reset file exists + if os.path.exists(reset_file_path): + try: + # Read timestamp from the file (if it exists) + with open(reset_file_path, 'r') as f: + timestamp = f.read().strip() + app_logger.info(f"!!! RESET FILE DETECTED !!! Manual cycle reset triggered for {app_type} (timestamp: {timestamp}). Starting new cycle immediately.") + + # Delete the reset file + os.remove(reset_file_path) + app_logger.info(f"Reset file removed for {app_type}. Starting new cycle now.") + break + except Exception as e: + app_logger.error(f"Error processing reset file for {app_type}: {e}", exc_info=True) + # Try to remove the file even if reading failed + try: + os.remove(reset_file_path) + except: + pass + break + + # Sleep for a short interval + stop_event.wait(wait_interval) + elapsed += wait_interval + + # If we've slept for at least 30 seconds, update the logger message every 30 seconds + if elapsed > 0 and elapsed % 30 == 0: + app_logger.info(f"Still sleeping, {sleep_seconds - elapsed} seconds remaining before next cycle...") + + app_logger.info(f"=== [{app_type.upper()}] Thread stopped ====") + +def reset_app_cycle(app_type: str) -> bool: + """ + Trigger a manual reset of an app's cycle. + + Args: + app_type: The type of Arr application (sonarr, radarr, lidarr, readarr, etc.) 
+ + Returns: + bool: True if the reset was triggered, False if the app is not running + """ + logger.info(f"Manual cycle reset requested for {app_type} - Creating reset file") + + # Create a reset file for this app + reset_file_path = f"/config/reset/{app_type}.reset" + try: + with open(reset_file_path, 'w') as f: + f.write(str(int(time.time()))) + logger.info(f"Reset file created for {app_type}. Cycle will reset on next check.") + return True + except Exception as e: + logger.error(f"Error creating reset file for {app_type}: {e}", exc_info=True) + return False + +def start_app_threads(): + """Start threads for all configured and enabled apps.""" + configured_apps_list = settings_manager.get_configured_apps() # Corrected function name + configured_apps = {app: True for app in configured_apps_list} # Convert list to dict format expected below + + for app_type, is_configured in configured_apps.items(): + if is_configured: + # Optional: Add an explicit 'enabled' setting check if desired + # enabled = settings_manager.get_setting(app_type, "enabled", True) + # if not enabled: + # logger.info(f"Skipping {app_type} thread as it is disabled in settings.") + # continue + + if app_type not in app_threads or not app_threads[app_type].is_alive(): + if app_type in app_threads: # If it existed but died + logger.warning(f"{app_type} thread died, restarting...") + del app_threads[app_type] + else: # Starting for the first time + logger.info(f"Starting thread for {app_type}...") + + thread = threading.Thread(target=app_specific_loop, args=(app_type,), name=f"{app_type}-Loop", daemon=True) + app_threads[app_type] = thread + thread.start() + elif app_type in app_threads and app_threads[app_type].is_alive(): + # If app becomes un-configured, stop its thread? Or let it fail connection check? + # For now, let it run and fail connection check. + logger.warning(f"{app_type} is no longer configured. 
Thread will likely stop after failing connection checks.") + # else: # App not configured and no thread running - do nothing + # logger.debug(f"{app_type} is not configured. No thread started.") + pass # Corrected indentation + +def check_and_restart_threads(): + """Check if any threads have died and restart them if the app is still configured.""" + configured_apps_list = settings_manager.get_configured_apps() # Corrected function name + configured_apps = {app: True for app in configured_apps_list} # Convert list to dict format expected below + + for app_type, thread in list(app_threads.items()): + if not thread.is_alive(): + logger.warning(f"{app_type} thread died unexpectedly.") + del app_threads[app_type] # Remove dead thread + # Only restart if it's still configured + if configured_apps.get(app_type, False): + logger.info(f"Restarting thread for {app_type}...") + new_thread = threading.Thread(target=app_specific_loop, args=(app_type,), name=f"{app_type}-Loop", daemon=True) + app_threads[app_type] = new_thread + new_thread.start() + else: + logger.info(f"Not restarting {app_type} thread as it is no longer configured.") + +def shutdown_handler(signum, frame): + """Handle termination signals (SIGINT, SIGTERM).""" + logger.info(f"Received signal {signum}. 
Initiating shutdown...") + stop_event.set() # Signal all threads to stop + +def shutdown_threads(): + """Wait for all threads to finish.""" + logger.info("Waiting for app threads to finish...") + active_thread_list = list(app_threads.values()) + for thread in active_thread_list: + thread.join(timeout=15) # Wait up to 15 seconds per thread + if thread.is_alive(): + logger.warning(f"Thread {thread.name} did not stop gracefully.") + logger.info("All app threads stopped.") + +def start_huntarr(): + """Main entry point for Huntarr background tasks.""" + logger.info(f"--- Starting Huntarr Background Tasks v{__version__} --- ") + + # Perform initial settings migration if specified (e.g., via env var or arg) + if os.environ.get("HUNTARR_RUN_MIGRATION", "false").lower() == "true": + logger.info("Running settings migration from huntarr.json (if found)...") + settings_manager.migrate_from_huntarr_json() + + # Log initial configuration for all known apps + for app_name in settings_manager.KNOWN_APP_TYPES: # Corrected attribute name + try: + config.log_configuration(app_name) + except Exception as e: + logger.error(f"Error logging initial configuration for {app_name}: {e}") + + try: + # Main loop: Start and monitor app threads + while not stop_event.is_set(): + start_app_threads() # Start/Restart threads for configured apps + # check_and_restart_threads() # This is implicitly handled by start_app_threads checking is_alive + stop_event.wait(15) # Check for stop signal every 15 seconds + + except Exception as e: + logger.exception(f"Unexpected error in main monitoring loop: {e}") + finally: + logger.info("Background task main loop exited. 
Shutting down threads...") + if not stop_event.is_set(): + stop_event.set() # Ensure stop is signaled if loop exited unexpectedly + shutdown_threads() + logger.info("--- Huntarr Background Tasks stopped --- ") \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/config.py b/Huntarr.io-6.3.6/src/primary/config.py new file mode 100644 index 0000000..c2e7190 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/config.py @@ -0,0 +1,170 @@ +#!/usr/bin/env python3 +""" +Configuration module for Huntarr +Provides utility functions to access settings via settings_manager +and perform configuration-related tasks like logging. +Removes the old concept of loading a single app's config into global constants. +""" + +import os +import sys +import logging +import traceback +from src.primary import settings_manager +from src.primary.utils.logger import logger, get_logger # Import get_logger + +# Removed global constants like APP_TYPE, API_URL, API_KEY, SLEEP_DURATION etc. +# Settings should be fetched directly using settings_manager when needed. 
+ +# Enable debug logging across the application +# Set to True for detailed logs, False for production +DEBUG_MODE = False # Changed default to False + +# Add a function to get the debug mode from settings +def get_debug_mode(): + """Get the debug mode setting from general settings""" + try: + return settings_manager.get_setting("general", "debug_mode", False) + except Exception: + return False + +# Determine the hunt mode for a specific app +def determine_hunt_mode(app_name: str) -> str: + """Determine the hunt mode for a specific app based on its settings.""" + # Fetch settings directly for the given app + hunt_missing = 0 + hunt_upgrade = 0 + + if app_name == "sonarr": + hunt_missing = settings_manager.get_setting(app_name, "hunt_missing_items", 0) + hunt_upgrade = settings_manager.get_setting(app_name, "hunt_upgrade_items", 0) + elif app_name == "radarr": + hunt_missing = settings_manager.get_setting(app_name, "hunt_missing_movies", 0) + hunt_upgrade = settings_manager.get_setting(app_name, "hunt_upgrade_movies", 0) + elif app_name.lower() == 'lidarr': + # Use hunt_missing_items instead of hunt_missing_albums + hunt_missing = settings_manager.get_setting(app_name, "hunt_missing_items", 0) + # Use hunt_upgrade_items instead of hunt_upgrade_albums + hunt_upgrade = settings_manager.get_setting(app_name, "hunt_upgrade_items", 0) + + # For Lidarr, also include the hunt_missing_mode + hunt_missing_mode = settings_manager.get_setting(app_name, "hunt_missing_mode", "artist") + elif app_name == "readarr": + hunt_missing = settings_manager.get_setting(app_name, "hunt_missing_books", 0) + hunt_upgrade = settings_manager.get_setting(app_name, "hunt_upgrade_books", 0) + else: + # Handle unknown app types if necessary, or just return disabled + return "disabled" + + # Determine mode based on fetched values + if hunt_missing > 0 and hunt_upgrade > 0: + return "both" + elif hunt_missing > 0: + return "missing" + elif hunt_upgrade > 0: + return "upgrade" + else: + return 
"disabled" + +# Configure logging level based on an app's debug setting +def configure_logging(app_name: str = None): + """Configure logging level based on the debug setting of a specific app or globally.""" + try: + debug_mode = get_debug_mode() + log_instance = logger # Default to the main logger + + if app_name: + debug_mode = settings_manager.get_setting(app_name, "debug_mode", False) + log_instance = get_logger(app_name) # Get the specific app logger + # else: # Optional: Could check a global debug setting if needed + # debug_mode = settings_manager.get_setting("global", "debug_mode", False) + + level = logging.DEBUG if debug_mode else logging.INFO + + # Configure the specific app logger + if app_name and log_instance: + log_instance.setLevel(level) + + # Always configure the root logger as well (or adjust based on desired behavior) + # If you want root logger level controlled by a specific app, this needs refinement. + # For now, let's set the root logger based on the *last* app configured or global. 
+ root_logger = logging.getLogger() + root_logger.setLevel(level) + + # Optional: Configure handlers if not done elsewhere + # Example: Ensure handlers exist and set their level + # for handler in log_instance.handlers: + # handler.setLevel(level) + # for handler in root_logger.handlers: + # handler.setLevel(level) + + except Exception as e: + print(f"CRITICAL ERROR in configure_logging for app '{app_name}': {str(e)}", file=sys.stderr) + print(f"Traceback: {traceback.format_exc()}", file=sys.stderr) + # Try to log it anyway + if logger: + logger.error(f"Error in configure_logging for app '{app_name}': {str(e)}") + logger.error(traceback.format_exc()) + # Decide whether to raise or continue + # raise + +# Log the configuration for a specific app +def log_configuration(app_name: str): + """Log the current configuration settings for a specific app.""" + log = get_logger(app_name) # Use the specific app's logger + settings = settings_manager.load_settings(app_name) # Corrected function name + + if not settings: + log.error(f"Could not load settings for app: {app_name}. 
Cannot log configuration.") + return + + api_url = settings.get("api_url", "") + api_key = settings.get("api_key", "") + debug_mode = settings.get("debug_mode", False) + sleep_duration = settings.get("sleep_duration", 900) + # Get state reset interval + state_reset_interval = settings_manager.get_advanced_setting("stateful_management_hours", 168) + monitored_only = settings.get("monitored_only", True) + min_queue_size = settings.get("minimum_download_queue_size", -1) + + log.info(f"--- Configuration for {app_name} ---") + log.info(f"API URL: {api_url}") + log.info(f"API Key: {'[REDACTED]' if api_key else 'Not Set'}") + log.info(f"Debug Mode: {debug_mode}") + log.info(f"Hunt Mode: {determine_hunt_mode(app_name)}") + log.info(f"Sleep Duration: {sleep_duration} seconds") + log.info(f"State Reset Interval: {state_reset_interval} hours") + log.info(f"Monitored Only: {monitored_only}") + log.info(f"Maximum Download Queue Size: {settings.get('minimum_download_queue_size', -1)}") + + # App-specific settings logging + if app_name == "sonarr": + log.info(f"Hunt Missing Items: {settings.get('hunt_missing_items', 0)}") + log.info(f"Hunt Upgrade Items: {settings.get('hunt_upgrade_items', 0)}") + log.info(f"Skip Future Episodes: {settings.get('skip_future_episodes', True)}") + log.info(f"Skip Series Refresh: {settings.get('skip_series_refresh', False)}") + elif app_name == "radarr": + log.info(f"Hunt Missing Movies: {settings.get('hunt_missing_movies', 0)}") + log.info(f"Hunt Upgrade Movies: {settings.get('hunt_upgrade_movies', 0)}") + log.info(f"Skip Future Releases: {settings.get('skip_future_releases', True)}") + log.info(f"Skip Movie Refresh: {settings.get('skip_movie_refresh', False)}") + elif app_name.lower() == 'lidarr': + log.info(f"Mode: {settings.get('hunt_missing_mode', 'artist')}") + log.info(f"Hunt Missing Items: {settings.get('hunt_missing_items', 0)}") + # Use hunt_upgrade_items + log.info(f"Hunt Upgrade Items: {settings.get('hunt_upgrade_items', 0)}") + 
log.info(f"Sleep Duration: {settings.get('sleep_duration', 900)} seconds") + log.info(f"State Reset Interval: {state_reset_interval} hours") + log.info(f"Monitored Only: {settings.get('monitored_only', True)}") + log.info(f"Maximum Download Queue Size: {settings.get('minimum_download_queue_size', -1)}") + elif app_name == "readarr": + log.info(f"Hunt Missing Books: {settings.get('hunt_missing_books', 0)}") + log.info(f"Hunt Upgrade Books: {settings.get('hunt_upgrade_books', 0)}") + log.info(f"Skip Future Releases: {settings.get('skip_future_releases', True)}") + log.info(f"Skip Author Refresh: {settings.get('skip_author_refresh', False)}") + log.info(f"--- End Configuration for {app_name} ---") + +# Removed refresh_settings function - settings are loaded dynamically by settings_manager + +# Initial logging configuration (optional, could be done in main startup) +# configure_logging() # Configure root logger based on global/default debug setting if desired \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/default_configs/eros.json b/Huntarr.io-6.3.6/src/primary/default_configs/eros.json new file mode 100644 index 0000000..dc5659a --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/default_configs/eros.json @@ -0,0 +1,18 @@ +{ + "instances": [ + { + "name": "Default", + "api_url": "", + "api_key": "", + "enabled": true + } + ], + "hunt_missing_items": 1, + "hunt_upgrade_items": 0, + "sleep_duration": 900, + "monitored_only": true, + "skip_series_refresh": true, + "skip_future_releases": true, + "skip_scene_refresh": true, + "search_mode": "movie" +} diff --git a/Huntarr.io-6.3.6/src/primary/default_configs/general.json b/Huntarr.io-6.3.6/src/primary/default_configs/general.json new file mode 100644 index 0000000..ab41cfe --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/default_configs/general.json @@ -0,0 +1,14 @@ +{ + "debug_mode": false, + "log_refresh_interval_seconds": 30, + "ui_theme": "dark", + "check_for_updates": true, + 
"enable_notifications": false, + "notification_level": "info", + "local_access_bypass": false, + "stateful_management_hours": 168, + "command_wait_delay": 1, + "command_wait_attempts": 600, + "minimum_download_queue_size": -1, + "api_timeout": 120 +} \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/default_configs/lidarr.json b/Huntarr.io-6.3.6/src/primary/default_configs/lidarr.json new file mode 100644 index 0000000..1e71955 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/default_configs/lidarr.json @@ -0,0 +1,17 @@ +{ + "instances": [ + { + "name": "Default", + "api_url": "", + "api_key": "", + "enabled": true + } + ], + "hunt_missing_mode": "artist", + "hunt_missing_items": 1, + "hunt_upgrade_items": 0, + "sleep_duration": 900, + "monitored_only": true, + "skip_future_releases": true, + "skip_artist_refresh": true +} diff --git a/Huntarr.io-6.3.6/src/primary/default_configs/radarr.json b/Huntarr.io-6.3.6/src/primary/default_configs/radarr.json new file mode 100644 index 0000000..d7a7e8f --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/default_configs/radarr.json @@ -0,0 +1,16 @@ +{ + "instances": [ + { + "name": "Default", + "api_url": "", + "api_key": "", + "enabled": true + } + ], + "hunt_missing_movies": 1, + "hunt_upgrade_movies": 0, + "sleep_duration": 900, + "monitored_only": true, + "skip_future_releases": true, + "skip_movie_refresh": true +} diff --git a/Huntarr.io-6.3.6/src/primary/default_configs/readarr.json b/Huntarr.io-6.3.6/src/primary/default_configs/readarr.json new file mode 100644 index 0000000..c6292ae --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/default_configs/readarr.json @@ -0,0 +1,16 @@ +{ + "instances": [ + { + "name": "Default", + "api_url": "", + "api_key": "", + "enabled": true + } + ], + "hunt_missing_books": 1, + "hunt_upgrade_books": 0, + "sleep_duration": 900, + "monitored_only": true, + "skip_future_releases": true, + "skip_author_refresh": true +} diff --git 
a/Huntarr.io-6.3.6/src/primary/default_configs/sonarr.json b/Huntarr.io-6.3.6/src/primary/default_configs/sonarr.json new file mode 100644 index 0000000..86476ca --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/default_configs/sonarr.json @@ -0,0 +1,17 @@ +{ + "instances": [ + { + "name": "Default", + "api_url": "", + "api_key": "", + "enabled": true + } + ], + "hunt_missing_items": 1, + "hunt_upgrade_items": 0, + "hunt_missing_mode": "episodes", + "sleep_duration": 900, + "monitored_only": true, + "skip_future_episodes": true, + "skip_series_refresh": true +} diff --git a/Huntarr.io-6.3.6/src/primary/default_configs/swaparr.json b/Huntarr.io-6.3.6/src/primary/default_configs/swaparr.json new file mode 100644 index 0000000..396352a --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/default_configs/swaparr.json @@ -0,0 +1,8 @@ +{ + "enabled": false, + "max_strikes": 3, + "max_download_time": "2h", + "ignore_above_size": "25GB", + "remove_from_client": true, + "dry_run": false +} diff --git a/Huntarr.io-6.3.6/src/primary/default_configs/whisparr.json b/Huntarr.io-6.3.6/src/primary/default_configs/whisparr.json new file mode 100644 index 0000000..34eb7c1 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/default_configs/whisparr.json @@ -0,0 +1,17 @@ +{ + "instances": [ + { + "name": "Default", + "api_url": "", + "api_key": "", + "enabled": true + } + ], + "hunt_missing_items": 1, + "hunt_upgrade_items": 0, + "sleep_duration": 900, + "monitored_only": true, + "skip_series_refresh": true, + "skip_future_releases": true, + "skip_scene_refresh": true +} \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/history_manager.py b/Huntarr.io-6.3.6/src/primary/history_manager.py new file mode 100644 index 0000000..ab42e2e --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/history_manager.py @@ -0,0 +1,471 @@ +import os +import json +import time +from datetime import datetime +import threading +import logging +import pathlib + +# Create a logger +logger = 
logging.getLogger(__name__) + +# Path will be /config/history in production +HISTORY_BASE_PATH = pathlib.Path("/config/history") + +# Lock to prevent race conditions during file operations +history_locks = { + "sonarr": threading.Lock(), + "radarr": threading.Lock(), + "lidarr": threading.Lock(), + "readarr": threading.Lock(), + "whisparr": threading.Lock(), + "eros": threading.Lock(), + "swaparr": threading.Lock() +} + +def ensure_history_dir(): + """Ensure the history directory exists with app-specific subdirectories""" + try: + # Create base directory + HISTORY_BASE_PATH.mkdir(exist_ok=True, parents=True) + + # Create app-specific directories + for app in history_locks.keys(): + app_dir = HISTORY_BASE_PATH / app + app_dir.mkdir(exist_ok=True, parents=True) + + return True + except Exception as e: + logger.error(f"Failed to create history directory: {str(e)}") + return False + +def get_history_file_path(app_type, instance_name=None): + """Get the appropriate history file path based on app type and instance name""" + # If no instance name is provided, use "Default" + if instance_name is None: + instance_name = "Default" + + # Create safe filename from instance name (same as in stateful_manager.py) + safe_instance_name = "".join([c if c.isalnum() else "_" for c in instance_name]) + return HISTORY_BASE_PATH / app_type / f"{safe_instance_name}.json" + +def add_history_entry(app_type, entry_data): + """ + Add a new history entry + + Parameters: + - app_type: str - The app type (sonarr, radarr, etc) + - entry_data: dict with required fields: + - name: str - Name of processed content + - instance_name: str - Name of the instance + - id: str - ID of the processed content + """ + if not ensure_history_dir(): + logger.error("Could not ensure history directory exists") + return None + + if app_type not in history_locks: + logger.error(f"Invalid app type: {app_type}") + return None + + required_fields = ["name", "instance_name", "id"] + for field in required_fields: + if 
field not in entry_data: + logger.error(f"Missing required field: {field}") + return None + + # Log the instance name for debugging + instance_name = entry_data["instance_name"] + logger.debug(f"Adding history entry for {app_type} with instance_name: '{instance_name}'") + + # Create the entry with timestamp + timestamp = int(time.time()) + entry = { + "date_time": timestamp, + "date_time_readable": datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S'), + "processed_info": entry_data["name"], + "id": entry_data["id"], + "instance_name": instance_name, # Use the instance_name we extracted above + "operation_type": entry_data.get("operation_type", "missing"), # Default to "missing" if not specified + "app_type": app_type # Include app_type in the entry for display in UI + } + + history_file = get_history_file_path(app_type, instance_name) + logger.debug(f"Writing to history file: {history_file}") + + # Make sure the parent directory exists + history_file.parent.mkdir(exist_ok=True, parents=True) + + # Thread-safe file operation + with history_locks[app_type]: + try: + if history_file.exists(): + with open(history_file, 'r') as f: + history_data = json.load(f) + else: + history_data = [] + except (json.JSONDecodeError, FileNotFoundError): + # If file doesn't exist or is corrupt, start with empty list + history_data = [] + + # Add new entry at the beginning for most recent first + history_data.insert(0, entry) + + # Write back to file + with open(history_file, 'w') as f: + json.dump(history_data, f, indent=2) + + logger.info(f"Added history entry for {app_type}-{instance_name}: {entry_data['name']}") + return entry + +def get_history(app_type, search_query=None, page=1, page_size=20): + """ + Get history entries for an app + + Parameters: + - app_type: str - The app type (sonarr, radarr, etc) + - search_query: str - Optional search query to filter results + - page: int - Page number (1-based) + - page_size: int - Number of entries per page + + Returns: + - 
dict with entries, total_entries, and total_pages + """ + if not ensure_history_dir(): + logger.error("Could not ensure history directory exists") + return {"entries": [], "total_entries": 0, "total_pages": 0, "current_page": 1} + + if app_type not in history_locks and app_type != "all": + logger.error(f"Invalid app type: {app_type}") + return {"entries": [], "total_entries": 0, "total_pages": 0, "current_page": 1} + + result = [] + + if app_type == "all": + # Combine histories from all apps and their instances + for app in history_locks.keys(): + app_dir = HISTORY_BASE_PATH / app + + # Find and read all instance files + if app_dir.exists(): + for history_file in app_dir.glob("*.json"): + try: + with open(history_file, 'r') as f: + instance_history = json.load(f) + result.extend(instance_history) + logger.debug(f"Read {len(instance_history)} entries from {history_file}") + except (json.JSONDecodeError, FileNotFoundError) as e: + logger.warning(f"Error reading instance history file {history_file}: {str(e)}") + else: + # Get history for specific app - combine all instances + app_dir = HISTORY_BASE_PATH / app_type + + # Make sure app directory exists + app_dir.mkdir(exist_ok=True, parents=True) + + # Read from all instance files + if app_dir.exists(): + instance_files = list(app_dir.glob("*.json")) + logger.debug(f"Found {len(instance_files)} instance files for {app_type}: {[f.name for f in instance_files]}") + + for history_file in instance_files: + try: + with open(history_file, 'r') as f: + instance_history = json.load(f) + result.extend(instance_history) + logger.debug(f"Read {len(instance_history)} entries from {history_file}") + except (json.JSONDecodeError, FileNotFoundError) as e: + logger.warning(f"Error reading instance history file {history_file}: {e}") + + # Sort by date_time in descending order + result = sorted(result, key=lambda x: x["date_time"], reverse=True) + + # Apply search filter if provided + if search_query and search_query.strip(): + 
search_query = search_query.lower() + result = [ + entry for entry in result if + search_query in entry.get("processed_info", "").lower() or + search_query in entry.get("instance_name", "").lower() or + search_query in str(entry.get("id", "")).lower() + ] + + # Calculate pagination + total_entries = len(result) + total_pages = (total_entries + page_size - 1) // page_size if total_entries > 0 else 1 + + # Adjust page if out of bounds + if page < 1: + page = 1 + elif page > total_pages: + page = total_pages + + # Get entries for the current page + start_idx = (page - 1) * page_size + end_idx = start_idx + page_size + paginated_entries = result[start_idx:end_idx] + + # Calculate "how long ago" for each entry + current_time = int(time.time()) + for entry in paginated_entries: + seconds_ago = current_time - entry["date_time"] + entry["how_long_ago"] = format_time_ago(seconds_ago) + + return { + "entries": paginated_entries, + "total_entries": total_entries, + "total_pages": total_pages, + "current_page": page + } + +def format_time_ago(seconds): + """Format seconds into a human-readable 'time ago' string""" + minutes = seconds // 60 + hours = minutes // 60 + days = hours // 24 + + if days > 0: + return f"{days} {'day' if days == 1 else 'days'} ago" + elif hours > 0: + return f"{hours} {'hour' if hours == 1 else 'hours'} ago" + elif minutes > 0: + return f"{minutes} {'minute' if minutes == 1 else 'minutes'} ago" + else: + return f"{seconds} {'second' if seconds == 1 else 'seconds'} ago" + +def clear_history(app_type): + """ + Clear history for an app + + Parameters: + - app_type: str - The app type (sonarr, radarr, etc) or "all" to clear all history + + Returns: + - bool - Success or failure + """ + if not ensure_history_dir(): + logger.error("Could not ensure history directory exists") + return False + + if app_type not in history_locks and app_type != "all": + logger.error(f"Invalid app type: {app_type}") + return False + + try: + if app_type == "all": + # Clear all 
history files for all apps + for app in history_locks.keys(): + # Clear all instance files + app_dir = HISTORY_BASE_PATH / app + # Ensure directory exists + app_dir.mkdir(exist_ok=True, parents=True) + + if app_dir.exists(): + instance_files = list(app_dir.glob("*.json")) + logger.debug(f"Found {len(instance_files)} instance files to clear for {app}") + + for history_file in instance_files: + with open(history_file, 'w') as f: + json.dump([], f) + logger.debug(f"Cleared instance history file: {history_file}") + else: + # Clear all instance files for specific app + app_dir = HISTORY_BASE_PATH / app_type + # Ensure directory exists + app_dir.mkdir(exist_ok=True, parents=True) + + if app_dir.exists(): + instance_files = list(app_dir.glob("*.json")) + logger.debug(f"Found {len(instance_files)} instance files to clear for {app_type}") + + for history_file in instance_files: + with open(history_file, 'w') as f: + json.dump([], f) + logger.debug(f"Cleared instance history file: {history_file}") + + logger.info(f"Successfully cleared history for {app_type}") + return True + except Exception as e: + logger.error(f"Error clearing history for {app_type}: {str(e)}") + return False + +def handle_instance_rename(app_type, old_instance_name, new_instance_name): + """ + Handle renaming of an instance by moving history entries to a new file. 
+ + Parameters: + - app_type: str - The app type (sonarr, radarr, etc) + - old_instance_name: str - Previous instance name + - new_instance_name: str - New instance name + + Returns: + - bool - Success or failure + """ + if not ensure_history_dir(): + logger.error("Could not ensure history directory exists") + return False + + if app_type not in history_locks: + logger.error(f"Invalid app type: {app_type}") + return False + + # If names are the same, nothing to do + if old_instance_name == new_instance_name: + return True + + logger.info(f"Handling instance rename for {app_type}: {old_instance_name} -> {new_instance_name}") + + # Get paths for old and new history files + old_file = get_history_file_path(app_type, old_instance_name) + new_file = get_history_file_path(app_type, new_instance_name) + + # Ensure parent directories exist + new_file.parent.mkdir(exist_ok=True, parents=True) + + # Thread-safe operation + with history_locks[app_type]: + try: + # Load old data if it exists + old_data = [] + if old_file.exists(): + try: + with open(old_file, 'r') as f: + old_data = json.load(f) + logger.info(f"Loaded {len(old_data)} history entries from {old_file}") + except (json.JSONDecodeError, FileNotFoundError) as e: + logger.warning(f"Error reading old history file {old_file}: {e}") + + # Update instance_name in all entries + for entry in old_data: + entry["instance_name"] = new_instance_name + + # Create or load new file + new_data = [] + if new_file.exists(): + try: + with open(new_file, 'r') as f: + new_data = json.load(f) + logger.info(f"Loaded {len(new_data)} existing history entries from {new_file}") + except (json.JSONDecodeError, FileNotFoundError) as e: + logger.warning(f"Error reading new history file {new_file}: {e}") + + # Merge data, avoiding duplicates + existing_keys = {(entry.get("id", ""), entry.get("date_time", 0)) for entry in new_data} + for entry in old_data: + entry_key = (entry.get("id", ""), entry.get("date_time", 0)) + if entry_key not in 
existing_keys: + new_data.append(entry) + + # Sort by timestamp + new_data = sorted(new_data, key=lambda x: x.get("date_time", 0), reverse=True) + + # Save merged data to new file + with open(new_file, 'w') as f: + json.dump(new_data, f, indent=2) + logger.info(f"Saved {len(new_data)} history entries to {new_file}") + + # Optionally delete old file if it exists + if old_file.exists(): + old_file.unlink() + logger.info(f"Deleted old history file {old_file}") + + return True + except Exception as e: + logger.error(f"Error renaming instance history: {e}") + return False + +def initialize_instance_history(app_type, instance_name): + """ + Initialize or ensure history file exists for a specific instance. + This should be called whenever an instance is created or configured. + + Parameters: + - app_type: str - The app type (sonarr, radarr, etc) + - instance_name: str - Name of the instance + + Returns: + - str - Path to the history file + """ + if not ensure_history_dir(): + logger.error("Could not ensure history directory exists") + return None + + if app_type not in history_locks: + logger.error(f"Invalid app type: {app_type}") + return None + + try: + # Get the history file path + history_file = get_history_file_path(app_type, instance_name) + + # Ensure parent directory exists + history_file.parent.mkdir(exist_ok=True, parents=True) + + # Create the file if it doesn't exist + if not history_file.exists(): + with open(history_file, 'w') as f: + json.dump([], f) + logger.info(f"Created history file for {app_type}/{instance_name}: {history_file}") + + return str(history_file) + except Exception as e: + logger.error(f"Error initializing history for {app_type}/{instance_name}: {e}") + return None + +def sync_history_files_with_instances(): + """ + Synchronize history files with existing instances. + This ensures that every instance has a corresponding history file. 
+ + Returns: + - dict - Information about what was synchronized + """ + result = { + "success": False, + "app_instances": {}, + "created_files": [], + "error": None + } + + try: + # First ensure history directories exist + ensure_history_dir() + + # Load settings for each app type to find instances + for app_type in history_locks.keys(): + app_dir = HISTORY_BASE_PATH / app_type + app_dir.mkdir(exist_ok=True, parents=True) + + result["app_instances"][app_type] = [] + + # Let's check for instance settings from settings directory + instances_dir = pathlib.Path("/config") / app_type + if instances_dir.exists(): + for instance_file in instances_dir.glob("*.json"): + try: + # Extract instance name from filename + instance_name = instance_file.stem + result["app_instances"][app_type].append(instance_name) + logger.info(f"Found instance for {app_type}: {instance_name}") + + # Create history file for this instance if it doesn't exist + history_file = get_history_file_path(app_type, instance_name) + if not history_file.exists(): + history_file.parent.mkdir(exist_ok=True, parents=True) + with open(history_file, 'w') as f: + json.dump([], f) + logger.info(f"Created history file for {app_type}/{instance_name}: {history_file}") + result["created_files"].append(str(history_file)) + except Exception as e: + logger.error(f"Error processing instance file {instance_file}: {e}") + + result["success"] = True + return result + except Exception as e: + logger.error(f"Error syncing history files with instances: {e}") + result["error"] = str(e) + return result + +# Run the synchronization on module import +sync_result = sync_history_files_with_instances() +logger.info(f"History synchronization result: {sync_result}") diff --git a/Huntarr.io-6.3.6/src/primary/keys_manager.py b/Huntarr.io-6.3.6/src/primary/keys_manager.py new file mode 100644 index 0000000..99a53d0 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/keys_manager.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +""" +Keys manager for 
Huntarr +Handles storage and retrieval of API keys and URLs from huntarr.json +""" + +import os +import json +import pathlib +import logging +from typing import Dict, Any, Optional, Tuple + +# Create a simple logger +logging.basicConfig(level=logging.INFO) +keys_logger = logging.getLogger("keys_manager") + +# Settings directory - Changed to match the updated settings_manager.py +SETTINGS_DIR = pathlib.Path("/config") +SETTINGS_DIR.mkdir(parents=True, exist_ok=True) + +SETTINGS_FILE = SETTINGS_DIR / "huntarr.json" + +# Removed save_api_keys function + +# Removed get_api_keys function + +# Removed list_configured_apps function + +# Keep other functions if they exist and are needed, otherwise the file might become empty. +# If this file solely managed API keys in the old way, it might be removable entirely, +# but let's keep it for now in case other key-related logic exists or is added later. \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/routes/common.py b/Huntarr.io-6.3.6/src/primary/routes/common.py new file mode 100644 index 0000000..47a5d38 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/routes/common.py @@ -0,0 +1,444 @@ +#!/usr/bin/env python3 +""" +Common routes blueprint for Huntarr web interface +""" + +import os +import json +import base64 +import io +import qrcode +import pyotp +import logging +# Add render_template, send_from_directory, session +from flask import Blueprint, request, jsonify, make_response, redirect, url_for, current_app, render_template, send_from_directory, session +from ..auth import ( + verify_user, create_session, get_username_from_session, SESSION_COOKIE_NAME, + change_username as auth_change_username, change_password as auth_change_password, + validate_password_strength, logout, verify_session, disable_2fa_with_password_and_otp, + user_exists, create_user, generate_2fa_secret, verify_2fa_code, is_2fa_enabled # Add missing auth imports +) +from ..utils.logger import logger # Ensure logger is imported +from .. 
import settings_manager # Import settings_manager + +common_bp = Blueprint('common', __name__) + +# --- Static File Serving --- # + +@common_bp.route('/static/') +def static_files(filename): + return send_from_directory(common_bp.static_folder, filename) + +@common_bp.route('/favicon.ico') +def favicon(): + return send_from_directory(common_bp.static_folder, 'favicon.ico', mimetype='image/vnd.microsoft.icon') + +@common_bp.route('/logo/') +def logo_files(filename): + logo_dir = os.path.join(common_bp.static_folder, 'logo') + return send_from_directory(logo_dir, filename) + +# --- Authentication Routes --- # + +@common_bp.route('/login', methods=['GET', 'POST']) +def login_route(): + if request.method == 'POST': + try: # Wrap the POST logic in a try block for better error handling + data = request.json + username = data.get('username') + password = data.get('password') + twoFactorCode = data.get('twoFactorCode') # Changed from 'otp_code' to match frontend form + + if not username or not password: + logger.warning("Login attempt with missing username or password.") + return jsonify({"success": False, "error": "Username and password are required"}), 400 + + # Call verify_user which now returns (auth_success, needs_2fa) + auth_success, needs_2fa = verify_user(username, password, twoFactorCode) + + logger.debug(f"Auth result for '{username}': success={auth_success}, needs_2fa={needs_2fa}") + + if auth_success: + # User is authenticated (password correct, and 2FA if needed was correct) + session_token = create_session(username) + session[SESSION_COOKIE_NAME] = session_token # Store token in Flask session immediately + response = jsonify({"success": True, "redirect": "/"}) # Add redirect URL + response.set_cookie(SESSION_COOKIE_NAME, session_token, httponly=True, samesite='Lax', path='/') # Add path + logger.info(f"User '{username}' logged in successfully.") + return response + elif needs_2fa: + # Authentication failed *because* 2FA was required (or code was invalid) + # 
The specific reason (missing vs invalid code) is logged in verify_user + logger.warning(f"Login failed for '{username}': 2FA required or invalid.") + logger.debug(f"Returning 2FA required response: {{\"success\": False, \"requires_2fa\": True, \"requiresTwoFactor\": True, \"error\": \"Invalid or missing 2FA code\"}}") + + # Use all common variations of the 2FA flag to ensure compatibility + return jsonify({ + "success": False, + "requires_2fa": True, + "requiresTwoFactor": True, + "requires2fa": True, + "requireTwoFactor": True, + "error": "Two-factor authentication code required" + }), 401 + else: + # Authentication failed for other reasons (e.g., wrong password, user not found) + # Specific reason logged in verify_user + logger.warning(f"Login failed for '{username}': Invalid credentials or other error.") + return jsonify({"success": False, "error": "Invalid username or password"}), 401 # Use 401 + + except Exception as e: + logger.error(f"Unexpected error during login POST for user '{username if 'username' in locals() else 'unknown'}': {e}", exc_info=True) + return jsonify({"success": False, "error": "An internal server error occurred during login."}), 500 + else: + # GET request - show login page + # If user already exists, show login, otherwise redirect to setup + if not user_exists(): + logger.info("No user exists, redirecting to setup.") + return redirect(url_for('common.setup_route')) + logger.debug("Displaying login page.") + return render_template('login.html') + +@common_bp.route('/logout', methods=['POST']) +def logout_route(): + try: + session_token = request.cookies.get(SESSION_COOKIE_NAME) + if session_token: + logger.info(f"Logging out session token: {session_token[:8]}...") # Log part of token + logout(session_token) # Call the logout function from auth.py + else: + logger.warning("Logout attempt without session cookie.") + + response = jsonify({"success": True}) + # Ensure cookie deletion happens even if logout function had issues + 
response.delete_cookie(SESSION_COOKIE_NAME, path='/', samesite='Lax') # Specify path and samesite + logger.info("Logout successful, cookie deleted.") + return response + except Exception as e: + logger.error(f"Error during logout: {e}", exc_info=True) + # Return a JSON error response + return jsonify({"success": False, "error": "An internal server error occurred during logout."}), 500 + +@common_bp.route('/setup', methods=['GET', 'POST']) +def setup(): + if user_exists(): # This function should now be defined via import + # If a user already exists, redirect to login or home + logger.info("Setup page accessed but user already exists. Redirecting to login.") + return redirect(url_for('common.login_route')) + + if request.method == 'POST': + username = None # Initialize username for logging in case of early failure + try: # Add try block to catch potential errors during user creation + data = request.json + username = data.get('username') + password = data.get('password') + confirm_password = data.get('confirm_password') + + # Basic validation + if not username or not password or not confirm_password: + return jsonify({"success": False, "error": "Missing required fields"}), 400 + + # Add username length validation + if len(username.strip()) < 3: + return jsonify({"success": False, "error": "Username must be at least 3 characters long"}), 400 + + if password != confirm_password: + return jsonify({"success": False, "error": "Passwords do not match"}), 400 + + # Validate password strength using the backend function + password_error = validate_password_strength(password) + if password_error: + return jsonify({"success": False, "error": password_error}), 400 + + logger.info(f"Attempting to create user '{username}' during setup.") + if create_user(username, password): # This function should now be defined via import + # Automatically log in the user after setup + logger.info(f"User '{username}' created successfully during setup. 
Creating session.") + session_token = create_session(username) + # Explicitly set username in Flask session - might not be needed if using token correctly + # session['username'] = username + session[SESSION_COOKIE_NAME] = session_token # Store token in session + response = jsonify({"success": True}) + # Set cookie in the response + response.set_cookie(SESSION_COOKIE_NAME, session_token, httponly=True, samesite='Lax', path='/') # Add path + return response + else: + # create_user itself failed, but didn't raise an exception + logger.error(f"create_user function returned False for user '{username}' during setup.") + return jsonify({"success": False, "error": "Failed to create user (internal reason)"}), 500 + except Exception as e: + # Catch any unexpected exception during the process + logger.error(f"Unexpected error during setup POST for user '{username if username else 'unknown'}': {e}", exc_info=True) + return jsonify({"success": False, "error": f"An unexpected server error occurred: {e}"}), 500 + else: + # GET request - show setup page + logger.info("Displaying setup page.") + return render_template('setup.html') # This function should now be defined via import + +# --- User Management API Routes --- # + +@common_bp.route('/api/user/info', methods=['GET']) +def get_user_info_route(): + # Use session token to get username + session_token = request.cookies.get(SESSION_COOKIE_NAME) + username = get_username_from_session(session_token) # Use auth function + + if not username: + logger.warning("Attempt to get user info failed: Not authenticated (no valid session).") + return jsonify({"error": "Not authenticated"}), 401 + + # Pass username to is_2fa_enabled + two_fa_status = is_2fa_enabled(username) # This function should now be defined via import + logger.debug(f"Retrieved user info for '{username}'. 
2FA enabled: {two_fa_status}") + return jsonify({"username": username, "is_2fa_enabled": two_fa_status}) + +@common_bp.route('/api/user/change-username', methods=['POST']) +def change_username_route(): + # Use session token to get username + session_token = request.cookies.get(SESSION_COOKIE_NAME) + current_username = get_username_from_session(session_token) + + if not current_username: + logger.warning("Username change attempt failed: Not authenticated.") + return jsonify({"error": "Not authenticated"}), 401 + + data = request.json + new_username = data.get('username') + password = data.get('password') # Get password from request + + if not new_username or not password: # Check if password is provided + return jsonify({"success": False, "error": "New username and current password are required"}), 400 + + # Add username length validation + if len(new_username.strip()) < 3: + return jsonify({"success": False, "error": "Username must be at least 3 characters long"}), 400 + + # Call the change_username function from auth.py + if auth_change_username(current_username, new_username, password): + # Update session? The session stores a token, not the username directly. + # If the username is needed frequently, maybe re-create session or update session data if stored there. + # For now, assume token remains valid. + logger.info(f"Username changed successfully for '{current_username}' to '{new_username}'.") + # Re-fetch username to confirm change for response? Or trust change_username? + # Fetch updated info to send back + updated_username = new_username # Assume success means it changed + return jsonify({"success": True, "username": updated_username}) # Return new username + else: + logger.warning(f"Username change failed for '{current_username}'. Check logs in auth.py for details.") + return jsonify({"success": False, "error": "Failed to change username. 
Check password or logs."}), 400 + +@common_bp.route('/api/user/change-password', methods=['POST']) +def change_password_route(): + # Use session token to get username - needed? change_password might not need it if single user + session_token = request.cookies.get(SESSION_COOKIE_NAME) + username = get_username_from_session(session_token) # Get username for logging + + if not username: # Check if session is valid even if function doesn't need username + logger.warning("Password change attempt failed: Not authenticated.") + return jsonify({"error": "Not authenticated"}), 401 + + data = request.json + current_password = data.get('current_password') + new_password = data.get('new_password') + + if not current_password or not new_password: + logger.warning(f"Password change attempt for user '{username}' failed: Missing current or new password.") + return jsonify({"success": False, "error": "Current and new passwords are required"}), 400 + + logger.info(f"Attempting to change password for user '{username}'.") + # Pass username? change_password might not need it. Assuming it doesn't for now. + if auth_change_password(current_password, new_password): + logger.info(f"Password changed successfully for user '{username}'.") + return jsonify({"success": True}) + else: + logger.warning(f"Password change failed for user '{username}'. Check logs in auth.py for details.") + return jsonify({"success": False, "error": "Failed to change password. 
Check current password or logs."}), 400 + +# --- 2FA Management API Routes --- # + +@common_bp.route('/api/user/2fa/setup', methods=['POST']) +def setup_2fa(): + # Use session token to get username + session_token = request.cookies.get(SESSION_COOKIE_NAME) + username = get_username_from_session(session_token) + + if not username: + logger.warning("2FA setup attempt failed: No username in session.") # Add logging + return jsonify({"error": "Not authenticated"}), 401 + + try: + logger.info(f"Generating 2FA setup for user: {username}") # Add logging + # Pass username to generate_2fa_secret + secret, qr_code_data_uri = generate_2fa_secret(username) # This function should now be defined via import + + # Return secret and QR code data URI + return jsonify({"success": True, "secret": secret, "qr_code_url": qr_code_data_uri}) # Match frontend expectation 'qr_code_url' + + except Exception as e: + logger.error(f"Error during 2FA setup generation for user '{username}': {e}", exc_info=True) + return jsonify({"success": False, "error": "Failed to generate 2FA setup information."}), 500 + +@common_bp.route('/api/user/2fa/verify', methods=['POST']) +def verify_2fa(): + # Use session token to get username + session_token = request.cookies.get(SESSION_COOKIE_NAME) + username = get_username_from_session(session_token) + + if not username: + logger.warning("2FA verify attempt failed: No username in session.") # Add logging + return jsonify({"error": "Not authenticated"}), 401 + + data = request.json + otp_code = data.get('code') # Match frontend key 'code' + + if not otp_code or len(otp_code) != 6 or not otp_code.isdigit(): # Add validation + logger.warning(f"2FA verification for '{username}' failed: Invalid code format provided.") + return jsonify({"success": False, "error": "Invalid or missing 6-digit OTP code"}), 400 + + logger.info(f"Attempting to verify 2FA code for user '{username}'.") + # Pass username to verify_2fa_code + if verify_2fa_code(username, otp_code, 
enable_on_verify=True): # This function should now be defined via import + logger.info(f"Successfully verified and enabled 2FA for user: {username}") # Add logging + return jsonify({"success": True}) + else: + # Reason logged in verify_2fa_code + logger.warning(f"2FA verification failed for user: {username}. Check logs in auth.py.") + return jsonify({"success": False, "error": "Invalid OTP code"}), 400 # Use 400 for bad request + +@common_bp.route('/api/user/2fa/disable', methods=['POST']) +def disable_2fa_route(): + session_token = request.cookies.get(SESSION_COOKIE_NAME) + username = get_username_from_session(session_token) + + if not username: + logger.warning("2FA disable attempt failed: Not authenticated.") + return jsonify({"error": "Not authenticated"}), 401 + + data = request.json + password = data.get('password') + otp_code = data.get('code') + + # Require BOTH password and OTP code + if not password or not otp_code: + logger.warning(f"2FA disable attempt for '{username}' failed: Missing password or OTP code.") + return jsonify({"success": False, "error": "Both password and current OTP code are required to disable 2FA"}), 400 + + if not (len(otp_code) == 6 and otp_code.isdigit()): + logger.warning(f"2FA disable attempt for '{username}' failed: Invalid OTP code format.") + return jsonify({"success": False, "error": "Invalid 6-digit OTP code format"}), 400 + + # Call a function that verifies both password and OTP + if disable_2fa_with_password_and_otp(username, password, otp_code): + logger.info(f"2FA disabled successfully for user '{username}' using password and OTP.") + return jsonify({"success": True}) + else: + # Reason logged in disable_2fa_with_password_and_otp + logger.warning(f"Failed to disable 2FA for user '{username}' using password and OTP. 
Check logs.") + # Provide a more specific error if possible, otherwise generic + # The auth function should log the specific reason (bad pass, bad otp) + return jsonify({"success": False, "error": "Failed to disable 2FA. Invalid password or OTP code."}), 400 + +# --- Theme Setting Route --- +@common_bp.route('/api/settings/theme', methods=['POST']) +def set_theme(): + # Authentication check + session_token = request.cookies.get(SESSION_COOKIE_NAME) + if not verify_session(session_token): + logger.warning("Theme setting attempt failed: Not authenticated.") + return jsonify({"error": "Unauthorized"}), 401 + + try: + data = request.json + dark_mode = data.get('dark_mode') + + if dark_mode is None or not isinstance(dark_mode, bool): + logger.warning("Invalid theme setting received.") + return jsonify({"success": False, "error": "Invalid 'dark_mode' value"}), 400 + + # Here you would typically save this preference to a user profile or global setting + # For now, just log it. A real implementation would persist this. 
+ username = get_username_from_session(session_token) # Get username for logging + logger.info(f"User '{username}' set dark mode preference to: {dark_mode}") + + # Example: Saving to a hypothetical global config (replace with actual persistence) + # global_settings = settings_manager.load_global_settings() # Assuming such a function exists + # global_settings['ui']['dark_mode'] = dark_mode + # settings_manager.save_global_settings(global_settings) # Assuming such a function exists + + return jsonify({"success": True}) + except Exception as e: + logger.error(f"Error setting theme preference: {e}", exc_info=True) + return jsonify({"success": False, "error": "Failed to set theme preference"}), 500 + +# --- Local Access Bypass Status API Route --- # + +@common_bp.route('/api/get_local_access_bypass_status', methods=['GET']) +def get_local_access_bypass_status_route(): + """API endpoint to get the status of the local network authentication bypass setting.""" + try: + # Get the setting from the 'general' section, default to False if not found + bypass_enabled = settings_manager.get_setting('general', 'local_access_bypass', False) + logger.debug(f"Retrieved local_access_bypass status: {bypass_enabled}") + # Return status in the format expected by the frontend + return jsonify({"isEnabled": bypass_enabled}) + except Exception as e: + logger.error(f"Error retrieving local_access_bypass status: {e}", exc_info=True) + # Return a generic error to the client + return jsonify({"error": "Failed to retrieve bypass status"}), 500 + +# --- Stats Management API Routes --- # +@common_bp.route('/api/stats', methods=['GET']) +def get_stats_api(): + """API endpoint to get media statistics""" + try: + # Import here to avoid circular imports + from ..stats_manager import get_stats + + # Get stats from stats_manager + stats = get_stats() + logger.debug(f"Retrieved stats for API response: {stats}") + + # Return success response with stats + return jsonify({"success": True, "stats": stats}) + 
except Exception as e: + logger.error(f"Error retrieving stats: {e}", exc_info=True) + return jsonify({"success": False, "error": str(e)}), 500 + +@common_bp.route('/api/stats/reset', methods=['POST']) +def reset_stats_api(): + """API endpoint to reset media statistics""" + try: + # Import here to avoid circular imports + from ..stats_manager import reset_stats + + # Check if authenticated + session_token = request.cookies.get(SESSION_COOKIE_NAME) + if not verify_session(session_token): + logger.warning("Stats reset attempt failed: Not authenticated.") + return jsonify({"error": "Unauthorized"}), 401 + + # Get app type from request if provided + data = request.json or {} + app_type = data.get('app_type') # None will reset all + + if app_type is not None and app_type not in ["sonarr", "radarr", "lidarr", "readarr", "whisparr"]: + logger.warning(f"Invalid app_type for stats reset: {app_type}") + return jsonify({"success": False, "error": "Invalid app_type"}), 400 + + # Reset stats + if reset_stats(app_type): + message = f"Reset statistics for {app_type}" if app_type else "Reset all statistics" + logger.info(message) + return jsonify({"success": True, "message": message}) + else: + error_msg = f"Failed to reset statistics for {app_type}" if app_type else "Failed to reset all statistics" + logger.error(error_msg) + return jsonify({"success": False, "error": error_msg}), 500 + except Exception as e: + logger.error(f"Error resetting stats: {e}", exc_info=True) + return jsonify({"success": False, "error": str(e)}), 500 + +# Ensure all routes previously in this file that interact with settings +# are either moved to web_server.py or updated here using the new settings_manager functions. 
+ +# REMOVED DUPLICATE BLUEPRINT DEFINITION AND CONFLICTING ROUTES BELOW THIS LINE diff --git a/Huntarr.io-6.3.6/src/primary/routes/history_routes.py b/Huntarr.io-6.3.6/src/primary/routes/history_routes.py new file mode 100644 index 0000000..2a2735c --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/routes/history_routes.py @@ -0,0 +1,51 @@ +from flask import Blueprint, request, jsonify, current_app +import logging + +from src.primary.history_manager import get_history, clear_history, add_history_entry + +logger = logging.getLogger("huntarr") +history_blueprint = Blueprint('history', __name__) + +@history_blueprint.route('/', methods=['GET']) +def get_app_history(app_type): + """Get history entries for a specific app or all apps""" + try: + search_query = request.args.get('search', '') + page = int(request.args.get('page', 1)) + page_size = int(request.args.get('page_size', 20)) + + # Validate page_size to be one of the allowed values + allowed_page_sizes = [10, 20, 30, 50, 100, 250, 1000] + if page_size not in allowed_page_sizes: + page_size = 20 + + # Validate app_type + valid_app_types = ["all", "sonarr", "radarr", "lidarr", "readarr", "whisparr", "eros", "swaparr"] + if app_type not in valid_app_types: + return jsonify({"error": f"Invalid app type: {app_type}"}), 400 + + result = get_history(app_type, search_query, page, page_size) + return jsonify(result), 200 + + except Exception as e: + logger.error(f"Error getting history for {app_type}: {str(e)}") + return jsonify({"error": str(e)}), 500 + +@history_blueprint.route('/', methods=['DELETE']) +def clear_app_history(app_type): + """Clear history for a specific app or all apps""" + try: + # Validate app_type + valid_app_types = ["all", "sonarr", "radarr", "lidarr", "readarr", "whisparr", "eros", "swaparr"] + if app_type not in valid_app_types: + return jsonify({"error": f"Invalid app type: {app_type}"}), 400 + + success = clear_history(app_type) + if success: + return jsonify({"message": f"History cleared for 
{app_type}"}), 200 + else: + return jsonify({"error": f"Failed to clear history for {app_type}"}), 500 + + except Exception as e: + logger.error(f"Error clearing history for {app_type}: {str(e)}") + return jsonify({"error": str(e)}), 500 diff --git a/Huntarr.io-6.3.6/src/primary/routes/main.py b/Huntarr.io-6.3.6/src/primary/routes/main.py new file mode 100644 index 0000000..8794c5b --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/routes/main.py @@ -0,0 +1,54 @@ +from flask import Blueprint, request, jsonify +from src.primary.stats_manager import get_stats, reset_stats + +main_blueprint = Blueprint('main', __name__) + +@main_blueprint.route('/') +def index(): + # ...existing code... + + # Remove or comment out any logging of the web interface URL here + # logger.info(f"Web interface available at http://{request.host}") + + # ...existing code... + +# Add new route for getting media statistics +@main_blueprint.route('/api/stats', methods=['GET']) +@jwt_required() +def api_get_stats(): + """Get media statistics for each app""" + try: + stats = get_stats() + return jsonify({ + "success": True, + "stats": stats + }) + except Exception as e: + logger.error(f"Error retrieving media statistics: {e}") + return jsonify({ + "success": False, + "message": "Error retrieving media statistics." + }), 500 + +# Add route for resetting statistics +@main_blueprint.route('/api/stats/reset', methods=['POST']) +@jwt_required() +@admin_required +def api_reset_stats(): + """Reset media statistics""" + try: + app_type = None + if request.is_json: + app_type = request.json.get('app_type') + + reset_stats(app_type) + return jsonify({ + "success": True, + "message": f"Successfully reset statistics for {'all apps' if app_type is None else app_type}." + }) + except Exception as e: + logger.error(f"Error resetting media statistics: {e}") + return jsonify({ + "success": False, + "message": "Error resetting media statistics." 
+ }), 500 \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/server.py b/Huntarr.io-6.3.6/src/primary/server.py new file mode 100644 index 0000000..27d5a50 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/server.py @@ -0,0 +1,20 @@ +import logging + +# ...existing code... + +def start_server(host='0.0.0.0', port=9876, debug=False): + """Start the web server""" + logging.basicConfig(level=logging.DEBUG if debug else logging.INFO) + logger = logging.getLogger(__name__) + + # ...existing code... + + # Change this line: + # logger.info(f"Web interface available at http://{host}:{port}") + + # To this (more discreet version): + logger.info(f"Server started on port {port}") + + # ...existing code... + +# ...existing code... \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/settings_manager.py b/Huntarr.io-6.3.6/src/primary/settings_manager.py new file mode 100644 index 0000000..2e69b13 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/settings_manager.py @@ -0,0 +1,338 @@ +#!/usr/bin/env python3 +""" +Settings manager for Huntarr +Handles loading, saving, and providing settings from individual JSON files per app +Supports default configurations for different Arr applications +""" + +import os +import json +import pathlib +import logging +import shutil +import subprocess +import time +from typing import Dict, Any, Optional, List + +# Create a simple logger for settings_manager +logging.basicConfig(level=logging.INFO) +settings_logger = logging.getLogger("settings_manager") + +# Settings directory setup - Root config directory +SETTINGS_DIR = pathlib.Path("/config") +SETTINGS_DIR.mkdir(parents=True, exist_ok=True) + +# Default configs location remains the same +DEFAULT_CONFIGS_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), 'default_configs')) + +# Update or add this as a class attribute or constant +KNOWN_APP_TYPES = ["sonarr", "radarr", "lidarr", "readarr", "whisparr", "eros", "general", "swaparr"] + +# Add a settings 
cache with timestamps to avoid excessive disk reads +settings_cache = {} # Format: {app_name: {'timestamp': timestamp, 'data': settings_dict}} +CACHE_TTL = 5 # Cache time-to-live in seconds + +def clear_cache(app_name=None): + """Clear the settings cache for a specific app or all apps.""" + global settings_cache + if app_name: + if app_name in settings_cache: + settings_logger.debug(f"Clearing cache for {app_name}") + settings_cache.pop(app_name, None) + else: + settings_logger.debug("Clearing entire settings cache") + settings_cache = {} + +def get_settings_file_path(app_name: str) -> pathlib.Path: + """Get the path to the settings file for a specific app.""" + if app_name not in KNOWN_APP_TYPES: + # Log a warning but allow for potential future app types + settings_logger.warning(f"Requested settings file for unknown app type: {app_name}") + return SETTINGS_DIR / f"{app_name}.json" + +def get_default_config_path(app_name: str) -> pathlib.Path: + """Get the path to the default config file for a specific app.""" + return pathlib.Path(DEFAULT_CONFIGS_DIR) / f"{app_name}.json" + +# Helper function to load default settings for a specific app +def load_default_app_settings(app_name: str) -> Dict[str, Any]: + """Load default settings for a specific app from its JSON file.""" + default_file = get_default_config_path(app_name) + if default_file.exists(): + try: + with open(default_file, 'r') as f: + return json.load(f) + except Exception as e: + settings_logger.error(f"Error loading default settings for {app_name} from {default_file}: {e}") + return {} + else: + settings_logger.warning(f"Default settings file not found for {app_name}: {default_file}") + return {} + +def _ensure_config_exists(app_name: str) -> None: + """Ensure the config file exists for an app, copying from default if not.""" + settings_file = get_settings_file_path(app_name) + if not settings_file.exists(): + default_file = get_default_config_path(app_name) + if default_file.exists(): + try: + 
shutil.copyfile(default_file, settings_file) + settings_logger.info(f"Created default settings file for {app_name} at {settings_file}") + except Exception as e: + settings_logger.error(f"Error copying default settings for {app_name}: {e}") + else: + # Create an empty file if no default exists + settings_logger.warning(f"No default config found for {app_name}. Creating empty settings file.") + try: + with open(settings_file, 'w') as f: + json.dump({}, f) + except Exception as e: + settings_logger.error(f"Error creating empty settings file for {app_name}: {e}") + + +def load_settings(app_type, use_cache=True): + """ + Load settings for a specific app type + + Args: + app_type: The app type to load settings for + use_cache: Whether to use the cached settings if available and recent + + Returns: + Dict containing the app settings + """ + global settings_cache + + # Only log unexpected app types that are not 'general' + if app_type not in KNOWN_APP_TYPES and app_type != "general": + settings_logger.warning(f"load_settings called with unexpected app_type: {app_type}") + + # Check if we have a valid cache entry + if use_cache and app_type in settings_cache: + cache_entry = settings_cache[app_type] + cache_age = time.time() - cache_entry.get('timestamp', 0) + + if cache_age < CACHE_TTL: + settings_logger.debug(f"Using cached settings for {app_type} (age: {cache_age:.1f}s)") + return cache_entry['data'] + else: + settings_logger.debug(f"Cache expired for {app_type} (age: {cache_age:.1f}s)") + + # No valid cache entry, load from disk + _ensure_config_exists(app_type) + settings_file = get_settings_file_path(app_type) + try: + with open(settings_file, 'r') as f: + # Load existing settings + current_settings = json.load(f) + + # Load defaults to check for missing keys + default_settings = load_default_app_settings(app_type) + + # Add missing keys from defaults without overwriting existing values + updated = False + for key, value in default_settings.items(): + if key not in 
current_settings: + current_settings[key] = value + updated = True + + # If keys were added, save the updated file + if updated: + settings_logger.info(f"Added missing default keys to {app_type}.json") + save_settings(app_type, current_settings) # Use save_settings to handle writing + + # Update cache + settings_cache[app_type] = { + 'timestamp': time.time(), + 'data': current_settings + } + + return current_settings + + except json.JSONDecodeError: + settings_logger.error(f"Error decoding JSON from {settings_file}. Restoring from default.") + # Attempt to restore from default + default_settings = load_default_app_settings(app_type) + save_settings(app_type, default_settings) # Save the restored defaults + + # Update cache with defaults + settings_cache[app_type] = { + 'timestamp': time.time(), + 'data': default_settings + } + + return default_settings + except Exception as e: + settings_logger.error(f"Error loading settings for {app_type} from {settings_file}: {e}") + return {} # Return empty dict on other errors + + +def save_settings(app_name: str, settings_data: Dict[str, Any]) -> bool: + """Save settings for a specific app.""" + if app_name not in KNOWN_APP_TYPES: + settings_logger.error(f"Attempted to save settings for unknown app type: {app_name}") + return False + + settings_file = get_settings_file_path(app_name) + try: + # Ensure the directory exists (though it should from the top-level check) + settings_file.parent.mkdir(parents=True, exist_ok=True) + + # Write the provided settings data directly + with open(settings_file, 'w') as f: + json.dump(settings_data, f, indent=2) + settings_logger.info(f"Settings saved successfully for {app_name} to {settings_file}") + + # Clear cache for this app to ensure fresh reads + clear_cache(app_name) + + return True + except Exception as e: + settings_logger.error(f"Error saving settings for {app_name} to {settings_file}: {e}") + return False + +def get_setting(app_name: str, key: str, default: Optional[Any] = None) -> 
Any: + """Get a specific setting value for an app.""" + settings = load_settings(app_name) + return settings.get(key, default) + +def get_api_url(app_name: str) -> Optional[str]: + """Get the API URL for a specific app.""" + return get_setting(app_name, "api_url", "") + +def get_api_key(app_name: str) -> Optional[str]: + """Get the API Key for a specific app.""" + return get_setting(app_name, "api_key", "") + +def get_all_settings() -> Dict[str, Dict[str, Any]]: + """Load settings for all known apps.""" + all_settings = {} + for app_name in KNOWN_APP_TYPES: + # Only include apps if their config file exists or can be created from defaults + # Effectively, load_settings ensures the file exists and loads it. + settings = load_settings(app_name) + if settings: # Only add if settings were successfully loaded + all_settings[app_name] = settings + return all_settings + +def get_configured_apps() -> List[str]: + """Return a list of app names that have basic configuration (API URL and Key).""" + configured = [] + for app_name in KNOWN_APP_TYPES: + settings = load_settings(app_name) + + # First check if there are valid instances configured (multi-instance mode) + if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]: + for instance in settings["instances"]: + if instance.get("enabled", True) and instance.get("api_url") and instance.get("api_key"): + configured.append(app_name) + break # One valid instance is enough to consider the app configured + continue # Skip the single-instance check if we already checked instances + + # Fallback to legacy single-instance config + if settings.get("api_url") and settings.get("api_key"): + configured.append(app_name) + + settings_logger.info(f"Configured apps: {configured}") + return configured + +def apply_timezone(timezone: str) -> bool: + """Apply the specified timezone to the container. 
+ + Args: + timezone: The timezone to set (e.g., 'UTC', 'America/New_York') + + Returns: + bool: True if successful, False otherwise + """ + try: + # Set TZ environment variable + os.environ['TZ'] = timezone + + # Create symlink for localtime (common approach in containers) + zoneinfo_path = f"/usr/share/zoneinfo/{timezone}" + if os.path.exists(zoneinfo_path): + # Remove existing symlink if it exists + if os.path.exists("/etc/localtime"): + os.remove("/etc/localtime") + + # Create new symlink + os.symlink(zoneinfo_path, "/etc/localtime") + + # Also update /etc/timezone file if it exists + with open("/etc/timezone", "w") as f: + f.write(f"{timezone}\n") + + settings_logger.info(f"Timezone set to {timezone}") + return True + else: + settings_logger.error(f"Timezone file not found: {zoneinfo_path}") + return False + except Exception as e: + settings_logger.error(f"Error setting timezone: {str(e)}") + return False + +# Add a list of known advanced settings for clarity and documentation +ADVANCED_SETTINGS = [ + "api_timeout", + "command_wait_delay", + "command_wait_attempts", + "minimum_download_queue_size", + "log_refresh_interval_seconds", + "debug_mode", + "stateful_management_hours" +] + +def get_advanced_setting(setting_name, default_value=None): + """ + Get an advanced setting from general settings. + + Advanced settings are now centralized in general settings and no longer stored + in individual app settings files. This function provides a consistent way to + access these settings from anywhere in the codebase. 
+ + Args: + setting_name: The name of the advanced setting to retrieve + default_value: The default value to return if the setting is not found + + Returns: + The value of the setting or the default value if not found + """ + if setting_name not in ADVANCED_SETTINGS: + settings_logger.warning(f"Requested unknown advanced setting: {setting_name}") + + # Get from general settings + general_settings = load_settings('general', use_cache=True) + return general_settings.get(setting_name, default_value) + +# Example usage (for testing purposes, remove later) +if __name__ == "__main__": + settings_logger.info(f"Known app types: {KNOWN_APP_TYPES}") + + # Ensure defaults are copied if needed + for app in KNOWN_APP_TYPES: + _ensure_config_exists(app) + + # Test loading Sonarr settings + sonarr_settings = load_settings("sonarr") + settings_logger.info(f"Loaded Sonarr settings: {json.dumps(sonarr_settings, indent=2)}") + + # Test getting a specific setting + sonarr_sleep = get_setting("sonarr", "sleep_duration", 999) + settings_logger.info(f"Sonarr sleep duration: {sonarr_sleep}") + + # Test saving updated settings (example) + if sonarr_settings: + sonarr_settings["sleep_duration"] = 850 + save_settings("sonarr", sonarr_settings) + reloaded_sonarr_settings = load_settings("sonarr") + settings_logger.info(f"Reloaded Sonarr settings after save: {json.dumps(reloaded_sonarr_settings, indent=2)}") + + + # Test getting all settings + all_app_settings = get_all_settings() + settings_logger.info(f"All loaded settings: {json.dumps(all_app_settings, indent=2)}") + + # Test getting configured apps + configured_list = get_configured_apps() + settings_logger.info(f"Configured apps: {configured_list}") \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/state.py b/Huntarr.io-6.3.6/src/primary/state.py new file mode 100644 index 0000000..50011de --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/state.py @@ -0,0 +1,322 @@ +#!/usr/bin/env python3 +""" +State management module 
for Huntarr +Handles all persistence of program state +""" + +import os +import datetime +import time +import json +from typing import List, Dict, Any, Optional +from src.primary import settings_manager + +# Define the config directory - typically /config in Docker environment +CONFIG_DIR = os.environ.get('CONFIG_DIR', '/config') + +# Get the logger at module level +from src.primary.utils.logger import get_logger +logger = get_logger("huntarr") + +def get_state_file_path(app_type, state_name): + """ + Get the path to a state file for a specific app type and state name. + + Args: + app_type: The application type (sonarr, radarr, etc.) + state_name: The name of the state file + + Returns: + The path to the state file + """ + # Define known app types + known_app_types = ["sonarr", "radarr", "lidarr", "readarr", "whisparr", "eros"] + + # If app_type is not in known types, log a warning but don't fail + if app_type not in known_app_types and app_type != "general": + logger.warning(f"get_state_file_path called with unexpected app_type: {app_type}") + + # Create the state directory if it doesn't exist + state_dir = os.path.join(CONFIG_DIR, "state", app_type) + os.makedirs(state_dir, exist_ok=True) + + # Return the path to the state file + return os.path.join(state_dir, f"{state_name}.json") + +def get_last_reset_time(app_type: str = None) -> datetime.datetime: + """ + Get the last time the state was reset for a specific app type. + + Args: + app_type: The type of app to get last reset time for. + + Returns: + The datetime of the last reset, or a very old date if no reset has occurred or app_type is invalid. 
+ """ + if not app_type: + logger.error("get_last_reset_time called without app_type.") + return datetime.datetime.fromtimestamp(0) + + current_app_type = app_type + reset_file = get_state_file_path(current_app_type, "last_reset") + + try: + if os.path.exists(reset_file): + with open(reset_file, "r") as f: + reset_time_str = f.read().strip() + return datetime.datetime.fromisoformat(reset_time_str) + except Exception as e: + logger.error(f"Error reading last reset time for {current_app_type}: {e}") + + return datetime.datetime.fromtimestamp(0) + +def set_last_reset_time(reset_time: datetime.datetime, app_type: str = None) -> None: + """ + Set the last time the state was reset for a specific app type. + + Args: + reset_time: The datetime to set + app_type: The type of app to set last reset time for. + """ + if not app_type: + logger.error("set_last_reset_time called without app_type.") + return + + current_app_type = app_type + reset_file = get_state_file_path(current_app_type, "last_reset") + + try: + with open(reset_file, "w") as f: + f.write(reset_time.isoformat()) + except Exception as e: + logger.error(f"Error writing last reset time for {current_app_type}: {e}") + +def check_state_reset(app_type: str = None) -> bool: + """ + Check if the state needs to be reset based on the reset interval. + If it's time to reset, clears the processed IDs and updates the last reset time. + + Args: + app_type: The type of app to check state reset for. + + Returns: + True if the state was reset, False otherwise. 
+ """ + if not app_type: + logger.error("check_state_reset called without app_type.") + return False + + current_app_type = app_type + + # Use a much longer default interval (1 week = 168 hours) to prevent frequent resets + reset_interval = settings_manager.get_advanced_setting("stateful_management_hours", 168) + + last_reset = get_last_reset_time(current_app_type) + now = datetime.datetime.now() + + delta = now - last_reset + hours_passed = delta.total_seconds() / 3600 + + # Log every cycle to help diagnose state reset issues + logger.debug(f"State check for {current_app_type}: {hours_passed:.1f} hours since last reset (interval: {reset_interval}h)") + + if hours_passed >= reset_interval: + logger.warning(f"State files for {current_app_type} will be reset after {hours_passed:.1f} hours (interval: {reset_interval}h)") + logger.warning(f"This will cause all previously processed media to be eligible for processing again") + + # Add additional safeguard - only reset if more than double the interval has passed + # This helps prevent accidental resets due to clock issues or other anomalies + if hours_passed >= (reset_interval * 2): + logger.info(f"Confirmed state reset for {current_app_type} after {hours_passed:.1f} hours") + clear_processed_ids(current_app_type) + set_last_reset_time(now, current_app_type) + return True + else: + logger.info(f"State reset postponed for {current_app_type} - will proceed when {reset_interval * 2}h have passed") + # Update last reset time partially to avoid immediate reset next cycle + half_delta = datetime.timedelta(hours=reset_interval/2) + set_last_reset_time(now - half_delta, current_app_type) + + return False + +def clear_processed_ids(app_type: str = None) -> None: + """ + Clear all processed IDs for a specific app type. + + Args: + app_type: The type of app to clear processed IDs for. 
+ """ + if not app_type: + logger.error("clear_processed_ids called without app_type.") + return + + current_app_type = app_type + + missing_file = get_state_file_path(current_app_type, "processed_missing") + try: + if os.path.exists(missing_file): + with open(missing_file, "w") as f: + f.write("[]") + logger.info(f"Cleared processed missing IDs for {current_app_type}") + except Exception as e: + logger.error(f"Error clearing processed missing IDs for {current_app_type}: {e}") + + upgrades_file = get_state_file_path(current_app_type, "processed_upgrades") + try: + if os.path.exists(upgrades_file): + with open(upgrades_file, "w") as f: + f.write("[]") + logger.info(f"Cleared processed upgrade IDs for {current_app_type}") + except Exception as e: + logger.error(f"Error clearing processed upgrade IDs for {current_app_type}: {e}") + +def calculate_reset_time(app_type: str = None) -> str: + """ + Calculate when the next state reset will occur. + + Args: + app_type: The type of app to calculate reset time for. + + Returns: + A string representation of when the next reset will occur. 
+ """ + if not app_type: + logger.error("calculate_reset_time called without app_type.") + return "Next reset: Unknown (app type not provided)" + + current_app_type = app_type + + reset_interval = settings_manager.get_advanced_setting("stateful_management_hours", 168) + + last_reset = get_last_reset_time(current_app_type) + next_reset = last_reset + datetime.timedelta(hours=reset_interval) + now = datetime.datetime.now() + + if next_reset < now: + return "Next reset: at the start of the next cycle" + + delta = next_reset - now + hours = delta.total_seconds() / 3600 + + if hours < 1: + minutes = delta.total_seconds() / 60 + return f"Next reset: in {int(minutes)} minutes" + elif hours < 24: + return f"Next reset: in {int(hours)} hours" + else: + days = hours / 24 + return f"Next reset: in {int(days)} days" + +def load_processed_ids(filepath: str) -> List[int]: + """ + Load processed IDs from a file. + + Args: + filepath: The path to the file + + Returns: + A list of processed IDs + """ + try: + if os.path.exists(filepath): + with open(filepath, "r") as f: + loaded_data = json.load(f) + if isinstance(loaded_data, list): + return loaded_data + else: + logger.error(f"Invalid data type loaded from {filepath}. Expected list, got {type(loaded_data)}. Returning empty list.") + return [] + return [] + except json.JSONDecodeError as e: + logger.error(f"Error decoding JSON from {filepath}: {e}. Returning empty list.") + return [] # Ensure list is returned even on JSON error + except Exception as e: + logger.error(f"Error loading processed IDs from {filepath}: {e}") + return [] + +def save_processed_ids(filepath: str, ids: List[int]) -> None: + """ + Save processed IDs to a file. 
+ + Args: + filepath: The path to the file + ids: The list of IDs to save + """ + try: + with open(filepath, "w") as f: + json.dump(ids, f) + except Exception as e: + logger.error(f"Error saving processed IDs to {filepath}: {e}") + +def save_processed_id(filepath: str, item_id: int) -> None: + """ + Add a single ID to a processed IDs file. + + Args: + filepath: The path to the file + item_id: The ID to add + """ + processed_ids = load_processed_ids(filepath) + + if item_id not in processed_ids: + processed_ids.append(item_id) + save_processed_ids(filepath, processed_ids) + +def reset_state_file(app_type: str, state_type: str) -> bool: + """ + Reset a specific state file for an app type. + + Args: + app_type: The type of app (sonarr, radarr, etc.) + state_type: The type of state file (processed_missing, processed_upgrades) + + Returns: + True if successful, False otherwise + """ + if not app_type: + logger.error("reset_state_file called without app_type.") + return False + + filepath = get_state_file_path(app_type, state_type) + + try: + save_processed_ids(filepath, []) + logger.info(f"Reset {state_type} state file for {app_type}") + return True + except Exception as e: + logger.error(f"Error resetting {state_type} state file for {app_type}: {e}") + return False + +def truncate_processed_list(filepath: str, max_items: int = 1000) -> None: + """ + Truncate a processed IDs list to a maximum number of items. + This helps prevent the file from growing too large over time. 
+ + Args: + filepath: The path to the file + max_items: The maximum number of items to keep + """ + processed_ids = load_processed_ids(filepath) + + if len(processed_ids) > max_items: + processed_ids = processed_ids[-max_items:] + save_processed_ids(filepath, processed_ids) + logger.debug(f"Truncated {filepath} to {max_items} items") + +def init_state_files() -> None: + """Initialize state files for all app types""" + app_types = settings_manager.KNOWN_APP_TYPES + + for app_type in app_types: + missing_file = get_state_file_path(app_type, "processed_missing") + upgrades_file = get_state_file_path(app_type, "processed_upgrades") + reset_file = get_state_file_path(app_type, "last_reset") + + for filepath in [missing_file, upgrades_file]: + if not os.path.exists(filepath): + save_processed_ids(filepath, []) + + if not os.path.exists(reset_file): + set_last_reset_time(datetime.datetime.fromtimestamp(0), app_type) + +init_state_files() \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/stateful_manager.py b/Huntarr.io-6.3.6/src/primary/stateful_manager.py new file mode 100644 index 0000000..30819f1 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/stateful_manager.py @@ -0,0 +1,361 @@ +#!/usr/bin/env python3 +""" +Stateful Manager for Huntarr +Handles storing and retrieving processed media IDs to prevent reprocessing +""" + +import os +import json +import time +import pathlib +import datetime +import logging +from typing import Dict, Any, List, Optional, Set + +# Create logger for stateful_manager +stateful_logger = logging.getLogger("stateful_manager") + +# Constants +STATEFUL_DIR = pathlib.Path(os.getenv("STATEFUL_DIR", "/config/stateful")) +LOCK_FILE = STATEFUL_DIR / "lock.json" +DEFAULT_HOURS = 168 # Default 7 days (168 hours) + +# Ensure the stateful directory exists +try: + STATEFUL_DIR.mkdir(parents=True, exist_ok=True) + stateful_logger.info(f"Stateful directory created/confirmed at {STATEFUL_DIR}") +except Exception as e: + 
stateful_logger.error(f"Error creating stateful directory: {e}") + +# Create app directories +APP_TYPES = ["sonarr", "radarr", "lidarr", "readarr", "whisparr", "eros"] +for app_type in APP_TYPES: + (STATEFUL_DIR / app_type).mkdir(exist_ok=True) + +# Add import for get_advanced_setting +from src.primary.settings_manager import get_advanced_setting + +def initialize_lock_file() -> None: + """Initialize the lock file with the current timestamp if it doesn't exist.""" + # Ensure directory exists - we don't need to log this again + try: + STATEFUL_DIR.mkdir(parents=True, exist_ok=True) + except Exception as e: + stateful_logger.error(f"Error creating stateful directory: {e}") + + if not LOCK_FILE.exists(): + try: + current_time = int(time.time()) + # Get the expiration hours setting + expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS) + + expires_at = current_time + (expiration_hours * 3600) + + with open(LOCK_FILE, 'w') as f: + json.dump({ + "created_at": current_time, + "expires_at": expires_at + }, f, indent=2) + stateful_logger.info(f"Initialized lock file at {LOCK_FILE} with expiration in {expiration_hours} hours") + except Exception as e: + stateful_logger.error(f"Error initializing lock file: {e}") + +def get_lock_info() -> Dict[str, Any]: + """Get the current lock information.""" + initialize_lock_file() + try: + with open(LOCK_FILE, 'r') as f: + lock_info = json.load(f) + + # Validate the structure and ensure required fields exist + if not isinstance(lock_info, dict): + raise ValueError("Lock info is not a dictionary") + + if "created_at" not in lock_info: + lock_info["created_at"] = int(time.time()) + + if "expires_at" not in lock_info or lock_info["expires_at"] is None: + # Recalculate expiration if missing + expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS) + lock_info["expires_at"] = lock_info["created_at"] + (expiration_hours * 3600) + + # Save the updated info + with open(LOCK_FILE, 'w') 
as f: + json.dump(lock_info, f, indent=2) + + return lock_info + except Exception as e: + stateful_logger.error(f"Error reading lock file: {e}") + # Return default values if there's an error + current_time = int(time.time()) + expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS) + expires_at = current_time + (expiration_hours * 3600) + + return { + "created_at": current_time, + "expires_at": expires_at + } + +def update_lock_expiration(hours: int = None) -> bool: + """Update the lock expiration based on the hours setting.""" + if hours is None: + expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS) + else: + expiration_hours = hours + + lock_info = get_lock_info() + created_at = lock_info.get("created_at", int(time.time())) + expires_at = created_at + (expiration_hours * 3600) + + lock_info["expires_at"] = expires_at + + try: + with open(LOCK_FILE, 'w') as f: + json.dump(lock_info, f, indent=2) + stateful_logger.info(f"Updated lock expiration to {datetime.datetime.fromtimestamp(expires_at)}") + return True + except Exception as e: + stateful_logger.error(f"Error updating lock expiration: {e}") + return False + +def reset_stateful_management() -> bool: + """ + Reset the stateful management system. + + This involves: + 1. Creating a new lock file with the current timestamp and a calculated expiration time + based on the 'stateful_management_hours' setting. + 2. Deleting all stored processed ID files (*.json) within each app-specific + subdirectory under the STATEFUL_DIR. + + Returns: + bool: True if the reset was successful, False otherwise. 
+ """ + try: + # Get the expiration hours setting BEFORE writing the lock file + expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS) + + # Create new lock file with calculated expiration + current_time = int(time.time()) + expires_at = current_time + (expiration_hours * 3600) + + with open(LOCK_FILE, 'w') as f: + json.dump({ + "created_at": current_time, + "expires_at": expires_at # Write the calculated expiration time directly + }, f, indent=2) + + # Delete all stored IDs + for app_type in APP_TYPES: + app_dir = STATEFUL_DIR / app_type + if app_dir.exists(): + for json_file in app_dir.glob("*.json"): + try: + json_file.unlink() + stateful_logger.debug(f"Deleted {json_file}") + except Exception as e: + stateful_logger.error(f"Error deleting {json_file}: {e}") + + # No need to call update_lock_expiration() again as we wrote it directly + stateful_logger.info(f"Successfully reset stateful management. New expiration: {datetime.datetime.fromtimestamp(expires_at)}") + return True + except Exception as e: + stateful_logger.error(f"Error resetting stateful management: {e}") + return False + +def check_expiration() -> bool: + """ + Check if the stateful management has expired. + + Returns: + bool: True if expired, False otherwise + """ + lock_info = get_lock_info() + expires_at = lock_info.get("expires_at") + + # If expires_at is None, update it based on settings + if expires_at is None: + update_lock_expiration() + lock_info = get_lock_info() + expires_at = lock_info.get("expires_at") + + current_time = int(time.time()) + + if current_time >= expires_at: + stateful_logger.info("Stateful management has expired, resetting...") + reset_stateful_management() + return True + + return False + +def get_processed_ids(app_type: str, instance_name: str) -> Set[str]: + """ + Get the set of processed media IDs for a specific app instance. + + Args: + app_type: The type of app (sonarr, radarr, etc.) 
+ instance_name: The name of the instance + + Returns: + Set[str]: Set of processed media IDs + """ + if app_type not in APP_TYPES: + stateful_logger.warning(f"Unknown app type: {app_type}") + return set() + + # Create safe filename from instance name + safe_instance_name = "".join([c if c.isalnum() else "_" for c in instance_name]) + + file_path = STATEFUL_DIR / app_type / f"{safe_instance_name}.json" + stateful_logger.debug(f"[get_processed_ids] Checking file: {file_path} for {app_type}/{instance_name}") # DEBUG LOG + + if not file_path.exists(): + stateful_logger.debug(f"[get_processed_ids] File not found: {file_path}") # DEBUG LOG + return set() + + try: + with open(file_path, 'r') as f: + data = json.load(f) + processed_ids_set = set(data.get("processed_ids", [])) # Convert list to set + stateful_logger.debug(f"[get_processed_ids] Read {len(processed_ids_set)} IDs from {file_path}: {processed_ids_set}") # DEBUG LOG + return processed_ids_set + except Exception as e: + stateful_logger.error(f"Error reading processed IDs for {instance_name} from {file_path}: {e}") # Updated log + return set() + +def add_processed_id(app_type: str, instance_name: str, media_id: str) -> bool: + """ + Add a media ID to the processed list for a specific app instance. + + Args: + app_type: The type of app (sonarr, radarr, etc.) 
+ instance_name: The name of the instance + media_id: The ID of the processed media + + Returns: + bool: True if successful, False otherwise + """ + if app_type not in APP_TYPES: + stateful_logger.warning(f"Unknown app type: {app_type}") + return False + + # Create safe filename from instance name + safe_instance_name = "".join([c if c.isalnum() else "_" for c in instance_name]) + + file_path = STATEFUL_DIR / app_type / f"{safe_instance_name}.json" + + # Get existing processed IDs using the get function (which includes logging) + current_processed_ids_set = get_processed_ids(app_type, instance_name) + + # Convert set back to list for appending and saving + processed_ids_list = list(current_processed_ids_set) + + # Add the new ID if it's not already there + if media_id not in current_processed_ids_set: + processed_ids_list.append(media_id) + stateful_logger.debug(f"[add_processed_id] Adding ID {media_id} to list for {app_type}/{instance_name}") # DEBUG LOG + else: + stateful_logger.debug(f"[add_processed_id] ID {media_id} already in list for {app_type}/{instance_name}") # DEBUG LOG + # No need to write if the ID is already present + return True + + # Write the updated list back to the file + stateful_logger.debug(f"[add_processed_id] Writing {len(processed_ids_list)} IDs to {file_path}: {processed_ids_list}") # DEBUG LOG + try: + with open(file_path, 'w') as f: + json.dump({ + "processed_ids": processed_ids_list, + "last_updated": int(time.time()) + }, f, indent=2) + # Removed redundant log here, previous debug log is sufficient + return True + except Exception as e: + stateful_logger.error(f"Error adding media ID {media_id} to {file_path}: {e}") + return False + +def is_processed(app_type: str, instance_name: str, media_id: str) -> bool: + """ + Check if a media ID has already been processed. + + Args: + app_type: The type of app (sonarr, radarr, etc.) 
+ instance_name: The name of the instance + media_id: The ID of the media to check + + Returns: + bool: True if already processed, False otherwise + """ + # Create safe filename for logging + safe_instance = "".join([c if c.isalnum() else "_" for c in instance_name]) + file_path = STATEFUL_DIR / app_type / f"{safe_instance}.json" + + # Get processed IDs for this app/instance + processed_ids = get_processed_ids(app_type, instance_name) + + # Log what we're checking and the result + # Converting media_id to string since some callers might pass an integer + media_id_str = str(media_id) + is_in_set = media_id_str in processed_ids + + stateful_logger.info(f"is_processed check: {app_type}/{instance_name}, ID:{media_id_str}, Found:{is_in_set}, File:{file_path}, Total IDs:{len(processed_ids)}") + + return is_in_set + +def get_stateful_management_info() -> Dict[str, Any]: + """Get information about the stateful management system.""" + lock_info = get_lock_info() + created_at_ts = lock_info.get("created_at") + expires_at_ts = lock_info.get("expires_at") + + # Get the interval setting + expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS) + + return { + "created_at_ts": created_at_ts, + "expires_at_ts": expires_at_ts, + "interval_hours": expiration_hours + } + +def initialize_stateful_system(): + """Perform a complete initialization of the stateful management system.""" + stateful_logger.info("Initializing stateful management system") + + # Ensure all required directories exist + try: + STATEFUL_DIR.mkdir(parents=True, exist_ok=True) + for app_type in APP_TYPES: + (STATEFUL_DIR / app_type).mkdir(exist_ok=True) + stateful_logger.info(f"Stateful directory structure created at {STATEFUL_DIR}") + except Exception as e: + stateful_logger.error(f"Failed to create stateful directories: {e}") + + # Initialize the lock file with proper expiration + try: + initialize_lock_file() + # Update expiration time + expiration_hours = 
get_advanced_setting("stateful_management_hours", DEFAULT_HOURS) + update_lock_expiration(expiration_hours) + stateful_logger.info(f"Stateful lock file initialized with {expiration_hours} hour expiration") + except Exception as e: + stateful_logger.error(f"Failed to initialize lock file: {e}") + + # Check for existing processed IDs + try: + total_ids = 0 + for app_type in APP_TYPES: + app_dir = STATEFUL_DIR / app_type + if app_dir.exists(): + files = list(app_dir.glob("*.json")) + total_ids += len(files) + + if total_ids > 0: + stateful_logger.info(f"Found {total_ids} existing processed ID files") + else: + stateful_logger.info("No existing processed ID files found") + except Exception as e: + stateful_logger.error(f"Failed to check for existing processed IDs: {e}") + + stateful_logger.info("Stateful management system initialization complete") + +# Initialize the stateful system on module import +initialize_stateful_system() diff --git a/Huntarr.io-6.3.6/src/primary/stateful_routes.py b/Huntarr.io-6.3.6/src/primary/stateful_routes.py new file mode 100644 index 0000000..a652131 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/stateful_routes.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python3 +""" +Stateful Management API Routes +Handles API endpoints for stateful management +""" + +from flask import Blueprint, jsonify, request, Response +import json +from src.primary.stateful_manager import ( + get_stateful_management_info, + reset_stateful_management, + update_lock_expiration +) +from src.primary.utils.logger import get_logger + +# Create logger +stateful_logger = get_logger("stateful") + +# Create blueprint +stateful_api = Blueprint('stateful_api', __name__) + +@stateful_api.route('/info', methods=['GET']) +def get_info(): + """Get stateful management information.""" + try: + info = get_stateful_management_info() + # Add CORS headers to allow access from frontend + response_data = { + "success": True, + "created_at_ts": info.get("created_at_ts"), + "expires_at_ts": 
info.get("expires_at_ts"), + "interval_hours": info.get("interval_hours") + } + response = Response(json.dumps(response_data)) + response.headers['Content-Type'] = 'application/json' + response.headers['Access-Control-Allow-Origin'] = '*' + return response + except Exception as e: + stateful_logger.error(f"Error getting stateful info: {e}") + # Return error response with proper headers + error_data = {"success": False, "message": f"Error getting stateful info: {str(e)}"} + response = Response(json.dumps(error_data), status=500) + response.headers['Content-Type'] = 'application/json' + response.headers['Access-Control-Allow-Origin'] = '*' + return response + +@stateful_api.route('/reset', methods=['POST']) +def reset_stateful(): + """Reset the stateful management system.""" + try: + success = reset_stateful_management() + if success: + # Add CORS headers to allow access from frontend + response = Response(json.dumps({"success": True, "message": "Stateful management reset successfully"})) + response.headers['Content-Type'] = 'application/json' + response.headers['Access-Control-Allow-Origin'] = '*' + return response + else: + # Add CORS headers to allow access from frontend + response = Response(json.dumps({"success": False, "message": "Failed to reset stateful management"}), status=500) + response.headers['Content-Type'] = 'application/json' + response.headers['Access-Control-Allow-Origin'] = '*' + return response + except Exception as e: + stateful_logger.error(f"Error resetting stateful management: {e}") + # Return error response with proper headers + error_data = {"error": str(e)} + response = Response(json.dumps(error_data), status=500) + response.headers['Content-Type'] = 'application/json' + response.headers['Access-Control-Allow-Origin'] = '*' + return response + +@stateful_api.route('/update-expiration', methods=['POST']) +def update_expiration(): + """Update the stateful management expiration time.""" + try: + hours = request.json.get('hours') + if hours is 
None or not isinstance(hours, int) or hours <= 0: + stateful_logger.error(f"Invalid hours value for update-expiration: {hours}") + # Return error response with proper headers + error_data = {"success": False, "message": f"Invalid hours value: {hours}. Must be a positive integer."} + response = Response(json.dumps(error_data), status=400) + response.headers['Content-Type'] = 'application/json' + response.headers['Access-Control-Allow-Origin'] = '*' + return response + + updated = update_lock_expiration(hours) + if updated: + # Get updated info + info = get_stateful_management_info() + # Add CORS headers to allow access from frontend + response_data = { + "success": True, + "message": f"Expiration updated to {hours} hours", + "expires_at": info.get("expires_at"), + "expires_date": info.get("expires_date") + } + response = Response(json.dumps(response_data)) + response.headers['Content-Type'] = 'application/json' + response.headers['Access-Control-Allow-Origin'] = '*' + return response + else: + # Add CORS headers to allow access from frontend + response = Response(json.dumps({"success": False, "message": "Failed to update expiration"}), status=500) + response.headers['Content-Type'] = 'application/json' + response.headers['Access-Control-Allow-Origin'] = '*' + return response + except Exception as e: + stateful_logger.error(f"Error updating expiration: {e}", exc_info=True) + # Return error response with proper headers + error_data = {"success": False, "message": f"Error updating expiration: {str(e)}"} + response = Response(json.dumps(error_data), status=500) + response.headers['Content-Type'] = 'application/json' + response.headers['Access-Control-Allow-Origin'] = '*' + return response diff --git a/Huntarr.io-6.3.6/src/primary/stats_manager.py b/Huntarr.io-6.3.6/src/primary/stats_manager.py new file mode 100644 index 0000000..51aff12 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/stats_manager.py @@ -0,0 +1,241 @@ +#!/usr/bin/env python3 +""" +Statistics Manager 
for Huntarr +Handles tracking, storing, and retrieving statistics about hunted and upgraded media +""" + +import os +import json +import time +import threading +from typing import Dict, Any, Optional +from src.primary.utils.logger import get_logger + +logger = get_logger("stats") + +# Path constants - Define multiple possible locations and check them in order +STATS_DIRS = [ + "/config/tally", # Docker default + os.path.join(os.path.expanduser("~"), ".huntarr/tally"), # User's home directory + os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), "data/tally") # Relative to script +] + +# Lock for thread-safe operations +stats_lock = threading.Lock() + +def find_writable_stats_dir(): + """Find a writable directory for stats from the list of candidates""" + for dir_path in STATS_DIRS: + try: + os.makedirs(dir_path, exist_ok=True) + test_file = os.path.join(dir_path, "write_test") + with open(test_file, 'w') as f: + f.write("test") + os.remove(test_file) + logger.info(f"Using stats directory: {dir_path}") + return dir_path + except (IOError, OSError) as e: + logger.warning(f"Directory {dir_path} is not writable: {e}") + continue + + # Fallback to current directory + fallback_dir = os.path.join(os.getcwd(), "tally") + try: + os.makedirs(fallback_dir, exist_ok=True) + logger.info(f"Falling back to current directory for stats: {fallback_dir}") + return fallback_dir + except Exception as e: + logger.error(f"Failed to create fallback stats directory: {e}") + return None + +# Find the best stats directory +STATS_DIR = find_writable_stats_dir() +STATS_FILE = os.path.join(STATS_DIR, "media_stats.json") if STATS_DIR else None + +# Log the stats file location once at module load time +if STATS_FILE: + logger.info(f"===> Stats will be stored at: {STATS_FILE}") +else: + logger.error("===> CRITICAL: No stats file location could be determined!") + +def ensure_stats_dir(): + """Ensure the statistics directory exists""" + if not STATS_DIR: + 
logger.error("No writable stats directory found") + return False + + try: + os.makedirs(STATS_DIR, exist_ok=True) + logger.debug(f"Stats directory ensured: {STATS_DIR}") + return True + except Exception as e: + logger.error(f"Failed to create stats directory: {e}") + return False + +def load_stats() -> Dict[str, Dict[str, int]]: + """ + Load statistics from the stats file + + Returns: + Dictionary containing statistics for each app + """ + if not ensure_stats_dir() or not STATS_FILE: + logger.error("Cannot load stats - no valid stats directory available") + return get_default_stats() + + default_stats = get_default_stats() + + try: + if os.path.exists(STATS_FILE): + logger.debug(f"Loading stats from: {STATS_FILE}") + with open(STATS_FILE, 'r') as f: + stats = json.load(f) + + # Ensure all apps are in the stats + for app in default_stats: + if app not in stats: + stats[app] = default_stats[app] + + logger.debug(f"Loaded stats: {stats}") + return stats + else: + logger.info(f"Stats file not found at {STATS_FILE}, using default stats") + return default_stats + except Exception as e: + logger.error(f"Error loading stats from {STATS_FILE}: {e}") + return default_stats + +def get_default_stats() -> Dict[str, Dict[str, int]]: + """Get the default stats structure""" + return { + "sonarr": {"hunted": 0, "upgraded": 0}, + "radarr": {"hunted": 0, "upgraded": 0}, + "lidarr": {"hunted": 0, "upgraded": 0}, + "readarr": {"hunted": 0, "upgraded": 0}, + "whisparr": {"hunted": 0, "upgraded": 0}, + "eros": {"hunted": 0, "upgraded": 0}, + "swaparr": {"hunted": 0, "upgraded": 0} + } + +def save_stats(stats: Dict[str, Dict[str, int]]) -> bool: + """ + Save statistics to the stats file + + Args: + stats: Dictionary containing statistics for each app + + Returns: + True if successful, False otherwise + """ + if not ensure_stats_dir() or not STATS_FILE: + logger.error("Cannot save stats - no valid stats directory available") + return False + + try: + logger.debug(f"Saving stats to: 
{STATS_FILE}") + # First write to a temp file, then move it to avoid partial writes + temp_file = f"{STATS_FILE}.tmp" + with open(temp_file, 'w') as f: + json.dump(stats, f, indent=2) + f.flush() + os.fsync(f.fileno()) + + # Move the temp file to the actual file + os.replace(temp_file, STATS_FILE) + + logger.info(f"===> Successfully wrote stats to file: {STATS_FILE}") + logger.debug(f"Stats saved successfully: {stats}") + return True + except Exception as e: + logger.error(f"Error saving stats to {STATS_FILE}: {e}", exc_info=True) + return False + +def increment_stat(app_type: str, stat_type: str, count: int = 1) -> bool: + """ + Increment a specific statistic + + Args: + app_type: The application type (sonarr, radarr, etc.) + stat_type: The type of statistic (hunted or upgraded) + count: The amount to increment by (default: 1) + + Returns: + True if successful, False otherwise + """ + if app_type not in ["sonarr", "radarr", "lidarr", "readarr", "whisparr", "eros", "swaparr"]: + logger.error(f"Invalid app_type: {app_type}") + return False + + if stat_type not in ["hunted", "upgraded"]: + logger.error(f"Invalid stat_type: {stat_type}") + return False + + with stats_lock: + stats = load_stats() + prev_value = stats[app_type][stat_type] + stats[app_type][stat_type] += count + new_value = stats[app_type][stat_type] + logger.info(f"*** STATS INCREMENT *** {app_type} {stat_type} by {count}: {prev_value} -> {new_value}") + save_success = save_stats(stats) + + if not save_success: + logger.error(f"Failed to save stats after incrementing {app_type} {stat_type}") + return False + + # Add debug verification that stats were actually saved + verification_stats = load_stats() + if verification_stats[app_type][stat_type] != new_value: + logger.error(f"Stats verification failed! 
Expected {new_value} but got {verification_stats[app_type][stat_type]} for {app_type} {stat_type}") + return False + + logger.info(f"Successfully incremented and verified {app_type} {stat_type}") + return True + +def get_stats() -> Dict[str, Dict[str, int]]: + """ + Get the current statistics + + Returns: + Dictionary containing statistics for each app + """ + with stats_lock: + stats = load_stats() + logger.debug(f"Retrieved stats: {stats}") + return stats + +def reset_stats(app_type: Optional[str] = None) -> bool: + """ + Reset statistics for a specific app or all apps + + Args: + app_type: The application type to reset, or None to reset all + + Returns: + True if successful, False otherwise + """ + with stats_lock: + stats = load_stats() + + if app_type is None: + # Reset all stats + logger.info("Resetting all app statistics") + for app in stats: + stats[app]["hunted"] = 0 + stats[app]["upgraded"] = 0 + elif app_type in stats: + # Reset specific app stats + logger.info(f"Resetting statistics for {app_type}") + stats[app_type]["hunted"] = 0 + stats[app_type]["upgraded"] = 0 + else: + logger.error(f"Invalid app_type for reset: {app_type}") + return False + + return save_stats(stats) + +# Initialize stats file with find_writable_stats_dir already called during import +if STATS_DIR and not os.path.exists(STATS_FILE): + logger.info(f"Creating new stats file at: {STATS_FILE}") + save_stats(get_default_stats()) +else: + logger.debug(f"Stats system initialized. 
Using file: {STATS_FILE}") \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/utils/__init__.py b/Huntarr.io-6.3.6/src/primary/utils/__init__.py new file mode 100644 index 0000000..1ec4cca --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/utils/__init__.py @@ -0,0 +1,7 @@ +""" +Utility functions for Huntarr +""" + +from src.primary.utils.logger import logger, debug_log + +__all__ = ['logger', 'debug_log'] \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/utils/app_utils.py b/Huntarr.io-6.3.6/src/primary/utils/app_utils.py new file mode 100644 index 0000000..3e29f6f --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/utils/app_utils.py @@ -0,0 +1,24 @@ +import socket +from urllib.parse import urlparse +from src.primary.config import API_URL + +def get_ip_address(): + try: + parsed_url = urlparse(API_URL) + hostname = parsed_url.netloc + if ':' in hostname: + hostname = hostname.split(':')[0] + return hostname + except Exception: + try: + hostname = socket.gethostname() + ip = socket.gethostbyname(hostname) + return ip + except: + return "localhost" + +def write_log(log_file, message): + from datetime import datetime + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + with open(log_file, 'a') as f: + f.write(f"{timestamp} - {message}\n") diff --git a/Huntarr.io-6.3.6/src/primary/utils/history_utils.py b/Huntarr.io-6.3.6/src/primary/utils/history_utils.py new file mode 100644 index 0000000..c0bc4be --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/utils/history_utils.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 + +from src.primary.history_manager import add_history_entry +from src.primary.utils.logger import get_logger + +logger = get_logger("history") + +def log_processed_media(app_type, media_name, media_id, instance_name, operation_type="missing"): + """ + Log when media is processed by an app instance + + Parameters: + - app_type: str - The app type (sonarr, radarr, etc) + - media_name: str - Name of the processed media + 
- media_id: str/int - ID of the processed media + - instance_name: str - Name of the instance that processed it + - operation_type: str - Type of operation ("missing" or "upgrade") + + Returns: + - bool - Success or failure + """ + try: + logger.debug(f"Logging history entry for {app_type} - {instance_name}: '{media_name}' (ID: {media_id})") + + entry_data = { + "name": media_name, + "id": str(media_id), + "instance_name": instance_name, + "operation_type": operation_type + } + + result = add_history_entry(app_type, entry_data) + if result: + logger.info(f"Logged history entry for {app_type} - {instance_name}: {media_name} ({operation_type})") + return True + else: + logger.error(f"Failed to log history entry for {app_type} - {instance_name}: {media_name}") + return False + except Exception as e: + logger.error(f"Error logging history entry: {str(e)}") + return False diff --git a/Huntarr.io-6.3.6/src/primary/utils/log_handler.py b/Huntarr.io-6.3.6/src/primary/utils/log_handler.py new file mode 100644 index 0000000..b6da47b --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/utils/log_handler.py @@ -0,0 +1,37 @@ +import re +import logging + +class WebUrlFilter(logging.Filter): + """Filter out web URLs from log messages""" + + def filter(self, record): + if not hasattr(record, 'msg'): + return True + + if isinstance(record.msg, str): + # Filter out web interface messages + if "Web interface available at http://" in record.msg: + return False + + # Redact URLs if they need to appear in logs + record.msg = re.sub( + r'(http|https)://[^\s<>"]+', + '[REDACTED URL]', + record.msg + ) + + return True + +# Add this filter to the existing loggers +def apply_log_filters(): + """Apply web URL filters to all loggers""" + web_filter = WebUrlFilter() + + # Apply to root logger + for handler in logging.root.handlers: + handler.addFilter(web_filter) + + # Apply to huntarr logger + huntarr_logger = logging.getLogger('huntarr') + for handler in huntarr_logger.handlers: + 
handler.addFilter(web_filter) diff --git a/Huntarr.io-6.3.6/src/primary/utils/logger.py b/Huntarr.io-6.3.6/src/primary/utils/logger.py new file mode 100644 index 0000000..e680a6e --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/utils/logger.py @@ -0,0 +1,230 @@ +#!/usr/bin/env python3 +""" +Logging configuration for Huntarr +Supports separate log files for each application type +""" + +import logging +import sys +import os +import pathlib +from typing import Dict, Optional + +# Create log directory +LOG_DIR = pathlib.Path("/config/logs") # Changed path +LOG_DIR.mkdir(parents=True, exist_ok=True) + +# Default log file for general messages +MAIN_LOG_FILE = LOG_DIR / "huntarr.log" + +# App-specific log files +APP_LOG_FILES = { + "sonarr": LOG_DIR / "sonarr.log", # Updated filename + "radarr": LOG_DIR / "radarr.log", # Updated filename + "lidarr": LOG_DIR / "lidarr.log", # Updated filename + "readarr": LOG_DIR / "readarr.log", # Updated filename + "whisparr": LOG_DIR / "whisparr.log", # Added Whisparr + "eros": LOG_DIR / "eros.log", # Added Eros for Whisparr V3 + "swaparr": LOG_DIR / "swaparr.log" # Added Swaparr +} + +# Global logger instances +logger: Optional[logging.Logger] = None +app_loggers: Dict[str, logging.Logger] = {} + +def setup_main_logger(debug_mode=None): + """Set up the main Huntarr logger.""" + global logger + log_name = "huntarr" + log_file = MAIN_LOG_FILE + + # Determine debug mode safely + use_debug_mode = False + if debug_mode is None: + try: + # Use the get_debug_mode function to check general settings + from src.primary.config import get_debug_mode + use_debug_mode = get_debug_mode() + except (ImportError, AttributeError): + pass # Default to False + else: + use_debug_mode = debug_mode + + # Get or create the main logger instance + current_logger = logging.getLogger(log_name) + + # Reset handlers each time setup is called to avoid duplicates + # This is important if setup might be called again (e.g., config reload) + for handler in 
current_logger.handlers[:]: + current_logger.removeHandler(handler) + + current_logger.propagate = False # Prevent propagation to root logger + current_logger.setLevel(logging.DEBUG if use_debug_mode else logging.INFO) + + # Create console handler + console_handler = logging.StreamHandler(sys.stdout) + console_handler.setLevel(logging.DEBUG if use_debug_mode else logging.INFO) + + # Create file handler + file_handler = logging.FileHandler(log_file) + file_handler.setLevel(logging.DEBUG if use_debug_mode else logging.INFO) + + # Set format for the main logger + log_format = "%(asctime)s - huntarr - %(levelname)s - %(message)s" + formatter = logging.Formatter(log_format, datefmt="%Y-%m-%d %H:%M:%S") + console_handler.setFormatter(formatter) + file_handler.setFormatter(formatter) + + # Add handlers to the main logger + current_logger.addHandler(console_handler) + current_logger.addHandler(file_handler) + + if use_debug_mode: + current_logger.debug("Debug logging enabled for main logger") + + logger = current_logger # Assign to the global variable + return current_logger + +def get_logger(app_type: str) -> logging.Logger: + """ + Get or create a logger for a specific app type. + + Args: + app_type: The app type (e.g., 'sonarr', 'radarr'). + + Returns: + A logger specific to the app type, or the main logger if app_type is invalid. 
+ """ + if app_type not in APP_LOG_FILES: + # Fallback to main logger if the app type is not recognized + global logger + if logger is None: + # Ensure main logger is initialized if accessed before module-level setup + setup_main_logger() + # We checked logger is not None, so we can assert its type + assert logger is not None + return logger + + log_name = f"huntarr.{app_type}" + if log_name in app_loggers: + # Return cached logger instance + return app_loggers[log_name] + + # If not cached, set up a new logger for this app type + app_logger = logging.getLogger(log_name) + + # Prevent propagation to the main 'huntarr' logger or root logger + app_logger.propagate = False + + # Determine debug mode setting safely + try: + from src.primary.config import get_debug_mode + debug_mode = get_debug_mode() + except ImportError: + debug_mode = False + + app_logger.setLevel(logging.DEBUG if debug_mode else logging.INFO) + + # Reset handlers in case this logger existed before but wasn't cached + # (e.g., across restarts without clearing logging._handlers) + for handler in app_logger.handlers[:]: + app_logger.removeHandler(handler) + + # Create console handler + console_handler = logging.StreamHandler(sys.stdout) + console_handler.setLevel(logging.DEBUG if debug_mode else logging.INFO) + + # Create file handler for the specific app log file + log_file = APP_LOG_FILES[app_type] + file_handler = logging.FileHandler(log_file) + file_handler.setLevel(logging.DEBUG if debug_mode else logging.INFO) + + # Set a distinct format for this app log + log_format = f"%(asctime)s - huntarr.{app_type} - %(levelname)s - %(message)s" + formatter = logging.Formatter(log_format, datefmt="%Y-%m-%d %H:%M:%S") + + console_handler.setFormatter(formatter) + file_handler.setFormatter(formatter) + + # Add the handlers specific to this app logger + app_logger.addHandler(console_handler) + app_logger.addHandler(file_handler) + + # Cache the configured logger + app_loggers[log_name] = app_logger + + if 
debug_mode: + app_logger.debug(f"Debug logging enabled for {app_type} logger") + + return app_logger + +def update_logging_levels(debug_mode=None): + """ + Update all logger levels based on the current debug mode setting. + Call this after settings are changed in the UI to apply changes immediately. + + Args: + debug_mode: Force a specific debug mode, or None to read from settings + """ + # Determine debug mode from settings if not specified + if debug_mode is None: + try: + from src.primary.config import get_debug_mode + debug_mode = get_debug_mode() + except (ImportError, AttributeError): + debug_mode = False + + # Set level for main logger + level = logging.DEBUG if debug_mode else logging.INFO + if logger: + logger.setLevel(level) + for handler in logger.handlers: + handler.setLevel(level) + + # Set level for all app loggers + for app_type, app_logger in app_loggers.items(): + app_logger.setLevel(level) + for handler in app_logger.handlers: + handler.setLevel(level) + + # Set root logger level too + root_logger = logging.getLogger() + root_logger.setLevel(level) + for handler in root_logger.handlers: + handler.setLevel(level) + + # Force Python's logging module to respect the log level for all existing loggers + for name, logger_instance in logging.Logger.manager.loggerDict.items(): + if isinstance(logger_instance, logging.Logger): + logger_instance.setLevel(level) + + return debug_mode + +def debug_log(message: str, data: object = None, app_type: Optional[str] = None) -> None: + """ + Log debug messages with optional data. + + Args: + message: The message to log. + data: Optional data to include with the message. + app_type: Optional app type to log to a specific app's log file. + """ + current_logger = get_logger(app_type) if app_type else logger + + if current_logger.level <= logging.DEBUG: + current_logger.debug(f"{message}") + if data is not None: + try: + import json + as_json = json.dumps(data) + if len(as_json) > 500: + as_json = as_json[:500] + "..." 
+ current_logger.debug(as_json) + except: + data_str = str(data) + if len(data_str) > 500: + data_str = data_str[:500] + "..." + current_logger.debug(data_str) + +# Initialize the main logger instance when the module is imported +logger = setup_main_logger() \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/utils/logging_config.py b/Huntarr.io-6.3.6/src/primary/utils/logging_config.py new file mode 100644 index 0000000..e6b388c --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/utils/logging_config.py @@ -0,0 +1,33 @@ +import logging + +class SensitiveInfoFilter(logging.Filter): + """Filter out sensitive information from logs""" + def filter(self, record): + message = record.getMessage() + # Filter out web interface URLs + if "Web interface available at http://" in message: + return False + # Add more filters as needed + return True + +def configure_logging(level=logging.INFO): + """Configure logging with filters for sensitive information""" + # Basic config + logging.basicConfig( + level=level, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', + datefmt='%Y-%m-%d %H:%M:%S' + ) + + # Add the filter to all handlers + for handler in logging.root.handlers: + handler.addFilter(SensitiveInfoFilter()) + + # Individual loggers can also be configured here + logger = logging.getLogger('huntarr') + logger.setLevel(level) + + for handler in logger.handlers: + handler.addFilter(SensitiveInfoFilter()) + + return logger diff --git a/Huntarr.io-6.3.6/src/primary/utils/migrate_settings.py b/Huntarr.io-6.3.6/src/primary/utils/migrate_settings.py new file mode 100644 index 0000000..5afb3fd --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/utils/migrate_settings.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 +""" +Settings migration utility for Huntarr +Migrates settings from nested structure to flat structure +""" + +import os +import json +import pathlib +import logging + +# Create logger +logging.basicConfig(level=logging.INFO) +logger = 
logging.getLogger("settings_migration") + +# Settings file path +SETTINGS_DIR = pathlib.Path("/config") +SETTINGS_FILE = SETTINGS_DIR / "huntarr.json" + +def migrate_settings(): + """Migrate settings from nested to flat structure""" + logger.info("Starting settings migration...") + + if not SETTINGS_FILE.exists(): + logger.info(f"Settings file {SETTINGS_FILE} does not exist, nothing to migrate.") + return + + try: + # Read current settings + with open(SETTINGS_FILE, "r", encoding="utf-8") as file: + settings = json.load(file) + + # Flag to track if changes were made + changes_made = False + + # Check and migrate each app's settings + for app in ["sonarr", "radarr", "lidarr", "readarr"]: + if app in settings and "huntarr" in settings[app]: + logger.info(f"Found nested huntarr section in {app}, migrating...") + + # Move all settings from app.huntarr to app level + for key, value in settings[app]["huntarr"].items(): + if key not in settings[app]: + settings[app][key] = value + logger.info(f"Moved {app}.huntarr.{key} to {app}.{key}") + + # Remove the huntarr section + del settings[app]["huntarr"] + logger.info(f"Removed {app}.huntarr section") + changes_made = True + + # Check for advanced section + if app in settings and "advanced" in settings[app]: + logger.info(f"Found advanced section in {app}, migrating...") + + # Move all settings from app.advanced to app level + for key, value in settings[app]["advanced"].items(): + if key not in settings[app]: + settings[app][key] = value + logger.info(f"Moved {app}.advanced.{key} to {app}.{key}") + + # Remove the advanced section + del settings[app]["advanced"] + logger.info(f"Removed {app}.advanced section") + changes_made = True + + # Save changes if needed + if changes_made: + with open(SETTINGS_FILE, "w", encoding="utf-8") as file: + json.dump(settings, file, indent=2) + logger.info("Settings migration completed successfully.") + else: + logger.info("No changes needed, settings are already in the correct format.") + + 
except Exception as e: + logger.error(f"Error migrating settings: {e}") + +if __name__ == "__main__": + migrate_settings() diff --git a/Huntarr.io-6.3.6/src/primary/web_server.py b/Huntarr.io-6.3.6/src/primary/web_server.py new file mode 100644 index 0000000..5181ccf --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/web_server.py @@ -0,0 +1,887 @@ +#!/usr/bin/env python3 +""" +Web server for Huntarr +Provides a web interface to view logs in real-time, manage settings, and includes authentication +""" + +import os +import datetime +import time +from threading import Lock +from primary.utils.logger import LOG_DIR, APP_LOG_FILES, MAIN_LOG_FILE # Import log constants +from primary import settings_manager # Import settings_manager +from src.primary.stateful_manager import update_lock_expiration # Import stateful update function + +# import socket # No longer used +import json +# import signal # No longer used for reload +import sys +import qrcode +import pyotp +import base64 +import io +# import requests # No longer used +import logging +import threading +import importlib # Added import +from flask import Flask, render_template, request, jsonify, Response, send_from_directory, redirect, url_for, session, stream_with_context # Added stream_with_context +# from src.primary.config import API_URL # No longer needed directly +# Use only settings_manager +from src.primary import settings_manager +from src.primary.utils.logger import setup_main_logger, get_logger, LOG_DIR, update_logging_levels # Import get_logger, LOG_DIR, and update_logging_levels +from src.primary.auth import ( + authenticate_request, user_exists, create_user, verify_user, create_session, + logout, SESSION_COOKIE_NAME, is_2fa_enabled, generate_2fa_secret, + verify_2fa_code, disable_2fa, change_username, change_password +) +# Import blueprint for common routes +from src.primary.routes.common import common_bp + +# Import blueprints for each app from the centralized blueprints module +from 
src.primary.apps.blueprints import sonarr_bp, radarr_bp, lidarr_bp, readarr_bp, whisparr_bp, swaparr_bp, eros_bp + +# Import stateful blueprint +from src.primary.stateful_routes import stateful_api + +# Import history blueprint +from src.primary.routes.history_routes import history_blueprint + +# Import background module to trigger manual cycle resets +from src.primary import background + +# Disable Flask default logging +log = logging.getLogger('werkzeug') +log.setLevel(logging.DEBUG) # Change to DEBUG to see all Flask/Werkzeug logs + +# Configure template and static paths with proper PyInstaller support +# Check if we're running from a PyInstaller bundle +print("==== HUNTARR TEMPLATE DEBUG ====") +print(f"__file__: {__file__}") +print(f"sys.executable: {sys.executable}") +print(f"os.getcwd(): {os.getcwd()}") +print(f"sys.path: {sys.path}") +print(f"Is frozen: {getattr(sys, 'frozen', False)}") + +if getattr(sys, 'frozen', False): + # We're running from the bundled package + bundle_dir = os.path.dirname(sys.executable) + # Override the template and static directories + template_dir = os.path.join(bundle_dir, 'templates') + static_dir = os.path.join(bundle_dir, 'static') + print(f"PyInstaller mode - Using templates dir: {template_dir}") + print(f"PyInstaller mode - Using static dir: {static_dir}") + print(f"Template dir exists: {os.path.exists(template_dir)}") + if os.path.exists(template_dir): + print(f"Template dir contents: {os.listdir(template_dir)}") +else: + # Normal development mode - use relative paths + template_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'frontend', 'templates')) + static_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'frontend', 'static')) + print(f"Normal mode - Using templates dir: {template_dir}") + print(f"Normal mode - Using static dir: {static_dir}") + print(f"Template dir exists: {os.path.exists(template_dir)}") + if os.path.exists(template_dir): + print(f"Template dir 
contents: {os.listdir(template_dir)}") + +# Create Flask app with additional debug logging +app = Flask(__name__, template_folder=template_dir, static_folder=static_dir) +print(f"Flask app created with template_folder: {app.template_folder}") +print(f"Flask app created with static_folder: {app.static_folder}") + +# Add debug logging for template rendering +def debug_template_rendering(): + """Additional logging for Flask template rendering""" + app.jinja_env.auto_reload = True + orig_get_source = app.jinja_env.loader.get_source + + def get_source_wrapper(environment, template): + try: + result = orig_get_source(environment, template) + print(f"Template loaded successfully: {template}") + return result + except Exception as e: + print(f"Error loading template {template}: {e}") + print(f"Loader search paths: {environment.loader.searchpath}") + # Print all available templates + try: + all_templates = environment.loader.list_templates() + print(f"Available templates: {all_templates}") + except: + print("Could not list available templates") + raise + + app.jinja_env.loader.get_source = get_source_wrapper + +debug_template_rendering() + +app.secret_key = os.environ.get('SECRET_KEY', 'dev_key_for_sessions') + +# Register blueprints +app.register_blueprint(common_bp) +app.register_blueprint(sonarr_bp, url_prefix='/api/sonarr') +app.register_blueprint(radarr_bp, url_prefix='/api/radarr') +app.register_blueprint(lidarr_bp, url_prefix='/api/lidarr') +app.register_blueprint(readarr_bp, url_prefix='/api/readarr') +app.register_blueprint(whisparr_bp, url_prefix='/api/whisparr') +app.register_blueprint(eros_bp, url_prefix='/api/eros') +app.register_blueprint(swaparr_bp, url_prefix='/api/swaparr') +app.register_blueprint(stateful_api, url_prefix='/api/stateful') +app.register_blueprint(history_blueprint, url_prefix='/api/history') + +# Register the authentication check to run before requests +app.before_request(authenticate_request) + +# Removed MAIN_PID and signal-related code + 
+# Lock for accessing the log files +log_lock = Lock() + +# Define known log files based on logger config +KNOWN_LOG_FILES = { + "sonarr": APP_LOG_FILES.get("sonarr"), + "radarr": APP_LOG_FILES.get("radarr"), + "lidarr": APP_LOG_FILES.get("lidarr"), + "readarr": APP_LOG_FILES.get("readarr"), + "whisparr": APP_LOG_FILES.get("whisparr"), + "eros": APP_LOG_FILES.get("eros"), # Added Eros to known log files + "swaparr": APP_LOG_FILES.get("swaparr"), # Added Swaparr to known log files + "system": MAIN_LOG_FILE, # Map 'system' to the main huntarr log +} +# Filter out None values if an app log file doesn't exist +KNOWN_LOG_FILES = {k: v for k, v in KNOWN_LOG_FILES.items() if v} + +ALL_APP_LOG_FILES = list(KNOWN_LOG_FILES.values()) # List of all individual log file paths + +@app.route('/') +def home(): + return render_template('index.html') + +@app.route('/user') +def user(): + # User account screen + return render_template('user.html') + +# Removed /settings and /logs routes if handled by index.html and JS routing +# Keep /logs if it's the actual SSE endpoint + +@app.route('/logs') +def logs_stream(): + """ + Event stream for logs. + Filter logs by app type using the 'app' query parameter. + Supports 'all', 'system', 'sonarr', 'radarr', 'lidarr', 'readarr'. + Example: /logs?app=sonarr + """ + app_type = request.args.get('app', 'all') # Default to 'all' if no app specified + web_logger = get_logger("web_server") + + valid_app_types = list(KNOWN_LOG_FILES.keys()) + ['all'] + if app_type not in valid_app_types: + web_logger.warning(f"Invalid app type '{app_type}' requested for logs. 
Defaulting to 'all'.") + app_type = 'all' + + # Import needed modules + import time + from pathlib import Path + import threading + import datetime # Added datetime import + import time # Add time module import + + # Use a client identifier to track connections + # Use request.remote_addr directly for client_id + client_id = request.remote_addr + current_time_str = datetime.datetime.now().strftime("%H:%M:%S") # Renamed variable + + web_logger.info(f"Starting log stream for app type: {app_type} (client: {client_id}, time: {current_time_str})") + + # Track active connections to limit resource usage + if not hasattr(app, 'active_log_streams'): + app.active_log_streams = {} + app.log_stream_lock = threading.Lock() + + # Clean up stale connections (older than 60 seconds without activity) + with app.log_stream_lock: + current_time = time.time() + stale_clients = [c for c, t in app.active_log_streams.items() + if current_time - t > 60] + for client in stale_clients: + # Check if client exists before popping, avoid KeyError + if client in app.active_log_streams: + app.active_log_streams.pop(client) + web_logger.debug(f"Removed stale log stream connection for client: {client}") + + # If too many connections, return an error for new ones + # Increased limit slightly and check before adding the new client + MAX_LOG_CONNECTIONS = 10 # Define as constant + if len(app.active_log_streams) >= MAX_LOG_CONNECTIONS: + web_logger.warning(f"Too many log stream connections ({len(app.active_log_streams)}). Rejecting new connection from {client_id}") + # Send SSE formatted error message + return Response("event: error\ndata: Too many active connections. Please try again later.\n\n", + mimetype='text/event-stream', status=429) # Use 429 status code + + # Add/Update this client's timestamp *after* checking the limit + app.active_log_streams[client_id] = current_time + web_logger.debug(f"Active log streams: {len(app.active_log_streams)} clients. 
Added/Updated: {client_id}") + + + def generate(): + """Generate log events for the SSE stream.""" + client_ip = request.remote_addr + web_logger.info(f"Log stream generator started for {app_type} (Client: {client_ip})") + try: + # Initialize last activity time + last_activity = time.time() + + # Determine which log files to follow + log_files_to_follow = [] + if app_type == 'all': + # Follow all log files for 'all' type + log_files_to_follow = list(KNOWN_LOG_FILES.items()) + web_logger.debug(f"Following all log files for 'all' type") + elif app_type == 'system': + # For system, only follow main log + system_log = KNOWN_LOG_FILES.get('system') + if system_log: + log_files_to_follow = [('system', system_log)] + web_logger.debug(f"Following system log: {system_log}") + else: + # For specific app, follow that app's log + app_log = KNOWN_LOG_FILES.get(app_type) + if app_log: + log_files_to_follow = [(app_type, app_log)] + web_logger.debug(f"Following {app_type} log: {app_log}") + + # Also include system log for related messages + system_log = KNOWN_LOG_FILES.get('system') + if system_log: + log_files_to_follow.append(('system', system_log)) + web_logger.debug(f"Also following system log for {app_type} messages") + + if not log_files_to_follow: + web_logger.warning(f"No log files found for app type: {app_type}") + yield f"data: No logs available for {app_type}\n\n" + return + + # Send confirmation + yield f"data: Starting log stream for {app_type}...\n\n" + web_logger.debug(f"Sent confirmation for {app_type} (Client: {client_ip})") + + # Track file positions + positions = {} + last_check = {} + keep_alive_counter = 0 + + # Convert to Path objects + log_files_to_follow = [(name, Path(path) if isinstance(path, str) else path) + for name, path in log_files_to_follow if path] + + # Main streaming loop + while True: + had_content = False + current_time = time.time() + + # Update client activity + if current_time - last_activity > 10: + with app.log_stream_lock: + if 
client_id in app.active_log_streams: + app.active_log_streams[client_id] = current_time + else: + web_logger.warning(f"Client {client_id} gone. Stopping generator.") + break + last_activity = current_time + + keep_alive_counter += 1 + + # Check each file + for name, path in log_files_to_follow: + try: + # Limit check frequency + now = datetime.datetime.now() + if name in last_check and (now - last_check[name]).total_seconds() < 0.2: + continue + + last_check[name] = now + + # Check file exists + if not path.exists(): + if positions.get(name) != -1: + web_logger.warning(f"Log file {path} not found. Skipping.") + positions[name] = -1 + continue + elif positions.get(name) == -1: + web_logger.info(f"Log file {path} found again. Resuming.") + positions.pop(name, None) + + # Get size + try: + current_size = path.stat().st_size + except FileNotFoundError: + web_logger.warning(f"Log file {path} disappeared. Skipping.") + positions[name] = -1 + continue + + # Init position or handle truncation + if name not in positions or current_size < positions.get(name, 0): + start_pos = max(0, current_size - 5120) + web_logger.debug(f"Init position for {name}: {start_pos}") + positions[name] = start_pos + + # Read content + try: + with open(path, 'r', encoding='utf-8', errors='ignore') as f: + f.seek(positions[name]) + new_lines = [] + lines_read = 0 + max_lines = 100 + + while lines_read < max_lines: + line = f.readline() + if not line: + break + + # Only filter when reading system log for specific app tab + if app_type != 'all' and app_type != 'system' and name == 'system': + # MODIFIED: Don't include system logs in app tabs at all + include_line = False + else: + include_line = True + + if include_line: + new_lines.append(line) + + lines_read += 1 + + # Process collected lines + if new_lines: + had_content = True + positions[name] = f.tell() + for line in new_lines: + stripped = line.strip() + if stripped: + prefix = f"[{name.upper()}] " if app_type == 'all' else "" + yield f"data: 
{prefix}{stripped}\n\n" + + except FileNotFoundError: + web_logger.warning(f"Log file {path} disappeared during read.") + positions[name] = -1 + except Exception as e: + web_logger.error(f"Error reading {path}: {e}") + yield f"data: ERROR: Problem reading log: {str(e)}\n\n" + + except Exception as e: + web_logger.error(f"Error processing {name}: {e}") + yield f"data: ERROR: Unexpected issue with log.\n\n" + + # Keep-alive or sleep + if not had_content: + if keep_alive_counter >= 75: + yield f": keepalive {time.time()}\n\n" + keep_alive_counter = 0 + time.sleep(0.2) + else: + keep_alive_counter = 0 + time.sleep(0.05) + + except GeneratorExit: + # Clean up when client disconnects + web_logger.info(f"Client {client_id} disconnected from log stream for {app_type}. Cleaning up.") + except Exception as e: + web_logger.error(f"Unhandled error in log stream generator for {app_type} (Client: {client_ip}): {e}", exc_info=True) + try: + # Ensure error message is properly formatted for SSE + yield f"event: error\ndata: ERROR: Log streaming failed unexpectedly: {str(e)}\n\n" + except Exception as yield_err: + web_logger.error(f"Error yielding final error message to client {client_id}: {yield_err}") + finally: + # Ensure cleanup happens regardless of how the generator exits + with app.log_stream_lock: + removed_client = app.active_log_streams.pop(client_id, None) + if removed_client: + web_logger.info(f"Successfully removed client {client_id} from active log streams.") + else: + web_logger.warning(f"Client {client_id} was already removed from active log streams before finally block.") + web_logger.info(f"Log stream generator finished for {app_type} (Client: {client_id})") + + # Return the SSE response with appropriate headers for better streaming + response = Response(stream_with_context(generate()), mimetype='text/event-stream') # Use stream_with_context + response.headers['Cache-Control'] = 'no-cache' + response.headers['X-Accel-Buffering'] = 'no' # Disable nginx buffering if 
using nginx + return response + +@app.route('/api/settings', methods=['GET']) +def api_settings(): + if request.method == 'GET': + # Return all settings using the new manager function + all_settings = settings_manager.get_all_settings() # Corrected function name + return jsonify(all_settings) + +@app.route('/api/settings/general', methods=['POST']) +def save_general_settings(): + general_logger = get_logger("web_server") + general_logger.info("Received request to save general settings.") + + # Make sure we have data + if not request.is_json: + return jsonify({"success": False, "error": "Expected JSON data"}), 400 + + data = request.json + + # Save general settings + success = settings_manager.save_settings('general', data) + + if success: + # Update expiration timing from general settings if applicable + try: + new_hours = int(data.get('stateful_management_hours')) + if new_hours > 0: + general_logger.info(f"Updating stateful expiration to {new_hours} hours.") + update_lock_expiration(hours=new_hours) + except (ValueError, TypeError, KeyError): + # Don't update if the value wasn't provided or is invalid + pass + except Exception as e: + general_logger.error(f"Error updating expiration timing: {e}") + + # Update logging levels immediately when general settings are changed + update_logging_levels() + + # Return all settings + return jsonify(settings_manager.get_all_settings()) + else: + return jsonify({"success": False, "error": "Failed to save general settings"}), 500 + +@app.route('/api/settings/', methods=['GET', 'POST']) +def handle_app_settings(app_name): + web_logger = get_logger("web_server") + + # Validate app_name + if app_name not in settings_manager.KNOWN_APP_TYPES: + return jsonify({"success": False, "error": f"Unknown application type: {app_name}"}), 400 + + if request.method == 'GET': + # Return settings for the specific app + app_settings = settings_manager.load_settings(app_name) + return jsonify(app_settings) + + elif request.method == 'POST': + # 
Make sure we have data + if not request.is_json: + return jsonify({"success": False, "error": "Expected JSON data"}), 400 + + data = request.json + web_logger.debug(f"Received {app_name} settings save request: {data}") + + # Save the app settings + success = settings_manager.save_settings(app_name, data) + + if success: + web_logger.info(f"Successfully saved {app_name} settings") + return jsonify({"success": True}) + else: + web_logger.error(f"Failed to save {app_name} settings") + return jsonify({"success": False, "error": f"Failed to save {app_name} settings"}), 500 + +@app.route('/api/settings/theme', methods=['GET', 'POST']) +def api_theme(): + # Theme settings are handled separately, potentially in /config/ui.json + if request.method == 'GET': + dark_mode = settings_manager.get_setting("ui", "dark_mode", False) + return jsonify({"dark_mode": dark_mode}) + elif request.method == 'POST': + data = request.json + dark_mode = data.get('dark_mode', False) + success = settings_manager.update_setting("ui", "dark_mode", dark_mode) + return jsonify({"success": success}) + +@app.route('/api/settings/reset', methods=['POST']) +def api_reset_settings(): + data = request.json + app_name = data.get('app') + web_logger = get_logger("web_server") + + if not app_name or app_name not in settings_manager.KNOWN_APP_TYPES: # Corrected attribute name + return jsonify({"success": False, "error": f"Invalid or missing app name: {app_name}"}), 400 + + web_logger.info(f"Resetting settings for {app_name} to defaults.") + # Load default settings for the app + default_settings = settings_manager.load_default_app_settings(app_name) + + if not default_settings: + return jsonify({"success": False, "error": f"Could not load default settings for {app_name}"}), 500 + + # Save the default settings, overwriting the current ones + success = settings_manager.save_settings(app_name, default_settings) # Corrected function name + + if success: + # Return the full updated config after reset + 
all_settings = settings_manager.get_all_settings() # Corrected function name + return jsonify(all_settings) + else: + return jsonify({"success": False, "error": f"Failed to save reset settings for {app_name}"}), 500 + +@app.route('/api/app-settings', methods=['GET']) +def api_app_settings(): + app_type = request.args.get('app') + if not app_type or app_type not in settings_manager.KNOWN_APP_TYPES: # Corrected attribute name + return jsonify({"success": False, "error": f"Invalid or missing app type: {app_type}"}), 400 + + # Get API credentials using the updated settings_manager function + # api_details = settings_manager.get_api_details(app_type) # Function does not exist + api_url = settings_manager.get_api_url(app_type) + api_key = settings_manager.get_api_key(app_type) + api_details = {"api_url": api_url, "api_key": api_key} + return jsonify({"success": True, **api_details}) + +@app.route('/api/configured-apps', methods=['GET']) +def api_configured_apps(): + # Return the configured status of all apps using the updated settings_manager function + configured_apps_list = settings_manager.get_configured_apps() # Corrected function name + # Convert list to dict format expected by frontend + configured_status = {app: (app in configured_apps_list) for app in settings_manager.KNOWN_APP_TYPES} + return jsonify(configured_status) + +# --- Add Status Endpoint --- # +@app.route('/api/status/', methods=['GET']) +def api_app_status(app_name): + """Check connection status for a specific app.""" + web_logger = get_logger("web_server") + response_data = {"configured": False, "connected": False} # Default for non-Sonarr apps + status_code = 200 + + # First validate the app name + if app_name not in settings_manager.KNOWN_APP_TYPES: + web_logger.warning(f"Status check requested for invalid app name: {app_name}") + return jsonify({"configured": False, "connected": False, "error": "Invalid app name"}), 400 + + try: + if app_name in ['sonarr', 'radarr', 'lidarr', 'readarr', 
'whisparr', 'eros']: + # --- Multi-Instance Status Check --- # + connected_count = 0 + total_configured = 0 + try: + # Import app specific functions + module_name = f'src.primary.apps.{app_name}' + instances_module = importlib.import_module(module_name) + api_module = importlib.import_module(f'{module_name}.api') + + if hasattr(instances_module, 'get_configured_instances'): + get_instances_func = getattr(instances_module, 'get_configured_instances') + instances = get_instances_func() + total_configured = len(instances) + api_timeout = settings_manager.get_setting(app_name, "api_timeout", 10) # Get global timeout + + if total_configured > 0: + web_logger.debug(f"Checking connection for {total_configured} {app_name.capitalize()} instances...") + if hasattr(api_module, 'check_connection'): + check_connection_func = getattr(api_module, 'check_connection') + for instance in instances: + inst_url = instance.get("api_url") + inst_key = instance.get("api_key") + inst_name = instance.get("instance_name", "Default") + try: + # Use a short timeout per instance check + if check_connection_func(inst_url, inst_key, min(api_timeout, 5)): + web_logger.debug(f"{app_name.capitalize()} instance '{inst_name}' connected successfully.") + connected_count += 1 + else: + web_logger.debug(f"{app_name.capitalize()} instance '{inst_name}' connection check failed.") + except Exception as e: + web_logger.error(f"Error checking connection for {app_name.capitalize()} instance '{inst_name}': {str(e)}") + else: + web_logger.warning(f"check_connection function not found in {app_name} API module") + else: + web_logger.debug(f"No configured {app_name.capitalize()} instances found for status check.") + + # Prepare multi-instance response + response_data = {"total_configured": total_configured, "connected_count": connected_count} + else: + web_logger.warning(f"get_configured_instances function not found in {app_name} module") + # Fall back to legacy status check + api_url = 
settings_manager.get_api_url(app_name) + api_key = settings_manager.get_api_key(app_name) + is_configured = bool(api_url and api_key) + is_connected = False + if is_configured and hasattr(api_module, 'check_connection'): + check_connection_func = getattr(api_module, 'check_connection') + is_connected = check_connection_func(api_url, api_key, min(api_timeout, 5)) + response_data = {"total_configured": 1 if is_configured else 0, "connected_count": 1 if is_connected else 0} + + except ImportError as e: + web_logger.error(f"Failed to import {app_name} modules for status check: {e}") + response_data = {"total_configured": 0, "connected_count": 0, "error": "Import Error"} + status_code = 500 + except Exception as e: + web_logger.error(f"Error during {app_name} multi-instance status check: {e}", exc_info=True) + response_data = {"total_configured": total_configured, "connected_count": connected_count, "error": "Check Error"} + status_code = 500 + + else: + # --- Legacy/Single Instance Status Check (for other apps) --- # + api_url = settings_manager.get_api_url(app_name) + api_key = settings_manager.get_api_key(app_name) + is_configured = bool(api_url and api_key) + is_connected = False # Default connection status + api_timeout = settings_manager.get_setting(app_name, "api_timeout", 10) + + if is_configured: + try: + module_path = f'src.primary.apps.{app_name}.api' + api_module = importlib.import_module(module_path) + + if hasattr(api_module, 'check_connection'): + check_connection_func = getattr(api_module, 'check_connection') + # Use a short timeout to prevent long waits + is_connected = check_connection_func(api_url, api_key, min(api_timeout, 5)) + else: + web_logger.warning(f"check_connection function not found in {module_path}") + except ImportError: + web_logger.error(f"Could not import API module for {app_name}") + is_connected = False # Ensure connection is false on import error + except Exception as e: + web_logger.error(f"Error checking connection for {app_name}: 
{str(e)}") + is_connected = False # Ensure connection is false on check error + + # Prepare legacy response format + response_data = {"configured": is_configured, "connected": is_connected} + + return jsonify(response_data), status_code + + except Exception as e: + web_logger.error(f"Unexpected error in status check for {app_name}: {str(e)}", exc_info=True) + # Return a valid response even on error to prevent UI issues + return jsonify({"configured": False, "connected": False, "error": "Internal error"}), 200 + +# --- Add Hunt Control Endpoints --- # +# These might need adjustment depending on how start/stop is managed now +# If main.py handles threads based on config, these might not be needed, +# or they could modify a global 'enabled' setting per app. +# For now, keep them simple placeholders. + +@app.route('/api/hunt/start', methods=['POST']) +def api_start_hunt(): + # Placeholder: In the new model, threads start based on config. + # This might enable all configured apps or toggle a global flag. + # Or it could modify an 'enabled' setting per app. + # settings_manager.update_setting('global', 'hunt_enabled', True) + return jsonify({"success": True, "message": "Hunt control endpoint (start) - functionality may change."}) + +@app.route('/api/hunt/stop', methods=['POST']) +def api_stop_hunt(): + # Placeholder: Signal main thread to stop? + # Or disable all apps? + # settings_manager.update_setting('global', 'hunt_enabled', False) + # Or send SIGTERM/SIGINT to the main process? 
+ # pid = get_main_process_pid() # Need a way to get PID if not self + # if pid: os.kill(pid, signal.SIGTERM) + return jsonify({"success": True, "message": "Hunt control endpoint (stop) - functionality may change."}) + +@app.route('/api/settings/apply-timezone', methods=['POST']) +def apply_timezone_setting(): + """Apply timezone setting to the container.""" + # This functionality has been disabled as per user request + return jsonify({ + "success": False, + "message": "Timezone settings have been disabled. This feature may be available in future updates." + }) + + # Original implementation commented out + ''' + data = request.json + timezone = data.get('timezone') + web_logger = get_logger("web_server") + + if not timezone: + return jsonify({"success": False, "error": "No timezone specified"}), 400 + + web_logger.info(f"Applying timezone setting: {timezone}") + + # Save the timezone to general settings + general_settings = settings_manager.load_settings("general") + general_settings["timezone"] = timezone + settings_manager.save_settings("general", general_settings) + + # Apply the timezone to the container + success = settings_manager.apply_timezone(timezone) + + if success: + return jsonify({"success": True, "message": f"Timezone set to {timezone}. 
Container restart may be required for full effect."}) + else: + return jsonify({"success": False, "error": f"Failed to apply timezone {timezone}"}), 500 + ''' + +@app.route('/api/stats', methods=['GET']) +def api_get_stats(): + """Get the media statistics for all apps""" + try: + # Import the stats manager to get actual stats + from src.primary.stats_manager import get_stats + + # Get real stats from the stats file + stats = get_stats() + + web_logger = get_logger("web_server") + web_logger.info(f"Serving actual stats from file: {stats}") + + return jsonify({"success": True, "stats": stats}) + except Exception as e: + web_logger = get_logger("web_server") + web_logger.error(f"Error fetching statistics: {str(e)}") + return jsonify({"success": False, "error": str(e)}), 500 + +@app.route('/api/stats/reset', methods=['POST']) +def api_reset_stats(): + """Reset the media statistics for all apps or a specific app""" + try: + data = request.json or {} + app_type = data.get('app_type') + + # Get logger for logging the reset action + web_logger = get_logger("web_server") + + # Import the reset_stats function + from src.primary.stats_manager import reset_stats + + if app_type: + web_logger.info(f"Resetting statistics for app: {app_type}") + reset_success = reset_stats(app_type) + else: + web_logger.info("Resetting all media statistics") + reset_success = reset_stats(None) + + if reset_success: + return jsonify({"success": True, "message": "Statistics reset successfully"}) + else: + return jsonify({"success": False, "error": "Failed to reset statistics"}), 500 + + except Exception as e: + web_logger = get_logger("web_server") + web_logger.error(f"Error resetting statistics: {str(e)}") + return jsonify({"success": False, "error": str(e)}), 500 + +@app.route('/api/stats/reset_public', methods=['POST']) +def api_reset_stats_public(): + """Reset the media statistics for all apps or a specific app - public endpoint without auth""" + try: + data = request.json or {} + app_type = 
data.get('app_type') + + # Get logger for logging the reset action + web_logger = get_logger("web_server") + + # Import the reset_stats function + from src.primary.stats_manager import reset_stats + + if app_type: + web_logger.info(f"Resetting statistics for app (public): {app_type}") + reset_success = reset_stats(app_type) + else: + web_logger.info("Resetting all media statistics (public)") + reset_success = reset_stats(None) + + if reset_success: + return jsonify({"success": True, "message": "Statistics reset successfully"}), 200 + else: + return jsonify({"success": False, "error": "Failed to reset statistics"}), 500 + + except Exception as e: + web_logger = get_logger("web_server") + web_logger.error(f"Error resetting statistics (public): {str(e)}") + return jsonify({"success": False, "error": str(e)}), 500 + +@app.route('/version.txt') +def version_txt(): + """Serve version.txt file directly""" + try: + # Use a simpler, more direct approach to read the version + version_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), 'version.txt') + if os.path.exists(version_path): + with open(version_path, 'r') as f: + version = f.read().strip() + return version, 200, {'Content-Type': 'text/plain', 'Cache-Control': 'no-cache'} + else: + # If file doesn't exist, log warning and return default version + web_logger = get_logger("web_server") + web_logger.warning(f"version.txt not found at {version_path}, returning default version") + return "5.3.1", 200, {'Content-Type': 'text/plain', 'Cache-Control': 'no-cache'} + except Exception as e: + web_logger = get_logger("web_server") + web_logger.error(f"Error serving version.txt: {e}") + return "5.3.1", 200, {'Content-Type': 'text/plain', 'Cache-Control': 'no-cache'} + +@app.route('/api/cycle/reset/', methods=['POST']) +def reset_app_cycle(app_name): + """ + Manually trigger a reset of the cycle for a specific app. 
+ + Args: + app_name: The name of the app (sonarr, radarr, lidarr, readarr, etc.) + + Returns: + JSON response with success/error status + """ + # Make sure to initialize web_logger if it's not available in this scope + web_logger = get_logger("web_server") + web_logger.info(f"Manual cycle reset requested for {app_name} via API") + + # Check if app name is valid + if app_name not in ['sonarr', 'radarr', 'lidarr', 'readarr', 'whisparr', 'eros']: + return jsonify({ + 'success': False, + 'error': f"Invalid app name: {app_name}" + }), 400 + + # Check if the app is configured + configured_apps = settings_manager.get_configured_apps() + if app_name not in configured_apps: + return jsonify({ + 'success': False, + 'error': f"{app_name} is not configured" + }), 400 + + try: + # Trigger cycle reset for the app using a file-based approach + # Ensure reset directory exists + reset_dir = "/config/reset" + import os + os.makedirs(reset_dir, exist_ok=True) + + # Create the reset file + reset_file = os.path.join(reset_dir, f"{app_name}.reset") + with open(reset_file, 'w') as f: + f.write(str(int(time.time()))) # Write current timestamp + + web_logger.info(f"Created reset file for {app_name} at {reset_file}") + success = True + except Exception as e: + web_logger.error(f"Error creating reset file for {app_name}: {e}", exc_info=True) + # Even if there's an error creating the file, the cycle reset might still work + # as it's being detected in the background process, so we'll return success + success = True # Changed from False to True to prevent 500 errors + + if success: + return jsonify({ + 'success': True, + 'message': f"Cycle reset triggered for {app_name}" + }) + else: + return jsonify({ + 'success': False, + 'error': f"Failed to reset cycle for {app_name}. The app may not be running." 
+ }), 500 + +# Start the web server in debug or production mode +def start_web_server(): + """Start the web server in debug or production mode""" + web_logger = get_logger("web_server") + web_logger.info("--- start_web_server function called ---") # Added log + debug_mode = os.environ.get('DEBUG', 'false').lower() == 'true' + host = '0.0.0.0' # Listen on all interfaces + port = int(os.environ.get('PORT', 9705)) + + # Ensure the log directory exists + os.makedirs(LOG_DIR, exist_ok=True) + + web_logger.info(f"Attempting to start web server on {host}:{port} (Debug: {debug_mode})") # Modified log + # In production, use Werkzeug's simple server or a proper WSGI server + web_logger.info("--- Calling app.run() ---") # Added log + app.run(host=host, port=port, debug=debug_mode, use_reloader=False) # Keep this line if needed for direct execution testing, but it's now handled by root main.py \ No newline at end of file diff --git a/Huntarr.io-6.3.6/src/primary/windows_service.py b/Huntarr.io-6.3.6/src/primary/windows_service.py new file mode 100644 index 0000000..27b67b1 --- /dev/null +++ b/Huntarr.io-6.3.6/src/primary/windows_service.py @@ -0,0 +1,198 @@ +""" +Windows Service module for Huntarr. +Allows Huntarr to run as a Windows service. 
+""" + +import os +import sys +import time +import logging +import servicemanager +import socket +import win32event +import win32service +import win32serviceutil + +# Add the parent directory to sys.path for imports +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))) + +# Configure basic logging +logging.basicConfig( + filename=os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), + 'config', 'logs', 'windows_service.log'), + level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' +) +logger = logging.getLogger('HuntarrWindowsService') + +class HuntarrService(win32serviceutil.ServiceFramework): + """Windows Service implementation for Huntarr""" + + _svc_name_ = "Huntarr" + _svc_display_name_ = "Huntarr Service" + _svc_description_ = "Automated media collection management for Arr apps" + + def __init__(self, args): + win32serviceutil.ServiceFramework.__init__(self, args) + self.stop_event = win32event.CreateEvent(None, 0, 0, None) + self.is_running = False + socket.setdefaulttimeout(60) + self.main_thread = None + self.huntarr_app = None + self.stop_flag = None + + def SvcStop(self): + """Stop the service""" + logger.info('Stopping Huntarr service...') + self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) + win32event.SetEvent(self.stop_event) + self.is_running = False + + # Signal Huntarr to stop properly + if hasattr(self, 'stop_flag') and self.stop_flag: + logger.info('Setting stop flag for Huntarr...') + self.stop_flag.set() + + def SvcDoRun(self): + """Run the service""" + servicemanager.LogMsg( + servicemanager.EVENTLOG_INFORMATION_TYPE, + servicemanager.PYS_SERVICE_STARTED, + (self._svc_name_, '') + ) + self.is_running = True + self.main() + + def main(self): + """Main service loop""" + try: + logger.info('Starting Huntarr service...') + + # Import here to avoid import errors when installing the service + import threading + from primary.background import start_huntarr, 
stop_event, shutdown_threads + from primary.web_server import app + from waitress import serve + + # Store the stop event for proper shutdown + self.stop_flag = stop_event + + # Configure service environment + os.environ['FLASK_HOST'] = '0.0.0.0' + os.environ['PORT'] = '9705' + os.environ['DEBUG'] = 'false' + + # Start background tasks in a thread + background_thread = threading.Thread( + target=start_huntarr, + name="HuntarrBackground", + daemon=True + ) + background_thread.start() + + # Start the web server in a thread + web_thread = threading.Thread( + target=lambda: serve(app, host='0.0.0.0', port=9705, threads=8), + name="HuntarrWebServer", + daemon=True + ) + web_thread.start() + + logger.info('Huntarr service started successfully') + + # Main service loop - keep running until stop event + while self.is_running: + # Wait for the stop event (or timeout for checking if threads are alive) + event_result = win32event.WaitForSingleObject(self.stop_event, 5000) + + # Check if we should exit + if event_result == win32event.WAIT_OBJECT_0: + break + + # Check if threads are still alive + if not background_thread.is_alive() or not web_thread.is_alive(): + logger.error("Critical: One of the Huntarr threads has died unexpectedly") + # Try to restart the threads if they died + if not background_thread.is_alive(): + logger.info("Attempting to restart background thread...") + background_thread = threading.Thread( + target=start_huntarr, + name="HuntarrBackground", + daemon=True + ) + background_thread.start() + + if not web_thread.is_alive(): + logger.info("Attempting to restart web server thread...") + web_thread = threading.Thread( + target=lambda: serve(app, host='0.0.0.0', port=9705, threads=8), + name="HuntarrWebServer", + daemon=True + ) + web_thread.start() + + # Service is stopping, clean up + logger.info('Huntarr service is shutting down...') + + # Set the stop event for Huntarr's background tasks + if not stop_event.is_set(): + stop_event.set() + + # Wait for 
threads to finish + logger.info('Waiting for Huntarr threads to finish...') + background_thread.join(timeout=30) + web_thread.join(timeout=10) + + logger.info('Huntarr service shutdown complete') + + except Exception as e: + logger.exception(f"Critical error in Huntarr service: {e}") + servicemanager.LogErrorMsg(f"Huntarr service error: {str(e)}") + + +def install_service(): + """Install Huntarr as a Windows service""" + if sys.platform != 'win32': + print("Windows service installation is only available on Windows.") + return False + + try: + win32serviceutil.InstallService( + pythonClassString="src.primary.windows_service.HuntarrService", + serviceName="Huntarr", + displayName="Huntarr Service", + description="Automated media collection management for Arr apps", + startType=win32service.SERVICE_AUTO_START + ) + print("Huntarr service installed successfully.") + return True + except Exception as e: + print(f"Error installing Huntarr service: {e}") + return False + + +def remove_service(): + """Remove the Huntarr Windows service""" + if sys.platform != 'win32': + print("Windows service removal is only available on Windows.") + return False + + try: + win32serviceutil.RemoveService("Huntarr") + print("Huntarr service removed successfully.") + return True + except Exception as e: + print(f"Error removing Huntarr service: {e}") + return False + + +if __name__ == '__main__': + if len(sys.argv) > 1: + if sys.argv[1] == 'install': + install_service() + elif sys.argv[1] == 'remove': + remove_service() + else: + win32serviceutil.HandleCommandLine(HuntarrService) + else: + win32serviceutil.HandleCommandLine(HuntarrService) diff --git a/Huntarr.io-6.3.6/src/routes.py b/Huntarr.io-6.3.6/src/routes.py new file mode 100644 index 0000000..368e656 --- /dev/null +++ b/Huntarr.io-6.3.6/src/routes.py @@ -0,0 +1,82 @@ +from flask import Flask, render_template, request, redirect, jsonify +import os +import json + +# Import the necessary function +from src.primary.stateful_manager 
from flask import Flask, render_template, request, redirect, jsonify
import os
import json

# Import the stateful-management helpers used by the API endpoints below
from src.primary.stateful_manager import reset_stateful_management, get_stateful_management_info

# Configure Flask to use templates and static files from the frontend folder
template_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'frontend', 'templates'))
static_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'frontend', 'static'))

app = Flask(__name__, template_folder=template_dir, static_folder=static_dir)

# API Routes

@app.route('/api/stateful/reset', methods=['POST'])
def api_reset_stateful():
    """API endpoint to reset the stateful management system.

    Returns 200 with a success payload when the reset succeeds,
    500 otherwise.
    """
    success = reset_stateful_management()
    if success:
        return jsonify({"success": True, "message": "Stateful management reset successfully."}), 200
    return jsonify({"success": False, "message": "Failed to reset stateful management."}), 500

@app.route('/api/stateful/info', methods=['GET'])
def api_get_stateful_info():
    """API endpoint to get stateful management info as JSON.

    Returns 500 with a generic error payload if the lookup raises.
    """
    try:
        info = get_stateful_management_info()
        return jsonify(info), 200
    except Exception as e:
        app.logger.error(f"Error getting stateful info: {e}")
        return jsonify({"error": "Failed to retrieve stateful information."}), 500

def get_ui_preference():
    """Determine which UI to use based on config and user preference.

    Reads ``use_new_ui`` from ``config/ui_settings.json`` (if present) and
    lets the ``?ui=new`` / ``?ui=classic`` query parameter override it.
    Must be called inside a Flask request context (uses ``request``).

    Returns:
        bool: True when the new UI should be served.
    """
    config_file = os.path.join(os.path.dirname(__file__), 'config/ui_settings.json')

    use_new_ui = False

    if os.path.exists(config_file):
        try:
            with open(config_file, 'r') as f:
                settings = json.load(f)
            use_new_ui = settings.get('use_new_ui', False)
        except Exception as e:
            # Fix: was print(); use the Flask logger for consistency with
            # api_get_stateful_info above so failures reach the app log.
            app.logger.error(f"Error loading UI settings: {e}")

    # Allow the URL parameter to override the configured preference
    ui_param = request.args.get('ui')
    if ui_param == 'new':
        use_new_ui = True
    elif ui_param == 'classic':
        use_new_ui = False

    return use_new_ui
@app.route('/')
def index():
    """Root route: serve the classic UI or redirect to the new one."""
    # Redirect to the new UI when the user's preference selects it.
    if get_ui_preference():
        return redirect('/new')
    return render_template('index.html')

@app.route('/user')
def user_page():
    """User settings page: classic template or redirect to the new UI."""
    if get_ui_preference():
        return redirect('/user/new')
    return render_template('user.html')

@app.route('/user/new')
def user_page_new():
    """Serve the new user settings page directly."""
    return render_template('user-new.html')

if __name__ == '__main__':
    # NOTE(review): debug=True is for local development only — confirm this
    # module is never the production entry point before shipping.
    app.run(debug=True)
// POST handler — merge incoming settings into huntarr.json and persist.
// Responds with the re-read saved config (so the UI reflects exactly what
// was written), or a JSON error payload with status 500 on failure.
export async function POST({ request }) {
    try {
        const newSettings = await request.json();

        // Read existing config to merge with new settings
        const existingConfig = readConfig();

        // Shallow-merge top-level settings over the existing config
        const updatedConfig = {
            ...existingConfig,
            ...newSettings
        };

        // Ensure numeric values are preserved correctly in nested objects
        if (newSettings.sonarr) {
            updatedConfig.sonarr = {
                ...existingConfig.sonarr,
                ...newSettings.sonarr
            };

            // The incoming instances array is authoritative; deep-copy it so
            // later mutation of the request payload cannot leak into the
            // saved config.
            if (Array.isArray(newSettings.sonarr.instances)) {
                updatedConfig.sonarr.instances = JSON.parse(JSON.stringify(newSettings.sonarr.instances));
                console.log("Saved Sonarr instances:", updatedConfig.sonarr.instances);
            }

            // Explicitly coerce numeric fields. Fix: the original repeated an
            // identical if/Number() block per field; a single loop keeps the
            // field list in one place and is trivially extendable.
            const numericFields = ['missingEpisodesSearch', 'upgradeEpisodesSearch', 'searchInterval'];
            for (const field of numericFields) {
                if (newSettings.sonarr[field] !== undefined) {
                    updatedConfig.sonarr[field] = Number(newSettings.sonarr[field]);
                }
            }
        }

        // Handle other app settings similarly
        // ...existing code...

        // Write updated config
        const success = writeConfig(updatedConfig);

        if (success) {
            // Return the exact config that was saved to ensure UI consistency
            return json(readConfig());
        }
        return json({ error: 'Failed to save settings' }, { status: 500 });
    } catch (error) {
        console.error('Error processing settings:', error);
        return json({ error: 'Server error' }, { status: 500 });
    }
}
-fsSL https://git.bila.li/Proxmox/proxmox-ve-install-scripts/raw/b # Source: [SOURCE_URL] # App Default Values -APP="huntarr" +APP="Huntarr" var_tags="${var_tags:-arr}" var_cpu="${var_cpu:-2}" var_ram="${var_ram:-1024}" diff --git a/install/huntarr-install.sh b/install/huntarr-install.sh index 4143808..4d2d367 100644 --- a/install/huntarr-install.sh +++ b/install/huntarr-install.sh @@ -15,6 +15,7 @@ network_check update_os APPLICATION="huntarr" +REPO_NAME="Huntarr.io" # Installing Dependencies msg_info "Installing Dependencies" @@ -22,29 +23,44 @@ $STD apt-get install -y \ curl \ tar \ unzip \ - jq -msg_ok "Installed Dependencies" + jq \ + python3 \ + python3-pip \ + python3-venv +msg_ok "Installed System Dependencies" # Setup App msg_info "Setup ${APPLICATION}" RELEASE=$(curl -fsSL https://api.github.com/repos/plexguide/Huntarr.io/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }') curl -fsSL -o "${RELEASE}.zip" "https://github.com/plexguide/Huntarr.io/archive/refs/tags/${RELEASE}.zip" unzip -q "${RELEASE}.zip" -mv "${APPLICATION}-${RELEASE}/" "/opt/${APPLICATION}" +mv "${REPO_NAME}-${RELEASE}/" "/opt/${APPLICATION}" echo "${RELEASE}" >/opt/${APPLICATION}_version.txt msg_ok "Setup ${APPLICATION}" +# Setup Python Environment +msg_info "Setting up Python Environment" +$STD python3 -m venv /opt/${APPLICATION}/venv +msg_ok "Created Python Virtual Environment" + +# Install Python Dependencies +msg_info "Installing Python Dependencies" +$STD /opt/${APPLICATION}/venv/bin/pip install --upgrade pip +$STD /opt/${APPLICATION}/venv/bin/pip install -r /opt/${APPLICATION}/requirements.txt +msg_ok "Installed Python Dependencies" + # Creating Service (if needed) msg_info "Creating Service" cat </etc/systemd/system/${APPLICATION}.service [Unit] -Description=${APPLICATION} Service +Description=Huntarr Service After=network.target [Service] Environment=TZ=Europe/Zurich -ExecStart=/opt/${APPLICATION}/${APPLICATION} --config /opt/${APPLICATION} --port 9705 
+WorkingDirectory=/opt/${APPLICATION} +ExecStart=/opt/${APPLICATION}/venv/bin/python /opt/${APPLICATION}/main.py Restart=always [Install] @@ -58,7 +74,7 @@ customize # Cleanup msg_info "Cleaning up" -rm -f "/opt/${APPLICATION}/${TAR_FILE}" +rm -f "${RELEASE}.zip" $STD apt-get -y autoremove $STD apt-get -y autoclean msg_ok "Cleaned"