+ const appsAppSelect = document.getElementById('appsAppSelect');
+ if (appsAppSelect) {
+ appsAppSelect.addEventListener('change', (e) => {
+ const app = e.target.value;
+ this.handleAppsAppChange(app);
+ });
+ }
+
+ // Dropdown toggle
+ if (this.elements.appsDropdownBtn) {
+ this.elements.appsDropdownBtn.addEventListener('click', () => {
+ this.elements.appsDropdownContent.classList.toggle('show');
+
+ // Close all other dropdowns
+ document.querySelectorAll('.log-dropdown-content.show').forEach(dropdown => {
+ if (dropdown !== this.elements.appsDropdownContent) {
+ dropdown.classList.remove('show');
+ }
+ });
+ });
+ }
+
+ // Close dropdown when clicking outside
+ document.addEventListener('click', e => {
+ if (!e.target.matches('#appsSection .log-dropdown-btn') &&
+ !e.target.closest('#appsSection .log-dropdown-btn')) {
+ if (this.elements.appsDropdownContent && this.elements.appsDropdownContent.classList.contains('show')) {
+ this.elements.appsDropdownContent.classList.remove('show');
+ }
+ }
+ });
+
+ // Save button
+ if (this.elements.saveAppsButton) {
+ this.elements.saveAppsButton.addEventListener('click', (event) => this.saveApps(event));
+ }
+ },
+
+ // Load apps for initial display
+ loadApps: function() {
+ // Set default app if none is selected
+ if (!this.currentApp) {
+ this.currentApp = 'sonarr'; // Default to Sonarr
+
+ // Update the dropdown text to show current app
+ if (this.elements.currentAppsApp) {
+ this.elements.currentAppsApp.textContent = 'Sonarr';
+ }
+
+ // Mark the sonarr option as active in the dropdown
+ if (this.elements.appsOptions) {
+ this.elements.appsOptions.forEach(option => {
+ option.classList.remove('active');
+ if (option.getAttribute('data-app') === 'sonarr') {
+ option.classList.add('active');
+ }
+ });
+ }
+ }
+
+ // Load the currently selected app
+ this.loadAppSettings(this.currentApp);
+ },
+
+ // Load app settings
+ loadAppSettings: function(app) {
+ console.log(`[Apps] Loading settings for ${app}`);
+
+ // Get the container to put the settings in
+ const appPanel = document.getElementById(app + 'Apps');
+ if (!appPanel) {
+ console.error(`App panel not found for ${app}`);
+ return;
+ }
+
+ // Clear existing content
+ appPanel.innerHTML = ' Loading settings...
';
+
+ // Fetch settings for this app
+ fetch(`/api/settings/${app}`)
+ .then(response => {
+ if (!response.ok) {
+ throw new Error(`HTTP error! status: ${response.status}`);
+ }
+ return response.json();
+ })
+ .then(appSettings => {
+ console.log(`[Apps] Received settings for ${app}:`, appSettings);
+
+ // Clear loading message
+ appPanel.innerHTML = '';
+
+ // Create a form container with the app-type attribute
+ const formElement = document.createElement('form');
+ formElement.classList.add('settings-form');
+ formElement.setAttribute('data-app-type', app);
+ appPanel.appendChild(formElement);
+
+ // Generate the form using SettingsForms module
+ if (typeof SettingsForms !== 'undefined') {
+ const formFunction = SettingsForms[`generate${app.charAt(0).toUpperCase()}${app.slice(1)}Form`];
+ if (typeof formFunction === 'function') {
+ // Use .call() to set the 'this' context correctly
+ formFunction.call(SettingsForms, formElement, appSettings);
+
+ // Update duration displays for this app
+ if (typeof SettingsForms.updateDurationDisplay === 'function') {
+ SettingsForms.updateDurationDisplay();
+ }
+
+ // Store original form values after form is generated
+ this.storeOriginalFormValues(appPanel);
+
+ // Add change listener to detect modifications
+ this.addFormChangeListeners(formElement);
+ } else {
+ console.warn(`Form generation function not found for ${app}`);
+ appPanel.innerHTML = `Settings for ${app.charAt(0).toUpperCase() + app.slice(1)} are not available.
`;
+ }
+ } else {
+ console.error('SettingsForms module not found');
+ appPanel.innerHTML = 'Unable to generate settings form. Please reload the page.
';
+ }
+ })
+ .catch(error => {
+ console.error(`Error loading ${app} settings:`, error);
+ appPanel.innerHTML = ` Error loading settings: ${error.message}
`;
+ });
+ },
+
+ // Add change event listeners to form elements
+ addFormChangeListeners: function(form) {
+ if (!form) return;
+
+ console.log(`Adding form change listeners to form with app type: ${form.getAttribute('data-app-type')}`);
+
+ // Function to handle form element changes
+ const handleChange = () => {
+ if (this.hasFormChanges(form)) {
+ console.log('Form changed, enabling save button');
+ this.markAppsAsChanged();
+ } else {
+ console.log('No actual changes, save button remains disabled');
+ }
+ };
+
+ // Add listeners to all form inputs, selects, and textareas
+ const formElements = form.querySelectorAll('input, select, textarea');
+ formElements.forEach(element => {
+ // Skip buttons
+ if (element.type === 'button' || element.type === 'submit') return;
+
+ // Remove any existing change listeners to avoid duplicates
+ element.removeEventListener('change', handleChange);
+ element.removeEventListener('input', handleChange);
+
+ // Add change listeners
+ element.addEventListener('change', handleChange);
+
+ // For text and number inputs, also listen for input events
+ if (element.type === 'text' || element.type === 'number' || element.type === 'textarea') {
+ element.addEventListener('input', handleChange);
+ }
+
+ console.log(`Added change listener to ${element.tagName} with id: ${element.id || 'no-id'}`);
+ });
+
+ // Also add a MutationObserver to detect when instances are added or removed
+ // This is needed because adding/removing instances doesn't trigger input events
+ try {
+ // Check if we already have an observer for this form
+ if (this.observer) {
+ this.observer.disconnect();
+ }
+
+ // Create a new MutationObserver
+ this.observer = new MutationObserver((mutations) => {
+ let shouldUpdate = false;
+
+ mutations.forEach(mutation => {
+ // Check for elements added or removed
+ if (mutation.type === 'childList' &&
+ (mutation.addedNodes.length > 0 || mutation.removedNodes.length > 0)) {
+ shouldUpdate = true;
+ }
+ });
+
+ if (shouldUpdate) {
+ console.log('Instances container changed - checking for form changes');
+ if (this.hasFormChanges(form)) {
+ console.log('Form changed, enabling save button');
+ this.markAppsAsChanged();
+ } else {
+ console.log('No actual changes, save button remains disabled');
+ }
+ }
+ });
+
+ // Start observing instances container for changes
+ const instancesContainers = form.querySelectorAll('.instances-container');
+ instancesContainers.forEach(container => {
+ this.observer.observe(container, { childList: true, subtree: true });
+ console.log(`Added MutationObserver to container: ${container.className}`);
+ });
+ } catch (error) {
+ console.error('Error setting up MutationObserver:', error);
+ }
+ },
+
+ // Mark apps as changed
+ markAppsAsChanged: function() {
+ this.settingsChanged = true;
+
+ // Find the currently visible app panel and track it in our list of changed apps
+ const currentApp = document.querySelector('.app-panel:not([style*="display: none"])');
+ if (currentApp) {
+ const appType = currentApp.getAttribute('data-app-type');
+ if (appType) {
+ // Initialize the array if it doesn't exist yet
+ if (!this.appsWithChanges) {
+ this.appsWithChanges = [];
+ }
+
+ // Add this app to the list of apps with changes if not already there
+ if (!this.appsWithChanges.includes(appType)) {
+ this.appsWithChanges.push(appType);
+ console.log(`Added ${appType} to appsWithChanges:`, this.appsWithChanges);
+ }
+
+ // Set the global tracking flag for this specific app
+ if (!window._hasAppChanges) {
+ window._hasAppChanges = {};
+ }
+ window._hasAppChanges[appType] = true;
+
+ // Also update the huntarrUI tracking if available
+ if (window.huntarrUI && window.huntarrUI.formChanged) {
+ window.huntarrUI.formChanged[appType] = true;
+ window.huntarrUI.hasUnsavedChanges = true;
+ }
+ }
+ }
+
+ if (this.elements.saveAppsButton) {
+ this.elements.saveAppsButton.disabled = false;
+ console.log('Save button enabled');
+ } else {
+ console.error('Save button element not found');
+ }
+ },
+
+ // Check if the form has actual changes compared to original values
+ hasFormChanges: function(form) {
+ if (!form || !this.originalSettings) return true;
+
+ let hasChanges = false;
+ const formElements = form.querySelectorAll('input, select, textarea');
+
+ formElements.forEach(element => {
+ // Skip buttons
+ if (element.type === 'button' || element.type === 'submit') return;
+
+ const originalValue = this.originalSettings[element.id];
+ const currentValue = element.type === 'checkbox' ? element.checked : element.value;
+
+ // Compare with original value
+ if (originalValue !== undefined && String(originalValue) !== String(currentValue)) {
+ console.log(`Element changed: ${element.id}, Original: ${originalValue}, Current: ${currentValue}`);
+ hasChanges = true;
+ }
+ });
+
+ return hasChanges;
+ },
+
+ // Show specific app panel and hide others
+ showAppPanel: function(app) {
+ console.log(`Showing app panel for ${app}`);
+ // Hide all app panels
+ this.elements.appAppsPanels.forEach(panel => {
+ panel.style.display = 'none';
+ panel.classList.remove('active');
+ });
+
+ // Show the selected app panel
+ const appPanel = document.getElementById(`${app}Apps`);
+ if (appPanel) {
+ appPanel.style.display = 'block';
+ appPanel.classList.add('active');
+
+ // Ensure the panel has the correct data-app-type attribute
+ appPanel.setAttribute('data-app-type', app);
+
+ console.log(`App panel for ${app} is now active`);
+ } else {
+ console.error(`App panel for ${app} not found`);
+ }
+ },
+
+ // Handle app selection changes
+ handleAppsAppChange: function(selectedApp) {
+ // If called with an event, extract the value
+ if (selectedApp && selectedApp.target && typeof selectedApp.target.value === 'string') {
+ selectedApp = selectedApp.target.value;
+ }
+ if (!selectedApp || selectedApp === this.currentApp) return;
+
+ // Special case for Cleanuperr - it's an information page, not a settings page
+ if (selectedApp === 'cleanuperr') {
+ // Hide apps section and show Cleanuperr section
+ document.getElementById('appsSection').classList.remove('active');
+ document.getElementById('cleanuperrSection').classList.add('active');
+
+ // Update the page title
+ if (huntarrUI && typeof huntarrUI.switchSection === 'function') {
+ huntarrUI.currentSection = 'cleanuparr';
+ // We're not calling the full switchSection as that would alter navigation
+ // Just update the title
+ const pageTitleElement = document.getElementById('currentPageTitle');
+ if (pageTitleElement) {
+ pageTitleElement.textContent = 'Cleanuperr';
+ }
+ }
+
+ return;
+ }
+
+ // Check for unsaved changes
+ if (this.settingsChanged) {
+ const confirmSwitch = confirm('You have unsaved changes. Do you want to continue without saving?');
+ if (!confirmSwitch) {
+ // Reset the select to the current app
+ const appsAppSelect = document.getElementById('appsAppSelect');
+ if (appsAppSelect) appsAppSelect.value = this.currentApp;
+ return;
+ }
+ }
+ // Update the select value
+ const appsAppSelect = document.getElementById('appsAppSelect');
+ if (appsAppSelect) appsAppSelect.value = selectedApp;
+ // Show the selected app's panel
+ this.showAppPanel(selectedApp);
+ this.currentApp = selectedApp;
+ // Load the newly selected app's settings
+ this.loadAppSettings(selectedApp);
+ // Reset changed state
+ this.settingsChanged = false;
+ if (this.elements.saveAppsButton) this.elements.saveAppsButton.disabled = true;
+ },
+
+ // Save apps settings - completely rewritten for reliability
+ saveApps: function(event) {
+ if (event) event.preventDefault();
+
+ console.log('[Apps] Save button clicked');
+
+ // Set a flag that we're in the middle of saving
+ window._appsCurrentlySaving = true;
+
+ // Get the current app from module state
+ const appType = this.currentApp;
+ if (!appType) {
+ console.error('No current app selected');
+
+ // Emergency fallback - try to find the visible app panel
+ const visiblePanel = document.querySelector('.app-apps-panel[style*="display: block"]');
+ if (visiblePanel && visiblePanel.id) {
+ // Extract app type from panel ID (e.g., "sonarrApps" -> "sonarr")
+ const extractedType = visiblePanel.id.replace('Apps', '');
+ console.log(`Fallback: Found visible panel with ID ${visiblePanel.id}, extracted app type: ${extractedType}`);
+
+ if (extractedType) {
+ // Continue with the extracted app type
+ return this.saveAppSettings(extractedType, visiblePanel);
+ }
+ }
+
+ if (typeof huntarrUI !== 'undefined' && typeof huntarrUI.showNotification === 'function') {
+ huntarrUI.showNotification('Error: Could not determine which app settings to save', 'error');
+ } else {
+ alert('Error: Could not determine which app settings to save');
+ }
+ return;
+ }
+
+ // Direct DOM access to find the app panel
+ const appPanel = document.getElementById(`${appType}Apps`);
+ if (!appPanel) {
+ console.error(`App panel not found for ${appType}`);
+ if (typeof huntarrUI !== 'undefined' && typeof huntarrUI.showNotification === 'function') {
+ huntarrUI.showNotification(`Error: App panel not found for ${appType}`, 'error');
+ } else {
+ alert(`Error: App panel not found for ${appType}`);
+ }
+ return;
+ }
+
+ // Proceed with saving for the found app panel
+ this.saveAppSettings(appType, appPanel);
+ },
+
+ // Helper function to save settings for a specific app
+ saveAppSettings: function(appType, appPanel) {
+ console.log(`Saving settings for ${appType}`);
+
+ // For Whisparr, ensure we indicate we're working with V2
+ let apiVersion = "";
+ if (appType === "whisparr") {
+ console.log("Saving Whisparr V2 settings");
+ apiVersion = "V2";
+ } else if (appType === "eros") {
+ console.log("Saving Eros (Whisparr V3) settings");
+ }
+
+ let settings;
+ try {
+ // Make sure the app type is set on the panel for SettingsForms
+ appPanel.setAttribute('data-app-type', appType);
+
+ // Get settings from the form
+ settings = SettingsForms.getFormSettings(appPanel, appType);
+ console.log(`Collected settings for ${appType}:`, settings);
+ } catch (error) {
+ console.error(`Error collecting settings for ${appType}:`, error);
+ if (typeof huntarrUI !== 'undefined' && typeof huntarrUI.showNotification === 'function') {
+ huntarrUI.showNotification(`Error collecting settings: ${error.message}`, 'error');
+ } else {
+ alert(`Error collecting settings: ${error.message}`);
+ }
+ return;
+ }
+
+ // Add specific logging for settings critical to stateful management
+ if (appType === 'general') {
+ console.log('Stateful management settings being saved:', {
+ statefulExpirationHours: settings.statefulExpirationHours,
+ api_timeout: settings.api_timeout,
+ command_wait_delay: settings.command_wait_delay,
+ command_wait_attempts: settings.command_wait_attempts
+ });
+ }
+
+ // Send settings to the server
+ console.log(`Sending ${appType} settings to server...`);
+ fetch(`/api/settings/${appType}`, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify(settings)
+ })
+ .then(response => {
+ if (!response.ok) {
+ throw new Error(`HTTP error ${response.status}: ${response.statusText}`);
+ }
+ return response.json();
+ })
+ .then(data => {
+ console.log(`${appType} settings saved successfully:`, data);
+
+ // Temporarily suppress change detection
+ window._appsSuppressChangeDetection = true;
+
+ // Store the current form values as the new "original" values
+ this.storeOriginalFormValues(appPanel);
+
+ // Disable save button and reset state
+ this.settingsChanged = false;
+ if (this.elements.saveAppsButton) {
+ this.elements.saveAppsButton.disabled = true;
+ }
+
+ // Reset the saving flag
+ window._appsCurrentlySaving = false;
+
+ // Ensure form elements are properly updated to reflect saved state
+ this.markFormAsUnchanged(appPanel);
+
+ // After a short delay, re-enable change detection
+ setTimeout(() => {
+ window._appsSuppressChangeDetection = false;
+ }, 1000);
+
+ // Show success notification
+ if (typeof huntarrUI !== 'undefined' && typeof huntarrUI.showNotification === 'function') {
+ huntarrUI.showNotification(`${appType} settings saved successfully`, 'success');
+ } else {
+ alert(`${appType} settings saved successfully`);
+ }
+ })
+ .catch(error => {
+ console.error(`Error saving ${appType} settings:`, error);
+ if (typeof huntarrUI !== 'undefined' && typeof huntarrUI.showNotification === 'function') {
+ huntarrUI.showNotification(`Error saving settings: ${error.message}`, 'error');
+ } else {
+ alert(`Error saving settings: ${error.message}`);
+ }
+ // Reset the saving flag
+ window._appsCurrentlySaving = false;
+ });
+ },
+
+ // Store the current form values as the new "original" values
+ storeOriginalFormValues: function(appPanel) {
+ const form = appPanel.querySelector('form');
+ if (!form) return;
+
+ const originalValues = {};
+ const formElements = form.querySelectorAll('input, select, textarea');
+ formElements.forEach(element => {
+ originalValues[element.id] = element.value;
+ });
+
+ this.originalSettings = originalValues;
+ console.log('Original form values stored:', this.originalSettings);
+ },
+
+ // Mark form as unchanged
+ markFormAsUnchanged: function(appPanel) {
+ const form = appPanel.querySelector('form');
+ if (!form) return;
+
+ // First, remove the 'changed' class from all form elements
+ const formElements = form.querySelectorAll('input, select, textarea');
+ formElements.forEach(element => {
+ element.classList.remove('changed');
+ });
+
+ // Get the app type to properly handle app-specific flags
+ const appType = appPanel.getAttribute('data-app-type') || '';
+ console.log(`Marking form as unchanged for app type: ${appType}`);
+
+ // Clear app-specific change flags
+ if (window._hasAppChanges && typeof window._hasAppChanges === 'object') {
+ window._hasAppChanges[appType] = false;
+ }
+
+ // Ensure we reset all change tracking for this app
+ try {
+ // Reset any form change flags
+ if (form.dataset) {
+ form.dataset.hasChanges = 'false';
+ }
+
+ // Clear any app-specific data attributes that might be tracking changes
+ appPanel.querySelectorAll('[data-changed="true"]').forEach(el => {
+ el.setAttribute('data-changed', 'false');
+ });
+
+ // Reset the internal change tracking for this specific app
+ if (appType && this.appsWithChanges && this.appsWithChanges.includes(appType)) {
+ this.appsWithChanges = this.appsWithChanges.filter(app => app !== appType);
+ console.log(`Removed ${appType} from appsWithChanges:`, this.appsWithChanges);
+ }
+
+ // Force update overall app state
+ this.settingsChanged = this.appsWithChanges && this.appsWithChanges.length > 0;
+
+ // Explicitly handle Readarr, Lidarr, and Whisparr which seem to have issues
+ if (appType === 'readarr' || appType === 'lidarr' || appType === 'whisparr' || appType === 'whisparrv2') {
+ console.log(`Special handling for ${appType} to ensure changes are cleared`);
+ // Force additional global state updates
+ if (window.huntarrUI && window.huntarrUI.formChanged) {
+ window.huntarrUI.formChanged[appType] = false;
+ }
+ // Reset the global changed state tracker if this was the only app with changes
+ if (!this.settingsChanged && window.huntarrUI) {
+ window.huntarrUI.hasUnsavedChanges = false;
+ }
+ // Force immediate re-evaluation of the form state
+ setTimeout(() => {
+ this.hasFormChanges(form);
+ }, 10);
+ }
+ } catch (error) {
+ console.error(`Error in markFormAsUnchanged for ${appType}:`, error);
+ }
+ }
+};
+
+// Initialize when document is ready
+document.addEventListener('DOMContentLoaded', () => {
+ appsModule.init();
+
+ // Add a direct event listener to the save button for maximum reliability
+ const saveButton = document.getElementById('saveAppsButton');
+ if (saveButton) {
+ saveButton.addEventListener('click', function(event) {
+ console.log('Save button clicked directly');
+ appsModule.saveApps(event);
+ });
+ }
+});
diff --git a/Huntarr.io-6.3.6/frontend/static/js/apps/cleanuperr.js b/Huntarr.io-6.3.6/frontend/static/js/apps/cleanuperr.js
new file mode 100644
index 0000000..801391c
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/static/js/apps/cleanuperr.js
@@ -0,0 +1,7 @@
/**
 * Cleanuperr - Information display component for Huntarr.
 * Provides information about the Cleanuperr project by Flaminel.
 *
 * The page is purely informational, so no behavior is wired up here —
 * this module only announces that it loaded.
 */
console.log('Cleanuperr information module loaded');
diff --git a/Huntarr.io-6.3.6/frontend/static/js/apps/eros.js b/Huntarr.io-6.3.6/frontend/static/js/apps/eros.js
new file mode 100644
index 0000000..6331a07
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/static/js/apps/eros.js
@@ -0,0 +1,196 @@
+/**
+ * Eros.js - Handles Eros settings and interactions in the Huntarr UI
+ */
+
document.addEventListener('DOMContentLoaded', function() {
    // Intentionally empty: new-main.js invokes setupErosForm() once the Eros
    // settings tab becomes active, so nothing is initialized eagerly here.
    // setupErosForm();
    // setupErosLogs(); // Assuming logs are handled by the main logs section
    // setupClearProcessedButtons('eros'); // Assuming this is handled elsewhere or not needed immediately
});
+
+/**
+ * Setup Eros settings form and connection test
+ * This function is now called by new-main.js when the Eros settings tab is shown.
+ */
/**
 * Setup Eros settings form and connection test.
 * Called by new-main.js when the Eros settings tab is shown; safe to call
 * repeatedly because a dataset flag prevents duplicate click handlers.
 * Changes vs. original: removed the unused erosVersionDisplay local, and
 * the validation now tolerates missing input elements instead of throwing.
 */
function setupErosForm() {
    console.log("[eros.js] Setting up Eros form...");
    const panel = document.getElementById('erosSettings');
    if (!panel) {
        console.warn("[eros.js] Eros settings panel not found.");
        return;
    }

    const testErosButton = panel.querySelector('#test-eros-button');
    const erosStatusIndicator = panel.querySelector('#eros-connection-status');
    const apiUrlInput = panel.querySelector('#eros_api_url');
    const apiKeyInput = panel.querySelector('#eros_api_key');

    // Check if event listener is already attached (prevents duplicate handlers)
    if (!testErosButton || testErosButton.dataset.listenerAttached === 'true') {
        console.log("[eros.js] Test button not found or listener already attached.");
        return;
    }
    console.log("[eros.js] Setting up Eros form listeners.");
    testErosButton.dataset.listenerAttached = 'true'; // Mark as attached

    // Add event listener for connection test
    testErosButton.addEventListener('click', function() {
        console.log("[eros.js] Testing Eros connection...");

        // Basic validation — also guards against the inputs being absent,
        // which previously threw a TypeError on .value
        if (!apiUrlInput || !apiUrlInput.value || !apiKeyInput || !apiKeyInput.value) {
            if (typeof huntarrUI !== 'undefined') {
                huntarrUI.showNotification('Please enter both API URL and API Key for Eros', 'error');
            } else {
                alert('Please enter both API URL and API Key for Eros');
            }
            return;
        }

        // Disable button during test and show pending status
        testErosButton.disabled = true;
        if (erosStatusIndicator) {
            erosStatusIndicator.className = 'connection-status pending';
            erosStatusIndicator.textContent = 'Testing...';
        }

        // Call API to test connection
        HuntarrUtils.fetchWithTimeout('/api/eros/test-connection', {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json'
            },
            body: JSON.stringify({
                api_url: apiUrlInput.value,
                api_key: apiKeyInput.value,
                api_timeout: 30
            })
        }, 30000) // 30 second timeout
        .then(response => response.json())
        .then(data => {
            // Enable the button again
            testErosButton.disabled = false;

            if (erosStatusIndicator) {
                if (data.success) {
                    erosStatusIndicator.className = 'connection-status success';
                    erosStatusIndicator.textContent = 'Connected';
                    if (typeof huntarrUI !== 'undefined') {
                        huntarrUI.showNotification('Successfully connected to Eros', 'success');
                    }
                    getErosVersion(); // Fetch version after successful connection
                } else {
                    erosStatusIndicator.className = 'connection-status failure';
                    erosStatusIndicator.textContent = 'Failed';
                    if (typeof huntarrUI !== 'undefined') {
                        huntarrUI.showNotification(data.message || 'Failed to connect to Eros', 'error');
                    } else {
                        alert(data.message || 'Failed to connect to Eros');
                    }
                }
            }
        })
        .catch(error => {
            console.error('[eros.js] Error testing connection:', error);
            testErosButton.disabled = false;

            if (erosStatusIndicator) {
                erosStatusIndicator.className = 'connection-status failure';
                erosStatusIndicator.textContent = 'Error';
            }

            if (typeof huntarrUI !== 'undefined') {
                huntarrUI.showNotification('Error testing connection: ' + error.message, 'error');
            } else {
                alert('Error testing connection: ' + error.message);
            }
        });
    });

    // Initialize form state and fetch data
    refreshErosStatusAndVersion();
}
+
+/**
+ * Get the Eros software version from the instance.
+ * This is separate from the API test.
+ */
/**
 * Get the Eros software version from the instance.
 * Reuses the test-connection endpoint (which reports the version) rather
 * than a dedicated version endpoint; separate from the API test itself.
 */
function getErosVersion() {
    const panel = document.getElementById('erosSettings');
    if (!panel) return;

    const versionDisplay = panel.querySelector('#eros-version');
    if (!versionDisplay) return;

    // Read the API credentials straight from the form
    const apiUrlInput = panel.querySelector('#eros_api_url');
    const apiKeyInput = panel.querySelector('#eros_api_key');

    // Without both credentials there is nothing to query
    if (!apiUrlInput || !apiUrlInput.value || !apiKeyInput || !apiKeyInput.value) {
        versionDisplay.textContent = 'N/A';
        return;
    }

    const payload = {
        api_url: apiUrlInput.value,
        api_key: apiKeyInput.value,
        api_timeout: 10
    };

    // Endpoint to get version info - using the test endpoint since it returns version
    HuntarrUtils.fetchWithTimeout('/api/eros/test-connection', {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json'
        },
        body: JSON.stringify(payload)
    }, 10000)
        .then((response) => response.json())
        .then((data) => {
            versionDisplay.textContent = (data.success && data.version) ? 'v' + data.version : 'Unknown';
        })
        .catch((error) => {
            console.error('[eros.js] Error fetching version:', error);
            versionDisplay.textContent = 'Error';
        });
}
+
+/**
+ * Refresh the connection status and version display for Eros.
+ */
+function refreshErosStatusAndVersion() {
+ // Try to get current connection status from the server
+ fetch('/api/eros/status')
+ .then(response => response.json())
+ .then(data => {
+ const panel = document.getElementById('erosSettings');
+ if (!panel) return;
+
+ const statusIndicator = panel.querySelector('#eros-connection-status');
+ if (statusIndicator) {
+ if (data.connected) {
+ statusIndicator.className = 'connection-status success';
+ statusIndicator.textContent = 'Connected';
+ getErosVersion(); // Try to get version if connected
+ } else if (data.configured) {
+ statusIndicator.className = 'connection-status failure';
+ statusIndicator.textContent = 'Not Connected';
+ } else {
+ statusIndicator.className = 'connection-status pending';
+ statusIndicator.textContent = 'Not Configured';
+ }
+ }
+ })
+ .catch(error => {
+ console.error('[eros.js] Error checking status:', error);
+ });
+}
+
// Expose the Eros helpers globally so other modules (e.g. new-main.js)
// can invoke them when the settings tab is activated
Object.assign(window, { setupErosForm, getErosVersion, refreshErosStatusAndVersion });
diff --git a/Huntarr.io-6.3.6/frontend/static/js/apps/lidarr.js b/Huntarr.io-6.3.6/frontend/static/js/apps/lidarr.js
new file mode 100644
index 0000000..436ed5e
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/static/js/apps/lidarr.js
@@ -0,0 +1,101 @@
+// Lidarr-specific functionality
+
+(function(app) {
+ if (!app) {
+ console.error("Huntarr App core is not loaded!");
+ return;
+ }
+
+ const lidarrModule = {
+ elements: {
+ apiUrlInput: document.getElementById('lidarr_api_url'),
+ apiKeyInput: document.getElementById('lidarr_api_key'),
+ connectionTestButton: document.getElementById('test-lidarr-connection'),
+ huntMissingModeSelect: document.getElementById('hunt_missing_mode'),
+ huntMissingItemsInput: document.getElementById('hunt_missing_items'),
+ huntUpgradeItemsInput: document.getElementById('hunt_upgrade_items'),
+ sleepDurationInput: document.getElementById('lidarr_sleep_duration'),
+ sleepDurationHoursSpan: document.getElementById('lidarr_sleep_duration_hours'),
+ stateResetIntervalInput: document.getElementById('lidarr_state_reset_interval_hours'),
+ monitoredOnlyInput: document.getElementById('lidarr_monitored_only'),
+ skipFutureReleasesInput: document.getElementById('lidarr_skip_future_releases'),
+ skipArtistRefreshInput: document.getElementById('skip_artist_refresh'),
+ randomMissingInput: document.getElementById('lidarr_random_missing'),
+ randomUpgradesInput: document.getElementById('lidarr_random_upgrades'),
+ debugModeInput: document.getElementById('lidarr_debug_mode'),
+ apiTimeoutInput: document.getElementById('lidarr_api_timeout'),
+ commandWaitDelayInput: document.getElementById('lidarr_command_wait_delay'),
+ commandWaitAttemptsInput: document.getElementById('lidarr_command_wait_attempts'),
+ minimumDownloadQueueSizeInput: document.getElementById('lidarr_minimum_download_queue_size')
+ },
+
+ init: function() {
+ console.log('[Lidarr Module] Initializing...');
+ // Cache elements specific to the Lidarr settings form
+ this.elements = {
+ apiUrlInput: document.getElementById('lidarr_api_url'),
+ apiKeyInput: document.getElementById('lidarr_api_key'),
+ connectionTestButton: document.getElementById('test-lidarr-connection'),
+ huntMissingModeSelect: document.getElementById('hunt_missing_mode'),
+ huntMissingItemsInput: document.getElementById('hunt_missing_items'),
+ huntUpgradeItemsInput: document.getElementById('hunt_upgrade_items'),
+ // ...other element references
+ };
+
+ // Add event listeners
+ this.addEventListeners();
+ },
+
+ addEventListeners() {
+ // Add connection test button click handler
+ if (this.elements.connectionTestButton) {
+ this.elements.connectionTestButton.addEventListener('click', this.testConnection.bind(this));
+ }
+
+ // Add event listener to update help text when missing mode changes
+ if (this.elements.huntMissingModeSelect) {
+ this.elements.huntMissingModeSelect.addEventListener('change', this.updateHuntMissingModeHelp.bind(this));
+ // Initial update
+ this.updateHuntMissingModeHelp();
+ }
+ },
+
+ // Update help text based on selected missing mode
+ updateHuntMissingModeHelp() {
+ const mode = this.elements.huntMissingModeSelect.value;
+ const helpText = document.querySelector('#hunt_missing_items + .setting-help');
+
+ if (helpText) {
+ if (mode === 'artist') {
+ helpText.textContent = "Number of artists with missing albums to search per cycle (0 to disable)";
+ } else if (mode === 'album') {
+ helpText.textContent = "Number of specific albums to search per cycle (0 to disable)";
+ }
+ }
+ },
+
+ updateSleepDurationDisplay: function() {
+ // This function remains as it updates a specific UI element
+ if (this.elements.sleepDurationInput && this.elements.sleepDurationHoursSpan) {
+ const seconds = parseInt(this.elements.sleepDurationInput.value) || 900;
+ // Assuming app.updateDurationDisplay exists and is accessible
+ if (app && typeof app.updateDurationDisplay === 'function') {
+ app.updateDurationDisplay(seconds, this.elements.sleepDurationHoursSpan);
+ } else {
+ console.warn("app.updateDurationDisplay not found, sleep duration text might not update.");
+ }
+ }
+ }
+ };
+
+ // Initialize Lidarr module when DOM content is loaded and if lidarrSettings exists
+ document.addEventListener('DOMContentLoaded', () => {
+ if (document.getElementById('lidarrSettings')) {
+ lidarrModule.init();
+ if (app) {
+ app.lidarrModule = lidarrModule;
+ }
+ }
+ });
+
+})(window.huntarrUI); // Pass the global UI object
diff --git a/Huntarr.io-6.3.6/frontend/static/js/apps/radarr.js b/Huntarr.io-6.3.6/frontend/static/js/apps/radarr.js
new file mode 100644
index 0000000..0b63517
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/static/js/apps/radarr.js
@@ -0,0 +1,75 @@
+// Radarr-specific functionality
+
+(function(app) {
+ if (!app) {
+ console.error("Huntarr App core is not loaded!");
+ return;
+ }
+
+ const radarrModule = {
+ elements: {},
+
+ init: function() {
+ console.log('[Radarr Module] Initializing...');
+ this.cacheElements();
+ this.setupEventListeners();
+ },
+
+ cacheElements: function() {
+ // Cache elements specific to the Radarr settings form
+ this.elements.apiUrlInput = document.getElementById('radarr_api_url');
+ this.elements.apiKeyInput = document.getElementById('radarr_api_key');
+ this.elements.huntMissingMoviesInput = document.getElementById('hunt_missing_movies');
+ this.elements.huntUpgradeMoviesInput = document.getElementById('hunt_upgrade_movies');
+ this.elements.sleepDurationInput = document.getElementById('radarr_sleep_duration');
+ this.elements.sleepDurationHoursSpan = document.getElementById('radarr_sleep_duration_hours');
+ this.elements.stateResetIntervalInput = document.getElementById('radarr_state_reset_interval_hours');
+ this.elements.monitoredOnlyInput = document.getElementById('radarr_monitored_only');
+ this.elements.skipFutureReleasesInput = document.getElementById('skip_future_releases'); // Note: ID might be shared
+ this.elements.skipMovieRefreshInput = document.getElementById('skip_movie_refresh');
+ this.elements.randomMissingInput = document.getElementById('radarr_random_missing');
+ this.elements.randomUpgradesInput = document.getElementById('radarr_random_upgrades');
+ this.elements.debugModeInput = document.getElementById('radarr_debug_mode');
+ this.elements.apiTimeoutInput = document.getElementById('radarr_api_timeout');
+ this.elements.commandWaitDelayInput = document.getElementById('radarr_command_wait_delay');
+ this.elements.commandWaitAttemptsInput = document.getElementById('radarr_command_wait_attempts');
+ this.elements.minimumDownloadQueueSizeInput = document.getElementById('radarr_minimum_download_queue_size');
+ // Add any other Radarr-specific elements
+ },
+
+ setupEventListeners: function() {
+ // Keep listeners ONLY for elements with specific UI updates beyond simple value changes
+ if (this.elements.sleepDurationInput) {
+ this.elements.sleepDurationInput.addEventListener('input', () => {
+ this.updateSleepDurationDisplay();
+ // No need to call checkForChanges here, handled by delegation
+ });
+ }
+ // Remove other input listeners previously used for checkForChanges
+ },
+
+ updateSleepDurationDisplay: function() {
+ // This function remains as it updates a specific UI element
+ if (this.elements.sleepDurationInput && this.elements.sleepDurationHoursSpan) {
+ const seconds = parseInt(this.elements.sleepDurationInput.value) || 900;
+ // Assuming app.updateDurationDisplay exists and is accessible
+ if (app && typeof app.updateDurationDisplay === 'function') {
+ app.updateDurationDisplay(seconds, this.elements.sleepDurationHoursSpan);
+ } else {
+ console.warn("app.updateDurationDisplay not found, sleep duration text might not update.");
+ }
+ }
+ }
+ };
+
+ // Initialize Radarr module
+ document.addEventListener('DOMContentLoaded', () => {
+ if (document.getElementById('radarrSettings')) {
+ radarrModule.init();
+ if (app) {
+ app.radarrModule = radarrModule;
+ }
+ }
+ });
+
+})(window.huntarrUI); // Pass the global UI object
diff --git a/Huntarr.io-6.3.6/frontend/static/js/apps/readarr.js b/Huntarr.io-6.3.6/frontend/static/js/apps/readarr.js
new file mode 100644
index 0000000..75b659a
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/static/js/apps/readarr.js
@@ -0,0 +1,75 @@
+// Readarr-specific functionality
+
+(function(app) {
+    // Bail out early when the shared UI core has not been loaded.
+    if (!app) {
+        console.error("Huntarr App core is not loaded!");
+        return;
+    }
+
+    // Readarr settings-panel module: caches form elements and keeps the
+    // sleep-duration label in sync. Load/save/change detection lives in new-main.js.
+    const readarrModule = {
+        elements: {},
+
+        init: function() {
+            console.log('[Readarr Module] Initializing...');
+            this.cacheElements();
+            this.setupEventListeners();
+        },
+
+        cacheElements: function() {
+            // Cache elements specific to the Readarr settings form.
+            // Missing ids yield null entries; callers guard before use.
+            this.elements.apiUrlInput = document.getElementById('readarr_api_url');
+            this.elements.apiKeyInput = document.getElementById('readarr_api_key');
+            this.elements.huntMissingBooksInput = document.getElementById('hunt_missing_books');
+            this.elements.huntUpgradeBooksInput = document.getElementById('hunt_upgrade_books');
+            this.elements.sleepDurationInput = document.getElementById('readarr_sleep_duration');
+            this.elements.sleepDurationHoursSpan = document.getElementById('readarr_sleep_duration_hours');
+            this.elements.stateResetIntervalInput = document.getElementById('readarr_state_reset_interval_hours');
+            this.elements.monitoredOnlyInput = document.getElementById('readarr_monitored_only');
+            this.elements.skipFutureReleasesInput = document.getElementById('readarr_skip_future_releases');
+            // NOTE(review): unlike its siblings, this id has no 'readarr_' prefix —
+            // confirm it is not shared with another app's form.
+            this.elements.skipAuthorRefreshInput = document.getElementById('skip_author_refresh');
+            this.elements.randomMissingInput = document.getElementById('readarr_random_missing');
+            this.elements.randomUpgradesInput = document.getElementById('readarr_random_upgrades');
+            this.elements.debugModeInput = document.getElementById('readarr_debug_mode');
+            this.elements.apiTimeoutInput = document.getElementById('readarr_api_timeout');
+            this.elements.commandWaitDelayInput = document.getElementById('readarr_command_wait_delay');
+            this.elements.commandWaitAttemptsInput = document.getElementById('readarr_command_wait_attempts');
+            this.elements.minimumDownloadQueueSizeInput = document.getElementById('readarr_minimum_download_queue_size');
+            // Add any other Readarr-specific elements
+        },
+
+        setupEventListeners: function() {
+            // Keep listeners ONLY for elements with specific UI updates beyond
+            // simple value changes; change detection is handled by delegation.
+            if (this.elements.sleepDurationInput) {
+                this.elements.sleepDurationInput.addEventListener('input', () => {
+                    this.updateSleepDurationDisplay();
+                    // No need to call checkForChanges here, handled by delegation
+                });
+            }
+        },
+
+        updateSleepDurationDisplay: function() {
+            // Updates the human-readable duration label next to the input.
+            if (this.elements.sleepDurationInput && this.elements.sleepDurationHoursSpan) {
+                const seconds = parseInt(this.elements.sleepDurationInput.value) || 900;
+                // Assuming app.updateDurationDisplay exists and is accessible
+                if (app && typeof app.updateDurationDisplay === 'function') {
+                    app.updateDurationDisplay(seconds, this.elements.sleepDurationHoursSpan);
+                } else {
+                    console.warn("app.updateDurationDisplay not found, sleep duration text might not update.");
+                }
+            }
+        }
+    };
+
+    // Initialize Readarr module once the DOM is ready and the panel exists.
+    document.addEventListener('DOMContentLoaded', () => {
+        if (document.getElementById('readarrSettings')) {
+            readarrModule.init();
+            if (app) {
+                app.readarrModule = readarrModule;
+            }
+        }
+    });
+
+})(window.huntarrUI); // Pass the global UI object
diff --git a/Huntarr.io-6.3.6/frontend/static/js/apps/sonarr.js b/Huntarr.io-6.3.6/frontend/static/js/apps/sonarr.js
new file mode 100644
index 0000000..d905b4a
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/static/js/apps/sonarr.js
@@ -0,0 +1,82 @@
+// Sonarr-specific functionality
+
+(function(app) {
+ if (!app) {
+ console.error("Huntarr App core is not loaded!");
+ return;
+ }
+
+ const sonarrModule = {
+ elements: {},
+
+ init: function() {
+ // Cache elements specific to Sonarr settings
+ this.cacheElements();
+ // Setup event listeners specific to Sonarr settings
+ this.setupEventListeners();
+ // Initial population of the form is handled by new-main.js
+ },
+
+ cacheElements: function() {
+ // Cache elements used by Sonarr settings form
+ this.elements.apiUrlInput = document.getElementById('sonarr_api_url');
+ this.elements.apiKeyInput = document.getElementById('sonarr_api_key');
+ this.elements.huntMissingItemsInput = document.getElementById('sonarr-hunt-missing-items');
+ this.elements.huntUpgradeItemsInput = document.getElementById('sonarr-hunt-upgrade-items');
+ this.elements.sleepDurationInput = document.getElementById('sonarr_sleep_duration');
+ this.elements.sleepDurationHoursSpan = document.getElementById('sonarr_sleep_duration_hours');
+ this.elements.monitoredOnlyInput = document.getElementById('sonarr_monitored_only');
+ this.elements.skipFutureEpisodesInput = document.getElementById('sonarr_skip_future_episodes');
+ this.elements.skipSeriesRefreshInput = document.getElementById('sonarr_skip_series_refresh');
+ this.elements.randomMissingInput = document.getElementById('sonarr_random_missing');
+ this.elements.randomUpgradesInput = document.getElementById('sonarr_random_upgrades');
+ this.elements.debugModeInput = document.getElementById('sonarr_debug_mode');
+ this.elements.apiTimeoutInput = document.getElementById('sonarr_api_timeout');
+ this.elements.commandWaitDelayInput = document.getElementById('sonarr_command_wait_delay');
+ this.elements.commandWaitAttemptsInput = document.getElementById('sonarr_command_wait_attempts');
+ this.elements.minimumDownloadQueueSizeInput = document.getElementById('sonarr_minimum_download_queue_size');
+ // Add other Sonarr-specific elements if any
+ },
+
+ setupEventListeners: function() {
+ // Add event listeners for Sonarr-specific controls if needed
+ // Example: If there were unique interactions for Sonarr settings
+ // Most change detection is now handled centrally by new-main.js
+
+ // Update sleep duration display on input change
+ if (this.elements.sleepDurationInput) {
+ this.elements.sleepDurationInput.addEventListener('input', () => {
+ this.updateSleepDurationDisplay();
+ // Central change detection handles the rest
+ });
+ }
+ },
+
+ updateSleepDurationDisplay: function() {
+ // Use the central utility function for updating duration display
+ if (this.elements.sleepDurationInput && this.elements.sleepDurationHoursSpan) {
+ const seconds = parseInt(this.elements.sleepDurationInput.value) || 900;
+ app.updateDurationDisplay(seconds, this.elements.sleepDurationHoursSpan);
+ }
+ },
+
+ // REMOVED: loadSettings function (handled by new-main.js)
+
+ // REMOVED: checkForChanges function (handled by new-main.js)
+
+ // REMOVED: updateSaveButtonState function (handled by new-main.js)
+
+ // REMOVED: getSettingsPayload function (handled by new-main.js)
+
+ // REMOVED: saveSettings function (handled by new-main.js)
+
+ // REMOVED: Overriding of app.saveSettings
+ };
+
+ // Initialize Sonarr module
+ sonarrModule.init();
+
+ // Add the Sonarr module to the app for reference if needed elsewhere
+ app.sonarrModule = sonarrModule;
+
+})(window.huntarrUI); // Use the new global object name
diff --git a/Huntarr.io-6.3.6/frontend/static/js/apps/swaparr.js b/Huntarr.io-6.3.6/frontend/static/js/apps/swaparr.js
new file mode 100644
index 0000000..9a0f10a
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/static/js/apps/swaparr.js
@@ -0,0 +1,381 @@
+// Swaparr-specific functionality
+
+(function(app) {
+ if (!app) {
+ console.error("Huntarr App core is not loaded!");
+ return;
+ }
+
+ const swaparrModule = {
+ elements: {},
+ isTableView: true, // Default to table view for Swaparr logs
+ hasRenderedAnyContent: false, // Track if we've rendered any content
+
+ // Store data for display
+ logData: {
+ config: {
+ platform: '',
+ maxStrikes: 3,
+ scanInterval: '10m',
+ maxDownloadTime: '2h',
+ ignoreAboveSize: '25 GB'
+ },
+ downloads: [], // Will store download status records
+ rawLogs: [] // Store raw logs for backup display
+ },
+
+    // Wire up the log-event processor and a click handler on the Swaparr log
+    // tab so switching to it always (re)renders content.
+    // NOTE(review): the DOMContentLoaded block at the bottom of this file binds
+    // a second click handler to the same tab — confirm the double render is
+    // intentional (ensureContentRendered appears idempotent).
+    init: function() {
+        console.log('[Swaparr Module] Initializing...');
+        this.setupLogProcessor();
+
+        // Add a listener for when the log tab changes to Swaparr
+        const swaparrTab = document.querySelector('.log-tab[data-app="swaparr"]');
+        if (swaparrTab) {
+            swaparrTab.addEventListener('click', () => {
+                console.log('[Swaparr Module] Swaparr tab clicked');
+                // Small delay to ensure everything is ready
+                setTimeout(() => {
+                    this.ensureContentRendered();
+                }, 200);
+            });
+        }
+    },
+
+    // Subscribe to the custom 'swaparrLogReceived' event dispatched by
+    // huntarrUI's log processing; event.detail.logData carries the raw line.
+    setupLogProcessor: function() {
+        // Setup a listener for custom event from huntarrUI's log processing
+        document.addEventListener('swaparrLogReceived', (event) => {
+            console.log('[Swaparr Module] Received log event:', event.detail.logData.substring(0, 100) + '...');
+            this.processLogLine(event.detail.logData);
+        });
+    },
+
+    // Ingest one raw Swaparr log line: archive it, then classify it in
+    // priority order — config summary, strike event, table header, or a
+    // download-table row — and update the corresponding view.
+    processLogLine: function(logLine) {
+        // Always store raw logs for backup display
+        this.logData.rawLogs.push(logLine);
+
+        // Limit raw logs storage to prevent memory issues (keep newest 500)
+        if (this.logData.rawLogs.length > 500) {
+            this.logData.rawLogs.shift();
+        }
+
+        // Process log lines specific to Swaparr
+        // NOTE(review): an empty/undefined line is pushed to rawLogs above
+        // before this guard — confirm that ordering is intended.
+        if (!logLine) return;
+
+        // Check if this looks like a Swaparr config line and extract information
+        if (logLine.includes('Platform:') && logLine.includes('Max strikes:')) {
+            this.extractConfigInfo(logLine);
+            this.renderConfigPanel();
+            return;
+        }
+
+        // Look for strike-related logs from system
+        if (logLine.includes('Added strike') ||
+            logLine.includes('Max strikes reached') ||
+            logLine.includes('removing download') ||
+            logLine.includes('Would have removed')) {
+
+            this.processStrikeLog(logLine);
+            return;
+        }
+
+        // Check if this is a table header/separator line
+        if (logLine.includes('strikes') && logLine.includes('status') && logLine.includes('name') && logLine.includes('size') && logLine.includes('eta')) {
+            // This is the header line, we can ignore it or use it to confirm table format
+            return;
+        }
+
+        // Try to match download info line
+        // Format: [strikes/max] status name size eta
+        // Example: 2/3 Striked MyDownload.mkv 1.5 GB 2h 15m
+        const downloadLinePattern = /(\d+\/\d+)\s+(\w+)\s+(.+?)\s+(\d+(?:\.\d+)?)\s*(\w+)\s+([\ddhms\s]+|Infinite)/;
+        const match = logLine.match(downloadLinePattern);
+
+        if (match) {
+            // Extract download information from the capture groups
+            const downloadInfo = {
+                strikes: match[1],
+                status: match[2],
+                name: match[3],
+                size: match[4] + ' ' + match[5],
+                eta: match[6]
+            };
+
+            // Update or add to our list of downloads
+            this.updateDownloadsList(downloadInfo);
+            this.renderTableView();
+        }
+
+        // If we're viewing the Swaparr tab, always ensure content is rendered
+        if (app.currentLogApp === 'swaparr') {
+            this.ensureContentRendered();
+        }
+    },
+
+ // Process strike-related logs from system logs
+ processStrikeLog: function(logLine) {
+ // Try to extract download name and strike info
+ let downloadName = '';
+ let strikes = '1/3'; // Default value
+ let status = 'Striked';
+
+ // Extract download name
+ if (logLine.includes('Added strike')) {
+ const match = logLine.match(/Added strike \((\d+)\/(\d+)\) to (.+?) - Reason:/);
+ if (match) {
+ strikes = `${match[1]}/${match[2]}`;
+ downloadName = match[3];
+ status = 'Striked';
+ }
+ } else if (logLine.includes('Max strikes reached')) {
+ const match = logLine.match(/Max strikes reached for (.+?), removing download/);
+ if (match) {
+ downloadName = match[1];
+ status = 'Removed';
+ }
+ } else if (logLine.includes('Would have removed')) {
+ const match = logLine.match(/Would have removed (.+?) after (\d+) strikes/);
+ if (match) {
+ downloadName = match[1];
+ status = 'Pending Removal';
+ strikes = `${match[2]}/3`;
+ }
+ }
+
+ if (downloadName) {
+ // Create a download info object with partial information
+ const downloadInfo = {
+ strikes: strikes,
+ status: status,
+ name: downloadName,
+ size: 'Unknown',
+ eta: 'Unknown'
+ };
+
+ // Update downloads list
+ this.updateDownloadsList(downloadInfo);
+ this.renderTableView();
+ }
+ },
+
+ extractConfigInfo: function(logLine) {
+ // Extract the config data from the log line
+ const platformMatch = logLine.match(/Platform:\s+(\w+)/);
+ const maxStrikesMatch = logLine.match(/Max strikes:\s+(\d+)/);
+ const scanIntervalMatch = logLine.match(/Scan interval:\s+(\d+\w+)/);
+ const maxDownloadTimeMatch = logLine.match(/Max download time:\s+(\d+\w+)/);
+ const ignoreSizeMatch = logLine.match(/Ignore above size:\s+(\d+\s*\w+)/);
+
+ if (platformMatch) this.logData.config.platform = platformMatch[1];
+ if (maxStrikesMatch) this.logData.config.maxStrikes = maxStrikesMatch[1];
+ if (scanIntervalMatch) this.logData.config.scanInterval = scanIntervalMatch[1];
+ if (maxDownloadTimeMatch) this.logData.config.maxDownloadTime = maxDownloadTimeMatch[1];
+ if (ignoreSizeMatch) this.logData.config.ignoreAboveSize = ignoreSizeMatch[1];
+ },
+
+ updateDownloadsList: function(downloadInfo) {
+ // Find if this download already exists in our list
+ const existingIndex = this.logData.downloads.findIndex(item =>
+ item.name.trim() === downloadInfo.name.trim()
+ );
+
+ if (existingIndex >= 0) {
+ // Update existing entry
+ this.logData.downloads[existingIndex] = downloadInfo;
+ } else {
+ // Add new entry
+ this.logData.downloads.push(downloadInfo);
+ }
+ },
+
+ renderConfigPanel: function() {
+ // Find the logs container
+ const logsContainer = document.getElementById('logsContainer');
+ if (!logsContainer) return;
+
+ // If the user has selected swaparr logs, show the config panel at the top
+ if (app.currentLogApp === 'swaparr') {
+ // Check if config panel already exists
+ let configPanel = document.getElementById('swaparr-config-panel');
+ if (!configPanel) {
+ // Create the panel
+ configPanel = document.createElement('div');
+ configPanel.id = 'swaparr-config-panel';
+ configPanel.classList.add('swaparr-panel');
+ logsContainer.appendChild(configPanel);
+ }
+
+ // Update the panel content
+ configPanel.innerHTML = `
+
+
Swaparr${this.logData.config.platform ? ' — ' + this.logData.config.platform : ''}
+
+ Max strikes: ${this.logData.config.maxStrikes}
+ Scan interval: ${this.logData.config.scanInterval}
+ Max download time: ${this.logData.config.maxDownloadTime}
+ Ignore above size: ${this.logData.config.ignoreAboveSize}
+
+
+ `;
+
+ this.hasRenderedAnyContent = true;
+ }
+ },
+
+ renderTableView: function() {
+ // Find the logs container
+ const logsContainer = document.getElementById('logsContainer');
+ if (!logsContainer || app.currentLogApp !== 'swaparr') return;
+
+ // Check if table already exists
+ let tableView = document.getElementById('swaparr-table-view');
+ if (!tableView) {
+ // Create the table
+ tableView = document.createElement('div');
+ tableView.id = 'swaparr-table-view';
+ tableView.classList.add('swaparr-table');
+ logsContainer.appendChild(tableView);
+ }
+
+ // Only render table if we have downloads to show
+ if (this.logData.downloads.length > 0) {
+ // Generate table HTML
+ let tableHTML = `
+
+
+
+ Strikes
+ Status
+ Name
+ Size
+ ETA
+
+
+
+ `;
+
+ // Add each download as a row
+ this.logData.downloads.forEach(download => {
+ // Apply status-specific CSS class
+ let statusClass = download.status.toLowerCase();
+
+ // Normalize some status values
+ if (statusClass === 'pending removal') statusClass = 'pending';
+ if (statusClass === 'removed') statusClass = 'removed';
+ if (statusClass === 'striked') statusClass = 'striked';
+ if (statusClass === 'normal') statusClass = 'normal';
+ if (statusClass === 'ignored') statusClass = 'ignored';
+
+ tableHTML += `
+
+ ${download.strikes}
+ ${download.status}
+ ${download.name}
+ ${download.size}
+ ${download.eta}
+
+ `;
+ });
+
+ tableHTML += `
+
+
+ `;
+
+ tableView.innerHTML = tableHTML;
+ this.hasRenderedAnyContent = true;
+ }
+ },
+
+ // Render raw logs if we don't have structured content
+ renderRawLogs: function() {
+ // Only show raw logs if we have no other content
+ if (this.hasRenderedAnyContent) return;
+
+ const logsContainer = document.getElementById('logsContainer');
+ if (!logsContainer || app.currentLogApp !== 'swaparr') return;
+
+ // Start with a message
+ const noDataMessage = document.createElement('div');
+ noDataMessage.classList.add('swaparr-panel');
+ noDataMessage.innerHTML = `
+
+
Swaparr Logs
+
Waiting for structured Swaparr data. Showing raw logs below:
+
+ `;
+ logsContainer.appendChild(noDataMessage);
+
+ // Add raw logs
+ for (const logLine of this.logData.rawLogs) {
+ const logEntry = document.createElement('div');
+ logEntry.className = 'log-entry';
+ logEntry.innerHTML = `${logLine} `;
+
+ // Basic level detection
+ if (logLine.includes('ERROR')) logEntry.classList.add('log-error');
+ else if (logLine.includes('WARN') || logLine.includes('WARNING')) logEntry.classList.add('log-warning');
+ else if (logLine.includes('DEBUG')) logEntry.classList.add('log-debug');
+ else logEntry.classList.add('log-info');
+
+ logsContainer.appendChild(logEntry);
+ }
+
+ this.hasRenderedAnyContent = true;
+ },
+
+    // Make sure we display something in the Swaparr tab: re-render structured
+    // content, falling back to raw logs when nothing structured exists.
+    ensureContentRendered: function() {
+        console.log('[Swaparr Module] Ensuring content is rendered, has content:', this.hasRenderedAnyContent);
+
+        // Reset rendered flag; the render calls below set it back to true
+        // whenever they actually draw something.
+        this.hasRenderedAnyContent = false;
+
+        // Check if we're viewing Swaparr tab
+        if (app.currentLogApp !== 'swaparr') return;
+
+        // First try to render structured content
+        this.renderConfigPanel();
+        this.renderTableView();
+
+        // If no structured content, show raw logs
+        if (!this.hasRenderedAnyContent) {
+            this.renderRawLogs();
+        }
+    },
+
+ // Clear the data when switching log views
+    clearData: function() {
+        // Drop parsed download rows when leaving the Swaparr view; raw logs are
+        // intentionally retained so renderRawLogs() still has history later.
+        this.logData.downloads = [];
+        // Keep raw logs for now
+        this.hasRenderedAnyContent = false;
+    }
+ };
+
+    // Initialize the module once the DOM is ready and expose it on the app.
+    // NOTE(review): init() also binds a click handler to the Swaparr tab, so
+    // the delegated handler below double-fires ensureContentRendered on that
+    // tab — confirm this is harmless/intended.
+    document.addEventListener('DOMContentLoaded', () => {
+        swaparrModule.init();
+
+        if (app) {
+            app.swaparrModule = swaparrModule;
+
+            // Setup a handler for when log tabs are changed
+            document.querySelectorAll('.log-tab').forEach(tab => {
+                tab.addEventListener('click', (e) => {
+                    // If switching to swaparr tab, make sure we render the view
+                    if (e.target.getAttribute('data-app') === 'swaparr') {
+                        console.log('[Swaparr Module] Swaparr tab clicked via delegation');
+                        // Small delay to allow logs to load
+                        setTimeout(() => {
+                            swaparrModule.ensureContentRendered();
+                        }, 200);
+                    }
+                    // If switching away from swaparr tab, clear the data
+                    else if (app.currentLogApp === 'swaparr') {
+                        swaparrModule.clearData();
+                    }
+                });
+            });
+        }
+    });
+
+})(window.huntarrUI); // Pass the global UI object
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/frontend/static/js/apps/whisparr.js b/Huntarr.io-6.3.6/frontend/static/js/apps/whisparr.js
new file mode 100644
index 0000000..28316f8
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/static/js/apps/whisparr.js
@@ -0,0 +1,195 @@
+/**
+ * Whisparr.js - Handles Whisparr settings and interactions in the Huntarr UI
+ */
+
+document.addEventListener("DOMContentLoaded", function() {
+    // Intentionally empty: new-main.js calls setupWhisparrForm() when the
+    // Whisparr settings tab becomes active, so nothing is wired up here.
+    // setupWhisparrForm();
+    // setupWhisparrLogs(); // Assuming logs are handled by the main logs section
+    // setupClearProcessedButtons('whisparr'); // Assuming this is handled elsewhere or not needed immediately
+});
+
+/**
+ * Setup Whisparr settings form and connection test
+ * This function is now called by new-main.js when the Whisparr settings tab is shown.
+ */
+function setupWhisparrForm() {
+    // Use querySelector within the active panel to be safe, though IDs should be unique
+    const panel = document.getElementById('whisparrSettings');
+    if (!panel) {
+        console.warn("[whisparr.js] Whisparr settings panel not found.");
+        return;
+    }
+
+    const testWhisparrButton = panel.querySelector('#test-whisparr-button');
+    const whisparrStatusIndicator = panel.querySelector('#whisparr-connection-status');
+    const whisparrVersionDisplay = panel.querySelector('#whisparr-version');
+    const apiUrlInput = panel.querySelector('#whisparr_api_url');
+    const apiKeyInput = panel.querySelector('#whisparr_api_key');
+
+    // Check if elements exist and if listener already attached to prevent duplicates
+    // (this function may be called every time the tab is shown).
+    if (!testWhisparrButton || testWhisparrButton.dataset.listenerAttached === 'true') {
+        console.log("[whisparr.js] Test button not found or listener already attached.");
+        return;
+    }
+    console.log("[whisparr.js] Setting up Whisparr form listeners.");
+    testWhisparrButton.dataset.listenerAttached = 'true'; // Mark as attached
+
+    // Test connection button: POSTs the entered URL/key to the backend and
+    // reflects the result in the status indicator.
+    testWhisparrButton.addEventListener('click', function() {
+        const apiUrl = apiUrlInput ? apiUrlInput.value.trim() : '';
+        const apiKey = apiKeyInput ? apiKeyInput.value.trim() : '';
+
+        if (!apiUrl || !apiKey) {
+            // Use the main UI notification system if available
+            if (typeof huntarrUI !== 'undefined' && huntarrUI.showNotification) {
+                huntarrUI.showNotification('Please enter both API URL and API Key for Whisparr', 'error');
+            } else {
+                alert('Please enter both API URL and API Key for Whisparr');
+            }
+            return;
+        }
+
+        // Disable the button and show a pending state while the request runs
+        testWhisparrButton.disabled = true;
+        if (whisparrStatusIndicator) {
+            whisparrStatusIndicator.className = 'connection-status pending';
+            whisparrStatusIndicator.textContent = 'Testing...';
+        }
+
+        // Direct connection test - let the backend handle version checking
+        HuntarrUtils.fetchWithTimeout('/api/whisparr/test-connection', {
+            method: 'POST',
+            headers: {
+                'Content-Type': 'application/json'
+            },
+            body: JSON.stringify({
+                api_url: apiUrl,
+                api_key: apiKey
+            })
+        })
+        .then(response => response.json())
+        .then(data => {
+            if (whisparrStatusIndicator) {
+                if (data.success) {
+                    whisparrStatusIndicator.className = 'connection-status success';
+                    whisparrStatusIndicator.textContent = 'Connected';
+                    if (typeof huntarrUI !== 'undefined' && huntarrUI.showNotification) {
+                        huntarrUI.showNotification('Successfully connected to Whisparr V2', 'success');
+                    }
+                    getWhisparrVersion(); // Fetch version after successful connection
+                } else {
+                    whisparrStatusIndicator.className = 'connection-status failure';
+                    whisparrStatusIndicator.textContent = 'Failed';
+                    if (typeof huntarrUI !== 'undefined' && huntarrUI.showNotification) {
+                        huntarrUI.showNotification('Connection to Whisparr failed: ' + data.message, 'error');
+                    }
+                }
+            }
+        })
+        .catch(error => {
+            if (whisparrStatusIndicator) {
+                whisparrStatusIndicator.className = 'connection-status failure';
+                whisparrStatusIndicator.textContent = 'Error';
+            }
+            if (typeof huntarrUI !== 'undefined' && huntarrUI.showNotification) {
+                huntarrUI.showNotification('Error testing Whisparr connection: ' + error, 'error');
+            }
+        })
+        .finally(() => {
+            // Re-enable the button regardless of outcome
+            if (testWhisparrButton.disabled) {
+                testWhisparrButton.disabled = false;
+            }
+        });
+    });
+
+    // Get Whisparr version if connection details are present and version display exists
+    // Only perform auto-check if we haven't already fetched the version
+    if (apiUrlInput && apiKeyInput && whisparrVersionDisplay &&
+        apiUrlInput.value && apiKeyInput.value &&
+        (!whisparrVersionDisplay.textContent || whisparrVersionDisplay.textContent === 'Unknown')) {
+
+        // Set a flag to prevent automatic version checks from triggering unsaved changes
+        const wasSettingsChanged = typeof huntarrUI !== 'undefined' ? huntarrUI.settingsChanged : false;
+
+        getWhisparrVersion();
+
+        // Restore the original settingsChanged state after the version check
+        // NOTE(review): the 100ms timeout races the async fetch in
+        // getWhisparrVersion() — confirm the restore cannot land mid-request.
+        if (typeof huntarrUI !== 'undefined' && huntarrUI.settingsChanged !== wasSettingsChanged) {
+            setTimeout(() => {
+                huntarrUI.settingsChanged = wasSettingsChanged;
+                console.log("[whisparr.js] Restored settingsChanged state after version check");
+
+                // If there are no actual changes, update the save button state
+                if (!wasSettingsChanged && typeof huntarrUI.updateSaveResetButtonState === 'function') {
+                    huntarrUI.updateSaveResetButtonState(false);
+                }
+            }, 100);
+        }
+    }
+
+    // Function to get Whisparr version and render it as "vX.Y.Z" in the panel.
+    // Captures and later restores huntarrUI.settingsChanged so updating the
+    // version text does not count as an unsaved settings change.
+    function getWhisparrVersion() {
+        if (!whisparrVersionDisplay) return; // Check if element exists
+
+        const wasSettingsChanged = typeof huntarrUI !== 'undefined' ? huntarrUI.settingsChanged : false;
+
+        HuntarrUtils.fetchWithTimeout('/api/whisparr/get-versions')
+            .then(response => {
+                if (!response.ok) {
+                    throw new Error('Failed to fetch Whisparr version');
+                }
+                return response.json();
+            })
+            .then(data => {
+                if (data.success && data.version) {
+                    // Temporarily store the textContent so we can detect if it actually changes
+                    const oldContent = whisparrVersionDisplay.textContent;
+                    const newContent = `v${data.version}`;
+
+                    if (oldContent !== newContent) {
+                        whisparrVersionDisplay.textContent = newContent; // Prepend 'v'
+
+                        // Restore settings changed state to prevent triggering the dialog
+                        if (typeof huntarrUI !== 'undefined') {
+                            setTimeout(() => {
+                                huntarrUI.settingsChanged = wasSettingsChanged;
+
+                                // If there are no actual changes, update the save button state
+                                if (!wasSettingsChanged && typeof huntarrUI.updateSaveResetButtonState === 'function') {
+                                    huntarrUI.updateSaveResetButtonState(false);
+                                }
+                            }, 50);
+                        }
+                    }
+                } else {
+                    whisparrVersionDisplay.textContent = 'Unknown';
+                }
+            })
+            .catch(error => {
+                whisparrVersionDisplay.textContent = 'Error';
+                console.error('Error fetching Whisparr version:', error);
+            })
+            .finally(() => {
+                // Final safety check to restore settings state
+                if (typeof huntarrUI !== 'undefined' && huntarrUI.settingsChanged !== wasSettingsChanged) {
+                    setTimeout(() => {
+                        huntarrUI.settingsChanged = wasSettingsChanged;
+                        // If there are no actual changes, update the save button state
+                        if (!wasSettingsChanged && typeof huntarrUI.updateSaveResetButtonState === 'function') {
+                            huntarrUI.updateSaveResetButtonState(false);
+                        }
+                    }, 100);
+                }
+            });
+    }
+}
+
+// Helper function for escaping HTML (keep if needed elsewhere, e.g., if logs are added here later)
+function escapeHtml(unsafe) {
+ return unsafe
+ .replace(/&/g, "&")
+ .replace(//g, ">")
+ .replace(/"/g, """)
+ .replace(/'/g, "'");
+}
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/frontend/static/js/direct-reset.js b/Huntarr.io-6.3.6/frontend/static/js/direct-reset.js
new file mode 100644
index 0000000..c87fda0
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/static/js/direct-reset.js
@@ -0,0 +1,181 @@
+// Direct reset button implementation - completely separate from the regular UI
+// This will add a new red button directly to the stateful management section
+
+// Set a flag to prevent showing expiration update notification on reset
+window.justCompletedStatefulReset = false;
+// Keep track of the current stateful hours value to detect real changes
+window.lastStatefulHoursValue = null;
+
+// Run this code as soon as this script is loaded
+(function() {
+ function insertDirectResetButton() {
+ // Look for the stateful header row
+ const headerRow = document.querySelector('.stateful-header-row');
+
+ if (!headerRow) {
+ // If we can't find it, try again soon
+ console.log('Stateful header not found, will try again in 1 second');
+ setTimeout(insertDirectResetButton, 1000);
+ return;
+ }
+
+ // Check if our button already exists to avoid duplicates
+ if (document.getElementById('emergency_reset_btn')) {
+ return;
+ }
+
+ console.log('Found stateful header, adding emergency reset button');
+
+ // Create the new button
+ const resetButton = document.createElement('button');
+ resetButton.id = 'emergency_reset_btn';
+ resetButton.innerText = '🔥 EMERGENCY RESET 🔥';
+ resetButton.style.background = 'linear-gradient(to right, #ff0000, #8b0000)';
+ resetButton.style.color = 'white';
+ resetButton.style.fontWeight = 'bold';
+ resetButton.style.border = 'none';
+ resetButton.style.borderRadius = '4px';
+ resetButton.style.padding = '8px 16px';
+ resetButton.style.marginLeft = '15px';
+ resetButton.style.cursor = 'pointer';
+ resetButton.style.boxShadow = '0 2px 5px rgba(0,0,0,0.3)';
+
+ // Add click handler for the new button
+ resetButton.onclick = function() {
+ if (confirm('⚠️ EMERGENCY RESET: Are you absolutely sure you want to reset all processed media IDs? This cannot be undone!')) {
+
+ // Show loading state
+ this.disabled = true;
+ this.innerText = '⏳ Resetting...';
+ this.style.background = '#666';
+
+ // Mark that we're performing a reset to prevent expiration notification
+ window.justCompletedStatefulReset = true;
+
+ // Make direct API call
+ fetch('/api/stateful/reset', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json'
+ }
+ })
+ .then(response => {
+ if (!response.ok) {
+ throw new Error('Server returned status ' + response.status);
+ }
+ return response.json();
+ })
+ .then(data => {
+ alert('✅ Success! Stateful management has been reset.');
+
+ // Reload the page with a query parameter to indicate reset was done
+ window.location.href = window.location.pathname + '?reset=done' + window.location.hash;
+ })
+ .catch(error => {
+ console.error('Reset failed:', error);
+ alert('❌ Reset failed: ' + error.message);
+
+ // Restore button state
+ this.disabled = false;
+ this.innerText = '🔥 EMERGENCY RESET 🔥';
+ this.style.background = 'linear-gradient(to right, #ff0000, #8b0000)';
+
+ // Clear the reset flag since operation failed
+ window.justCompletedStatefulReset = false;
+ });
+ }
+
+ // Prevent event propagation
+ return false;
+ };
+
+ // Add the button to the page
+ headerRow.appendChild(resetButton);
+ console.log('Emergency reset button added successfully');
+
+ // Track the initial value of the stateful hours input
+ const hoursInput = document.getElementById('stateful_management_hours');
+ if (hoursInput) {
+ window.lastStatefulHoursValue = parseInt(hoursInput.value);
+
+ // Add a change listener to detect when the user actually changes the value
+ hoursInput.addEventListener('change', function() {
+ window.lastStatefulHoursValue = parseInt(this.value);
+ });
+ }
+ }
+
+ // Try to add the button immediately
+ insertDirectResetButton();
+
+ // Also try when the DOM is loaded
+ document.addEventListener('DOMContentLoaded', insertDirectResetButton);
+
+ // And again when everything is fully loaded
+ window.addEventListener('load', insertDirectResetButton);
+
+ // Also check periodically to make sure the button exists
+ setInterval(function() {
+ const headerRow = document.querySelector('.stateful-header-row');
+ if (headerRow && !document.getElementById('emergency_reset_btn')) {
+ console.log('Emergency reset button missing, re-adding it');
+ insertDirectResetButton();
+ }
+ }, 1000); // Check every second
+
+ // Also listen for potential UI updates that might remove our button
+ // Especially listen for when settings are saved
+ const saveButton = document.getElementById('saveSettingsButton');
+ if (saveButton) {
+ saveButton.addEventListener('click', function() {
+ // After settings are saved, the UI might refresh
+ // Wait a short moment then check if our button is still there
+ setTimeout(function() {
+ const headerRow = document.querySelector('.stateful-header-row');
+ if (headerRow && !document.getElementById('emergency_reset_btn')) {
+ console.log('Emergency reset button missing after save, re-adding it');
+ insertDirectResetButton();
+ }
+ }, 500); // Check half a second after save
+ });
+ }
+
+ // Add a global interceptor for the notification system
+ const originalShowNotification = window.huntarrUI && window.huntarrUI.showNotification;
+ if (originalShowNotification) {
+ window.huntarrUI.showNotification = function(message, type) {
+ // If we just completed a reset and this is an expiration update notification, don't show it
+ if (window.justCompletedStatefulReset && message.includes('Updated expiration to')) {
+ console.log('Suppressing expiration update notification after reset');
+ window.justCompletedStatefulReset = false; // Reset the flag
+ return;
+ }
+
+ // Also suppress expiration notifications when saving general settings if hours didn't change
+ if (message.includes('Updated expiration to')) {
+ const hoursInput = document.getElementById('stateful_management_hours');
+ if (hoursInput) {
+ const currentValue = parseInt(hoursInput.value);
+ // Only show notification if the value actually changed
+ if (window.lastStatefulHoursValue === currentValue) {
+ console.log('Suppressing expiration notification because hours value did not change');
+ return;
+ }
+ // Update our tracked value
+ window.lastStatefulHoursValue = currentValue;
+ }
+ }
+
+ // Saving settings already shows a "Settings saved successfully" notification,
+ // so we don't need the expiration one too - suppress it if we just saved settings
+ if (message.includes('Updated expiration to') && document.getElementById('saveSettingsButton')?.disabled) {
+ console.log('Suppressing expiration notification after saving general settings');
+ return;
+ }
+
+ // Otherwise, proceed with the original notification
+ return originalShowNotification.call(this, message, type);
+ };
+ console.log('Notification system intercepted to handle notifications properly');
+ }
+})();
diff --git a/Huntarr.io-6.3.6/frontend/static/js/github-sponsors.js b/Huntarr.io-6.3.6/frontend/static/js/github-sponsors.js
new file mode 100644
index 0000000..6c2d672
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/static/js/github-sponsors.js
@@ -0,0 +1,269 @@
+/**
+ * GitHub Sponsors Integration
+ * Fetches and displays sponsors from GitHub for PlexGuide
+ */
+
+const GithubSponsors = {
+ // Constants
+ sponsorsUsername: 'plexguide',
+ sponsorsApiUrl: 'https://api.github.com/sponsors/',
+ cacheDuration: 3600000, // 1 hour in milliseconds
+
+ // Initialize the sponsors display
+ init: function() {
+ console.log('Initializing GitHub Sponsors display');
+
+ // Immediately call loadSponsors with mock data for a better user experience
+ // This prevents the loading spinner from staying visible
+ const mockSponsors = this.getImmediateMockSponsors();
+ this.displaySponsors(mockSponsors);
+
+ // Then load the actual data (which would be fetched from the API in a real implementation)
+ setTimeout(() => {
+ this.loadSponsors();
+ }, 100);
+
+ // Add event listener for manual refresh
+ document.addEventListener('click', function(e) {
+ if (e.target.closest('.action-button.refresh-sponsors')) {
+ GithubSponsors.loadSponsors(true);
+ }
+ });
+ },
+
+ // Get immediate mock sponsors without any delay
+ getImmediateMockSponsors: function() {
+ return [
+ {
+ name: 'MediaServer Pro',
+ url: 'https://github.com/mediaserverpro',
+ avatarUrl: 'https://ui-avatars.com/api/?name=MS&background=4A90E2&color=fff&size=200',
+ tier: 'Gold Sponsor'
+ },
+ {
+ name: 'StreamVault',
+ url: 'https://github.com/streamvault',
+ avatarUrl: 'https://ui-avatars.com/api/?name=SV&background=6C5CE7&color=fff&size=200',
+ tier: 'Gold Sponsor'
+ },
+ {
+ name: 'MediaStack',
+ url: 'https://github.com/mediastack',
+ avatarUrl: 'https://ui-avatars.com/api/?name=MS&background=00B894&color=fff&size=200',
+ tier: 'Silver Sponsor'
+ },
+ {
+ name: 'NASGuru',
+ url: 'https://github.com/nasguru',
+ avatarUrl: 'https://ui-avatars.com/api/?name=NG&background=FD79A8&color=fff&size=200',
+ tier: 'Silver Sponsor'
+ }
+ ];
+ },
+
+ // Load sponsors data
+ loadSponsors: function(skipCache = false) {
+ // Elements
+ const loadingEl = document.getElementById('sponsors-loading');
+ const sponsorsListEl = document.getElementById('sponsors-list');
+ const errorEl = document.getElementById('sponsors-error');
+
+ if (!loadingEl || !sponsorsListEl || !errorEl) {
+ console.error('Sponsors DOM elements not found');
+ return;
+ }
+
+ // First check for cached data
+ const cachedData = this.getCachedSponsors();
+
+ if (!skipCache && cachedData && cachedData.sponsors) {
+ console.log('Using cached sponsors data');
+ this.displaySponsors(cachedData.sponsors);
+ return;
+ }
+
+ // Show loading state
+ loadingEl.style.display = 'block';
+ sponsorsListEl.style.display = 'none';
+ errorEl.style.display = 'none';
+
+ // Since GitHub's API requires authentication for the sponsors endpoint,
+ // we'll use a mock implementation for demonstration purposes.
+ // In a production environment, this would be replaced with a proper server-side
+ // implementation that securely accesses the GitHub API with appropriate tokens.
+ this.getMockSponsors()
+ .then(sponsors => {
+ // Cache the sponsors data
+ this.cacheSponsors(sponsors);
+
+ // Display the sponsors
+ this.displaySponsors(sponsors);
+ })
+ .catch(error => {
+ console.error('Error fetching sponsors:', error);
+
+ // Show error state
+ loadingEl.style.display = 'none';
+ errorEl.style.display = 'block';
+ errorEl.querySelector('span').textContent = 'Could not load sponsors: ' + error.message;
+ });
+ },
+
+ // Get cached sponsors data
+ getCachedSponsors: function() {
+ const cachedData = localStorage.getItem('huntarr-github-sponsors');
+
+ if (!cachedData) {
+ return null;
+ }
+
+ try {
+ const data = JSON.parse(cachedData);
+
+ // Check if cache is expired
+ if (Date.now() - data.timestamp > this.cacheDuration) {
+ console.log('Sponsors cache expired');
+ return null;
+ }
+
+ return data;
+ } catch (e) {
+ console.error('Error parsing cached sponsors data:', e);
+ return null;
+ }
+ },
+
+ // Cache sponsors data
+ cacheSponsors: function(sponsors) {
+ const data = {
+ sponsors: sponsors,
+ timestamp: Date.now()
+ };
+
+ localStorage.setItem('huntarr-github-sponsors', JSON.stringify(data));
+ console.log('Cached sponsors data');
+ },
+
+ // Display sponsors in the UI
+ displaySponsors: function(sponsors) {
+ const sponsorsListEl = document.getElementById('sponsors-list');
+ const loadingEl = document.getElementById('sponsors-loading');
+
+ if (!sponsorsListEl) {
+ console.error('Sponsors list element not found');
+ return;
+ }
+
+ // Clear existing content
+ sponsorsListEl.innerHTML = '';
+
+ // Hide loading spinner
+ if (loadingEl) {
+ loadingEl.style.display = 'none';
+ }
+
+ // Show sponsors list
+ sponsorsListEl.style.display = 'flex';
+
+ if (!sponsors || sponsors.length === 0) {
+ sponsorsListEl.innerHTML = '';
+ return;
+ }
+
+ // Shuffle and limit to 10 random sponsors
+ const shuffledSponsors = this.shuffleArray([...sponsors]);
+ const limitedSponsors = shuffledSponsors.slice(0, 10);
+
+ // Create sponsor elements
+ limitedSponsors.forEach(sponsor => {
+ const sponsorEl = document.createElement('a');
+ sponsorEl.href = sponsor.url;
+ sponsorEl.target = '_blank';
+ sponsorEl.className = 'sponsor-item';
+ sponsorEl.title = `${sponsor.name} - ${sponsor.tier}`;
+
+            sponsorEl.innerHTML = `
+                <img class="sponsor-avatar" src="${sponsor.avatarUrl}" alt="${sponsor.name}">
+                <span class="sponsor-name">${sponsor.name}</span>
+            `;
+
+ sponsorsListEl.appendChild(sponsorEl);
+ });
+ },
+
+ // Utility function to shuffle an array (Fisher-Yates algorithm)
+ shuffleArray: function(array) {
+ for (let i = array.length - 1; i > 0; i--) {
+ const j = Math.floor(Math.random() * (i + 1));
+ [array[i], array[j]] = [array[j], array[i]];
+ }
+ return array;
+ },
+
+ // Mock implementation to get sponsors
+ getMockSponsors: function() {
+ return new Promise((resolve) => {
+ // Simulate API delay
+ setTimeout(() => {
+ const mockSponsors = [
+ {
+ name: 'MediaServer Pro',
+ url: 'https://github.com/mediaserverpro',
+ avatarUrl: 'https://ui-avatars.com/api/?name=MS&background=4A90E2&color=fff&size=200',
+ tier: 'Gold Sponsor'
+ },
+ {
+ name: 'StreamVault',
+ url: 'https://github.com/streamvault',
+ avatarUrl: 'https://ui-avatars.com/api/?name=SV&background=6C5CE7&color=fff&size=200',
+ tier: 'Gold Sponsor'
+ },
+ {
+ name: 'MediaStack',
+ url: 'https://github.com/mediastack',
+ avatarUrl: 'https://ui-avatars.com/api/?name=MS&background=00B894&color=fff&size=200',
+ tier: 'Silver Sponsor'
+ },
+ {
+ name: 'NASGuru',
+ url: 'https://github.com/nasguru',
+ avatarUrl: 'https://ui-avatars.com/api/?name=NG&background=FD79A8&color=fff&size=200',
+ tier: 'Silver Sponsor'
+ },
+ {
+ name: 'ServerSquad',
+ url: 'https://github.com/serversquad',
+ avatarUrl: 'https://ui-avatars.com/api/?name=SS&background=F1C40F&color=fff&size=200',
+ tier: 'Bronze Sponsor'
+ },
+ {
+ name: 'CloudCache',
+ url: 'https://github.com/cloudcache',
+ avatarUrl: 'https://ui-avatars.com/api/?name=CC&background=E74C3C&color=fff&size=200',
+ tier: 'Bronze Sponsor'
+ },
+ {
+ name: 'MediaMinder',
+ url: 'https://github.com/mediaminder',
+ avatarUrl: 'https://ui-avatars.com/api/?name=MM&background=9B59B6&color=fff&size=200',
+ tier: 'Bronze Sponsor'
+ },
+ {
+ name: 'StreamSage',
+ url: 'https://github.com/streamsage',
+ avatarUrl: 'https://ui-avatars.com/api/?name=SS&background=2ECC71&color=fff&size=200',
+ tier: 'Bronze Sponsor'
+ }
+ ];
+
+ resolve(mockSponsors);
+ }, 800);
+ });
+ }
+};
+
+// Initialize when the document is ready
+document.addEventListener('DOMContentLoaded', function() {
+ GithubSponsors.init();
+});
diff --git a/Huntarr.io-6.3.6/frontend/static/js/history.js b/Huntarr.io-6.3.6/frontend/static/js/history.js
new file mode 100644
index 0000000..3049a97
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/static/js/history.js
@@ -0,0 +1,345 @@
+/**
+ * Huntarr - History Module
+ * Handles displaying and managing history entries for all media apps
+ */
+
+const historyModule = {
+ // State
+ currentApp: 'all',
+ currentPage: 1,
+ totalPages: 1,
+ pageSize: 20,
+ searchQuery: '',
+ isLoading: false,
+
+ // DOM elements
+ elements: {},
+
+ // Initialize the history module
+ init: function() {
+ this.cacheElements();
+ this.setupEventListeners();
+
+ // Initial load if history is active section
+ if (huntarrUI && huntarrUI.currentSection === 'history') {
+ this.loadHistory();
+ }
+ },
+
+ // Cache DOM elements
+ cacheElements: function() {
+ this.elements = {
+ // History dropdown
+ historyOptions: document.querySelectorAll('.history-option'),
+ currentHistoryApp: document.getElementById('current-history-app'),
+ historyDropdownBtn: document.querySelector('.history-dropdown-btn'),
+ historyDropdownContent: document.querySelector('.history-dropdown-content'),
+
+ // Table and containers
+ historyTable: document.querySelector('.history-table'),
+ historyTableBody: document.getElementById('historyTableBody'),
+ historyContainer: document.querySelector('.history-container'),
+
+ // Controls
+ historySearchInput: document.getElementById('historySearchInput'),
+ historySearchButton: document.getElementById('historySearchButton'),
+ historyPageSize: document.getElementById('historyPageSize'),
+ clearHistoryButton: document.getElementById('clearHistoryButton'),
+
+ // Pagination
+ historyPrevPage: document.getElementById('historyPrevPage'),
+ historyNextPage: document.getElementById('historyNextPage'),
+ historyCurrentPage: document.getElementById('historyCurrentPage'),
+ historyTotalPages: document.getElementById('historyTotalPages'),
+
+ // State displays
+ historyEmptyState: document.getElementById('historyEmptyState'),
+ historyLoading: document.getElementById('historyLoading')
+ };
+ },
+
+ // Set up event listeners
+ setupEventListeners: function() {
+ // App selection (native select)
+ const historyAppSelect = document.getElementById('historyAppSelect');
+ if (historyAppSelect) {
+ historyAppSelect.addEventListener('change', (e) => {
+ this.handleHistoryAppChange(e.target.value);
+ });
+ }
+ // App selection (legacy click)
+ this.elements.historyOptions.forEach(option => {
+ option.addEventListener('click', e => this.handleHistoryAppChange(e));
+ });
+
+ // Search
+ this.elements.historySearchButton.addEventListener('click', () => this.handleSearch());
+ this.elements.historySearchInput.addEventListener('keypress', e => {
+ if (e.key === 'Enter') this.handleSearch();
+ });
+
+ // Page size
+ this.elements.historyPageSize.addEventListener('change', () => this.handlePageSizeChange());
+
+ // Clear history
+ this.elements.clearHistoryButton.addEventListener('click', () => this.handleClearHistory());
+
+ // Pagination
+ this.elements.historyPrevPage.addEventListener('click', () => this.handlePagination('prev'));
+ this.elements.historyNextPage.addEventListener('click', () => this.handlePagination('next'));
+ },
+
+ // Load history data when section becomes active
+ loadHistory: function() {
+ if (this.elements.historyContainer) {
+ this.fetchHistoryData();
+ }
+ },
+
+ // Handle app selection changes
+ handleHistoryAppChange: function(eOrValue) {
+ let selectedApp;
+ if (typeof eOrValue === 'string') {
+ selectedApp = eOrValue;
+ } else if (eOrValue && eOrValue.target) {
+ selectedApp = eOrValue.target.getAttribute('data-app');
+ eOrValue.preventDefault();
+ }
+ if (!selectedApp || selectedApp === this.currentApp) return;
+ // Update UI (for legacy click)
+ if (this.elements.historyOptions) {
+ this.elements.historyOptions.forEach(option => {
+ option.classList.remove('active');
+ if (option.getAttribute('data-app') === selectedApp) {
+ option.classList.add('active');
+ }
+ });
+ }
+ // Update dropdown text (if present)
+ if (this.elements.currentHistoryApp) {
+ const displayName = selectedApp.charAt(0).toUpperCase() + selectedApp.slice(1);
+ this.elements.currentHistoryApp.textContent = displayName;
+ }
+ // Reset pagination
+ this.currentPage = 1;
+ // Update state and fetch data
+ this.currentApp = selectedApp;
+ this.fetchHistoryData();
+ },
+
+ // Handle search
+ handleSearch: function() {
+ const newSearchQuery = this.elements.historySearchInput.value.trim();
+
+ // Only fetch if search query changed
+ if (newSearchQuery !== this.searchQuery) {
+ this.searchQuery = newSearchQuery;
+ this.currentPage = 1; // Reset to first page
+ this.fetchHistoryData();
+ }
+ },
+
+ // Handle page size change
+ handlePageSizeChange: function() {
+ const newPageSize = parseInt(this.elements.historyPageSize.value);
+ if (newPageSize !== this.pageSize) {
+ this.pageSize = newPageSize;
+ this.currentPage = 1; // Reset to first page
+ this.fetchHistoryData();
+ }
+ },
+
+ // Handle pagination
+ handlePagination: function(direction) {
+ if (direction === 'prev' && this.currentPage > 1) {
+ this.currentPage--;
+ this.fetchHistoryData();
+ } else if (direction === 'next' && this.currentPage < this.totalPages) {
+ this.currentPage++;
+ this.fetchHistoryData();
+ }
+ },
+
+ // Handle clear history
+ handleClearHistory: function() {
+ if (confirm(`Are you sure you want to clear ${this.currentApp === 'all' ? 'all history' : this.currentApp + ' history'}?`)) {
+ this.clearHistory();
+ }
+ },
+
+ // Fetch history data from API
+ fetchHistoryData: function() {
+ this.setLoading(true);
+
+ // Construct URL with parameters
+ let url = `/api/history/${this.currentApp}?page=${this.currentPage}&page_size=${this.pageSize}`;
+ if (this.searchQuery) {
+ url += `&search=${encodeURIComponent(this.searchQuery)}`;
+ }
+
+ fetch(url)
+ .then(response => {
+ if (!response.ok) {
+ throw new Error(`HTTP error! Status: ${response.status}`);
+ }
+ return response.json();
+ })
+ .then(data => {
+ this.totalPages = data.total_pages;
+ this.renderHistoryData(data);
+ this.updatePaginationUI();
+ this.setLoading(false);
+ })
+ .catch(error => {
+ console.error('Error fetching history data:', error);
+ this.showError('Failed to load history data. Please try again later.');
+ this.setLoading(false);
+ });
+ },
+
+ // Clear history
+ clearHistory: function() {
+ this.setLoading(true);
+
+ fetch(`/api/history/${this.currentApp}`, {
+ method: 'DELETE',
+ })
+ .then(response => {
+ if (!response.ok) {
+ throw new Error(`HTTP error! Status: ${response.status}`);
+ }
+ return response.json();
+ })
+ .then(() => {
+ // Reload data
+ this.fetchHistoryData();
+ })
+ .catch(error => {
+ console.error('Error clearing history:', error);
+ this.showError('Failed to clear history. Please try again later.');
+ this.setLoading(false);
+ });
+ },
+
+ // Render history data to table
+ renderHistoryData: function(data) {
+ const tableBody = this.elements.historyTableBody;
+ tableBody.innerHTML = '';
+
+ if (!data.entries || data.entries.length === 0) {
+ this.showEmptyState();
+ return;
+ }
+
+ // Hide empty state
+ this.elements.historyEmptyState.style.display = 'none';
+ this.elements.historyTable.style.display = 'table';
+
+ // Render rows
+ data.entries.forEach(entry => {
+ const row = document.createElement('tr');
+
+ // Format the instance name to include app type (capitalize first letter of app type)
+ const appType = entry.app_type ? entry.app_type.charAt(0).toUpperCase() + entry.app_type.slice(1) : '';
+ const formattedInstance = appType ? `${appType} - ${entry.instance_name}` : entry.instance_name;
+
+            row.innerHTML = `
+                <td>${entry.date_time_readable}</td>
+                <td>${this.escapeHtml(entry.processed_info)}</td>
+                <td>${this.formatOperationType(entry.operation_type)}</td>
+                <td>${this.escapeHtml(entry.id)}</td>
+                <td>${this.escapeHtml(formattedInstance)}</td>
+                <td>${this.escapeHtml(entry.how_long_ago)}</td>
+            `;
+
+ tableBody.appendChild(row);
+ });
+ },
+
+ // Update pagination UI
+ updatePaginationUI: function() {
+ this.elements.historyCurrentPage.textContent = this.currentPage;
+ this.elements.historyTotalPages.textContent = this.totalPages;
+
+ // Enable/disable pagination buttons
+ this.elements.historyPrevPage.disabled = this.currentPage <= 1;
+ this.elements.historyNextPage.disabled = this.currentPage >= this.totalPages;
+ },
+
+ // Show empty state
+ showEmptyState: function() {
+ this.elements.historyTable.style.display = 'none';
+ this.elements.historyEmptyState.style.display = 'flex';
+ },
+
+ // Show error
+ showError: function(message) {
+ // Use huntarrUI's notification system if available
+ if (typeof huntarrUI !== 'undefined' && typeof huntarrUI.showNotification === 'function') {
+ huntarrUI.showNotification(message, 'error');
+ } else {
+ alert(message);
+ }
+ },
+
+ // Set loading state
+ setLoading: function(isLoading) {
+ this.isLoading = isLoading;
+
+ if (isLoading) {
+ this.elements.historyLoading.style.display = 'flex';
+ this.elements.historyTable.style.display = 'none';
+ this.elements.historyEmptyState.style.display = 'none';
+ } else {
+ this.elements.historyLoading.style.display = 'none';
+ }
+ },
+
+ // Helper function to escape HTML
+ escapeHtml: function(text) {
+ if (text === null || text === undefined) return '';
+
+        const map = {
+            '&': '&amp;',
+            '<': '&lt;',
+            '>': '&gt;',
+            '"': '&quot;',
+            "'": '&#039;'
+        };
+
+ return String(text).replace(/[&<>"']/g, function(m) { return map[m]; });
+ },
+
+ // Helper function to format operation type
+ formatOperationType: function(operationType) {
+ switch (operationType) {
+ case 'missing':
+ return 'Missing ';
+ case 'upgrade':
+ return 'Upgrade ';
+ default:
+ return operationType ? this.escapeHtml(operationType.charAt(0).toUpperCase() + operationType.slice(1)) : 'Unknown';
+ }
+ }
+};
+
+// Initialize when huntarrUI is ready
+document.addEventListener('DOMContentLoaded', () => {
+ historyModule.init();
+
+ // Connect with main app
+ if (typeof huntarrUI !== 'undefined') {
+ // Add loadHistory to the section switch handler
+ const originalSwitchSection = huntarrUI.switchSection;
+
+ huntarrUI.switchSection = function(section) {
+ // Call original function
+ originalSwitchSection.call(huntarrUI, section);
+
+ // Load history data when switching to history section
+ if (section === 'history') {
+ historyModule.loadHistory();
+ }
+ };
+ }
+});
diff --git a/Huntarr.io-6.3.6/frontend/static/js/new-main.js b/Huntarr.io-6.3.6/frontend/static/js/new-main.js
new file mode 100644
index 0000000..60e6c2e
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/static/js/new-main.js
@@ -0,0 +1,2655 @@
/**
 * Huntarr - New UI Implementation
 * Main JavaScript file for handling UI interactions and API communication.
 * huntarrUI is a singleton controller object: state fields below, DOM
 * references in `elements` (populated by cacheElements), behaviour in the
 * methods that follow.
 */

let huntarrUI = {
    // --- Current state ---
    eventSources: {},          // open EventSource connections, keyed by stream name
    currentSection: 'home',    // Default section
    currentLogApp: 'all',      // Default log app
    currentHistoryApp: 'all',  // Default history app
    autoScroll: true,          // auto-scroll the log view as new entries arrive
    // Per-app "connection configured/working" flags, updated by connection checks.
    configuredApps: {
        sonarr: false,
        radarr: false,
        lidarr: false,
        readarr: false,   // Added readarr
        whisparr: false,  // Added whisparr
        eros: false       // Added eros
    },
    originalSettings: {},     // Store the full original settings object
    settingsChanged: false,   // Flag to track unsaved settings changes
    hasUnsavedChanges: false, // Global flag for unsaved changes across all apps
    formChanged: {},          // Track unsaved changes per app
    suppressUnsavedChangesCheck: false, // Flag to suppress unsaved changes dialog

    // Logo URL (may be overridden from localStorage in init)
    logoUrl: '/static/logo/256.png',

    // Element references, filled in by cacheElements()
    elements: {},
+
+ // Initialize the application
+ init: function() {
+ // Cache DOM elements
+ this.cacheElements();
+
+ // Set up event listeners
+ this.setupEventListeners();
+
+ // Setup logo handling to prevent flashing during navigation
+ this.setupLogoHandling();
+
+ // Connect to logs if we're on logs page
+ if (window.location.hash === '#logs') {
+ this.connectToLogs();
+ }
+
+ // Remove setupStatefulResetButton references that are causing errors
+ // this.setupStatefulResetButton();
+
+ // Initial navigation based on hash
+ this.handleHashNavigation(window.location.hash);
+
+ // Register unsaved changes handler
+ this.registerGlobalUnsavedChangesHandler();
+
+ // Load username
+ this.loadUsername();
+
+ // When all elements are ready, call the method
+ // this.setupStatefulResetButton();
+
+ // Apply any preloaded theme immediately to avoid flashing
+ const prefersDarkMode = localStorage.getItem('huntarr-dark-mode') === 'true';
+ if (prefersDarkMode) {
+ document.body.classList.add('dark-theme');
+ }
+
+ const resetButton = document.getElementById('reset-stats');
+ if (resetButton) {
+ resetButton.addEventListener('click', (e) => {
+ e.preventDefault();
+ this.resetMediaStats();
+ });
+ }
+ // Ensure logo is visible immediately
+ this.logoUrl = localStorage.getItem('huntarr-logo-url') || this.logoUrl;
+
+ // Load media stats
+ this.loadMediaStats(); // Load media statistics
+
+ // Load current version
+ this.loadCurrentVersion(); // Load current version
+
+ // Load latest version from GitHub
+ this.loadLatestVersion(); // Load latest version from GitHub
+
+ // Load latest beta version from GitHub
+ this.loadBetaVersion(); // Load latest beta version from GitHub
+
+ // Load GitHub star count
+ this.loadGitHubStarCount(); // Load GitHub star count
+
+ // Preload stateful management info so it's ready when needed
+ this.loadStatefulInfo();
+
+ // Ensure logo is applied
+ if (typeof window.applyLogoToAllElements === 'function') {
+ window.applyLogoToAllElements();
+ }
+
+ // Initialize instance event handlers
+ this.setupInstanceEventHandlers();
+
+ // Add global event handler for unsaved changes
+ this.registerGlobalUnsavedChangesHandler();
+
+ // Also call it again after a delay in case settings are loaded dynamically
+ setTimeout(() => {
+ // this.setupStatefulResetButton();
+ }, 1000);
+ },
+
+ // Cache DOM elements for better performance
+ cacheElements: function() {
+ // Navigation
+ this.elements.navItems = document.querySelectorAll('.nav-item');
+ this.elements.homeNav = document.getElementById('homeNav');
+ this.elements.logsNav = document.getElementById('logsNav');
+ this.elements.historyNav = document.getElementById('historyNav');
+ this.elements.settingsNav = document.getElementById('settingsNav');
+ this.elements.userNav = document.getElementById('userNav');
+
+ // Sections
+ this.elements.sections = document.querySelectorAll('.content-section');
+ this.elements.homeSection = document.getElementById('homeSection');
+ this.elements.logsSection = document.getElementById('logsSection');
+ this.elements.historySection = document.getElementById('historySection');
+ this.elements.settingsSection = document.getElementById('settingsSection');
+
+ // App tabs & Settings Tabs
+ this.elements.appTabs = document.querySelectorAll('.app-tab'); // For logs section
+ this.elements.logOptions = document.querySelectorAll('.log-option'); // New: replaced logTabs with logOptions
+ this.elements.currentLogApp = document.getElementById('current-log-app'); // New: dropdown current selection text
+ this.elements.logDropdownBtn = document.querySelector('.log-dropdown-btn'); // New: dropdown toggle button
+ this.elements.logDropdownContent = document.querySelector('.log-dropdown-content'); // New: dropdown content
+
+ // History dropdown elements
+ this.elements.historyOptions = document.querySelectorAll('.history-option'); // History dropdown options
+ this.elements.currentHistoryApp = document.getElementById('current-history-app'); // Current history app text
+ this.elements.historyDropdownBtn = document.querySelector('.history-dropdown-btn'); // History dropdown button
+ this.elements.historyDropdownContent = document.querySelector('.history-dropdown-content'); // History dropdown content
+ this.elements.historyPlaceholderText = document.getElementById('history-placeholder-text'); // Placeholder text for history
+
+ // Settings dropdown elements
+ this.elements.settingsOptions = document.querySelectorAll('.settings-option'); // New: settings dropdown options
+ this.elements.currentSettingsApp = document.getElementById('current-settings-app'); // New: current settings app text
+ this.elements.settingsDropdownBtn = document.querySelector('.settings-dropdown-btn'); // New: settings dropdown button
+ this.elements.settingsDropdownContent = document.querySelector('.settings-dropdown-content'); // New: dropdown content
+
+ this.elements.appSettingsPanels = document.querySelectorAll('.app-settings-panel');
+
+ // Logs
+ this.elements.logsContainer = document.getElementById('logsContainer');
+ this.elements.autoScrollCheckbox = document.getElementById('autoScrollCheckbox');
+ this.elements.clearLogsButton = document.getElementById('clearLogsButton');
+ this.elements.logConnectionStatus = document.getElementById('logConnectionStatus');
+
+ // Settings
+ this.elements.saveSettingsButton = document.getElementById('saveSettingsButton'); // Corrected ID
+
+ // Status elements
+ this.elements.sonarrHomeStatus = document.getElementById('sonarrHomeStatus');
+ this.elements.radarrHomeStatus = document.getElementById('radarrHomeStatus');
+ this.elements.lidarrHomeStatus = document.getElementById('lidarrHomeStatus');
+ this.elements.readarrHomeStatus = document.getElementById('readarrHomeStatus'); // Added readarr
+ this.elements.whisparrHomeStatus = document.getElementById('whisparrHomeStatus'); // Added whisparr
+ this.elements.erosHomeStatus = document.getElementById('erosHomeStatus'); // Added eros
+
+ // Actions
+ this.elements.startHuntButton = document.getElementById('startHuntButton');
+ this.elements.stopHuntButton = document.getElementById('stopHuntButton');
+
+ // Theme
+ // this.elements.themeToggle = document.getElementById('themeToggle'); // Removed theme toggle
+
+ // Logout
+ this.elements.logoutLink = document.getElementById('logoutLink'); // Added logout link
+ },
+
// Set up event listeners.
// Wires every static control on the page: dropdowns, navigation, log
// controls, settings change tracking and unsaved-change warnings.
// NOTE(review): this method registers several independent document-level
// click listeners (one global, one for navigation, one per dropdown's
// "click outside" close) and BOTH window.addEventListener('beforeunload')
// and window.onbeforeunload — both beforeunload handlers will fire;
// consider consolidating. It also calls handleHashNavigation at the end,
// which init() does as well — confirm the double invocation is intended.
setupEventListeners: function() {
    // Global dropdown handling - close all dropdowns when clicking on any option
    document.addEventListener('click', (e) => {
        // If the clicked element is a dropdown option (has class 'log-option')
        if (e.target.classList.contains('log-option')) {
            // Find all dropdown content elements and close them
            document.querySelectorAll('.log-dropdown-content').forEach(dropdown => {
                dropdown.classList.remove('show');
            });
        }
    });

    // Navigation (delegated to document)
    document.addEventListener('click', (e) => {
        // Navigation link handling
        // NOTE(review): `link` is computed but unused, and handleNavigation
        // reads e.currentTarget (which is `document` in this delegated
        // listener) — verify this path actually navigates as intended.
        if (e.target.matches('.nav-link') || e.target.closest('.nav-link')) {
            const link = e.target.matches('.nav-link') ? e.target : e.target.closest('.nav-link');
            e.preventDefault();
            this.handleNavigation(e);
        }

        // Handle cycle reset button clicks
        if (e.target.matches('.cycle-reset-button') || e.target.closest('.cycle-reset-button')) {
            const button = e.target.matches('.cycle-reset-button') ? e.target : e.target.closest('.cycle-reset-button');
            const app = button.dataset.app;
            if (app) {
                this.resetAppCycle(app, button);
            }
        }
    });

    // Log auto-scroll setting
    if (this.elements.autoScrollCheckbox) {
        this.elements.autoScrollCheckbox.addEventListener('change', (e) => {
            this.autoScroll = e.target.checked;
        });
    }

    // Clear logs button
    if (this.elements.clearLogsButton) {
        this.elements.clearLogsButton.addEventListener('click', () => this.clearLogs());
    }

    // App tabs in logs section
    this.elements.appTabs.forEach(tab => {
        tab.addEventListener('click', (e) => this.handleAppTabChange(e));
    });

    // Log options dropdown
    this.elements.logOptions.forEach(option => {
        option.addEventListener('click', (e) => this.handleLogOptionChange(e));
    });

    // Log dropdown toggle (closes the history dropdown first so only one
    // dropdown is ever open).
    if (this.elements.logDropdownBtn) {
        this.elements.logDropdownBtn.addEventListener('click', (e) => {
            e.preventDefault();
            e.stopPropagation(); // Prevent event bubbling

            // Close any other open dropdowns first
            if (this.elements.historyDropdownContent && this.elements.historyDropdownContent.classList.contains('show')) {
                this.elements.historyDropdownContent.classList.remove('show');
            }

            // Toggle this dropdown
            this.elements.logDropdownContent.classList.toggle('show');
        });

        // Close dropdown when clicking outside
        document.addEventListener('click', (e) => {
            if (!e.target.closest('.log-dropdown') && this.elements.logDropdownContent.classList.contains('show')) {
                this.elements.logDropdownContent.classList.remove('show');
            }
        });
    }

    // History dropdown toggle (mirrors the log dropdown behaviour)
    if (this.elements.historyDropdownBtn) {
        this.elements.historyDropdownBtn.addEventListener('click', (e) => {
            e.preventDefault();
            e.stopPropagation(); // Prevent event bubbling

            // Close any other open dropdowns first
            if (this.elements.logDropdownContent && this.elements.logDropdownContent.classList.contains('show')) {
                this.elements.logDropdownContent.classList.remove('show');
            }

            // Toggle this dropdown
            this.elements.historyDropdownContent.classList.toggle('show');
        });

        // Close dropdown when clicking outside
        document.addEventListener('click', (e) => {
            if (!e.target.closest('.history-dropdown') && this.elements.historyDropdownContent.classList.contains('show')) {
                this.elements.historyDropdownContent.classList.remove('show');
            }
        });
    }

    // History options
    this.elements.historyOptions.forEach(option => {
        option.addEventListener('click', (e) => this.handleHistoryOptionChange(e));
    });

    // Settings dropdown toggle (closes both other dropdowns first)
    if (this.elements.settingsDropdownBtn) {
        this.elements.settingsDropdownBtn.addEventListener('click', (e) => {
            e.preventDefault();
            e.stopPropagation(); // Prevent event bubbling

            // Close any other open dropdowns first
            if (this.elements.logDropdownContent && this.elements.logDropdownContent.classList.contains('show')) {
                this.elements.logDropdownContent.classList.remove('show');
            }

            if (this.elements.historyDropdownContent && this.elements.historyDropdownContent.classList.contains('show')) {
                this.elements.historyDropdownContent.classList.remove('show');
            }

            // Toggle this dropdown
            this.elements.settingsDropdownContent.classList.toggle('show');
        });

        // Close dropdown when clicking outside
        document.addEventListener('click', (e) => {
            if (!e.target.closest('.settings-dropdown') && this.elements.settingsDropdownContent.classList.contains('show')) {
                this.elements.settingsDropdownContent.classList.remove('show');
            }
        });
    }

    // Settings options
    this.elements.settingsOptions.forEach(option => {
        option.addEventListener('click', (e) => this.handleSettingsOptionChange(e));
    });

    // Save settings button
    if (this.elements.saveSettingsButton) {
        this.elements.saveSettingsButton.addEventListener('click', () => this.saveSettings());
    }

    // Start hunt button
    if (this.elements.startHuntButton) {
        this.elements.startHuntButton.addEventListener('click', () => this.startHunt());
    }

    // Stop hunt button
    if (this.elements.stopHuntButton) {
        this.elements.stopHuntButton.addEventListener('click', () => this.stopHunt());
    }

    // Logout button
    if (this.elements.logoutLink) {
        this.elements.logoutLink.addEventListener('click', (e) => this.logout(e));
    }

    // Dark mode toggle: restore persisted preference, then persist changes.
    const darkModeToggle = document.getElementById('darkModeToggle');
    if (darkModeToggle) {
        const prefersDarkMode = localStorage.getItem('huntarr-dark-mode') === 'true';
        darkModeToggle.checked = prefersDarkMode;

        darkModeToggle.addEventListener('change', function() {
            const isDarkMode = this.checked;
            document.body.classList.toggle('dark-theme', isDarkMode);
            localStorage.setItem('huntarr-dark-mode', isDarkMode);
        });
    }

    // Settings inputs change tracking
    // NOTE(review): only binds to inputs that exist right now; dynamically
    // injected settings fields are covered by the form delegation below.
    document.querySelectorAll('#settingsSection input, #settingsSection select').forEach(element => {
        element.addEventListener('change', () => this.markSettingsAsChanged());
    });

    // Monitor for window beforeunload to warn about unsaved settings
    window.addEventListener('beforeunload', (e) => {
        if (this.settingsChanged && this.hasFormChanges(this.currentSettingsTab)) {
            // Standard way to show a confirmation dialog when navigating away
            e.preventDefault();
            e.returnValue = ''; // Chrome requires returnValue to be set
            return ''; // Legacy browsers
        }
    });

    // Stateful management reset button
    const resetStatefulBtn = document.getElementById('reset_stateful_btn');
    if (resetStatefulBtn) {
        resetStatefulBtn.addEventListener('click', () => this.handleStatefulReset());
    }

    // Stateful management hours input
    const statefulHoursInput = document.getElementById('stateful_management_hours');
    if (statefulHoursInput) {
        statefulHoursInput.addEventListener('change', () => {
            this.updateStatefulExpirationOnUI();
        });
    }

    // Handle window hash change
    window.addEventListener('hashchange', () => this.handleHashNavigation(window.location.hash)); // Ensure hash is passed

    // Settings form delegation (covers dynamically added fields)
    const settingsFormContainer = document.querySelector('.settings-form');
    if (settingsFormContainer) {
        settingsFormContainer.addEventListener('input', (event) => {
            if (event.target.closest('.app-settings-panel.active')) {
                // Check if the target is an input, select, or textarea within the active panel
                if (event.target.matches('input, select, textarea')) {
                    this.markSettingsAsChanged(); // Use the new function
                }
            }
        });
        settingsFormContainer.addEventListener('change', (event) => {
            if (event.target.closest('.app-settings-panel.active')) {
                // Handle changes for checkboxes and selects that use 'change' event
                if (event.target.matches('input[type="checkbox"], select')) {
                    this.markSettingsAsChanged(); // Use the new function
                }
            }
        });
    }

    // Add listener for unsaved changes prompt (External Navigation)
    // NOTE(review): this assignment coexists with the addEventListener
    // beforeunload handler above; both run.
    window.onbeforeunload = (event) => {
        if (this.settingsChanged) {
            // Standard way to trigger the browser's confirmation dialog
            event.preventDefault();
            // Chrome requires returnValue to be set
            event.returnValue = 'You have unsaved changes. Are you sure you want to leave?';
            return 'You have unsaved changes. Are you sure you want to leave?'; // For older browsers
        }
        // If no changes, return undefined to allow navigation without prompt
        return undefined;
    };

    // Initial setup based on hash or default to home
    const initialHash = window.location.hash || '#home';
    this.handleHashNavigation(initialHash);

    // LOGS: Listen for change on #logAppSelect
    const logAppSelect = document.getElementById('logAppSelect');
    if (logAppSelect) {
        logAppSelect.addEventListener('change', (e) => {
            const app = e.target.value;
            this.handleLogOptionChange(app);
        });
    }
    // HISTORY: Listen for change on #historyAppSelect
    const historyAppSelect = document.getElementById('historyAppSelect');
    if (historyAppSelect) {
        historyAppSelect.addEventListener('change', (e) => {
            const app = e.target.value;
            this.handleHistoryOptionChange(app);
        });
    }
},
+
+ // Setup logo handling to prevent flashing during navigation
+ setupLogoHandling: function() {
+ // Get the logo image
+ const logoImg = document.querySelector('.sidebar .logo');
+ if (logoImg) {
+ // Cache the source
+ this.logoSrc = logoImg.src;
+
+ // Ensure it's fully loaded
+ if (!logoImg.complete) {
+ logoImg.onload = () => {
+ // Once loaded, store the source
+ this.logoSrc = logoImg.src;
+ };
+ }
+ }
+
+ // Also add event listener to ensure logo is preserved during navigation
+ window.addEventListener('beforeunload', () => {
+ // Store logo src in session storage to persist across page loads
+ if (this.logoSrc) {
+ sessionStorage.setItem('huntarr-logo-src', this.logoSrc);
+ }
+ });
+ },
+
// Navigation handling for nav items.
// Lets target="_blank" links open normally; for everything else it
// intercepts the click, optionally prompts about unsaved settings, resets
// the apps-module change flags when leaving the apps section, and then
// either updates the hash (internal links) or navigates away.
// NOTE(review): reads e.currentTarget, so this expects to be attached
// directly to the nav element (not invoked from a document-level
// delegated listener, where currentTarget would be `document`).
handleNavigation: function(e) {
    const targetElement = e.currentTarget; // Get the clicked nav item
    const href = targetElement.getAttribute('href');
    const target = targetElement.getAttribute('target');

    // Allow links with target="_blank" to open in a new window (return early)
    if (target === '_blank') {
        return; // Let the default click behavior happen
    }

    // For all other links, prevent default behavior and handle internally
    e.preventDefault();

    if (!href) return; // Exit if no href

    let targetSection = null;
    let isInternalLink = href.startsWith('#');

    if (isInternalLink) {
        targetSection = href.substring(1) || 'home'; // Get section from hash, default to 'home' if only '#'
    } else {
        // Handle external links (like /user) or non-hash links if needed
        // For now, assume non-hash links navigate away
    }

    // Check for unsaved changes ONLY if navigating INTERNALLY away from settings
    if (isInternalLink && this.currentSection === 'settings' && targetSection !== 'settings' && this.settingsChanged) {
        // Use our new comparison function to check if there are actual changes
        const hasRealChanges = this.hasFormChanges(this.currentSettingsTab);

        if (hasRealChanges && !confirm('You have unsaved changes. Are you sure you want to leave? Changes will be lost.')) {
            return; // Stop navigation if user cancels
        }

        // User confirmed or no real changes, reset flag before navigating
        this.settingsChanged = false;
        this.updateSaveResetButtonState(false);
    }

    // Special handling for the apps section: clear the global apps-module
    // change-detection flags when navigating away so stale "unsaved
    // changes" state doesn't leak into other sections.
    if (this.currentSection === 'apps' && targetSection !== 'apps') {
        // Reset the app module flags when navigating away
        if (window._appsModuleLoaded) {
            window._appsSuppressChangeDetection = true;
            if (window.appsModule && typeof window.appsModule.settingsChanged !== 'undefined') {
                window.appsModule.settingsChanged = false;
            }
            // Schedule ending suppression to avoid any edge case issues
            setTimeout(() => {
                window._appsSuppressChangeDetection = false;
            }, 1000);
        }
    }

    // Proceed with navigation
    if (isInternalLink) {
        window.location.hash = href; // Change hash to trigger handleHashNavigation
    } else {
        // If it's an external link (like /user), just navigate normally
        window.location.href = href;
    }
},
+
+ handleHashNavigation: function(hash) {
+ const section = hash.substring(1) || 'home';
+ this.switchSection(section);
+ },
+
+ switchSection: function(section) {
+ // Update active section
+ this.elements.sections.forEach(s => {
+ s.classList.remove('active');
+ });
+
+ // Update navigation
+ this.elements.navItems.forEach(item => {
+ item.classList.remove('active');
+ });
+
+ // Show selected section
+ let newTitle = 'Home'; // Default title
+ const sponsorsSection = document.getElementById('sponsorsSection'); // Get sponsors section element
+ const sponsorsNav = document.getElementById('sponsorsNav'); // Get sponsors nav element
+
+ if (section === 'home' && this.elements.homeSection) {
+ this.elements.homeSection.classList.add('active');
+ if (this.elements.homeNav) this.elements.homeNav.classList.add('active');
+ newTitle = 'Home';
+ this.currentSection = 'home';
+ // Disconnect logs if switching away from logs
+ this.disconnectAllEventSources();
+ // Check app connections when returning to home page to update status
+ this.checkAppConnections();
+ // Also refresh media stats
+ this.loadMediaStats();
+ } else if (section === 'logs' && this.elements.logsSection) {
+ this.elements.logsSection.classList.add('active');
+ if (this.elements.logsNav) this.elements.logsNav.classList.add('active');
+ newTitle = 'Logs';
+ this.currentSection = 'logs';
+ this.connectToLogs();
+ } else if (section === 'history' && this.elements.historySection) {
+ this.elements.historySection.classList.add('active');
+ if (this.elements.historyNav) this.elements.historyNav.classList.add('active');
+ newTitle = 'History';
+ this.currentSection = 'history';
+ // Disconnect logs if switching away from logs
+ this.disconnectAllEventSources();
+ } else if (section === 'apps' && document.getElementById('appsSection')) {
+ document.getElementById('appsSection').classList.add('active');
+ if (document.getElementById('appsNav')) document.getElementById('appsNav').classList.add('active');
+ newTitle = 'Apps';
+ this.currentSection = 'apps';
+ // Disconnect logs if switching away from logs
+ this.disconnectAllEventSources();
+
+ // Load apps if the apps module exists
+ if (typeof appsModule !== 'undefined') {
+ appsModule.loadApps();
+ }
+ } else if (section === 'settings' && this.elements.settingsSection) {
+ this.elements.settingsSection.classList.add('active');
+ if (this.elements.settingsNav) this.elements.settingsNav.classList.add('active');
+ newTitle = 'Settings';
+ this.currentSection = 'settings';
+
+ // Ensure default settings tab is set if none is active
+ if (!this.currentSettingsTab) {
+ this.currentSettingsTab = 'general'; // Default to general tab
+
+ // Set the general tab as active
+ const generalTab = document.querySelector('.settings-tab[data-app="general"]');
+ if (generalTab) {
+ this.elements.settingsTabs.forEach(t => {
+ t.classList.remove('active');
+ });
+ generalTab.classList.add('active');
+
+ // Also set the general panel as visible
+ this.elements.appSettingsPanels.forEach(panel => {
+ panel.classList.remove('active');
+ panel.style.display = 'none';
+ });
+
+ const generalPanel = document.getElementById('generalSettings');
+ if (generalPanel) {
+ generalPanel.classList.add('active');
+ generalPanel.style.display = 'block';
+ }
+ }
+ }
+
+ // Load stateful info immediately, don't wait for loadAllSettings to complete
+ this.loadStatefulInfo();
+
+ // Load all settings after stateful info has started loading
+ this.loadAllSettings();
+
+ // Disconnect logs if switching away from logs
+ this.disconnectAllEventSources();
+ } else if (section === 'sponsors' && sponsorsSection) { // ADDED sponsors case
+ sponsorsSection.classList.add('active');
+ if (sponsorsNav) sponsorsNav.classList.add('active');
+ newTitle = 'Project Sponsors';
+ this.currentSection = 'sponsors';
+ // Set the iframe source when switching to this section
+ const sponsorsFrame = document.getElementById('sponsorsFrame');
+ if (sponsorsFrame && (!sponsorsFrame.src || sponsorsFrame.src === 'about:blank')) { // Set src only if not already set or blank
+ sponsorsFrame.src = 'https://github.com/sponsors/plexguide';
+ }
+ // Disconnect logs if switching away from logs
+ this.disconnectAllEventSources();
+ } else {
+ // Default to home if section is unknown or element missing
+ if (this.elements.homeSection) this.elements.homeSection.classList.add('active');
+ if (this.elements.homeNav) this.elements.homeNav.classList.add('active');
+ newTitle = 'Home';
+ this.currentSection = 'home';
+ // Disconnect logs if switching away from logs
+ this.disconnectAllEventSources();
+ }
+
+ // Update the page title
+ const pageTitleElement = document.getElementById('currentPageTitle');
+ if (pageTitleElement) {
+ pageTitleElement.textContent = newTitle;
+ } else {
+ console.warn("[huntarrUI] currentPageTitle element not found during section switch.");
+ }
+ },
+
+ // App tab switching
+ handleAppTabChange: function(e) {
+ const app = e.target.getAttribute('data-app');
+ if (!app) return;
+
+ // Update active tab
+ this.elements.appTabs.forEach(tab => {
+ tab.classList.remove('active');
+ });
+ e.target.classList.add('active');
+
+ // Switch to the selected app logs
+ this.currentApp = app;
+ this.connectToLogs();
+ },
+
+ // Log option dropdown handling
+ handleLogOptionChange: function(app) {
+ if (app && app.target && typeof app.target.value === 'string') {
+ app = app.target.value;
+ } else if (app && app.target && typeof app.target.getAttribute === 'function') {
+ app = app.target.getAttribute('data-app');
+ }
+ if (!app || app === this.currentLogApp) return;
+ // Update the select value
+ const logAppSelect = document.getElementById('logAppSelect');
+ if (logAppSelect) logAppSelect.value = app;
+ // Update the current log app text with proper capitalization
+ let displayName = app.charAt(0).toUpperCase() + app.slice(1);
+ if (app === 'whisparr') displayName = 'Whisparr V2';
+ else if (app === 'eros') displayName = 'Whisparr V3';
+ if (this.elements.currentLogApp) this.elements.currentLogApp.textContent = displayName;
+ // Switch to the selected app logs
+ this.currentLogApp = app;
+ this.clearLogs();
+ this.connectToLogs();
+ },
+
+ // History option dropdown handling
+ handleHistoryOptionChange: function(app) {
+ if (app && app.target && typeof app.target.value === 'string') {
+ app = app.target.value;
+ } else if (app && app.target && typeof app.target.getAttribute === 'function') {
+ app = app.target.getAttribute('data-app');
+ }
+ if (!app || app === this.currentHistoryApp) return;
+ // Update the select value
+ const historyAppSelect = document.getElementById('historyAppSelect');
+ if (historyAppSelect) historyAppSelect.value = app;
+ // Update the current history app text with proper capitalization
+ let displayName = app.charAt(0).toUpperCase() + app.slice(1);
+ if (app === 'whisparr') displayName = 'Whisparr V2';
+ else if (app === 'eros') displayName = 'Whisparr V3';
+ if (this.elements.currentHistoryApp) this.elements.currentHistoryApp.textContent = displayName;
+ // Update the placeholder text
+ this.updateHistoryPlaceholder(app);
+ // Switch to the selected app history
+ this.currentHistoryApp = app;
+ },
+
+ // Update the history placeholder text based on the selected app
+ updateHistoryPlaceholder: function(app) {
+ if (!this.elements.historyPlaceholderText) return;
+
+ let message = "";
+ if (app === 'all') {
+ message = "The History feature will be available in a future update. Stay tuned for enhancements that will allow you to view your media processing history.";
+ } else {
+ let displayName = this.capitalizeFirst(app);
+ message = `The ${displayName} History feature is under development and will be available in a future update. You'll be able to track your ${displayName} media processing history here.`;
+ }
+
+ this.elements.historyPlaceholderText.textContent = message;
+ },
+
+ // Settings option handling
+ handleSettingsOptionChange: function(e) {
+ e.preventDefault(); // Prevent default anchor behavior
+
+ const app = e.target.getAttribute('data-app');
+ if (!app || app === this.currentSettingsApp) return; // Do nothing if same tab clicked
+
+ // Update active option
+ this.elements.settingsOptions.forEach(option => {
+ option.classList.remove('active');
+ });
+ e.target.classList.add('active');
+
+ // Update the current settings app text with proper capitalization
+ let displayName = app.charAt(0).toUpperCase() + app.slice(1);
+ this.elements.currentSettingsApp.textContent = displayName;
+
+ // Close the dropdown
+ this.elements.settingsDropdownContent.classList.remove('show');
+
+ // Hide all settings panels
+ this.elements.appSettingsPanels.forEach(panel => {
+ panel.classList.remove('active');
+ panel.style.display = 'none';
+ });
+
+ // Show the selected app's settings panel
+ const selectedPanel = document.getElementById(app + 'Settings');
+ if (selectedPanel) {
+ selectedPanel.classList.add('active');
+ selectedPanel.style.display = 'block';
+ }
+
+ this.currentSettingsTab = app;
+ console.log(`[huntarrUI] Switched settings tab to: ${this.currentSettingsTab}`); // Added logging
+ },
+
+ // Logs handling
+ connectToLogs: function() {
+ // Disconnect any existing event sources
+ this.disconnectAllEventSources();
+
+ // Connect to logs stream for the currentLogApp
+ this.connectEventSource(this.currentLogApp); // Pass the selected app
+ this.elements.logConnectionStatus.textContent = 'Connecting...';
+ this.elements.logConnectionStatus.className = '';
+ },
+
+ connectEventSource: function(appType) {
+ // Close any existing event source
+ if (this.eventSources.logs) {
+ this.eventSources.logs.close();
+ }
+
+ try {
+ // Append the app type to the URL
+ const eventSource = new EventSource(`/logs?app=${appType}`);
+
+ eventSource.onopen = () => {
+ this.elements.logConnectionStatus.textContent = 'Connected';
+ this.elements.logConnectionStatus.className = 'status-connected';
+ };
+
+ eventSource.onmessage = (event) => {
+ if (!this.elements.logsContainer) return;
+
+ try {
+ const logString = event.data;
+ // Regex to parse log lines: Optional [APP], Timestamp, Logger, Level, Message
+ // Example: [SONARR] 2024-01-01 12:00:00 - huntarr.sonarr - INFO - Message content
+ // Example: 2024-01-01 12:00:00 - huntarr - DEBUG - System message
+ const logRegex = /^(?:\\[(\\w+)\\]\\s)?([\\d\\-]+\\s[\\d:]+)\\s-\\s([\\w\\.]+)\\s-\\s(\\w+)\\s-\\s(.*)$/;
+ const match = logString.match(logRegex);
+
+ // First determine the app type for this log message
+ let logAppType = 'system'; // Default to system
+
+ if (match && match[1]) {
+ // If we have a match with app tag like [SONARR], use that
+ logAppType = match[1].toLowerCase();
+ } else if (match && match[3]) {
+ // Otherwise try to determine from the logger name (e.g., huntarr.sonarr)
+ const loggerParts = match[3].split('.');
+ if (loggerParts.length > 1) {
+ const possibleApp = loggerParts[1].toLowerCase();
+ if (['sonarr', 'radarr', 'lidarr', 'readarr', 'whisparr', 'eros', 'swaparr'].includes(possibleApp)) {
+ logAppType = possibleApp;
+ }
+ }
+ }
+
+ // Special case for system logs that may contain app-specific patterns
+ if (logAppType === 'system') {
+ // App-specific patterns that may appear in system logs
+ const patterns = {
+ 'sonarr': ['episode', 'series', 'tv show', 'sonarr'],
+ 'radarr': ['movie', 'film', 'radarr'],
+ 'lidarr': ['album', 'artist', 'track', 'music', 'lidarr'],
+ 'readarr': ['book', 'author', 'readarr'],
+ 'whisparr': ['scene', 'adult', 'whisparr'],
+ 'eros': ['eros', 'whisparr v3', 'whisparrv3'],
+ 'swaparr': ['added strike', 'max strikes reached', 'would have removed', 'strikes, removing download', 'processing stalled downloads', 'swaparr']
+ };
+
+ // Check each app's patterns
+ for (const [app, appPatterns] of Object.entries(patterns)) {
+ if (appPatterns.some(pattern => logString.toLowerCase().includes(pattern))) {
+ logAppType = app;
+ break;
+ }
+ }
+ }
+
+ // Determine if this log should be displayed based on the selected app tab
+ const shouldDisplay =
+ this.currentLogApp === 'all' ||
+ this.currentLogApp === logAppType;
+
+ if (!shouldDisplay) return;
+
+ const logEntry = document.createElement('div');
+ logEntry.className = 'log-entry';
+
+ if (match) {
+ const [, appName, timestamp, loggerName, level, message] = match;
+
+ logEntry.innerHTML = `
+ ${timestamp.split(' ')[1]}
+ ${appName ? `[${appName}] ` : ''}
+ ${level}
+ (${loggerName.replace('huntarr.', '')})
+ ${message}
+ `;
+ logEntry.classList.add(`log-${level.toLowerCase()}`);
+ } else {
+ // Fallback for lines that don't match the expected format
+ logEntry.innerHTML = `${logString} `;
+
+ // Basic level detection for fallback
+ if (logString.includes('ERROR')) logEntry.classList.add('log-error');
+ else if (logString.includes('WARN') || logString.includes('WARNING')) logEntry.classList.add('log-warning');
+ else if (logString.includes('DEBUG')) logEntry.classList.add('log-debug');
+ else logEntry.classList.add('log-info');
+ }
+
+ // Add to logs container
+ this.elements.logsContainer.appendChild(logEntry);
+
+ // Special event dispatching for Swaparr logs
+ if (logAppType === 'swaparr' && this.currentLogApp === 'swaparr') {
+ // Dispatch a custom event for swaparr.js to process
+ const swaparrEvent = new CustomEvent('swaparrLogReceived', {
+ detail: {
+ logData: match && match[5] ? match[5] : logString
+ }
+ });
+ document.dispatchEvent(swaparrEvent);
+ }
+
+ // Auto-scroll to bottom if enabled
+ if (this.autoScroll) {
+ this.elements.logsContainer.scrollTop = this.elements.logsContainer.scrollHeight;
+ }
+ } catch (error) {
+ console.error('[huntarrUI] Error processing log message:', error, 'Data:', event.data);
+ }
+ };
+
+ eventSource.onerror = (err) => {
+ console.error(`[huntarrUI] EventSource error for app ${this.currentLogApp}:`, err);
+ if (this.elements.logConnectionStatus) {
+ this.elements.logConnectionStatus.textContent = 'Error/Disconnected';
+ this.elements.logConnectionStatus.className = 'status-error'; // Use a specific error class
+ }
+ // Close the potentially broken connection
+ if (this.eventSources.logs) {
+ this.eventSources.logs.close();
+ console.log(`[huntarrUI] Closed potentially broken log EventSource for ${this.currentLogApp}.`);
+ }
+ // Attempt to reconnect after a delay, but only if still on the logs page
+ if (this.currentSection === 'logs') {
+ console.log(`[huntarrUI] Attempting to reconnect log stream for ${this.currentLogApp} in 5 seconds...`);
+ setTimeout(() => {
+ // Double-check if still on logs page before reconnecting
+ if (this.currentSection === 'logs') {
+ console.log(`[huntarrUI] Reconnecting log stream for ${this.currentLogApp}.`);
+ this.connectToLogs(); // Re-initiate connection
+ } else {
+ console.log(`[huntarrUI] Log reconnect cancelled; user navigated away from logs section.`);
+ }
+ }, 5000); // 5-second delay
+ }
+ }; // Added missing semicolon
+
+ this.eventSources.logs = eventSource; // Store the reference
+ } catch (e) {
+ console.error(`[huntarrUI] Failed to create EventSource for app ${appType}:`, e);
+ if (this.elements.logConnectionStatus) {
+ this.elements.logConnectionStatus.textContent = 'Failed to connect';
+ this.elements.logConnectionStatus.className = 'status-error';
+ }
+ }
+ },
+
+ disconnectAllEventSources: function() {
+ Object.keys(this.eventSources).forEach(key => {
+ const source = this.eventSources[key];
+ if (source) {
+ try {
+ if (source.readyState !== EventSource.CLOSED) {
+ source.close();
+ console.log(`[huntarrUI] Closed event source for ${key}.`);
+ } else {
+ console.log(`[huntarrUI] Event source for ${key} was already closed.`);
+ }
+ } catch (e) {
+ console.error(`[huntarrUI] Error closing event source for ${key}:`, e);
+ }
+ }
+ // Clear the reference
+ delete this.eventSources[key]; // Use delete
+ });
+ // Reset status indicator if logs aren't the active section
+ if (this.currentSection !== 'logs' && this.elements.logConnectionStatus) {
+ this.elements.logConnectionStatus.textContent = 'Disconnected';
+ this.elements.logConnectionStatus.className = 'status-disconnected';
+ }
+ },
+
+ clearLogs: function() {
+ if (this.elements.logsContainer) {
+ this.elements.logsContainer.innerHTML = '';
+ }
+ },
+
+ // Settings handling
+ loadAllSettings: function() {
+ // Disable save button until changes are made
+ this.updateSaveResetButtonState(false);
+ this.settingsChanged = false;
+
+ // Get all settings to populate forms
+ HuntarrUtils.fetchWithTimeout('/api/settings')
+ .then(response => response.json())
+ .then(data => {
+ console.log('Loaded settings:', data);
+
+ // Store original settings for comparison
+ this.originalSettings = data;
+
+ // Populate each app's settings form
+ if (data.sonarr) this.populateSettingsForm('sonarr', data.sonarr);
+ if (data.radarr) this.populateSettingsForm('radarr', data.radarr);
+ if (data.lidarr) this.populateSettingsForm('lidarr', data.lidarr);
+ if (data.readarr) this.populateSettingsForm('readarr', data.readarr);
+ if (data.whisparr) this.populateSettingsForm('whisparr', data.whisparr);
+ if (data.eros) this.populateSettingsForm('eros', data.eros);
+ if (data.swaparr) this.populateSettingsForm('swaparr', data.swaparr);
+ if (data.general) this.populateSettingsForm('general', data.general);
+
+ // Update duration displays (like sleep durations)
+ if (typeof SettingsForms !== 'undefined' &&
+ typeof SettingsForms.updateDurationDisplay === 'function') {
+ SettingsForms.updateDurationDisplay();
+ }
+
+ // Load stateful info immediately, don't wait for loadAllSettings to complete
+ this.loadStatefulInfo();
+ })
+ .catch(error => {
+ console.error('Error loading settings:', error);
+ this.showNotification('Error loading settings. Please try again.', 'error');
+ });
+ },
+
+ populateSettingsForm: function(app, appSettings) {
+ // Cache the form for this app
+ const form = document.getElementById(`${app}Settings`);
+ if (!form) return;
+
+ // Check if SettingsForms is loaded to generate the form
+ if (typeof SettingsForms !== 'undefined') {
+ const formFunction = SettingsForms[`generate${app.charAt(0).toUpperCase()}${app.slice(1)}Form`];
+ if (typeof formFunction === 'function') {
+ formFunction(form, appSettings); // This function already calls setupInstanceManagement internally
+
+ // Update duration displays for this app
+ if (typeof SettingsForms.updateDurationDisplay === 'function') {
+ try {
+ SettingsForms.updateDurationDisplay();
+ } catch (e) {
+ console.error(`[huntarrUI] Error updating duration display:`, e);
+ }
+ }
+ } else {
+ console.error(`[huntarrUI] Form generator function not found for app: ${app}`);
+ }
+ } else {
+ console.error('[huntarrUI] SettingsForms is not defined');
+ return;
+ }
+ },
+
+ // Called when any setting input changes in the active tab
+ markSettingsAsChanged() {
+ if (!this.settingsChanged) {
+ console.log("[huntarrUI] Settings marked as changed.");
+ this.settingsChanged = true;
+ this.updateSaveResetButtonState(true); // Enable buttons
+ }
+ },
+
// Persist the settings of the currently selected settings tab to the backend,
// then re-sync the form, connection badges, and (for 'general') the stateful
// management display from the server's response.
saveSettings: function() {
    const app = this.currentSettingsTab;
    console.log(`[huntarrUI] saveSettings called for app: ${app}`);

    // Clear the unsaved changes flag BEFORE sending the request.
    // This prevents the "unsaved changes" dialog from appearing; it is
    // re-set in the catch handler if the save fails.
    this.settingsChanged = false;
    this.updateSaveResetButtonState(false);

    // Use getFormSettings for all apps, as it handles different structures
    // (multi-instance *arr apps, single-instance apps, and Swaparr).
    let settings = this.getFormSettings(app);

    if (!settings) {
        console.error(`[huntarrUI] Failed to collect settings for app: ${app}`);
        this.showNotification('Error collecting settings from form.', 'error');
        return;
    }

    console.log(`[huntarrUI] Collected settings for ${app}:`, settings);

    // Check if this is general settings and if local_access_bypass is being
    // changed — that toggles authentication, which requires a page reload.
    const isLocalAccessBypassChanged = app === 'general' &&
        this.originalSettings &&
        this.originalSettings.general &&
        this.originalSettings.general.local_access_bypass !== settings.local_access_bypass;

    console.log(`[huntarrUI] Local access bypass changed: ${isLocalAccessBypassChanged}`);

    console.log(`[huntarrUI] Sending settings payload for ${app}:`, settings);

    // Use the correct endpoint based on app type.
    const endpoint = app === 'general' ? '/api/settings/general' : `/api/settings/${app}`;

    HuntarrUtils.fetchWithTimeout(endpoint, {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json'
        },
        body: JSON.stringify(settings)
    })
    .then(response => {
        if (!response.ok) {
            // Try to get error message from response body.
            return response.json().then(errData => {
                throw new Error(errData.error || `HTTP error! status: ${response.status}`);
            }).catch(() => {
                // Fallback if response body is not JSON or empty.
                throw new Error(`HTTP error! status: ${response.status}`);
            });
        }
        return response.json();
    })
    .then(savedConfig => {
        console.log('[huntarrUI] Settings saved successfully. Full config received:', savedConfig);

        // If local access bypass setting was changed, reload the page so the
        // new authentication mode takes effect.
        if (isLocalAccessBypassChanged) {
            this.showNotification('Settings saved successfully. Reloading page to apply authentication changes...', 'success');
            setTimeout(() => {
                window.location.href = '/'; // Redirect to home page after a brief delay
            }, 1500);
            return;
        }

        this.showNotification('Settings saved successfully', 'success');

        // Update original settings state with the full config returned from
        // backend (deep-copied so later form edits can't mutate it).
        if (typeof savedConfig === 'object' && savedConfig !== null) {
            this.originalSettings = JSON.parse(JSON.stringify(savedConfig));
        } else {
            console.error('[huntarrUI] Invalid config received from backend after save:', savedConfig);
            // Fall back to a full reload of settings from the server.
            this.loadAllSettings();
            return;
        }

        // Re-populate the form with the saved data.
        const currentAppSettings = this.originalSettings[app] || {};

        // Preserve instances data if missing in the response but was in our sent data.
        if (app === 'sonarr' && !currentAppSettings.instances && settings.instances) {
            currentAppSettings.instances = settings.instances;
        }

        this.populateSettingsForm(app, currentAppSettings);

        // Update connection status and UI.
        this.checkAppConnection(app);
        this.updateHomeConnectionStatus();

        // If general settings were saved, refresh the stateful info display.
        if (app === 'general') {
            // Update the displayed interval hours if it's available in the settings.
            if (settings.stateful_management_hours && document.getElementById('stateful_management_hours')) {
                const intervalInput = document.getElementById('stateful_management_hours');
                const intervalDaysSpan = document.getElementById('stateful_management_days');
                const expiresDateEl = document.getElementById('stateful_expires_date');

                // Update the input value.
                intervalInput.value = settings.stateful_management_hours;

                // Update the days display (hours / 24, one decimal place).
                if (intervalDaysSpan) {
                    const days = (settings.stateful_management_hours / 24).toFixed(1);
                    intervalDaysSpan.textContent = `${days} days`;
                }

                // Show updating indicator while the server recomputes expiry.
                if (expiresDateEl) {
                    expiresDateEl.textContent = 'Updating...';
                }

                // Also directly update the stateful expiration on the server and update UI.
                this.updateStatefulExpirationOnUI();
            } else {
                this.loadStatefulInfo();
            }
        }
    })
    .catch(error => {
        console.error('Error saving settings:', error);
        this.showNotification(`Error saving settings: ${error.message}`, 'error');
        // If there was an error, mark settings as changed again so the user
        // can retry and the unsaved-changes guard re-engages.
        this.settingsChanged = true;
        this.updateSaveResetButtonState(true);
    });
},
+
+ // Add or modify this function to handle enabling/disabling save/reset
+ updateSaveResetButtonState(enable) { // Changed signature
+ const saveButton = this.elements.saveSettingsButton;
+
+ if (saveButton) {
+ saveButton.disabled = !enable;
+ // Optional: Add/remove class for styling
+ if (enable) {
+ saveButton.classList.remove('disabled-button');
+ } else {
+ saveButton.classList.add('disabled-button');
+ }
+ }
+ },
+
+ // Get settings from the form, updated to handle instances consistently
// Collect the current values of an app's settings form into a plain object.
//
// Three shapes are handled:
//   - Swaparr: flat key/value settings, no instances array.
//   - Multi-instance *arr apps (e.g. Sonarr): one entry per '.instance-item'.
//   - Single-instance apps: one entry built from the standard field ids.
// Non-instance inputs are appended as top-level keys (app prefix stripped).
// Returns null if the app's form is not in the DOM.
getFormSettings: function(app) {
    const settings = {};
    const form = document.getElementById(`${app}Settings`);
    if (!form) {
        console.error(`[huntarrUI] Settings form for ${app} not found.`);
        return null;
    }

    // Special handling for Swaparr which has a different structure
    // (no instances; every input maps directly to a settings key).
    if (app === 'swaparr') {
        // Get all inputs directly without filtering for instance fields.
        const inputs = form.querySelectorAll('input, select');
        inputs.forEach(input => {
            // Extract the field name without the app prefix,
            // e.g. 'swaparr_max_strikes' -> 'max_strikes'.
            let key = input.id;
            if (key.startsWith(`${app}_`)) {
                key = key.substring(app.length + 1);
            }

            // Store the value based on input type.
            if (input.type === 'checkbox') {
                settings[key] = input.checked;
            } else if (input.type === 'number') {
                // Empty number fields become null rather than NaN.
                settings[key] = input.value === '' ? null : parseInt(input.value, 10);
            } else {
                settings[key] = input.value.trim();
            }
        });

        console.log(`[huntarrUI] Collected Swaparr settings:`, settings);
        return settings;
    }

    // Handle apps that use instances (Sonarr, Radarr, etc.)
    // Get all instance items in the form.
    const instanceItems = form.querySelectorAll('.instance-item');
    settings.instances = [];

    // Check if multi-instance UI elements exist (like Sonarr).
    if (instanceItems.length > 0) {
        console.log(`[huntarrUI] Found ${instanceItems.length} instance items for ${app}. Processing multi-instance mode.`);
        // Multi-instance logic (current Sonarr logic).
        instanceItems.forEach((item, index) => {
            const instanceId = item.dataset.instanceId; // Assumes Sonarr uses data-instance-id
            const nameInput = form.querySelector(`#${app}_instance_${instanceId}_name`);
            const urlInput = form.querySelector(`#${app}_instance_${instanceId}_api_url`);
            const keyInput = form.querySelector(`#${app}_instance_${instanceId}_api_key`);
            const enabledInput = item.querySelector('.instance-enabled'); // Assumes Sonarr uses this class for enable toggle

            if (urlInput && keyInput) { // Need URL and Key at least
                settings.instances.push({
                    // Use nameInput value if available, otherwise generate a default
                    name: nameInput && nameInput.value.trim() !== '' ? nameInput.value.trim() : `Instance ${index + 1}`,
                    api_url: urlInput.value.trim(),
                    api_key: keyInput.value.trim(),
                    // Default to true if toggle doesn't exist or is checked
                    enabled: enabledInput ? enabledInput.checked : true
                });
            }
        });
    } else {
        console.log(`[huntarrUI] No instance items found for ${app}. Processing single-instance mode.`);
        // Single-instance logic (for Radarr, Lidarr, etc.)
        // Look for the standard IDs used in their forms.
        const nameInput = form.querySelector(`#${app}_instance_name`); // Check for a specific name field
        const urlInput = form.querySelector(`#${app}_api_url`);
        const keyInput = form.querySelector(`#${app}_api_key`);
        // Assuming single instances might have an enable toggle like #app_enabled.
        const enabledInput = form.querySelector(`#${app}_enabled`);

        // Only add if URL and Key have values.
        if (urlInput && urlInput.value.trim() && keyInput && keyInput.value.trim()) {
            settings.instances.push({
                name: nameInput && nameInput.value.trim() !== '' ? nameInput.value.trim() : `${app} Instance 1`, // Default name
                api_url: urlInput.value.trim(),
                api_key: keyInput.value.trim(),
                // Default to true if toggle doesn't exist or is checked
                enabled: enabledInput ? enabledInput.checked : true
            });
        }
    }

    console.log(`[huntarrUI] Processed instances for ${app}:`, settings.instances);

    // Now collect any OTHER settings NOT part of the instance structure.
    const allInputs = form.querySelectorAll('input, select');
    const handledInstanceFieldIds = new Set();

    // Identify IDs used in instance collection to avoid double-adding them
    // as top-level keys below.
    if (instanceItems.length > 0) {
        // Multi-instance: Iterate items again to get IDs.
        instanceItems.forEach((item) => {
            const instanceId = item.dataset.instanceId;
            if(instanceId) {
                handledInstanceFieldIds.add(`${app}_instance_${instanceId}_name`);
                handledInstanceFieldIds.add(`${app}_instance_${instanceId}_api_url`);
                handledInstanceFieldIds.add(`${app}_instance_${instanceId}_api_key`);
                const enabledToggle = item.querySelector('.instance-enabled');
                if (enabledToggle && enabledToggle.id) handledInstanceFieldIds.add(enabledToggle.id);
            }
        });
    } else {
        // Single-instance: Check for standard IDs.
        if (form.querySelector(`#${app}_instance_name`)) handledInstanceFieldIds.add(`${app}_instance_name`);
        if (form.querySelector(`#${app}_api_url`)) handledInstanceFieldIds.add(`${app}_api_url`);
        if (form.querySelector(`#${app}_api_key`)) handledInstanceFieldIds.add(`${app}_api_key`);
        if (form.querySelector(`#${app}_enabled`)) handledInstanceFieldIds.add(`${app}_enabled`);
    }

    allInputs.forEach(input => {
        // Handle special case for Whisparr version (kept with its full id).
        if (input.id === 'whisparr_version') {
            if (app === 'whisparr') {
                settings['whisparr_version'] = input.value.trim();
                return; // Skip further processing for this field
            }
        }

        // Skip buttons and fields already processed as part of an instance.
        if (input.type === 'button' || handledInstanceFieldIds.has(input.id)) {
            return;
        }

        // Get the field key (remove app prefix).
        let key = input.id;

        if (key.startsWith(`${app}_`)) {
            key = key.substring(app.length + 1);
        }

        // Skip empty keys or keys that are just numbers (unlikely but possible).
        if (!key || /^\d+$/.test(key)) return;

        // Store the value.
        if (input.type === 'checkbox') {
            settings[key] = input.checked;
        } else if (input.type === 'number') {
            // Handle potential empty string for numbers: stored as null.
            // NOTE(review): parseInt truncates decimals — presumably all number
            // inputs here are integers; confirm if fractional settings exist.
            settings[key] = input.value === '' ? null : parseInt(input.value, 10);
        } else {
            settings[key] = input.value.trim();
        }
    });

    console.log(`[huntarrUI] Final collected settings for ${app}:`, settings);
    return settings;
},
+
+ // Handle instance management events
+ setupInstanceEventHandlers: function() {
+ console.log("DEBUG: setupInstanceEventHandlers called"); // Added logging
+ const settingsPanels = document.querySelectorAll('.app-settings-panel');
+
+ settingsPanels.forEach(panel => {
+ console.log(`DEBUG: Adding listeners to panel '${panel.id}'`); // Added logging
+ panel.addEventListener('addInstance', (e) => {
+ console.log(`DEBUG: addInstance event listener fired for panel '${panel.id}'. Event detail:`, e.detail);
+ this.addAppInstance(e.detail.appName);
+ });
+
+ panel.addEventListener('removeInstance', (e) => {
+ this.removeAppInstance(e.detail.appName, e.detail.instanceId);
+ });
+
+ panel.addEventListener('testConnection', (e) => {
+ this.testInstanceConnection(e.detail.appName, e.detail.instanceId, e.detail.url, e.detail.apiKey);
+ });
+ });
+ },
+
+ // Add a new instance to the app
+ addAppInstance: function(appName) {
+ console.log(`DEBUG: addAppInstance called for app '${appName}'`);
+ const container = document.getElementById(`${appName}Settings`);
+ if (!container) return;
+
+ // Get current settings
+ const currentSettings = this.getFormSettings(appName);
+
+ if (!currentSettings.instances) {
+ currentSettings.instances = [];
+ }
+
+ // Limit to 9 instances
+ if (currentSettings.instances.length >= 9) {
+ this.showNotification('Maximum of 9 instances allowed', 'error');
+ return;
+ }
+
+ // Add new instance with a default name
+ currentSettings.instances.push({
+ name: `Instance ${currentSettings.instances.length + 1}`,
+ api_url: '',
+ api_key: '',
+ enabled: true
+ });
+
+ // Regenerate form with new instance
+ SettingsForms[`generate${appName.charAt(0).toUpperCase()}${appName.slice(1)}Form`](container, currentSettings);
+
+ // Update controls like duration displays
+ SettingsForms.updateDurationDisplay();
+
+ this.showNotification('New instance added', 'success');
+ },
+
+ // Remove an instance
+ removeAppInstance: function(appName, instanceId) {
+ const container = document.getElementById(`${appName}Settings`);
+ if (!container) return;
+
+ // Get current settings
+ const currentSettings = this.getFormSettings(appName);
+
+ // Remove the instance
+ if (currentSettings.instances && instanceId >= 0 && instanceId < currentSettings.instances.length) {
+ // Keep at least one instance
+ if (currentSettings.instances.length > 1) {
+ const removedName = currentSettings.instances[instanceId].name;
+ currentSettings.instances.splice(instanceId, 1);
+
+ // Regenerate form
+ SettingsForms[`generate${appName.charAt(0).toUpperCase()}${appName.slice(1)}Form`](container, currentSettings);
+
+ // Update controls like duration displays
+ SettingsForms.updateDurationDisplay();
+
+ this.showNotification(`Instance "${removedName}" removed`, 'info');
+ } else {
+ this.showNotification('Cannot remove the last instance', 'error');
+ }
+ }
+ },
+
+ // Test connection for a specific instance
+ testInstanceConnection: function(appName, instanceId, url, apiKey) {
+ console.log(`Testing connection for ${appName} instance ${instanceId} with URL: ${url}`);
+
+ // Make sure instanceId is treated as a number
+ instanceId = parseInt(instanceId, 10);
+
+ // Find the status span where we'll display the result
+ const statusSpan = document.getElementById(`${appName}_instance_${instanceId}_status`);
+ if (!statusSpan) {
+ console.error(`Status span not found for ${appName} instance ${instanceId}`);
+ return;
+ }
+
+ // Show testing status
+ statusSpan.textContent = 'Testing...';
+ statusSpan.className = 'connection-status testing';
+
+ // Validate URL and API key
+ if (!url || !apiKey) {
+ statusSpan.textContent = 'Missing URL or API key';
+ statusSpan.className = 'connection-status error';
+ return;
+ }
+
+ // Check if URL is properly formatted
+ if (!url.startsWith('http://') && !url.startsWith('https://')) {
+ statusSpan.textContent = 'URL must start with http:// or https://';
+ statusSpan.className = 'connection-status error';
+ return;
+ }
+
+ // Check for trailing slashes in URL
+ if (url.endsWith('/') || url.endsWith('\\')) {
+ statusSpan.textContent = 'Remove trailing slash from URL (/ or \\)';
+ statusSpan.className = 'status-error';
+ return;
+ }
+
+ // Clean up the URL by removing trailing slashes (redundant but kept for safety)
+ url = url.trim().replace(/[/\\]+$/, '');
+
+ // Make the API request to test the connection
+ HuntarrUtils.fetchWithTimeout(`/api/${appName}/test-connection`, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify({
+ api_url: url,
+ api_key: apiKey
+ })
+ })
+ .then(response => {
+ if (!response.ok) {
+ return response.json().then(errorData => {
+ throw new Error(errorData.message || this.getConnectionErrorMessage(response.status));
+ }).catch(() => {
+ // Fallback if response body is not JSON or empty
+ throw new Error(this.getConnectionErrorMessage(response.status));
+ });
+ }
+ return response.json();
+ })
+ .then(data => {
+ console.log(`Connection test response data for ${appName} instance ${instanceId}:`, data);
+ if (data.success) {
+ statusSpan.textContent = data.message || 'Connected';
+ statusSpan.className = 'connection-status success';
+
+ // If a version was returned, display it
+ if (data.version) {
+ statusSpan.textContent += ` (v${data.version})`;
+ }
+ } else {
+ statusSpan.textContent = data.message || 'Failed';
+ statusSpan.className = 'connection-status error';
+ }
+ })
+ .catch(error => {
+ console.error(`Error testing connection for ${appName} instance ${instanceId}:`, error);
+
+ // Extract the most relevant part of the error message
+ let errorMessage = error.message || 'Unknown error';
+ if (errorMessage.includes('Name or service not known')) {
+ errorMessage = 'Unable to resolve hostname. Check the URL.';
+ } else if (errorMessage.includes('Connection refused')) {
+ errorMessage = 'Connection refused. Check that the service is running.';
+ } else if (errorMessage.includes('connect ETIMEDOUT') || errorMessage.includes('timeout')) {
+ errorMessage = 'Connection timed out. Check URL and port.';
+ } else if (errorMessage.includes('401') || errorMessage.includes('Authentication failed')) {
+ errorMessage = 'Invalid API key';
+ } else if (errorMessage.includes('404') || errorMessage.includes('not found')) {
+ errorMessage = 'URL endpoint not found. Check the URL.';
+ } else if (errorMessage.startsWith('HTTP error!')) {
+ errorMessage = 'Connection failed. Check URL and port.';
+ }
+
+ statusSpan.textContent = errorMessage;
+ statusSpan.className = 'connection-status error';
+ });
+ },
+
+ // Helper function to translate HTTP error codes to user-friendly messages
+ getConnectionErrorMessage: function(status) {
+ switch(status) {
+ case 400:
+ return 'Invalid request. Check URL format.';
+ case 401:
+ return 'Invalid API key';
+ case 403:
+ return 'Access forbidden. Check permissions.';
+ case 404:
+ return 'Service not found at this URL. Check address.';
+ case 500:
+ return 'Server error. Check if the service is working properly.';
+ case 502:
+ return 'Bad gateway. Check network connectivity.';
+ case 503:
+ return 'Service unavailable. Check if the service is running.';
+ case 504:
+ return 'Gateway timeout. Check network connectivity.';
+ default:
+ return `Connection error. Check URL and port.`;
+ }
+ },
+
+ // App connections
+ checkAppConnections: function() {
+ this.checkAppConnection('sonarr');
+ this.checkAppConnection('radarr');
+ this.checkAppConnection('lidarr');
+ this.checkAppConnection('readarr'); // Added readarr
+ this.checkAppConnection('whisparr'); // Added whisparr
+ this.checkAppConnection('eros'); // Enable actual Eros API check
+ },
+
+ checkAppConnection: function(app) {
+ HuntarrUtils.fetchWithTimeout(`/api/status/${app}`)
+ .then(response => response.json())
+ .then(data => {
+ // Pass the whole data object for all apps
+ this.updateConnectionStatus(app, data);
+
+ // Still update the configuredApps flag for potential other uses, but after updating status
+ this.configuredApps[app] = data.configured === true; // Ensure it's a boolean
+ })
+ .catch(error => {
+ console.error(`Error checking ${app} connection:`, error);
+ // Pass a default 'not configured' status object on error
+ this.updateConnectionStatus(app, { configured: false, connected: false });
+ });
+ },
+
// Update an app's home-page status card from a status payload.
// Hides the whole card when the app is not configured; otherwise renders a
// badge that, for *arr apps, shows a connected/total instance count.
//
// statusData fields read: configured, connected, connected_count,
// total_configured (all optional; missing values default to false/0).
updateConnectionStatus: function(app, statusData) {
    const statusElement = this.elements[`${app}HomeStatus`];
    if (!statusElement) return;

    // Find the parent container for the whole app status box.
    const appBox = statusElement.closest('.app-stats-card'); // CORRECTED SELECTOR
    if (!appBox) {
        // If the card structure changes, this might fail. Log a warning.
        console.warn(`[huntarrUI] Could not find parent '.app-stats-card' element for ${app}`);
    }

    let isConfigured = false;
    let isConnected = false;

    // Try to determine configured and connected status from statusData object.
    // Default to false if properties are missing.
    isConfigured = statusData?.configured === true;
    isConnected = statusData?.connected === true;

    // Special handling for *arr apps' multi-instance connected count.
    let connectedCount = statusData?.connected_count ?? 0;
    let totalConfigured = statusData?.total_configured ?? 0;

    // For all *arr apps, 'isConfigured' means at least one instance is configured.
    if (['sonarr', 'radarr', 'lidarr', 'readarr', 'whisparr', 'eros'].includes(app)) {
        isConfigured = totalConfigured > 0;
        // For *arr apps, 'isConnected' means at least one instance is connected.
        isConnected = isConfigured && connectedCount > 0;
    }

    // --- Visibility Logic ---
    if (isConfigured) {
        // Ensure the box is visible (clear any earlier 'none').
        if (appBox) appBox.style.display = '';
    } else {
        // Not configured - HIDE the box.
        if (appBox) appBox.style.display = 'none';
        // Update badge even if hidden (optional, but good practice).
        statusElement.className = 'status-badge not-configured';
        statusElement.innerHTML = ' Not Configured';
        return; // No need to update badge further if not configured
    }

    // --- Badge Update Logic (only runs if configured) ---
    if (['sonarr', 'radarr', 'lidarr', 'readarr', 'whisparr', 'eros'].includes(app)) {
        // *Arr specific badge text (already checked isConfigured).
        statusElement.innerHTML = ` Connected ${connectedCount}/${totalConfigured}`;
        statusElement.className = 'status-badge ' + (isConnected ? 'connected' : 'error');
    } else {
        // Standard badge update for other configured apps.
        if (isConnected) {
            statusElement.className = 'status-badge connected';
            statusElement.innerHTML = ' Connected';
        } else {
            statusElement.className = 'status-badge not-connected';
            statusElement.innerHTML = ' Not Connected';
        }
    }
},
+
+ // User actions
+ startHunt: function() {
+ HuntarrUtils.fetchWithTimeout('/api/hunt/start', { method: 'POST' })
+ .then(response => response.json())
+ .then(data => {
+ if (data.success) {
+ this.showNotification('Hunt started successfully', 'success');
+ } else {
+ this.showNotification('Failed to start hunt', 'error');
+ }
+ })
+ .catch(error => {
+ console.error('Error starting hunt:', error);
+ this.showNotification('Error starting hunt', 'error');
+ });
+ },
+
+ stopHunt: function() {
+ HuntarrUtils.fetchWithTimeout('/api/hunt/stop', { method: 'POST' })
+ .then(response => response.json())
+ .then(data => {
+ if (data.success) {
+ this.showNotification('Hunt stopped successfully', 'success');
+ } else {
+ this.showNotification('Failed to stop hunt', 'error');
+ }
+ })
+ .catch(error => {
+ console.error('Error stopping hunt:', error);
+ this.showNotification('Error stopping hunt', 'error');
+ });
+ },
+
+ // User
+ loadUsername: function() {
+ const usernameElement = document.getElementById('username');
+ if (!usernameElement) return;
+
+ HuntarrUtils.fetchWithTimeout('/api/user/info')
+ .then(response => response.json())
+ .then(data => {
+ if (data.username) {
+ usernameElement.textContent = data.username;
+ }
+
+ // Check if local access bypass is enabled and update UI visibility
+ this.checkLocalAccessBypassStatus();
+ })
+ .catch(error => {
+ console.error('Error loading username:', error);
+
+ // Still check local access bypass status even if username loading failed
+ this.checkLocalAccessBypassStatus();
+ });
+ },
+
+ // Check if local access bypass is enabled and update UI accordingly
// Query whether local access bypass is enabled and adjust the UI.
// Any failure (network, bad status, malformed payload) defaults to
// bypass-disabled, i.e. the user elements stay visible.
checkLocalAccessBypassStatus: function() {
    console.log("Checking local access bypass status...");
    HuntarrUtils.fetchWithTimeout('/api/get_local_access_bypass_status') // Corrected URL
        .then(response => {
            if (!response.ok) {
                // Log error if response is not OK (e.g., 404, 500).
                console.error(`Error fetching bypass status: ${response.status} ${response.statusText}`);
                // Attempt to read response body for more details, if available.
                // NOTE(review): this promise is intentionally not awaited/chained;
                // the body log is best-effort and may appear after the catch below.
                response.text().then(text => console.error('Response body:', text));
                // Throw an error to trigger the catch block with a clearer message.
                throw new Error(`HTTP error ${response.status}`);
            }
            return response.json(); // Only parse JSON if response is OK
        })
        .then(data => {
            if (data && typeof data.isEnabled === 'boolean') {
                console.log("Local access bypass status received:", data.isEnabled);
                this.updateUIForLocalAccessBypass(data.isEnabled);
            } else {
                // Handle cases where response is JSON but not the expected format.
                console.error('Invalid data format received for bypass status:', data);
                this.updateUIForLocalAccessBypass(false); // Default to disabled/showing elements
            }
        })
        .catch(error => {
            // Catch network errors and the error thrown from !response.ok.
            console.error('Error checking local access bypass status:', error);
            // Default to showing elements if we can't determine status.
            this.updateUIForLocalAccessBypass(false);
        });
},
+
+ // Update UI elements visibility based on local access bypass status
+ updateUIForLocalAccessBypass: function(isEnabled) {
+ console.log("Updating UI for local access bypass:", isEnabled);
+
+ // Get the user info container in topbar (username and logout button)
+ const userInfoContainer = document.getElementById('userInfoContainer');
+
+ // Get the user nav item in sidebar
+ const userNav = document.getElementById('userNav');
+
+ // Set display style explicitly based on local access bypass setting
+ if (isEnabled === true) {
+ console.log("Local access bypass is ENABLED - hiding user elements");
+
+ // Hide user info in topbar
+ if (userInfoContainer) {
+ userInfoContainer.style.display = 'none';
+ console.log(" • Hidden userInfoContainer");
+ } else {
+ console.warn(" ⚠ userInfoContainer not found");
+ }
+
+ // Hide user nav in sidebar
+ if (userNav) {
+ userNav.style.display = 'none';
+ console.log(" • Hidden userNav");
+ } else {
+ console.warn(" ⚠ userNav not found");
+ }
+ } else {
+ console.log("Local access bypass is DISABLED - showing user elements");
+
+ // Show user info in topbar
+ if (userInfoContainer) {
+ userInfoContainer.style.display = '';
+ console.log(" • Showing userInfoContainer");
+ } else {
+ console.warn(" ⚠ userInfoContainer not found");
+ }
+
+ // Show user nav in sidebar
+ if (userNav) {
+ userNav.style.display = '';
+ console.log(" • Showing userNav");
+ } else {
+ console.warn(" ⚠ userNav not found");
+ }
+ }
+ },
+
+ logout: function(e) { // Added logout function
+ e.preventDefault(); // Prevent default link behavior
+ console.log('[huntarrUI] Logging out...');
+ HuntarrUtils.fetchWithTimeout('/logout', { // Use the correct endpoint defined in Flask
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json'
+ }
+ })
+ .then(response => response.json())
+ .then(data => {
+ if (data.success) {
+ console.log('[huntarrUI] Logout successful, redirecting to login.');
+ window.location.href = '/login'; // Redirect to login page
+ } else {
+ console.error('[huntarrUI] Logout failed:', data.message);
+ this.showNotification('Logout failed. Please try again.', 'error');
+ }
+ })
+ .catch(error => {
+ console.error('Error during logout:', error);
+ this.showNotification('An error occurred during logout.', 'error');
+ });
+ },
+
+ // Media statistics handling
+ loadMediaStats: function() {
+ HuntarrUtils.fetchWithTimeout('/api/stats')
+ .then(response => {
+ if (!response.ok) {
+ throw new Error('Network response was not ok');
+ }
+ return response.json();
+ })
+ .then(data => {
+ if (data.success && data.stats) {
+ this.updateStatsDisplay(data.stats);
+ } else {
+ console.error('Failed to load statistics:', data.message || 'Unknown error');
+ }
+ })
+ .catch(error => {
+ console.error('Error fetching statistics:', error);
+ });
+ },
+
+ updateStatsDisplay: function(stats) {
+ // Update each app's statistics
+ const apps = ['sonarr', 'radarr', 'lidarr', 'readarr', 'whisparr', 'eros', 'swaparr'];
+ const statTypes = ['hunted', 'upgraded'];
+
+ apps.forEach(app => {
+ if (stats[app]) {
+ statTypes.forEach(type => {
+ const element = document.getElementById(`${app}-${type}`);
+ if (element) {
+ // Animate the number change
+ this.animateNumber(element, parseInt(element.textContent), stats[app][type] || 0);
+ }
+ });
+ }
+ });
+ },
+
+ animateNumber: function(element, start, end) {
+ const duration = 1000; // Animation duration in milliseconds
+ const startTime = performance.now();
+
+ const updateNumber = (currentTime) => {
+ const elapsedTime = currentTime - startTime;
+ const progress = Math.min(elapsedTime / duration, 1);
+
+ // Easing function for smooth animation
+ const easeOutQuad = progress * (2 - progress);
+
+ const currentValue = Math.floor(start + (end - start) * easeOutQuad);
+ element.textContent = currentValue;
+
+ if (progress < 1) {
+ requestAnimationFrame(updateNumber);
+ } else {
+ element.textContent = end; // Ensure we end with the exact target number
+ }
+ };
+
+ requestAnimationFrame(updateNumber);
+ },
+
+ resetMediaStats: function(appType = null) {
+ // Directly update the UI first to provide immediate feedback
+ const stats = {
+ 'sonarr': {'hunted': 0, 'upgraded': 0},
+ 'radarr': {'hunted': 0, 'upgraded': 0},
+ 'lidarr': {'hunted': 0, 'upgraded': 0},
+ 'readarr': {'hunted': 0, 'upgraded': 0},
+ 'whisparr': {'hunted': 0, 'upgraded': 0},
+ 'eros': {'hunted': 0, 'upgraded': 0},
+ 'swaparr': {'hunted': 0, 'upgraded': 0}
+ };
+
+ // Immediately update UI before even showing the confirmation
+ if (appType) {
+ // Only reset the specific app's stats
+ this.updateStatsDisplay({
+ [appType]: stats[appType]
+ });
+ } else {
+ // Reset all stats
+ this.updateStatsDisplay(stats);
+ }
+
+ // Show a success notification
+ this.showNotification('Statistics reset successfully', 'success');
+
+ // Try to send the reset to the server, but don't depend on it
+ try {
+ const requestBody = appType ? { app_type: appType } : {};
+
+ HuntarrUtils.fetchWithTimeout('/api/stats/reset_public', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify(requestBody)
+ })
+ .then(response => {
+ // Just log the response, don't rely on it for UI feedback
+ if (!response.ok) {
+ console.warn('Server responded with non-OK status for stats reset');
+ }
+ return response.json().catch(() => ({}));
+ })
+ .then(data => {
+ console.log('Stats reset response:', data);
+ })
+ .catch(error => {
+ console.warn('Error communicating with server for stats reset:', error);
+ });
+ } catch (error) {
+ console.warn('Error in stats reset:', error);
+ }
+ },
+
+ // Utility functions
+ showNotification: function(message, type) {
+ // Create a notification element
+ const notification = document.createElement('div');
+ notification.className = `notification ${type}`;
+ notification.textContent = message;
+
+ // Add to the document
+ document.body.appendChild(notification);
+
+ // Ensure any existing notification is removed first to prevent stacking
+ const existingNotifications = document.querySelectorAll('.notification');
+ existingNotifications.forEach(n => {
+ if (n !== notification) {
+ n.classList.remove('show');
+ setTimeout(() => n.remove(), 300);
+ }
+ });
+
+ // Fade in
+ setTimeout(() => {
+ notification.classList.add('show');
+ }, 10);
+
+ // Remove after a delay
+ setTimeout(() => {
+ notification.classList.remove('show');
+ setTimeout(() => {
+ notification.remove();
+ }, 300);
+ }, 3000);
+ },
+
+ capitalizeFirst: function(string) {
+ return string.charAt(0).toUpperCase() + string.slice(1);
+ },
+
+ // Load current version from version.txt
+ loadCurrentVersion: function() {
+ HuntarrUtils.fetchWithTimeout('/version.txt')
+ .then(response => {
+ if (!response.ok) {
+ throw new Error('Failed to load version.txt');
+ }
+ return response.text();
+ })
+ .then(version => {
+ const versionElement = document.getElementById('version-value');
+ if (versionElement) {
+ versionElement.textContent = version.trim();
+ }
+ })
+ .catch(error => {
+ console.error('Error loading current version:', error);
+ const versionElement = document.getElementById('version-value');
+ if (versionElement) {
+ versionElement.textContent = 'Error';
+ }
+ });
+ },
+
+ // Load latest version from GitHub releases
+ loadLatestVersion: function() {
+ HuntarrUtils.fetchWithTimeout('https://api.github.com/repos/plexguide/Huntarr.io/releases/latest')
+ .then(response => {
+ if (!response.ok) {
+ // Handle rate limiting or other errors
+ if (response.status === 403) {
+ console.warn('GitHub API rate limit likely exceeded.');
+ throw new Error('Rate limited');
+ }
+ throw new Error(`HTTP error! status: ${response.status}`);
+ }
+ return response.json();
+ })
+ .then(data => {
+ const latestVersionElement = document.getElementById('latest-version-value');
+ if (latestVersionElement && data && data.tag_name) {
+ // Remove potential 'v' prefix for consistency if needed, or keep it
+ latestVersionElement.textContent = data.tag_name;
+ } else if (latestVersionElement) {
+ latestVersionElement.textContent = 'N/A';
+ }
+ })
+ .catch(error => {
+ console.error('Error loading latest version from GitHub:', error);
+ const latestVersionElement = document.getElementById('latest-version-value');
+ if (latestVersionElement) {
+ latestVersionElement.textContent = error.message === 'Rate limited' ? 'Rate Limited' : 'Error';
+ }
+ });
+ },
+
+ // Load latest beta version from GitHub tags
+ loadBetaVersion: function() {
+ HuntarrUtils.fetchWithTimeout('https://api.github.com/repos/plexguide/Huntarr.io/tags?per_page=100')
+ .then(response => {
+ if (!response.ok) {
+ // Handle rate limiting or other errors
+ if (response.status === 403) {
+ console.warn('GitHub API rate limit likely exceeded.');
+ throw new Error('Rate limited');
+ }
+ throw new Error(`HTTP error! status: ${response.status}`);
+ }
+ return response.json();
+ })
+ .then(data => {
+ const betaVersionElement = document.getElementById('beta-version-value');
+
+ if (betaVersionElement && data && Array.isArray(data) && data.length > 0) {
+ // Find the first tag that starts with B (case insensitive)
+ const betaTag = data.find(tag => tag.name.toUpperCase().startsWith('B'));
+
+ if (betaTag) {
+ betaVersionElement.textContent = betaTag.name;
+ // Store in localStorage for future reference
+ try {
+ const versionInfo = localStorage.getItem('huntarr-version-info') || '{}';
+ const parsedInfo = JSON.parse(versionInfo);
+ parsedInfo.betaVersion = betaTag.name;
+ localStorage.setItem('huntarr-version-info', JSON.stringify(parsedInfo));
+ } catch (e) {
+ console.error('Error saving beta version to localStorage:', e);
+ }
+ } else {
+ betaVersionElement.textContent = 'None';
+ }
+ } else if (betaVersionElement) {
+ betaVersionElement.textContent = 'N/A';
+ }
+ })
+ .catch(error => {
+ console.error('Error loading beta version from GitHub:', error);
+ const betaVersionElement = document.getElementById('beta-version-value');
+ if (betaVersionElement) {
+ betaVersionElement.textContent = error.message === 'Rate limited' ? 'Rate Limited' : 'Error';
+ }
+ });
+ },
+
+ // Load GitHub star count
+ loadGitHubStarCount: function() {
+ const starsElement = document.getElementById('github-stars-value');
+ if (!starsElement) return;
+
+ starsElement.textContent = 'Loading...';
+
+ // GitHub API endpoint for repository information
+ const apiUrl = 'https://api.github.com/repos/plexguide/huntarr';
+
+ HuntarrUtils.fetchWithTimeout(apiUrl)
+ .then(response => {
+ if (!response.ok) {
+ throw new Error(`GitHub API error: ${response.status}`);
+ }
+ return response.json();
+ })
+ .then(data => {
+ if (data && data.stargazers_count !== undefined) {
+ // Format the number with commas for thousands
+ const formattedStars = data.stargazers_count.toLocaleString();
+ starsElement.textContent = formattedStars;
+
+ // Store in localStorage to avoid excessive API requests
+ const cacheData = {
+ stars: data.stargazers_count,
+ timestamp: Date.now()
+ };
+ localStorage.setItem('huntarr-github-stars', JSON.stringify(cacheData));
+ } else {
+ throw new Error('Star count not found in response');
+ }
+ })
+ .catch(error => {
+ console.error('Error fetching GitHub stars:', error);
+
+ // Try to load from cache if we have it
+ const cachedData = localStorage.getItem('huntarr-github-stars');
+ if (cachedData) {
+ try {
+ const parsed = JSON.parse(cachedData);
+ starsElement.textContent = parsed.stars.toLocaleString();
+ } catch (e) {
+ starsElement.textContent = 'N/A';
+ }
+ } else {
+ starsElement.textContent = 'N/A';
+ }
+ });
+ },
+
+ // Add or modify this function to handle enabling/disabling save/reset
+ updateSaveResetButtonState(enable) { // Changed signature
+ const saveButton = this.elements.saveSettingsButton;
+
+ if (saveButton) {
+ saveButton.disabled = !enable;
+ // Optional: Add/remove class for styling
+ if (enable) {
+ saveButton.classList.remove('disabled-button');
+ } else {
+ saveButton.classList.add('disabled-button');
+ }
+ }
+ },
+
    // Refresh the connection-status indicators shown on the home screen,
    // e.g. after settings were saved.
    updateHomeConnectionStatus: function() {
        console.log('[huntarrUI] Updating home connection statuses...');
        // This function should ideally call checkAppConnection for all relevant apps
        // or use the stored configuredApps status if checkAppConnection updates it.
        this.checkAppConnections(); // Re-check all connections after a save might be simplest
    },
+
    /**
     * Load stateful management info (creation/expiry timestamps and the
     * expiration interval) and render it into the settings UI.
     *
     * Flow: on the first attempt, show "Loading..." placeholders and, unless
     * skipCache is set, render any localStorage-cached snapshot that is less
     * than 5 minutes old; then always fetch fresh data from
     * /api/stateful/info, retrying with exponential backoff on failure and
     * falling back to the cached snapshot (marked "(cached)") after the
     * final retry.
     *
     * @param attempts  current retry attempt number (0-based)
     * @param skipCache when true, do not render the cached snapshot first
     */
    loadStatefulInfo: function(attempts = 0, skipCache = false) {
        const initialStateEl = document.getElementById('stateful_initial_state');
        const expiresDateEl = document.getElementById('stateful_expires_date');
        const intervalInput = document.getElementById('stateful_management_hours');
        const intervalDaysSpan = document.getElementById('stateful_management_days');

        // Max retry attempts - increased for better reliability
        const maxAttempts = 5;

        console.log(`[StatefulInfo] Loading stateful info (attempt ${attempts + 1}, skipCache: ${skipCache})`);

        // Update UI to show loading state instead of N/A on first attempt
        if (attempts === 0) {
            if (initialStateEl && initialStateEl.textContent !== 'Loading...') initialStateEl.textContent = 'Loading...';
            if (expiresDateEl && expiresDateEl.textContent !== 'Updating...') expiresDateEl.textContent = 'Loading...';
        }

        // First check if we have cached data in localStorage that we can use immediately
        const cachedStatefulData = localStorage.getItem('huntarr-stateful-data');
        if (!skipCache && cachedStatefulData && attempts === 0) {
            try {
                const parsedData = JSON.parse(cachedStatefulData);
                const cacheAge = Date.now() - parsedData.timestamp;

                // Use cache if it's less than 5 minutes old while waiting for fresh data
                if (cacheAge < 300000) {
                    console.log('[StatefulInfo] Using cached data while fetching fresh data');

                    // Display cached data (timestamps are epoch seconds)
                    if (initialStateEl && parsedData.created_at_ts) {
                        const createdDate = new Date(parsedData.created_at_ts * 1000);
                        initialStateEl.textContent = this.formatDateNicely(createdDate);
                    }

                    if (expiresDateEl && parsedData.expires_at_ts) {
                        const expiresDate = new Date(parsedData.expires_at_ts * 1000);
                        expiresDateEl.textContent = this.formatDateNicely(expiresDate);
                    }

                    // Update interval input and days display
                    if (intervalInput && parsedData.interval_hours) {
                        intervalInput.value = parsedData.interval_hours;
                        if (intervalDaysSpan) {
                            const days = (parsedData.interval_hours / 24).toFixed(1);
                            intervalDaysSpan.textContent = `${days} days`;
                        }
                    }
                }
            } catch (e) {
                console.warn('[StatefulInfo] Error parsing cached data:', e);
            }
        }

        // Always fetch fresh data from the server (cache headers prevent staleness)
        HuntarrUtils.fetchWithTimeout('/api/stateful/info', {
            cache: 'no-cache',
            headers: {
                'Cache-Control': 'no-cache, no-store, must-revalidate',
                'Pragma': 'no-cache',
                'Expires': '0'
            }
        })
        .then(response => {
            if (!response.ok) {
                throw new Error(`HTTP error! Status: ${response.status} ${response.statusText}`);
            }
            return response.json();
        })
        .then(data => {
            if (data.success) {
                // Cache the response with a timestamp for future use
                localStorage.setItem('huntarr-stateful-data', JSON.stringify({
                    ...data,
                    timestamp: Date.now()
                }));

                // Handle initial state date
                if (initialStateEl) {
                    if (data.created_at_ts) {
                        const createdDate = new Date(data.created_at_ts * 1000);
                        initialStateEl.textContent = this.formatDateNicely(createdDate);
                    } else {
                        initialStateEl.textContent = 'Not yet created';

                        // If this is the first state load attempt and no timestamp exists,
                        // it might be because the state file hasn't been created yet
                        if (attempts < maxAttempts) {
                            console.log(`[StatefulInfo] No initial state timestamp, will retry (${attempts + 1}/${maxAttempts})`);
                            setTimeout(() => {
                                this.loadStatefulInfo(attempts + 1);
                            }, 500); // Longer delay for better chance of success
                            return;
                        }
                    }
                }

                // Handle expiration date
                if (expiresDateEl) {
                    if (data.expires_at_ts) {
                        const expiresDate = new Date(data.expires_at_ts * 1000);
                        expiresDateEl.textContent = this.formatDateNicely(expiresDate);
                    } else {
                        expiresDateEl.textContent = 'Not set';
                    }
                }

                // Update interval input and days display
                if (intervalInput && data.interval_hours) {
                    intervalInput.value = data.interval_hours;
                    if (intervalDaysSpan) {
                        const days = (data.interval_hours / 24).toFixed(1);
                        intervalDaysSpan.textContent = `${days} days`;
                    }
                }

                // Hide error notification if it was visible
                const notification = document.getElementById('stateful-notification');
                if (notification) {
                    notification.style.display = 'none';
                }

                // Store the data for future reference
                this._cachedStatefulData = data;

                console.log('[StatefulInfo] Successfully loaded and displayed stateful data');
            } else {
                throw new Error(data.message || 'Failed to load stateful info');
            }
        })
        .catch(error => {
            console.error(`Error loading stateful info (attempt ${attempts + 1}/${maxAttempts + 1}):`, error);

            // Retry if we haven't reached max attempts with exponential backoff
            if (attempts < maxAttempts) {
                const delay = Math.min(2000, 300 * Math.pow(2, attempts)); // Exponential backoff with max 2000ms
                console.log(`[StatefulInfo] Retrying in ${delay}ms (attempt ${attempts + 1}/${maxAttempts})`);
                setTimeout(() => {
                    // Double-check if still on the same page before retrying
                    if (document.getElementById('stateful_management_hours')) {
                        this.loadStatefulInfo(attempts + 1);
                    } else {
                        console.log(`[StatefulInfo] Stateful info retry cancelled; user navigated away.`);
                    }
                }, delay);
                return;
            }

            // Use cached data as fallback if available
            const cachedStatefulData = localStorage.getItem('huntarr-stateful-data');
            if (cachedStatefulData) {
                try {
                    console.log('[StatefulInfo] Using cached data as fallback after failed fetch');
                    const parsedData = JSON.parse(cachedStatefulData);

                    if (initialStateEl && parsedData.created_at_ts) {
                        const createdDate = new Date(parsedData.created_at_ts * 1000);
                        initialStateEl.textContent = this.formatDateNicely(createdDate) + ' (cached)';
                    } else if (initialStateEl) {
                        initialStateEl.textContent = 'Not available';
                    }

                    if (expiresDateEl && parsedData.expires_at_ts) {
                        const expiresDate = new Date(parsedData.expires_at_ts * 1000);
                        expiresDateEl.textContent = this.formatDateNicely(expiresDate) + ' (cached)';
                    } else if (expiresDateEl) {
                        expiresDateEl.textContent = 'Not available';
                    }

                    // Update interval input and days display from cache
                    if (intervalInput && parsedData.interval_hours) {
                        intervalInput.value = parsedData.interval_hours;
                        if (intervalDaysSpan) {
                            const days = (parsedData.interval_hours / 24).toFixed(1);
                            intervalDaysSpan.textContent = `${days} days`;
                        }
                    }

                    return;
                } catch (e) {
                    console.warn('[StatefulInfo] Error parsing cached data as fallback:', e);
                }
            }

            // Final fallback if no cached data
            if (initialStateEl) initialStateEl.textContent = 'Not available';
            if (expiresDateEl) expiresDateEl.textContent = 'Not available';

            // Show error notification
            const notification = document.getElementById('stateful-notification');
            if (notification) {
                notification.style.display = 'block';
                notification.textContent = 'Could not load stateful management info. This may affect media tracking.';
            }
        });
    },
+
+ // Format date nicely with time, day, and relative time indication
+ formatDateNicely: function(date) {
+ if (!(date instanceof Date) || isNaN(date)) {
+ return 'Invalid date';
+ }
+
+ const options = {
+ weekday: 'short',
+ year: 'numeric',
+ month: 'short',
+ day: 'numeric',
+ hour: '2-digit',
+ minute: '2-digit'
+ };
+
+ const formattedDate = date.toLocaleDateString(undefined, options);
+
+ // Add relative time indicator (e.g., "in 6 days" or "7 days ago")
+ const now = new Date();
+ const diffTime = date.getTime() - now.getTime();
+ const diffDays = Math.ceil(diffTime / (1000 * 60 * 60 * 24));
+
+ let relativeTime = '';
+ if (diffDays > 0) {
+ relativeTime = ` (in ${diffDays} day${diffDays !== 1 ? 's' : ''})`;
+ } else if (diffDays < 0) {
+ relativeTime = ` (${Math.abs(diffDays)} day${Math.abs(diffDays) !== 1 ? 's' : ''} ago)`;
+ } else {
+ relativeTime = ' (today)';
+ }
+
+ return `${formattedDate}${relativeTime}`;
+ },
+
+ // Reset stateful management - clear all processed IDs
+ resetStatefulManagement: function() {
+ console.log("Reset stateful management function called");
+
+ // Show a loading indicator or disable the button
+ const resetBtn = document.getElementById('reset_stateful_btn');
+ if (resetBtn) {
+ resetBtn.disabled = true;
+ const originalText = resetBtn.innerHTML;
+ resetBtn.innerHTML = ' Resetting...';
+ console.log("Reset button found and disabled:", resetBtn);
+ } else {
+ console.error("Reset button not found in the DOM!");
+ }
+
+ // Add debug logging
+ console.log("Sending reset request to /api/stateful/reset");
+
+ HuntarrUtils.fetchWithTimeout('/api/stateful/reset', {
+ method: 'POST',
+ headers: {
+ 'Accept': 'application/json',
+ 'Content-Type': 'application/json',
+ 'Cache-Control': 'no-cache, no-store, must-revalidate',
+ 'Pragma': 'no-cache'
+ },
+ cache: 'no-cache' // Add cache control to prevent caching
+ })
+ .then(response => {
+ console.log("Reset response received:", response.status, response.statusText);
+ if (!response.ok) {
+ throw new Error(`HTTP error! Status: ${response.status}`);
+ }
+ return response.json();
+ })
+ .then(data => {
+ console.log("Reset response data:", data);
+
+ if (data.success) {
+ this.showNotification('Stateful management reset successfully', 'success');
+ // Wait a moment before reloading the info to ensure it's refreshed
+ setTimeout(() => {
+ this.loadStatefulInfo(0); // Reload stateful info with fresh attempt
+
+ // Re-enable the button
+ if (resetBtn) {
+ resetBtn.disabled = false;
+ resetBtn.innerHTML = ' Reset';
+ }
+ }, 1000);
+ } else {
+ throw new Error(data.message || 'Unknown error resetting stateful management');
+ }
+ })
+ .catch(error => {
+ console.error("Error resetting stateful management:", error);
+ this.showNotification(`Error resetting stateful management: ${error.message}`, 'error');
+
+ // Re-enable the button
+ if (resetBtn) {
+ resetBtn.disabled = false;
+ resetBtn.innerHTML = ' Reset';
+ }
+ });
+ },
+
+ // Update stateful management expiration based on hours input
+ updateStatefulExpirationOnUI: function() {
+ const hoursInput = document.getElementById('stateful_management_hours');
+ if (!hoursInput) return;
+
+ const hours = parseInt(hoursInput.value) || 168;
+
+ // Show updating indicator
+ const expiresDateEl = document.getElementById('stateful_expires_date');
+ const initialStateEl = document.getElementById('stateful_initial_state');
+
+ if (expiresDateEl) {
+ expiresDateEl.textContent = 'Updating...';
+ }
+
+ HuntarrUtils.fetchWithTimeout('/api/stateful/update-expiration', {
+ method: 'POST',
+ headers: {
+ 'Accept': 'application/json',
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify({ hours: hours }),
+ cache: 'no-cache'
+ })
+ .then(response => {
+ if (!response.ok) {
+ throw new Error(`HTTP error! Status: ${response.status} ${response.statusText}`);
+ }
+ return response.json();
+ })
+ .then(data => {
+ if (data.success) {
+ console.log('[huntarrUI] Stateful expiration updated successfully:', data);
+
+ // Get updated info to show proper dates
+ this.loadStatefulInfo(0, true);
+
+ // Show a notification
+ this.showNotification(`Updated expiration to ${hours} hours (${(hours/24).toFixed(1)} days)`, 'success');
+ } else {
+ throw new Error(data.message || 'Unknown error updating expiration');
+ }
+ })
+ .catch(error => {
+ console.error('Error updating stateful expiration:', error);
+ this.showNotification(`Failed to update expiration: ${error.message}`, 'error');
+ // Reset the UI
+ if (expiresDateEl) {
+ expiresDateEl.textContent = 'Error updating';
+ }
+
+ // Try to reload original data
+ setTimeout(() => this.loadStatefulInfo(), 1000);
+ });
+ },
+
+ // Add the updateStatefulExpiration method
+ updateStatefulExpiration: function(hours) {
+ if (!hours || typeof hours !== 'number' || hours <= 0) {
+ console.error('[huntarrUI] Invalid hours value for updateStatefulExpiration:', hours);
+ return;
+ }
+
+ console.log(`[huntarrUI] Directly updating stateful expiration to ${hours} hours`);
+
+ // Make a direct API call to update the stateful expiration
+ HuntarrUtils.fetchWithTimeout('/api/stateful/update-expiration', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify({ hours: hours })
+ })
+ .then(response => {
+ if (!response.ok) {
+ throw new Error(`HTTP error! Status: ${response.status}`);
+ }
+ return response.json();
+ })
+ .then(data => {
+ console.log('[huntarrUI] Stateful expiration updated successfully:', data);
+ // Update the expiration date display
+ const expiresDateEl = document.getElementById('stateful_expires_date');
+ if (expiresDateEl && data.expires_date) {
+ expiresDateEl.textContent = data.expires_date;
+ }
+ })
+ .catch(error => {
+ console.error('[huntarrUI] Error updating stateful expiration:', error);
+ });
+ },
+
+ // Add global event handler and method to track saved settings across all apps
+ registerGlobalUnsavedChangesHandler: function() {
+ window.addEventListener('beforeunload', this.handleUnsavedChangesBeforeUnload.bind(this));
+
+ // Reset hasUnsavedChanges when settings are saved
+ document.addEventListener('settings:saved', (event) => {
+ if (event.detail && event.detail.appType) {
+ console.log(`settings:saved event received for ${event.detail.appType}`);
+ if (this.formChanged) {
+ this.formChanged[event.detail.appType] = false;
+ }
+
+ // Also clear the change tracking in the appsModule if it exists
+ if (window.appsModule) {
+ // Reset the app in the tracking array
+ if (window.appsModule.appsWithChanges &&
+ window.appsModule.appsWithChanges.includes(event.detail.appType)) {
+ window.appsModule.appsWithChanges =
+ window.appsModule.appsWithChanges.filter(app => app !== event.detail.appType);
+ }
+
+ // Only update the overall flag if there are no apps with changes left
+ if (!window.appsModule.appsWithChanges || window.appsModule.appsWithChanges.length === 0) {
+ window.appsModule.settingsChanged = false;
+ }
+ }
+
+ // Check if there are any remaining form changes
+ this.checkForRemainingChanges();
+ }
+ });
+ },
+
+ // New method to check if any forms still have changes
+ checkForRemainingChanges: function() {
+ if (!this.formChanged) return;
+
+ // Check if any forms still have changes
+ const hasAnyChanges = Object.values(this.formChanged).some(val => val === true);
+
+ console.log('Checking for remaining form changes:', {
+ formChanged: this.formChanged,
+ hasAnyChanges: hasAnyChanges
+ });
+
+ // Update the global flag
+ this.hasUnsavedChanges = hasAnyChanges;
+ },
+
+ // Handle unsaved changes before unload
+ handleUnsavedChangesBeforeUnload: function(event) {
+ // Check if we should suppress the check (used for test connection functionality)
+ if (this.suppressUnsavedChangesCheck || window._suppressUnsavedChangesDialog) {
+ console.log('Unsaved changes check suppressed');
+ return;
+ }
+
+ // If we have unsaved changes, show confirmation dialog
+ if (this.hasUnsavedChanges) {
+ console.log('Preventing navigation due to unsaved changes');
+ event.preventDefault();
+ event.returnValue = 'You have unsaved changes. Do you want to continue without saving?';
+ return event.returnValue;
+ }
+ },
+
+ // Add a proper hasFormChanges function to compare form values with original values
+ hasFormChanges: function(app) {
+ if (!app || !this.originalSettings || !this.originalSettings[app]) return false;
+
+ const form = document.getElementById(`${app}Settings`);
+ if (!form) return false;
+
+ const currentSettings = this.getFormSettings(app);
+ if (!currentSettings) return false;
+
+ // Deep comparison of current settings with original settings
+ // For complex objects like instances, we need to stringify them for comparison
+ const originalJSON = JSON.stringify(this.originalSettings[app]);
+ const currentJSON = JSON.stringify(currentSettings);
+
+ return originalJSON !== currentJSON;
+ },
+
+ // Add resetAppCycle function to the huntarrUI object
+ resetAppCycle: function(app, button) {
+ // Show spinner and disable button
+ const originalButtonText = button.innerHTML;
+ button.innerHTML = ' Resetting...';
+ button.classList.add('resetting');
+
+ // Make API call to reset the app cycle
+ fetch(`/api/cycle/reset/${app}`, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ })
+ .then(response => {
+ if (!response.ok) {
+ throw new Error(`HTTP error! Status: ${response.status}`);
+ }
+ return response.json();
+ })
+ .then(data => {
+ if (data.success) {
+ // Show success notification
+ this.showNotification(`${app.charAt(0).toUpperCase() + app.slice(1)} cycle reset triggered successfully`, 'success');
+ } else {
+ // Show error notification
+ this.showNotification(`Error: ${data.error || 'Failed to reset cycle'}`, 'error');
+ }
+ })
+ .catch(error => {
+ // Show error notification
+ this.showNotification(`Error: ${error.message}`, 'error');
+ })
+ .finally(() => {
+ // Restore button state after 2 seconds for visual feedback
+ setTimeout(() => {
+ button.innerHTML = originalButtonText;
+ button.classList.remove('resetting');
+ }, 2000);
+ });
+ },
+};
+
// Bootstrap the UI once the DOM is ready.
document.addEventListener('DOMContentLoaded', () => huntarrUI.init());

// Expose huntarrUI globally so per-app modules can reach it.
window.huntarrUI = huntarrUI;
diff --git a/Huntarr.io-6.3.6/frontend/static/js/new-user.js b/Huntarr.io-6.3.6/frontend/static/js/new-user.js
new file mode 100644
index 0000000..0adbd5d
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/static/js/new-user.js
@@ -0,0 +1,363 @@
+/**
+ * Huntarr - User Settings Page
+ * Handles user profile management functionality
+ */
+
+// Immediately execute this function to avoid global scope pollution
+(function() {
+ // Wait for the DOM to be fully loaded
    // Wait for the DOM to be fully loaded before touching page elements.
    document.addEventListener('DOMContentLoaded', function() {
        console.log('User settings page loaded');

        // Initialize user settings functionality (nav highlight, theme, data load)
        initUserPage();

        // Setup button handlers for username/password/2FA forms
        setupEventHandlers();
    });
+
+ function initUserPage() {
+ // Set active nav item
+ const navItems = document.querySelectorAll('.nav-item');
+ navItems.forEach(item => item.classList.remove('active'));
+ const userNav = document.getElementById('userNav');
+ if (userNav) userNav.classList.add('active');
+
+ const pageTitleElement = document.getElementById('currentPageTitle');
+ if (pageTitleElement) pageTitleElement.textContent = 'User Settings';
+
+ // Apply dark mode
+ document.body.classList.add('dark-theme');
+ localStorage.setItem('huntarr-dark-mode', 'true');
+
+ // Fetch user data
+ fetchUserInfo();
+ }
+
+ // Setup all event handlers for the page
+ function setupEventHandlers() {
+ // Change username handler
+ const saveUsernameBtn = document.getElementById('saveUsername');
+ if (saveUsernameBtn) {
+ saveUsernameBtn.addEventListener('click', handleUsernameChange);
+ }
+
+ // Change password handler
+ const savePasswordBtn = document.getElementById('savePassword');
+ if (savePasswordBtn) {
+ savePasswordBtn.addEventListener('click', handlePasswordChange);
+ }
+
+ // 2FA handlers
+ const enableTwoFactorBtn = document.getElementById('enableTwoFactor');
+ if (enableTwoFactorBtn) {
+ enableTwoFactorBtn.addEventListener('click', handleEnableTwoFactor);
+ }
+
+ const verifyTwoFactorBtn = document.getElementById('verifyTwoFactor');
+ if (verifyTwoFactorBtn) {
+ verifyTwoFactorBtn.addEventListener('click', handleVerifyTwoFactor);
+ }
+
+ const disableTwoFactorBtn = document.getElementById('disableTwoFactor');
+ if (disableTwoFactorBtn) {
+ disableTwoFactorBtn.addEventListener('click', handleDisableTwoFactor);
+ }
+ }
+
+ // Username change handler
+ function handleUsernameChange() {
+ const newUsername = document.getElementById('newUsername').value.trim();
+ const currentPassword = document.getElementById('currentPasswordForUsernameChange').value;
+ const statusElement = document.getElementById('usernameStatus');
+
+ if (!newUsername || !currentPassword) {
+ showStatus(statusElement, 'Please fill in all fields', 'error');
+ return;
+ }
+
+ // Min username length check
+ if (newUsername.length < 3) {
+ showStatus(statusElement, 'Username must be at least 3 characters long', 'error');
+ return;
+ }
+
+ fetch('/api/user/change-username', {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({
+ username: newUsername,
+ password: currentPassword
+ })
+ })
+ .then(response => response.json())
+ .then(data => {
+ if (data.success) {
+ showStatus(statusElement, 'Username updated successfully', 'success');
+ // Update displayed username
+ updateUsernameElements(newUsername);
+ // Clear form fields
+ document.getElementById('newUsername').value = '';
+ document.getElementById('currentPasswordForUsernameChange').value = '';
+ } else {
+ showStatus(statusElement, data.error || 'Failed to update username', 'error');
+ }
+ })
+ .catch(error => {
+ console.error('Error updating username:', error);
+ showStatus(statusElement, 'Error updating username: ' + error.message, 'error');
+ });
+ }
+
+ // Password change handler
+ function handlePasswordChange() {
+ const currentPassword = document.getElementById('currentPassword').value;
+ const newPassword = document.getElementById('newPassword').value;
+ const confirmPassword = document.getElementById('confirmPassword').value;
+ const statusElement = document.getElementById('passwordStatus');
+
+ if (!currentPassword || !newPassword || !confirmPassword) {
+ showStatus(statusElement, 'Please fill in all fields', 'error');
+ return;
+ }
+
+ if (newPassword !== confirmPassword) {
+ showStatus(statusElement, 'New passwords do not match', 'error');
+ return;
+ }
+
+ // Validate password (using function from user.html)
+ const passwordError = validatePassword(newPassword);
+ if (passwordError) {
+ showStatus(statusElement, passwordError, 'error');
+ return;
+ }
+
+ fetch('/api/user/change-password', {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({
+ current_password: currentPassword,
+ new_password: newPassword
+ })
+ })
+ .then(response => response.json())
+ .then(data => {
+ if (data.success) {
+ showStatus(statusElement, 'Password updated successfully', 'success');
+ // Clear form fields
+ document.getElementById('currentPassword').value = '';
+ document.getElementById('newPassword').value = '';
+ document.getElementById('confirmPassword').value = '';
+ } else {
+ showStatus(statusElement, data.error || 'Failed to update password', 'error');
+ }
+ })
+ .catch(error => {
+ console.error('Error updating password:', error);
+ showStatus(statusElement, 'Error updating password: ' + error.message, 'error');
+ });
+ }
+
+ // 2FA setup handler
+ function handleEnableTwoFactor() {
+ fetch('/api/user/2fa/setup', { method: 'POST' })
+ .then(response => response.json())
+ .then(data => {
+ if (data.success) {
+ // Update QR code and secret
+ const qrCodeImg = document.getElementById('qrCode');
+ if (qrCodeImg) {
+ qrCodeImg.src = data.qr_code_url;
+ }
+
+ const secretKeyElement = document.getElementById('secretKey');
+ if (secretKeyElement) {
+ secretKeyElement.textContent = data.secret;
+ }
+
+ // Show setup section
+ updateVisibility('enableTwoFactorSection', false);
+ updateVisibility('setupTwoFactorSection', true);
+ } else {
+ console.error('Failed to setup 2FA:', data.error);
+ alert('Failed to setup 2FA: ' + (data.error || 'Unknown error'));
+ }
+ })
+ .catch(error => {
+ console.error('Error setting up 2FA:', error);
+ alert('Error setting up 2FA: ' + error.message);
+ });
+ }
+
+ // 2FA verification handler
+ function handleVerifyTwoFactor() {
+ const code = document.getElementById('verificationCode').value;
+ const verifyStatusElement = document.getElementById('verifyStatus');
+
+ if (!code || code.length !== 6 || !/^\d{6}$/.test(code)) {
+ showStatus(verifyStatusElement, 'Please enter a valid 6-digit verification code', 'error');
+ return;
+ }
+
+ fetch('/api/user/2fa/verify', {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({ code: code })
+ })
+ .then(response => response.json())
+ .then(data => {
+ if (data.success) {
+ showStatus(verifyStatusElement, '2FA enabled successfully', 'success');
+ // Update UI state
+ setTimeout(() => {
+ update2FAStatus(true);
+ document.getElementById('verificationCode').value = '';
+ }, 1500); // Short delay to allow user to see success message
+ } else {
+ showStatus(verifyStatusElement, data.error || 'Invalid verification code', 'error');
+ }
+ })
+ .catch(error => {
+ console.error('Error verifying 2FA:', error);
+ showStatus(verifyStatusElement, 'Error verifying code: ' + error.message, 'error');
+ });
+ }
+
+ // 2FA disable handler
+ function handleDisableTwoFactor() {
+ const password = document.getElementById('currentPasswordFor2FADisable').value;
+ const otpCode = document.getElementById('otpCodeFor2FADisable').value;
+ const disableStatusElement = document.getElementById('disableStatus');
+
+ if (!password) {
+ showStatus(disableStatusElement, 'Please enter your current password', 'error');
+ return;
+ }
+
+ if (!otpCode || otpCode.length !== 6 || !/^\d{6}$/.test(otpCode)) {
+ showStatus(disableStatusElement, 'Please enter a valid 6-digit verification code', 'error');
+ return;
+ }
+
+ fetch('/api/user/2fa/disable', {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({
+ password: password,
+ code: otpCode
+ })
+ })
+ .then(response => response.json())
+ .then(data => {
+ if (data.success) {
+ showStatus(disableStatusElement, '2FA disabled successfully', 'success');
+ // Update UI state
+ setTimeout(() => {
+ update2FAStatus(false);
+ document.getElementById('currentPasswordFor2FADisable').value = '';
+ document.getElementById('otpCodeFor2FADisable').value = '';
+ }, 1500); // Short delay to allow user to see success message
+ } else {
+ showStatus(disableStatusElement, data.error || 'Failed to disable 2FA', 'error');
+ }
+ })
+ .catch(error => {
+ console.error('Error disabling 2FA:', error);
+ showStatus(disableStatusElement, 'Error disabling 2FA: ' + error.message, 'error');
+ });
+ }
+
+ // Helper function for validation
+ function validatePassword(password) {
+ // Only check for minimum length of 8 characters
+ if (password.length < 8) {
+ return 'Password must be at least 8 characters long.';
+ }
+ return null; // Password is valid
+ }
+
+ // Helper function to show status messages
+ function showStatus(element, message, type) {
+ if (!element) return;
+
+ element.textContent = message;
+ element.className = type === 'success' ? 'status-success' : 'status-error';
+ element.style.display = 'block';
+
+ // Auto-hide after 5 seconds
+ setTimeout(() => {
+ element.style.display = 'none';
+ }, 5000);
+ }
+
+ // Function to fetch user information
+ function fetchUserInfo() {
+ fetch('/api/user/info')
+ .then(response => {
+ if (!response.ok) {
+ throw new Error(`HTTP error! Status: ${response.status}`);
+ }
+ return response.json();
+ })
+ .then(data => {
+ // Update username elements
+ updateUsernameElements(data.username);
+
+ // Update 2FA status
+ update2FAStatus(data.is_2fa_enabled);
+ })
+ .catch(error => {
+ console.error('Error loading user info:', error);
+ // Show error state in the UI
+ showErrorState();
+ });
+ }
+
+ // Helper functions
+ function updateUsernameElements(username) {
+ if (!username) return;
+
+ const usernameElements = [
+ document.getElementById('username'),
+ document.getElementById('currentUsername')
+ ];
+
+ usernameElements.forEach(element => {
+ if (element) {
+ element.textContent = username;
+ }
+ });
+ }
+
+ function update2FAStatus(isEnabled) {
+ const statusElement = document.getElementById('twoFactorEnabled');
+ if (statusElement) {
+ statusElement.textContent = isEnabled ? 'Enabled' : 'Disabled';
+ }
+
+ // Update visibility of relevant sections
+ updateVisibility('enableTwoFactorSection', !isEnabled);
+ updateVisibility('setupTwoFactorSection', false);
+ updateVisibility('disableTwoFactorSection', isEnabled);
+ }
+
+ function updateVisibility(elementId, isVisible) {
+ const element = document.getElementById(elementId);
+ if (element) {
+ element.style.display = isVisible ? 'block' : 'none';
+ }
+ }
+
+ function showErrorState() {
+ const usernameElement = document.getElementById('currentUsername');
+ if (usernameElement) {
+ usernameElement.textContent = 'Error loading username';
+ }
+
+ const statusElement = document.getElementById('twoFactorEnabled');
+ if (statusElement) {
+ statusElement.textContent = 'Error loading status';
+ }
+ }
+})();
diff --git a/Huntarr.io-6.3.6/frontend/static/js/reset-stateful.js b/Huntarr.io-6.3.6/frontend/static/js/reset-stateful.js
new file mode 100644
index 0000000..e999184
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/static/js/reset-stateful.js
@@ -0,0 +1,95 @@
+// Direct approach: Create a new reset button next to the existing one
+
+// Run immediately
+(function() {
    // Function to create and insert our custom reset button.
    // Builds a standalone "Reset (Direct)" button next to the
    // stateful-management header. Retries via setTimeout until the header is
    // rendered, and is idempotent: bails out if the button already exists.
    // NOTE(review): the leading space in the innerHTML labels suggests an
    // icon element was lost during extraction — confirm against the
    // original markup.
    function createDirectResetButton() {
        console.log('Creating direct reset button');

        // Check if we're on the settings page and can find the stateful header
        const statefulHeader = document.querySelector('.stateful-header-row');
        if (!statefulHeader) {
            // Header not rendered yet — poll again shortly.
            console.log('Stateful header not found yet, waiting...');
            setTimeout(createDirectResetButton, 500);
            return;
        }

        // Check if we already added our button
        if (document.getElementById('direct_reset_btn')) {
            console.log('Direct reset button already exists');
            return;
        }

        // Create our new reset button (styled inline so it needs no CSS file)
        const resetBtn = document.createElement('button');
        resetBtn.id = 'direct_reset_btn';
        resetBtn.className = 'danger-reset-button';
        resetBtn.innerHTML = ' Reset (Direct)';
        resetBtn.style.marginLeft = '5px';
        resetBtn.style.backgroundColor = '#d9534f';
        resetBtn.style.color = 'white';
        resetBtn.style.border = 'none';
        resetBtn.style.borderRadius = '4px';
        resetBtn.style.padding = '8px 15px';
        resetBtn.style.cursor = 'pointer';

        // Add click handler
        resetBtn.onclick = function(e) {
            e.preventDefault();
            e.stopPropagation();
            console.log('Direct reset button clicked!');

            // Ask for confirmation
            if (!confirm('Are you sure you want to reset stateful management? This will clear all processed media IDs.')) {
                return false;
            }

            console.log('Reset confirmed, making API call');

            // Show loading state (`this` is the button inside this handler)
            this.disabled = true;
            this.innerHTML = ' Resetting...';

            // Make API call
            fetch('/api/stateful/reset', {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json'
                }
            })
            .then(function(response) {
                console.log('Got response:', response.status);
                if (!response.ok) {
                    throw new Error('Server returned ' + response.status);
                }
                return response.json();
            })
            .then(function(data) {
                // On success the whole page is reloaded, so no button restore
                // is needed on this path.
                console.log('Reset successful!', data);
                alert('Stateful management has been reset successfully!');
                window.location.reload();
            })
            .catch(function(error) {
                console.error('Reset failed:', error);
                alert('Reset failed: ' + error.message);
                // Restore the button so the user can retry.
                resetBtn.disabled = false;
                resetBtn.innerHTML = ' Reset (Direct)';
            });

            return false;
        };

        // Add the button to the page
        statefulHeader.appendChild(resetBtn);
        console.log('Direct reset button added to page!');
    }
+
+ // Try to create the button immediately
+ createDirectResetButton();
+
+ // Also try when the page is fully loaded
+ window.addEventListener('load', createDirectResetButton);
+
+ // And periodically check for the stateful header
+ setInterval(createDirectResetButton, 2000);
+})();
diff --git a/Huntarr.io-6.3.6/frontend/static/js/settings_forms.js b/Huntarr.io-6.3.6/frontend/static/js/settings_forms.js
new file mode 100644
index 0000000..44a0764
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/static/js/settings_forms.js
@@ -0,0 +1,1912 @@
+/**
+ * Settings forms for Huntarr
+ * This file handles generating HTML forms for each app's settings
+ */
+
+const SettingsForms = {
    // Generate Sonarr settings form.
    // Renders the Sonarr configuration UI (instance list + search settings)
    // into `container` and hooks up the add/remove/test instance controls.
    //
    // container - DOM element the form HTML is written into
    // settings  - saved Sonarr settings; legacy flat api_url/api_key values
    //             are migrated into a single default instance entry
    //
    // NOTE(review): the HTML template text below lost its markup tags during
    // extraction (only visible labels remain) and template whitespace was
    // normalized here — restore the element structure from the original file.
    generateSonarrForm: function(container, settings = {}) {
        // Add data-app-type attribute to container
        container.setAttribute('data-app-type', 'sonarr');

        // Make sure the instances array exists
        if (!settings.instances || !Array.isArray(settings.instances) || settings.instances.length === 0) {
            settings.instances = [{
                name: "Default",
                api_url: settings.api_url || "", // Legacy support
                api_key: settings.api_key || "", // Legacy support
                enabled: true
            }];
        }

        // Create a container for instances
        let instancesHtml = `
            Sonarr Instances
        `;

        // Generate form elements for each instance
        settings.instances.forEach((instance, index) => {
            instancesHtml += `
                Name:
                Friendly name for this Sonarr instance

                URL:
                Base URL for Sonarr (e.g., http://localhost:8989)

                API Key:
                API key for Sonarr

                Enabled:
            `;
        });

        // Button for adding more instances (the label shows the 9-instance cap).
        instancesHtml += `
            Add Sonarr Instance (${settings.instances.length}/9)
        `;

        // Search Settings
        let searchSettingsHtml = `
            Search Settings

            Missing Search Mode:
            Episodes
            Season Packs
            Shows
            How to search for missing Sonarr content (Season Packs recommended for torrent users)

            Missing Items to Search:
            Number of missing items to search per cycle (0 to disable)

            Upgrade Items to Search:
            Number of episodes to upgrade per cycle (0 to disable)

            Sleep Duration:
            Time in seconds between processing cycles

            Additional Options

            Monitored Only:
            Only search for monitored items

            Skip Future Episodes:
            Skip searching for episodes with future air dates

            Skip Series Refresh:
            Skip refreshing series metadata before searching
        `;

        // Set the content
        container.innerHTML = instancesHtml + searchSettingsHtml;

        // Setup instance management (add/remove/test)
        SettingsForms.setupInstanceManagement(container, 'sonarr', settings.instances.length);
    },
+
    // Generate Radarr settings form.
    // Renders the Radarr configuration UI (instance list + search settings)
    // into `container`, hooks up instance management, and toggles the
    // release-type selector based on the "Skip Future Releases" checkbox.
    //
    // NOTE(review): the HTML template text below lost its markup tags during
    // extraction and template whitespace was normalized here — restore the
    // element structure from the original file.
    generateRadarrForm: function(container, settings = {}) {
        // Add data-app-type attribute to container
        container.setAttribute('data-app-type', 'radarr');

        // Make sure the instances array exists
        if (!settings.instances || !Array.isArray(settings.instances) || settings.instances.length === 0) {
            settings.instances = [{
                name: "Default",
                api_url: settings.api_url || "",
                api_key: settings.api_key || "",
                enabled: true
            }];
        }

        // Create a container for instances with a scrollable area for many instances
        let instancesHtml = `
            Radarr Instances
        `;

        // Generate form elements for each instance
        settings.instances.forEach((instance, index) => {
            instancesHtml += `
                Name:
                Friendly name for this Radarr instance

                URL:
                Base URL for Radarr (e.g., http://localhost:7878)

                API Key:
                API key for Radarr

                Enabled:
            `;
        });

        // Add a button to add new instances (limit to 9 total)
        instancesHtml += `
            Add Radarr Instance (${settings.instances.length}/9)
        `;

        // Continue with the rest of the settings form
        container.innerHTML = `
            ${instancesHtml}

            Search Settings

            Missing Movies to Search:
            Number of missing movies to search per cycle (0 to disable)

            Movies to Upgrade:
            Number of movies to search for quality upgrades per cycle (0 to disable)

            Sleep Duration:
            Time in seconds between processing cycles

            Additional Options

            Monitored Only:
            Only search for monitored items

            Skip Future Releases:
            Skip searching for movies with future release dates

            Release Type for Future Status:
            Digital Release
            Physical Release
            Cinema Release
            Select which release date type to use when determining if a movie is considered a future release

            Skip Movie Refresh:
            Skip refreshing movie metadata before searching
        `;

        // Add event listeners for the instance management
        SettingsForms.setupInstanceManagement(container, 'radarr', settings.instances.length);

        // Set up event listeners for the skip_future_releases checkbox
        const skipFutureCheckbox = container.querySelector('#skip_future_releases');
        const releaseTypeContainer = container.querySelector('#future_release_type_container');

        if (skipFutureCheckbox) {
            // NOTE(review): releaseTypeContainer is dereferenced inside the
            // handler without a null check — confirm the container element is
            // always rendered whenever the checkbox is.
            skipFutureCheckbox.addEventListener('change', function() {
                if (this.checked) {
                    releaseTypeContainer.style.display = '';
                } else {
                    releaseTypeContainer.style.display = 'none';
                }
            });
        }
    },
+
    // Generate Lidarr settings form.
    // Renders the Lidarr configuration UI (instance list + search settings)
    // into `container` and hooks up instance management.
    //
    // NOTE(review): the HTML template text below lost its markup tags during
    // extraction and template whitespace was normalized here — restore the
    // element structure from the original file.
    generateLidarrForm: function(container, settings = {}) {
        // Add data-app-type attribute to container
        container.setAttribute('data-app-type', 'lidarr');

        // Make sure the instances array exists
        if (!settings.instances || !Array.isArray(settings.instances) || settings.instances.length === 0) {
            settings.instances = [{
                name: "Default",
                api_url: settings.api_url || "", // Legacy support
                api_key: settings.api_key || "", // Legacy support
                enabled: true
            }];
        }

        // Create a container for instances
        let instancesHtml = `
            Lidarr Instances
        `;

        // Generate form elements for each instance
        settings.instances.forEach((instance, index) => {
            instancesHtml += `
                Name:
                Friendly name for this Lidarr instance

                URL:
                Base URL for Lidarr (e.g., http://localhost:8686)

                API Key:
                API key for Lidarr

                Enabled:
            `;
        });

        // Button for adding more instances (the label shows the 9-instance cap).
        instancesHtml += `
            Add Lidarr Instance (${settings.instances.length}/9)
        `;

        // Continue with the rest of the settings form
        container.innerHTML = `
            ${instancesHtml}

            Search Settings

            Missing Search Mode:
            Artist
            Album
            Whether to search by artist (all missing albums for artist) or individual albums

            Missing Items to Search:
            Number of artists with missing albums to search per cycle (0 to disable)

            Items to Upgrade:
            Number of albums to search for quality upgrades per cycle (0 to disable)

            Sleep Duration:
            Time in seconds between processing cycles

            Additional Options

            Monitored Only:
            Only search for monitored items

            Skip Future Releases:
            Skip searching for albums with future release dates

            Skip Artist Refresh:
            Skip refreshing artist metadata before searching
        `;

        // Add event listeners for the instance management
        SettingsForms.setupInstanceManagement(container, 'lidarr', settings.instances.length);
    },
+
    // Generate Readarr settings form.
    // Renders the Readarr configuration UI (instance list + search settings)
    // into `container` and hooks up instance management.
    //
    // NOTE(review): the HTML template text below lost its markup tags during
    // extraction and template whitespace was normalized here — restore the
    // element structure from the original file.
    generateReadarrForm: function(container, settings = {}) {
        // Add data-app-type attribute to container
        container.setAttribute('data-app-type', 'readarr');

        // Make sure the instances array exists
        if (!settings.instances || !Array.isArray(settings.instances) || settings.instances.length === 0) {
            settings.instances = [{
                name: "Default",
                api_url: settings.api_url || "", // Legacy support
                api_key: settings.api_key || "", // Legacy support
                enabled: true
            }];
        }

        // Create a container for instances
        let instancesHtml = `
            Readarr Instances
        `;

        // Generate form elements for each instance
        settings.instances.forEach((instance, index) => {
            instancesHtml += `
                Name:
                Friendly name for this Readarr instance

                URL:
                Base URL for Readarr (e.g., http://localhost:8787)

                API Key:
                API key for Readarr

                Enabled:
            `;
        });

        // Button for adding more instances (the label shows the 9-instance cap).
        instancesHtml += `
            Add Readarr Instance (${settings.instances.length}/9)
        `;

        // Continue with the rest of the settings form
        container.innerHTML = `
            ${instancesHtml}

            Search Settings

            Missing Books to Search:
            Number of missing books to search per cycle (0 to disable)

            Books to Upgrade:
            Number of books to search for quality upgrades per cycle (0 to disable)

            Sleep Duration:
            Time in seconds between processing cycles

            Additional Options

            Monitored Only:
            Only search for monitored items

            Skip Future Releases:
            Skip searching for books with future release dates

            Skip Author Refresh:
            Skip refreshing author metadata before searching
        `;

        // Add event listeners for the instance management
        SettingsForms.setupInstanceManagement(container, 'readarr', settings.instances.length);
    },
+
    // Generate Whisparr (V2) settings form.
    // Renders the Whisparr V2 configuration UI (instance list + search
    // settings) into `container`, hooks up instance management, and refreshes
    // the sleep-duration display.
    //
    // NOTE(review): the HTML template text below lost its markup tags during
    // extraction and template whitespace was normalized here — restore the
    // element structure from the original file.
    generateWhisparrForm: function(container, settings = {}) {
        // Add data-app-type attribute to container
        container.setAttribute('data-app-type', 'whisparr');

        // Make sure the instances array exists
        if (!settings.instances || !Array.isArray(settings.instances) || settings.instances.length === 0) {
            settings.instances = [{
                name: "Default",
                api_url: "",
                api_key: "",
                enabled: true
            }];
        }

        // Create a container for instances
        let instancesHtml = `
            Whisparr V2 Instances
        `;

        // Generate form elements for each instance
        settings.instances.forEach((instance, index) => {
            instancesHtml += `
                Name:
                Friendly name for this Whisparr V2 instance

                URL:
                Base URL for Whisparr V2 (e.g., http://localhost:6969)

                API Key:
                API key for Whisparr V2

                Enabled:
            `;
        });

        // Button for adding more instances (the label shows the 9-instance cap).
        instancesHtml += `
            Add Whisparr V2 Instance (${settings.instances.length}/9)
        `;

        // Search Settings
        let searchSettingsHtml = `
            Search Settings

            Missing Items to Search:
            Number of missing items to search per cycle (0 to disable)

            Items to Upgrade:
            Number of items to search for quality upgrades per cycle (0 to disable)

            Sleep Duration:
            Time in seconds between processing cycles

            Additional Options

            Monitored Only:
            Only search for monitored items

            Skip Future Releases:
            Skip searching for scenes with future release dates

            Skip Series Refresh:
            Skip refreshing series metadata before searching

            Skip Scene Refresh:
            Skip refreshing scene info before searching
        `;

        // Set the content
        container.innerHTML = instancesHtml + searchSettingsHtml;

        // Add event listeners for the instance management
        this.setupInstanceManagement(container, 'whisparr', settings.instances.length);

        // Update duration display
        this.updateDurationDisplay();
    },
+
    // Generate Eros (Whisparr V3) settings form.
    // Renders the Whisparr V3 configuration UI (instance list + search
    // settings) into `container`, hooks up instance management, and refreshes
    // the sleep-duration display. The internal app key is 'eros'.
    //
    // NOTE(review): the HTML template text below lost its markup tags during
    // extraction and template whitespace was normalized here — restore the
    // element structure from the original file.
    generateErosForm: function(container, settings = {}) {
        // Add data-app-type attribute to container
        container.setAttribute('data-app-type', 'eros');

        // Make sure the instances array exists
        if (!settings.instances || !Array.isArray(settings.instances) || settings.instances.length === 0) {
            settings.instances = [{
                name: "Default",
                api_url: "",
                api_key: "",
                enabled: true
            }];
        }

        // Create a container for instances
        let instancesHtml = `
            Whisparr V3 Instances
        `;

        // Generate form elements for each instance
        settings.instances.forEach((instance, index) => {
            instancesHtml += `
                Name:
                Friendly name for this Whisparr V3 instance

                URL:
                Base URL for Whisparr V3 (e.g., http://localhost:6969)

                API Key:
                API key for Whisparr V3

                Enabled:
            `;
        });

        // Button for adding more instances (the label shows the 9-instance cap).
        instancesHtml += `
            Add Whisparr V3 Instance (${settings.instances.length}/9)
        `;

        // Search Mode dropdown
        let searchSettingsHtml = `
            Search Settings

            Search Mode:
            Movie
            Scene
            How to search for missing and upgradable Whisparr V3 content (Movie-based or Scene-based)

            Missing Items to Search:
            Number of missing items to search per cycle (0 to disable)

            Items to Upgrade:
            Number of items to search for quality upgrades per cycle (0 to disable)

            Sleep Duration:
            Time in seconds between processing cycles

            Additional Options

            Monitored Only:
            Only search for monitored items

            Skip Future Releases:
            Skip searching for scenes with future release dates

            Skip Movie/Scene Refresh:
            Skip refreshing movie metadata before searching (strongly recommended to enable this for Whisparr V3)
        `;

        // Set the content
        container.innerHTML = instancesHtml + searchSettingsHtml;

        // Add event listeners for the instance management
        this.setupInstanceManagement(container, 'eros', settings.instances.length);

        // Update duration display
        this.updateDurationDisplay();
    },
+
+ // Generate Swaparr settings form
+ generateSwaparrForm: function(container, settings = {}) {
+ // Add data-app-type attribute to container
+ container.setAttribute('data-app-type', 'swaparr');
+
+ container.innerHTML = `
+
+
Swaparr (Beta) - Only For Torrent Users
+
+
Swaparr addresses the issue of stalled downloads and I rewrote it to support Huntarr. Visit Swaparr's GitHub for more information and support the developer!
+
+
+
+
+
Swaparr Settings
+
+
Enable Swaparr:
+
+
+
+
+
Enable automatic handling of stalled downloads
+
+
+
Maximum Strikes:
+
+
Number of strikes before removing a stalled download
+
+
+
Max Download Time:
+
+
Maximum time a download can be stalled (e.g., 30m, 2h, 1d)
+
+
+
Ignore Above Size:
+
+
Ignore files larger than this size (e.g., 1GB, 25GB, 1TB)
+
+
+
Remove From Client:
+
+
+
+
+
Remove the download from the torrent/usenet client when removed
+
+
+
Dry Run Mode:
+
+
+
+
+
Log actions but don't actually remove downloads. Useful for testing the first time!
+
+
+
+
+
Swaparr Status
+
+
+
+ Reset
+
+
+
+
Loading Swaparr status...
+
+
+
+ `;
+
+ // Load Swaparr status automatically
+ const resetStrikesBtn = container.querySelector('#reset_swaparr_strikes');
+ const statusContainer = container.querySelector('#swaparr_status');
+
+ fetch('/api/swaparr/status')
+ .then(response => response.json())
+ .then(data => {
+ let statusHTML = '';
+
+ // Add stats for each app if available
+ if (data.statistics && Object.keys(data.statistics).length > 0) {
+ statusHTML += '';
+
+ for (const [app, stats] of Object.entries(data.statistics)) {
+ statusHTML += `${app.toUpperCase()} : `;
+ if (stats.error) {
+ statusHTML += `Error: ${stats.error} `;
+ } else {
+ statusHTML += `${stats.currently_striked} currently striked, ${stats.removed} removed (${stats.total_tracked} total tracked)`;
+ }
+ }
+
+ statusHTML += ' ';
+ } else {
+ statusHTML += 'No statistics available yet.
';
+ }
+
+ statusContainer.innerHTML = statusHTML;
+ })
+ .catch(error => {
+ console.error('Error loading Swaparr status:', error);
+ statusContainer.innerHTML = `Error fetching status: ${error.message}
`;
+ });
+
+ // Add event listener for the Reset Strikes button
+ if (resetStrikesBtn) {
+ resetStrikesBtn.addEventListener('click', function() {
+ if (confirm('Are you sure you want to reset all Swaparr strikes? This will clear the strike history for all apps.')) {
+ statusContainer.innerHTML = 'Resetting strikes...
';
+
+ fetch('/api/swaparr/reset', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify({})
+ })
+ .then(response => response.json())
+ .then(data => {
+ if (data.success) {
+ statusContainer.innerHTML = `Success: ${data.message}
`;
+ // Reload status after a short delay
+ setTimeout(() => {
+ fetch('/api/swaparr/status')
+ .then(response => response.json())
+ .then(data => {
+ let statusHTML = '';
+ if (data.statistics && Object.keys(data.statistics).length > 0) {
+ statusHTML += '';
+ for (const [app, stats] of Object.entries(data.statistics)) {
+ statusHTML += `${app.toUpperCase()} : `;
+ if (stats.error) {
+ statusHTML += `Error: ${stats.error} `;
+ } else {
+ statusHTML += `${stats.currently_striked} currently striked, ${stats.removed} removed (${stats.total_tracked} total tracked)`;
+ }
+ }
+ statusHTML += ' ';
+ } else {
+ statusHTML += 'No statistics available yet.
';
+ }
+ statusContainer.innerHTML = statusHTML;
+ });
+ }, 1000);
+ } else {
+ statusContainer.innerHTML = `Error: ${data.message}
`;
+ }
+ })
+ .catch(error => {
+ statusContainer.innerHTML = `Error resetting strikes: ${error.message}
`;
+ });
+ }
+ });
+ } else if (!resetStrikesBtn) {
+ console.warn('Could not find #reset_swaparr_strikes to attach listener.');
+ } else {
+ console.warn('huntarrUI or huntarrUI.resetStatefulManagement is not available.');
+ }
+
+ // Add confirmation dialog for local access bypass toggle
+ const localAccessBypassCheckbox = container.querySelector('#local_access_bypass');
+ if (localAccessBypassCheckbox) {
+ // Store original state
+ const originalState = localAccessBypassCheckbox.checked;
+
+ localAccessBypassCheckbox.addEventListener('change', function() {
+ const newState = this.checked;
+
+ // Preview the UI changes immediately, but they'll be reverted if user doesn't save
+ if (typeof huntarrUI !== 'undefined' && typeof huntarrUI.updateUIForLocalAccessBypass === 'function') {
+ huntarrUI.updateUIForLocalAccessBypass(newState);
+ }
+ // Also ensure the main app knows settings have changed if the preview runs
+ if (typeof huntarrUI !== 'undefined' && typeof huntarrUI.markSettingsAsChanged === 'function') {
+ huntarrUI.markSettingsAsChanged();
+ }
+ });
+ }
+ },
+
+ // Format date nicely for display
+ formatDate: function(date) {
+ if (!date) return 'Never';
+
+ const options = {
+ year: 'numeric',
+ month: 'short',
+ day: 'numeric',
+ hour: '2-digit',
+ minute: '2-digit',
+ hour12: true
+ };
+
+ return date.toLocaleString('en-US', options);
+ },
+
+ // Get settings from form
+ getFormSettings: function(container, appType) {
+ let settings = {};
+
+ // Helper function to get input value with fallback
+ function getInputValue(selector, defaultValue) {
+ const element = container.querySelector(selector);
+ if (!element) return defaultValue;
+
+ if (element.type === 'checkbox') {
+ return element.checked;
+ } else if (element.type === 'number') {
+ const parsedValue = parseInt(element.value);
+ return !isNaN(parsedValue) ? parsedValue : defaultValue;
+ } else {
+ return element.value || defaultValue;
+ }
+ }
+
+ // For the general settings form, collect settings including advanced settings
+ if (appType === 'general') {
+ settings.themeName = getInputValue('#theme-select', 'dark');
+ settings.resetInterval = getInputValue('#resetInterval', 168);
+ settings.clearCache = getInputValue('#clearCache', false);
+ settings.refreshSchedule = getInputValue('#refreshSchedule', false);
+ settings.disableSorting = getInputValue('#disableSorting', false);
+ settings.disableNotifications = getInputValue('#disableNotifications', false);
+ settings.openInNewTab = getInputValue('#openInNewTab', false);
+ settings.saveColumnSortState = getInputValue('#saveColumnSortState', true);
+ settings.disableAnimation = getInputValue('#disableAnimation', false);
+ settings.useCompactLayout = getInputValue('#useCompactLayout', false);
+ settings.disableAllowListPopup = getInputValue('#disableAllowListPopup', false);
+ settings.maxHistoryItems = getInputValue('#maxHistoryItems', 100);
+ settings.maxLogItems = getInputValue('#maxLogItems', 200);
+ settings.stateful_management_hours = getInputValue('#stateful_management_hours', 168);
+ settings.autoLoginWithoutPassword = getInputValue('#autoLoginWithoutPassword', false);
+
+ // Add collection of advanced settings
+ settings.api_timeout = getInputValue('#api_timeout', 120);
+ settings.command_wait_delay = getInputValue('#command_wait_delay', 1);
+ settings.command_wait_attempts = getInputValue('#command_wait_attempts', 600);
+ settings.minimum_download_queue_size = getInputValue('#minimum_download_queue_size', -1);
+ settings.log_refresh_interval_seconds = getInputValue('#log_refresh_interval_seconds', 30);
+ }
+
+ // For other app types, collect settings
+ else {
+ // Handle instances differently
+ const instances = [];
+ // Find instance containers with both old and new class names
+ const instanceContainers = container.querySelectorAll('.instance-item, .instance-panel');
+
+ // Collect instance data with improved error handling
+ instanceContainers.forEach((instance, index) => {
+ const nameInput = instance.querySelector('input[name="name"]');
+ const urlInput = instance.querySelector('input[name="api_url"]');
+ const keyInput = instance.querySelector('input[name="api_key"]');
+ const enabledInput = instance.querySelector('input[name="enabled"]');
+
+ const name = nameInput ? nameInput.value : null;
+ const url = urlInput ? urlInput.value : null;
+ const key = keyInput ? keyInput.value : null;
+ const enabled = enabledInput ? enabledInput.checked : true; // Default to enabled if checkbox not found
+
+ if (!name || !url || !key) {
+ console.warn(`Instance ${index} is missing required fields`);
+ }
+
+ const instanceObj = {
+ name: name || `Instance ${index + 1}`,
+ api_url: url || "",
+ api_key: key || "",
+ enabled: enabled
+ };
+
+ instances.push(instanceObj);
+ });
+
+ // Ensure we always have at least one instance
+ if (instances.length === 0) {
+ console.warn('No instances found, adding a default empty instance');
+ instances.push({
+ name: 'Default',
+ api_url: '',
+ api_key: '',
+ enabled: true
+ });
+ }
+
+ settings.instances = instances;
+
+ // Add app-specific settings
+ if (appType === 'sonarr') {
+ settings.hunt_missing_mode = getInputValue('#sonarr-hunt-missing-mode', 'episodes');
+ settings.hunt_missing_items = getInputValue('#sonarr-hunt-missing-items', 1);
+ settings.hunt_upgrade_items = getInputValue('#sonarr-hunt-upgrade-items', 0);
+ settings.sleep_duration = getInputValue('#sonarr_sleep_duration', 900);
+ settings.monitored_only = getInputValue('#sonarr_monitored_only', true);
+ settings.skip_future_episodes = getInputValue('#sonarr_skip_future_episodes', true);
+ settings.skip_series_refresh = getInputValue('#sonarr_skip_series_refresh', false);
+ }
+ else if (appType === 'radarr') {
+ settings.hunt_missing_movies = getInputValue('#radarr_hunt_missing_movies', 1);
+ settings.hunt_upgrade_movies = getInputValue('#radarr_hunt_upgrade_movies', 0);
+ settings.monitored_only = getInputValue('#radarr_monitored_only', true);
+ settings.skip_future_releases = getInputValue('#skip_future_releases', true);
+ settings.skip_movie_refresh = getInputValue('#skip_movie_refresh', false);
+ settings.sleep_duration = getInputValue('#radarr_sleep_duration', 900);
+ settings.release_type = getInputValue('#release_type', 'physical');
+ }
+ else if (appType === 'lidarr') {
+ settings.hunt_missing_items = getInputValue('#lidarr_hunt_missing_items', 1);
+ settings.hunt_upgrade_items = getInputValue('#lidarr_hunt_upgrade_items', 0);
+ settings.hunt_missing_mode = getInputValue('#lidarr_hunt_missing_mode', 'artist');
+ settings.monitored_only = getInputValue('#lidarr_monitored_only', true);
+ settings.sleep_duration = getInputValue('#lidarr_sleep_duration', 900);
+ }
+ else if (appType === 'readarr') {
+ settings.hunt_missing_books = getInputValue('#readarr_hunt_missing_books', 1);
+ settings.hunt_upgrade_books = getInputValue('#readarr_hunt_upgrade_books', 0);
+ settings.monitored_only = getInputValue('#readarr_monitored_only', true);
+ settings.skip_future_releases = getInputValue('#readarr_skip_future_releases', true);
+ settings.skip_author_refresh = getInputValue('#skip_author_refresh', false);
+ settings.sleep_duration = getInputValue('#readarr_sleep_duration', 900);
+ }
+ else if (appType === 'whisparr') {
+ settings.hunt_missing_items = getInputValue('#whisparr_hunt_missing_items', 1);
+ settings.hunt_upgrade_items = getInputValue('#whisparr_hunt_upgrade_items', 0);
+ settings.monitored_only = getInputValue('#whisparr_monitored_only', true);
+ settings.whisparr_version = getInputValue('#whisparr-api-version', 'v3');
+ settings.skip_future_releases = getInputValue('#whisparr_skip_future_releases', true);
+ settings.sleep_duration = getInputValue('#whisparr_sleep_duration', 900);
+ }
+ else if (appType === 'eros') {
+ settings.search_mode = getInputValue('#eros_search_mode', 'movie');
+ settings.hunt_missing_items = getInputValue('#eros_hunt_missing_items', 1);
+ settings.hunt_upgrade_items = getInputValue('#eros_hunt_upgrade_items', 0);
+ settings.monitored_only = getInputValue('#eros_monitored_only', true);
+ settings.skip_future_releases = getInputValue('#eros_skip_future_releases', true);
+ settings.skip_item_refresh = getInputValue('#eros_skip_item_refresh', false);
+ settings.sleep_duration = getInputValue('#eros_sleep_duration', 900);
+ }
+ else if (appType === 'swaparr') {
+ settings.enabled = getInputValue('#swaparr_enabled', false);
+ settings.max_strikes = getInputValue('#swaparr_max_strikes', 3);
+ settings.max_download_time = getInputValue('#swaparr_max_download_time', '2h');
+ settings.ignore_above_size = getInputValue('#swaparr_ignore_above_size', '25GB');
+ settings.remove_from_client = getInputValue('#swaparr_remove_from_client', true);
+ settings.dry_run = getInputValue('#swaparr_dry_run', false);
+ }
+ }
+
+ console.log('Collected settings for', appType, settings);
+ return settings;
+ },
+
+ /**
+  * Render the General settings form into `container` and wire up its
+  * dynamic pieces: the hours-to-days display for the state reset interval,
+  * and loading/caching of stateful-management info from the backend.
+  *
+  * @param {HTMLElement} container - Element that receives the form HTML.
+  * @param {Object} [settings={}] - Previously saved general settings used to
+  *     pre-fill the inputs.
+  */
+ generateGeneralForm: function(container, settings = {}) {
+ // Add data-app-type attribute to container
+ container.setAttribute('data-app-type', 'general');
+
+ // NOTE(review): the HTML template below appears corrupted in this diff
+ // (element markup has been stripped out) — restore it from the original
+ // source file before relying on this block.
+ container.innerHTML = `
+
+
System Settings
+
+
Check for Updates:
+
+
+
+
+
Automatically check for Huntarr updates
+
+
+
Debug Mode:
+
+
+
+
+
Enable verbose logging for troubleshooting (applies to all apps)
+
+
+
Log Refresh Interval:
+
+
Interval in seconds to refresh log display (applies to all apps)
+
+
+
+
+
+
+
+ Failed to load stateful management info. Check logs for details.
+
+
+
+
Initial State Created:
+
Loading...
+
+
+
State Reset Date:
+
Loading...
+
+
+
+
+
State Reset Interval (Hours):
+
+
Hours before resetting processed media state (${((settings.stateful_management_hours || 168) / 24).toFixed(1)} days )
+
Reset clears all processed media IDs to allow reprocessing
+
+
+
+
+
Security
+
+
Local Network Auth Bypass:
+
+
+
+
+
Allow access without login when connecting from local network IP addresses (e.g., 192.168.x.x, 10.x.x.x)
+
+
+
+
+
Advanced Settings
+
+
API Timeout:
+
+
API request timeout in seconds
+
+
+
Command Wait Delay:
+
+
Delay between command status checks in seconds
+
+
+
Command Wait Attempts:
+
+
Maximum number of attempts to check command status
+
+
+ Maximum Download Queue Size:
+
+ If the current download queue for an app instance exceeds this value, downloads will be skipped until the queue reduces. Set to -1 to disable this limit. This setting applies per app instance.
+
+
+
Log Refresh Interval:
+
+
How often Huntarr refreshes logs from apps (seconds)
+
+
+ `;
+
+ // Get hours input and days span elements once
+ const statefulHoursInput = container.querySelector('#stateful_management_hours');
+ const statefulDaysSpan = container.querySelector('#stateful_management_days');
+
+ // Keep the "N days" label in sync as the user edits the hours field.
+ if (statefulHoursInput && statefulDaysSpan) {
+ statefulHoursInput.addEventListener('input', function() {
+ const hours = parseInt(this.value);
+ const days = (hours / 24).toFixed(1);
+ statefulDaysSpan.textContent = `${days} days`;
+ });
+ }
+
+ // Load stateful management info
+ const createdDateEl = document.getElementById('stateful_initial_state');
+ const expiresDateEl = document.getElementById('stateful_expires_date');
+
+ // Skip loading if huntarrUI has already loaded this data to prevent flashing
+ if (window.huntarrUI && window.huntarrUI._cachedStatefulData) {
+ console.log('[SettingsForms] Using existing huntarrUI cached stateful data');
+ return; // Exit early - main.js already has this covered
+ }
+
+ // Only set to Loading if not already populated
+ if (createdDateEl && (!createdDateEl.textContent || createdDateEl.textContent === 'N/A')) {
+ createdDateEl.textContent = 'Loading...';
+ }
+ if (expiresDateEl && (!expiresDateEl.textContent || expiresDateEl.textContent === 'N/A')) {
+ expiresDateEl.textContent = 'Loading...';
+ }
+
+ // Check if data is already cached in localStorage
+ const cachedStatefulData = localStorage.getItem('huntarr-stateful-data');
+ if (cachedStatefulData) {
+ try {
+ const parsedData = JSON.parse(cachedStatefulData);
+ const cacheAge = Date.now() - parsedData.timestamp;
+
+ // Use cache if it's less than 5 minutes old
+ if (cacheAge < 300000) {
+ console.log('[SettingsForms] Using cached stateful data');
+
+ if (createdDateEl && parsedData.created_at_ts) {
+ const createdDate = new Date(parsedData.created_at_ts * 1000);
+ createdDateEl.textContent = this.formatDate(createdDate);
+ }
+
+ if (expiresDateEl && parsedData.expires_at_ts) {
+ const expiresDate = new Date(parsedData.expires_at_ts * 1000);
+ expiresDateEl.textContent = this.formatDate(expiresDate);
+ }
+
+ // Still fetch fresh data in the background, but don't update UI
+ fetchStatefulInfoSilently();
+ return;
+ }
+ } catch (e) {
+ console.warn('[SettingsForms] Error parsing cached stateful data:', e);
+ }
+ }
+
+ // No fresh cache: fetch from the backend and refresh both the UI and cache.
+ fetch('/api/stateful/info', {
+ cache: 'no-cache',
+ headers: {
+ 'Cache-Control': 'no-cache, no-store, must-revalidate',
+ 'Pragma': 'no-cache',
+ 'Expires': '0'
+ }
+ })
+ .then(response => {
+ if (!response.ok) {
+ throw new Error(`HTTP error! Status: ${response.status}`);
+ }
+ return response.json();
+ })
+ .then(data => {
+ // Cache the response with a timestamp for future use
+ localStorage.setItem('huntarr-stateful-data', JSON.stringify({
+ ...data,
+ timestamp: Date.now()
+ }));
+
+ if (createdDateEl) {
+ if (data.created_at_ts) {
+ const createdDate = new Date(data.created_at_ts * 1000);
+ createdDateEl.textContent = this.formatDate(createdDate);
+ } else {
+ createdDateEl.textContent = 'Not yet created';
+ }
+ }
+
+ if (expiresDateEl) {
+ if (data.expires_at_ts) {
+ const expiresDate = new Date(data.expires_at_ts * 1000);
+ expiresDateEl.textContent = this.formatDate(expiresDate);
+ } else {
+ expiresDateEl.textContent = 'Not set';
+ }
+ }
+
+ // Store data for other components to use
+ if (window.huntarrUI) {
+ window.huntarrUI._cachedStatefulData = data;
+ }
+ })
+ .catch(error => {
+ console.error('Error loading stateful info:', error);
+
+ // Try using cached data as fallback
+ if (cachedStatefulData) {
+ try {
+ const parsedData = JSON.parse(cachedStatefulData);
+
+ if (createdDateEl && parsedData.created_at_ts) {
+ const createdDate = new Date(parsedData.created_at_ts * 1000);
+ createdDateEl.textContent = this.formatDate(createdDate) + ' (cached)';
+ } else if (createdDateEl) {
+ createdDateEl.textContent = 'Not available';
+ }
+
+ if (expiresDateEl && parsedData.expires_at_ts) {
+ const expiresDate = new Date(parsedData.expires_at_ts * 1000);
+ expiresDateEl.textContent = this.formatDate(expiresDate) + ' (cached)';
+ } else if (expiresDateEl) {
+ expiresDateEl.textContent = 'Not available';
+ }
+ } catch (e) {
+ if (createdDateEl) createdDateEl.textContent = 'Not available';
+ if (expiresDateEl) expiresDateEl.textContent = 'Not available';
+ }
+ } else {
+ if (createdDateEl) createdDateEl.textContent = 'Not available';
+ if (expiresDateEl) expiresDateEl.textContent = 'Not available';
+ }
+ });
+
+ // Helper function to fetch data silently without updating UI
+ // (hoisted function declaration, so the call above is safe).
+ function fetchStatefulInfoSilently() {
+ fetch('/api/stateful/info', {
+ cache: 'no-cache',
+ headers: {
+ 'Cache-Control': 'no-cache, no-store, must-revalidate',
+ 'Pragma': 'no-cache',
+ 'Expires': '0'
+ }
+ })
+ .then(response => response.ok ? response.json() : null)
+ .then(data => {
+ if (data && data.success) {
+ localStorage.setItem('huntarr-stateful-data', JSON.stringify({
+ ...data,
+ timestamp: Date.now()
+ }));
+
+ if (window.huntarrUI) {
+ window.huntarrUI._cachedStatefulData = data;
+ }
+ }
+ })
+ .catch(error => console.warn('Silent stateful info fetch failed:', error));
+ }
+ },
+
+ // Update duration display - e.g., convert seconds to hours
+ updateDurationDisplay: function() {
+ // Function to update a specific sleep duration display
+ const updateSleepDisplay = function(inputId, spanId) {
+ const input = document.getElementById(inputId);
+ const span = document.getElementById(spanId);
+ if (!input || !span) return;
+
+ const seconds = parseInt(input.value);
+ if (isNaN(seconds)) return;
+
+ const hours = (seconds / 3600).toFixed(1);
+ if (hours < 1) {
+ const minutes = Math.round(seconds / 60);
+ span.textContent = `${minutes} minutes`;
+ } else {
+ span.textContent = `${hours} hours`;
+ }
+ };
+
+ // Update for each app
+ updateSleepDisplay('sleep_duration', 'sleep_duration_hours');
+ updateSleepDisplay('radarr_sleep_duration', 'radarr_sleep_duration_hours');
+ updateSleepDisplay('lidarr_sleep_duration', 'lidarr_sleep_duration_hours');
+ updateSleepDisplay('readarr_sleep_duration', 'readarr_sleep_duration_hours');
+ updateSleepDisplay('whisparr_sleep_duration', 'whisparr_sleep_duration_hours'); // Added Whisparr
+ },
+
+ /**
+  * Wire up per-instance UI for an app's settings form: the "test connection"
+  * button on each instance panel, the "add instance" button (capped at 9
+  * instances), and the "remove instance" buttons.
+  *
+  * @param {HTMLElement} container - The app's settings form container.
+  * @param {string} appType - App id (e.g. 'sonarr') used in API URLs and labels.
+  * @param {number} initialCount - Number of instances rendered initially
+  *     (used only for logging here).
+  */
+ setupInstanceManagement: function(container, appType, initialCount) {
+ console.log(`Setting up instance management for ${appType} with ${initialCount} instances`);
+
+ // Make sure container has the app type set
+ const form = container.closest('.settings-form');
+ if (form && !form.hasAttribute('data-app-type')) {
+ form.setAttribute('data-app-type', appType);
+ }
+
+ // Add listeners for test connection buttons
+ const testButtons = container.querySelectorAll('.test-connection-btn');
+ testButtons.forEach(button => {
+ button.addEventListener('click', (e) => {
+ // Prevent any default form submission
+ e.preventDefault();
+
+ console.log('Test connection button clicked');
+
+ // Get the instance panel containing this button - look for both old and new class names
+ const instancePanel = button.closest('.instance-item') || button.closest('.instance-panel');
+ if (!instancePanel) {
+ console.error('Could not find instance panel for test button', button);
+ alert('Error: Could not find instance panel');
+ return;
+ }
+
+ // Get the URL and API key inputs directly within this instance panel
+ const urlInput = instancePanel.querySelector('input[name="api_url"]');
+ const keyInput = instancePanel.querySelector('input[name="api_key"]');
+
+ console.log('Found inputs:', urlInput, keyInput);
+
+ if (!urlInput || !keyInput) {
+ console.error('Could not find URL or API key inputs in panel', instancePanel);
+ alert('Error: Could not find URL or API key inputs');
+ return;
+ }
+
+ const url = urlInput.value.trim();
+ const apiKey = keyInput.value.trim();
+
+ console.log(`Testing connection for ${appType} - URL: ${url}, API Key: ${apiKey.substring(0, 5)}...`);
+
+ if (!url) {
+ alert('Please enter a valid URL');
+ urlInput.focus();
+ return;
+ }
+
+ if (!apiKey) {
+ alert('Please enter a valid API key');
+ keyInput.focus();
+ return;
+ }
+
+ // Show testing status
+ const originalButtonHTML = button.innerHTML;
+ button.innerHTML = ' Testing...';
+ button.disabled = true;
+
+ // Make the API request
+ fetch(`/api/${appType}/test-connection`, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify({
+ api_url: url,
+ api_key: apiKey
+ })
+ })
+ .then(response => {
+ if (!response.ok) {
+ throw new Error(`HTTP error ${response.status}: ${response.statusText}`);
+ }
+ return response.json();
+ })
+ .then(data => {
+ console.log(`Test connection response:`, data);
+
+ // Reset button
+ button.disabled = false;
+
+ if (data.success) {
+ // Success
+ button.innerHTML = ' Connected!';
+ button.classList.add('test-success');
+
+ let successMessage = `Successfully connected to ${appType.charAt(0).toUpperCase() + appType.slice(1)}`;
+ if (data.version) {
+ successMessage += ` (version ${data.version})`;
+ }
+
+ // Alert the user of success
+ alert(successMessage);
+
+ // Reset button after delay
+ setTimeout(() => {
+ button.innerHTML = originalButtonHTML;
+ button.classList.remove('test-success');
+ }, 3000);
+ } else {
+ // Failure
+ button.innerHTML = ' Failed';
+ button.classList.add('test-failed');
+
+ alert(`Connection failed: ${data.message || 'Unknown error'}`);
+
+ setTimeout(() => {
+ button.innerHTML = originalButtonHTML;
+ button.classList.remove('test-failed');
+ }, 3000);
+ }
+ })
+ .catch(error => {
+ console.error(`Test connection error:`, error);
+
+ button.disabled = false;
+ button.innerHTML = ' Error';
+ button.classList.add('test-failed');
+
+ alert(`Connection test failed: ${error.message}`);
+
+ setTimeout(() => {
+ button.innerHTML = originalButtonHTML;
+ button.classList.remove('test-failed');
+ }, 3000);
+ });
+ });
+ });
+
+ // Add a button to add new instances (limit to 9 total)
+ const addBtn = container.querySelector(`.add-${appType}-instance-btn`);
+ if (addBtn) {
+ // Function to update the button text with current instance count
+ const updateAddButtonText = () => {
+ const instancesContainer = container.querySelector('.instances-container');
+ if (!instancesContainer) return;
+ const currentCount = instancesContainer.querySelectorAll('.instance-item').length;
+ addBtn.innerHTML = ` Add ${appType.charAt(0).toUpperCase() + appType.slice(1)} Instance (${currentCount}/9)`;
+
+ // Disable button if we've reached the maximum
+ if (currentCount >= 9) {
+ addBtn.disabled = true;
+ addBtn.title = "Maximum number of instances reached";
+ } else {
+ addBtn.disabled = false;
+ addBtn.title = "";
+ }
+ };
+
+ // Initialize button text
+ updateAddButtonText();
+
+ addBtn.addEventListener('click', function() {
+ const instancesContainer = container.querySelector('.instances-container');
+ if (!instancesContainer) return;
+
+ // Count current instances
+ const currentCount = instancesContainer.querySelectorAll('.instance-item').length;
+
+ // Don't add more if we have 9 already
+ if (currentCount >= 9) {
+ alert("Maximum of 9 instances allowed");
+ return;
+ }
+
+ // Create new instance div
+ const newInstanceDiv = document.createElement('div');
+ newInstanceDiv.className = 'instance-item'; // Use instance-item
+ newInstanceDiv.dataset.instanceId = currentCount;
+
+ // Set content for the new instance using the updated structure
+ // NOTE(review): the instance HTML template below appears corrupted in
+ // this diff (markup stripped) — restore from the original source file.
+ newInstanceDiv.innerHTML = `
+
+
+
+
Name:
+
+
Friendly name for this ${appType} instance
+
+
+
URL:
+
+
Base URL for ${appType} (e.g., http://localhost:8989)
+
+
+
API Key:
+
+
API key for ${appType}
+
+
+ Enabled:
+
+
+
+
+
+
+ `;
+
+ // Add the new instance to the container
+ instancesContainer.appendChild(newInstanceDiv);
+
+ // Update the button text with new count
+ updateAddButtonText();
+
+ // Add event listener for the remove button
+ const removeBtn = newInstanceDiv.querySelector('.remove-instance-btn');
+ if (removeBtn) {
+ removeBtn.addEventListener('click', function() {
+ instancesContainer.removeChild(newInstanceDiv);
+
+ // Update the add button text after removing
+ updateAddButtonText();
+
+ // Trigger change event to update save button state
+ const changeEvent = new Event('change');
+ container.dispatchEvent(changeEvent);
+ });
+ }
+
+ // Add event listener for test connection button
+ const testBtn = newInstanceDiv.querySelector('.test-connection-btn');
+ if (testBtn) {
+ testBtn.addEventListener('click', function() {
+ // Get the URL and API key inputs from the parent instance item
+ const instanceContainer = testBtn.closest('.instance-item') || testBtn.closest('.instance-panel');
+ if (!instanceContainer) {
+ alert('Error: Could not find instance container');
+ return;
+ }
+
+ const urlInput = instanceContainer.querySelector('input[name="api_url"]');
+ const keyInput = instanceContainer.querySelector('input[name="api_key"]');
+
+ if (!urlInput || !keyInput) {
+ alert('Error: Could not find URL or API key inputs');
+ return;
+ }
+
+ const url = urlInput.value.trim();
+ const apiKey = keyInput.value.trim();
+
+ if (!url) {
+ alert('Please enter a valid URL');
+ urlInput.focus();
+ return;
+ }
+
+ if (!apiKey) {
+ alert('Please enter a valid API key');
+ keyInput.focus();
+ return;
+ }
+
+ // Call the test connection function
+ SettingsForms.testConnection(appType, url, apiKey, testBtn);
+ });
+ }
+
+ // Trigger change event to update save button state
+ const changeEvent = new Event('change');
+ container.dispatchEvent(changeEvent);
+ });
+ }
+
+ // Set up remove buttons for existing instances
+ const removeButtons = container.querySelectorAll('.remove-instance-btn');
+ removeButtons.forEach(btn => {
+ btn.addEventListener('click', function() {
+ const instancePanel = btn.closest('.instance-item') || btn.closest('.instance-panel');
+ if (instancePanel && instancePanel.parentNode) {
+ instancePanel.parentNode.removeChild(instancePanel);
+
+ // Update the button text with new count if updateAddButtonText exists
+ // NOTE(review): `updateAddButtonText` is block-scoped to `if (addBtn)`
+ // above, so this typeof check always yields 'undefined' here and the
+ // add-button counter is NOT refreshed when a pre-existing instance is
+ // removed — confirm and hoist the helper if that refresh is intended.
+ if (typeof updateAddButtonText === 'function') {
+ updateAddButtonText();
+ }
+
+ // Trigger change event to update save button state
+ const changeEvent = new Event('change');
+ container.dispatchEvent(changeEvent);
+ }
+ });
+ });
+ },
+
+ // Test connection to an *arr API
+ testConnection: function(app, url, apiKey, buttonElement) {
+ // Temporarily suppress change detection to prevent the unsaved changes dialog
+ if (window.huntarrUI && window.huntarrUI.suppressUnsavedChangesCheck) {
+ window.huntarrUI.suppressUnsavedChangesCheck = true;
+ }
+
+ // Also set a global flag used by the apps module
+ window._suppressUnsavedChangesDialog = true;
+
+ // Find or create a status message element next to the button
+ let statusElement = buttonElement.closest('.instance-actions').querySelector('.connection-message');
+ if (!statusElement) {
+ statusElement = document.createElement('span');
+ statusElement.className = 'connection-message';
+ statusElement.style.marginLeft = '10px';
+ statusElement.style.fontWeight = 'bold';
+ buttonElement.closest('.instance-actions').insertBefore(statusElement, buttonElement);
+ }
+
+ // Show testing status
+ const originalButtonHTML = buttonElement.innerHTML;
+ buttonElement.innerHTML = ' Testing...';
+ buttonElement.disabled = true;
+ statusElement.textContent = 'Testing connection...';
+ statusElement.style.color = '#888';
+
+ console.log(`Testing connection for ${app} - URL: ${url}, API Key: ${apiKey.substring(0, 5)}...`);
+
+ if (!url) {
+ statusElement.textContent = 'Please enter a valid URL';
+ statusElement.style.color = 'red';
+ buttonElement.innerHTML = originalButtonHTML;
+ buttonElement.disabled = false;
+ // Reset suppression flags
+ this._resetSuppressionFlags();
+ return;
+ }
+
+ if (!apiKey) {
+ statusElement.textContent = 'Please enter a valid API key';
+ statusElement.style.color = 'red';
+ buttonElement.innerHTML = originalButtonHTML;
+ buttonElement.disabled = false;
+ // Reset suppression flags
+ this._resetSuppressionFlags();
+ return;
+ }
+
+ // Make the API request
+ fetch(`/api/${app}/test-connection`, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify({
+ api_url: url,
+ api_key: apiKey
+ })
+ })
+ .then(response => {
+ if (!response.ok) {
+ throw new Error(`HTTP error ${response.status}: ${response.statusText}`);
+ }
+ return response.json();
+ })
+ .then(data => {
+ console.log(`Test connection response:`, data);
+
+ // Reset button
+ buttonElement.disabled = false;
+
+ if (data.success) {
+ // Success
+ buttonElement.innerHTML = ' Test Connection';
+
+ let successMessage = `Connected successfully`;
+ if (data.version) {
+ successMessage += ` (v${data.version})`;
+ }
+
+ // Show success message
+ statusElement.textContent = successMessage;
+ statusElement.style.color = 'green';
+ } else {
+ // Failure
+ buttonElement.innerHTML = ' Test Connection';
+
+ // Show error message
+ const errorMsg = data.message || 'Connection failed';
+ statusElement.textContent = errorMsg;
+ statusElement.style.color = 'red';
+ }
+
+ // Reset suppression flags after a short delay to handle any potential redirects
+ setTimeout(() => {
+ this._resetSuppressionFlags();
+ }, 500);
+ })
+ .catch(error => {
+ console.error(`Connection test error:`, error);
+
+ // Reset button
+ buttonElement.innerHTML = originalButtonHTML;
+ buttonElement.disabled = false;
+
+ // Show error message
+ statusElement.textContent = `Error: ${error.message}`;
+ statusElement.style.color = 'red';
+
+ // Reset suppression flags
+ this._resetSuppressionFlags();
+ });
+ },
+
+ // Helper method to reset unsaved changes suppression flags
+ _resetSuppressionFlags: function() {
+ // Reset all suppression flags
+ if (window.huntarrUI) {
+ window.huntarrUI.suppressUnsavedChangesCheck = false;
+ }
+ window._suppressUnsavedChangesDialog = false;
+ },
+};
diff --git a/Huntarr.io-6.3.6/frontend/static/js/stats-reset.js b/Huntarr.io-6.3.6/frontend/static/js/stats-reset.js
new file mode 100644
index 0000000..21f9907
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/static/js/stats-reset.js
@@ -0,0 +1,80 @@
+/**
+ * Stats Reset Handler
+ * Provides a unified way to handle stats reset operations
+ */
+
/**
 * Wire up the home-page "reset stats" button once the DOM is ready.
 * Zeroes the UI immediately for responsiveness, then persists server-side.
 */
document.addEventListener('DOMContentLoaded', function() {
    const resetButton = document.getElementById('reset-stats');
    if (!resetButton) {
        return;
    }

    console.log('Stats reset button found, attaching handler');

    resetButton.addEventListener('click', function(e) {
        e.preventDefault();

        // Ignore clicks while the button is disabled (prevents double-reset).
        if (this.disabled) return;

        // Optimistic UI update first, then the API call to persist it.
        resetStatsUI();

        resetStatsAPI()
            .then((response) => {
                console.log('Stats reset response:', response);
                if (!response.success) {
                    console.warn('Server reported an error with stats reset:', response.error);
                }
            })
            .catch((error) => {
                console.error('Error during stats reset:', error);
            });
    });
});
+
/**
 * Reset the visible stat counters to zero immediately, for responsive
 * feedback. Purely cosmetic: the server-side reset happens separately via
 * resetStatsAPI(). Counters with empty text are left untouched.
 */
function resetStatsUI() {
    for (const counter of document.querySelectorAll('.stat-number')) {
        if (counter && counter.textContent) {
            counter.textContent = '0';
        }
    }

    // Surface a toast if the main UI exposes its notification helper.
    if (window.huntarrUI && typeof window.huntarrUI.showNotification === 'function') {
        window.huntarrUI.showNotification('Statistics reset successfully', 'success');
    }
}
+
/**
 * Persist a stats reset on the server.
 * @param {string|null} appType - Optional specific app to reset; null resets all.
 * @returns {Promise<Object>} Resolves with the parsed JSON response; rejects
 *     when the server responds with a non-2xx status.
 */
function resetStatsAPI(appType = null) {
    // The public endpoint works without an authenticated session.
    const payload = appType ? { app_type: appType } : {};

    return fetch('/api/stats/reset_public', {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json'
        },
        body: JSON.stringify(payload)
    }).then((response) => {
        if (!response.ok) {
            throw new Error('Server responded with status: ' + response.status);
        }
        return response.json();
    });
}
+
+// Expose resetStatsAPI on window so inline page scripts and other modules
+// can trigger a server-side stats reset without importing this file.
+window.resetStatsAPI = resetStatsAPI;
diff --git a/Huntarr.io-6.3.6/frontend/static/js/theme-preload.js b/Huntarr.io-6.3.6/frontend/static/js/theme-preload.js
new file mode 100644
index 0000000..92befe1
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/static/js/theme-preload.js
@@ -0,0 +1,108 @@
(function() {
    // Single source of truth for the logo asset — served locally, not from GitHub.
    const LOGO_URL = '/static/logo/256.png';

    // Warm the browser cache so the logo paints without a visible pop-in.
    const warmup = new Image();
    warmup.src = LOGO_URL;

    // Dark theme is mandatory; persist the preference for other scripts.
    document.documentElement.classList.add('dark-theme');
    localStorage.setItem('huntarr-dark-mode', 'true');

    // Inject critical colors inline so nothing flashes before the real
    // stylesheets finish loading.
    const criticalStyle = document.createElement('style');
    criticalStyle.textContent = `
    body, html {
    background-color: #1a1d24 !important;
    color: #f8f9fa !important;
    }
    .sidebar {
    background-color: #121212 !important;
    }
    .top-bar {
    background-color: #252a34 !important;
    }
    .login-container {
    background-color: #252a34 !important;
    }
    .login-header {
    background-color: #121212 !important;
    }
    `;
    document.head.appendChild(criticalStyle);

    // Persist the logo URL so it survives page navigations.
    localStorage.setItem('huntarr-logo-url', LOGO_URL);

    /**
     * Point every logo element at the canonical logo URL, mark loaded images,
     * and correct any stale logo paths found elsewhere on the page.
     * Exposed globally so other scripts can re-run it after DOM changes.
     */
    window.applyLogoToAllElements = function() {
        const logoUrl = localStorage.getItem('huntarr-logo-url') || LOGO_URL;

        for (const img of document.querySelectorAll('.logo, .login-logo')) {
            if (!img.src || img.src !== logoUrl) {
                img.src = logoUrl;
            }

            if (img.complete) {
                img.classList.add('loaded');
                continue;
            }

            img.onload = function() {
                this.classList.add('loaded');
            };
            img.onerror = function() {
                // Fallback if the static path fails.
                console.warn('Logo failed to load, trying alternate source');
                if (this.src !== '/logo/256.png') {
                    this.src = '/logo/256.png';
                }
            };
        }

        // Correct any logo images still pointing at the old non-static path.
        for (const img of document.querySelectorAll('img[alt*="Logo"]')) {
            const currentSrc = new URL(img.src, window.location.origin).pathname;
            if (currentSrc !== LOGO_URL && currentSrc === '/logo/64.png') {
                img.src = LOGO_URL;
            }
        }
    };

    // Apply the logo as soon as the DOM is interactive (or right away if it
    // already is).
    if (document.readyState === 'loading') {
        document.addEventListener('DOMContentLoaded', window.applyLogoToAllElements);
    } else {
        window.applyLogoToAllElements();
    }

    // Catch dynamically inserted logo elements via a MutationObserver.
    document.addEventListener('DOMContentLoaded', function() {
        const observer = new MutationObserver(function(mutations) {
            const addedSomething = mutations.some(function(mutation) {
                return mutation.addedNodes.length > 0;
            });
            if (addedSomething) {
                window.applyLogoToAllElements();
            }
        });

        observer.observe(document.body, { childList: true, subtree: true });
    });

    // Re-apply after full load in case AJAX navigation swapped content in.
    window.addEventListener('load', window.applyLogoToAllElements);
})();
diff --git a/Huntarr.io-6.3.6/frontend/static/js/user.js b/Huntarr.io-6.3.6/frontend/static/js/user.js
new file mode 100644
index 0000000..8670b97
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/static/js/user.js
@@ -0,0 +1,53 @@
+/**
+ * Huntarr - User Settings Page
+ * Handles user profile management functionality
+ */
+
+document.addEventListener('DOMContentLoaded', function() {
+ // This file serves as a placeholder for any additional user management
+ // functionality that might be needed in the future
+
+ console.log('User settings page loaded');
+
+ // Most of the user functionality is implemented inline in the HTML page
+ // The following functions could be moved here in the future:
+
+ // Function to load user information
+ function loadUserInfo() {
+ fetch('/api/user/info')
+ .then(response => response.json())
+ .then(data => {
+ if (data.username) {
+ document.getElementById('username').textContent = data.username;
+ document.getElementById('currentUsername').value = data.username;
+ }
+ })
+ .catch(error => console.error('Error loading user info:', error));
+ }
+
+ // Function to check 2FA status
+ function check2FAStatus() {
+ fetch('/api/user/2fa-status')
+ .then(response => response.json())
+ .then(data => {
+ const enable2FACheckbox = document.getElementById('enable2FA');
+ const setup2FAContainer = document.getElementById('setup2FAContainer');
+ const remove2FAContainer = document.getElementById('remove2FAContainer');
+
+ if (data.enabled) {
+ enable2FACheckbox.checked = true;
+ setup2FAContainer.style.display = 'none';
+ remove2FAContainer.style.display = 'block';
+ } else {
+ enable2FACheckbox.checked = false;
+ setup2FAContainer.style.display = 'none';
+ remove2FAContainer.style.display = 'none';
+ }
+ })
+ .catch(error => console.error('Error checking 2FA status:', error));
+ }
+
+ // Call these functions if needed
+ // loadUserInfo();
+ // check2FAStatus();
+});
diff --git a/Huntarr.io-6.3.6/frontend/static/js/utils.js b/Huntarr.io-6.3.6/frontend/static/js/utils.js
new file mode 100644
index 0000000..040cbab
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/static/js/utils.js
@@ -0,0 +1,71 @@
+/**
+ * Huntarr - Utility Functions
+ * Shared functions for use across the application
+ */
+
+const HuntarrUtils = {
+ /**
+ * Fetch with timeout using the global settings
+ * @param {string} url - The URL to fetch
+ * @param {Object} options - Fetch options
+ * @returns {Promise} - Fetch promise with timeout handling
+ */
+ fetchWithTimeout: function(url, options = {}) {
+ // Get the API timeout from global settings, default to 120 seconds if not set
+ let apiTimeout = 120000; // Default 120 seconds in milliseconds
+
+ // Try to get timeout from huntarrUI if available
+ if (window.huntarrUI && window.huntarrUI.originalSettings &&
+ window.huntarrUI.originalSettings.general &&
+ window.huntarrUI.originalSettings.general.api_timeout) {
+ apiTimeout = window.huntarrUI.originalSettings.general.api_timeout * 1000;
+ }
+
+ // Create abort controller for timeout
+ const controller = new AbortController();
+ const timeoutId = setTimeout(() => controller.abort(), apiTimeout);
+
+ // Merge options with signal from AbortController
+ const fetchOptions = {
+ ...options,
+ signal: controller.signal
+ };
+
+ return fetch(url, fetchOptions)
+ .then(response => {
+ clearTimeout(timeoutId);
+ return response;
+ })
+ .catch(error => {
+ clearTimeout(timeoutId);
+ // Customize the error if it was a timeout
+ if (error.name === 'AbortError') {
+ throw new Error(`Request timeout after ${apiTimeout / 1000} seconds`);
+ }
+ throw error;
+ });
+ },
+
+ /**
+ * Get the global API timeout value in seconds
+ * @returns {number} - API timeout in seconds
+ */
+ getApiTimeout: function() {
+ // Default value
+ let timeout = 120;
+
+ // Try to get from global settings
+ if (window.huntarrUI && window.huntarrUI.originalSettings &&
+ window.huntarrUI.originalSettings.general &&
+ window.huntarrUI.originalSettings.general.api_timeout) {
+ timeout = window.huntarrUI.originalSettings.general.api_timeout;
+ }
+
+ return timeout;
+ }
+};
+
+// If running in Node.js environment
+if (typeof module !== 'undefined' && module.exports) {
+ module.exports = HuntarrUtils;
+}
diff --git a/Huntarr.io-6.3.6/frontend/static/logo/128.png b/Huntarr.io-6.3.6/frontend/static/logo/128.png
new file mode 100644
index 0000000..aeb4179
Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/128.png differ
diff --git a/Huntarr.io-6.3.6/frontend/static/logo/16.png b/Huntarr.io-6.3.6/frontend/static/logo/16.png
new file mode 100644
index 0000000..987f868
Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/16.png differ
diff --git a/Huntarr.io-6.3.6/frontend/static/logo/256.png b/Huntarr.io-6.3.6/frontend/static/logo/256.png
new file mode 100644
index 0000000..b1d4977
Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/256.png differ
diff --git a/Huntarr.io-6.3.6/frontend/static/logo/32.png b/Huntarr.io-6.3.6/frontend/static/logo/32.png
new file mode 100644
index 0000000..c3f4e1b
Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/32.png differ
diff --git a/Huntarr.io-6.3.6/frontend/static/logo/40.png b/Huntarr.io-6.3.6/frontend/static/logo/40.png
new file mode 100644
index 0000000..30c260c
Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/40.png differ
diff --git a/Huntarr.io-6.3.6/frontend/static/logo/400.png b/Huntarr.io-6.3.6/frontend/static/logo/400.png
new file mode 100644
index 0000000..ce1d294
Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/400.png differ
diff --git a/Huntarr.io-6.3.6/frontend/static/logo/48.png b/Huntarr.io-6.3.6/frontend/static/logo/48.png
new file mode 100644
index 0000000..1aad9a7
Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/48.png differ
diff --git a/Huntarr.io-6.3.6/frontend/static/logo/512.png b/Huntarr.io-6.3.6/frontend/static/logo/512.png
new file mode 100644
index 0000000..3740908
Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/512.png differ
diff --git a/Huntarr.io-6.3.6/frontend/static/logo/64.png b/Huntarr.io-6.3.6/frontend/static/logo/64.png
new file mode 100644
index 0000000..8544ec0
Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/64.png differ
diff --git a/Huntarr.io-6.3.6/frontend/static/logo/800.png b/Huntarr.io-6.3.6/frontend/static/logo/800.png
new file mode 100644
index 0000000..69e4fd4
Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/800.png differ
diff --git a/Huntarr.io-6.3.6/frontend/static/logo/864.png b/Huntarr.io-6.3.6/frontend/static/logo/864.png
new file mode 100644
index 0000000..7914fd8
Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/864.png differ
diff --git a/Huntarr.io-6.3.6/frontend/static/logo/Huntarr.svg b/Huntarr.io-6.3.6/frontend/static/logo/Huntarr.svg
new file mode 100644
index 0000000..e79f9a6
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/static/logo/Huntarr.svg
@@ -0,0 +1,10 @@
+
+
+
+
+
+
+
+
+
+
diff --git a/Huntarr.io-6.3.6/frontend/static/logo/apps/cleanuperr.png b/Huntarr.io-6.3.6/frontend/static/logo/apps/cleanuperr.png
new file mode 100644
index 0000000..4bf2f32
Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/apps/cleanuperr.png differ
diff --git a/Huntarr.io-6.3.6/frontend/static/logo/huntarr.ico b/Huntarr.io-6.3.6/frontend/static/logo/huntarr.ico
new file mode 100644
index 0000000..45c70ed
Binary files /dev/null and b/Huntarr.io-6.3.6/frontend/static/logo/huntarr.ico differ
diff --git a/Huntarr.io-6.3.6/frontend/templates/base.html b/Huntarr.io-6.3.6/frontend/templates/base.html
new file mode 100644
index 0000000..c971f1a
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/templates/base.html
@@ -0,0 +1,27 @@
+
+
+
+
+
+ My App
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/frontend/templates/components/apps_section.html b/Huntarr.io-6.3.6/frontend/templates/components/apps_section.html
new file mode 100644
index 0000000..3794a1b
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/templates/components/apps_section.html
@@ -0,0 +1,233 @@
+
+
+
diff --git a/Huntarr.io-6.3.6/frontend/templates/components/cleanuperr_section.html b/Huntarr.io-6.3.6/frontend/templates/components/cleanuperr_section.html
new file mode 100644
index 0000000..d2e7643
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/templates/components/cleanuperr_section.html
@@ -0,0 +1,426 @@
+
+
+
+
+
+
+
+
+
+
+
Cleanuperr is a tool for automating the cleanup of unwanted or blocked files in Sonarr, Radarr, and supported download clients like qBittorrent. It removes incomplete or blocked downloads, updates queues, and enforces blacklists or whitelists to manage file selection. After removing blocked content, Cleanuperr can also trigger a search to replace the deleted shows/movies.
+
+
Key Features:
+
+ Strike system to mark downloads that are stalled or stuck downloading metadata
+ Remove and block downloads that reached a maximum number of strikes
+ Remove and block downloads that have a low download speed or high estimated completion time
+ Remove downloads blocked by qBittorrent or by Cleanuperr's content blocker
+ Trigger a search for downloads removed from the *arrs
+ Clean up downloads that have been seeding for a certain amount of time
+ Notify on strike or download removal
+ Ignore certain torrent hashes, categories, tags or trackers from being processed
+
+
+
+
Cleanuperr was created primarily to address malicious files, such as *.lnk or *.zipx, that were getting stuck in Sonarr/Radarr and required manual intervention. It supports both qBittorrent's built-in exclusion features and its own blocklist-based system.
+
+
+
+
+
+
+
About the Author
+
+ Cleanuperr is developed by Flaminel, a passionate developer focused on creating tools that enhance the media server experience.
+
+
+
+
+
+
Huntarr is proud to feature Cleanuperr as part of our commitment to helping other projects grow. We believe in collaboration across the media server community to create better tools for everyone.
+
+
+
+
+
+
+
+
+
+
+
diff --git a/Huntarr.io-6.3.6/frontend/templates/components/footer.html b/Huntarr.io-6.3.6/frontend/templates/components/footer.html
new file mode 100644
index 0000000..f32c515
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/templates/components/footer.html
@@ -0,0 +1,68 @@
+
+
+
diff --git a/Huntarr.io-6.3.6/frontend/templates/components/head.html b/Huntarr.io-6.3.6/frontend/templates/components/head.html
new file mode 100644
index 0000000..9caa105
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/templates/components/head.html
@@ -0,0 +1,19 @@
+
+
+
+
+
+
+
+
+
+
+
diff --git a/Huntarr.io-6.3.6/frontend/templates/components/history_section.html b/Huntarr.io-6.3.6/frontend/templates/components/history_section.html
new file mode 100644
index 0000000..f963ab0
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/templates/components/history_section.html
@@ -0,0 +1,703 @@
+
+
+
+
+
+
+
+
+
+
No history found. Items will appear here when media is processed.
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/Huntarr.io-6.3.6/frontend/templates/components/home_section.html b/Huntarr.io-6.3.6/frontend/templates/components/home_section.html
new file mode 100644
index 0000000..ade5801
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/templates/components/home_section.html
@@ -0,0 +1,959 @@
+
+
+
diff --git a/Huntarr.io-6.3.6/frontend/templates/components/logs_section.html b/Huntarr.io-6.3.6/frontend/templates/components/logs_section.html
new file mode 100644
index 0000000..4602768
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/templates/components/logs_section.html
@@ -0,0 +1,426 @@
+
+
+
diff --git a/Huntarr.io-6.3.6/frontend/templates/components/scripts.html b/Huntarr.io-6.3.6/frontend/templates/components/scripts.html
new file mode 100644
index 0000000..447707d
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/templates/components/scripts.html
@@ -0,0 +1,3 @@
+
+
+
diff --git a/Huntarr.io-6.3.6/frontend/templates/components/settings_section.html b/Huntarr.io-6.3.6/frontend/templates/components/settings_section.html
new file mode 100644
index 0000000..f86f96e
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/templates/components/settings_section.html
@@ -0,0 +1,69 @@
+
+
+
+
+
diff --git a/Huntarr.io-6.3.6/frontend/templates/components/sidebar.html b/Huntarr.io-6.3.6/frontend/templates/components/sidebar.html
new file mode 100644
index 0000000..8ad1e29
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/templates/components/sidebar.html
@@ -0,0 +1,245 @@
+
+
+
+
+
diff --git a/Huntarr.io-6.3.6/frontend/templates/components/topbar.html b/Huntarr.io-6.3.6/frontend/templates/components/topbar.html
new file mode 100644
index 0000000..169fef2
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/templates/components/topbar.html
@@ -0,0 +1,267 @@
+
+
+
+
+
diff --git a/Huntarr.io-6.3.6/frontend/templates/components/user_profile.html b/Huntarr.io-6.3.6/frontend/templates/components/user_profile.html
new file mode 100644
index 0000000..78110a8
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/templates/components/user_profile.html
@@ -0,0 +1,137 @@
+
+
+
Change Username
+
+
+ New Username:
+
+
+
+
+ Save Username
+
+
+
+
+
+
+
Change Password
+
+
+
+
+
+ Save Password
+
+
+
+
+
+
+
Two-Factor Authentication
+
+
+
+
+
+
+
+
+
+
+
+
+ Verification Code:
+
+
+
+
+ Verify and Enable
+
+
+
+
+
+
+
+
+
+
diff --git a/Huntarr.io-6.3.6/frontend/templates/index.html b/Huntarr.io-6.3.6/frontend/templates/index.html
new file mode 100644
index 0000000..685f277
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/templates/index.html
@@ -0,0 +1,50 @@
+
+
+
+ {% include 'components/head.html' %}
+ Huntarr - Home
+
+
+
+ {% include 'components/sidebar.html' %}
+
+
+ {% include 'components/topbar.html' %}
+
+
+ {% include 'components/home_section.html' %}
+
+
+ {% include 'components/logs_section.html' %}
+
+
+ {% include 'components/history_section.html' %}
+
+
+ {% include 'components/apps_section.html' %}
+
+
+ {% include 'components/cleanuperr_section.html' %}
+
+
+ {% include 'components/settings_section.html' %}
+
+
+
+
+ {% include 'components/footer.html' %}
+
+ {% include 'components/scripts.html' %}
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/frontend/templates/login.html b/Huntarr.io-6.3.6/frontend/templates/login.html
new file mode 100644
index 0000000..576b965
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/templates/login.html
@@ -0,0 +1,400 @@
+
+
+
+
+
+ Huntarr Login
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/frontend/templates/setup.html b/Huntarr.io-6.3.6/frontend/templates/setup.html
new file mode 100644
index 0000000..7c765ca
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/templates/setup.html
@@ -0,0 +1,768 @@
+
+
+
+
+
+ Setup - Huntarr
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/frontend/templates/user.html b/Huntarr.io-6.3.6/frontend/templates/user.html
new file mode 100644
index 0000000..34e0736
--- /dev/null
+++ b/Huntarr.io-6.3.6/frontend/templates/user.html
@@ -0,0 +1,439 @@
+
+
+
+ User Settings - Huntarr
+ {% include 'components/head.html' %}
+
+
+
+
+ {% include 'components/sidebar.html' %}
+
+
+ {% include 'components/topbar.html' %}
+
+
+
+
Change Username
+
+ Current Username:
+ Loading...
+
+
+ New Username:
+
+
+
+ Current Password:
+
+
+
+ Save Username
+
+
+
+
+
+
Change Password
+
+ Current Password:
+
+
+
+ New Password:
+
+
+
+ Confirm Password:
+
+
+
+ Save Password
+
+
+
+
+
+
Two-Factor Authentication
+
+ Status:
+ Loading...
+
+
+
+
+
+
+
+
+
+
+
+
+ Verification Code:
+
+
+
+ Verify and Enable
+
+
+
+
+
+
+ Current Password:
+
+
+
+ Current OTP Code:
+
+
+
+ Disable 2FA
+
+
+
+
+
+
+
+
+ {% include 'components/scripts.html' %}
+
+
+
+
+
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/main.py b/Huntarr.io-6.3.6/main.py
new file mode 100644
index 0000000..654484a
--- /dev/null
+++ b/Huntarr.io-6.3.6/main.py
@@ -0,0 +1,217 @@
+#!/usr/bin/env python3
+"""
+Main entry point for Huntarr
+Starts both the web server and the background processing tasks.
+"""
+
+import os
+import threading
+import sys
+import signal
+import logging # Use standard logging for initial setup
+
+# Ensure the 'src' directory is in the Python path
+# This allows importing modules from 'src.primary' etc.
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), 'src')))
+
+# --- Early Logging Setup (Before importing app components) ---
+# Basic logging to capture early errors during import or setup
+# DEBUG=true in the environment raises verbosity to DEBUG for this early phase
+log_level = logging.DEBUG if os.environ.get('DEBUG', 'false').lower() == 'true' else logging.INFO
+logging.basicConfig(level=log_level, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
+root_logger = logging.getLogger("HuntarrRoot") # Specific logger for this entry point
+root_logger.info("--- Huntarr Main Process Starting ---")
+# Log sys.path up front to aid diagnosing the import failures handled below
+root_logger.info(f"Python sys.path: {sys.path}")
+
+# Check for Windows service commands
+# Each branch performs its action and exits the process; normal startup
+# only continues when no recognized service argument was given.
+if sys.platform == 'win32' and len(sys.argv) > 1:
+ # Register this script as a Windows service (requires pywin32)
+ if sys.argv[1] == '--install-service':
+ try:
+ from src.primary.windows_service import install_service
+ success = install_service()
+ sys.exit(0 if success else 1)
+ except ImportError:
+ root_logger.error("Failed to import Windows service module. Make sure pywin32 is installed.")
+ sys.exit(1)
+ except Exception as e:
+ root_logger.exception(f"Error installing Windows service: {e}")
+ sys.exit(1)
+ # Unregister the service
+ elif sys.argv[1] == '--remove-service':
+ try:
+ from src.primary.windows_service import remove_service
+ success = remove_service()
+ sys.exit(0 if success else 1)
+ except ImportError:
+ root_logger.error("Failed to import Windows service module. Make sure pywin32 is installed.")
+ sys.exit(1)
+ except Exception as e:
+ root_logger.exception(f"Error removing Windows service: {e}")
+ sys.exit(1)
+ # Control an already-installed service
+ elif sys.argv[1] in ['--start', '--stop', '--restart', '--debug', '--update']:
+ try:
+ import win32serviceutil
+ service_name = "Huntarr"
+ if sys.argv[1] == '--start':
+ win32serviceutil.StartService(service_name)
+ print(f"Started {service_name} service")
+ elif sys.argv[1] == '--stop':
+ win32serviceutil.StopService(service_name)
+ print(f"Stopped {service_name} service")
+ elif sys.argv[1] == '--restart':
+ win32serviceutil.RestartService(service_name)
+ print(f"Restarted {service_name} service")
+ elif sys.argv[1] == '--debug':
+ # Run the service in debug mode directly
+ win32serviceutil.HandleCommandLine(HuntarrService)
+ from src.primary.windows_service import HuntarrService
+ elif sys.argv[1] == '--update':
+ # Update the service: stop, reinstall, then start again
+ win32serviceutil.StopService(service_name)
+ from src.primary.windows_service import install_service
+ install_service()
+ win32serviceutil.StartService(service_name)
+ print(f"Updated {service_name} service")
+ sys.exit(0)
+ except ImportError:
+ root_logger.error("Failed to import Windows service module. Make sure pywin32 is installed.")
+ sys.exit(1)
+ except Exception as e:
+ root_logger.exception(f"Error managing Windows service: {e}")
+ sys.exit(1)
+
+try:
+ # Import the Flask app instance
+ from primary.web_server import app
+ # Import the background task starter function and shutdown helpers from the renamed file
+ from primary.background import start_huntarr, stop_event, shutdown_threads
+ # Configure logging first
+ import logging
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src"))
+ from primary.utils.logger import setup_main_logger, get_logger
+
+ # Initialize main logger
+ huntarr_logger = setup_main_logger()
+ huntarr_logger.info("Successfully imported application components.")
+except ImportError as e:
+ root_logger.critical(f"Fatal Error: Failed to import application components: {e}", exc_info=True)
+ root_logger.critical("Please ensure the application structure is correct, dependencies are installed (`pip install -r requirements.txt`), and the script is run from the project root.")
+ sys.exit(1)
+except Exception as e:
+ root_logger.critical(f"Fatal Error: An unexpected error occurred during initial imports: {e}", exc_info=True)
+ sys.exit(1)
+
+
+def run_background_tasks():
+ """Runs the Huntarr background processing.
+
+ Blocks until start_huntarr() returns (per its usage here it contains the
+ main loop and shutdown logic); any escaping exception is logged rather
+ than propagated so the daemon thread ends cleanly.
+ """
+ bg_logger = get_logger("HuntarrBackground") # Use app's logger
+ try:
+ bg_logger.info("Starting Huntarr background tasks...")
+ start_huntarr() # This function contains the main loop and shutdown logic
+ except Exception as e:
+ bg_logger.exception(f"Critical error in Huntarr background tasks: {e}")
+ finally:
+ bg_logger.info("Huntarr background tasks stopped.")
+
+def run_web_server():
+ """Runs the Flask web server using Waitress in production.
+
+ Reads DEBUG, FLASK_HOST and PORT from the environment (defaults:
+ false, 0.0.0.0, 9705). Blocks until the server stops; on a critical
+ server failure it sets stop_event so the background thread also exits.
+ """
+ web_logger = get_logger("WebServer") # Use app's logger
+ debug_mode = os.environ.get('DEBUG', 'false').lower() == 'true'
+ host = os.environ.get('FLASK_HOST', '0.0.0.0')
+ port = int(os.environ.get('PORT', 9705)) # Use PORT for consistency
+
+ web_logger.info(f"Starting web server on {host}:{port} (Debug: {debug_mode})...")
+
+ if debug_mode:
+ # Use Flask's development server for debugging (less efficient, auto-reloads)
+ # Note: use_reloader=True can cause issues with threads starting twice.
+ web_logger.warning("Running in DEBUG mode with Flask development server.")
+ try:
+ app.run(host=host, port=port, debug=True, use_reloader=False)
+ except Exception as e:
+ web_logger.exception(f"Flask development server failed: {e}")
+ # Signal background thread to stop if server fails critically
+ if not stop_event.is_set():
+ stop_event.set()
+ else:
+ # Use Waitress for production
+ try:
+ from waitress import serve
+ web_logger.info("Running with Waitress production server.")
+ # Adjust threads as needed, default is 4
+ serve(app, host=host, port=port, threads=8)
+ except ImportError:
+ # Waitress missing: degrade to Flask's dev server rather than refusing to start
+ web_logger.error("Waitress not found. Falling back to Flask development server (NOT recommended for production).")
+ web_logger.error("Install waitress ('pip install waitress') for production use.")
+ try:
+ app.run(host=host, port=port, debug=False, use_reloader=False)
+ except Exception as e:
+ web_logger.exception(f"Flask development server (fallback) failed: {e}")
+ # Signal background thread to stop if server fails critically
+ if not stop_event.is_set():
+ stop_event.set()
+ except Exception as e:
+ web_logger.exception(f"Waitress server failed: {e}")
+ # Signal background thread to stop if server fails critically
+ if not stop_event.is_set():
+ stop_event.set()
+
+def main_shutdown_handler(signum, frame):
+ """Gracefully shut down the application.
+
+ Registered below for SIGINT and SIGTERM. Setting stop_event asks the
+ background thread to wind down; the remaining cleanup runs once the web
+ server returns control to the main thread.
+
+ Args:
+ signum: Signal number delivered by the OS.
+ frame: Current stack frame (unused).
+ """
+ huntarr_logger.warning(f"Received signal {signal.Signals(signum).name}. Initiating shutdown...")
+ if not stop_event.is_set():
+ stop_event.set()
+ # The rest of the cleanup happens after run_web_server() returns or in the finally block.
+
+if __name__ == '__main__':
+ # Register signal handlers for graceful shutdown in the main process
+ signal.signal(signal.SIGINT, main_shutdown_handler)
+ signal.signal(signal.SIGTERM, main_shutdown_handler)
+
+ background_thread = None
+ try:
+ # Start background tasks in a daemon thread
+ # Daemon threads exit automatically if the main thread exits unexpectedly,
+ # but we'll try to join() them for a graceful shutdown.
+ background_thread = threading.Thread(target=run_background_tasks, name="HuntarrBackground", daemon=True)
+ background_thread.start()
+
+ # Start the web server in the main thread (blocking)
+ # This will run until the server is stopped (e.g., by Ctrl+C)
+ run_web_server()
+
+ except KeyboardInterrupt:
+ huntarr_logger.info("KeyboardInterrupt received in main thread. Shutting down...")
+ if not stop_event.is_set():
+ stop_event.set()
+ except Exception as e:
+ huntarr_logger.exception(f"An unexpected error occurred in the main execution block: {e}")
+ if not stop_event.is_set():
+ stop_event.set() # Ensure shutdown is triggered on unexpected errors
+ finally:
+ # --- Cleanup ---
+ # Reached whether the server returned normally, a signal fired, or an
+ # exception escaped; stop_event may or may not already be set here.
+ huntarr_logger.info("Web server has stopped. Initiating final shutdown sequence...")
+
+ # Ensure the stop event is set (might already be set by signal handler or error)
+ if not stop_event.is_set():
+ huntarr_logger.warning("Stop event was not set before final cleanup. Setting now.")
+ stop_event.set()
+
+ # Wait for the background thread to finish cleanly
+ if background_thread and background_thread.is_alive():
+ huntarr_logger.info("Waiting for background tasks to complete...")
+ background_thread.join(timeout=30) # Wait up to 30 seconds
+
+ if background_thread.is_alive():
+ huntarr_logger.warning("Background thread did not stop gracefully within the timeout.")
+ elif background_thread:
+ huntarr_logger.info("Background thread already stopped.")
+ else:
+ huntarr_logger.info("Background thread was not started.")
+
+ # Call the shutdown_threads function from primary.main (if it does more than just join)
+ # This might be redundant if start_huntarr handles its own cleanup via stop_event
+ # huntarr_logger.info("Calling shutdown_threads()...")
+ # shutdown_threads() # Uncomment if primary.main.shutdown_threads() does more cleanup
+
+ huntarr_logger.info("--- Huntarr Main Process Exiting ---")
+ # Use os._exit(0) for a more forceful exit if necessary, but sys.exit(0) is generally preferred
+ sys.exit(0)
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/requirements.txt b/Huntarr.io-6.3.6/requirements.txt
new file mode 100644
index 0000000..419b6de
--- /dev/null
+++ b/Huntarr.io-6.3.6/requirements.txt
@@ -0,0 +1,7 @@
+Flask==3.0.0
+requests==2.31.0
+waitress==2.1.2
+bcrypt==4.1.2
+qrcode[pil]==7.4.2 # Added qrcode with PIL support
+pyotp==2.9.0 # Added pyotp
+pywin32==306; sys_platform == 'win32' # For Windows service support
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/routes.py b/Huntarr.io-6.3.6/routes.py
new file mode 100644
index 0000000..fe3f483
--- /dev/null
+++ b/Huntarr.io-6.3.6/routes.py
@@ -0,0 +1,62 @@
+from flask import Flask, render_template, request, redirect, send_file
+
+app = Flask(__name__)
+
+import os
+import json
+
+def get_ui_preference():
+ """Determine which UI to use based on config and user preference"""
+ # Check if ui_settings.json exists
+ config_file = os.path.join(os.path.dirname(__file__), 'config/ui_settings.json')
+
+ use_new_ui = False
+
+ if os.path.exists(config_file):
+ try:
+ with open(config_file, 'r') as f:
+ settings = json.load(f)
+ use_new_ui = settings.get('use_new_ui', False)
+ except Exception as e:
+ print(f"Error loading UI settings: {e}")
+
+ # Allow URL parameter to override
+ ui_param = request.args.get('ui', None)
+ if ui_param == 'new':
+ use_new_ui = True
+ elif ui_param == 'classic':
+ use_new_ui = False
+
+ return use_new_ui
+
+@app.route('/')
+def index():
+ """Root route with UI switching capability.
+
+ Redirects to /new when the new UI is preferred (config file or ?ui=
+ query parameter via get_ui_preference); otherwise renders the classic
+ index template.
+ """
+ if get_ui_preference():
+ return redirect('/new')
+ else:
+ return render_template('index.html')
+
+@app.route('/user')
+def user_page():
+ """Render the user settings page (user.html template)."""
+ return render_template('user.html')
+
+@app.route('/user/new')
+def user_new_page():
+ """User settings page for new UI; renders the same user.html template."""
+ return render_template('user.html')
+
+@app.route('/version.txt')
+def version_txt():
+ """Serve version.txt file directly"""
+ version_path = os.path.join(os.path.dirname(__file__), 'version.txt')
+ print(f"Serving version.txt from path: {version_path}") # Debug log
+ try:
+ return send_file(version_path, mimetype='text/plain')
+ except Exception as e:
+ print(f"Error serving version.txt: {e}") # Log any errors
+ return str(e), 500 # Return error message and 500 status code
+
+if __name__ == '__main__':
+ # Development-only entry point; main.py serves the app via Waitress otherwise
+ app.run(debug=True)
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/__init__.py b/Huntarr.io-6.3.6/src/primary/__init__.py
new file mode 100644
index 0000000..851281f
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/__init__.py
@@ -0,0 +1,6 @@
+"""
+Huntarr - Find Missing & Upgrade Media Items
+A unified tool for Sonarr, Radarr, Lidarr, and Readarr
+"""
+
+__version__ = "4.0.0"
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/api.py b/Huntarr.io-6.3.6/src/primary/api.py
new file mode 100644
index 0000000..50d0e5b
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/api.py
@@ -0,0 +1,389 @@
+#!/usr/bin/env python3
+"""
+Arr API Helper Functions
+Handles all communication with the Arr API
+"""
+
+import requests
+import time
+from typing import List, Dict, Any, Optional, Union
+from primary.utils.logger import logger, debug_log
+from primary.config import API_KEY, API_URL, API_TIMEOUT, COMMAND_WAIT_DELAY, COMMAND_WAIT_ATTEMPTS, APP_TYPE
+from src.primary.stats_manager import get_stats, reset_stats
+
+# Create a session for reuse
+session = requests.Session()
+
def arr_request(endpoint: str, method: str = "GET", data: Dict = None) -> Optional[Union[Dict, List]]:
    """
    Make a request to the Arr API.
    `endpoint` should be something like 'series', 'command', 'wanted/cutoff', etc.

    Returns the decoded JSON payload, or None on any HTTP/transport failure
    (including 401 Unauthorized, which is logged rather than raised).
    """
    # Sonarr/Radarr speak API v3, Lidarr/Readarr speak v1; anything
    # unrecognized falls back to v3.
    version_by_app = {
        "sonarr": "api/v3",
        "radarr": "api/v3",
        "lidarr": "api/v1",
        "readarr": "api/v1",
    }
    api_base = version_by_app.get(APP_TYPE, "api/v3")

    url = f"{API_URL}/{api_base}/{endpoint}"
    headers = {
        "X-Api-Key": API_KEY,
        "Content-Type": "application/json"
    }

    verb = method.upper()
    try:
        if verb == "GET":
            response = session.get(url, headers=headers, timeout=API_TIMEOUT)
        elif verb == "POST":
            response = session.post(url, headers=headers, json=data, timeout=API_TIMEOUT)
        else:
            logger.error(f"Unsupported HTTP method: {method}")
            return None

        # Surface authentication failures explicitly instead of raising.
        if response.status_code == 401:
            logger.error(f"API request error: 401 Client Error: Unauthorized for url: {url}")
            return None

        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        logger.error(f"API request error: {e}")
        return None
+
def check_connection(app_type: str = None) -> bool:
    """
    Check if we can connect to the Arr API.
    Returns True if connection is successful, False otherwise.

    Args:
        app_type: Optional app type to check connection for (sonarr, radarr, etc.).
                  If None, uses the global APP_TYPE.
    """
    current_app_type = app_type or APP_TYPE

    # Credentials are stored per app; look up the pair for the app under test.
    from primary import keys_manager
    api_url, api_key = keys_manager.get_api_keys(current_app_type)

    # Guard clauses: both URL and key must be configured before we try anything.
    if not api_url:
        logger.error(f"API URL is not configured for {current_app_type} in settings. Please set it up in the Settings page.")
        return False
    if not api_key:
        logger.error(f"API Key is not configured for {current_app_type} in settings. Please set it up in the Settings page.")
        return False

    logger.debug(f"Attempting to connect to {current_app_type.title()} at {api_url}")

    try:
        # system/status exists on every Arr application, which makes it a safe
        # probe endpoint. Sonarr/Radarr use v3, Lidarr/Readarr use v1.
        version_by_app = {
            "sonarr": "api/v3",
            "radarr": "api/v3",
            "lidarr": "api/v1",
            "readarr": "api/v1",
        }
        api_base = version_by_app.get(current_app_type, "api/v3")

        url = f"{api_url}/{api_base}/system/status"
        headers = {
            "X-Api-Key": api_key,
            "Content-Type": "application/json"
        }

        logger.debug(f"Testing connection with URL: {url}")
        response = session.get(url, headers=headers, timeout=API_TIMEOUT)

        if response.status_code == 401:
            logger.error(f"Connection test failed: 401 Client Error: Unauthorized - Invalid API key for {current_app_type.title()}")
            return False

        response.raise_for_status()
        logger.info(f"Connection to {current_app_type.title()} at {api_url} successful")
        return True
    except requests.exceptions.RequestException as e:
        logger.error(f"Connection test failed for {current_app_type}: {e}")
        return False
+
def wait_for_command(command_id: int):
    """
    Poll the command endpoint until the command completes or the retry budget
    (COMMAND_WAIT_ATTEMPTS) is exhausted, sleeping COMMAND_WAIT_DELAY between polls.

    Returns:
        True when the command reached a completed state; False on timeout or
        when the status could not be fetched.
    """
    logger.debug(f"Waiting for command {command_id} to complete...")
    attempts = 0
    while True:
        time.sleep(COMMAND_WAIT_DELAY)
        try:
            response = arr_request(f"command/{command_id}")
            # arr_request returns None on any API failure; the original code
            # subscripted it unconditionally, raising TypeError and logging a
            # misleading "error fetching" message. Check explicitly instead.
            if not response or 'status' not in response:
                logger.error(f"Invalid command status response on attempt {attempts + 1}: {response!r}")
                return False
            status = response['status'].lower()
            logger.debug(f"Command {command_id} Status: {response['status']}")
        except Exception as error:
            logger.error(f"Error fetching command status on attempt {attempts + 1}: {error}")
            return False

        attempts += 1

        if status in ('complete', 'completed'):
            # Completed: no need for the extra 0.5s sleep the original added
            # before returning.
            return True
        if attempts >= COMMAND_WAIT_ATTEMPTS:
            logger.warning(f"Command {command_id} did not complete within the allowed attempts.")
            return False
+
+# Sonarr-specific functions
def get_series() -> List[Dict]:
    """Get all series from Sonarr. Returns [] when unavailable or misconfigured."""
    if APP_TYPE != "sonarr":
        logger.error("get_series() called but APP_TYPE is not sonarr")
        return []

    series_list = arr_request("series")
    if series_list:
        # Only log a small sample to keep debug output readable.
        sample = series_list[:2] if len(series_list) > 2 else series_list
        debug_log("Raw series API response sample:", sample)
    return series_list or []
+
def refresh_series(series_id: int) -> bool:
    """
    Trigger a metadata refresh for one series and block until it finishes.

    Issues (via arr_request, which prefixes api/v3 for Sonarr):
    POST /api/v3/command
    {
        "name": "RefreshSeries",
        "seriesId": <series_id>
    }

    Returns True when the command completed, False otherwise.
    """
    if APP_TYPE != "sonarr":
        logger.error("refresh_series() called but APP_TYPE is not sonarr")
        return False

    data = {
        "name": "RefreshSeries",
        "seriesId": series_id
    }
    response = arr_request("command", method="POST", data=data)
    # A None response or one without a command id means the POST failed.
    if not response or 'id' not in response:
        return False
    return wait_for_command(response['id'])
+
def episode_search_episodes(episode_ids: List[int]) -> bool:
    """
    Trigger a search for the given episodes and wait for it to finish.

    Issues (via arr_request, which prefixes api/v3 for Sonarr):
    POST /api/v3/command
    {
        "name": "EpisodeSearch",
        "episodeIds": [...]
    }

    Returns True when the command completed, False otherwise.
    """
    if APP_TYPE != "sonarr":
        logger.error("episode_search_episodes() called but APP_TYPE is not sonarr")
        return False

    data = {
        "name": "EpisodeSearch",
        "episodeIds": episode_ids
    }
    response = arr_request("command", method="POST", data=data)
    # A None response or one without a command id means the POST failed.
    if not response or 'id' not in response:
        return False
    return wait_for_command(response['id'])
+
def get_download_queue_size() -> int:
    """
    GET queue?status=downloading (API version prefix chosen by arr_request
    based on APP_TYPE).
    Returns total number of items in the queue with the status 'downloading'.
    """
    # Endpoint is the same for all apps
    response = arr_request("queue?status=downloading")
    if not response:
        return 0

    total_records = response.get("totalRecords", 0)
    # Guard against non-numeric payloads so callers always get an int.
    if not isinstance(total_records, int):
        total_records = 0
    logger.debug(f"Download Queue Size: {total_records}")

    return total_records
+
def get_cutoff_unmet(page: int = 1) -> Optional[Dict]:
    """
    GET /api/v3/wanted/cutoff?sortKey=airDateUtc&sortDirection=descending
        &includeSeriesInformation=true&page=<page>&pageSize=200
    Returns JSON with a "records" array and "totalRecords", or None on failure.
    """
    if APP_TYPE != "sonarr":
        logger.error("get_cutoff_unmet() called but APP_TYPE is not sonarr")
        return None

    # Page size is fixed at 200; callers paginate via the `page` argument.
    endpoint = (
        "wanted/cutoff?"
        "sortKey=airDateUtc&sortDirection=descending&includeSeriesInformation=true"
        f"&page={page}&pageSize=200"
    )
    return arr_request(endpoint, method="GET")
+
def get_cutoff_unmet_total_pages() -> int:
    """
    Determine how many pages of cutoff-unmet episodes exist.

    Queries the endpoint with page=1&pageSize=1 to read totalRecords only,
    then derives the page count assuming 200 records per page (matching
    get_cutoff_unmet). Returns 0 when the count is unavailable or zero.
    """
    if APP_TYPE != "sonarr":
        logger.error("get_cutoff_unmet_total_pages() called but APP_TYPE is not sonarr")
        return 0

    response = arr_request("wanted/cutoff?page=1&pageSize=1")
    if not response or "totalRecords" not in response:
        return 0

    total_records = response.get("totalRecords", 0)
    if not isinstance(total_records, int) or total_records < 1:
        return 0

    # Ceiling division: pages of up to 200 episodes each, at least one page.
    return max((total_records + 199) // 200, 1)
+
def get_episodes_for_series(series_id: int) -> Optional[List[Dict]]:
    """Fetch every episode belonging to the given series (None on failure)."""
    if APP_TYPE != "sonarr":
        logger.error("get_episodes_for_series() called but APP_TYPE is not sonarr")
        return None
    return arr_request(f"episode?seriesId={series_id}", method="GET")
+
def get_missing_episodes(pageSize: int = 1000) -> Optional[Dict]:
    """
    GET /api/v3/wanted/missing?pageSize=<pageSize>&includeSeriesInformation=true
    Returns JSON with a "records" array of missing episodes and "totalRecords",
    or None on failure.
    """
    if APP_TYPE != "sonarr":
        logger.error("get_missing_episodes() called but APP_TYPE is not sonarr")
        return None

    endpoint = f"wanted/missing?pageSize={pageSize}&includeSeriesInformation=true"
    result = arr_request(endpoint, method="GET")

    # Better debugging for missing episodes query
    if result:
        logger.debug(f"Found {result.get('totalRecords', 0)} total missing episodes")
        if result.get('records'):
            logger.debug(f"First few missing episodes: {result['records'][:2] if len(result['records']) > 2 else result['records']}")
    else:
        logger.warning("Missing episodes query returned no data")

    return result
+
def get_series_with_missing_episodes() -> List[Dict]:
    """
    Fetch all shows that have missing episodes using the wanted/missing endpoint.
    Returns a list of series objects with an additional 'missingEpisodes' field
    containing the list of missing episodes for that series, plus a
    'missingEpisodeCount' convenience field.
    """
    if APP_TYPE != "sonarr":
        logger.error("get_series_with_missing_episodes() called but APP_TYPE is not sonarr")
        return []

    # Log request attempt
    logger.debug("Requesting missing episodes from Sonarr API")

    missing_data = get_missing_episodes()
    if not missing_data or "records" not in missing_data:
        logger.error("Failed to get missing episodes data or no 'records' field in response")
        return []

    # Group missing episodes by series ID. Keyed dict so each series entry is
    # built once even though its episodes arrive interleaved.
    series_with_missing = {}
    for episode in missing_data.get("records", []):
        series_id = episode.get("seriesId")
        if not series_id:
            logger.warning(f"Found episode without seriesId: {episode}")
            continue

        series_title = None

        # Try to get series info from the episode record (present when the
        # query was made with includeSeriesInformation=true)
        if "series" in episode and isinstance(episode["series"], dict):
            series_info = episode["series"]
            series_title = series_info.get("title")

            # Initialize the series entry if it doesn't exist
            if series_id not in series_with_missing:
                series_with_missing[series_id] = {
                    "id": series_id,
                    "title": series_title or "Unknown Show",
                    "monitored": series_info.get("monitored", False),
                    "missingEpisodes": []
                }
        else:
            # If we don't have series info, need to fetch it
            if series_id not in series_with_missing:
                # Get series info directly (one extra API call per unknown series)
                series_info = arr_request(f"series/{series_id}", method="GET")
                if series_info:
                    series_with_missing[series_id] = {
                        "id": series_id,
                        "title": series_info.get("title", "Unknown Show"),
                        "monitored": series_info.get("monitored", False),
                        "missingEpisodes": []
                    }
                else:
                    logger.warning(f"Could not get series info for ID {series_id}, skipping episode")
                    continue

        # Add the episode to the series record
        if series_id in series_with_missing:
            series_with_missing[series_id]["missingEpisodes"].append(episode)

    # Convert to list and add count for convenience
    result = []
    for series_id, series_data in series_with_missing.items():
        series_data["missingEpisodeCount"] = len(series_data["missingEpisodes"])
        result.append(series_data)

    logger.debug(f"Processed missing episodes data into {len(result)} series with missing episodes")
    return result
+
def get_media_stats():
    """Get statistics for hunted and upgraded media.

    Returns:
        A Flask JSON response: {"success": True, "stats": {...}} on success,
        or ({"success": False, "message": ...}, 500) on failure.
    """
    # This module never imports Flask at the top level, so the original code
    # raised NameError on jsonify at call time (including inside the except
    # handler). Import it locally here.
    from flask import jsonify

    try:
        stats = get_stats()
        return jsonify({
            "success": True,
            "stats": stats
        })
    except Exception as e:
        logger.error(f"Error retrieving media statistics: {e}")
        return jsonify({
            "success": False,
            "message": "Error retrieving media statistics."
        }), 500
+
def reset_media_stats():
    """Reset statistics for hunted and upgraded media.

    Reads an optional 'app_type' from the JSON request body; None resets all apps.

    Returns:
        A Flask JSON response describing success, or a 500 on failure.
    """
    # This module never imports Flask at the top level, so both `request` and
    # `jsonify` were undefined names in the original code. Import them locally.
    from flask import jsonify, request

    try:
        app_type = request.json.get('app_type') if request.json else None
        reset_stats(app_type)
        return jsonify({
            "success": True,
            "message": f"Successfully reset statistics for {'all apps' if app_type is None else app_type}."
        })
    except Exception as e:
        logger.error(f"Error resetting media statistics: {e}")
        return jsonify({
            "success": False,
            "message": "Error resetting media statistics."
        }), 500
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/app.py b/Huntarr.io-6.3.6/src/primary/app.py
new file mode 100644
index 0000000..5e78d06
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/app.py
@@ -0,0 +1,129 @@
+import logging
+import json
+import pathlib
+from datetime import datetime
+import time
+
class WebAddressFilter(logging.Filter):
    """Logging filter that suppresses 'Web interface available at http://...' records."""

    def filter(self, record):
        # A falsy return value drops the record; anything truthy keeps it.
        return "Web interface available at http://" not in record.getMessage()
+
def configure_logging():
    """Configure root logging with a timezone-aware timestamp formatter and
    install WebAddressFilter on every handler to drop web-interface URL logs."""
    # Get timezone set in the environment (this will be updated when user changes the timezone in UI)
    try:
        # Create a custom formatter that includes timezone information
        class TimezoneFormatter(logging.Formatter):
            def formatTime(self, record, datefmt=None):
                ct = self.converter(record.created)
                if datefmt:
                    return time.strftime(datefmt, ct)
                else:
                    # Include timezone in the timestamp
                    # NOTE(review): %z with a struct_time is platform-dependent;
                    # verify it emits the expected offset on the target OS.
                    return time.strftime("%Y-%m-%d %H:%M:%S %z", ct)

        # Configure the formatter for all handlers
        formatter = TimezoneFormatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

        # Reset the root logger and reconfigure with proper timezone handling.
        # Iterate over a copy ([:]) because removeHandler mutates the list.
        for handler in logging.root.handlers[:]:
            logging.root.removeHandler(handler)

        logging.basicConfig(level=logging.INFO)

        # Apply the formatter to all handlers
        for handler in logging.root.handlers:
            handler.setFormatter(formatter)

    except Exception as e:
        # Fallback to basic logging if any issues
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        logging.error(f"Error setting up timezone-aware logging: {e}")

    # Add filter to remove web interface URL logs
    for handler in logging.root.handlers:
        handler.addFilter(WebAddressFilter())

    logging.info("Logging is configured.")
+
def migrate_settings():
    """Migrate settings from nested to flat structure.

    Flattens each app's 'huntarr' and 'advanced' sub-sections into the app's
    top level (existing flat keys win), and removes the obsolete 'global' and
    'ui' sections. Rewrites /config/huntarr.json only when something changed.
    """
    # Settings file path
    SETTINGS_DIR = pathlib.Path("/config")
    SETTINGS_FILE = SETTINGS_DIR / "huntarr.json"

    if not SETTINGS_FILE.exists():
        logging.info(f"Settings file {SETTINGS_FILE} does not exist, nothing to migrate.")
        return

    try:
        # Read current settings
        with open(SETTINGS_FILE, "r", encoding="utf-8") as file:
            settings = json.load(file)

        # Flag to track if changes were made
        changes_made = False

        # Check and migrate each app's settings
        for app in ["sonarr", "radarr", "lidarr", "readarr"]:
            if app in settings and "huntarr" in settings[app]:
                logging.info(f"Found nested huntarr section in {app}, migrating...")

                # Move all settings from app.huntarr to app level
                # (pre-existing flat keys are never overwritten)
                for key, value in settings[app]["huntarr"].items():
                    if key not in settings[app]:
                        settings[app][key] = value

                # Remove the huntarr section
                del settings[app]["huntarr"]
                changes_made = True

            # Check for advanced section
            if app in settings and "advanced" in settings[app]:
                logging.info(f"Found advanced section in {app}, migrating...")

                # Move all settings from app.advanced to app level
                # (pre-existing flat keys are never overwritten)
                for key, value in settings[app]["advanced"].items():
                    if key not in settings[app]:
                        settings[app][key] = value

                # Remove the advanced section
                del settings[app]["advanced"]
                changes_made = True

        # Remove global section if present
        if "global" in settings:
            logging.info("Removing global section...")
            del settings["global"]
            changes_made = True

        # Remove UI section if present
        if "ui" in settings:
            logging.info("Removing UI section...")
            del settings["ui"]
            changes_made = True

        # Save changes if needed
        if changes_made:
            with open(SETTINGS_FILE, "w", encoding="utf-8") as file:
                json.dump(settings, file, indent=2)
            logging.info("Settings migration completed successfully.")
        else:
            logging.info("No changes needed, settings are already in the correct format.")

    except Exception as e:
        logging.error(f"Error migrating settings: {e}")
+
if __name__ == "__main__":
    configure_logging()
    logging.info("Starting Huntarr application")

    # Migrate settings to flat structure
    migrate_settings()

    # Using filtered logging — the WebAddressFilter installed by
    # configure_logging() suppresses this first message.
    logging.info("Web interface available at http://localhost:8080")
    logging.info("Application started")
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/app_manager.py b/Huntarr.io-6.3.6/src/primary/app_manager.py
new file mode 100644
index 0000000..7eb80d4
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/app_manager.py
@@ -0,0 +1,41 @@
+# If this file doesn't exist, we'll create it
+
+import os
+from src.primary.utils.logger import get_logger
+from src.primary.settings_manager import load_settings
+
+logger = get_logger("app_manager")
+
+# List of supported app types
+SUPPORTED_APP_TYPES = ["sonarr", "radarr", "lidarr", "readarr", "whisparr", "eros"]
+
def initialize_apps():
    """Initialize every supported application, then load the general settings."""
    for supported_app in SUPPORTED_APP_TYPES:
        initialize_app(supported_app)

    # General settings are not a per-app config, so they are loaded
    # separately instead of going through initialize_app().
    load_general_settings()
+
def initialize_app(app_type):
    """Initialize a specific application.

    Args:
        app_type: One of SUPPORTED_APP_TYPES.

    Returns:
        False for unsupported app types, True otherwise.
    """
    if app_type not in SUPPORTED_APP_TYPES:
        logger.warning(f"Attempted to initialize unsupported app type: {app_type}")
        return False

    # Load settings for this app. The original bound the result to an unused
    # local; keep the call (in case loading has side effects) but drop the binding.
    load_settings(app_type)

    # Additional initialization as needed
    # ...

    return True
+
def load_general_settings():
    """Load the 'general' settings bucket (not a per-app configuration)."""
    general = load_settings("general")
    logger.info("--- Configuration for general ---")
    # Log the settings as needed
    # ...
    logger.info("--- End Configuration for general ---")
    return general
diff --git a/Huntarr.io-6.3.6/src/primary/apps/blueprints.py b/Huntarr.io-6.3.6/src/primary/apps/blueprints.py
new file mode 100644
index 0000000..786e29e
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/blueprints.py
@@ -0,0 +1,24 @@
+"""
+Centralized blueprint imports
+This module provides a single location to import all app blueprints
+to avoid circular import issues
+"""
+
+# Import blueprints from the renamed route files
+from src.primary.apps.sonarr_routes import sonarr_bp
+from src.primary.apps.radarr_routes import radarr_bp
+from src.primary.apps.lidarr_routes import lidarr_bp
+from src.primary.apps.readarr_routes import readarr_bp
+from src.primary.apps.whisparr_routes import whisparr_bp
+from src.primary.apps.swaparr_routes import swaparr_bp
+from src.primary.apps.eros_routes import eros_bp
+
+__all__ = [
+ "sonarr_bp",
+ "radarr_bp",
+ "lidarr_bp",
+ "readarr_bp",
+ "whisparr_bp",
+ "swaparr_bp",
+ "eros_bp"
+]
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/eros.py b/Huntarr.io-6.3.6/src/primary/apps/eros.py
new file mode 100644
index 0000000..9bbd866
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/eros.py
@@ -0,0 +1,171 @@
+from flask import Blueprint, request, jsonify
+import datetime, os, requests
+from primary import keys_manager
+from src.primary.utils.logger import get_logger
+from src.primary.state import get_state_file_path
+from src.primary.settings_manager import load_settings, settings_manager
+
+eros_bp = Blueprint('eros', __name__)
+eros_logger = get_logger("eros")
+
+# Make sure we're using the correct state files
+PROCESSED_MISSING_FILE = get_state_file_path("eros", "processed_missing")
+PROCESSED_UPGRADES_FILE = get_state_file_path("eros", "processed_upgrades")
+
@eros_bp.route('/test-connection', methods=['POST'])
def test_connection():
    """Test connection to an Eros API instance with comprehensive diagnostics.

    Expects a JSON body with 'api_url' and 'api_key' (optional 'api_timeout').
    Tries several known API path layouts and verifies the reported version is
    V3 (Eros); V2 servers are rejected with a pointer to the Whisparr integration.
    """
    # get_json(silent=True) returns None instead of raising when the body is
    # missing or not JSON; the original `request.json` could be None, making
    # `.get` raise AttributeError (HTTP 500) instead of the intended 400.
    data = request.get_json(silent=True) or {}
    api_url = data.get('api_url')
    api_key = data.get('api_key')
    api_timeout = data.get('api_timeout', 30)  # Use longer timeout for connection test

    if not api_url or not api_key:
        return jsonify({"success": False, "message": "API URL and API Key are required"}), 400

    # Log the test attempt
    eros_logger.info(f"Testing connection to Eros API at {api_url}")

    # First check if URL is properly formatted
    if not (api_url.startswith('http://') or api_url.startswith('https://')):
        error_msg = "API URL must start with http:// or https://"
        eros_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 400

    # Try multiple API path combinations to handle different Whisparr V3/Eros setups
    api_paths = [
        "/api/v3/system/status",  # Standard V3 path
        "/api/system/status",     # Standard V2 path that might still work
        "/system/status"          # Direct path without /api prefix
    ]

    success = False
    last_error = None
    response_data = None

    for api_path in api_paths:
        test_url = f"{api_url.rstrip('/')}{api_path}"
        headers = {'X-Api-Key': api_key}
        eros_logger.debug(f"Trying Eros API path: {test_url}")

        try:
            # Use a connection timeout separate from read timeout
            response = requests.get(test_url, headers=headers, timeout=(10, api_timeout))

            # Log HTTP status code for diagnostic purposes
            eros_logger.debug(f"Eros API status code: {response.status_code} for path {api_path}")

            # Check HTTP status code
            if response.status_code == 404:
                # Try next path if 404
                continue

            response.raise_for_status()

            # Ensure the response is valid JSON
            try:
                response_data = response.json()
                eros_logger.debug(f"Eros API response: {response_data}")

                # Verify this is actually an Eros API by checking for version
                version = response_data.get('version', None)
                if not version:
                    # No version info, try next path
                    last_error = "API response doesn't contain version information"
                    continue

                # The version number should start with 3 for Eros
                if version.startswith('3'):
                    eros_logger.info(f"Successfully connected to Eros API version {version} using path {api_path}")
                    success = True
                    break
                elif version.startswith('2'):
                    error_msg = f"Connected to Whisparr V2 (version {version}). Use the Whisparr integration for V2."
                    eros_logger.error(error_msg)
                    return jsonify({"success": False, "message": error_msg}), 400
                else:
                    # Connected to some other version, try next path
                    last_error = f"Connected to unknown version {version}, but Huntarr requires Eros V3"
                    continue

            except ValueError:
                last_error = "Invalid JSON response from API"
                continue

        except requests.exceptions.Timeout:
            last_error = f"Connection timed out after {api_timeout} seconds"
            continue

        except requests.exceptions.ConnectionError:
            last_error = "Failed to connect. Check that the URL is correct and that Eros is running."
            continue

        except requests.exceptions.HTTPError as e:
            last_error = f"HTTP error: {str(e)}"
            continue

        except Exception as e:
            last_error = f"Unexpected error: {str(e)}"
            continue

    # After trying all paths
    if success:
        return jsonify({
            "success": True,
            "message": f"Successfully connected to Eros (version {response_data.get('version')})",
            "version": response_data.get('version')
        })
    else:
        error_msg = last_error or "Failed to connect to Eros API. Please check your URL and API key."
        eros_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 400
+
+# Function to check if Eros is configured
def is_configured():
    """Check if Eros API credentials are configured.

    True when at least one instance in the 'eros' settings is not
    explicitly disabled; False otherwise or on any error.
    """
    try:
        settings = load_settings("eros")
        return any(
            instance.get("enabled", True)
            for instance in settings.get("instances", [])
        )
    except Exception as e:
        eros_logger.error(f"Error checking if Eros is configured: {str(e)}")
        return False
+
+# Get all valid instances from settings
def get_configured_instances():
    """Get all configured and enabled Eros instances.

    Returns a list of dicts with api_url, api_key, instance_name and
    api_timeout; instances that are disabled or missing credentials are
    skipped. Returns [] on any error.
    """
    try:
        settings = load_settings("eros")
        configured = []

        for entry in settings.get("instances", []):
            # Skip explicitly disabled instances.
            if not entry.get("enabled", True):
                continue

            url = entry.get("api_url")
            key = entry.get("api_key")
            if not url or not key:
                continue

            configured.append({
                "api_url": url,
                "api_key": key,
                "instance_name": entry.get("name", "Default"),
                # NOTE(review): default timeout is 90 here but 30 in
                # apps/eros/__init__.py — confirm which value is intended.
                "api_timeout": entry.get("api_timeout", 90),
            })

        return configured
    except Exception as e:
        eros_logger.error(f"Error getting configured Eros instances: {str(e)}")
        return []
diff --git a/Huntarr.io-6.3.6/src/primary/apps/eros/__init__.py b/Huntarr.io-6.3.6/src/primary/apps/eros/__init__.py
new file mode 100644
index 0000000..896c14a
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/eros/__init__.py
@@ -0,0 +1,95 @@
+"""
+Eros app module for Huntarr
+Contains functionality for missing items and quality upgrades in Eros
+
+Exclusively supports the v3 API.
+"""
+
+# Module exports
+from src.primary.apps.eros.missing import process_missing_items
+from src.primary.apps.eros.upgrade import process_cutoff_upgrades
+from src.primary.settings_manager import load_settings
+from src.primary.utils.logger import get_logger
+
+# Define logger for this module
+eros_logger = get_logger("eros")
+
+# For backward compatibility
+process_missing_scenes = process_missing_items
+
def get_configured_instances():
    """Get all configured and enabled Eros instances.

    Each returned item is a copy of the global Eros settings with the
    instance-specific api_url, api_key and instance_name merged in (and the
    'instances' list removed), plus a default api_timeout of 30 when unset.
    """
    settings = load_settings("eros")
    instances = []
    # Use debug level to avoid log spam on new installations
    eros_logger.debug(f"Loaded Eros settings for instance check: {settings}")

    if not settings:
        eros_logger.debug("No settings found for Eros")
        return instances

    # Always use Eros V3 API
    # Use debug level to avoid log spam on new installations
    eros_logger.debug("Using Eros API v3 exclusively")

    # Check if instances are configured
    if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]:
        # Use debug level to avoid log spam on new installations
        eros_logger.debug(f"Found 'instances' list with {len(settings['instances'])} items. Processing...")
        for idx, instance in enumerate(settings["instances"]):
            eros_logger.debug(f"Checking instance #{idx}: {instance}")
            # Enhanced validation
            api_url = instance.get("api_url", "").strip()
            api_key = instance.get("api_key", "").strip()

            # Enhanced URL validation - ensure URL has proper scheme
            if api_url and not (api_url.startswith('http://') or api_url.startswith('https://')):
                eros_logger.warning(f"Instance '{instance.get('name', 'Unnamed')}' has URL without http(s) scheme: {api_url}")
                api_url = f"http://{api_url}"
                eros_logger.warning(f"Auto-correcting URL to: {api_url}")

            is_enabled = instance.get("enabled", True)

            # Only include properly configured instances
            if is_enabled and api_url and api_key:
                instance_name = instance.get("name", "Default")

                # Create a settings object for this instance by combining global settings with instance-specific ones.
                # Shallow copy: nested values are shared with the global settings dict.
                instance_settings = settings.copy()

                # Remove instances list to avoid confusion
                if "instances" in instance_settings:
                    del instance_settings["instances"]

                # Override with instance-specific settings
                instance_settings["api_url"] = api_url
                instance_settings["api_key"] = api_key
                instance_settings["instance_name"] = instance_name

                # Add timeout setting with default if not present
                if "api_timeout" not in instance_settings:
                    instance_settings["api_timeout"] = 30

                # Use debug level to prevent log spam
                eros_logger.debug(f"Adding configured Eros instance: {instance_name}")
                instances.append(instance_settings)
            else:
                name = instance.get("name", "Unnamed")
                if not is_enabled:
                    eros_logger.debug(f"Skipping disabled instance: {name}")
                else:
                    # For brand new installations, don't spam logs with warnings about default instances
                    if name == 'Default':
                        # Use debug level for default instances to avoid log spam on new installations
                        eros_logger.debug(f"Skipping instance {name} due to missing API URL or API Key")
                    else:
                        # Still log warnings for non-default instances
                        eros_logger.warning(f"Skipping instance {name} due to missing API URL or API Key")
    else:
        eros_logger.debug("No instances array found in settings or it's empty")

    # Use debug level to avoid spamming logs, especially with 0 instances
    eros_logger.debug(f"Found {len(instances)} configured and enabled Eros instances")
    return instances
+
+__all__ = ["process_missing_items", "process_missing_scenes", "process_cutoff_upgrades", "get_configured_instances"]
diff --git a/Huntarr.io-6.3.6/src/primary/apps/eros/api.py b/Huntarr.io-6.3.6/src/primary/apps/eros/api.py
new file mode 100644
index 0000000..b403bda
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/eros/api.py
@@ -0,0 +1,517 @@
+#!/usr/bin/env python3
+"""
+Eros-specific API functions
+Handles all communication with the Eros API
+
+Exclusively uses the Eros API v3
+"""
+
+import requests
+import json
+import time
+import datetime
+import traceback
+import sys
+from typing import List, Dict, Any, Optional, Union
+from src.primary.utils.logger import get_logger
+
+# Get logger for the Eros app
+eros_logger = get_logger("eros")
+
+# Reuse a single HTTP session so connections are pooled across API calls
+session = requests.Session()
+
+def arr_request(api_url: str, api_key: str, api_timeout: int, endpoint: str, method: str = "GET", data: Dict = None) -> Any:
+ """
+ Make a request to the Eros API.
+
+ Args:
+ api_url: The base URL of the Eros API
+ api_key: The API key for authentication
+ api_timeout: Timeout for the API request
+ endpoint: The API endpoint to call
+ method: HTTP method (GET, POST, PUT, DELETE)
+ data: Optional data to send with the request
+
+ Returns:
+ The JSON response from the API, or None if the request failed
+ """
+ if not api_url or not api_key:
+ eros_logger.error("API URL or API key is missing. Check your settings.")
+ return None
+
+ # Always use v3 API path
+ api_base = "api/v3"
+ eros_logger.debug(f"Using Eros API path: {api_base}")
+
+ # Full URL - ensure no double slashes
+ url = f"{api_url.rstrip('/')}/{api_base}/{endpoint.lstrip('/')}"
+
+ # Add debug logging for the exact URL being called
+ eros_logger.debug(f"Making {method} request to: {url}")
+
+ # Headers
+ headers = {
+ "X-Api-Key": api_key,
+ "Content-Type": "application/json"
+ }
+
+ try:
+ if method == "GET":
+ response = session.get(url, headers=headers, timeout=api_timeout)
+ elif method == "POST":
+ response = session.post(url, headers=headers, json=data, timeout=api_timeout)
+ elif method == "PUT":
+ response = session.put(url, headers=headers, json=data, timeout=api_timeout)
+ elif method == "DELETE":
+ response = session.delete(url, headers=headers, timeout=api_timeout)
+ else:
+ eros_logger.error(f"Unsupported HTTP method: {method}")
+ return None
+
+ # Check if the request was successful
+ try:
+ response.raise_for_status()
+ except requests.exceptions.HTTPError as e:
+ eros_logger.error(f"Error during {method} request to {endpoint}: {e}, Status Code: {response.status_code}")
+ eros_logger.debug(f"Response content: {response.text[:200]}")
+ return None
+
+ # Try to parse JSON response
+ try:
+ if response.text:
+ result = response.json()
+ eros_logger.debug(f"Response from {response.url}: Status {response.status_code}, JSON parsed successfully")
+ return result
+ else:
+ eros_logger.debug(f"Response from {response.url}: Status {response.status_code}, Empty response")
+ return {}
+ except json.JSONDecodeError:
+ eros_logger.error(f"Invalid JSON response from API: {response.text[:200]}")
+ return None
+
+ except requests.exceptions.RequestException as e:
+ eros_logger.error(f"Request failed: {e}")
+ return None
+ except Exception as e:
+ eros_logger.error(f"Unexpected error during API request: {e}")
+ return None
+
+def get_download_queue_size(api_url: str, api_key: str, api_timeout: int) -> int:
+ """
+ Get the current size of the download queue.
+
+ Args:
+ api_url: The base URL of the Eros API
+ api_key: The API key for authentication
+ api_timeout: Timeout for the API request
+
+ Returns:
+ The number of items in the download queue, or -1 if the request failed
+ """
+ response = arr_request(api_url, api_key, api_timeout, "queue")
+
+ if response is None:
+ return -1
+
+ # V3 API returns a list directly
+ if isinstance(response, list):
+ return len(response)
+ # Fallback to records format if needed
+ elif isinstance(response, dict) and "records" in response:
+ return len(response["records"])
+ else:
+ return -1
+
+def get_items_with_missing(api_url: str, api_key: str, api_timeout: int, monitored_only: bool, search_mode: str = "movie") -> List[Dict[str, Any]]:
+ """
+ Get a list of items with missing files (not downloaded/available).
+
+ Args:
+ api_url: The base URL of the Eros API
+ api_key: The API key for authentication
+ api_timeout: Timeout for the API request
+ monitored_only: If True, only return monitored items.
+ search_mode: The search mode to use - 'movie' for movie-based or 'scene' for scene-based
+
+ Returns:
+ A list of item objects with missing files, or None if the request failed.
+ """
+ try:
+ eros_logger.debug(f"Retrieving missing items using search mode: {search_mode}...")
+
+ if search_mode == "movie":
+ # In movie mode, we get all movies and filter for ones without files
+ endpoint = "movie"
+
+ response = arr_request(api_url, api_key, api_timeout, endpoint)
+
+ if response is None:
+ return None
+
+ # Extract the movies with missing files
+ items = []
+ if isinstance(response, list):
+ # Filter for movies that don't have files (hasFile = false)
+ items = [item for item in response if not item.get("hasFile", True)]
+ elif isinstance(response, dict) and "records" in response:
+ # Fallback to old format if somehow it returns in this format
+ items = [item for item in response["records"] if not item.get("hasFile", True)]
+
+ elif search_mode == "scene":
+ # In scene mode, we try to use scene-specific endpoints
+ # First check if the movie-scene endpoint exists
+ endpoint = "scene/missing?pageSize=1000"
+
+ response = arr_request(api_url, api_key, api_timeout, endpoint)
+
+ if response is None:
+ # Fallback to regular movie filtering if scene endpoint doesn't exist
+ eros_logger.warning("Scene endpoint not available, falling back to movie mode")
+ return get_items_with_missing(api_url, api_key, api_timeout, monitored_only, "movie")
+
+ # Extract the scenes
+ items = []
+ if isinstance(response, dict) and "records" in response:
+ items = response["records"]
+ elif isinstance(response, list):
+ items = response
+
+ else:
+ # Invalid search mode
+ eros_logger.error(f"Invalid search mode: {search_mode}. Must be 'movie' or 'scene'")
+ return None
+
+ # Filter monitored if needed
+ if monitored_only:
+ items = [item for item in items if item.get("monitored", False)]
+
+ eros_logger.debug(f"Found {len(items)} missing items using {search_mode} mode")
+
+ return items
+
+ except Exception as e:
+ eros_logger.error(f"Error retrieving missing items: {str(e)}")
+ return None
+
+def get_cutoff_unmet_items(api_url: str, api_key: str, api_timeout: int, monitored_only: bool) -> List[Dict[str, Any]]:
+ """
+ Get a list of items that don't meet their quality profile cutoff.
+
+ Args:
+ api_url: The base URL of the Eros API
+ api_key: The API key for authentication
+ api_timeout: Timeout for the API request
+ monitored_only: If True, only return monitored items.
+
+ Returns:
+ A list of item objects that need quality upgrades, or None if the request failed.
+ """
+ try:
+ eros_logger.debug(f"Retrieving cutoff unmet items...")
+
+ # Endpoint
+ endpoint = "wanted/cutoff?pageSize=1000&sortKey=airDateUtc&sortDirection=descending"
+
+ response = arr_request(api_url, api_key, api_timeout, endpoint)
+
+ if response is None:
+ return None
+
+ # Extract the episodes/items
+ items = []
+ if isinstance(response, dict) and "records" in response:
+ items = response["records"]
+ elif isinstance(response, list):
+ items = response
+
+ eros_logger.debug(f"Found {len(items)} cutoff unmet items")
+
+ # Just filter monitored if needed
+ if monitored_only:
+ items = [item for item in items if item.get("monitored", False)]
+ eros_logger.debug(f"Found {len(items)} cutoff unmet items after filtering monitored")
+
+ return items
+
+ except Exception as e:
+ eros_logger.error(f"Error retrieving cutoff unmet items: {str(e)}")
+ return None
+
+def get_quality_upgrades(api_url: str, api_key: str, api_timeout: int, monitored_only: bool, search_mode: str = "movie") -> List[Dict[str, Any]]:
+ """
+ Get a list of items that can be upgraded to better quality.
+
+ Args:
+ api_url: The base URL of the Eros API
+ api_key: The API key for authentication
+ api_timeout: Timeout for the API request
+ monitored_only: If True, only return monitored items.
+ search_mode: The search mode to use - 'movie' for movie-based or 'scene' for scene-based
+
+ Returns:
+ A list of item objects that need quality upgrades, or None if the request failed.
+ """
+ try:
+ eros_logger.debug(f"Retrieving quality upgrade items using search mode: {search_mode}...")
+
+ if search_mode == "movie":
+ # In movie mode, we get all movies and filter for ones that have files but need quality upgrades
+ endpoint = "movie"
+
+ response = arr_request(api_url, api_key, api_timeout, endpoint)
+
+ if response is None:
+ return None
+
+ # Extract movies that have files but need quality upgrades
+ items = []
+ if isinstance(response, list):
+ # Filter for movies that have files but haven't met quality cutoff
+ items = [item for item in response if item.get("hasFile", False) and item.get("qualityCutoffNotMet", False)]
+ elif isinstance(response, dict) and "records" in response:
+ # Fallback to old format if somehow it returns in this format
+ items = [item for item in response["records"] if item.get("hasFile", False) and item.get("qualityCutoffNotMet", False)]
+
+ elif search_mode == "scene":
+ # In scene mode, try to use scene-specific endpoints
+ endpoint = "scene/cutoff?pageSize=1000"
+
+ response = arr_request(api_url, api_key, api_timeout, endpoint)
+
+ if response is None:
+ # Fallback to regular movie filtering if scene endpoint doesn't exist
+ eros_logger.warning("Scene cutoff endpoint not available, falling back to movie mode")
+ return get_quality_upgrades(api_url, api_key, api_timeout, monitored_only, "movie")
+
+ # Extract the scenes
+ items = []
+ if isinstance(response, dict) and "records" in response:
+ items = response["records"]
+ elif isinstance(response, list):
+ items = response
+
+ else:
+ # Invalid search mode
+ eros_logger.error(f"Invalid search mode: {search_mode}. Must be 'movie' or 'scene'")
+ return None
+
+ # Filter monitored if needed
+ if monitored_only:
+ items = [item for item in items if item.get("monitored", False)]
+
+ eros_logger.debug(f"Found {len(items)} quality upgrade items using {search_mode} mode")
+
+ return items
+
+ except Exception as e:
+ eros_logger.error(f"Error retrieving quality upgrade items: {str(e)}")
+ return None
+
+def refresh_item(api_url: str, api_key: str, api_timeout: int, item_id: int) -> int:
+ """
+ Refresh a movie in Whisparr V3.
+
+ Args:
+ api_url: The base URL of the Whisparr V3 API
+ api_key: The API key for authentication
+ api_timeout: Timeout for the API request
+ item_id: The ID of the movie to refresh
+
+ Returns:
+ The command ID if the refresh was triggered successfully, None otherwise
+ """
+ try:
+ eros_logger.info(f"Explicitly refreshing movie with ID {item_id} via API call")
+
+ # In Whisparr V3, we use RefreshMovie command directly with the movieId
+ payload = {
+ "name": "RefreshMovie",
+ "movieId": item_id
+ }
+
+ # Command endpoint
+ command_endpoint = "command"
+
+ # Make the API request
+ response = arr_request(api_url, api_key, api_timeout, command_endpoint, "POST", payload)
+
+ if response and "id" in response:
+ command_id = response["id"]
+ eros_logger.info(f"Refresh movie command triggered with ID {command_id} for movie {item_id}")
+ return command_id
+ else:
+ eros_logger.error(f"Failed to trigger refresh command for movie {item_id} - no command ID returned")
+ return None
+
+ except Exception as e:
+ eros_logger.error(f"Error refreshing movie {item_id}: {str(e)}")
+ return None
+
+def item_search(api_url: str, api_key: str, api_timeout: int, item_ids: List[int]) -> int:
+ """
+ Trigger a search for one or more movies in Whisparr V3.
+
+ Args:
+ api_url: The base URL of the Whisparr V3 API
+ api_key: The API key for authentication
+ api_timeout: Timeout for the API request
+ item_ids: A list of movie IDs to search for
+
+ Returns:
+ The command ID if the search command was triggered successfully, None otherwise
+ """
+ try:
+ if not item_ids:
+ eros_logger.warning("No movie IDs provided for search.")
+ return None
+
+ eros_logger.debug(f"Searching for movies with IDs: {item_ids}")
+
+ # Try several possible command formats, as the API might be in flux
+ possible_commands = [
+ # Format 1: MoviesSearch with integer IDs (Radarr-like) and no auto-refresh
+ {
+ "name": "MoviesSearch",
+ "movieIds": item_ids,
+ "updateScheduledTask": False,
+ "runRefreshAfterSearch": False,
+ "sendUpdatesToClient": False
+ },
+ # Format 2: MovieSearch with integer IDs and no auto-refresh
+ {
+ "name": "MovieSearch",
+ "movieIds": item_ids,
+ "updateScheduledTask": False,
+ "runRefreshAfterSearch": False,
+ "sendUpdatesToClient": False
+ },
+ # Format 3: MoviesSearch with string IDs and no auto-refresh
+ {
+ "name": "MoviesSearch",
+ "movieIds": [str(id) for id in item_ids],
+ "updateScheduledTask": False,
+ "runRefreshAfterSearch": False,
+ "sendUpdatesToClient": False
+ },
+ # Format 4: MovieSearch with string IDs and no auto-refresh
+ {
+ "name": "MovieSearch",
+ "movieIds": [str(id) for id in item_ids],
+ "updateScheduledTask": False,
+ "runRefreshAfterSearch": False,
+ "sendUpdatesToClient": False
+ },
+ # Fallback to original formats if the above don't work
+ {
+ "name": "MoviesSearch",
+ "movieIds": item_ids
+ },
+ {
+ "name": "MovieSearch",
+ "movieIds": item_ids
+ },
+ {
+ "name": "MoviesSearch",
+ "movieIds": [str(id) for id in item_ids]
+ },
+ {
+ "name": "MovieSearch",
+ "movieIds": [str(id) for id in item_ids]
+ }
+ ]
+
+ # Command endpoint
+ command_endpoint = "command"
+
+ # Try each command format until one works
+ for i, payload in enumerate(possible_commands):
+ eros_logger.debug(f"Trying search command format {i+1}: {payload}")
+
+ # Make the API request
+ response = arr_request(api_url, api_key, api_timeout, command_endpoint, "POST", payload)
+
+ if response and "id" in response:
+ command_id = response["id"]
+ eros_logger.debug(f"Search command format {i+1} succeeded with ID {command_id}")
+ return command_id
+
+ # If we've tried all formats and none worked:
+ eros_logger.error("All search command formats failed - no command ID returned")
+ return None
+
+ except Exception as e:
+ eros_logger.error(f"Error searching for movies: {str(e)}")
+ return None
+
+def get_command_status(api_url: str, api_key: str, api_timeout: int, command_id: int) -> Optional[Dict]:
+ """
+ Get the status of a specific command.
+
+ Args:
+ api_url: The base URL of the Eros API
+ api_key: The API key for authentication
+ api_timeout: Timeout for the API request
+ command_id: The ID of the command to check
+
+ Returns:
+ A dictionary containing the command status, or None if the request failed.
+ """
+ if not command_id:
+ eros_logger.error("No command ID provided for status check.")
+ return None
+
+ try:
+ command_endpoint = f"command/{command_id}"
+
+ # Make the API request
+ result = arr_request(api_url, api_key, api_timeout, command_endpoint)
+
+ if result:
+ eros_logger.debug(f"Command {command_id} status: {result.get('status', 'unknown')}")
+ return result
+ else:
+ eros_logger.error(f"Failed to get command status for ID {command_id}")
+ return None
+
+ except Exception as e:
+ eros_logger.error(f"Error getting command status for ID {command_id}: {e}")
+ return None
+
+def check_connection(api_url: str, api_key: str, api_timeout: int) -> bool:
+ """
+ Check the connection to Whisparr V3 API.
+
+ Args:
+ api_url: The base URL of the Whisparr V3 API
+ api_key: The API key for authentication
+ api_timeout: Timeout for the API request
+
+ Returns:
+ True if the connection is successful, False otherwise
+ """
+ try:
+ eros_logger.debug(f"Checking connection to Whisparr V3 instance at {api_url}")
+
+ endpoint = "system/status"
+ response = arr_request(api_url, api_key, api_timeout, endpoint)
+
+ if response is not None:
+ # Get the version information if available
+ version = response.get("version", "unknown")
+
+ # Simply check if we received a valid response - Whisparr V3 is in development
+ # so the version number might be in various formats
+ if version and isinstance(version, str):
+ eros_logger.info(f"Successfully connected to Whisparr V3 API, reported version: {version}")
+ return True
+ else:
+ eros_logger.warning(f"Connected to server but found unexpected version format: {version}")
+ return False
+ else:
+ eros_logger.error("Failed to connect to Whisparr V3 API")
+ return False
+
+ except Exception as e:
+ eros_logger.error(f"Error checking connection to Whisparr V3 API: {str(e)}")
+ return False
diff --git a/Huntarr.io-6.3.6/src/primary/apps/eros/missing.py b/Huntarr.io-6.3.6/src/primary/apps/eros/missing.py
new file mode 100644
index 0000000..cb87dcc
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/eros/missing.py
@@ -0,0 +1,245 @@
+#!/usr/bin/env python3
+"""
+Missing Items Processing for Eros
+Handles searching for missing items in Eros
+
+Exclusively supports the v3 API.
+"""
+
+import time
+import random
+import datetime
+from typing import List, Dict, Any, Set, Callable
+from src.primary.utils.logger import get_logger
+from src.primary.apps.eros import api as eros_api
+from src.primary.settings_manager import load_settings, get_advanced_setting
+from src.primary.stateful_manager import is_processed, add_processed_id
+from src.primary.stats_manager import increment_stat
+from src.primary.utils.history_utils import log_processed_media
+from src.primary.state import check_state_reset
+
+# Get logger for the app
+eros_logger = get_logger("eros")
+
+def process_missing_items(
+ app_settings: Dict[str, Any],
+ stop_check: Callable[[], bool] # Function to check if stop is requested
+) -> bool:
+ """
+ Process missing items in Eros based on provided settings.
+
+ Args:
+ app_settings: Dictionary containing all settings for Eros
+ stop_check: A function that returns True if the process should stop
+
+ Returns:
+ True if any items were processed, False otherwise.
+ """
+ eros_logger.info("Starting missing items processing cycle for Eros.")
+ processed_any = False
+
+ # Reset state files if enough time has passed
+ check_state_reset("eros")
+
+ # Extract necessary settings
+ api_url = app_settings.get("api_url", "").strip()
+ api_key = app_settings.get("api_key", "").strip()
+ api_timeout = get_advanced_setting("api_timeout", 120) # Use general.json value
+ instance_name = app_settings.get("instance_name", "Eros Default")
+
+ # Load general settings to get centralized timeout
+ general_settings = load_settings('general')
+
+ monitored_only = app_settings.get("monitored_only", True)
+ skip_future_releases = app_settings.get("skip_future_releases", True)
+ skip_item_refresh = app_settings.get("skip_item_refresh", False)
+ eros_logger.info(f"Skip item refresh setting: {skip_item_refresh}")
+ search_mode = app_settings.get("search_mode", "movie") # Default to movie mode if not specified
+
+ eros_logger.info(f"Using search mode: {search_mode} for missing items")
+
+ # Use the new hunt_missing_items parameter name, falling back to hunt_missing_scenes for backwards compatibility
+ hunt_missing_items = app_settings.get("hunt_missing_items", app_settings.get("hunt_missing_scenes", 0))
+
+ # Use advanced settings from general.json for command operations
+ command_wait_delay = get_advanced_setting("command_wait_delay", 1)
+ command_wait_attempts = get_advanced_setting("command_wait_attempts", 600)
+
+ # Use the centralized advanced setting for stateful management hours
+ stateful_management_hours = get_advanced_setting("stateful_management_hours", 168)
+
+ # Log that we're using Eros v3 API
+ eros_logger.info(f"Using Eros API v3 for instance: {instance_name}")
+
+ # Skip if hunt_missing_items is set to a negative value or 0
+ if hunt_missing_items <= 0:
+ eros_logger.info("'hunt_missing_items' setting is 0 or less. Skipping missing item processing.")
+ return False
+
+ # Check for stop signal
+ if stop_check():
+ eros_logger.info("Stop requested before starting missing items. Aborting...")
+ return False
+
+ # Get missing items
+ eros_logger.info(f"Retrieving items with missing files...")
+ missing_items = eros_api.get_items_with_missing(api_url, api_key, api_timeout, monitored_only, search_mode)
+
+ if missing_items is None: # API call failed
+ eros_logger.error("Failed to retrieve missing items from Eros API.")
+ return False
+
+ if not missing_items:
+ eros_logger.info("No missing items found.")
+ return False
+
+ # Check for stop signal after retrieving items
+ if stop_check():
+ eros_logger.info("Stop requested after retrieving missing items. Aborting...")
+ return False
+
+ eros_logger.info(f"Found {len(missing_items)} items with missing files.")
+
+ # Filter out future releases if configured
+ if skip_future_releases:
+ now = datetime.datetime.now(datetime.timezone.utc)
+ original_count = len(missing_items)
+ # Eros item object has 'airDateUtc' for release dates
+ missing_items = [
+ item for item in missing_items
+ if not item.get('airDateUtc') or (
+ item.get('airDateUtc') and
+ datetime.datetime.fromisoformat(item['airDateUtc'].replace('Z', '+00:00')) < now
+ )
+ ]
+ skipped_count = original_count - len(missing_items)
+ if skipped_count > 0:
+ eros_logger.info(f"Skipped {skipped_count} future item releases based on air date.")
+
+ if not missing_items:
+ eros_logger.info("No missing items left to process after filtering future releases.")
+ return False
+
+ # Filter out already processed items using stateful management
+ unprocessed_items = []
+ for item in missing_items:
+ item_id = str(item.get("id"))
+ if not is_processed("eros", instance_name, item_id):
+ unprocessed_items.append(item)
+ else:
+ eros_logger.debug(f"Skipping already processed item ID: {item_id}")
+
+ eros_logger.info(f"Found {len(unprocessed_items)} unprocessed items out of {len(missing_items)} total items with missing files.")
+
+ if not unprocessed_items:
+ eros_logger.info(f"No unprocessed items found for {instance_name}. All available items have been processed.")
+ return False
+
+ items_processed = 0
+ processing_done = False
+
+ # Select items to search based on configuration
+ eros_logger.info(f"Randomly selecting up to {hunt_missing_items} missing items.")
+ items_to_search = random.sample(unprocessed_items, min(len(unprocessed_items), hunt_missing_items))
+
+ eros_logger.info(f"Selected {len(items_to_search)} missing items to search.")
+
+ # Process selected items
+ for item in items_to_search:
+ # Check for stop signal before each item
+ if stop_check():
+ eros_logger.info("Stop requested during item processing. Aborting...")
+ break
+
+ # Re-check limit in case it changed
+ current_limit = app_settings.get("hunt_missing_items", app_settings.get("hunt_missing_scenes", 1))
+ if items_processed >= current_limit:
+ eros_logger.info(f"Reached HUNT_MISSING_ITEMS limit ({current_limit}) for this cycle.")
+ break
+
+ item_id = item.get("id")
+ title = item.get("title", "Unknown Title")
+
+ # For movies, we don't use season/episode format
+ if search_mode == "movie":
+ item_info = title
+ else:
+ # If somehow using scene mode, try to format as S/E if available
+ season_number = item.get('seasonNumber')
+ episode_number = item.get('episodeNumber')
+ if season_number is not None and episode_number is not None:
+ season_episode = f"S{season_number:02d}E{episode_number:02d}"
+ item_info = f"{title} - {season_episode}"
+ else:
+ item_info = title
+
+ eros_logger.info(f"Processing missing item: \"{item_info}\" (Item ID: {item_id})")
+
+ # Mark the item as processed BEFORE triggering any searches
+ add_processed_id("eros", instance_name, str(item_id))
+ eros_logger.debug(f"Added item ID {item_id} to processed list for {instance_name}")
+
+ # Refresh the item information if not skipped
+ refresh_command_id = None
+ if not skip_item_refresh:
+ eros_logger.info(" - Refreshing item information...")
+ refresh_command_id = eros_api.refresh_item(api_url, api_key, api_timeout, item_id)
+ if refresh_command_id:
+ eros_logger.info(f"Triggered refresh command {refresh_command_id}. Waiting a few seconds...")
+ time.sleep(5) # Basic wait
+ else:
+ eros_logger.warning(f"Failed to trigger refresh command for item ID: {item_id}. Proceeding without refresh.")
+ else:
+ eros_logger.info(" - Skipping item refresh (skip_item_refresh=true)")
+
+ # Check for stop signal before searching
+ if stop_check():
+ eros_logger.info(f"Stop requested before searching for {title}. Aborting...")
+ break
+
+ # Search for the item
+ eros_logger.info(" - Searching for missing item...")
+ search_command_id = eros_api.item_search(api_url, api_key, api_timeout, [item_id])
+ if search_command_id:
+ eros_logger.info(f"Triggered search command {search_command_id}. Assuming success for now.")
+
+ # Log to history system
+ log_processed_media("eros", item_info, item_id, instance_name, "missing")
+ eros_logger.debug(f"Logged history entry for item: {item_info}")
+
+ items_processed += 1
+ processing_done = True
+
+ # Increment the hunted statistics for Eros
+ increment_stat("eros", "hunted", 1)
+ eros_logger.debug(f"Incremented eros hunted statistics by 1")
+
+ # Log progress
+ current_limit = app_settings.get("hunt_missing_items", app_settings.get("hunt_missing_scenes", 1))
+ eros_logger.info(f"Processed {items_processed}/{current_limit} missing items this cycle.")
+ else:
+ eros_logger.warning(f"Failed to trigger search command for item ID {item_id}.")
+ # Do not mark as processed if search couldn't be triggered
+ continue
+
+ # Log final status
+ if items_processed > 0:
+ eros_logger.info(f"Completed processing {items_processed} missing items for this cycle.")
+ else:
+ eros_logger.info("No new missing items were processed in this run.")
+
+ return processing_done
+
+# For backward compatibility with the background processing system
+def process_missing_scenes(app_settings, stop_check):
+ """
+ Backwards compatibility function that calls process_missing_items.
+
+ Args:
+ app_settings: Dictionary containing all settings for Eros
+ stop_check: A function that returns True if the process should stop
+
+ Returns:
+ Result from process_missing_items
+ """
+ return process_missing_items(app_settings, stop_check)
diff --git a/Huntarr.io-6.3.6/src/primary/apps/eros/upgrade.py b/Huntarr.io-6.3.6/src/primary/apps/eros/upgrade.py
new file mode 100644
index 0000000..df1be22
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/eros/upgrade.py
@@ -0,0 +1,209 @@
+#!/usr/bin/env python3
+"""
+Quality Upgrade Processing for Eros
+Handles searching for items that need quality upgrades in Eros
+
+Exclusively supports the v3 API.
+"""
+
+import time
+import random
+import datetime
+from typing import List, Dict, Any, Set, Callable
+from src.primary.utils.logger import get_logger
+from src.primary.apps.eros import api as eros_api
+from src.primary.settings_manager import load_settings, get_advanced_setting
+from src.primary.stateful_manager import is_processed, add_processed_id
+from src.primary.stats_manager import increment_stat
+from src.primary.utils.history_utils import log_processed_media
+from src.primary.state import check_state_reset
+
+# Get logger for the app
+eros_logger = get_logger("eros")
+
+def process_cutoff_upgrades(
+ app_settings: Dict[str, Any],
+ stop_check: Callable[[], bool] # Function to check if stop is requested
+) -> bool:
+ """
+ Process quality cutoff upgrades for Eros based on settings.
+
+ Args:
+ app_settings: Dictionary containing all settings for Eros
+ stop_check: A function that returns True if the process should stop
+
+ Returns:
+ True if any items were processed for upgrades, False otherwise.
+ """
+ eros_logger.info("Starting quality cutoff upgrades processing cycle for Eros.")
+ processed_any = False
+
+ # Reset state files if enough time has passed
+ check_state_reset("eros")
+
+ # Extract necessary settings
+ api_url = app_settings.get("api_url", "").strip()
+ api_key = app_settings.get("api_key", "").strip()
+ api_timeout = get_advanced_setting("api_timeout", 120) # Use general.json value
+ instance_name = app_settings.get("instance_name", "Eros Default")
+
+ # Load general settings to get centralized timeout
+ general_settings = load_settings('general')
+
+ monitored_only = app_settings.get("monitored_only", True)
+ skip_item_refresh = app_settings.get("skip_item_refresh", False)
+ eros_logger.info(f"Skip item refresh setting: {skip_item_refresh}")
+ search_mode = app_settings.get("search_mode", "movie") # Default to movie mode if not specified
+
+ eros_logger.info(f"Using search mode: {search_mode} for quality upgrades")
+
+ # Use the new hunt_upgrade_items parameter name, falling back to hunt_upgrade_scenes for backwards compatibility
+ hunt_upgrade_items = app_settings.get("hunt_upgrade_items", app_settings.get("hunt_upgrade_scenes", 0))
+
+ # Use advanced settings from general.json for command operations
+ command_wait_delay = get_advanced_setting("command_wait_delay", 1)
+ command_wait_attempts = get_advanced_setting("command_wait_attempts", 600)
+ state_reset_interval_hours = get_advanced_setting("stateful_management_hours", 168)
+
+ # Log that we're using Eros API v3
+ eros_logger.info(f"Using Eros API v3 for instance: {instance_name}")
+
+ # Skip if hunt_upgrade_items is set to 0
+ if hunt_upgrade_items <= 0:
+ eros_logger.info("'hunt_upgrade_items' setting is 0 or less. Skipping quality upgrade processing.")
+ return False
+
+ # Check for stop signal
+ if stop_check():
+ eros_logger.info("Stop requested before starting quality upgrades. Aborting...")
+ return False
+
+ # Get items eligible for upgrade
+ eros_logger.info(f"Retrieving items eligible for cutoff upgrade...")
+ upgrade_eligible_data = eros_api.get_quality_upgrades(api_url, api_key, api_timeout, monitored_only, search_mode)
+
+ if not upgrade_eligible_data:
+ eros_logger.info("No items found eligible for upgrade or error retrieving them.")
+ return False
+
+ # Check for stop signal after retrieving eligible items
+ if stop_check():
+ eros_logger.info("Stop requested after retrieving upgrade eligible items. Aborting...")
+ return False
+
+ eros_logger.info(f"Found {len(upgrade_eligible_data)} items eligible for quality upgrade.")
+
+ # Filter out already processed items using stateful management
+ unprocessed_items = []
+ for item in upgrade_eligible_data:
+ item_id = str(item.get("id"))
+ if not is_processed("eros", instance_name, item_id):
+ unprocessed_items.append(item)
+ else:
+ eros_logger.debug(f"Skipping already processed item ID: {item_id}")
+
+ eros_logger.info(f"Found {len(unprocessed_items)} unprocessed items out of {len(upgrade_eligible_data)} total items eligible for quality upgrade.")
+
+ if not unprocessed_items:
+ eros_logger.info(f"No unprocessed items found for {instance_name}. All available items have been processed.")
+ return False
+
+ items_processed = 0
+ processing_done = False
+
+ # Always use random selection for upgrades
+ eros_logger.info(f"Randomly selecting up to {hunt_upgrade_items} items for quality upgrade.")
+ items_to_upgrade = random.sample(unprocessed_items, min(len(unprocessed_items), hunt_upgrade_items))
+
+ eros_logger.info(f"Selected {len(items_to_upgrade)} items for quality upgrade.")
+
+ # Process selected items
+ for item in items_to_upgrade:
+ # Check for stop signal before each item
+ if stop_check():
+ eros_logger.info("Stop requested during item processing. Aborting...")
+ break
+
+ # Re-check limit in case it changed
+ current_limit = app_settings.get("hunt_upgrade_items", app_settings.get("hunt_upgrade_scenes", 1))
+ if items_processed >= current_limit:
+ eros_logger.info(f"Reached HUNT_UPGRADE_ITEMS limit ({current_limit}) for this cycle.")
+ break
+
+ item_id = item.get("id")
+ title = item.get("title", "Unknown Title")
+
+ # For movies, we don't use season/episode format
+ if search_mode == "movie":
+ item_info = title
+ # In Whisparr, movie quality is stored differently than TV shows
+ current_quality = item.get("movieFile", {}).get("quality", {}).get("quality", {}).get("name", "Unknown")
+ else:
+ # If somehow using scene mode, try to format as S/E if available
+ season_number = item.get('seasonNumber')
+ episode_number = item.get('episodeNumber')
+ if season_number is not None and episode_number is not None:
+ season_episode = f"S{season_number:02d}E{episode_number:02d}"
+ item_info = f"{title} - {season_episode}"
+ else:
+ item_info = title
+ # Legacy episode quality path
+ current_quality = item.get("episodeFile", {}).get("quality", {}).get("quality", {}).get("name", "Unknown")
+
+ eros_logger.info(f"Processing item for quality upgrade: \"{item_info}\" (Item ID: {item_id})")
+ eros_logger.info(f" - Current quality: {current_quality}")
+
+ # Mark the item as processed BEFORE triggering any searches
+ add_processed_id("eros", instance_name, str(item_id))
+ eros_logger.debug(f"Added item ID {item_id} to processed list for {instance_name}")
+
+ # Refresh the item information if not skipped
+ refresh_command_id = None
+ if not skip_item_refresh:
+ eros_logger.info(" - Refreshing item information...")
+ refresh_command_id = eros_api.refresh_item(api_url, api_key, api_timeout, item_id)
+ if refresh_command_id:
+ eros_logger.info(f"Triggered refresh command {refresh_command_id}. Waiting a few seconds...")
+ time.sleep(5) # Basic wait
+ else:
+ eros_logger.warning(f"Failed to trigger refresh command for item ID: {item_id}. Proceeding without refresh.")
+ else:
+ eros_logger.info(" - Skipping item refresh (skip_item_refresh=true)")
+
+ # Check for stop signal before searching
+ if stop_check():
+ eros_logger.info(f"Stop requested before searching for {title}. Aborting...")
+ break
+
+ # Search for the item
+ eros_logger.info(" - Searching for quality upgrade...")
+ search_command_id = eros_api.item_search(api_url, api_key, api_timeout, [item_id])
+ if search_command_id:
+ eros_logger.info(f"Triggered search command {search_command_id}. Assuming success for now.")
+
+ # Log to history so the upgrade appears in the history UI
+ log_processed_media("eros", item_info, item_id, instance_name, "upgrade")
+ eros_logger.debug(f"Logged quality upgrade to history for item ID {item_id}")
+
+ items_processed += 1
+ processing_done = True
+
+ # Increment the upgraded statistics for Eros
+ increment_stat("eros", "upgraded", 1)
+ eros_logger.debug(f"Incremented eros upgraded statistics by 1")
+
+ # Log progress
+ current_limit = app_settings.get("hunt_upgrade_items", app_settings.get("hunt_upgrade_scenes", 1))
+ eros_logger.info(f"Processed {items_processed}/{current_limit} items for quality upgrade this cycle.")
+ else:
+ eros_logger.warning(f"Failed to trigger search command for item ID {item_id}.")
+ # Do not mark as processed if search couldn't be triggered
+ continue
+
+ # Log final status
+ if items_processed > 0:
+ eros_logger.info(f"Completed processing {items_processed} items for quality upgrade for this cycle.")
+ else:
+ eros_logger.info("No new items were processed for quality upgrade in this run.")
+
+ return processing_done
diff --git a/Huntarr.io-6.3.6/src/primary/apps/eros_routes.py b/Huntarr.io-6.3.6/src/primary/apps/eros_routes.py
new file mode 100644
index 0000000..9d6b8c7
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/eros_routes.py
@@ -0,0 +1,229 @@
+#!/usr/bin/env python3
+
+from flask import Blueprint, request, jsonify
+import datetime, os, requests
+from src.primary import keys_manager
+from src.primary.state import get_state_file_path, reset_state_file
+from src.primary.utils.logger import get_logger, APP_LOG_FILES
+from src.primary.settings_manager import load_settings
+import traceback
+import socket
+from urllib.parse import urlparse
+from src.primary.apps.eros import api as eros_api
+
+eros_bp = Blueprint('eros', __name__)
+eros_logger = get_logger("eros")
+
+# Make sure we're using the correct state files
+PROCESSED_MISSING_FILE = get_state_file_path("eros", "processed_missing")
+PROCESSED_UPGRADES_FILE = get_state_file_path("eros", "processed_upgrades")
+
def get_configured_instances():
    """Return the list of configured Eros instances (empty list if none)."""
    eros_settings = load_settings("eros")
    return eros_settings.get("instances", [])
+
def test_connection(url, api_key):
    """Test connectivity to an Eros (Whisparr v3) server.

    Strategy: validate the URL scheme, run a quick TCP probe for fast
    feedback on unreachable hosts, then call /api/v3/system/status and
    confirm the reported version is a v3 release (v2 is legacy Whisparr).

    Args:
        url: Base URL of the server, including http:// or https://.
        api_key: API key sent in the X-Api-Key header.

    Returns:
        dict with "success" (bool) and "message" (str); on success also
        "version" and "api_version".
    """
    # Validate URL format
    if not (url.startswith('http://') or url.startswith('https://')):
        error_msg = "API URL must start with http:// or https://"
        eros_logger.error(error_msg)
        return {"success": False, "message": error_msg}

    # Try to establish a socket connection first to check basic connectivity
    parsed_url = urlparse(url)
    hostname = parsed_url.hostname
    port = parsed_url.port or (443 if parsed_url.scheme == 'https' else 80)

    try:
        # Quick TCP probe for fast feedback on connectivity issues.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.settimeout(3)  # Short timeout for quick feedback
            result = sock.connect_ex((hostname, port))
        finally:
            # Always release the socket, even if connect_ex raises
            # (the original closed it only on the success path).
            sock.close()

        if result != 0:
            error_msg = f"Connection refused - Unable to connect to {hostname}:{port}. Please check if the server is running and the port is correct."
            eros_logger.error(error_msg)
            return {"success": False, "message": error_msg}
    except socket.gaierror:
        error_msg = f"DNS resolution failed - Cannot resolve hostname: {hostname}. Please check your URL."
        eros_logger.error(error_msg)
        return {"success": False, "message": error_msg}
    except Exception as e:
        # Log the socket testing error but continue with the full request
        eros_logger.debug(f"Socket test error, continuing with full request: {str(e)}")

    # For Eros, we only use v3 API path
    api_url = f"{url.rstrip('/')}/api/v3/system/status"
    headers = {'X-Api-Key': api_key}

    try:
        # Make the request with (connect, read) timeouts
        eros_logger.debug(f"Trying API path: {api_url}")
        response = requests.get(api_url, headers=headers, timeout=(5, 30))

        try:
            response.raise_for_status()

            # Check if we got a valid JSON response
            try:
                response_data = response.json()

                # Verify this is actually an Eros server by checking for version
                version = response_data.get('version')
                if not version:
                    error_msg = "API response doesn't contain version information. This doesn't appear to be a valid Eros server."
                    eros_logger.error(error_msg)
                    return {"success": False, "message": error_msg}

                # Version check - should be v3.x for Eros
                if version.startswith('3'):
                    detected_version = "v3"
                    eros_logger.info(f"Successfully connected to Eros API version: {version} (API {detected_version})")

                    # Success!
                    return {"success": True, "message": "Successfully connected to Eros API", "version": version, "api_version": detected_version}
                elif version.startswith('2'):
                    error_msg = f"Incompatible version detected: {version}. This appears to be Whisparr V2, not Eros."
                    eros_logger.error(error_msg)
                    return {"success": False, "message": error_msg}
                else:
                    error_msg = f"Unexpected version {version} detected. Eros requires API v3."
                    eros_logger.error(error_msg)
                    return {"success": False, "message": error_msg}
            except ValueError:
                # requests raises ValueError (JSONDecodeError) for non-JSON bodies
                error_msg = "Invalid JSON response from Eros API - This doesn't appear to be a valid Eros server"
                eros_logger.error(f"{error_msg}. Response content: {response.text[:200]}")
                return {"success": False, "message": error_msg}

        except requests.exceptions.HTTPError:
            # Map common HTTP errors to actionable messages
            if response.status_code == 401:
                error_msg = "Invalid API key - Authentication failed"
            elif response.status_code == 404:
                error_msg = "API endpoint not found: This doesn't appear to be a valid Eros server. Check your URL."
            else:
                error_msg = f"Eros server error (HTTP {response.status_code}): The Eros server is experiencing issues"
            eros_logger.error(error_msg)
            return {"success": False, "message": error_msg}

    except requests.exceptions.ConnectionError as e:
        # Connection error - server might be down or unreachable
        error_details = str(e)

        if "Connection refused" in error_details:
            error_msg = f"Connection refused - Eros is not running on {url} or the port is incorrect"
        else:
            error_msg = f"Connection error - Check if Eros is running: {error_details}"

        eros_logger.error(error_msg)
        return {"success": False, "message": error_msg}

    except requests.exceptions.Timeout:
        error_msg = "Connection timed out - Eros took too long to respond"
        eros_logger.error(error_msg)
        return {"success": False, "message": error_msg}

    except Exception as e:
        error_msg = f"Unexpected error: {str(e)}"
        eros_logger.error(f"{error_msg}\n{traceback.format_exc()}")
        return {"success": False, "message": error_msg}
+
@eros_bp.route('/status', methods=['GET'])
def get_status():
    """Get the status of all configured Eros instances"""
    try:
        instances = get_configured_instances()
        eros_logger.debug(f"Eros configured instances: {instances}")

        # Nothing configured -> report that and stop.
        if not instances:
            eros_logger.debug("No Eros instances configured")
            return jsonify({"configured": False, "connected": False})

        # Probe every instance and count the reachable ones.
        connected_count = sum(
            1 for inst in instances
            if test_connection(inst['url'], inst['api_key'])['success']
        )
        return jsonify({
            "configured": True,
            "connected": connected_count > 0,
            "connected_count": connected_count,
            "total_configured": len(instances)
        })
    except Exception as e:
        eros_logger.error(f"Error getting Eros status: {str(e)}")
        return jsonify({"configured": False, "connected": False, "error": str(e)})
+
@eros_bp.route('/test-connection', methods=['POST'])
def test_connection_endpoint():
    """Test connection to an Eros API instance.

    Expects a JSON body with "api_url" and "api_key". Responds with the
    result dict produced by test_connection() (Flask serializes it).
    """
    # get_json(silent=True) returns None instead of raising on a missing or
    # malformed JSON body, so such requests get the clean 400 below rather
    # than an AttributeError on `data.get`.
    data = request.get_json(silent=True) or {}
    api_url = data.get('api_url')
    api_key = data.get('api_key')

    if not api_url or not api_key:
        return jsonify({"success": False, "message": "API URL and API Key are required"}), 400

    eros_logger.info(f"Testing connection to Eros API at {api_url}")

    return test_connection(api_url, api_key)
+
@eros_bp.route('/test-settings', methods=['GET'])
def test_eros_settings():
    """Debug endpoint to test Eros settings loading"""
    try:
        # Read the settings files directly so any caching in the settings
        # manager cannot mask what is actually on disk. (`os` is already
        # imported at module level.)
        import json

        candidate_paths = [
            "/config/eros.json",  # Main Docker mount
            "/app/config/eros.json",  # Alternate location
            os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "config", "eros.json")  # Relative path
        ]

        results = {}
        for path in candidate_paths:
            entry = {"exists": os.path.exists(path)}
            if entry["exists"]:
                try:
                    with open(path, 'r') as handle:
                        entry["content"] = json.load(handle)
                except Exception as e:
                    entry["error"] = str(e)
            results[path] = entry

        # Also try loading via settings_manager
        try:
            from src.primary.settings_manager import load_settings
            results["settings_manager"] = load_settings("eros")
        except Exception as e:
            results["settings_manager_error"] = str(e)

        return jsonify(results)
    except Exception as e:
        return jsonify({"error": str(e)})
+
@eros_bp.route('/reset-processed', methods=['POST'])
def reset_processed_state():
    """Reset the processed state files for Eros"""
    try:
        # Clear both tracked categories: missing items and quality upgrades.
        for state_name in ("processed_missing", "processed_upgrades"):
            reset_state_file("eros", state_name)

        eros_logger.info("Successfully reset Eros processed state files")
        return jsonify({"success": True, "message": "Successfully reset processed state"})
    except Exception as e:
        error_msg = f"Error resetting Eros state: {str(e)}"
        eros_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 500
diff --git a/Huntarr.io-6.3.6/src/primary/apps/lidarr.py b/Huntarr.io-6.3.6/src/primary/apps/lidarr.py
new file mode 100644
index 0000000..aceb3e8
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/lidarr.py
@@ -0,0 +1,242 @@
+#!/usr/bin/env python3
+"""
+Lidarr Blueprint for Huntarr
+Defines Flask routes for interacting with Lidarr
+"""
+
+import json
+import traceback
+import requests
+from flask import Blueprint, jsonify, request
+from src.primary.utils.logger import get_logger
+from src.primary.apps.lidarr import api as lidarr_api
+from src.primary.state import reset_state_file, get_state_file_path
+from src.primary.settings_manager import load_settings
+import src.primary.config as config
+
+# Create a logger for this module
+lidarr_logger = get_logger("lidarr")
+
+# Create Blueprint for Lidarr routes
+lidarr_bp = Blueprint('lidarr', __name__)
+
+# Make sure we're using the correct state files
+PROCESSED_MISSING_FILE = get_state_file_path("lidarr", "processed_missing")
+PROCESSED_UPGRADES_FILE = get_state_file_path("lidarr", "processed_upgrades")
+
+# Function to check if Lidarr is configured
def is_configured():
    """Check if Lidarr API credentials are configured by checking if at least one instance is enabled"""
    settings = load_settings("lidarr")

    if not settings:
        lidarr_logger.debug("No settings found for Lidarr")
        return False

    instance_list = settings.get("instances")
    if isinstance(instance_list, list) and instance_list:
        # Multi-instance config: one enabled instance with URL+key suffices.
        for instance in instance_list:
            if instance.get("enabled", True) and instance.get("api_url") and instance.get("api_key"):
                lidarr_logger.debug(f"Found configured Lidarr instance: {instance.get('name', 'Unnamed')}")
                return True
        lidarr_logger.debug("No enabled Lidarr instances found with valid API URL and key")
        return False

    # Fallback to legacy single-instance config
    return bool(settings.get("api_url") and settings.get("api_key"))
+
+# Get all valid instances from settings
def get_configured_instances():
    """Get all configured and enabled Lidarr instances"""
    settings = load_settings("lidarr")
    instances = []

    if not settings:
        lidarr_logger.debug("No settings found for Lidarr")
        return instances

    instance_list = settings.get("instances")
    if isinstance(instance_list, list) and instance_list:
        for instance in instance_list:
            if not (instance.get("enabled", True) and instance.get("api_url") and instance.get("api_key")):
                continue
            # Merge global settings with this instance's connection details;
            # drop the instances list itself to avoid confusion downstream.
            merged = settings.copy()
            merged.pop("instances", None)
            merged["api_url"] = instance.get("api_url")
            merged["api_key"] = instance.get("api_key")
            merged["instance_name"] = instance.get("name", "Default")
            instances.append(merged)
    else:
        # Fallback to legacy single-instance config
        if settings.get("api_url") and settings.get("api_key"):
            settings["instance_name"] = "Default"
            instances.append(settings)

    lidarr_logger.info(f"Found {len(instances)} configured and enabled Lidarr instances")
    return instances
+
@lidarr_bp.route('/status', methods=['GET'])
def status():
    """Get Lidarr connection status and version."""
    try:
        settings = config.get_app_settings("lidarr")

        # Not configured at all -> disconnected, but still HTTP 200.
        if not settings or not settings.get("api_url") or not settings.get("api_key"):
            return jsonify({"connected": False, "message": "Lidarr is not configured"}), 200

        # Probe the server; None means the call failed.
        system_status = lidarr_api.get_system_status(
            settings["api_url"], settings["api_key"], settings.get("api_timeout", 30)
        )

        if system_status is None:
            return jsonify({
                "connected": False,
                "message": "Failed to connect to Lidarr"
            }), 200

        version = system_status.get("version", "Unknown")
        return jsonify({
            "connected": True,
            "version": version,
            "message": f"Connected to Lidarr {version}"
        }), 200

    except Exception as e:
        error_message = f"Error checking Lidarr status: {str(e)}"
        lidarr_logger.error(error_message)
        lidarr_logger.error(traceback.format_exc())
        return jsonify({"connected": False, "message": error_message}), 500
+
@lidarr_bp.route('/test-connection', methods=['POST'])
def test_connection():
    """Test connection to Lidarr with provided API settings.

    JSON body: "api_url" (str), "api_key" (str), optional "api_timeout" (int).
    Returns 200 on success, 400 for bad input or unreachable server.
    """
    try:
        # get_json(silent=True) yields None instead of raising on a missing
        # or malformed body, so such requests get the clean 400 below.
        data = request.get_json(silent=True) or {}
        api_url = (data.get("api_url") or "").rstrip('/')
        api_key = data.get("api_key") or ""
        try:
            api_timeout = int(data.get("api_timeout", 30))
        except (TypeError, ValueError):
            # A non-numeric timeout used to 500 via the generic handler;
            # fall back to the default instead.
            api_timeout = 30

        if not api_url or not api_key:
            return jsonify({"success": False, "message": "API URL and API Key are required"}), 400

        # Test connection to Lidarr
        system_status = lidarr_api.get_system_status(api_url, api_key, api_timeout)

        if system_status is not None:
            version = system_status.get("version", "Unknown")
            return jsonify({
                "success": True,
                "version": version,
                "message": f"Successfully connected to Lidarr {version}"
            }), 200
        else:
            return jsonify({
                "success": False,
                "message": "Failed to connect to Lidarr. Check URL and API Key."
            }), 400

    except requests.exceptions.RequestException as e:
        error_message = f"Connection error: {str(e)}"
        if getattr(e, 'response', None) is not None:
            error_message += f" - Status Code: {e.response.status_code}, Response: {e.response.text[:200]}"
        lidarr_logger.error(f"Lidarr connection error: {error_message}")
        return jsonify({"success": False, "message": error_message}), 500
    except Exception as e:  # Catch any other unexpected errors
        lidarr_logger.error(f"An unexpected error occurred during Lidarr connection test: {str(e)}", exc_info=True)
        return jsonify({"success": False, "message": f"An unexpected error occurred: {str(e)}"}), 500
+
@lidarr_bp.route('/stats', methods=['GET'])
def get_stats():
    """Get statistics about Lidarr library."""
    try:
        settings = config.get_app_settings("lidarr")

        if not settings or not settings.get("api_url") or not settings.get("api_key"):
            return jsonify({"error": "Lidarr is not configured"}), 400

        api_url = settings["api_url"]
        api_key = settings["api_key"]
        api_timeout = settings.get("api_timeout", 30)
        monitored_only = settings.get("monitored_only", True)

        # Artist counts (None means the API call failed).
        all_artists = lidarr_api.get_artists(api_url, api_key, api_timeout)
        if all_artists is None:
            return jsonify({"error": "Failed to get artists from Lidarr"}), 500

        monitored_artists = sum(1 for artist in all_artists if artist.get("monitored", False))

        # Wanted / upgradable albums; treat a failed call as zero items.
        missing_albums = lidarr_api.get_missing_albums(api_url, api_key, api_timeout, monitored_only)
        cutoff_unmet = lidarr_api.get_cutoff_unmet_albums(api_url, api_key, api_timeout, monitored_only)

        return jsonify({
            "total_artists": len(all_artists),
            "monitored_artists": monitored_artists,
            "missing_albums": len(missing_albums) if missing_albums is not None else 0,
            "upgradable_albums": len(cutoff_unmet) if cutoff_unmet is not None else 0,
            "queue_size": lidarr_api.get_download_queue_size(api_url, api_key, api_timeout)
        }), 200

    except Exception as e:
        error_message = f"Error getting Lidarr stats: {str(e)}"
        lidarr_logger.error(error_message)
        lidarr_logger.error(traceback.format_exc())
        return jsonify({"error": error_message}), 500
+
@lidarr_bp.route('/reset-state', methods=['POST'])
def reset_state():
    """Reset the Lidarr state files to clear processed IDs."""
    try:
        # Optional JSON flags select which state files to clear (both by default).
        data = request.json or {}

        if data.get('reset_missing', True):
            reset_state_file("lidarr", "processed_missing")
            lidarr_logger.info("Reset Lidarr missing albums state")

        if data.get('reset_upgrades', True):
            reset_state_file("lidarr", "processed_upgrades")
            lidarr_logger.info("Reset Lidarr upgrades state")

        return jsonify({
            "success": True,
            "message": "Lidarr state reset successfully"
        }), 200

    except Exception as e:
        error_message = f"Error resetting Lidarr state: {str(e)}"
        lidarr_logger.error(error_message)
        lidarr_logger.error(traceback.format_exc())
        return jsonify({"error": error_message}), 500
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/lidarr/__init__.py b/Huntarr.io-6.3.6/src/primary/apps/lidarr/__init__.py
new file mode 100644
index 0000000..d003f14
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/lidarr/__init__.py
@@ -0,0 +1,91 @@
+"""
+Lidarr app module for Huntarr
+Contains functionality for missing albums and quality upgrades in Lidarr
+"""
+
+# Module exports
+from src.primary.apps.lidarr.missing import process_missing_albums
+from src.primary.apps.lidarr.upgrade import process_cutoff_upgrades
+from src.primary.settings_manager import load_settings
+from src.primary.utils.logger import get_logger
+
+# Define logger for this module
+lidarr_logger = get_logger("lidarr")
+
def get_configured_instances():
    """Get all configured and enabled Lidarr instances.

    Reads the saved Lidarr settings and returns a list of dicts, each with
    "instance_name", "api_url" and "api_key". Supports both the
    multi-instance "instances" list and the legacy single-instance keys.
    URLs missing an http(s) scheme are auto-corrected to http://.

    Returns:
        list[dict]: one entry per usable instance (may be empty).
    """
    settings = load_settings("lidarr")
    instances = []

    if not settings:
        lidarr_logger.debug("No settings found for Lidarr")
        return instances

    # Check if instances are configured
    if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]:
        for idx, instance in enumerate(settings["instances"]):
            lidarr_logger.debug(f"Checking instance #{idx}: {instance}")
            # Use `or ""` so an explicit null in the settings JSON cannot
            # crash .strip() — dict.get's default only covers missing keys.
            api_url = (instance.get("api_url") or "").strip()
            api_key = (instance.get("api_key") or "").strip()

            # Enhanced URL validation - ensure URL has proper scheme
            if api_url and not (api_url.startswith('http://') or api_url.startswith('https://')):
                lidarr_logger.warning(f"Instance '{instance.get('name', 'Unnamed')}' has URL without http(s) scheme: {api_url}")
                api_url = f"http://{api_url}"
                lidarr_logger.warning(f"Auto-correcting URL to: {api_url}")

            is_enabled = instance.get("enabled", True)

            # Only include properly configured instances
            if is_enabled and api_url and api_key:
                instances.append({
                    "instance_name": instance.get("name", "Default"),
                    "api_url": api_url,
                    "api_key": api_key,
                })
            elif not is_enabled:
                lidarr_logger.debug(f"Skipping disabled instance: {instance.get('name', 'Unnamed')}")
            else:
                # For brand new installations, don't spam logs with warnings about default instances
                instance_name = instance.get('name', 'Unnamed')
                if instance_name == 'Default':
                    # Use debug level for default instances to avoid log spam on new installations
                    lidarr_logger.debug(f"Skipping instance '{instance_name}' due to missing API URL or key (URL: '{api_url}', Key Set: {bool(api_key)})")
                else:
                    # Still log warnings for non-default instances
                    lidarr_logger.warning(f"Skipping instance '{instance_name}' due to missing API URL or key (URL: '{api_url}', Key Set: {bool(api_key)})")
    else:
        # Fallback to legacy single-instance config (None-safe, as above)
        api_url = (settings.get("api_url") or "").strip()
        api_key = (settings.get("api_key") or "").strip()

        # Ensure URL has proper scheme
        if api_url and not (api_url.startswith('http://') or api_url.startswith('https://')):
            lidarr_logger.warning(f"API URL missing http(s) scheme: {api_url}")
            api_url = f"http://{api_url}"
            lidarr_logger.warning(f"Auto-correcting URL to: {api_url}")

        if api_url and api_key:
            instances.append({
                "instance_name": "Default",
                "api_url": api_url,
                "api_key": api_key,
            })
        else:
            lidarr_logger.warning("No API URL or key found in legacy configuration")

    # Use debug level to avoid spamming logs, especially with 0 instances
    lidarr_logger.debug(f"Found {len(instances)} configured and enabled Lidarr instances")
    return instances
+
+__all__ = ["process_missing_albums", "process_cutoff_upgrades", "get_configured_instances"]
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/lidarr/api.py b/Huntarr.io-6.3.6/src/primary/apps/lidarr/api.py
new file mode 100644
index 0000000..5baae67
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/lidarr/api.py
@@ -0,0 +1,419 @@
+#!/usr/bin/env python3
+"""
+Lidarr-specific API functions
+Handles all communication with the Lidarr API (v1)
+"""
+
+import requests
+import json
+import sys
+import time
+import datetime
+import traceback
+import logging
+from typing import List, Dict, Any, Optional, Union
+from src.primary.utils.logger import get_logger
+
+# Get logger for the Lidarr app
+lidarr_logger = get_logger("lidarr")
+
+# Use a session for better performance
+session = requests.Session()
+
def arr_request(api_url: str, api_key: str, api_timeout: int, endpoint: str, method: str = "GET", data: Dict = None, params: Dict = None) -> Any:
    """
    Make a request to the Lidarr API.

    Args:
        api_url: The base URL of the Lidarr API
        api_key: The API key for authentication
        api_timeout: Timeout for the API request
        endpoint: The API endpoint to call
        method: HTTP method (GET, POST, PUT, DELETE)
        data: Optional data to send with the request
        params: Optional query parameters

    Returns:
        The JSON response from the API, True for a JSON-less success,
        or None if the request failed
    """
    if not api_url or not api_key:
        lidarr_logger.error("API URL or API key is missing. Check your settings.")
        return None

    # Ensure api_url has a scheme
    if not (api_url.startswith('http://') or api_url.startswith('https://')):
        lidarr_logger.error(f"Invalid URL format: {api_url} - URL must start with http:// or https://")
        return None

    # Make sure URL is properly formed
    full_url = f"{api_url.rstrip('/')}/api/v1/{endpoint.lstrip('/')}"

    # Set up headers
    headers = {
        "X-Api-Key": api_key,
        "Content-Type": "application/json"
    }

    lidarr_logger.debug(f"Lidarr API Request: {method} {full_url} Params: {params} Data: {data}")

    try:
        response = session.request(
            method=method.upper(),
            url=full_url,
            headers=headers,
            json=data if method.upper() in ["POST", "PUT"] else None,
            params=params if method.upper() == "GET" else None,
            timeout=api_timeout
        )

        lidarr_logger.debug(f"Lidarr API Response Status: {response.status_code}")
        # Use isEnabledFor() rather than comparing logger.level directly: a
        # logger with an inherited level reports NOTSET (0), so the old
        # `level == logging.DEBUG` check never matched in that common setup.
        if lidarr_logger.isEnabledFor(logging.DEBUG):
            if len(response.content) < 1000:
                lidarr_logger.debug(f"Lidarr API Response Body: {response.text}")
            else:
                lidarr_logger.debug(f"Lidarr API Response Body (truncated): {response.text[:500]}...")

        # Check for successful response
        response.raise_for_status()

        # Parse response if there is content
        if response.content and response.headers.get('Content-Type', '').startswith('application/json'):
            return response.json()
        elif response.status_code in [200, 201, 202]:  # Success codes that might not return JSON
            return True
        else:  # Should have been caught by raise_for_status, but as a fallback
            lidarr_logger.warning(f"Request successful (status {response.status_code}) but no JSON content returned from {endpoint}")
            return True  # Indicate success even without content

    except requests.exceptions.RequestException as e:
        error_msg = f"Error during {method} request to Lidarr endpoint '{endpoint}': {str(e)}"
        if e.response is not None:
            error_msg += f" | Status: {e.response.status_code} | Response: {e.response.text[:500]}"
        lidarr_logger.error(error_msg)
        return None
    except json.JSONDecodeError:
        # Only reachable after `response` exists (raised by response.json()).
        lidarr_logger.error(f"Error decoding JSON response from Lidarr endpoint '{endpoint}'. Response: {response.text[:500]}")
        return None

    except Exception as e:
        # Catch all exceptions and log them with traceback
        error_msg = f"CRITICAL ERROR in Lidarr arr_request: {str(e)}"
        lidarr_logger.error(error_msg)
        lidarr_logger.error(f"Full traceback: {traceback.format_exc()}")
        print(error_msg, file=sys.stderr)
        print(traceback.format_exc(), file=sys.stderr)
        return None
+
+# --- Specific API Functions ---
+
def get_system_status(api_url: str, api_key: str, api_timeout: int) -> Optional[Dict]:
    """Get Lidarr system status."""
    status_endpoint = "system/status"
    return arr_request(api_url, api_key, api_timeout, status_endpoint)
+
def check_connection(api_url: str, api_key: str, api_timeout: int) -> bool:
    """Check the connection to Lidarr API."""
    try:
        # Guard clauses: no URL, or URL without an http(s) scheme.
        if not api_url:
            lidarr_logger.error("API URL is empty or not set")
            return False

        if not (api_url.startswith('http://') or api_url.startswith('https://')):
            lidarr_logger.error(f"Invalid URL format: {api_url} - URL must start with http:// or https://")
            return False

        # Probe the v1 status endpoint; any HTTP error counts as failure.
        status_url = f"{api_url.rstrip('/')}/api/v1/system/status"
        response = requests.get(status_url, headers={"X-Api-Key": api_key}, timeout=api_timeout)
        response.raise_for_status()  # Raise HTTPError for bad responses (4xx or 5xx)

        lidarr_logger.info("Successfully connected to Lidarr.")
        return True
    except requests.exceptions.RequestException as e:
        lidarr_logger.error(f"Error connecting to Lidarr: {e}")
        return False
    except Exception as e:
        lidarr_logger.error(f"An unexpected error occurred during Lidarr connection check: {e}")
        return False
+
def get_artists(api_url: str, api_key: str, api_timeout: int, artist_id: Optional[int] = None) -> Union[List, Dict, None]:
    """Get artist information from Lidarr.

    Fetches a single artist when artist_id is given, otherwise the full list.
    """
    if artist_id:
        endpoint = f"artist/{artist_id}"
    else:
        endpoint = "artist"
    return arr_request(api_url, api_key, api_timeout, endpoint)
+
def get_albums(api_url: str, api_key: str, api_timeout: int, album_id: Optional[int] = None, artist_id: Optional[int] = None) -> Union[List, Dict, None]:
    """Get album information from Lidarr.

    album_id selects one album; artist_id filters the album list.
    """
    endpoint = f"album/{album_id}" if album_id else "album"
    query = {'artistId': artist_id} if artist_id else None
    return arr_request(api_url, api_key, api_timeout, endpoint, params=query)
+
def get_tracks(api_url: str, api_key: str, api_timeout: int, album_id: Optional[int] = None) -> Union[List, None]:
    """Get track information for a specific album."""
    # An album id is mandatory for the track endpoint.
    if not album_id:
        lidarr_logger.warning("get_tracks requires an album_id.")
        return None
    return arr_request(api_url, api_key, api_timeout, "track", params={'albumId': album_id})
+
def get_queue(api_url: str, api_key: str, api_timeout: int) -> List:
    """Get the current queue from Lidarr (handles pagination).

    Returns:
        A flat list of queue records (possibly partial if a page fails).
    """
    # Lidarr v1 queue endpoint supports pagination, unlike Sonarr v3's simple list
    all_records = []
    page = 1
    page_size = 1000  # Request large page size

    while True:
        params = {
            "page": page,
            "pageSize": page_size,
            "sortKey": "timeleft",
            "sortDir": "asc"
        }
        response = arr_request(api_url, api_key, api_timeout, "queue", params=params)

        if response and isinstance(response, dict) and 'records' in response:
            records = response.get('records', [])
            if not records:
                break  # No more records
            all_records.extend(records)

            # Stop once we have everything the API reported.
            total_records = response.get('totalRecords', 0)
            if len(all_records) >= total_records:
                break

            # A short page also means the last page; stopping here saves one
            # extra request when totalRecords is inaccurate (same rule the
            # wanted/missing fetcher uses).
            if len(records) < page_size:
                break

            page += 1
        else:
            lidarr_logger.error(f"Failed to get queue page {page} or invalid response format.")
            break  # Return what we have so far

    return all_records
+
def get_download_queue_size(api_url: str, api_key: str, api_timeout: int) -> int:
    """Get the current size of the Lidarr download queue.

    Returns -1 when the queue could not be read.
    """
    # A single-record page is enough: we only need totalRecords.
    response = arr_request(api_url, api_key, api_timeout, "queue", params={"pageSize": 1})

    if not (response and isinstance(response, dict) and 'totalRecords' in response):
        lidarr_logger.error("Error getting Lidarr download queue size.")
        return -1  # Indicate error

    queue_size = response.get('totalRecords', 0)
    lidarr_logger.debug(f"Lidarr download queue size: {queue_size}")
    return queue_size
+
def get_missing_albums(api_url: str, api_key: str, api_timeout: int, monitored_only: bool) -> List[Dict[str, Any]]:
    """Get missing albums from Lidarr, handling pagination.

    Args:
        api_url: Base URL of the Lidarr server.
        api_key: API key for authentication.
        api_timeout: Request timeout in seconds.
        monitored_only: When True, keep only albums where both the album
            and its artist are monitored.

    Returns:
        List of missing-album records; may be partial if a page request fails.
    """
    endpoint = "wanted/missing"
    page = 1
    page_size = 1000
    all_missing_albums = []
    # -1 until the first page reveals the API's reported total.
    total_records_reported = -1

    lidarr_logger.debug(f"Starting fetch for missing albums (monitored_only={monitored_only}).")

    while True:
        params = {
            "page": page,
            "pageSize": page_size,
            "includeArtist": "true"  # Include artist info for filtering
            # Removed sortKey and sortDir
        }

        lidarr_logger.debug(f"Requesting missing albums page {page} with params: {params}")
        response = arr_request(api_url, api_key, api_timeout, endpoint, params=params)

        if response and isinstance(response, dict) and 'records' in response:
            records = response.get('records', [])
            total_records_on_page = len(records)

            if page == 1:
                total_records_reported = response.get('totalRecords', 0)
                lidarr_logger.debug(f"Lidarr API reports {total_records_reported} total missing albums.")

            lidarr_logger.debug(f"Parsed {total_records_on_page} missing album records from Lidarr API JSON (page {page}).")

            # Termination condition 1: empty page.
            if not records:
                lidarr_logger.debug(f"No more missing records found on page {page}. Stopping pagination.")
                break

            all_missing_albums.extend(records)

            # Termination condition 2: fetched at least the reported total.
            if total_records_reported >= 0 and len(all_missing_albums) >= total_records_reported:
                lidarr_logger.debug(f"Fetched {len(all_missing_albums)} records, matching or exceeding total reported ({total_records_reported}). Assuming last page.")
                break

            # Termination condition 3: short page implies the last page.
            if total_records_on_page < page_size:
                lidarr_logger.debug(f"Received {total_records_on_page} records (less than page size {page_size}). Assuming last page.")
                break

            page += 1
            # time.sleep(0.1) # Optional delay

        else:
            lidarr_logger.error(f"Failed to get missing albums page {page} or invalid response format.")
            break  # Return what we have so far

    lidarr_logger.info(f"Total missing albums fetched across all pages: {len(all_missing_albums)}")

    # Apply monitored filter after fetching
    if monitored_only:
        original_count = len(all_missing_albums)
        # Check both album and artist monitored status
        filtered_missing = [
            album for album in all_missing_albums
            if album.get('monitored', False) and album.get('artist', {}).get('monitored', False)
        ]
        lidarr_logger.debug(f"Filtered for monitored_only=True: {len(filtered_missing)} monitored missing albums remain (out of {original_count} total).")
        return filtered_missing
    else:
        lidarr_logger.debug(f"Returning {len(all_missing_albums)} missing albums (monitored_only=False).")
        return all_missing_albums
+
def get_cutoff_unmet_albums(api_url: str, api_key: str, api_timeout: int, monitored_only: bool) -> List[Dict[str, Any]]:
    """Fetch every cutoff-unmet album from Lidarr, walking the paginated wanted/cutoff endpoint.

    Note: the Lidarr API reports ALBUMS for cutoff unmet, not tracks. When
    ``monitored_only`` is True, the result is restricted to albums whose album
    AND artist are both monitored.
    """
    endpoint = "wanted/cutoff"
    page_size = 1000  # Lidarr's own default page size may be smaller
    collected: List[Dict[str, Any]] = []
    reported_total = -1  # -1 until page 1 supplies the server-side count
    page = 1

    lidarr_logger.debug(f"Starting fetch for cutoff unmet albums (monitored_only={monitored_only}).")

    while True:
        # includeArtist is needed so the monitored filter below can inspect the artist.
        params = {
            "page": page,
            "pageSize": page_size,
            "includeArtist": "true"
        }

        lidarr_logger.debug(f"Requesting cutoff unmet albums page {page} with params: {params}")
        response = arr_request(api_url, api_key, api_timeout, endpoint, params=params)

        # Guard clause: stop on a bad/unexpected response, returning what we collected.
        if not (response and isinstance(response, dict) and 'records' in response):
            lidarr_logger.error(f"Error getting cutoff unmet albums from Lidarr (page {page}) or invalid response format. Stopping pagination.")
            break

        records = response.get('records', [])

        if page == 1:
            reported_total = response.get('totalRecords', 0)
            lidarr_logger.debug(f"Lidarr API reports {reported_total} total cutoff unmet albums.")

        lidarr_logger.debug(f"Parsed {len(records)} cutoff unmet album records from Lidarr API JSON (page {page}).")

        if not records:
            lidarr_logger.debug(f"No more cutoff unmet records found on page {page}. Stopping pagination.")
            break

        collected.extend(records)

        # Stop when we hit the reported total or the server returned a short page.
        if 0 <= reported_total <= len(collected):
            lidarr_logger.debug(f"Fetched {len(collected)} records, matching or exceeding total reported ({reported_total}). Assuming last page.")
            break
        if len(records) < page_size:
            lidarr_logger.debug(f"Received {len(records)} records (less than page size {page_size}). Assuming last page.")
            break

        page += 1

    lidarr_logger.info(f"Total cutoff unmet albums fetched across all pages: {len(collected)}")

    if not monitored_only:
        lidarr_logger.debug(f"Returning {len(collected)} cutoff unmet albums (monitored_only=False).")
        return collected

    # Require both the album and its artist to be monitored.
    monitored_albums = [
        album for album in collected
        if album.get('monitored', False) and album.get('artist', {}).get('monitored', False)
    ]
    lidarr_logger.debug(f"Filtered for monitored_only=True: {len(monitored_albums)} monitored cutoff unmet albums remain (out of {len(collected)} total).")
    return monitored_albums
+
def search_albums(api_url: str, api_key: str, api_timeout: int, album_ids: List[int]) -> Optional[Dict]:
    """Trigger a Lidarr AlbumSearch command for the given album IDs.

    Returns the full command object (including its 'id') on success, None otherwise.
    """
    if not album_ids:
        lidarr_logger.warning("No album IDs provided for search.")
        return None

    response = arr_request(
        api_url, api_key, api_timeout, "command",
        method="POST",
        data={"name": "AlbumSearch", "albumIds": album_ids},
    )

    if isinstance(response, dict) and 'id' in response:
        lidarr_logger.info(f"Triggered Lidarr AlbumSearch for album IDs: {album_ids}. Command ID: {response.get('id')}")
        return response

    lidarr_logger.error(f"Failed to trigger Lidarr AlbumSearch for album IDs {album_ids}. Response: {response}")
    return None
+
def search_artist(api_url: str, api_key: str, api_timeout: int, artist_id: int) -> Optional[Dict]:
    """Trigger a Lidarr ArtistSearch command for one artist.

    Returns the full command object on success, None on failure.
    """
    response = arr_request(
        api_url, api_key, api_timeout, "command",
        method="POST",
        data={"name": "ArtistSearch", "artistIds": [artist_id]},
    )

    if isinstance(response, dict) and 'id' in response:
        lidarr_logger.info(f"Triggered Lidarr ArtistSearch for artist ID: {artist_id}. Command ID: {response.get('id')}")
        return response

    lidarr_logger.error(f"Failed to trigger Lidarr ArtistSearch for artist ID {artist_id}. Response: {response}")
    return None
+
def refresh_artist(api_url: str, api_key: str, api_timeout: int, artist_id: int) -> Optional[Dict]:
    """Trigger a Lidarr RefreshArtist command for one artist.

    Note the payload key is the singular 'artistId' (unlike ArtistSearch,
    which takes 'artistIds'). Returns the full command object or None.
    """
    response = arr_request(
        api_url, api_key, api_timeout, "command",
        method="POST",
        data={"name": "RefreshArtist", "artistId": artist_id},
    )

    if isinstance(response, dict) and 'id' in response:
        lidarr_logger.info(f"Triggered Lidarr RefreshArtist for artist ID: {artist_id}. Command ID: {response.get('id')}")
        return response

    lidarr_logger.error(f"Failed to trigger Lidarr RefreshArtist for artist ID {artist_id}. Response: {response}")
    return None
+
def get_command_status(api_url: str, api_key: str, api_timeout: int, command_id: int) -> Optional[Dict[str, Any]]:
    """Look up the current status of a previously issued Lidarr command."""
    response = arr_request(api_url, api_key, api_timeout, f"command/{command_id}")

    # Anything other than a non-empty dict is treated as a failure.
    if not (response and isinstance(response, dict)):
        lidarr_logger.error(f"Error getting Lidarr command status for ID {command_id}. Response: {response}")
        return None

    lidarr_logger.debug(f"Checked Lidarr command status for ID {command_id}: {response.get('status')}")
    return response
+
def get_artist_by_id(api_url: str, api_key: str, api_timeout: int, artist_id: int) -> Optional[Dict[str, Any]]:
    """Get artist details by ID from Lidarr."""
    # Thin wrapper over arr_request for GET artist/{id}; presumably returns None
    # on failure (arr_request's error behavior is not visible here — confirm).
    return arr_request(api_url, api_key, api_timeout, f"artist/{artist_id}")
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/lidarr/missing.py b/Huntarr.io-6.3.6/src/primary/apps/lidarr/missing.py
new file mode 100644
index 0000000..3b0af46
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/lidarr/missing.py
@@ -0,0 +1,355 @@
+#!/usr/bin/env python3
+"""
+Lidarr missing content processing module for Huntarr
+Handles missing albums or artists based on configuration.
+"""
+
+import time
+import random
+import datetime
+import os
+import json
+from typing import Dict, Any, Callable
+from src.primary.utils.logger import get_logger
+from src.primary.apps.lidarr import api as lidarr_api
+from src.primary.stats_manager import increment_stat
+from src.primary.stateful_manager import is_processed, add_processed_id
+from src.primary.utils.history_utils import log_processed_media
+from src.primary.settings_manager import load_settings, get_advanced_setting
+from src.primary.state import get_state_file_path, check_state_reset
+import json
+import os
+
+# Get the logger for the Lidarr module
+lidarr_logger = get_logger(__name__) # Use __name__ for correct logger hierarchy
+
+
def process_missing_albums(
    app_settings: Dict[str, Any], # Combined settings dictionary
    stop_check: Callable[[], bool] = None # Function to check for stop signal
) -> bool:
    """
    Processes missing albums for a specific Lidarr instance based on settings.

    Args:
        app_settings (dict): Dictionary containing combined instance and general settings.
        stop_check (Callable[[], bool]): Function to check if shutdown is requested.

    Returns:
        bool: True if any items were processed, False otherwise.
    """

    # Copy instance-specific information
    instance_name = app_settings.get("instance_name", "Default")
    api_url = app_settings.get("api_url", "").strip()
    api_key = app_settings.get("api_key", "").strip()
    api_timeout = get_advanced_setting("api_timeout", 120) # Use general.json value
    monitored_only = app_settings.get("monitored_only", True)
    skip_future_releases = app_settings.get("skip_future_releases", False)
    hunt_missing_items = app_settings.get("hunt_missing_items", 0)
    hunt_missing_mode = app_settings.get("hunt_missing_mode", "album")
    command_wait_delay = get_advanced_setting("command_wait_delay", 1)
    # NOTE(review): command_wait_attempts is read but never used in this function.
    command_wait_attempts = get_advanced_setting("command_wait_attempts", 600)

    # Early exit for disabled features
    if not api_url or not api_key:
        lidarr_logger.warning(f"Missing API URL or API key, skipping missing processing for {instance_name}")
        return False

    if hunt_missing_items <= 0:
        lidarr_logger.debug(f"Hunting for missing items is disabled (hunt_missing_items={hunt_missing_items}) for {instance_name}")
        return False

    # Make sure any requested stop function is executable
    stop_check = stop_check if callable(stop_check) else lambda: False

    lidarr_logger.info(f"Looking for missing albums for {instance_name}")
    lidarr_logger.debug(f"Processing up to {hunt_missing_items} missing items in {hunt_missing_mode} mode")

    # Reset state files if enough time has passed
    check_state_reset("lidarr")

    # Initialize processed counter and tracking containers
    processed_count = 0
    # NOTE(review): processed_any is never read; the function returns processed_count > 0.
    processed_any = False
    processed_artists_or_albums = set()
    total_items_to_process = hunt_missing_items

    try:
        # Fetch all missing albums first
        lidarr_logger.info(f"Fetching all missing albums for {instance_name}...")
        missing_items = lidarr_api.get_missing_albums(
            api_url,
            api_key,
            monitored_only=monitored_only,
            api_timeout=api_timeout
        )

        if missing_items is None: # API call failed or returned None
            lidarr_logger.error(f"Failed to get missing items from Lidarr API for {instance_name}.")
            return False

        if not missing_items:
            lidarr_logger.info(f"No missing albums found for {instance_name} after initial fetch and filtering.")
            return False

        lidarr_logger.info(f"Found {len(missing_items)} potentially missing albums for {instance_name} after initial fetch.")

        # --- Filter Future Releases --- #
        original_count = len(missing_items)
        if skip_future_releases:
            now = datetime.datetime.now(datetime.timezone.utc)
            valid_missing_items = []
            skipped_count = 0
            for item in missing_items:
                release_date_str = item.get('releaseDate')
                if release_date_str:
                    try:
                        # Lidarr dates often include 'Z' for UTC
                        release_date = datetime.datetime.fromisoformat(release_date_str.replace('Z', '+00:00'))
                        if release_date <= now:
                            valid_missing_items.append(item)
                        else:
                            # lidarr_logger.debug(f"Skipping future album ID {item.get('id')} ('{item.get('title')}') release: {release_date_str}")
                            skipped_count += 1
                    except ValueError as e:
                        lidarr_logger.warning(f"Could not parse release date '{release_date_str}' for album ID {item.get('id')}. Error: {e}. Including it.")
                        valid_missing_items.append(item) # Keep if date is invalid
                else:
                    valid_missing_items.append(item) # Keep if no release date

            missing_items = valid_missing_items # Replace with filtered list
            if skipped_count > 0:
                lidarr_logger.info(f"Skipped {skipped_count} future albums based on release date. {len(missing_items)} remaining.")
        else:
            lidarr_logger.debug("Skipping future release filtering as 'skip_future_releases' is False.")

        # Check if any items remain after filtering
        if not missing_items:
            lidarr_logger.info(f"No missing albums left after filtering future releases for {instance_name}.")
            return False

        # Process based on mode
        lidarr_logger.info(f"Processing missing items in '{hunt_missing_mode}' mode.")

        target_entities = []
        search_entity_type = "album" # Default to album

        if hunt_missing_mode == "artist":
            search_entity_type = "artist"
            # Group by artist ID
            items_by_artist = {}
            for item in missing_items: # Use the potentially filtered missing_items list
                artist_id = item.get('artistId')
                lidarr_logger.debug(f"Missing album item: {item.get('title')} by artistId: {artist_id}")
                if artist_id:
                    if artist_id not in items_by_artist:
                        items_by_artist[artist_id] = []
                    items_by_artist[artist_id].append(item)

            # In artist mode, map from artists to their albums
            # First, get all artist IDs
            target_entities = list(items_by_artist.keys())

            # Filter out already processed artists
            lidarr_logger.info(f"Found {len(target_entities)} artists with missing albums before filtering")
            unprocessed_entities = [eid for eid in target_entities
                                    if not is_processed("lidarr", instance_name, str(eid))]

            lidarr_logger.info(f"Found {len(unprocessed_entities)} unprocessed artists out of {len(target_entities)} total")
        else:
            # In album mode, directly track album IDs
            target_entities = [item['id'] for item in missing_items]

            # Filter out processed albums
            lidarr_logger.info(f"Found {len(target_entities)} missing albums before filtering")
            unprocessed_entities = [eid for eid in target_entities
                                    if not is_processed("lidarr", instance_name, str(eid))]

            lidarr_logger.info(f"Found {len(unprocessed_entities)} unprocessed albums out of {len(target_entities)} total")

        if not unprocessed_entities:
            lidarr_logger.info(f"No unprocessed {search_entity_type}s found for {instance_name}. All available {search_entity_type}s have been processed.")
            return False

        # Select entities to search
        # NOTE(review): this duplicates the identical guard immediately above,
        # so the message below is unreachable.
        if not unprocessed_entities:
            lidarr_logger.info(f"No {search_entity_type}s found to process after grouping/filtering.")
            return False

        entities_to_search_ids = random.sample(unprocessed_entities, min(len(unprocessed_entities), total_items_to_process))
        lidarr_logger.info(f"Randomly selected {len(entities_to_search_ids)} {search_entity_type}s to search.")
        lidarr_logger.debug(f"Unprocessed entities: {unprocessed_entities}")
        lidarr_logger.debug(f"Entities to search: {entities_to_search_ids}")

        # --- Trigger Search (Artist or Album) ---
        if hunt_missing_mode == "artist":
            lidarr_logger.info(f"Artist-based missing mode selected")
            lidarr_logger.info(f"Found {len(entities_to_search_ids)} unprocessed artists to search.")

            # Prepare a list for artist details log
            artist_details_log = []

            # First, fetch detailed artist info for each artist ID to enhance logs
            artist_details = {}
            for artist_id in entities_to_search_ids:
                # Get artist details from API for better logging
                artist_data = lidarr_api.get_artist_by_id(api_url, api_key, api_timeout, artist_id)
                if artist_data:
                    artist_details[artist_id] = artist_data

            lidarr_logger.info(f"Artists selected for processing in this cycle:")
            for i, artist_id in enumerate(entities_to_search_ids):
                # Get artist name and any additional details
                artist_name = f"Artist ID {artist_id}" # Default if name not found
                artist_metadata = ""

                if artist_id in artist_details:
                    artist_data = artist_details[artist_id]
                    artist_name = artist_data.get('artistName', artist_name)
                    # Add year active or debut year if available
                    if 'statistics' in artist_data and 'albumCount' in artist_data['statistics']:
                        album_count = artist_data['statistics']['albumCount']
                        artist_metadata = f"({album_count} albums)"
                    # Get genre info if available
                    if 'genres' in artist_data and artist_data['genres']:
                        genres = ", ".join(artist_data['genres'][:2]) # Limit to first 2 genres
                        if artist_metadata:
                            artist_metadata = f"{artist_metadata} - {genres}"
                        else:
                            artist_metadata = f"({genres})"

                detail_line = f"{i+1}. {artist_name} {artist_metadata} - ID: {artist_id}"
                artist_details_log.append(detail_line)
                lidarr_logger.info(f" {detail_line}")

            lidarr_logger.info(f"Triggering Artist Search for {len(entities_to_search_ids)} artists on {instance_name}...")
            for i, artist_id in enumerate(entities_to_search_ids):
                if stop_check(): # Use the new stop_check function
                    lidarr_logger.warning("Shutdown requested during artist search trigger.")
                    break

                # Get artist name from cached details or first album
                artist_name = f"Artist ID {artist_id}" # Default if name not found
                if artist_id in artist_details:
                    artist_data = artist_details[artist_id]
                    artist_name = artist_data.get('artistName', artist_name)
                elif artist_id in items_by_artist and items_by_artist[artist_id]:
                    # Fallback to album info if direct artist details not available
                    first_album = items_by_artist[artist_id][0]
                    artist_info = first_album.get('artist')
                    if artist_info and isinstance(artist_info, dict):
                        artist_name = artist_info.get('artistName', artist_name)

                # Mark the artist as processed right away - BEFORE triggering the search
                success = add_processed_id("lidarr", instance_name, str(artist_id))
                lidarr_logger.debug(f"Added artist ID {artist_id} to processed list for {instance_name}, success: {success}")

                # Trigger the search AFTER marking as processed
                command_result = lidarr_api.search_artist(api_url, api_key, api_timeout, artist_id)
                command_id = command_result.get('id', 'unknown') if command_result else 'failed'
                lidarr_logger.info(f"Triggered Lidarr ArtistSearch for artist ID: {artist_id}, Command ID: {command_id}")

                # Increment stats for UI tracking
                if command_result:
                    increment_stat("lidarr", "hunted")
                    processed_count += 1 # Count successful searches
                    processed_artists_or_albums.add(artist_id)

                    # Also mark all albums from this artist as processed
                    if artist_id in items_by_artist:
                        for album in items_by_artist[artist_id]:
                            album_id = album.get('id')
                            if album_id:
                                album_success = add_processed_id("lidarr", instance_name, str(album_id))
                                lidarr_logger.debug(f"Added album ID {album_id} to processed list for {instance_name}, success: {album_success}")

                    # Log to history system
                    log_processed_media("lidarr", f"{artist_name}", artist_id, instance_name, "missing")
                    lidarr_logger.debug(f"Logged history entry for artist: {artist_name}")

                time.sleep(0.1) # Small delay between triggers
        else: # Album mode
            album_ids_to_search = list(entities_to_search_ids)
            if stop_check(): # Use the new stop_check function
                lidarr_logger.warning("Shutdown requested before album search trigger.")
                return False

            # Prepare descriptive list for logging
            album_details_log = []
            # Create a dict for quick lookup based on album ID
            missing_items_dict = {item['id']: item for item in missing_items if 'id' in item}

            # First, fetch additional album details for better logging if needed
            album_details = {}
            for album_id in album_ids_to_search:
                # NOTE(review): lidarr_api.get_albums is not visible in the reviewed
                # api module chunk — confirm it exists and accepts a single album id.
                album_details[album_id] = lidarr_api.get_albums(api_url, api_key, api_timeout, album_id)

            lidarr_logger.info(f"Albums selected for processing in this cycle:")
            for idx, album_id in enumerate(album_ids_to_search):
                album_info = missing_items_dict.get(album_id)
                if album_info:
                    # Safely get title and artist name, provide defaults
                    title = album_info.get('title', f'Album ID {album_id}')
                    artist_name = album_info.get('artist', {}).get('artistName', 'Unknown Artist')

                    # Get additional metadata if available
                    release_year = ""
                    if 'releaseDate' in album_info and album_info['releaseDate']:
                        try:
                            release_date = album_info['releaseDate'].split('T')[0]
                            release_year = f"({release_date[:4]})"
                        except (IndexError, ValueError):
                            pass

                    # Get quality if available
                    quality_info = ""
                    if album_details.get(album_id) and 'quality' in album_details[album_id]:
                        quality = album_details[album_id]['quality'].get('quality', {}).get('name', '')
                        if quality:
                            quality_info = f"[{quality}]"

                    detail_line = f"{idx+1}. {artist_name} - {title} {release_year} {quality_info} - ID: {album_id}"
                    album_details_log.append(detail_line)
                    lidarr_logger.info(f" {detail_line}")
                else:
                    # Fallback if album ID wasn't found in the fetched missing items (should be rare)
                    detail_line = f"{idx+1}. Album ID {album_id} (Details not found)"
                    album_details_log.append(detail_line)
                    lidarr_logger.info(f" {detail_line}")

            # Mark the albums as processed BEFORE triggering the search
            for album_id in album_ids_to_search:
                success = add_processed_id("lidarr", instance_name, str(album_id))
                lidarr_logger.debug(f"Added album ID {album_id} to processed list for {instance_name}, success: {success}")

            # Now trigger the search
            # NOTE(review): search_albums returns the full command dict (or None),
            # so the "ID" in the debug line below prints the whole object.
            command_id = lidarr_api.search_albums(api_url, api_key, api_timeout, album_ids_to_search)
            if command_id:
                # Log after successful search
                lidarr_logger.debug(f"Album search command triggered with ID: {command_id} for albums: [{', '.join(album_details_log)}]")
                increment_stat("lidarr", "hunted") # Changed from "missing" to "hunted"
                processed_count += len(album_ids_to_search) # Count albums searched
                processed_artists_or_albums.update(album_ids_to_search)

                # Log to history system
                for album_id in album_ids_to_search:
                    album_info = missing_items_dict.get(album_id)
                    if album_info:
                        # Get title and artist name for the history entry
                        title = album_info.get('title', f'Album ID {album_id}')
                        artist_name = album_info.get('artist', {}).get('artistName', 'Unknown Artist')
                        media_name = f"{artist_name} - {title}"
                        log_processed_media("lidarr", media_name, album_id, instance_name, "missing")
                        lidarr_logger.debug(f"Logged history entry for album: {media_name}")

                time.sleep(command_wait_delay) # Basic delay after the single command
            else:
                lidarr_logger.warning(f"Failed to trigger album search for IDs {album_ids_to_search} on {instance_name}.")

    except Exception as e:
        lidarr_logger.error(f"An error occurred during missing album processing for {instance_name}: {e}", exc_info=True)
        return False

    lidarr_logger.info(f"Missing album processing finished for {instance_name}. Processed {processed_count} items/searches ({len(processed_artists_or_albums)} unique {search_entity_type}s).")
    return processed_count > 0
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/lidarr/upgrade.py b/Huntarr.io-6.3.6/src/primary/apps/lidarr/upgrade.py
new file mode 100644
index 0000000..a85fd19
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/lidarr/upgrade.py
@@ -0,0 +1,184 @@
+#!/usr/bin/env python3
+"""
+Lidarr cutoff upgrade processing module for Huntarr
+Handles albums that do not meet the configured quality cutoff.
+"""
+
+import time
+import random
+from typing import Dict, Any, Optional, Callable, List, Union, Set # Added List, Union and Set
+from src.primary.utils.logger import get_logger
+from src.primary.apps.lidarr import api as lidarr_api
+from src.primary.utils.history_utils import log_processed_media
+from src.primary.stateful_manager import is_processed, add_processed_id
+from src.primary.stats_manager import increment_stat
+from src.primary.settings_manager import load_settings, get_advanced_setting
+from src.primary.state import check_state_reset # Add the missing import
+
+# Get logger for the app
+lidarr_logger = get_logger(__name__) # Use __name__ for correct logger hierarchy
+
def process_cutoff_upgrades(
    app_settings: Dict[str, Any],
    stop_check: Callable[[], bool]
) -> bool:
    """
    Processes cutoff upgrades for albums in a specific Lidarr instance.

    Fetches all albums whose files do not meet the configured quality cutoff,
    filters out albums already handled in a previous cycle, randomly selects up
    to ``hunt_upgrade_items`` of them, marks them processed, and triggers a
    single Lidarr AlbumSearch command for the batch.

    Args:
        app_settings (dict): Dictionary containing combined instance and general Lidarr settings.
        stop_check (Callable[[], bool]): Function to check if shutdown is requested.

    Returns:
        bool: True if a search was triggered for at least one album, False otherwise.
    """
    lidarr_logger.info("Starting quality cutoff upgrades processing cycle for Lidarr.")
    processed_any = False

    # --- Extract Settings --- #
    # Instance details are part of app_settings passed from the background loop.
    instance_name = app_settings.get("instance_name", "Lidarr Default")

    api_url = app_settings.get("api_url", "").strip()
    api_key = app_settings.get("api_key", "").strip()
    api_timeout = get_advanced_setting("api_timeout", 120)  # Use general.json value

    # Command wait settings from general.json. command_wait_attempts is currently
    # unused; kept for potential wait_for_command support (see note near the end).
    command_wait_delay = get_advanced_setting("command_wait_delay", 1)
    command_wait_attempts = get_advanced_setting("command_wait_attempts", 600)

    # General Lidarr settings (also from app_settings)
    hunt_upgrade_items = app_settings.get("hunt_upgrade_items", 0)
    monitored_only = app_settings.get("monitored_only", True)

    lidarr_logger.info(f"Using API timeout of {api_timeout} seconds for Lidarr upgrades")
    lidarr_logger.debug(f"Processing upgrades for instance: {instance_name}")

    # Guard clauses: cannot run without credentials or with upgrades disabled.
    if not api_url or not api_key:
        lidarr_logger.error(f"Missing API URL or Key for instance '{instance_name}'. Cannot process upgrades.")
        return False

    if hunt_upgrade_items <= 0:
        lidarr_logger.info(f"'hunt_upgrade_items' is {hunt_upgrade_items} or less. Skipping upgrade processing for {instance_name}.")
        return False

    lidarr_logger.info(f"Looking for quality upgrades for {instance_name}")
    lidarr_logger.debug(f"Processing up to {hunt_upgrade_items} items for quality upgrade")

    # Reset state files if enough time has passed
    check_state_reset("lidarr")

    processed_count = 0

    try:
        lidarr_logger.info(f"Fetching cutoff unmet albums for {instance_name}...")
        # Lidarr reports cutoff unmet at ALBUM granularity, not tracks.
        cutoff_unmet_albums = lidarr_api.get_cutoff_unmet_albums(
            api_url,
            api_key,
            monitored_only=monitored_only,
            api_timeout=api_timeout
        )

        if not cutoff_unmet_albums:
            lidarr_logger.info(f"No cutoff unmet albums found for {instance_name}.")
            return False

        lidarr_logger.info(f"Found {len(cutoff_unmet_albums)} cutoff unmet albums for {instance_name}.")

        # Filter out albums already handled in earlier cycles.
        unprocessed_albums = []
        for album in cutoff_unmet_albums:
            album_id = str(album.get('id'))
            if not is_processed("lidarr", instance_name, album_id):
                unprocessed_albums.append(album)
            else:
                lidarr_logger.debug(f"Skipping already processed album ID: {album_id}")

        lidarr_logger.info(f"Found {len(unprocessed_albums)} unprocessed albums out of {len(cutoff_unmet_albums)} total albums eligible for quality upgrade.")

        if not unprocessed_albums:
            lidarr_logger.info("No unprocessed albums found for quality upgrade. Skipping cycle.")
            return False

        # Always select albums randomly
        albums_to_search = random.sample(unprocessed_albums, min(len(unprocessed_albums), hunt_upgrade_items))
        lidarr_logger.info(f"Randomly selected {len(albums_to_search)} albums for upgrade search.")

        album_ids_to_search = [album['id'] for album in albums_to_search]

        if not album_ids_to_search:
            lidarr_logger.info("No album IDs selected for upgrade search. Skipping trigger.")
            return False

        # Build human-readable lines describing each selected album for the log.
        album_details_log = []
        for i, album in enumerate(albums_to_search):
            album_title = album.get('title', f'Album ID {album["id"]}')
            artist_name = album.get('artist', {}).get('artistName', 'Unknown Artist')
            quality = album.get('quality', {}).get('quality', {}).get('name', 'Unknown Quality')
            album_details_log.append(f"{i+1}. {artist_name} - {album_title} (ID: {album['id']}, Current Quality: {quality})")

        # Log each album on a separate line for better readability
        if album_details_log:
            lidarr_logger.info(f"Albums selected for quality upgrade in this cycle:")
            for album_detail in album_details_log:
                lidarr_logger.info(f" {album_detail}")

        # Check stop event before triggering search
        if stop_check and stop_check():
            lidarr_logger.warning("Shutdown requested, stopping upgrade album search.")
            return False  # No search was triggered in this case

        # Mark albums as processed BEFORE triggering search so a failure mid-search
        # cannot cause the same albums to be re-hunted every cycle.
        for album_id in album_ids_to_search:
            add_processed_id("lidarr", instance_name, str(album_id))
            lidarr_logger.debug(f"Added album ID {album_id} to processed list for {instance_name}")

        lidarr_logger.info(f"Triggering Album Search for {len(album_ids_to_search)} albums for upgrade on instance {instance_name}: {album_ids_to_search}")
        # search_albums returns the full command object (dict) on success, None on failure.
        command_result = lidarr_api.search_albums(
            api_url,
            api_key,
            api_timeout,
            album_ids_to_search
        )
        if command_result:
            # BUGFIX: the previous code logged the entire command dict as the "ID";
            # extract the actual command id for a readable log line.
            command_id = command_result.get('id', 'unknown')
            lidarr_logger.debug(f"Upgrade album search command triggered with ID: {command_id} for albums: {album_ids_to_search}")
            increment_stat("lidarr", "upgraded")

            # Record each selected album in the history log.
            for album_id in album_ids_to_search:
                for album in albums_to_search:
                    if album['id'] == album_id:
                        album_title = album.get('title', f'Album ID {album_id}')
                        artist_name = album.get('artist', {}).get('artistName', 'Unknown Artist')
                        media_name = f"{artist_name} - {album_title}"
                        log_processed_media("lidarr", media_name, album_id, instance_name, "upgrade")
                        lidarr_logger.debug(f"Logged quality upgrade to history for album ID {album_id}")
                        break

            time.sleep(command_wait_delay)  # Basic delay after issuing the command
            processed_count += len(album_ids_to_search)
            processed_any = True
            # Consider adding wait_for_command logic (using command_wait_attempts) if needed:
            # wait_for_command(api_url, api_key, command_id, command_wait_delay, command_wait_attempts)
        else:
            lidarr_logger.warning(f"Failed to trigger upgrade album search for IDs {album_ids_to_search} on {instance_name}.")

    except Exception as e:
        lidarr_logger.error(f"An error occurred during upgrade album processing for {instance_name}: {e}", exc_info=True)
        return False

    lidarr_logger.info(f"Upgrade album processing finished for {instance_name}. Triggered searches for {processed_count} items.")
    return processed_any
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/lidarr_routes.py b/Huntarr.io-6.3.6/src/primary/apps/lidarr_routes.py
new file mode 100644
index 0000000..bd78fda
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/lidarr_routes.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python3
+
+from flask import Blueprint, request, jsonify
+import datetime, os, requests
+from src.primary import keys_manager
+from src.primary.state import get_state_file_path, reset_state_file
+from src.primary.utils.logger import get_logger
+import traceback
+import socket
+from urllib.parse import urlparse
+
+lidarr_bp = Blueprint('lidarr', __name__)
+lidarr_logger = get_logger("lidarr")
+
+# Paths to Lidarr's stateful tracking files; presumably consumed by reset/state endpoints — verify against callers
+PROCESSED_MISSING_FILE = get_state_file_path("lidarr", "processed_missing")
+PROCESSED_UPGRADES_FILE = get_state_file_path("lidarr", "processed_upgrades")
+
+@lidarr_bp.route('/test-connection', methods=['POST'])
+def test_connection():
+ """Test connection to a Lidarr API instance"""
+ data = request.json
+ api_url = data.get('api_url')
+ api_key = data.get('api_key')
+ api_timeout = data.get('api_timeout', 30) # Use longer timeout for connection test
+
+ if not api_url or not api_key:
+ return jsonify({"success": False, "message": "API URL and API Key are required"}), 400
+
+ lidarr_logger.info(f"Testing connection to Lidarr API at {api_url}")
+
+ # Validate URL format
+ if not (api_url.startswith('http://') or api_url.startswith('https://')):
+ error_msg = "API URL must start with http:// or https://"
+ lidarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 400
+
+ # Try to establish a socket connection first to check basic connectivity
+ parsed_url = urlparse(api_url)
+ hostname = parsed_url.hostname
+ port = parsed_url.port or (443 if parsed_url.scheme == 'https' else 80)
+
+ try:
+ # Try socket connection for quick feedback on connectivity issues
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.settimeout(3) # Short timeout for quick feedback
+ result = sock.connect_ex((hostname, port))
+ sock.close()
+
+ if result != 0:
+ error_msg = f"Connection refused - Unable to connect to {hostname}:{port}. Please check if the server is running and the port is correct."
+ lidarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 404
+ except socket.gaierror:
+ error_msg = f"DNS resolution failed - Cannot resolve hostname: {hostname}. Please check your URL."
+ lidarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 404
+ except Exception as e:
+ # Log the socket testing error but continue with the full request
+ lidarr_logger.debug(f"Socket test error, continuing with full request: {str(e)}")
+
+ # For Lidarr, use api/v1
+ url = f"{api_url.rstrip('/')}/api/v1/system/status"
+ headers = {
+ "X-Api-Key": api_key,
+ "Content-Type": "application/json"
+ }
+
+ try:
+ response = requests.get(url, headers=headers, timeout=(10, api_timeout))
+
+ # For HTTP errors, provide more specific feedback
+ if response.status_code == 401:
+ error_msg = "Authentication failed: Invalid API key"
+ lidarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 401
+ elif response.status_code == 403:
+ error_msg = "Access forbidden: Check API key permissions"
+ lidarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 403
+ elif response.status_code == 404:
+ error_msg = "API endpoint not found: This doesn't appear to be a valid Lidarr server. Check your URL."
+ lidarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 404
+ elif response.status_code >= 500:
+ error_msg = f"Lidarr server error (HTTP {response.status_code}): The Lidarr server is experiencing issues"
+ lidarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), response.status_code
+
+ # Raise for other HTTP errors
+ response.raise_for_status()
+
+ try:
+ response_data = response.json()
+ version = response_data.get('version', 'unknown')
+ lidarr_logger.info(f"Successfully connected to Lidarr API version: {version}")
+
+ return jsonify({
+ "success": True,
+ "message": "Successfully connected to Lidarr API",
+ "version": version
+ })
+ except ValueError:
+ error_msg = "Invalid JSON response from Lidarr API - This doesn't appear to be a valid Lidarr server"
+ lidarr_logger.error(f"{error_msg}. Response content: {response.text[:200]}")
+ return jsonify({"success": False, "message": error_msg}), 500
+
+ except requests.exceptions.ConnectionError as e:
+ # Handle different types of connection errors
+ error_details = str(e)
+ if "Connection refused" in error_details:
+ error_msg = f"Connection refused - Lidarr is not running on {api_url} or the port is incorrect"
+ elif "Name or service not known" in error_details or "getaddrinfo failed" in error_details:
+ error_msg = f"DNS resolution failed - Cannot find host '{urlparse(api_url).hostname}'. Check your URL."
+ else:
+ error_msg = f"Connection error - Check if Lidarr is running: {error_details}"
+
+ lidarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 404
+ except requests.exceptions.Timeout:
+ error_msg = f"Connection timed out - Lidarr took too long to respond"
+ lidarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 504
+ except requests.exceptions.RequestException as e:
+ error_msg = f"Connection test failed: {str(e)}"
+ lidarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 500
diff --git a/Huntarr.io-6.3.6/src/primary/apps/radarr.py b/Huntarr.io-6.3.6/src/primary/apps/radarr.py
new file mode 100644
index 0000000..14502ed
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/radarr.py
@@ -0,0 +1,147 @@
+from flask import Blueprint, request, jsonify
+import datetime, os, requests
+from src.primary import keys_manager
+from src.primary.utils.logger import get_logger
+from src.primary.state import get_state_file_path
+from src.primary.settings_manager import load_settings
+
+radarr_bp = Blueprint('radarr', __name__)
+radarr_logger = get_logger("radarr")
+
+# Paths to Radarr's stateful tracking files; presumably consumed by reset/state endpoints — verify against callers
+PROCESSED_MISSING_FILE = get_state_file_path("radarr", "processed_missing")
+PROCESSED_UPGRADES_FILE = get_state_file_path("radarr", "processed_upgrades")
+
+@radarr_bp.route('/test-connection', methods=['POST'])
+def test_connection():
+ """Test connection to a Radarr API instance with comprehensive diagnostics"""
+ data = request.json
+ api_url = data.get('api_url')
+ api_key = data.get('api_key')
+ api_timeout = data.get('api_timeout', 30) # Use longer timeout for connection test
+
+ if not api_url or not api_key:
+ return jsonify({"success": False, "message": "API URL and API Key are required"}), 400
+
+ # Log the test attempt
+ radarr_logger.info(f"Testing connection to Radarr API at {api_url}")
+
+ # First check if URL is properly formatted
+ if not (api_url.startswith('http://') or api_url.startswith('https://')):
+ error_msg = "API URL must start with http:// or https://"
+ radarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 400
+
+ # For Radarr, use api/v3
+ api_base = "api/v3"
+ test_url = f"{api_url.rstrip('/')}/{api_base}/system/status"
+ headers = {'X-Api-Key': api_key}
+
+ try:
+ # Use a connection timeout separate from read timeout
+ response = requests.get(test_url, headers=headers, timeout=(10, api_timeout))
+
+ # Log HTTP status code for diagnostic purposes
+ radarr_logger.debug(f"Radarr API status code: {response.status_code}")
+
+ # Check HTTP status code
+ response.raise_for_status()
+
+ # Ensure the response is valid JSON
+ try:
+ response_data = response.json()
+
+ # We no longer save keys here since we use instances
+ # keys_manager.save_api_keys("radarr", api_url, api_key)
+
+ radarr_logger.info(f"Successfully connected to Radarr API version: {response_data.get('version', 'unknown')}")
+
+ # Return success with some useful information
+ return jsonify({
+ "success": True,
+ "message": "Successfully connected to Radarr API",
+ "version": response_data.get('version', 'unknown')
+ })
+ except ValueError:
+ error_msg = "Invalid JSON response from Radarr API"
+ radarr_logger.error(f"{error_msg}. Response content: {response.text[:200]}")
+ return jsonify({"success": False, "message": error_msg}), 500
+
+ except requests.exceptions.Timeout as e:
+ error_msg = f"Connection timed out after {api_timeout} seconds"
+ radarr_logger.error(f"{error_msg}: {str(e)}")
+ return jsonify({"success": False, "message": error_msg}), 504
+
+ except requests.exceptions.ConnectionError as e:
+ error_msg = "Connection error - check hostname and port"
+ details = str(e)
+ # Check for common DNS resolution errors
+ if "Name or service not known" in details or "getaddrinfo failed" in details:
+ error_msg = "DNS resolution failed - check hostname"
+ # Check for common connection refused errors
+ elif "Connection refused" in details:
+ error_msg = "Connection refused - check if Radarr is running and the port is correct"
+
+ radarr_logger.error(f"{error_msg}: {details}")
+ return jsonify({"success": False, "message": f"{error_msg}: {details}"}), 502
+
+ except requests.exceptions.RequestException as e:
+ error_message = f"Connection failed: {str(e)}"
+
+ if hasattr(e, 'response') and e.response is not None:
+ status_code = e.response.status_code
+
+ # Add specific messages based on common status codes
+ if status_code == 401:
+ error_message = "Authentication failed: Invalid API key"
+ elif status_code == 403:
+ error_message = "Access forbidden: Check API key permissions"
+ elif status_code == 404:
+ error_message = "API endpoint not found: Check API URL"
+ elif status_code >= 500:
+ error_message = f"Radarr server error (HTTP {status_code}): The Radarr server is experiencing issues"
+
+ # Try to extract more error details if available
+ try:
+ error_details = e.response.json()
+ error_message += f" - {error_details.get('message', 'No details')}"
+ except ValueError:
+ if e.response.text:
+ error_message += f" - Response: {e.response.text[:200]}"
+
+ radarr_logger.error(error_message)
+ return jsonify({"success": False, "message": error_message}), 500
+
+ except Exception as e:
+ error_msg = f"An unexpected error occurred: {str(e)}"
+ radarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 500
+
+# Function to check if Radarr is configured
+def is_configured():
+ """Check if Radarr API credentials are configured by checking if at least one instance is enabled"""
+ settings = load_settings("radarr")
+
+ if not settings:
+ radarr_logger.debug("No settings found for Radarr")
+ return False
+
+ # Check if instances are configured
+ if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]:
+ for instance in settings["instances"]:
+ if instance.get("enabled", True) and instance.get("api_url") and instance.get("api_key"):
+ radarr_logger.debug(f"Found configured Radarr instance: {instance.get('name', 'Unnamed')}")
+ return True
+
+ radarr_logger.debug("No enabled Radarr instances found with valid API URL and key")
+ return False
+
+ # Fallback to legacy single-instance config
+ api_url = settings.get("api_url")
+ api_key = settings.get("api_key")
+ return bool(api_url and api_key)
+
+# Get all valid instances from settings
+# get_configured_instances function has been moved to src/primary/apps/radarr/__init__.py
+
+# TODO(review): stale header — no reset function follows; the implementation appears to have been removed
diff --git a/Huntarr.io-6.3.6/src/primary/apps/radarr/__init__.py b/Huntarr.io-6.3.6/src/primary/apps/radarr/__init__.py
new file mode 100644
index 0000000..facdd9d
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/radarr/__init__.py
@@ -0,0 +1,53 @@
+"""
+Radarr app module for Huntarr
+Contains functionality for missing movies and quality upgrades in Radarr
+"""
+
+# Module exports
+from src.primary.apps.radarr.missing import process_missing_movies
+from src.primary.apps.radarr.upgrade import process_cutoff_upgrades
+
+# Add necessary imports for get_configured_instances
+from src.primary.settings_manager import load_settings
+from src.primary.utils.logger import get_logger
+
+radarr_logger = get_logger("radarr") # Get the logger instance
+
+def get_configured_instances():
+ """Get all configured and enabled Radarr instances"""
+ settings = load_settings("radarr")
+ instances = []
+
+ if not settings:
+ radarr_logger.debug("No settings found for Radarr")
+ return instances
+
+ # Check if instances are configured
+ if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]:
+ for instance in settings["instances"]:
+ if instance.get("enabled", True) and instance.get("api_url") and instance.get("api_key"):
+ # Create a settings object for this instance by combining global settings with instance-specific ones
+ instance_settings = settings.copy()
+ # Remove instances list to avoid confusion
+ if "instances" in instance_settings:
+ del instance_settings["instances"]
+
+ # Override with instance-specific connection settings
+ instance_settings["api_url"] = instance.get("api_url")
+ instance_settings["api_key"] = instance.get("api_key")
+ instance_settings["instance_name"] = instance.get("name", "Default")
+
+ instances.append(instance_settings)
+ else:
+ # Fallback to legacy single-instance config
+ api_url = settings.get("api_url")
+ api_key = settings.get("api_key")
+ if api_url and api_key:
+ settings["instance_name"] = "Default"
+ instances.append(settings)
+
+ # Use debug level to avoid spamming logs, especially with 0 instances
+ radarr_logger.debug(f"Found {len(instances)} configured and enabled Radarr instances")
+ return instances
+
+__all__ = ["process_missing_movies", "process_cutoff_upgrades", "get_configured_instances"]
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/radarr/api.py b/Huntarr.io-6.3.6/src/primary/apps/radarr/api.py
new file mode 100644
index 0000000..c2a0f9b
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/radarr/api.py
@@ -0,0 +1,337 @@
+#!/usr/bin/env python3
+"""
+Radarr-specific API functions
+Handles all communication with the Radarr API
+"""
+
+import requests
+import json
+import time
+import datetime
+from typing import List, Dict, Any, Optional, Union
+# Correct the import path
+from src.primary.utils.logger import get_logger
+
+# Get logger for the Radarr app
+radarr_logger = get_logger("radarr")
+
+# Use a session for better performance
+session = requests.Session()
+
+def arr_request(api_url: str, api_key: str, api_timeout: int, endpoint: str, method: str = "GET", data: Dict = None) -> Any:
+ """
+ Make a request to the Radarr API.
+
+ Args:
+ api_url: The base URL of the Radarr API
+ api_key: The API key for authentication
+ api_timeout: Timeout for the API request
+ endpoint: The API endpoint to call
+ method: HTTP method (GET, POST, PUT, DELETE)
+ data: Optional data to send with the request
+
+ Returns:
+ The JSON response from the API, or None if the request failed
+ """
+ if not api_url or not api_key:
+ radarr_logger.error("API URL or API key is missing. Check your settings.")
+ return None
+
+ # Ensure api_url has a scheme
+ if not (api_url.startswith('http://') or api_url.startswith('https://')):
+ radarr_logger.error(f"Invalid URL format: {api_url} - URL must start with http:// or https://")
+ return None
+
+ # Full URL - ensure no double slashes
+ url = f"{api_url.rstrip('/')}/api/v3/{endpoint.lstrip('/')}"
+
+ # Headers
+ headers = {
+ "X-Api-Key": api_key,
+ "Content-Type": "application/json"
+ }
+
+ try:
+ if method == "GET":
+ response = session.get(url, headers=headers, timeout=api_timeout)
+ elif method == "POST":
+ response = session.post(url, headers=headers, json=data, timeout=api_timeout)
+ elif method == "PUT":
+ response = session.put(url, headers=headers, json=data, timeout=api_timeout)
+ elif method == "DELETE":
+ response = session.delete(url, headers=headers, timeout=api_timeout)
+ else:
+ radarr_logger.error(f"Unsupported HTTP method: {method}")
+ return None
+
+ # Check for errors
+ response.raise_for_status()
+
+ # Parse JSON response
+ if response.text:
+ return response.json()
+ return {}
+
+ except requests.exceptions.RequestException as e:
+ radarr_logger.error(f"API request failed: {e}")
+ return None
+
+def get_download_queue_size(api_url: str, api_key: str, api_timeout: int) -> int:
+ """
+ Get the current size of the download queue.
+
+ Args:
+ api_url: The base URL of the Radarr API
+ api_key: The API key for authentication
+ api_timeout: Timeout for the API request
+
+ Returns:
+ The number of items in the download queue, or -1 if the request failed
+ """
+ if not api_url or not api_key:
+ radarr_logger.error("Radarr API URL or API Key not provided for queue size check.")
+ return -1
+ try:
+ # Radarr uses /api/v3/queue
+ endpoint = f"{api_url.rstrip('/')}/api/v3/queue?page=1&pageSize=1000" # Fetch a large page size
+ headers = {"X-Api-Key": api_key}
+ response = session.get(endpoint, headers=headers, timeout=api_timeout)
+ response.raise_for_status()
+ queue_data = response.json()
+ queue_size = queue_data.get('totalRecords', 0)
+ radarr_logger.debug(f"Radarr download queue size: {queue_size}")
+ return queue_size
+ except requests.exceptions.RequestException as e:
+ radarr_logger.error(f"Error getting Radarr download queue size: {e}")
+ return -1 # Return -1 to indicate an error
+ except Exception as e:
+ radarr_logger.error(f"An unexpected error occurred while getting Radarr queue size: {e}")
+ return -1
+
+def get_movies_with_missing(api_url: str, api_key: str, api_timeout: int, monitored_only: bool) -> Optional[List[Dict]]:
+ """
+ Get a list of movies with missing files (not downloaded/available).
+
+ Args:
+ api_url: The base URL of the Radarr API
+ api_key: The API key for authentication
+ api_timeout: Timeout for the API request
+ monitored_only: If True, only return monitored movies.
+
+ Returns:
+ A list of movie objects with missing files, or None if the request failed.
+ """
+ # Use the updated arr_request with passed arguments
+ movies = arr_request(api_url, api_key, api_timeout, "movie")
+ if movies is None: # Check for None explicitly, as an empty list is valid
+ radarr_logger.error("Failed to retrieve movies from Radarr API.")
+ return None
+
+ missing_movies = []
+ for movie in movies:
+ is_monitored = movie.get("monitored", False)
+ has_file = movie.get("hasFile", False)
+ # Apply monitored_only filter if requested
+ if not has_file and (not monitored_only or is_monitored):
+ missing_movies.append(movie)
+
+ radarr_logger.debug(f"Found {len(missing_movies)} missing movies (monitored_only={monitored_only}).")
+ return missing_movies
+
+def get_cutoff_unmet_movies(api_url: str, api_key: str, api_timeout: int, monitored_only: bool) -> Optional[List[Dict]]:
+ """
+ Get a list of movies that don't meet their quality profile cutoff.
+
+ Args:
+ api_url: The base URL of the Radarr API
+ api_key: The API key for authentication
+ api_timeout: Timeout for the API request
+ monitored_only: If True, only return monitored movies.
+
+ Returns:
+ A list of movie objects that need quality upgrades, or None if the request failed.
+ """
+ # Radarr API endpoint for cutoff unmet movies
+ # Note: Radarr's /api/v3/movie endpoint doesn't directly support a simple 'cutoffUnmet=true' like Sonarr's wanted/cutoff.
+ # We need to fetch all movies and filter locally, or use the /api/v3/movie/lookup endpoint if searching by TMDB/IMDB ID.
+ # Fetching all movies is simpler for now.
+ radarr_logger.debug("Fetching all movies to determine cutoff unmet status...")
+ movies = arr_request(api_url, api_key, api_timeout, "movie")
+ if movies is None:
+ radarr_logger.error("Failed to retrieve movies from Radarr API for cutoff check.")
+ return None
+
+ # Need quality profile information to determine cutoff unmet status.
+ # Fetch quality profiles first.
+ profiles = arr_request(api_url, api_key, api_timeout, "qualityprofile")
+ if profiles is None:
+ radarr_logger.error("Failed to retrieve quality profiles from Radarr API.")
+ return None
+
+ # Create a map for easy lookup: profile_id -> cutoff_format_score (or cutoff quality ID)
+ # Radarr profiles have 'cutoff' (quality ID) and potentially 'cutoffFormatScore'
+ profile_cutoff_map = {p['id']: p.get('cutoff') for p in profiles}
+ # TODO: Potentially incorporate cutoffFormatScore if needed for more complex logic
+
+ unmet_movies = []
+ for movie in movies:
+ is_monitored = movie.get("monitored", False)
+ has_file = movie.get("hasFile", False)
+ profile_id = movie.get("qualityProfileId")
+ movie_file = movie.get("movieFile")
+
+ # Apply monitored_only filter if requested
+ if not monitored_only or is_monitored:
+ if has_file and movie_file and profile_id in profile_cutoff_map:
+ cutoff_quality_id = profile_cutoff_map[profile_id]
+ current_quality_id = movie_file.get("quality", {}).get("quality", {}).get("id")
+
+ # Simple check: if current quality ID is less than cutoff quality ID
+ # This assumes quality IDs are ordered correctly (lower ID = lower quality)
+ # A more robust check might involve comparing quality *names* or *scores* if IDs aren't reliable order indicators.
+ if current_quality_id is not None and cutoff_quality_id is not None and current_quality_id < cutoff_quality_id:
+ # TODO: Add check for cutoffFormatScore if necessary
+ unmet_movies.append(movie)
+ # else: # Log why a movie wasn't considered unmet (optional)
+ # if not has_file: radarr_logger.debug(f"Skipping {movie.get('title')} - no file.")
+ # elif not movie_file: radarr_logger.debug(f"Skipping {movie.get('title')} - no movieFile info.")
+ # elif profile_id not in profile_cutoff_map: radarr_logger.debug(f"Skipping {movie.get('title')} - profile ID {profile_id} not found.")
+
+ radarr_logger.debug(f"Found {len(unmet_movies)} cutoff unmet movies (monitored_only={monitored_only}).")
+ return unmet_movies
+
+def refresh_movie(api_url: str, api_key: str, api_timeout: int, movie_id: int,
+ command_wait_delay: int = 1, command_wait_attempts: int = 600) -> Optional[int]:
+ """
+ Refresh a movie in Radarr.
+
+ Args:
+ api_url: The base URL of the Radarr API
+ api_key: The API key for authentication
+ api_timeout: Timeout for the API request
+ movie_id: The ID of the movie to refresh
+ command_wait_delay: Seconds to wait between command status checks
+ command_wait_attempts: Maximum number of status check attempts
+
+ Returns:
+ The command ID if the refresh was triggered successfully, None otherwise
+ """
+ endpoint = "command"
+ data = {
+ "name": "RefreshMovie",
+ "movieIds": [movie_id]
+ }
+
+ # Use the updated arr_request
+ response = arr_request(api_url, api_key, api_timeout, endpoint, method="POST", data=data)
+ if response and 'id' in response:
+ command_id = response['id']
+ radarr_logger.debug(f"Triggered refresh for movie ID {movie_id}. Command ID: {command_id}")
+
+ # Wait for command to complete if requested
+ if command_wait_delay > 0 and command_wait_attempts > 0:
+ radarr_logger.debug(f"Waiting for refresh command {command_id} to complete...")
+ success = wait_for_command(api_url, api_key, api_timeout, command_id,
+ delay_seconds=command_wait_delay,
+ max_attempts=command_wait_attempts)
+ if success:
+ radarr_logger.debug(f"Refresh command {command_id} completed successfully")
+ else:
+ radarr_logger.warning(f"Timed out waiting for refresh command {command_id} to complete")
+
+ return command_id
+ else:
+ radarr_logger.error(f"Failed to trigger refresh command for movie ID {movie_id}. Response: {response}")
+ return None
+
+def movie_search(api_url: str, api_key: str, api_timeout: int, movie_ids: List[int]) -> Optional[int]:
+ """
+ Trigger a search for one or more movies.
+
+ Args:
+ api_url: The base URL of the Radarr API
+ api_key: The API key for authentication
+ api_timeout: Timeout for the API request
+ movie_ids: A list of movie IDs to search for
+
+ Returns:
+ The command ID if the search command was triggered successfully, None otherwise
+ """
+ if not movie_ids:
+ radarr_logger.warning("No movie IDs provided for search.")
+ return None
+
+ endpoint = "command"
+ data = {
+ "name": "MoviesSearch",
+ "movieIds": movie_ids
+ }
+
+ # Use the updated arr_request
+ response = arr_request(api_url, api_key, api_timeout, endpoint, method="POST", data=data)
+ if response and 'id' in response:
+ command_id = response['id']
+ radarr_logger.debug(f"Triggered search for movie IDs: {movie_ids}. Command ID: {command_id}")
+ return command_id
+ else:
+ radarr_logger.error(f"Failed to trigger search command for movie IDs {movie_ids}. Response: {response}")
+ return None
+
+def check_connection(api_url: str, api_key: str, api_timeout: int) -> bool:
+ """Check the connection to Radarr API."""
+ try:
+ # Ensure api_url is properly formatted
+ if not api_url:
+ radarr_logger.error("API URL is empty or not set")
+ return False
+
+ # Make sure api_url has a scheme
+ if not (api_url.startswith('http://') or api_url.startswith('https://')):
+ radarr_logger.error(f"Invalid URL format: {api_url} - URL must start with http:// or https://")
+ return False
+
+ # Ensure URL doesn't end with a slash before adding the endpoint
+ base_url = api_url.rstrip('/')
+ full_url = f"{base_url}/api/v3/system/status"
+
+ response = requests.get(full_url, headers={"X-Api-Key": api_key}, timeout=api_timeout)
+ response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
+ radarr_logger.info("Successfully connected to Radarr.")
+ return True
+ except requests.exceptions.RequestException as e:
+ radarr_logger.error(f"Error connecting to Radarr: {e}")
+ return False
+ except Exception as e:
+ radarr_logger.error(f"An unexpected error occurred during Radarr connection check: {e}")
+ return False
+
+def wait_for_command(api_url: str, api_key: str, api_timeout: int, command_id: int,
+ delay_seconds: int = 1, max_attempts: int = 600) -> bool:
+ """
+ Wait for a command to complete.
+
+ Args:
+ api_url: The base URL of the Radarr API
+ api_key: The API key for authentication
+ api_timeout: Timeout for the API request
+ command_id: The ID of the command to wait for
+ delay_seconds: Seconds to wait between command status checks
+ max_attempts: Maximum number of status check attempts
+
+ Returns:
+ True if the command completed successfully, False if timed out
+ """
+ attempts = 0
+ while attempts < max_attempts:
+ response = arr_request(api_url, api_key, api_timeout, f"command/{command_id}")
+ if response and 'state' in response:
+ state = response['state']
+ if state == "completed":
+ return True
+ elif state == "failed":
+ radarr_logger.error(f"Command {command_id} failed")
+ return False
+ time.sleep(delay_seconds)
+ attempts += 1
+ radarr_logger.warning(f"Timed out waiting for command {command_id} to complete")
+ return False
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/radarr/missing.py b/Huntarr.io-6.3.6/src/primary/apps/radarr/missing.py
new file mode 100644
index 0000000..4fb9249
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/radarr/missing.py
@@ -0,0 +1,201 @@
+#!/usr/bin/env python3
+"""
+Missing Movies Processing for Radarr
+Handles searching for missing movies in Radarr
+"""
+
+import time
+import random
+import datetime
+from typing import List, Dict, Any, Set, Callable
+from src.primary.utils.logger import get_logger
+from src.primary.apps.radarr import api as radarr_api
+from src.primary.stats_manager import increment_stat
+from src.primary.stateful_manager import is_processed, add_processed_id
+from src.primary.utils.history_utils import log_processed_media
+from src.primary.settings_manager import load_settings, get_advanced_setting
+
+# Get logger for the app
+radarr_logger = get_logger("radarr")
+
+def process_missing_movies(
+ app_settings: Dict[str, Any],
+ stop_check: Callable[[], bool] # Function to check if stop is requested
+) -> bool:
+ """
+ Process missing movies in Radarr based on provided settings.
+
+ Args:
+ app_settings: Dictionary containing all settings for Radarr
+ stop_check: A function that returns True if the process should stop
+
+ Returns:
+ True if any movies were processed, False otherwise.
+ """
+ processed_any = False
+
+ # Get instance name - check for instance_name first, fall back to legacy "name" key if needed
+ instance_name = app_settings.get("instance_name", app_settings.get("name", "Radarr Default"))
+
+ # Log important settings
+ radarr_logger.info("=== Radarr Missing Movies Settings ===")
+ radarr_logger.info(f"Instance Name: {instance_name}")
+
+ # Extract necessary settings
+ api_url = app_settings.get("api_url", "").strip()
+ api_key = app_settings.get("api_key", "").strip()
+ api_timeout = get_advanced_setting("api_timeout", 120) # Use general.json value
+ monitored_only = app_settings.get("monitored_only", True)
+ skip_future_releases = app_settings.get("skip_future_releases", True)
+ skip_movie_refresh = app_settings.get("skip_movie_refresh", False)
+ hunt_missing_movies = app_settings.get("hunt_missing_movies", 0)
+
+ # Use advanced settings from general.json for command operations
+ command_wait_delay = get_advanced_setting("command_wait_delay", 1)
+ command_wait_attempts = get_advanced_setting("command_wait_attempts", 600)
+ release_type = app_settings.get("release_type", "physical")
+
+ radarr_logger.info(f"Hunt Missing Movies: {hunt_missing_movies}")
+ radarr_logger.info(f"Monitored Only: {monitored_only}")
+ radarr_logger.info(f"Skip Future Releases: {skip_future_releases}")
+ radarr_logger.info(f"Skip Movie Refresh: {skip_movie_refresh}")
+ radarr_logger.info(f"Release Type for Future Status: {release_type}")
+
+ release_type_field = 'physicalRelease'
+ if release_type == 'digital':
+ release_type_field = 'digitalRelease'
+ elif release_type == 'cinema':
+ release_type_field = 'inCinemas'
+
+ radarr_logger.info(f"Using {release_type_field} date to determine future releases")
+ radarr_logger.info("=======================================")
+
+ radarr_logger.info("Starting missing movies processing cycle for Radarr.")
+
+ if not api_url or not api_key:
+ radarr_logger.error("API URL or Key not configured in settings. Cannot process missing movies.")
+ return False
+
+ # Skip if hunt_missing_movies is set to 0
+ if hunt_missing_movies <= 0:
+ radarr_logger.info("'hunt_missing_movies' setting is 0 or less. Skipping missing movie processing.")
+ return False
+
+ # Check for stop signal
+ if stop_check():
+ radarr_logger.info("Stop requested before starting missing movies. Aborting...")
+ return False
+
+ # Get missing movies
+ radarr_logger.info("Retrieving movies with missing files...")
+ missing_movies = radarr_api.get_movies_with_missing(api_url, api_key, api_timeout, monitored_only)
+
+ if missing_movies is None: # API call failed
+ radarr_logger.error("Failed to retrieve missing movies from Radarr API.")
+ return False
+
+ if not missing_movies:
+ radarr_logger.info("No missing movies found.")
+ return False
+
+ # Check for stop signal after retrieving movies
+ if stop_check():
+ radarr_logger.info("Stop requested after retrieving missing movies. Aborting...")
+ return False
+
+ radarr_logger.info(f"Found {len(missing_movies)} movies with missing files.")
+
+ # Filter out future releases if configured
+ if skip_future_releases:
+ now = datetime.datetime.now(datetime.timezone.utc)
+ original_count = len(missing_movies)
+
+ missing_movies = [
+ movie for movie in missing_movies
+ if movie.get(release_type_field) and datetime.datetime.fromisoformat(movie[release_type_field].replace('Z', '+00:00')) < now
+ ]
+ skipped_count = original_count - len(missing_movies)
+ if skipped_count > 0:
+ radarr_logger.info(f"Skipped {skipped_count} future movie releases based on {release_type} release date.")
+
+ if not missing_movies:
+ radarr_logger.info("No missing movies left to process after filtering future releases.")
+ return False
+
+ movies_processed = 0
+ processing_done = False
+
+ # Filter out already processed movies using stateful management
+ unprocessed_movies = []
+ for movie in missing_movies:
+ movie_id = str(movie.get("id"))
+ if not is_processed("radarr", instance_name, movie_id):
+ unprocessed_movies.append(movie)
+ else:
+ radarr_logger.debug(f"Skipping already processed movie ID: {movie_id}")
+
+ radarr_logger.info(f"Found {len(unprocessed_movies)} unprocessed missing movies out of {len(missing_movies)} total.")
+
+ if not unprocessed_movies:
+ radarr_logger.info("No unprocessed missing movies found. All available movies have been processed.")
+ return False
+
+ # Always use random selection for missing movies
+ radarr_logger.info(f"Using random selection for missing movies")
+ if len(unprocessed_movies) > hunt_missing_movies:
+ movies_to_process = random.sample(unprocessed_movies, hunt_missing_movies)
+ else:
+ movies_to_process = unprocessed_movies
+
+ radarr_logger.info(f"Selected {len(movies_to_process)} movies to process.")
+
+ # Add detailed logging for selected movies
+ if movies_to_process:
+ radarr_logger.info(f"Movies selected for processing in this cycle:")
+ for idx, movie in enumerate(movies_to_process):
+ movie_id = movie.get("id")
+ movie_title = movie.get("title", "Unknown Title")
+ year = movie.get("year", "Unknown Year")
+ radarr_logger.info(f" {idx+1}. {movie_title} ({year}) - ID: {movie_id}")
+
+ # Process each movie
+ for movie in movies_to_process:
+ if stop_check():
+ radarr_logger.info("Stop requested during processing. Aborting...")
+ break
+
+ movie_id = movie.get("id")
+ movie_title = movie.get("title", "Unknown Title")
+
+ # Optional: Refresh the movie before searching
+ if not skip_movie_refresh:
+ radarr_logger.info(f"Refreshing movie metadata for '{movie_title}' (ID: {movie_id})...")
+ refresh_success = radarr_api.refresh_movie(api_url, api_key, api_timeout, movie_id, command_wait_delay, command_wait_attempts)
+
+ if not refresh_success:
+ radarr_logger.warning(f"Failed to refresh movie metadata for '{movie_title}'. Continuing anyway...")
+
+ # Search for the movie
+ radarr_logger.info(f"Searching for movie '{movie_title}' (ID: {movie_id})...")
+ search_success = radarr_api.movie_search(api_url, api_key, api_timeout, [movie_id])
+
+ if search_success:
+ radarr_logger.info(f"Successfully triggered search for movie '{movie_title}'")
+ # Immediately add to processed IDs to prevent duplicate processing
+ success = add_processed_id("radarr", instance_name, str(movie_id))
+ radarr_logger.debug(f"Added processed ID: {movie_id}, success: {success}")
+
+ # Log to history system
+ year = movie.get("year", "Unknown Year")
+ media_name = f"{movie_title} ({year})"
+ log_processed_media("radarr", media_name, movie_id, instance_name, "missing")
+ radarr_logger.debug(f"Logged history entry for movie: {media_name}")
+
+ increment_stat("radarr", "hunted")
+ movies_processed += 1
+ processed_any = True
+ else:
+ radarr_logger.warning(f"Failed to trigger search for movie '{movie_title}'")
+
+ radarr_logger.info(f"Finished processing missing movies. Processed {movies_processed} of {len(movies_to_process)} selected movies.")
+ return processed_any
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/radarr/upgrade.py b/Huntarr.io-6.3.6/src/primary/apps/radarr/upgrade.py
new file mode 100644
index 0000000..2980276
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/radarr/upgrade.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python3
+"""
+Quality Upgrade Processing for Radarr
+Handles searching for movies that need quality upgrades in Radarr
+"""
+
+import time
+import random
+from typing import List, Dict, Any, Set, Callable
+from src.primary.utils.logger import get_logger
+from src.primary.apps.radarr import api as radarr_api
+from src.primary.stats_manager import increment_stat
+from src.primary.stateful_manager import is_processed, add_processed_id
+from src.primary.utils.history_utils import log_processed_media
+from src.primary.settings_manager import get_advanced_setting
+
+# Get logger for the app
+radarr_logger = get_logger("radarr")
+
+def process_cutoff_upgrades(
+ app_settings: Dict[str, Any],
+ stop_check: Callable[[], bool] # Function to check if stop is requested
+) -> bool:
+ """
+ Process quality cutoff upgrades for Radarr based on settings.
+
+ Args:
+ app_settings: Dictionary containing all settings for Radarr
+ stop_check: A function that returns True if the process should stop
+
+ Returns:
+ True if any movies were processed for upgrades, False otherwise.
+ """
+ radarr_logger.info("Starting quality cutoff upgrades processing cycle for Radarr.")
+ processed_any = False
+
+ # Extract necessary settings
+ api_url = app_settings.get("api_url", "").strip()
+ api_key = app_settings.get("api_key", "").strip()
+ api_timeout = get_advanced_setting("api_timeout", 120) # Use general.json value
+ monitored_only = app_settings.get("monitored_only", True)
+ skip_movie_refresh = app_settings.get("skip_movie_refresh", False)
+ hunt_upgrade_movies = app_settings.get("hunt_upgrade_movies", 0)
+
+ # Use advanced settings from general.json for command operations
+ command_wait_delay = get_advanced_setting("command_wait_delay", 1)
+ command_wait_attempts = get_advanced_setting("command_wait_attempts", 600)
+
+ # Get instance name - check for instance_name first, fall back to legacy "name" key if needed
+ instance_name = app_settings.get("instance_name", app_settings.get("name", "Radarr Default"))
+
+ # Get movies eligible for upgrade
+ radarr_logger.info("Retrieving movies eligible for cutoff upgrade...")
+ upgrade_eligible_data = radarr_api.get_cutoff_unmet_movies(api_url, api_key, api_timeout, monitored_only)
+
+ if not upgrade_eligible_data:
+ radarr_logger.info("No movies found eligible for upgrade or error retrieving them.")
+ return False
+
+ radarr_logger.info(f"Found {len(upgrade_eligible_data)} movies eligible for upgrade.")
+
+ # Filter out already processed movies using stateful management
+ unprocessed_movies = []
+ for movie in upgrade_eligible_data:
+ movie_id = str(movie.get("id"))
+ if not is_processed("radarr", instance_name, movie_id):
+ unprocessed_movies.append(movie)
+ else:
+ radarr_logger.debug(f"Skipping already processed movie ID: {movie_id}")
+
+ radarr_logger.info(f"Found {len(unprocessed_movies)} unprocessed movies for upgrade out of {len(upgrade_eligible_data)} total.")
+
+ if not unprocessed_movies:
+ radarr_logger.info("No upgradeable movies found to process (after filtering already processed). Skipping.")
+ return False
+
+ radarr_logger.info(f"Randomly selecting up to {hunt_upgrade_movies} movies for upgrade search.")
+ movies_to_process = random.sample(unprocessed_movies, min(hunt_upgrade_movies, len(unprocessed_movies)))
+
+ radarr_logger.info(f"Selected {len(movies_to_process)} movies to search for upgrades.")
+ processed_count = 0
+ processed_something = False
+
+ for movie in movies_to_process:
+ if stop_check():
+ radarr_logger.info("Stop signal received, aborting Radarr upgrade cycle.")
+ break
+
+ movie_id = movie.get("id")
+ movie_title = movie.get("title")
+ movie_year = movie.get("year")
+
+ radarr_logger.info(f"Processing upgrade for movie: \"{movie_title}\" ({movie_year}) (Movie ID: {movie_id})")
+
+ # Refresh movie (optional)
+ if not skip_movie_refresh:
+ radarr_logger.info(f" - Refreshing movie info...")
+ refresh_result = radarr_api.refresh_movie(api_url, api_key, api_timeout, movie_id)
+ if not refresh_result:
+ radarr_logger.warning(f" - Failed to trigger movie refresh. Continuing search anyway.")
+ else:
+ radarr_logger.debug(f" - Skipping movie refresh (skip_movie_refresh=true)")
+
+ # Search for cutoff upgrade
+ radarr_logger.info(f" - Searching for quality upgrade...")
+ search_result = radarr_api.movie_search(api_url, api_key, api_timeout, [movie_id])
+
+ if search_result:
+ radarr_logger.info(f" - Successfully triggered search for quality upgrade.")
+ add_processed_id("radarr", instance_name, str(movie_id))
+ increment_stat("radarr", "upgraded")
+
+ # Log to history so the upgrade appears in the history UI
+ media_name = f"{movie_title} ({movie_year})"
+ log_processed_media("radarr", media_name, movie_id, instance_name, "upgrade")
+ radarr_logger.debug(f"Logged quality upgrade to history for movie ID {movie_id}")
+
+ processed_count += 1
+ processed_something = True
+ else:
+ radarr_logger.warning(f" - Failed to trigger search for quality upgrade.")
+
+ # Log final status
+ radarr_logger.info(f"Completed processing {processed_count} movies for quality upgrades.")
+
+ return processed_something
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/radarr_routes.py b/Huntarr.io-6.3.6/src/primary/apps/radarr_routes.py
new file mode 100644
index 0000000..eb6caf6
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/radarr_routes.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python3
+
+from flask import Blueprint, request, jsonify
+import datetime, os, requests
+from src.primary import keys_manager
+from src.primary.state import get_state_file_path, reset_state_file
+from src.primary.utils.logger import get_logger
+import traceback
+import socket
+from urllib.parse import urlparse
+
+radarr_bp = Blueprint('radarr', __name__)
+radarr_logger = get_logger("radarr")
+
+# Make sure we're using the correct state files
+PROCESSED_MISSING_FILE = get_state_file_path("radarr", "processed_missing")
+PROCESSED_UPGRADES_FILE = get_state_file_path("radarr", "processed_upgrades")
+
+@radarr_bp.route('/test-connection', methods=['POST'])
+def test_connection():
+ """Test connection to a Radarr API instance"""
+ data = request.json
+ api_url = data.get('api_url')
+ api_key = data.get('api_key')
+ api_timeout = data.get('api_timeout', 30) # Use longer timeout for connection test
+
+ if not api_url or not api_key:
+ return jsonify({"success": False, "message": "API URL and API Key are required"}), 400
+
+ radarr_logger.info(f"Testing connection to Radarr API at {api_url}")
+
+ # Validate URL format
+ if not (api_url.startswith('http://') or api_url.startswith('https://')):
+ error_msg = "API URL must start with http:// or https://"
+ radarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 400
+
+ # Try to establish a socket connection first to check basic connectivity
+ parsed_url = urlparse(api_url)
+ hostname = parsed_url.hostname
+ port = parsed_url.port or (443 if parsed_url.scheme == 'https' else 80)
+
+ try:
+ # Try socket connection for quick feedback on connectivity issues
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.settimeout(3) # Short timeout for quick feedback
+ result = sock.connect_ex((hostname, port))
+ sock.close()
+
+ if result != 0:
+ error_msg = f"Connection refused - Unable to connect to {hostname}:{port}. Please check if the server is running and the port is correct."
+ radarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 404
+ except socket.gaierror:
+ error_msg = f"DNS resolution failed - Cannot resolve hostname: {hostname}. Please check your URL."
+ radarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 404
+ except Exception as e:
+ # Log the socket testing error but continue with the full request
+ radarr_logger.debug(f"Socket test error, continuing with full request: {str(e)}")
+
+ # For Radarr, use api/v3
+ url = f"{api_url.rstrip('/')}/api/v3/system/status"
+ headers = {
+ "X-Api-Key": api_key,
+ "Content-Type": "application/json"
+ }
+
+ try:
+ response = requests.get(url, headers=headers, timeout=(10, api_timeout))
+
+ # For HTTP errors, provide more specific feedback
+ if response.status_code == 401:
+ error_msg = "Authentication failed: Invalid API key"
+ radarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 401
+ elif response.status_code == 403:
+ error_msg = "Access forbidden: Check API key permissions"
+ radarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 403
+ elif response.status_code == 404:
+ error_msg = "API endpoint not found: This doesn't appear to be a valid Radarr server. Check your URL."
+ radarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 404
+ elif response.status_code >= 500:
+ error_msg = f"Radarr server error (HTTP {response.status_code}): The Radarr server is experiencing issues"
+ radarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), response.status_code
+
+ # Raise for other HTTP errors
+ response.raise_for_status()
+
+ try:
+ response_data = response.json()
+ version = response_data.get('version', 'unknown')
+ radarr_logger.info(f"Successfully connected to Radarr API version: {version}")
+
+ return jsonify({
+ "success": True,
+ "message": "Successfully connected to Radarr API",
+ "version": version
+ })
+ except ValueError:
+ error_msg = "Invalid JSON response from Radarr API - This doesn't appear to be a valid Radarr server"
+ radarr_logger.error(f"{error_msg}. Response content: {response.text[:200]}")
+ return jsonify({"success": False, "message": error_msg}), 500
+
+ except requests.exceptions.ConnectionError as e:
+ # Handle different types of connection errors
+ error_details = str(e)
+ if "Connection refused" in error_details:
+ error_msg = f"Connection refused - Radarr is not running on {api_url} or the port is incorrect"
+ elif "Name or service not known" in error_details or "getaddrinfo failed" in error_details:
+ error_msg = f"DNS resolution failed - Cannot find host '{urlparse(api_url).hostname}'. Check your URL."
+ else:
+ error_msg = f"Connection error - Check if Radarr is running: {error_details}"
+
+ radarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 404
+ except requests.exceptions.Timeout:
+ error_msg = f"Connection timed out - Radarr took too long to respond"
+ radarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 504
+ except requests.exceptions.RequestException as e:
+ error_msg = f"Connection test failed: {str(e)}"
+ radarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 500
diff --git a/Huntarr.io-6.3.6/src/primary/apps/readarr.py b/Huntarr.io-6.3.6/src/primary/apps/readarr.py
new file mode 100644
index 0000000..2cb866e
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/readarr.py
@@ -0,0 +1,179 @@
+from flask import Blueprint, request, jsonify
+import datetime, os, requests
+from src.primary import keys_manager
+from src.primary.utils.logger import get_logger
+from src.primary.state import get_state_file_path
+from src.primary.settings_manager import load_settings
+
+readarr_bp = Blueprint('readarr', __name__)
+readarr_logger = get_logger("readarr")
+
+# Make sure we're using the correct state files
+PROCESSED_MISSING_FILE = get_state_file_path("readarr", "processed_missing")
+PROCESSED_UPGRADES_FILE = get_state_file_path("readarr", "processed_upgrades")
+
+@readarr_bp.route('/test-connection', methods=['POST'])
+def test_connection():
+ """Test connection to a Readarr API instance with comprehensive diagnostics"""
+ data = request.json
+ api_url = data.get('api_url')
+ api_key = data.get('api_key')
+ api_timeout = data.get('api_timeout', 30) # Use longer timeout for connection test
+
+ if not api_url or not api_key:
+ return jsonify({"success": False, "message": "API URL and API Key are required"}), 400
+
+ # Log the test attempt
+ readarr_logger.info(f"Testing connection to Readarr API at {api_url}")
+
+ # First check if URL is properly formatted
+ if not (api_url.startswith('http://') or api_url.startswith('https://')):
+ error_msg = "API URL must start with http:// or https://"
+ readarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 400
+
+ # For Readarr, use api/v1
+ api_base = "api/v1"
+ test_url = f"{api_url.rstrip('/')}/{api_base}/system/status"
+ headers = {'X-Api-Key': api_key}
+
+ try:
+ # Use a connection timeout separate from read timeout
+ response = requests.get(test_url, headers=headers, timeout=(10, api_timeout))
+
+ # Log HTTP status code for diagnostic purposes
+ readarr_logger.debug(f"Readarr API status code: {response.status_code}")
+
+ # Check HTTP status code
+ response.raise_for_status()
+
+ # Ensure the response is valid JSON
+ try:
+ response_data = response.json()
+
+ # We no longer save keys here since we use instances
+ # keys_manager.save_api_keys("readarr", api_url, api_key)
+
+ readarr_logger.info(f"Successfully connected to Readarr API version: {response_data.get('version', 'unknown')}")
+
+ # Return success with some useful information
+ return jsonify({
+ "success": True,
+ "message": "Successfully connected to Readarr API",
+ "version": response_data.get('version', 'unknown')
+ })
+ except ValueError:
+ error_msg = "Invalid JSON response from Readarr API"
+ readarr_logger.error(f"{error_msg}. Response content: {response.text[:200]}")
+ return jsonify({"success": False, "message": error_msg}), 500
+
+ except requests.exceptions.Timeout as e:
+ error_msg = f"Connection timed out after {api_timeout} seconds"
+ readarr_logger.error(f"{error_msg}: {str(e)}")
+ return jsonify({"success": False, "message": error_msg}), 504
+
+ except requests.exceptions.ConnectionError as e:
+ error_msg = "Connection error - check hostname and port"
+ details = str(e)
+ # Check for common DNS resolution errors
+ if "Name or service not known" in details or "getaddrinfo failed" in details:
+ error_msg = "DNS resolution failed - check hostname"
+ # Check for common connection refused errors
+ elif "Connection refused" in details:
+ error_msg = "Connection refused - check if Readarr is running and the port is correct"
+
+ readarr_logger.error(f"{error_msg}: {details}")
+ return jsonify({"success": False, "message": f"{error_msg}: {details}"}), 502
+
+ except requests.exceptions.RequestException as e:
+ error_message = f"Connection failed: {str(e)}"
+
+ if hasattr(e, 'response') and e.response is not None:
+ status_code = e.response.status_code
+
+ # Add specific messages based on common status codes
+ if status_code == 401:
+ error_message = "Authentication failed: Invalid API key"
+ elif status_code == 403:
+ error_message = "Access forbidden: Check API key permissions"
+ elif status_code == 404:
+ error_message = "API endpoint not found: Check API URL"
+ elif status_code >= 500:
+ error_message = f"Readarr server error (HTTP {status_code}): The Readarr server is experiencing issues"
+
+ # Try to extract more error details if available
+ try:
+ error_details = e.response.json()
+ error_message += f" - {error_details.get('message', 'No details')}"
+ except ValueError:
+ if e.response.text:
+ error_message += f" - Response: {e.response.text[:200]}"
+
+ readarr_logger.error(error_message)
+ return jsonify({"success": False, "message": error_message}), 500
+
+ except Exception as e:
+ error_msg = f"An unexpected error occurred: {str(e)}"
+ readarr_logger.error(error_msg)
+ return jsonify({"success": False, "message": error_msg}), 500
+
+# Function to check if Readarr is configured
+def is_configured():
+ """Check if Readarr API credentials are configured by checking if at least one instance is enabled"""
+ settings = load_settings("readarr")
+
+ if not settings:
+ readarr_logger.debug("No settings found for Readarr")
+ return False
+
+ # Check if instances are configured
+ if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]:
+ for instance in settings["instances"]:
+ if instance.get("enabled", True) and instance.get("api_url") and instance.get("api_key"):
+ readarr_logger.debug(f"Found configured Readarr instance: {instance.get('name', 'Unnamed')}")
+ return True
+
+ readarr_logger.debug("No enabled Readarr instances found with valid API URL and key")
+ return False
+
+ # Fallback to legacy single-instance config
+ api_url = settings.get("api_url")
+ api_key = settings.get("api_key")
+ return bool(api_url and api_key)
+
+# Get all valid instances from settings
+def get_configured_instances():
+ """Get all configured and enabled Readarr instances"""
+ settings = load_settings("readarr")
+ instances = []
+
+ if not settings:
+ readarr_logger.debug("No settings found for Readarr")
+ return instances
+
+ # Check if instances are configured
+ if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]:
+ for instance in settings["instances"]:
+ if instance.get("enabled", True) and instance.get("api_url") and instance.get("api_key"):
+ # Create a settings object for this instance by combining global settings with instance-specific ones
+ instance_settings = settings.copy()
+ # Remove instances list to avoid confusion
+ if "instances" in instance_settings:
+ del instance_settings["instances"]
+
+ # Override with instance-specific connection settings
+ instance_settings["api_url"] = instance.get("api_url")
+ instance_settings["api_key"] = instance.get("api_key")
+ instance_settings["instance_name"] = instance.get("name", "Default")
+
+ instances.append(instance_settings)
+ else:
+ # Fallback to legacy single-instance config
+ api_url = settings.get("api_url")
+ api_key = settings.get("api_key")
+ if api_url and api_key:
+ settings["instance_name"] = "Default"
+ instances.append(settings)
+
+ readarr_logger.info(f"Found {len(instances)} configured and enabled Readarr instances")
+ return instances
diff --git a/Huntarr.io-6.3.6/src/primary/apps/readarr/__init__.py b/Huntarr.io-6.3.6/src/primary/apps/readarr/__init__.py
new file mode 100644
index 0000000..7a2516a
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/readarr/__init__.py
@@ -0,0 +1,91 @@
+"""
+Readarr module initialization
+"""
+
+# Use src.primary imports
+from src.primary.apps.readarr.missing import process_missing_books
+from src.primary.apps.readarr.upgrade import process_cutoff_upgrades
+# Add necessary imports
+from src.primary.settings_manager import load_settings
+from src.primary.utils.logger import get_logger
+
+# Define logger for this module
+readarr_logger = get_logger("readarr")
+
+def get_configured_instances():
+ """Get all configured and enabled Readarr instances"""
+ settings = load_settings("readarr")
+ instances = []
+ # readarr_logger.info(f"Loaded Readarr settings for instance check: {settings}") # Removed verbose log
+
+ if not settings:
+ readarr_logger.debug("No settings found for Readarr")
+ return instances
+
+ # Check if instances are configured
+ if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]:
+ # readarr_logger.info(f"Found 'instances' list with {len(settings['instances'])} items. Processing...") # Removed verbose log
+ for idx, instance in enumerate(settings["instances"]):
+ readarr_logger.debug(f"Checking instance #{idx}: {instance}")
+ # Enhanced validation
+ api_url = instance.get("api_url", "").strip()
+ api_key = instance.get("api_key", "").strip()
+
+ # Enhanced URL validation - ensure URL has proper scheme
+ if api_url and not (api_url.startswith('http://') or api_url.startswith('https://')):
+ readarr_logger.warning(f"Instance '{instance.get('name', 'Unnamed')}' has URL without http(s) scheme: {api_url}")
+ api_url = f"http://{api_url}"
+ readarr_logger.warning(f"Auto-correcting URL to: {api_url}")
+
+ is_enabled = instance.get("enabled", True)
+
+ # Only include properly configured instances
+ if is_enabled and api_url and api_key:
+ # Return only essential instance details
+ instance_data = {
+ "instance_name": instance.get("name", "Default"),
+ "api_url": api_url,
+ "api_key": api_key,
+ }
+ instances.append(instance_data)
+ # readarr_logger.info(f"Added valid instance: {instance_data}") # Removed verbose log
+ elif not is_enabled:
+ readarr_logger.debug(f"Skipping disabled instance: {instance.get('name', 'Unnamed')}")
+ else:
+ # For brand new installations, don't spam logs with warnings about default instances
+ instance_name = instance.get('name', 'Unnamed')
+ if instance_name == 'Default':
+ # Use debug level for default instances to avoid log spam on new installations
+ readarr_logger.debug(f"Skipping instance '{instance_name}' due to missing API URL or key (URL: '{api_url}', Key Set: {bool(api_key)})")
+ else:
+ # Still log warnings for non-default instances
+ readarr_logger.warning(f"Skipping instance '{instance_name}' due to missing API URL or key (URL: '{api_url}', Key Set: {bool(api_key)})")
+ else:
+ # readarr_logger.info("No 'instances' list found or list is empty. Checking legacy config.") # Removed verbose log
+ # Fallback to legacy single-instance config
+ api_url = settings.get("api_url", "").strip()
+ api_key = settings.get("api_key", "").strip()
+
+ # Ensure URL has proper scheme
+ if api_url and not (api_url.startswith('http://') or api_url.startswith('https://')):
+ readarr_logger.warning(f"API URL missing http(s) scheme: {api_url}")
+ api_url = f"http://{api_url}"
+ readarr_logger.warning(f"Auto-correcting URL to: {api_url}")
+
+ if api_url and api_key:
+ # Create a clean instance_data dict for the legacy instance
+ instance_data = {
+ "instance_name": "Default",
+ "api_url": api_url,
+ "api_key": api_key,
+ }
+ instances.append(instance_data)
+ # readarr_logger.info(f"Added valid legacy instance: {instance_data}") # Removed verbose log
+ else:
+ readarr_logger.warning("No API URL or key found in legacy configuration")
+
+ # Use debug level to avoid spamming logs, especially with 0 instances
+ readarr_logger.debug(f"Found {len(instances)} configured and enabled Readarr instances")
+ return instances
+
+__all__ = ["process_missing_books", "process_cutoff_upgrades", "get_configured_instances"]
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/readarr/api.py b/Huntarr.io-6.3.6/src/primary/apps/readarr/api.py
new file mode 100644
index 0000000..6137893
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/readarr/api.py
@@ -0,0 +1,372 @@
+#!/usr/bin/env python3
+"""
+Readarr-specific API functions
+Handles all communication with the Readarr API
+"""
+
+import requests
+import json
+import time
+import datetime
+from typing import List, Dict, Any, Optional, Union
+# Correct the import path
+from src.primary.utils.logger import get_logger
+# Import load_settings
+from src.primary.settings_manager import load_settings
+
+# Get app-specific logger
+logger = get_logger("readarr")
+
+# Use a session for better performance
+session = requests.Session()
+
+# Default API timeout in seconds - used as fallback only
+API_TIMEOUT = 30
+
+def check_connection(api_url: str, api_key: str, api_timeout: int) -> bool:
+ """Check the connection to Readarr API."""
+ try:
+ # Ensure api_url is properly formatted
+ if not api_url:
+ logger.error("API URL is empty or not set")
+ return False
+
+ # Make sure api_url has a scheme
+ if not (api_url.startswith('http://') or api_url.startswith('https://')):
+ logger.error(f"Invalid URL format: {api_url} - URL must start with http:// or https://")
+ return False
+
+ # Ensure URL doesn't end with a slash before adding the endpoint
+ base_url = api_url.rstrip('/')
+ full_url = f"{base_url}/api/v1/system/status"
+
+ response = requests.get(full_url, headers={"X-Api-Key": api_key}, timeout=api_timeout)
+ response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
+ logger.info("Successfully connected to Readarr.")
+ return True
+ except requests.exceptions.RequestException as e:
+ logger.error(f"Error connecting to Readarr: {e}")
+ return False
+ except Exception as e:
+ logger.error(f"An unexpected error occurred during Readarr connection check: {e}")
+ return False
+
+def get_download_queue_size(api_url: str = None, api_key: str = None, timeout: int = 30) -> int:
+ """
+ Get the current size of the download queue.
+
+ Args:
+ api_url: Optional API URL (if not provided, will be fetched from settings)
+ api_key: Optional API key (if not provided, will be fetched from settings)
+ timeout: Timeout in seconds for the request
+
+ Returns:
+ The number of items in the download queue, or 0 if the request failed
+ """
+ try:
+ # If API URL and key are provided, use them directly
+ if api_url and api_key:
+ # Clean up API URL
+ api_url = api_url.rstrip('/')
+ url = f"{api_url}/api/v1/queue"
+
+ # Headers
+ headers = {
+ "X-Api-Key": api_key,
+ "Content-Type": "application/json"
+ }
+
+ # Make the request
+ response = session.get(url, headers=headers, timeout=timeout)
+ response.raise_for_status()
+
+ # Parse JSON response
+ data = response.json()
+ if "totalRecords" in data:
+ return data["totalRecords"]
+ return 0
+ else:
+ # Use the arr_request function if API URL and key aren't provided
+ response = arr_request("queue")
+ if response and "totalRecords" in response:
+ return response["totalRecords"]
+ return 0
+ except Exception as e:
+ logger.error(f"Error getting download queue size: {e}")
+ return 0
+
+def arr_request(endpoint: str, method: str = "GET", data: Dict = None, app_type: str = "readarr",
+ api_url: Optional[str] = None, api_key: Optional[str] = None, api_timeout: Optional[int] = None) -> Any:
+ """
+ Make a request to the Readarr API.
+ Now accepts optional api_url, api_key, and api_timeout.
+
+ Args:
+ endpoint: The API endpoint to call
+ method: HTTP method (GET, POST, PUT, DELETE)
+ data: Optional data to send with the request
+ app_type: The app type (readarr by default)
+ api_url: Optional API URL (overrides loaded settings)
+ api_key: Optional API key (overrides loaded settings)
+ api_timeout: Optional API timeout (overrides loaded settings)
+
+ Returns:
+ The JSON response from the API, or None if the request failed
+ """
+ # Load settings only if credentials are not provided directly
+ if api_url is None or api_key is None or api_timeout is None:
+ settings = load_settings(app_type)
+ loaded_api_url = settings.get('api_url', '')
+ loaded_api_key = settings.get('api_key', '')
+ loaded_api_timeout = settings.get('api_timeout', 60)
+
+ # Use provided args if available, otherwise use loaded settings
+ api_url = api_url if api_url is not None else loaded_api_url
+ api_key = api_key if api_key is not None else loaded_api_key
+ api_timeout = api_timeout if api_timeout is not None else loaded_api_timeout
+
+ if not api_url or not api_key:
+ logger.error("API URL or API key is missing. Check your settings.")
+ return None
+
+ # Ensure api_url has a scheme
+ if not (api_url.startswith('http://') or api_url.startswith('https://')):
+ logger.error(f"Invalid URL format: {api_url} - URL must start with http:// or https://")
+ return None
+
+ # Determine the API version
+ api_base = "api/v1" # Readarr uses v1
+
+ # Full URL
+ url = f"{api_url.rstrip('/')}/{api_base}/{endpoint.lstrip('/')}"
+
+ # Headers
+ headers = {
+ "X-Api-Key": api_key,
+ "Content-Type": "application/json"
+ }
+
+ try:
+ if method == "GET":
+ response = session.get(url, headers=headers, timeout=api_timeout)
+ elif method == "POST":
+ response = session.post(url, headers=headers, json=data, timeout=api_timeout)
+ elif method == "PUT":
+ response = session.put(url, headers=headers, json=data, timeout=api_timeout)
+ elif method == "DELETE":
+ response = session.delete(url, headers=headers, timeout=api_timeout)
+ else:
+ logger.error(f"Unsupported HTTP method: {method}")
+ return None
+
+ # Check for errors
+ response.raise_for_status()
+
+ # Parse JSON response
+ if response.text:
+ return response.json()
+ return {}
+
+ except requests.exceptions.RequestException as e:
+ logger.error(f"API request failed: {e}")
+ return None
+
+def get_books_with_missing_files() -> List[Dict]:
+ """
+ Get a list of books with missing files (not downloaded/available).
+
+ Returns:
+ A list of book objects with missing files
+ """
+ # First, get all books
+ books = arr_request("book")
+ if not books:
+ return []
+
+ # Filter for books with missing files
+ missing_books = []
+ for book in books:
+ # Check if book is monitored and doesn't have a file
+ if book.get("monitored", False) and not book.get("bookFile", None):
+ missing_books.append(book)
+
+ return missing_books
+
+def get_cutoff_unmet_books(api_url: Optional[str] = None, api_key: Optional[str] = None, api_timeout: Optional[int] = None) -> List[Dict]:
+ """
+ Get a list of books that don't meet their quality profile cutoff.
+ Accepts optional API credentials.
+
+ Args:
+ api_url: Optional API URL
+ api_key: Optional API key
+ api_timeout: Optional API timeout
+
+ Returns:
+ A list of book objects that need quality upgrades
+ """
+ # The cutoffUnmet endpoint in Readarr
+ params = "cutoffUnmet=true"
+ # Pass credentials to arr_request
+ books = arr_request(f"wanted/cutoff?{params}", api_url=api_url, api_key=api_key, api_timeout=api_timeout)
+ if not books or "records" not in books:
+ return []
+
+ return books.get("records", [])
+
+def get_wanted_missing_books(api_url: str, api_key: str, api_timeout: int, monitored_only: bool = True) -> List[Dict]:
+ """
+ Get wanted/missing books from Readarr, handling pagination.
+
+ Args:
+ api_url: The base URL of the Readarr API.
+ api_key: The API key for authentication.
+ api_timeout: Timeout for the API request.
+ monitored_only: If True, only return monitored books (Readarr API default seems to handle this).
+
+ Returns:
+ A list of dictionaries, each representing a missing book, or an empty list on error.
+ """
+ all_missing_books = []
+ page = 1
+ page_size = 100 # Adjust as needed, check Readarr API limits
+ endpoint = "wanted/missing"
+
+ # Ensure api_url is properly formatted
+ if not (api_url.startswith('http://') or api_url.startswith('https://')):
+ logger.error(f"Invalid URL format: {api_url}")
+ return []
+ base_url = api_url.rstrip('/')
+ url = f"{base_url}/api/v1/{endpoint.lstrip('/')}"
+ headers = {"X-Api-Key": api_key}
+
+ while True:
+ params = {
+ 'page': page,
+ 'pageSize': page_size,
+ # Removed sorting parameters due to potential API issues
+ # 'sortKey': 'author.sortName',
+ # 'sortDirection': 'ascending',
+ # 'monitored': monitored_only # Note: Check if Readarr API supports this directly for wanted/missing
+ }
+ try:
+ response = requests.get(url, headers=headers, params=params, timeout=api_timeout)
+ response.raise_for_status()
+ data = response.json()
+
+ if not data or 'records' not in data or not data['records']:
+ break # No more data or unexpected format
+
+ all_missing_books.extend(data['records'])
+
+ total_records = data.get('totalRecords', 0)
+ if len(all_missing_books) >= total_records:
+ break # We have fetched all records
+
+ page += 1
+
+ except requests.exceptions.RequestException as e:
+ logger.error(f"Error fetching missing books (page {page}) from {url}: {e}")
+ return [] # Return empty list on error
+ except json.JSONDecodeError:
+ logger.error(f"Error decoding JSON response from {url} (page {page}). Response: {response.text[:200]}")
+ return []
+ except Exception as e:
+ logger.error(f"Unexpected error fetching missing books (page {page}): {e}", exc_info=True)
+ return []
+
+ logger.info(f"Successfully fetched {len(all_missing_books)} missing books from Readarr.")
+ return all_missing_books
+
+def refresh_author(author_id: int, api_url: Optional[str] = None, api_key: Optional[str] = None, api_timeout: Optional[int] = None) -> bool:
+ """
+ Refresh an author in Readarr.
+ Accepts optional API credentials.
+
+ Args:
+ author_id: The ID of the author to refresh
+ api_url: Optional API URL
+ api_key: Optional API key
+ api_timeout: Optional API timeout
+
+ Returns:
+ True if the refresh was successful, False otherwise
+ """
+ endpoint = f"command"
+ data = {
+ "name": "RefreshAuthor",
+ "authorId": author_id
+ }
+
+ # Pass credentials to arr_request
+ response = arr_request(endpoint, method="POST", data=data, api_url=api_url, api_key=api_key, api_timeout=api_timeout)
+ if response:
+ logger.debug(f"Refreshed author ID {author_id}")
+ return True
+ return False
+
+def book_search(book_ids: List[int], api_url: Optional[str] = None, api_key: Optional[str] = None, api_timeout: Optional[int] = None) -> Optional[Dict]:
+    """
+    Trigger a search for one or more books.
+    Accepts optional API credentials.
+
+    Args:
+        book_ids: A list of book IDs to search for
+        api_url: Optional API URL
+        api_key: Optional API key
+        api_timeout: Optional API timeout
+
+    Returns:
+        The raw command response dict from the API (contains the command ID), or None on failure
+    """
+    endpoint = "command"
+    data = {
+        "name": "BookSearch",
+        "bookIds": book_ids
+    }
+
+    # Pass credentials to arr_request
+    response = arr_request(endpoint, method="POST", data=data, api_url=api_url, api_key=api_key, api_timeout=api_timeout)
+    # Return the response object (contains command ID) instead of just True/False
+    # The calling function expects the command object now.
+    return response
+
+def get_author_details(api_url: str, api_key: str, author_id: int, api_timeout: int = 120) -> Optional[Dict]:
+ """Fetches details for a specific author from the Readarr API."""
+ endpoint = f"{api_url}/api/v1/author/{author_id}"
+ headers = {'X-Api-Key': api_key}
+ try:
+ response = requests.get(endpoint, headers=headers, timeout=api_timeout)
+ response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
+ author_data = response.json()
+ logger.debug(f"Successfully fetched details for author ID {author_id}.")
+ return author_data
+ except requests.exceptions.RequestException as e:
+ logger.error(f"Error fetching author details for ID {author_id} from {endpoint}: {e}")
+ return None
+ except Exception as e:
+ logger.error(f"An unexpected error occurred fetching author details for ID {author_id}: {e}")
+ return None
+
+def search_books(api_url: str, api_key: str, book_ids: List[int], api_timeout: int = 120) -> Optional[Dict]:
+    """Triggers a search for specific book IDs in Readarr."""
+    endpoint = f"{api_url}/api/v1/command" # Full command URL built directly rather than via arr_request
+    headers = {'X-Api-Key': api_key}
+    payload = {
+        'name': 'BookSearch',
+        'bookIds': book_ids
+    }
+    try:
+        # POST the BookSearch command; the response includes the queued command's ID.
+        response = requests.post(endpoint, headers=headers, json=payload, timeout=api_timeout)
+        response.raise_for_status()
+        command_data = response.json()
+        command_id = command_data.get('id')
+        logger.info(f"Successfully triggered BookSearch command for book IDs: {book_ids}. Command ID: {command_id}")
+        return command_data # Return the full command object which includes the ID
+    except requests.exceptions.RequestException as e:
+        logger.error(f"Error triggering BookSearch command for book IDs {book_ids} via {endpoint}: {e}")
+        return None
+    except Exception as e:
+        logger.error(f"An unexpected error occurred triggering BookSearch for book IDs {book_ids}: {e}")
+        return None
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/readarr/missing.py b/Huntarr.io-6.3.6/src/primary/apps/readarr/missing.py
new file mode 100644
index 0000000..f927147
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/readarr/missing.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python3
+"""
+Missing Books Processing for Readarr
+Handles searching for missing books in Readarr
+"""
+
+import time
+import random
+from typing import List, Dict, Any, Set, Callable
+from src.primary.utils.logger import get_logger
+from src.primary.apps.readarr import api as readarr_api
+from src.primary.stats_manager import increment_stat
+from src.primary.stateful_manager import is_processed, add_processed_id
+from src.primary.utils.history_utils import log_processed_media
+from src.primary.settings_manager import load_settings, get_advanced_setting
+from src.primary.state import check_state_reset
+
+# Get logger for the app
+readarr_logger = get_logger("readarr")
+
def process_missing_books(
    app_settings: Dict[str, Any],
    stop_check: Callable[[], bool]  # Function to check if stop is requested
) -> bool:
    """
    Process missing books in Readarr based on provided settings.

    Missing books are grouped by author; up to ``hunt_missing_books`` authors
    are picked at random from the not-yet-processed set, optionally refreshed,
    and a single BookSearch is triggered per author covering all of that
    author's missing books.

    Args:
        app_settings: Dictionary containing all settings for Readarr
        stop_check: A function that returns True if the process should stop

    Returns:
        True if any books were processed, False otherwise.
    """
    readarr_logger.info("Starting missing books processing cycle for Readarr.")

    # Reset state files if enough time has passed
    check_state_reset("readarr")

    # Extract necessary settings
    api_url = app_settings.get("api_url", "").strip()
    api_key = app_settings.get("api_key", "").strip()
    api_timeout = get_advanced_setting("api_timeout", 120)  # Use general.json value
    instance_name = app_settings.get("instance_name", "Readarr Default")

    readarr_logger.info(f"Using API timeout of {api_timeout} seconds for Readarr")

    monitored_only = app_settings.get("monitored_only", True)
    skip_author_refresh = app_settings.get("skip_author_refresh", False)
    hunt_missing_books = app_settings.get("hunt_missing_books", 0)
    # NOTE(review): "skip_future_releases" is configurable for this app but is
    # never applied in this cycle (unlike upgrade.py) — confirm whether missing
    # books should also be filtered by release date.

    # Get missing books
    readarr_logger.info("Retrieving wanted/missing books...")
    missing_books_data = readarr_api.get_wanted_missing_books(api_url, api_key, api_timeout, monitored_only)

    if missing_books_data is None:  # None signals an API error; an empty list is a valid result
        readarr_logger.error(f"Failed to retrieve missing books data. Skipping processing.")
        return False

    readarr_logger.info(f"Found {len(missing_books_data)} missing books.")

    # Group the missing books by author ID so one search covers a whole author
    books_by_author = {}
    for book in missing_books_data:
        author_id = book.get("authorId")
        if author_id:
            books_by_author.setdefault(author_id, []).append(book)

    author_ids = list(books_by_author.keys())

    # Filter out already processed authors using stateful management
    unprocessed_authors = []
    for author_id in author_ids:
        if not is_processed("readarr", instance_name, str(author_id)):
            unprocessed_authors.append(author_id)
        else:
            readarr_logger.debug(f"Skipping already processed author ID: {author_id}")

    readarr_logger.info(f"Found {len(unprocessed_authors)} unprocessed authors out of {len(author_ids)} total authors with missing books.")

    if not unprocessed_authors:
        readarr_logger.info(f"No unprocessed authors found for {instance_name}. All available authors have been processed.")
        return False

    # Always randomly select authors/books to process
    readarr_logger.info(f"Randomly selecting up to {hunt_missing_books} authors with missing books.")
    authors_to_process = random.sample(unprocessed_authors, min(hunt_missing_books, len(unprocessed_authors)))

    readarr_logger.info(f"Selected {len(authors_to_process)} authors to search for missing books.")
    processed_count = 0
    processed_something = False
    processed_authors = []  # Track author names processed (for the cycle summary)

    for author_id in authors_to_process:
        if stop_check():
            readarr_logger.info("Stop signal received, aborting Readarr missing cycle.")
            break

        author_info = readarr_api.get_author_details(api_url, api_key, author_id, api_timeout)
        author_name = author_info.get("authorName", f"Author ID {author_id}") if author_info else f"Author ID {author_id}"

        readarr_logger.info(f"Processing missing books for author: \"{author_name}\" (Author ID: {author_id})")

        # Refresh author metadata first (optional) so the search sees current data
        if not skip_author_refresh:
            readarr_logger.info(f" - Refreshing author info...")
            refresh_result = readarr_api.refresh_author(author_id, api_url, api_key, api_timeout)
            time.sleep(5)  # Basic wait for the refresh command to take effect
            if not refresh_result:
                readarr_logger.warning(f" - Failed to trigger author refresh. Continuing search anyway.")
        else:
            readarr_logger.info(f" - Skipping author refresh (skip_author_refresh=true)")

        # Search for missing books associated with the author
        readarr_logger.info(f" - Searching for missing books...")
        book_ids_for_author = [book['id'] for book in books_by_author[author_id]]  # 'id' is bookId

        # Create detailed log with book titles
        book_details = []
        for book in books_by_author[author_id]:
            book_title = book.get('title', f"Book ID {book['id']}")
            book_details.append(f"'{book_title}' (ID: {book['id']})")

        details_string = ', '.join(book_details)
        log_message = f"Triggering Book Search for {len(book_details)} books by author '{author_name}': [{details_string}]"
        readarr_logger.debug(log_message)

        # Mark author as processed BEFORE triggering any searches so a crash
        # mid-search cannot cause the same author to be re-hunted forever
        add_processed_id("readarr", instance_name, str(author_id))
        readarr_logger.debug(f"Added author ID {author_id} to processed list for {instance_name}")

        # Now trigger the search
        search_command_result = readarr_api.search_books(api_url, api_key, book_ids_for_author, api_timeout)

        if search_command_result:
            # Extract command ID if the result is a dictionary, otherwise use the result directly
            command_id = search_command_result.get('id') if isinstance(search_command_result, dict) else search_command_result
            readarr_logger.info(f"Triggered book search command {command_id} for author {author_name}. Assuming success for now.")
            increment_stat("readarr", "hunted")

            # Log to history system
            log_processed_media("readarr", author_name, author_id, instance_name, "missing")
            readarr_logger.debug(f"Logged history entry for author: {author_name}")

            processed_count += 1  # Count processed authors/groups
            processed_authors.append(author_name)
            processed_something = True
            readarr_logger.info(f"Processed {processed_count}/{len(authors_to_process)} authors/groups for missing books this cycle.")
        else:
            readarr_logger.error(f"Failed to trigger search for author {author_name}.")

        if processed_count >= hunt_missing_books:
            readarr_logger.info(f"Reached target of {hunt_missing_books} authors/groups processed for this cycle.")
            break

    if processed_authors:
        authors_list = '", "'.join(processed_authors)
        readarr_logger.info(f'Completed processing {processed_count} authors/groups for missing books this cycle: "{authors_list}"')
    else:
        readarr_logger.info(f"Completed processing {processed_count} authors/groups for missing books this cycle.")

    return processed_something
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/readarr/upgrade.py b/Huntarr.io-6.3.6/src/primary/apps/readarr/upgrade.py
new file mode 100644
index 0000000..3a4552e
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/readarr/upgrade.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python3
+"""
+Quality Upgrade Processing for Readarr
+Handles searching for books that need quality upgrades in Readarr
+"""
+
+import time
+import random
+import datetime # Import the datetime module
+from typing import List, Dict, Any, Set, Callable, Union, Optional
+from src.primary.utils.logger import get_logger
+from src.primary.apps.readarr import api as readarr_api
+from src.primary.stats_manager import increment_stat
+from src.primary.stateful_manager import is_processed, add_processed_id
+from src.primary.utils.history_utils import log_processed_media
+from src.primary.state import check_state_reset
+from src.primary.settings_manager import load_settings # Import load_settings function
+
+# Get logger for the app
+readarr_logger = get_logger("readarr")
+
def process_cutoff_upgrades(
    app_settings: Dict[str, Any],
    stop_check: Callable[[], bool]  # Function to check if stop is requested
) -> bool:
    """
    Process quality cutoff upgrades for Readarr based on settings.

    Fetches books whose quality cutoff is unmet, optionally drops future
    releases, skips books already recorded by stateful management, randomly
    picks up to ``hunt_upgrade_books`` of the remainder, and triggers a single
    batched BookSearch for them.

    Args:
        app_settings: Dictionary containing all settings for Readarr
        stop_check: A function that returns True if the process should stop

    Returns:
        True if any books were processed for upgrades, False otherwise.
    """
    readarr_logger.info("Starting quality cutoff upgrades processing cycle for Readarr.")

    # Reset state files if enough time has passed
    check_state_reset("readarr")

    # Load general settings to get the centralized timeout
    general_settings = load_settings('general')

    # Get the API credentials for this instance
    api_url = app_settings.get('api_url', '')
    api_key = app_settings.get('api_key', '')

    # Use the centralized timeout from general settings with app-specific as fallback
    api_timeout = general_settings.get("api_timeout", app_settings.get("api_timeout", 90))

    readarr_logger.info(f"Using API timeout of {api_timeout} seconds for Readarr")

    instance_name = app_settings.get("instance_name", "Readarr Default")
    hunt_upgrade_books = app_settings.get("hunt_upgrade_books", 0)
    # NOTE(review): "monitored_only" is configured for this app but is not
    # passed to get_cutoff_unmet_books — confirm whether that call should
    # filter on monitored status. Likewise, stop_check is never consulted in
    # this cycle because the search is a single batch.

    # Get books eligible for upgrade
    readarr_logger.info("Retrieving books eligible for quality upgrade...")
    upgrade_eligible_data = readarr_api.get_cutoff_unmet_books(api_url=api_url, api_key=api_key, api_timeout=api_timeout)

    if upgrade_eligible_data is None:  # None means the API call itself failed
        readarr_logger.error("Error retrieving books eligible for upgrade from Readarr API.")
        return False
    elif not upgrade_eligible_data:  # Empty list simply means nothing to do
        readarr_logger.info("No books found eligible for upgrade.")
        return False

    readarr_logger.info(f"Found {len(upgrade_eligible_data)} books eligible for quality upgrade.")

    # Filter out future releases if configured
    skip_future_releases = app_settings.get("skip_future_releases", True)
    if skip_future_releases:
        now = datetime.datetime.now(datetime.timezone.utc)
        original_count = len(upgrade_eligible_data)
        filtered_books = []
        for book in upgrade_eligible_data:
            release_date_str = book.get('releaseDate')
            if release_date_str:
                try:
                    try:
                        # Handle ISO format date strings like '2023-10-17T04:00:00Z';
                        # fromisoformat doesn't accept the 'Z' suffix, so replace it first
                        release_date = datetime.datetime.fromisoformat(release_date_str.replace('Z', '+00:00'))
                    except ValueError:
                        # Fall back to simple YYYY-MM-DD format
                        release_date = datetime.datetime.strptime(release_date_str, '%Y-%m-%d')
                    if release_date.tzinfo is None:
                        # Assume UTC for naive timestamps. Without this, comparing a
                        # naive datetime against the timezone-aware "now" raises an
                        # uncaught TypeError and aborts the whole cycle.
                        release_date = release_date.replace(tzinfo=datetime.timezone.utc)

                    if release_date <= now:
                        filtered_books.append(book)
                    else:
                        readarr_logger.debug(f"Skipping future book ID {book.get('id')} with release date {release_date_str}")
                except ValueError:
                    readarr_logger.warning(f"Could not parse release date '{release_date_str}' for book ID {book.get('id')}. Including anyway.")
                    filtered_books.append(book)
            else:
                filtered_books.append(book)  # Include books without a release date

        upgrade_eligible_data = filtered_books
        skipped_count = original_count - len(upgrade_eligible_data)
        if skipped_count > 0:
            readarr_logger.info(f"Skipped {skipped_count} future books based on release date for upgrades.")

    if not upgrade_eligible_data:
        readarr_logger.info("No upgradeable books found to process (after potential filtering). Skipping.")
        return False

    # Filter out already processed books using stateful management
    unprocessed_books = []
    for book in upgrade_eligible_data:
        book_id = str(book.get("id"))
        if not is_processed("readarr", instance_name, book_id):
            unprocessed_books.append(book)
        else:
            readarr_logger.debug(f"Skipping already processed book ID: {book_id}")

    readarr_logger.info(f"Found {len(unprocessed_books)} unprocessed books out of {len(upgrade_eligible_data)} total books eligible for upgrade.")

    if not unprocessed_books:
        readarr_logger.info(f"No unprocessed books found for {instance_name}. All available books have been processed.")
        return False

    # Always randomly select books to process
    readarr_logger.info(f"Randomly selecting up to {hunt_upgrade_books} books for upgrade search.")
    books_to_process = random.sample(unprocessed_books, min(hunt_upgrade_books, len(unprocessed_books)))

    readarr_logger.info(f"Selected {len(books_to_process)} books to search for upgrades.")
    processed_count = 0
    processed_something = False

    book_ids_to_search = [book.get("id") for book in books_to_process]

    # Mark books as processed BEFORE triggering any searches so a failure
    # mid-cycle cannot cause the same books to be re-hunted forever
    for book_id in book_ids_to_search:
        add_processed_id("readarr", instance_name, str(book_id))
        readarr_logger.debug(f"Added book ID {book_id} to processed list for {instance_name}")

    # Now trigger the search
    search_command_result = readarr_api.search_books(api_url, api_key, book_ids_to_search, api_timeout)

    if search_command_result:
        # Extract the command ID when the API returned the full command object
        # (same treatment as missing.py; search_books returns a dict)
        command_id = search_command_result.get('id') if isinstance(search_command_result, dict) else search_command_result
        readarr_logger.info(f"Triggered upgrade search command {command_id} for {len(book_ids_to_search)} books.")
        increment_stat("readarr", "upgraded")

        # Log to history system for each book
        for book in books_to_process:
            author_name = book.get("authorName")
            book_title = book.get("title")
            media_name = f"{author_name} - {book_title}"
            log_processed_media("readarr", media_name, book.get("id"), instance_name, "upgrade")
            readarr_logger.debug(f"Logged quality upgrade to history for book ID {book.get('id')}")

        processed_count += len(book_ids_to_search)
        processed_something = True
        readarr_logger.info(f"Processed {processed_count} book upgrades this cycle.")
    else:
        readarr_logger.error(f"Failed to trigger search for book upgrades.")

    readarr_logger.info(f"Completed processing {processed_count} books for upgrade this cycle.")

    return processed_something
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/readarr_routes.py b/Huntarr.io-6.3.6/src/primary/apps/readarr_routes.py
new file mode 100644
index 0000000..d729aae
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/readarr_routes.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python3
+
+from flask import Blueprint, request, jsonify
+import datetime, os, requests
+from src.primary import keys_manager
+from src.primary.state import get_state_file_path, reset_state_file
+from src.primary.utils.logger import get_logger
+import traceback
+import socket
+from urllib.parse import urlparse
+
# Flask blueprint that hosts the Readarr-specific HTTP routes below.
readarr_bp = Blueprint('readarr', __name__)
readarr_logger = get_logger("readarr")

# Make sure we're using the correct state files
# (paths come from the shared state module so all Readarr code agrees on them)
PROCESSED_MISSING_FILE = get_state_file_path("readarr", "processed_missing")
PROCESSED_UPGRADES_FILE = get_state_file_path("readarr", "processed_upgrades")
+
@readarr_bp.route('/test-connection', methods=['POST'])
def test_connection():
    """Test connection to a Readarr API instance.

    Expects a JSON body with ``api_url`` and ``api_key``.  Does a quick
    TCP/DNS pre-check for clearer error messages, then calls Readarr's
    ``/api/v1/system/status`` endpoint and maps each failure mode to a
    specific HTTP status code and human-readable message.
    """
    data = request.json
    api_url = data.get('api_url')
    api_key = data.get('api_key')

    if not api_url or not api_key:
        return jsonify({"success": False, "message": "API URL and API Key are required"}), 400

    readarr_logger.info(f"Testing connection to Readarr API at {api_url}")

    # Validate URL format
    if not (api_url.startswith('http://') or api_url.startswith('https://')):
        error_msg = "API URL must start with http:// or https://"
        readarr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 400

    # For Readarr, use api/v1
    url = f"{api_url}/api/v1/system/status"
    headers = {
        "X-Api-Key": api_key,
        "Content-Type": "application/json"
    }

    try:
        # First check if the host is reachable at all
        parsed_url = urlparse(api_url)
        hostname = parsed_url.hostname
        # Default ports per scheme when the URL doesn't specify one
        port = parsed_url.port or (443 if parsed_url.scheme == 'https' else 80)

        # Try to establish a socket connection first to provide a better error message for connection issues
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(3) # Short timeout for quick feedback
            result = sock.connect_ex((hostname, port))
            sock.close()

            if result != 0:
                error_msg = f"Connection refused - Unable to connect to {hostname}:{port}. Please check if the server is running and the port is correct."
                readarr_logger.error(error_msg)
                return jsonify({"success": False, "message": error_msg}), 404
        except socket.gaierror:
            error_msg = f"DNS resolution failed - Cannot resolve hostname: {hostname}. Please check your URL."
            readarr_logger.error(error_msg)
            return jsonify({"success": False, "message": error_msg}), 404
        except Exception as e:
            # Log the socket testing error but continue with the full request
            readarr_logger.debug(f"Socket test error, continuing with full request: {str(e)}")

        # Now proceed with the actual API request
        response = requests.get(url, headers=headers, timeout=10)

        # For HTTP errors, provide more specific feedback
        if response.status_code == 401:
            error_msg = "Authentication failed: Invalid API key"
            readarr_logger.error(error_msg)
            return jsonify({"success": False, "message": error_msg}), 401
        elif response.status_code == 403:
            error_msg = "Access forbidden: Check API key permissions"
            readarr_logger.error(error_msg)
            return jsonify({"success": False, "message": error_msg}), 403
        elif response.status_code == 404:
            error_msg = "API endpoint not found: This doesn't appear to be a valid Readarr server. Check your URL."
            readarr_logger.error(error_msg)
            return jsonify({"success": False, "message": error_msg}), 404
        elif response.status_code >= 500:
            error_msg = f"Readarr server error (HTTP {response.status_code}): The Readarr server is experiencing issues"
            readarr_logger.error(error_msg)
            return jsonify({"success": False, "message": error_msg}), response.status_code

        # Raise for other HTTP errors
        response.raise_for_status()

        try:
            response_data = response.json()
            version = response_data.get('version', 'unknown')
            readarr_logger.info(f"Successfully connected to Readarr API version: {version}")

            return jsonify({
                "success": True,
                "message": "Successfully connected to Readarr API",
                "version": version
            })
        except ValueError:
            # A 200 response that isn't JSON means we reached something that isn't Readarr
            error_msg = "Invalid JSON response from Readarr API - This doesn't appear to be a valid Readarr server"
            readarr_logger.error(f"{error_msg}. Response content: {response.text[:200]}")
            return jsonify({"success": False, "message": error_msg}), 500

    except requests.exceptions.ConnectionError as e:
        # Handle different types of connection errors
        error_details = str(e)
        if "Connection refused" in error_details:
            error_msg = f"Connection refused - Readarr is not running on {api_url} or the port is incorrect"
        elif "Name or service not known" in error_details or "getaddrinfo failed" in error_details:
            error_msg = f"DNS resolution failed - Cannot find host '{urlparse(api_url).hostname}'. Check your URL."
        else:
            error_msg = f"Connection error - Check if Readarr is running: {error_details}"

        readarr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 404
    except requests.exceptions.Timeout:
        error_msg = f"Connection timed out - Readarr took too long to respond"
        readarr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 504
    except requests.exceptions.RequestException as e:
        error_msg = f"Connection test failed: {str(e)}"
        readarr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 500
diff --git a/Huntarr.io-6.3.6/src/primary/apps/sonarr.py b/Huntarr.io-6.3.6/src/primary/apps/sonarr.py
new file mode 100644
index 0000000..18a3f28
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/sonarr.py
@@ -0,0 +1,152 @@
+#!/usr/bin/env python3
+
+from flask import Blueprint, request, jsonify
+import datetime, os, requests
+from src.primary import keys_manager
+from src.primary.state import get_state_file_path
+from src.primary.settings_manager import load_settings
+import logging
+from src.primary.utils.logger import get_logger
+
# Flask blueprint that hosts the Sonarr-specific HTTP routes below.
sonarr_bp = Blueprint('sonarr', __name__)
sonarr_logger = get_logger("sonarr")

# Make sure we're using the correct state files
# (paths come from the shared state module so all Sonarr code agrees on them)
PROCESSED_MISSING_FILE = get_state_file_path("sonarr", "processed_missing")
PROCESSED_UPGRADES_FILE = get_state_file_path("sonarr", "processed_upgrades")
+
@sonarr_bp.route('/test-connection', methods=['POST'])
def test_connection():
    """Test connection to a Sonarr API instance with comprehensive diagnostics.

    Expects a JSON body with ``api_url`` and ``api_key`` (optional
    ``api_timeout``, default 30s).  Calls Sonarr's ``/api/v3/system/status``
    endpoint and maps timeouts, connection errors, and HTTP error codes to
    specific status codes and human-readable messages.
    """
    data = request.json
    api_url = data.get('api_url')
    api_key = data.get('api_key')
    api_timeout = data.get('api_timeout', 30) # Use longer timeout for connection test

    if not api_url or not api_key:
        return jsonify({"success": False, "message": "API URL and API Key are required"}), 400

    # Log the test attempt
    sonarr_logger.info(f"Testing connection to Sonarr API at {api_url}")

    # First check if URL is properly formatted
    if not (api_url.startswith('http://') or api_url.startswith('https://')):
        error_msg = "API URL must start with http:// or https://"
        sonarr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 400

    # Create the test URL and set headers
    test_url = f"{api_url.rstrip('/')}/api/v3/system/status"
    headers = {'X-Api-Key': api_key}

    try:
        # Use a connection timeout separate from read timeout
        response = requests.get(test_url, headers=headers, timeout=(10, api_timeout))

        # Log HTTP status code for diagnostic purposes
        sonarr_logger.debug(f"Sonarr API status code: {response.status_code}")

        # Check HTTP status code
        response.raise_for_status()

        # Ensure the response is valid JSON
        try:
            response_data = response.json()

            # Save keys if connection is successful - Not saving here anymore since we use instances
            # keys_manager.save_api_keys("sonarr", api_url, api_key)

            sonarr_logger.info(f"Successfully connected to Sonarr API version: {response_data.get('version', 'unknown')}")

            # Return success with some useful information
            return jsonify({
                "success": True,
                "message": "Successfully connected to Sonarr API",
                "version": response_data.get('version', 'unknown')
            })
        except ValueError:
            # A 200 response that isn't JSON means we reached something that isn't Sonarr
            error_msg = "Invalid JSON response from Sonarr API"
            sonarr_logger.error(f"{error_msg}. Response content: {response.text[:200]}")
            return jsonify({"success": False, "message": error_msg}), 500

    except requests.exceptions.Timeout as e:
        error_msg = f"Connection timed out after {api_timeout} seconds"
        sonarr_logger.error(f"{error_msg}: {str(e)}")
        return jsonify({"success": False, "message": error_msg}), 504

    except requests.exceptions.ConnectionError as e:
        error_msg = "Connection error - check hostname and port"
        details = str(e)
        # Check for common DNS resolution errors
        if "Name or service not known" in details or "getaddrinfo failed" in details:
            error_msg = "DNS resolution failed - check hostname"
        # Check for common connection refused errors
        elif "Connection refused" in details:
            error_msg = "Connection refused - check if Sonarr is running and the port is correct"

        sonarr_logger.error(f"{error_msg}: {details}")
        return jsonify({"success": False, "message": f"{error_msg}: {details}"}), 502

    except requests.exceptions.RequestException as e:
        error_message = f"Connection failed: {str(e)}"

        if hasattr(e, 'response') and e.response is not None:
            status_code = e.response.status_code

            # Add specific messages based on common status codes
            if status_code == 401:
                error_message = "Authentication failed: Invalid API key"
            elif status_code == 403:
                error_message = "Access forbidden: Check API key permissions"
            elif status_code == 404:
                error_message = "API endpoint not found: Check API URL"
            elif status_code >= 500:
                error_message = f"Sonarr server error (HTTP {status_code}): The Sonarr server is experiencing issues"

            # Try to extract more error details if available
            try:
                error_details = e.response.json()
                error_message += f" - {error_details.get('message', 'No details')}"
            except ValueError:
                if e.response.text:
                    error_message += f" - Response: {e.response.text[:200]}"

        sonarr_logger.error(error_message)
        return jsonify({"success": False, "message": error_message}), 500

    except Exception as e:
        error_msg = f"An unexpected error occurred: {str(e)}"
        sonarr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 500
+
+# --- Configuration and State --- #
+
+# --- Multi-Instance Support --- #
+
+# get_configured_instances function has been moved to src/primary/apps/sonarr/__init__.py
+
+# --- Reset State --- #
+
+# Function to check if Sonarr is configured
def is_configured():
    """Check if Sonarr API credentials are configured by checking if at least one instance is enabled."""
    settings = load_settings("sonarr")
    if not settings:
        sonarr_logger.debug("No settings found for Sonarr")
        return False

    instance_list = settings.get("instances")
    # Multi-instance config: usable as soon as one enabled instance carries
    # both an API URL and an API key.
    if isinstance(instance_list, list) and instance_list:
        for inst in instance_list:
            usable = inst.get("enabled", True) and inst.get("api_url") and inst.get("api_key")
            if usable:
                sonarr_logger.debug(f"Found configured Sonarr instance: {inst.get('name', 'Unnamed')}")
                return True
        sonarr_logger.debug("No enabled Sonarr instances found with valid API URL and key")
        return False

    # Fallback to legacy single-instance config
    return bool(settings.get("api_url") and settings.get("api_key"))
diff --git a/Huntarr.io-6.3.6/src/primary/apps/sonarr/__init__.py b/Huntarr.io-6.3.6/src/primary/apps/sonarr/__init__.py
new file mode 100644
index 0000000..6c6e3c7
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/sonarr/__init__.py
@@ -0,0 +1,94 @@
+"""
+Sonarr module initialization
+"""
+
+# Use src.primary imports
+from src.primary.apps.sonarr.missing import process_missing_episodes
+from src.primary.apps.sonarr.upgrade import process_cutoff_upgrades
+from src.primary.settings_manager import load_settings
+from src.primary.utils.logger import get_logger
+
+# Define logger for this module
+sonarr_logger = get_logger("sonarr")
+
def get_configured_instances():
    """Get all configured and enabled Sonarr instances.

    Returns:
        A list of dicts with keys ``instance_name``, ``api_url`` and
        ``api_key`` — one per enabled, fully configured instance.  Falls back
        to the legacy single-instance ``api_url``/``api_key`` settings (named
        "Default") when no ``instances`` list is present.  URLs missing an
        http(s) scheme are auto-prefixed with ``http://``.
    """
    settings = load_settings("sonarr")
    instances = []
    # sonarr_logger.info(f"Loaded Sonarr settings for instance check: {settings}") # Removed verbose log

    if not settings:
        sonarr_logger.debug("No settings found for Sonarr")
        return instances

    # Check if instances are configured
    if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]:
        # sonarr_logger.info(f"Found 'instances' list with {len(settings['instances'])} items. Processing...") # Removed verbose log
        for idx, instance in enumerate(settings["instances"]):
            sonarr_logger.debug(f"Checking instance #{idx}: {instance}")
            # Enhanced validation
            api_url = instance.get("api_url", "").strip()
            api_key = instance.get("api_key", "").strip()

            # Enhanced URL validation - ensure URL has proper scheme
            if api_url and not (api_url.startswith('http://') or api_url.startswith('https://')):
                sonarr_logger.warning(f"Instance '{instance.get('name', 'Unnamed')}' has URL without http(s) scheme: {api_url}")
                api_url = f"http://{api_url}"
                sonarr_logger.warning(f"Auto-correcting URL to: {api_url}")

            is_enabled = instance.get("enabled", True)

            # Only include properly configured instances
            if is_enabled and api_url and api_key:
                # Get the exact instance name as configured in the UI
                instance_name = instance.get("name", "Default")
                sonarr_logger.info(f"Using configured instance name: '{instance_name}' for Sonarr instance")

                # Return only essential instance details
                instance_data = {
                    "instance_name": instance_name,
                    "api_url": api_url,
                    "api_key": api_key,
                }
                instances.append(instance_data)
                # sonarr_logger.info(f"Added valid instance: {instance_data}") # Removed verbose log
            elif not is_enabled:
                sonarr_logger.debug(f"Skipping disabled instance: {instance.get('name', 'Unnamed')}")
            else:
                # For brand new installations, don't spam logs with warnings about default instances
                instance_name = instance.get('name', 'Unnamed')
                if instance_name == 'Default':
                    # Use debug level for default instances to avoid log spam on new installations
                    sonarr_logger.debug(f"Skipping instance '{instance_name}' due to missing API URL or key (URL: '{api_url}', Key Set: {bool(api_key)})")
                else:
                    # Still log warnings for non-default instances
                    sonarr_logger.warning(f"Skipping instance '{instance_name}' due to missing API URL or key (URL: '{api_url}', Key Set: {bool(api_key)})")
    else:
        # sonarr_logger.info("No 'instances' list found or list is empty. Checking legacy config.") # Removed verbose log
        # Fallback to legacy single-instance config
        api_url = settings.get("api_url", "").strip()
        api_key = settings.get("api_key", "").strip()

        # Ensure URL has proper scheme
        if api_url and not (api_url.startswith('http://') or api_url.startswith('https://')):
            sonarr_logger.warning(f"API URL missing http(s) scheme: {api_url}")
            api_url = f"http://{api_url}"
            sonarr_logger.warning(f"Auto-correcting URL to: {api_url}")

        if api_url and api_key:
            # Create a clean instance_data dict for the legacy instance
            instance_data = {
                "instance_name": "Default",
                "api_url": api_url,
                "api_key": api_key,
            }
            instances.append(instance_data)
            sonarr_logger.info(f"Using legacy configuration with instance name: 'Default'")
        else:
            sonarr_logger.warning("No API URL or key found in legacy configuration")

    # Use debug level to avoid spamming logs, especially with 0 instances
    sonarr_logger.debug(f"Found {len(instances)} configured and enabled Sonarr instances")
    return instances
+
+__all__ = ["process_missing_episodes", "process_cutoff_upgrades", "get_configured_instances"]
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/sonarr/api.py b/Huntarr.io-6.3.6/src/primary/apps/sonarr/api.py
new file mode 100644
index 0000000..07472fd
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/sonarr/api.py
@@ -0,0 +1,993 @@
+#!/usr/bin/env python3
+"""
+Sonarr-specific API functions
+Handles all communication with the Sonarr API
+"""
+
+import requests
+import json
+import sys
+import time
+import datetime
+import traceback
+from typing import List, Dict, Any, Optional, Union, Callable
+# Correct the import path
+from src.primary.utils.logger import get_logger
+
+# Get logger for the Sonarr app
+sonarr_logger = get_logger("sonarr")
+
+# Use a session for better performance
+session = requests.Session()
+
def arr_request(api_url: str, api_key: str, api_timeout: int, endpoint: str, method: str = "GET", data: Dict = None) -> Any:
    """
    Make a request to the Sonarr API.

    Args:
        api_url: The base URL of the Sonarr API
        api_key: The API key for authentication
        api_timeout: Timeout for the API request
        endpoint: The API endpoint to call
        method: HTTP method (GET, POST, PUT, DELETE)
        data: Optional data payload for POST/PUT requests

    Returns:
        The parsed JSON response, {} for an empty body, or None on any failure
    """
    try:
        # Guard clauses: both connection parameters are mandatory.
        if not api_url or not api_key:
            sonarr_logger.error("No URL or API key provided")
            return None

        # Refuse URLs without an explicit scheme rather than guessing one.
        if not (api_url.startswith('http://') or api_url.startswith('https://')):
            sonarr_logger.error(f"Invalid URL format: {api_url} - URL must start with http:// or https://")
            return None

        # Normalize slashes so base/endpoint join cleanly.
        full_url = f"{api_url.rstrip('/')}/api/v3/{endpoint.lstrip('/')}"

        sonarr_logger.debug(f"Making {method} request to: {full_url}")

        headers = {
            "X-Api-Key": api_key,
            "Content-Type": "application/json"
        }

        # Map each supported verb onto the matching session call; anything
        # else is rejected explicitly instead of silently ignored.
        verb = method.upper()
        dispatch = {
            "GET": session.get,
            "POST": session.post,
            "PUT": session.put,
            "DELETE": session.delete,
        }
        http_call = dispatch.get(verb)
        if http_call is None:
            sonarr_logger.error(f"Unsupported HTTP method: {method}")
            return None

        # Only body-carrying verbs get the JSON payload attached.
        request_kwargs = {"headers": headers, "timeout": api_timeout}
        if verb in ("POST", "PUT"):
            request_kwargs["json"] = data

        try:
            response = http_call(full_url, **request_kwargs)

            # Raise on any 4xx/5xx so the handler below can log details.
            response.raise_for_status()

            # An empty body is a valid success (e.g. DELETE); report it as {}.
            if not response.content:
                sonarr_logger.debug(f"Empty response content from {endpoint}, returning empty dict")
                return {}

            try:
                return response.json()
            except json.JSONDecodeError as jde:
                # Log detailed information about the malformed response
                sonarr_logger.error(f"Error decoding JSON response from {endpoint}: {str(jde)}")
                sonarr_logger.error(f"Response status code: {response.status_code}")
                sonarr_logger.error(f"Response content (first 200 chars): {response.content[:200]}")
                return None

        except requests.exceptions.RequestException as e:
            # Enrich the error with status code / body excerpt when available.
            error_details = str(e)
            if hasattr(e, 'response') and e.response is not None:
                error_details += f", Status Code: {e.response.status_code}"
                if e.response.content:
                    error_details += f", Content: {e.response.content[:200]}"

            sonarr_logger.error(f"Error during {method} request to {endpoint}: {error_details}")
            return None
    except Exception as e:
        # Last-resort catch: log with traceback and also mirror to stderr.
        error_msg = f"CRITICAL ERROR in arr_request: {str(e)}"
        sonarr_logger.error(error_msg)
        sonarr_logger.error(f"Full traceback: {traceback.format_exc()}")
        print(error_msg, file=sys.stderr)
        print(traceback.format_exc(), file=sys.stderr)
        return None
+
def check_connection(api_url: str, api_key: str, api_timeout: int) -> bool:
    """Checks connection by fetching system status."""
    # Validate required credentials up front with a specific message each.
    if not api_url:
        sonarr_logger.error("API URL is empty or not set")
        return False
    if not api_key:
        sonarr_logger.error("API Key is empty or not set")
        return False

    try:
        # Cap the timeout so a connectivity probe stays quick.
        quick_timeout = min(api_timeout, 15)
        status = get_system_status(api_url, api_key, quick_timeout)
        if not (status and isinstance(status, dict) and 'version' in status):
            # The status payload did not look like a Sonarr system/status response.
            sonarr_logger.warning(f"Connection check for {api_url} returned unexpected status: {str(status)[:200]}")
            return False
        # Debug-level on success to keep normal logs quiet.
        sonarr_logger.debug(f"Connection check successful for {api_url}. Version: {status.get('version')}")
        return True
    except Exception:
        # Details were already logged by arr_request; just report the failure.
        sonarr_logger.error(f"Connection check failed for {api_url}")
        return False
+
def get_system_status(api_url: str, api_key: str, api_timeout: int) -> Dict:
    """
    Get Sonarr system status.

    Args:
        api_url: The base URL of the Sonarr API
        api_key: The API key for authentication
        api_timeout: Timeout for the API request

    Returns:
        System status information or empty dict if request failed
    """
    # arr_request yields None on failure; collapse that to an empty dict.
    return arr_request(api_url, api_key, api_timeout, "system/status") or {}
+
def get_series(api_url: str, api_key: str, api_timeout: int, series_id: Optional[int] = None) -> Union[List, Dict, None]:
    """
    Get series information from Sonarr.

    Args:
        api_url: The base URL of the Sonarr API
        api_key: The API key for authentication
        api_timeout: Timeout for the API request
        series_id: Optional series ID to get a specific series

    Returns:
        List of all series, a specific series, or None if request failed
    """
    # A truthy series_id narrows the call to one series; otherwise fetch all.
    endpoint = f"series/{series_id}" if series_id else "series"
    return arr_request(api_url, api_key, api_timeout, endpoint)
+
def get_episode(api_url: str, api_key: str, api_timeout: int, episode_id: int) -> Dict:
    """
    Get episode information by ID.

    Args:
        api_url: The base URL of the Sonarr API
        api_key: The API key for authentication
        api_timeout: Timeout for the API request
        episode_id: The episode ID

    Returns:
        Episode information or empty dict if request failed
    """
    # Fall back to {} when the request fails (arr_request returns None).
    return arr_request(api_url, api_key, api_timeout, f"episode/{episode_id}") or {}
+
def get_queue(api_url: str, api_key: str, api_timeout: int) -> List:
    """
    Get the current queue from Sonarr.

    Args:
        api_url: The base URL of the Sonarr API
        api_key: The API key for authentication
        api_timeout: Timeout for the API request

    Returns:
        Queue information or empty list if request failed
    """
    response = arr_request(api_url, api_key, api_timeout, "queue")
    # The queue endpoint wraps its items in a paginated 'records' envelope.
    if response and "records" in response:
        return response.get("records", [])
    return []
+
def get_calendar(api_url: str, api_key: str, api_timeout: int, start_date: Optional[str] = None, end_date: Optional[str] = None) -> List:
    """
    Get calendar information for a date range.

    Args:
        api_url: The base URL of the Sonarr API
        api_key: The API key for authentication
        api_timeout: Timeout for the API request
        start_date: Optional start date (ISO format)
        end_date: Optional end date (ISO format)

    Returns:
        Calendar information or empty list if request failed
    """
    # Collect only the filters that were actually supplied.
    filters = []
    if start_date:
        filters.append(f"start={start_date}")
    if end_date:
        filters.append(f"end={end_date}")

    # Append the query string only when at least one filter exists.
    endpoint = f"calendar?{'&'.join(filters)}" if filters else "calendar"

    return arr_request(api_url, api_key, api_timeout, endpoint) or []
+
def command_status(api_url: str, api_key: str, api_timeout: int, command_id: Union[int, str]) -> Dict:
    """
    Get the status of a command by ID.

    Args:
        api_url: The base URL of the Sonarr API
        api_key: The API key for authentication
        api_timeout: Timeout for the API request
        command_id: The command ID

    Returns:
        Command status information or empty dict if request failed
    """
    # None (failure) collapses to an empty dict for callers.
    return arr_request(api_url, api_key, api_timeout, f"command/{command_id}") or {}
+
def get_missing_episodes(api_url: str, api_key: str, api_timeout: int, monitored_only: bool, series_id: Optional[int] = None) -> List[Dict[str, Any]]:
    """Get missing episodes from Sonarr, handling pagination.

    Args:
        api_url: The base URL of the Sonarr API
        api_key: The API key for authentication
        api_timeout: Timeout for each API request
        monitored_only: When True, only keep episodes whose series AND episode
            are both monitored
        series_id: Optional series ID to restrict results to a single series

    Returns:
        A list of missing-episode records (possibly partial if some pages
        failed after all retries)
    """
    endpoint = "wanted/missing"
    page = 1
    page_size = 1000  # Adjust page size if needed, but 1000 is usually good
    all_missing_episodes = []
    retries_per_page = 2
    retry_delay = 3

    while True:
        retry_count = 0
        success = False
        # BUGFIX: reset per-page records each iteration. Previously 'records'
        # was only assigned inside the parse branch, so the 'not records'
        # check below raised NameError if page 1 failed every retry, and read
        # a stale value from the previous page on later failures.
        records = []

        while retry_count <= retries_per_page and not success:
            # Parameters for the request
            params = {
                "page": page,
                "pageSize": page_size,
                "includeSeries": "true"
            }

            # Add series ID filter if provided
            if series_id is not None:
                params["seriesId"] = series_id

            # Ensure proper URL construction with scheme
            base_url = api_url.rstrip('/')
            url = f"{base_url}/api/v3/{endpoint.lstrip('/')}"
            sonarr_logger.debug(f"Requesting missing episodes page {page} (attempt {retry_count+1}/{retries_per_page+1})")

            try:
                response = requests.get(url, headers={"X-Api-Key": api_key}, params=params, timeout=api_timeout)
                response.raise_for_status()  # Check for HTTP errors (4xx or 5xx)

                if not response.content:
                    sonarr_logger.warning(f"Empty response for missing episodes page {page} (attempt {retry_count+1})")
                    if retry_count < retries_per_page:
                        retry_count += 1
                        time.sleep(retry_delay)
                        continue
                    else:
                        sonarr_logger.error(f"Giving up on empty response after {retries_per_page+1} attempts")
                        break  # Exit the retry loop; records stays [] so pagination stops

                try:
                    data = response.json()
                    records = data.get('records', [])
                    total_records_on_page = len(records)
                    sonarr_logger.debug(f"Parsed {total_records_on_page} missing episode records from page {page}")

                    if not records:  # No more records found
                        sonarr_logger.debug(f"No more records found on page {page}. Stopping pagination.")
                        success = True  # Mark as successful even though no records (might be legitimate)
                        break  # Exit retry loop, then also exit pagination loop

                    all_missing_episodes.extend(records)

                    # Check if this was the last page
                    if total_records_on_page < page_size:
                        sonarr_logger.debug(f"Received {total_records_on_page} records (less than page size {page_size}). Last page.")
                        success = True
                        break  # Exit retry loop, then also exit pagination loop

                    # We got records and need to continue - mark success for this page
                    success = True
                    break  # Exit retry loop, continue to next page

                except json.JSONDecodeError as e:
                    sonarr_logger.error(f"Failed to decode JSON response for missing episodes page {page} (attempt {retry_count+1}): {e}")
                    if retry_count < retries_per_page:
                        retry_count += 1
                        time.sleep(retry_delay)
                        continue
                    else:
                        sonarr_logger.error(f"Giving up after {retries_per_page+1} failed JSON decode attempts")
                        break  # Exit retry loop

            except requests.exceptions.RequestException as e:
                sonarr_logger.error(f"Request error for missing episodes page {page} (attempt {retry_count+1}): {e}")
                if retry_count < retries_per_page:
                    retry_count += 1
                    time.sleep(retry_delay)
                    continue
                else:
                    sonarr_logger.error(f"Giving up on request after {retries_per_page+1} failed attempts")
                    break  # Exit retry loop
            except Exception as e:
                sonarr_logger.error(f"Unexpected error for missing episodes page {page} (attempt {retry_count+1}): {e}")
                if retry_count < retries_per_page:
                    retry_count += 1
                    time.sleep(retry_delay)
                    continue
                else:
                    sonarr_logger.error(f"Giving up after unexpected error and {retries_per_page+1} attempts")
                    break  # Exit retry loop

        # If we didn't succeed after all retries or there are no more records, stop pagination
        if not success or not records:
            break

        # Prepare for the next page
        page += 1

    sonarr_logger.info(f"Total missing episodes fetched across all pages: {len(all_missing_episodes)}")

    # Apply monitored filter after fetching all pages: both the series and the
    # individual episode must be monitored.
    if monitored_only:
        original_count = len(all_missing_episodes)
        filtered_missing = [
            ep for ep in all_missing_episodes
            if ep.get('series', {}).get('monitored', False) and ep.get('monitored', False)
        ]
        sonarr_logger.debug(f"Filtered for monitored_only=True: {len(filtered_missing)} monitored episodes (out of {original_count} total)")
        return filtered_missing
    else:
        sonarr_logger.debug(f"Returning {len(all_missing_episodes)} episodes (monitored_only=False)")
        return all_missing_episodes
+
def get_cutoff_unmet_episodes(api_url: str, api_key: str, api_timeout: int, monitored_only: bool) -> List[Dict[str, Any]]:
    """Get cutoff unmet episodes from Sonarr, handling pagination.

    Args:
        api_url: The base URL of the Sonarr API
        api_key: The API key for authentication
        api_timeout: Timeout for each API request
        monitored_only: When True, only keep episodes whose series AND episode
            are both monitored

    Returns:
        A list of cutoff-unmet episode records (possibly partial if some
        pages failed after all retries)
    """
    endpoint = "wanted/cutoff"
    page = 1
    page_size = 1000  # Sonarr's max page size for this endpoint
    all_cutoff_unmet = []
    retries_per_page = 2
    retry_delay = 3

    sonarr_logger.debug(f"Starting fetch for cutoff unmet episodes (monitored_only={monitored_only}).")

    while True:
        retry_count = 0
        success = False
        records = []

        while retry_count <= retries_per_page and not success:
            # Parameters for the request
            params = {
                "page": page,
                "pageSize": page_size,
                "includeSeries": "true",  # Include series info for filtering
                "sortKey": "airDateUtc",
                "sortDir": "asc"
            }
            # CONSISTENCY FIX: strip slashes before joining, matching
            # get_missing_episodes; a trailing-slash api_url previously
            # produced a '//api/v3/...' URL here.
            url = f"{api_url.rstrip('/')}/api/v3/{endpoint.lstrip('/')}"
            sonarr_logger.debug(f"Requesting cutoff unmet page {page} (attempt {retry_count+1}/{retries_per_page+1})")

            try:
                response = requests.get(url, headers={"X-Api-Key": api_key}, params=params, timeout=api_timeout)
                sonarr_logger.debug(f"Sonarr API response status code for cutoff unmet page {page}: {response.status_code}")
                response.raise_for_status()  # Check for HTTP errors

                if not response.content:
                    sonarr_logger.warning(f"Empty response for cutoff unmet episodes page {page} (attempt {retry_count+1})")
                    if retry_count < retries_per_page:
                        retry_count += 1
                        time.sleep(retry_delay)
                        continue
                    else:
                        sonarr_logger.error(f"Giving up on empty response after {retries_per_page+1} attempts")
                        break

                try:
                    data = response.json()
                    records = data.get('records', [])
                    total_records_on_page = len(records)
                    total_records_reported = data.get('totalRecords', 0)

                    # Report the server-side total once, on the first page.
                    if page == 1:
                        sonarr_logger.info(f"Sonarr API reports {total_records_reported} total cutoff unmet records.")

                    sonarr_logger.debug(f"Parsed {total_records_on_page} cutoff unmet records from page {page}")

                    if not records:  # No more records found
                        sonarr_logger.debug(f"No more cutoff unmet records found on page {page}. Stopping pagination.")
                        success = True
                        break

                    all_cutoff_unmet.extend(records)

                    # Check if this was the last page
                    if total_records_on_page < page_size:
                        sonarr_logger.debug(f"Received {total_records_on_page} records (less than page size {page_size}). Last page.")
                        success = True
                        break

                    # Success for this page
                    success = True
                    break

                except json.JSONDecodeError as e:
                    sonarr_logger.error(f"Failed to decode JSON for cutoff unmet page {page} (attempt {retry_count+1}): {e}")
                    if retry_count < retries_per_page:
                        retry_count += 1
                        time.sleep(retry_delay)
                        continue
                    else:
                        sonarr_logger.error(f"Giving up after {retries_per_page+1} failed JSON decode attempts")
                        break

            except requests.exceptions.Timeout as e:
                sonarr_logger.error(f"Timeout for cutoff unmet page {page} (attempt {retry_count+1}): {e}")
                if retry_count < retries_per_page:
                    retry_count += 1
                    # Use a slightly longer retry delay for timeouts
                    time.sleep(retry_delay * 2)
                    continue
                else:
                    sonarr_logger.error(f"Giving up after {retries_per_page+1} timeout failures")
                    break

            except requests.exceptions.RequestException as e:
                error_details = f"Error: {e}"
                if hasattr(e, 'response') and e.response is not None:
                    error_details += f", Status Code: {e.response.status_code}"
                    if hasattr(e.response, 'text') and e.response.text:
                        error_details += f", Response: {e.response.text[:500]}"

                sonarr_logger.error(f"Request error for cutoff unmet page {page} (attempt {retry_count+1}): {error_details}")
                if retry_count < retries_per_page:
                    retry_count += 1
                    time.sleep(retry_delay)
                    continue
                else:
                    sonarr_logger.error(f"Giving up on request after {retries_per_page+1} failed attempts")
                    break

            except Exception as e:
                sonarr_logger.error(f"Unexpected error for cutoff unmet page {page} (attempt {retry_count+1}): {e}", exc_info=True)
                if retry_count < retries_per_page:
                    retry_count += 1
                    time.sleep(retry_delay)
                    continue
                else:
                    sonarr_logger.error(f"Giving up after unexpected error and {retries_per_page+1} attempts")
                    break

        # If we didn't succeed after all retries or there are no more records, stop pagination
        if not success or not records:
            break

        # Prepare for the next page
        page += 1

    sonarr_logger.info(f"Total cutoff unmet episodes fetched across all pages: {len(all_cutoff_unmet)}")

    # Apply monitored filter after fetching all pages
    if monitored_only:
        original_count = len(all_cutoff_unmet)
        # Ensure series and episode are monitored
        filtered_cutoff_unmet = [
            ep for ep in all_cutoff_unmet
            if ep.get('series', {}).get('monitored', False) and ep.get('monitored', False)
        ]
        sonarr_logger.debug(f"Filtered for monitored_only=True: {len(filtered_cutoff_unmet)} monitored cutoff unmet episodes remain (out of {original_count} total).")
        return filtered_cutoff_unmet
    else:
        sonarr_logger.debug(f"Returning {len(all_cutoff_unmet)} cutoff unmet episodes (monitored_only=False).")
        return all_cutoff_unmet
+
def get_cutoff_unmet_episodes_random_page(api_url: str, api_key: str, api_timeout: int, monitored_only: bool, count: int) -> List[Dict[str, Any]]:
    """
    Get a specified number of random cutoff unmet episodes by selecting a random page.
    This is much more efficient for very large libraries.

    Args:
        api_url: The base URL of the Sonarr API
        api_key: The API key for authentication
        api_timeout: Timeout for the API request
        monitored_only: Whether to include only monitored episodes
        count: How many episodes to return

    Returns:
        A list of randomly selected cutoff unmet episodes
    """
    import random

    page_size = 100  # Smaller page size to make the initial query faster
    url = f"{api_url}/api/v3/wanted/cutoff"
    headers = {"X-Api-Key": api_key}

    try:
        # Probe with a 1-item page purely to learn the total record count.
        probe = requests.get(
            url,
            headers=headers,
            params={"page": 1, "pageSize": 1, "includeSeries": "true"},
            timeout=api_timeout,
        )
        probe.raise_for_status()
        total_records = probe.json().get('totalRecords', 0)

        if total_records == 0:
            sonarr_logger.info("No cutoff unmet episodes found in Sonarr.")
            return []

        # Ceiling division to get the page count at our working page size.
        total_pages = (total_records + page_size - 1) // page_size
        sonarr_logger.info(f"Found {total_records} total cutoff unmet episodes across {total_pages} pages")

        if total_pages == 0:
            return []

        # Pick one page uniformly at random and fetch only that page.
        random_page = random.randint(1, total_pages)
        sonarr_logger.info(f"Selected random page {random_page} of {total_pages} for quality upgrade selection")

        page_response = requests.get(
            url,
            headers=headers,
            params={"page": random_page, "pageSize": page_size, "includeSeries": "true"},
            timeout=api_timeout,
        )
        page_response.raise_for_status()

        records = page_response.json().get('records', [])
        sonarr_logger.info(f"Retrieved {len(records)} episodes from page {random_page}")

        # Keep only fully-monitored entries (series AND episode) if requested.
        if monitored_only:
            records = [
                ep for ep in records
                if ep.get('series', {}).get('monitored', False) and ep.get('monitored', False)
            ]
            sonarr_logger.debug(f"Filtered to {len(records)} monitored episodes")

        # Sample down only when the page holds more than we were asked for.
        if len(records) > count:
            chosen = random.sample(records, count)
            sonarr_logger.debug(f"Randomly selected {len(chosen)} episodes from page {random_page}")
            return chosen

        sonarr_logger.debug(f"Returning all {len(records)} episodes from page {random_page} (fewer than requested {count})")
        return records

    except requests.exceptions.RequestException as e:
        sonarr_logger.error(f"Error getting random cutoff unmet episodes from Sonarr: {str(e)}")
        return []
    except json.JSONDecodeError as e:
        sonarr_logger.error(f"Failed to decode JSON response for random cutoff selection: {str(e)}")
        return []
    except Exception as e:
        sonarr_logger.error(f"Unexpected error in random cutoff selection: {str(e)}", exc_info=True)
        return []
+
def get_missing_episodes_random_page(api_url: str, api_key: str, api_timeout: int, monitored_only: bool, count: int, series_id: Optional[int] = None) -> List[Dict[str, Any]]:
    """
    Get a specified number of random missing episodes by selecting a random page.
    This is more efficient for very large libraries.

    Args:
        api_url: The base URL of the Sonarr API
        api_key: The API key for authentication
        api_timeout: Timeout for the API request
        monitored_only: Whether to include only monitored episodes
        count: How many episodes to return
        series_id: Optional series ID to filter results for a specific series

    Returns:
        A list of randomly selected missing episodes, up to the requested count
    """
    endpoint = "wanted/missing"
    page_size = 100  # Smaller page size for better performance
    retries = 2
    retry_delay = 3

    # First, make a request to get just the total record count (page 1 with size=1)
    params = {
        "page": 1,
        "pageSize": 1,
        "includeSeries": "true"  # Include series info for filtering
    }
    # BUGFIX: apply the series filter to the count query too. Previously the
    # total was computed over ALL series, so the random page chosen below
    # could lie beyond the filtered result set and return nothing.
    if series_id is not None:
        params["seriesId"] = series_id
    url = f"{api_url}/api/v3/{endpoint}"

    for attempt in range(retries + 1):
        try:
            # Get total record count from a minimal query
            sonarr_logger.debug(f"Getting missing episodes count (attempt {attempt+1}/{retries+1})")
            response = requests.get(url, headers={"X-Api-Key": api_key}, params=params, timeout=api_timeout)
            response.raise_for_status()

            if not response.content:
                sonarr_logger.warning(f"Empty response when getting missing count (attempt {attempt+1})")
                if attempt < retries:
                    time.sleep(retry_delay)
                    continue
                return []

            try:
                data = response.json()
                total_records = data.get('totalRecords', 0)

                if total_records == 0:
                    sonarr_logger.info("No missing episodes found in Sonarr.")
                    return []

                # Calculate total pages with our desired page size (ceiling division)
                total_pages = (total_records + page_size - 1) // page_size
                sonarr_logger.info(f"Found {total_records} total missing episodes across {total_pages} pages")

                if total_pages == 0:
                    return []

                # Select a random page
                import random
                random_page = random.randint(1, total_pages)
                sonarr_logger.info(f"Selected random page {random_page} of {total_pages} for missing episodes")

                # Get episodes from the random page
                params = {
                    "page": random_page,
                    "pageSize": page_size,
                    "includeSeries": "true"
                }

                if series_id is not None:
                    params["seriesId"] = series_id

                response = requests.get(url, headers={"X-Api-Key": api_key}, params=params, timeout=api_timeout)
                response.raise_for_status()

                if not response.content:
                    sonarr_logger.warning(f"Empty response when getting missing episodes page {random_page}")
                    return []

                try:
                    data = response.json()
                    records = data.get('records', [])
                    sonarr_logger.info(f"Retrieved {len(records)} missing episodes from page {random_page}")

                    # Apply monitored filter if requested (series AND episode monitored)
                    if monitored_only:
                        filtered_records = [
                            ep for ep in records
                            if ep.get('series', {}).get('monitored', False) and ep.get('monitored', False)
                        ]
                        sonarr_logger.debug(f"Filtered to {len(filtered_records)} monitored missing episodes")
                        records = filtered_records

                    # Select random episodes from this page
                    if len(records) > count:
                        selected_records = random.sample(records, count)
                        sonarr_logger.debug(f"Randomly selected {len(selected_records)} missing episodes from page {random_page}")
                        return selected_records
                    else:
                        # If we have fewer episodes than requested, return all of them
                        sonarr_logger.debug(f"Returning all {len(records)} missing episodes from page {random_page} (fewer than requested {count})")
                        return records

                except json.JSONDecodeError as jde:
                    sonarr_logger.error(f"Failed to decode JSON response for missing episodes page {random_page}: {str(jde)}")
                    if attempt < retries:
                        time.sleep(retry_delay)
                        continue
                    return []

            except json.JSONDecodeError as jde:
                sonarr_logger.error(f"Failed to decode JSON response for missing episodes count: {str(jde)}")
                if attempt < retries:
                    time.sleep(retry_delay)
                    continue
                return []

        except requests.exceptions.RequestException as e:
            sonarr_logger.error(f"Error getting missing episodes from Sonarr (attempt {attempt+1}): {str(e)}")
            if attempt < retries:
                time.sleep(retry_delay)
                continue
            return []

        except Exception as e:
            sonarr_logger.error(f"Unexpected error getting missing episodes (attempt {attempt+1}): {str(e)}", exc_info=True)
            if attempt < retries:
                time.sleep(retry_delay)
                continue
            return []

    # If we get here, all retries failed
    sonarr_logger.error("All attempts to get missing episodes failed")
    return []
+
def search_episode(api_url: str, api_key: str, api_timeout: int, episode_ids: List[int]) -> Optional[Union[int, str]]:
    """Trigger a search for specific episodes in Sonarr."""
    # Nothing to do without at least one episode ID.
    if not episode_ids:
        sonarr_logger.warning("No episode IDs provided for search.")
        return None

    command_payload = {
        "name": "EpisodeSearch",
        "episodeIds": episode_ids,
    }
    try:
        response = requests.post(
            f"{api_url}/api/v3/command",
            headers={"X-Api-Key": api_key},
            json=command_payload,
            timeout=api_timeout,
        )
        response.raise_for_status()
        # Sonarr answers with the queued command; its id lets callers poll status.
        command_id = response.json().get('id')
        sonarr_logger.info(f"Triggered Sonarr search for episode IDs: {episode_ids}. Command ID: {command_id}")
        return command_id
    except requests.exceptions.RequestException as e:
        sonarr_logger.error(f"Error triggering Sonarr search for episode IDs {episode_ids}: {e}")
        return None
    except Exception as e:
        sonarr_logger.error(f"An unexpected error occurred while triggering Sonarr search: {e}")
        return None
+
def get_command_status(api_url: str, api_key: str, api_timeout: int, command_id: Union[int, str]) -> Optional[Dict[str, Any]]:
    """Get the status of a Sonarr command."""
    try:
        response = requests.get(
            f"{api_url}/api/v3/command/{command_id}",
            headers={"X-Api-Key": api_key},
            timeout=api_timeout,
        )
        response.raise_for_status()
        status = response.json()
        sonarr_logger.debug(f"Checked Sonarr command status for ID {command_id}: {status.get('status')}")
        return status
    except requests.exceptions.RequestException as e:
        sonarr_logger.error(f"Error getting Sonarr command status for ID {command_id}: {e}")
        return None
    except Exception as e:
        sonarr_logger.error(f"An unexpected error occurred while getting Sonarr command status: {e}")
        return None
+
def get_download_queue_size(api_url: str, api_key: str, api_timeout: int) -> int:
    """Get the current size of the Sonarr download queue.

    Returns the queue's totalRecords count, or -1 after all retries fail.
    """
    retries = 2  # Number of retry attempts
    retry_delay = 3  # Delay between retries in seconds
    # Request a single-item page: only the totalRecords field is needed.
    endpoint = f"{api_url}/api/v3/queue?page=1&pageSize=1"
    attempt = 0

    while attempt <= retries:
        try:
            response = requests.get(
                endpoint,
                headers={"X-Api-Key": api_key},
                params={"includeSeries": "false"},
                timeout=api_timeout,
            )
            response.raise_for_status()

            if not response.content:
                sonarr_logger.warning(f"Empty response when getting queue size (attempt {attempt+1}/{retries+1})")
                if attempt < retries:
                    attempt += 1
                    time.sleep(retry_delay)
                    continue
                return -1

            try:
                queue_size = response.json().get('totalRecords', 0)
                sonarr_logger.debug(f"Sonarr download queue size: {queue_size}")
                return queue_size
            except json.JSONDecodeError as jde:
                sonarr_logger.error(f"Failed to decode queue JSON (attempt {attempt+1}/{retries+1}): {jde}")
                if attempt < retries:
                    attempt += 1
                    time.sleep(retry_delay)
                    continue
                return -1

        except requests.exceptions.RequestException as e:
            sonarr_logger.error(f"Error getting Sonarr download queue size (attempt {attempt+1}/{retries+1}): {e}")
            if attempt < retries:
                sonarr_logger.info(f"Retrying in {retry_delay} seconds...")
                attempt += 1
                time.sleep(retry_delay)
                continue
            return -1  # Return -1 to indicate an error
        except Exception as e:
            sonarr_logger.error(f"Unexpected error getting queue size (attempt {attempt+1}/{retries+1}): {e}")
            if attempt < retries:
                attempt += 1
                time.sleep(retry_delay)
                continue
            return -1

    # Defensive fallthrough: every path above returns, but keep the guard.
    sonarr_logger.error(f"All {retries+1} attempts to get download queue size failed")
    return -1
+
def refresh_series(api_url: str, api_key: str, api_timeout: int, series_id: int) -> Optional[Union[int, str]]:
    """Trigger a refresh for a specific series in Sonarr."""
    command_payload = {
        "name": "RefreshSeries",
        "seriesId": series_id,
    }
    try:
        response = requests.post(
            f"{api_url}/api/v3/command",
            headers={"X-Api-Key": api_key},
            json=command_payload,
            timeout=api_timeout,
        )
        response.raise_for_status()
        # The returned command id can be polled via get_command_status.
        command_id = response.json().get('id')
        sonarr_logger.info(f"Triggered Sonarr refresh for series ID: {series_id}. Command ID: {command_id}")
        return command_id
    except requests.exceptions.RequestException as e:
        sonarr_logger.error(f"Error triggering Sonarr refresh for series ID {series_id}: {e}")
        return None
    except Exception as e:
        sonarr_logger.error(f"An unexpected error occurred while triggering Sonarr series refresh: {e}")
        return None
+
def get_series_by_id(api_url: str, api_key: str, api_timeout: int, series_id: int) -> Optional[Dict[str, Any]]:
    """Get series details by ID from Sonarr."""
    try:
        response = requests.get(
            f"{api_url}/api/v3/series/{series_id}",
            headers={"X-Api-Key": api_key},
            timeout=api_timeout,
        )
        response.raise_for_status()
        series_data = response.json()
        sonarr_logger.debug(f"Fetched details for Sonarr series ID: {series_id}")
        return series_data
    except requests.exceptions.RequestException as e:
        sonarr_logger.error(f"Error getting Sonarr series details for ID {series_id}: {e}")
        return None
    except Exception as e:
        sonarr_logger.error(f"An unexpected error occurred while getting Sonarr series details: {e}")
        return None
+
def search_season(api_url: str, api_key: str, api_timeout: int, series_id: int, season_number: int) -> Optional[Union[int, str]]:
    """Trigger a search for a specific season in Sonarr."""
    command_payload = {
        "name": "SeasonSearch",
        "seriesId": series_id,
        "seasonNumber": season_number,
    }
    try:
        response = requests.post(
            f"{api_url}/api/v3/command",
            headers={"X-Api-Key": api_key},
            json=command_payload,
            timeout=api_timeout,
        )
        response.raise_for_status()
        # Command id for later status polling.
        command_id = response.json().get('id')
        sonarr_logger.info(f"Triggered Sonarr season search for series ID: {series_id}, season: {season_number}. Command ID: {command_id}")
        return command_id
    except requests.exceptions.RequestException as e:
        sonarr_logger.error(f"Error triggering Sonarr season search for series ID {series_id}, season {season_number}: {e}")
        return None
    except Exception as e:
        sonarr_logger.error(f"An unexpected error occurred while triggering Sonarr season search: {e}")
        return None
+
def get_series_with_missing_episodes(api_url: str, api_key: str, api_timeout: int, monitored_only: bool = True, limit: int = 50, random_mode: bool = True) -> List[Dict[str, Any]]:
    """
    Get a list of series that have missing episodes, along with missing episode counts per season.
    This is much more efficient than fetching all missing episodes for large libraries.

    Args:
        api_url: The base URL of the Sonarr API
        api_key: The API key for authentication
        api_timeout: Timeout for the API request
        monitored_only: Whether to only include monitored series
        limit: Maximum number of series to examine
        random_mode: Whether to randomly select series

    Returns:
        A list of series with missing episodes and counts per season
    """
    # Step 1: pull the full series catalogue.
    series_catalog = get_series(api_url, api_key, api_timeout)
    if not series_catalog:
        sonarr_logger.error("Failed to retrieve series list")
        return []

    # Step 2: optionally restrict to monitored series.
    if monitored_only:
        candidates = [entry for entry in series_catalog if entry.get('monitored', False)]
        sonarr_logger.info(f"Filtered from {len(series_catalog)} total series to {len(candidates)} monitored series")
    else:
        candidates = series_catalog

    # Shuffle in place for random mode; otherwise keep catalogue order.
    if random_mode:
        import random
        sonarr_logger.info(f"Using RANDOM selection mode for missing episodes")
        random.shuffle(candidates)
    else:
        sonarr_logger.info(f"Using SEQUENTIAL selection mode for missing episodes")

    # Step 3: inspect each candidate's episode list directly via the
    # episode endpoint — cheaper than paginating wanted/missing.
    matches = []
    checked = 0

    for series in candidates[:limit]:
        checked += 1
        series_id = series.get('id')
        series_title = series.get('title', 'Unknown')

        if not series_id:
            continue

        try:
            episode_url = f"{api_url}/api/v3/episode?seriesId={series_id}"
            response = requests.get(episode_url, headers={"X-Api-Key": api_key}, timeout=api_timeout)
            response.raise_for_status()

            if not response.content:
                continue

            episodes = response.json()

            # An episode counts as missing only when hasFile is explicitly
            # False (and, if requested, the episode itself is monitored).
            wanted = [
                ep for ep in episodes
                if ep.get('hasFile') is False and
                (not monitored_only or ep.get('monitored', False))
            ]

            if not wanted:
                continue

            # Bucket missing episodes by season number.
            by_season = {}
            for ep in wanted:
                season_no = ep.get('seasonNumber')
                if season_no is not None:
                    by_season.setdefault(season_no, []).append(ep)

            if by_season:
                matches.append({
                    'series_id': series_id,
                    'series_title': series_title,
                    'seasons': [
                        {
                            'season_number': season_no,
                            'episode_count': len(season_eps),
                            'episodes': season_eps,
                        }
                        for season_no, season_eps in by_season.items()
                    ],
                })

                sonarr_logger.debug(f"Found series {series_title} with {len(wanted)} missing episodes across {len(by_season)} seasons")

        except Exception as e:
            sonarr_logger.error(f"Error checking missing episodes for series {series_title} (ID: {series_id}): {str(e)}")
            continue

    selection_mode = "RANDOM" if random_mode else "SEQUENTIAL"
    sonarr_logger.info(f"Examined {checked} series ({selection_mode} mode) and found {len(matches)} with missing episodes")
    return matches
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/sonarr/missing.py b/Huntarr.io-6.3.6/src/primary/apps/sonarr/missing.py
new file mode 100644
index 0000000..f4f26d0
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/sonarr/missing.py
@@ -0,0 +1,612 @@
+#!/usr/bin/env python3
+"""
+Sonarr missing episodes processing module for Huntarr
+"""
+
+import time
+import random
+from typing import List, Dict, Any, Set, Callable
+from src.primary.utils.logger import get_logger
+from src.primary.apps.sonarr import api as sonarr_api
+from src.primary.stats_manager import increment_stat
+from src.primary.stateful_manager import is_processed, add_processed_id
+from src.primary.utils.history_utils import log_processed_media
+from src.primary.settings_manager import load_settings, get_advanced_setting
+
+# Get logger for the Sonarr app
+sonarr_logger = get_logger("sonarr")
+
def process_missing_episodes(
    api_url: str,
    api_key: str,
    instance_name: str,
    api_timeout: int = get_advanced_setting("api_timeout", 120),
    monitored_only: bool = True,
    skip_future_episodes: bool = True,
    skip_series_refresh: bool = False,
    hunt_missing_items: int = 5,
    hunt_missing_mode: str = "episodes",
    command_wait_delay: int = get_advanced_setting("command_wait_delay", 1),
    command_wait_attempts: int = get_advanced_setting("command_wait_attempts", 600),
    stop_check: Callable[[], bool] = lambda: False
) -> bool:
    """
    Entry point for missing-episode processing: dispatch to the handler for
    the configured hunt_missing_mode ('episodes', 'seasons_packs' or 'shows').

    NOTE(review): the get_advanced_setting(...) defaults are evaluated once at
    import time, not per call — confirm that is intended.

    Returns True if at least one search was triggered, False otherwise.
    """
    if hunt_missing_items <= 0:
        sonarr_logger.info("'hunt_missing_items' setting is 0 or less. Skipping missing processing.")
        return False

    sonarr_logger.info(f"Checking for {hunt_missing_items} missing episodes in {hunt_missing_mode} mode...")

    # Table of mode -> (announcement log line, zero-arg runner). Each runner
    # forwards exactly the arguments its mode needs.
    mode_handlers = {
        "episodes": (
            "Episode-based missing mode selected",
            lambda: process_missing_episodes_mode(
                api_url, api_key, instance_name, api_timeout, monitored_only,
                skip_future_episodes, skip_series_refresh,
                hunt_missing_items, command_wait_delay, command_wait_attempts,
                stop_check
            ),
        ),
        "seasons_packs": (
            "Season [Packs] mode selected - searching for complete season packs",
            lambda: process_missing_seasons_packs_mode(
                api_url, api_key, instance_name, api_timeout, monitored_only,
                skip_series_refresh, hunt_missing_items,
                command_wait_delay, command_wait_attempts, stop_check
            ),
        ),
        "shows": (
            "Show-based missing mode selected",
            lambda: process_missing_shows_mode(
                api_url, api_key, instance_name, api_timeout, monitored_only,
                skip_future_episodes, skip_series_refresh, hunt_missing_items,
                command_wait_delay, command_wait_attempts, stop_check
            ),
        ),
    }

    handler = mode_handlers.get(hunt_missing_mode)
    if handler is None:
        sonarr_logger.error(f"Invalid hunt_missing_mode: {hunt_missing_mode}. Valid options are 'episodes', 'seasons_packs', or 'shows'.")
        return False

    announcement, run_mode = handler
    sonarr_logger.info(announcement)
    return run_mode()
+
def process_missing_episodes_mode(
    api_url: str,
    api_key: str,
    instance_name: str,
    api_timeout: int,
    monitored_only: bool,
    skip_future_episodes: bool,
    skip_series_refresh: bool,
    hunt_missing_items: int,
    command_wait_delay: int,
    command_wait_attempts: int,
    stop_check: Callable[[], bool]
) -> bool:
    """Process missing episodes in episode mode (original implementation).

    Picks a random page of missing episodes, filters out future/already-processed
    ones, then per series: optionally refreshes metadata, triggers an episode
    search, marks the episodes processed and records stats/history.

    Fix: the "hunted" statistic was previously incremented TWICE per episode
    (once in the per-ID loop and again inside the episode-lookup loop),
    double-counting every hunted episode. It is now incremented exactly once.

    Returns True if at least one search command completed successfully.
    """
    processed_any = False

    # Always use random selection for missing episodes
    sonarr_logger.info(f"Using random selection for missing episodes")
    episodes_to_search = sonarr_api.get_missing_episodes_random_page(
        api_url, api_key, api_timeout, monitored_only, hunt_missing_items)

    if stop_check():
        sonarr_logger.info("Stop requested during missing episode processing.")
        return processed_any

    # Filter out future episodes for random selection approach.
    # NOTE(review): time.mktime treats the parsed struct_time as LOCAL time
    # while airDateUtc is UTC, so the cutoff can be off by the UTC offset —
    # confirm whether that skew is acceptable here.
    if skip_future_episodes:
        now_unix = time.time()
        original_count = len(episodes_to_search)
        episodes_to_search = [
            ep for ep in episodes_to_search
            if ep.get('airDateUtc') and time.mktime(time.strptime(ep['airDateUtc'], '%Y-%m-%dT%H:%M:%SZ')) < now_unix
        ]
        skipped_count = original_count - len(episodes_to_search)
        if skipped_count > 0:
            sonarr_logger.info(f"Skipped {skipped_count} future episodes based on air date.")

    # Filter out already processed episodes for random selection approach
    unprocessed_episodes = []
    for episode in episodes_to_search:
        episode_id = str(episode.get("id"))
        if not is_processed("sonarr", instance_name, episode_id):
            unprocessed_episodes.append(episode)
        else:
            sonarr_logger.debug(f"Skipping already processed episode ID: {episode_id}")

    sonarr_logger.info(f"Found {len(unprocessed_episodes)} unprocessed missing episodes out of {len(episodes_to_search)} total.")
    episodes_to_search = unprocessed_episodes

    if not episodes_to_search:
        sonarr_logger.info("No missing episodes left to process after filtering.")
        return False

    sonarr_logger.info(f"Selected {len(episodes_to_search)} missing episodes to search.")

    # Detailed listing of episodes being processed this cycle
    if episodes_to_search:
        sonarr_logger.info(f"Episodes selected for processing in this cycle:")
        for idx, episode in enumerate(episodes_to_search):
            series_title = episode.get('series', {}).get('title', 'Unknown Series')
            episode_title = episode.get('title', 'Unknown Episode')
            season_number = episode.get('seasonNumber', 'Unknown Season')
            episode_number = episode.get('episodeNumber', 'Unknown Episode')

            episode_id = episode.get("id")
            # Numbers format as S01E02; non-numeric fallbacks format verbatim
            try:
                season_episode = f"S{season_number:02d}E{episode_number:02d}"
            except (ValueError, TypeError):
                season_episode = f"S{season_number}E{episode_number}"

            sonarr_logger.info(f" {idx+1}. {series_title} - {season_episode} - \"{episode_title}\" (ID: {episode_id})")

    # Group episodes by series for potential refresh
    series_to_refresh: Dict[int, List[int]] = {}
    series_titles: Dict[int, str] = {}  # Store titles for logging
    for episode in episodes_to_search:
        series_id = episode.get('seriesId')
        if series_id:
            if series_id not in series_to_refresh:
                series_to_refresh[series_id] = []
                # Store title when first encountering the series ID
                series_titles[series_id] = episode.get('series', {}).get('title', f"Series ID {series_id}")
            series_to_refresh[series_id].append(episode['id'])

    # Process each series
    for series_id, episode_ids in series_to_refresh.items():
        if stop_check(): sonarr_logger.info("Stop requested before processing next series."); break
        series_title = series_titles.get(series_id, f"Series ID {series_id}")
        sonarr_logger.info(f"Processing series: {series_title} (ID: {series_id}) with {len(episode_ids)} missing episodes.")

        # Refresh series metadata if not skipped
        refresh_command_id = None
        if not skip_series_refresh:
            sonarr_logger.debug(f"Attempting to refresh series ID: {series_id}")
            refresh_command_id = sonarr_api.refresh_series(api_url, api_key, api_timeout, series_id)
            if refresh_command_id:
                # Wait for refresh command to complete
                if not wait_for_command(
                    api_url, api_key, api_timeout, refresh_command_id,
                    command_wait_delay, command_wait_attempts, "Series Refresh", stop_check
                ):
                    sonarr_logger.warning(f"Series refresh command (ID: {refresh_command_id}) for series {series_id} did not complete successfully or timed out. Proceeding with search anyway.")
            else:
                sonarr_logger.warning(f"Failed to trigger refresh command for series ID: {series_id}. Proceeding without refresh.")
        else:
            sonarr_logger.debug(f"Skipping series refresh for series ID: {series_id} as configured.")

        if stop_check(): sonarr_logger.info("Stop requested after series refresh attempt."); break

        # Trigger search for the selected episodes in this series
        sonarr_logger.debug(f"Attempting to search for episode IDs: {episode_ids}")
        search_command_id = sonarr_api.search_episode(api_url, api_key, api_timeout, episode_ids)

        if search_command_id:
            # Mark episode IDs processed IMMEDIATELY so a crash mid-wait
            # doesn't cause them to be hunted again next cycle.
            for episode_id in episode_ids:
                success = add_processed_id("sonarr", instance_name, str(episode_id))
                sonarr_logger.debug(f"Added processed ID: {episode_id}, success: {success}")

            # Wait for search command to complete
            if wait_for_command(
                api_url, api_key, api_timeout, search_command_id,
                command_wait_delay, command_wait_attempts, "Episode Search", stop_check
            ):
                processed_any = True
                sonarr_logger.info(f"Successfully processed and searched for {len(episode_ids)} episodes in series {series_id}.")

                for episode_id in episode_ids:
                    # Increment stat exactly once per episode (like Radarr does
                    # for movies). A second increment inside the lookup loop
                    # below used to double-count — removed.
                    increment_stat("sonarr", "hunted")
                    sonarr_logger.info(f"*** STATS INCREMENT *** sonarr hunted by 1 for episode ID {episode_id}")

                    # Log to history system: find this ID's episode data
                    for episode in episodes_to_search:
                        if episode.get('id') == episode_id:
                            series_title = episode.get('series', {}).get('title', 'Unknown Series')
                            episode_title = episode.get('title', 'Unknown Episode')
                            season_number = episode.get('seasonNumber', 'Unknown Season')
                            episode_number = episode.get('episodeNumber', 'Unknown Episode')

                            try:
                                season_episode = f"S{season_number:02d}E{episode_number:02d}"
                            except (ValueError, TypeError):
                                season_episode = f"S{season_number}E{episode_number}"

                            media_name = f"{series_title} - {season_episode} - {episode_title}"
                            # Also record the combined series+episode key
                            # (kept for compatibility with existing state files)
                            process_id = f"{series_id}_{episode_id}"
                            add_processed_id("sonarr", instance_name, process_id)
                            log_processed_media("sonarr", media_name, episode_id, instance_name, "missing")
                            break
            else:
                sonarr_logger.warning(f"Episode search command (ID: {search_command_id}) for series {series_id} did not complete successfully or timed out. Episodes will not be marked as processed yet.")
        else:
            sonarr_logger.error(f"Failed to trigger search command for episodes {episode_ids} in series {series_id}.")

    sonarr_logger.info("Finished missing episodes processing cycle for Sonarr.")
    return processed_any
+
def process_missing_seasons_packs_mode(
    api_url: str,
    api_key: str,
    instance_name: str,
    api_timeout: int,
    monitored_only: bool,
    skip_series_refresh: bool,
    hunt_missing_items: int,
    command_wait_delay: int,
    command_wait_attempts: int,
    stop_check: Callable[[], bool]
) -> bool:
    """
    Process missing seasons using the SeasonSearch command.
    This mode is optimized for torrent users who rely on season packs.
    Uses a direct episode lookup approach (one API call for all missing
    episodes) which is much more efficient than per-series scans.

    Returns True if at least one season search was triggered.
    """
    processed_any = False

    # Get all missing episodes in one call instead of per-series
    missing_episodes = sonarr_api.get_missing_episodes(api_url, api_key, api_timeout, monitored_only)
    if not missing_episodes:
        sonarr_logger.info("No missing episodes found")
        return False

    # Group episodes by (series, season) and count missing episodes per season
    missing_seasons = {}
    for episode in missing_episodes:
        if monitored_only and not episode.get('monitored', False):
            continue

        series_id = episode.get('seriesId')
        if not series_id:
            continue

        season_number = episode.get('seasonNumber')
        series_title = episode.get('series', {}).get('title', 'Unknown Series')

        key = f"{series_id}:{season_number}"
        if key not in missing_seasons:
            missing_seasons[key] = {
                'series_id': series_id,
                'season_number': season_number,
                'series_title': series_title,
                'episode_count': 0
            }
        missing_seasons[key]['episode_count'] += 1

    # Convert to list and sort by episode count (most missing episodes first)
    seasons_list = list(missing_seasons.values())
    seasons_list.sort(key=lambda x: x['episode_count'], reverse=True)

    # Filter out already processed seasons (keyed as "<series>_<season>")
    unprocessed_seasons = []
    for season in seasons_list:
        season_id = f"{season['series_id']}_{season['season_number']}"
        if not is_processed("sonarr", instance_name, season_id):
            unprocessed_seasons.append(season)
        else:
            sonarr_logger.debug(f"Skipping already processed season ID: {season_id}")

    sonarr_logger.info(f"Found {len(unprocessed_seasons)} unprocessed seasons with missing episodes out of {len(seasons_list)} total.")

    if not unprocessed_seasons:
        sonarr_logger.info("All seasons with missing episodes have been processed.")
        return False

    # Randomize the processing order (always; overrides the sort above)
    random.shuffle(unprocessed_seasons)

    # Process up to hunt_missing_items seasons
    processed_count = 0

    # Detailed logging for the selected seasons
    if unprocessed_seasons and hunt_missing_items > 0:
        seasons_to_process = unprocessed_seasons[:hunt_missing_items]
        sonarr_logger.info(f"Randomly selected {min(len(unprocessed_seasons), hunt_missing_items)} seasons with missing episodes:")

        for idx, season in enumerate(seasons_to_process):
            sonarr_logger.info(f" {idx+1}. {season['series_title']} - Season {season['season_number']} ({season['episode_count']} missing episodes) (Series ID: {season['series_id']})")

    for season in unprocessed_seasons:
        if processed_count >= hunt_missing_items:
            break

        if stop_check():
            sonarr_logger.info("Stop signal received, halting processing.")
            break

        series_id = season['series_id']
        season_number = season['season_number']
        series_title = season['series_title']
        episode_count = season['episode_count']

        # Refresh series metadata if not skipped
        if not skip_series_refresh:
            sonarr_logger.debug(f"Refreshing metadata for {series_title} before season pack search")
            refresh_command_id = sonarr_api.refresh_series(api_url, api_key, api_timeout, series_id)
            if refresh_command_id:
                wait_for_command(
                    api_url, api_key, api_timeout, refresh_command_id,
                    command_wait_delay, command_wait_attempts, "Series Refresh", stop_check
                )

        sonarr_logger.info(f"Searching for season pack: {series_title} - Season {season_number} (contains {episode_count} missing episodes)")

        # Trigger an API call to search for the entire season
        command_id = sonarr_api.search_season(api_url, api_key, api_timeout, series_id, season_number)

        if command_id:
            processed_any = True
            processed_count += 1

            # Add season to processed list
            season_id = f"{series_id}_{season_number}"
            success = add_processed_id("sonarr", instance_name, season_id)
            sonarr_logger.debug(f"Added season ID {season_id} to processed list for {instance_name}, success: {success}")

            # Log to history system
            media_name = f"{series_title} - Season {season_number} (contains {episode_count} missing episodes)"
            log_processed_media("sonarr", media_name, season_id, instance_name, "missing")
            sonarr_logger.debug(f"Logged history entry for season pack: {media_name}")

            # Count one "hunted" per missing episode in the pack,
            # incremented one by one instead of in a batch
            for i in range(episode_count):
                increment_stat("sonarr", "hunted")
            sonarr_logger.debug(f"Incremented sonarr hunted statistics for {episode_count} episodes in season pack")

            # Wait for command to complete if configured. The result is
            # intentionally ignored: the season was already marked processed
            # and counted once the search command was accepted.
            if command_wait_delay > 0 and command_wait_attempts > 0:
                wait_for_command(
                    api_url, api_key, api_timeout, command_id,
                    command_wait_delay, command_wait_attempts, "Season Search", stop_check
                )
        else:
            sonarr_logger.error(f"Failed to trigger search for {series_title}.")

    sonarr_logger.info(f"Processed {processed_count} missing season packs for Sonarr.")
    return processed_any
+
def process_missing_shows_mode(
    api_url: str,
    api_key: str,
    instance_name: str,
    api_timeout: int,
    monitored_only: bool,
    skip_future_episodes: bool,
    skip_series_refresh: bool,
    hunt_missing_items: int,
    command_wait_delay: int,
    command_wait_attempts: int,
    stop_check: Callable[[], bool]
) -> bool:
    """Process missing episodes in show mode - gets all missing episodes for entire shows.

    Fix: the per-episode debug listing used f"S{season:02d}E{ep_num:02d}"
    unguarded; when seasonNumber/episodeNumber are absent the defaults are the
    string 'Unknown' and the :02d format raises TypeError. It now falls back
    to unpadded formatting like every other format site in this module.

    Returns True if at least one show's episode search was triggered.
    """
    processed_any = False

    # Get series with missing episodes
    sonarr_logger.info("Retrieving series with missing episodes...")
    series_with_missing = sonarr_api.get_series_with_missing_episodes(
        api_url, api_key, api_timeout, monitored_only, random_mode=True)

    if not series_with_missing:
        sonarr_logger.info("No series with missing episodes found.")
        return False

    # Filter out shows that have been processed
    unprocessed_series = []
    for series in series_with_missing:
        series_id = str(series.get("series_id"))
        if not is_processed("sonarr", instance_name, series_id):
            unprocessed_series.append(series)
        else:
            sonarr_logger.debug(f"Skipping already processed series ID: {series_id}")

    sonarr_logger.info(f"Found {len(unprocessed_series)} unprocessed series with missing episodes out of {len(series_with_missing)} total.")

    if not unprocessed_series:
        sonarr_logger.info("All series with missing episodes have been processed.")
        return False

    # Randomly select the shows to process
    shows_to_process = random.sample(
        unprocessed_series,
        min(len(unprocessed_series), hunt_missing_items)
    )

    # Detailed logging for the selected shows
    if shows_to_process:
        sonarr_logger.info("Shows selected for processing in this cycle:")
        for idx, show in enumerate(shows_to_process):
            show_id = show.get('series_id')
            show_title = show.get('series_title', 'Unknown Show')
            # Count total missing episodes across all seasons
            episode_count = sum(season.get('episode_count', 0) for season in show.get('seasons', []))
            sonarr_logger.info(f" {idx+1}. {show_title} ({episode_count} missing episodes) (Show ID: {show_id})")

    # Process each show
    for show in shows_to_process:
        if stop_check():
            sonarr_logger.info("Stop requested. Aborting show processing.")
            break

        show_id = show.get('series_id')
        show_title = show.get('series_title', 'Unknown Show')

        # Flatten missing episodes across all seasons of this show
        missing_episodes = []
        for season in show.get('seasons', []):
            missing_episodes.extend(season.get('episodes', []))

        # Filter out future episodes if needed.
        # NOTE(review): time.mktime interprets the parsed time as local while
        # airDateUtc is UTC — cutoff may be off by the UTC offset; confirm.
        if skip_future_episodes:
            now_unix = time.time()
            original_count = len(missing_episodes)
            missing_episodes = [
                ep for ep in missing_episodes
                if ep.get('airDateUtc') and time.mktime(time.strptime(ep['airDateUtc'], '%Y-%m-%dT%H:%M:%SZ')) < now_unix
            ]
            skipped_count = original_count - len(missing_episodes)
            if skipped_count > 0:
                sonarr_logger.info(f"Skipped {skipped_count} future episodes for {show_title} based on air date.")

        if not missing_episodes:
            sonarr_logger.info(f"No eligible missing episodes found for {show_title} after filtering.")
            continue

        # Log episodes to be processed
        sonarr_logger.info(f"Processing {len(missing_episodes)} missing episodes for show: {show_title}")
        for idx, episode in enumerate(missing_episodes[:5]):  # Only log first 5 for brevity
            season = episode.get('seasonNumber', 'Unknown')
            ep_num = episode.get('episodeNumber', 'Unknown')
            title = episode.get('title', 'Unknown Title')
            # Guarded formatting: values may be the string 'Unknown'
            try:
                season_episode = f"S{season:02d}E{ep_num:02d}"
            except (ValueError, TypeError):
                season_episode = f"S{season}E{ep_num}"
            sonarr_logger.debug(f"  {idx+1}. {season_episode} - {title}")

        if len(missing_episodes) > 5:
            sonarr_logger.debug(f"  ... and {len(missing_episodes)-5} more episodes.")

        # Refresh series if not skipped
        if not skip_series_refresh:
            sonarr_logger.info(f"Refreshing series info for {show_title}...")
            refresh_command_id = sonarr_api.refresh_series(api_url, api_key, api_timeout, show_id)
            if refresh_command_id:
                wait_success = wait_for_command(
                    api_url, api_key, api_timeout, refresh_command_id,
                    command_wait_delay, command_wait_attempts, "Series Refresh", stop_check
                )
                if not wait_success:
                    sonarr_logger.warning(f"Series refresh command timed out or failed for {show_title}. Proceeding with search anyway.")
            else:
                sonarr_logger.warning(f"Failed to trigger refresh command for {show_title}. Proceeding with search anyway.")

        # Extract episode IDs to search
        episode_ids = [episode.get('id') for episode in missing_episodes if episode.get('id')]

        if not episode_ids:
            sonarr_logger.warning(f"No valid episode IDs found for {show_title}.")
            continue

        # Search for all episodes in the show. search_episode returns a
        # command ID (truthy) on success; unlike episode mode, this mode
        # does not wait for the search command to finish.
        sonarr_logger.info(f"Searching for {len(episode_ids)} missing episodes for {show_title}...")
        search_successful = sonarr_api.search_episode(api_url, api_key, api_timeout, episode_ids)

        if search_successful:
            processed_any = True
            sonarr_logger.info(f"Successfully processed {len(episode_ids)} missing episodes in {show_title}")

            # Mark episode IDs processed IMMEDIATELY after the batch is queued
            for episode_id in episode_ids:
                success = add_processed_id("sonarr", instance_name, str(episode_id))
                sonarr_logger.debug(f"Added processed ID: {episode_id}, success: {success}")

                # Log each episode to history using its full episode data
                for episode in missing_episodes:
                    if episode.get('id') == episode_id:
                        season = episode.get('seasonNumber', 'Unknown')
                        ep_num = episode.get('episodeNumber', 'Unknown')
                        title = episode.get('title', 'Unknown Title')

                        try:
                            season_episode = f"S{season:02d}E{ep_num:02d}"
                        except (ValueError, TypeError):
                            season_episode = f"S{season}E{ep_num}"

                        media_name = f"{show_title} - {season_episode} - {title}"
                        log_processed_media("sonarr", media_name, str(episode_id), instance_name, "missing")
                        sonarr_logger.debug(f"Logged history entry for episode: {media_name}")
                        break

            # Add series ID to processed list so the whole show is skipped next cycle
            success = add_processed_id("sonarr", instance_name, str(show_id))
            sonarr_logger.debug(f"Added series ID {show_id} to processed list for {instance_name}, success: {success}")

            # Also log the entire show to history
            media_name = f"{show_title} - Complete Series ({len(episode_ids)} episodes)"
            log_processed_media("sonarr", media_name, str(show_id), instance_name, "missing")
            sonarr_logger.debug(f"Logged history entry for complete series: {media_name}")

            # Increment the hunted statistics (one per episode, batched)
            increment_stat("sonarr", "hunted", len(episode_ids))
            sonarr_logger.debug(f"Incremented sonarr hunted statistics by {len(episode_ids)}")
        else:
            sonarr_logger.error(f"Failed to trigger search for {show_title}.")

    sonarr_logger.info("Show-based missing episode processing complete.")
    return processed_any
+
def wait_for_command(
    api_url: str,
    api_key: str,
    api_timeout: int,
    command_id: int,
    wait_delay: int,
    max_attempts: int,
    command_name: str = "Command",
    stop_check: Callable[[], bool] = lambda: False
) -> bool:
    """
    Poll a Sonarr command until it completes, fails, or the attempt budget
    is exhausted.

    Args:
        api_url: The Sonarr API URL
        api_key: The Sonarr API key
        api_timeout: API request timeout
        command_id: The ID of the command to monitor
        wait_delay: Seconds to wait between status checks
        max_attempts: Maximum number of status check attempts
        command_name: Name of the command (for logging)
        stop_check: Optional function to check if operation should be aborted

    Returns:
        True if command completed successfully, False otherwise
    """
    # Waiting disabled — optimistically report success without checking.
    if wait_delay <= 0 or max_attempts <= 0:
        sonarr_logger.debug(f"Not waiting for command to complete (wait_delay={wait_delay}, max_attempts={max_attempts})")
        return True

    sonarr_logger.debug(f"Waiting for {command_name} to complete (command ID: {command_id}). Checking every {wait_delay}s for up to {max_attempts} attempts")

    for attempt in range(max_attempts):
        # Bail out early if the caller asked us to stop.
        if stop_check():
            sonarr_logger.info(f"Stopping wait for {command_name} due to stop request")
            return False

        command_status = sonarr_api.get_command_status(api_url, api_key, api_timeout, command_id)
        if command_status:
            state = command_status.get('status')
            if state == 'completed':
                sonarr_logger.debug(f"Sonarr {command_name} (ID: {command_id}) completed successfully")
                return True
            if state in ('failed', 'aborted'):
                sonarr_logger.warning(f"Sonarr {command_name} (ID: {command_id}) {state}")
                return False
            # Still pending/running — log and keep polling.
            sonarr_logger.debug(f"Sonarr {command_name} (ID: {command_id}) status: {state}, attempt {attempt+1}/{max_attempts}")
        else:
            # Status fetch failed; count the attempt and retry.
            sonarr_logger.warning(f"Failed to get status for {command_name} (ID: {command_id}), attempt {attempt+1}")

        time.sleep(wait_delay)

    sonarr_logger.error(f"Sonarr command '{command_name}' (ID: {command_id}) timed out after {max_attempts} attempts.")
    return False
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/sonarr/upgrade.py b/Huntarr.io-6.3.6/src/primary/apps/sonarr/upgrade.py
new file mode 100644
index 0000000..d6b5a2d
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/sonarr/upgrade.py
@@ -0,0 +1,605 @@
+#!/usr/bin/env python3
+"""
+Sonarr cutoff upgrade processing module for Huntarr
+"""
+
import calendar
import random
import time
from typing import Any, Callable, Dict, List, Optional, Set, Union

from src.primary.apps.sonarr import api as sonarr_api
from src.primary.settings_manager import get_advanced_setting
from src.primary.stateful_manager import add_processed_id, is_processed
from src.primary.stats_manager import increment_stat
from src.primary.utils.history_utils import log_processed_media
from src.primary.utils.logger import get_logger
+
+# Get logger for the Sonarr app
+sonarr_logger = get_logger("sonarr")
+
def process_cutoff_upgrades(
    api_url: str,
    api_key: str,
    instance_name: str,
    api_timeout: Optional[int] = None,
    monitored_only: bool = True,
    skip_series_refresh: bool = False,
    hunt_upgrade_items: int = 5,
    command_wait_delay: Optional[int] = None,
    command_wait_attempts: Optional[int] = None,
    stop_check: Callable[[], bool] = lambda: False
) -> bool:
    """
    Process quality cutoff upgrades for Sonarr.
    This function only uses the episode mode for upgrades regardless of hunt_missing_mode.

    Args:
        api_url: The Sonarr API URL
        api_key: The Sonarr API key
        instance_name: Name of this Sonarr instance (used for state tracking)
        api_timeout: API request timeout; defaults to the "api_timeout" advanced setting
        monitored_only: Only consider monitored episodes
        skip_series_refresh: Skip the series metadata refresh before searching
        hunt_upgrade_items: Maximum number of items to upgrade this cycle (<= 0 disables)
        command_wait_delay: Seconds between command status checks; defaults to advanced setting
        command_wait_attempts: Maximum status checks per command; defaults to advanced setting
        stop_check: Callable returning True when processing should abort early

    Returns:
        True if at least one upgrade search was processed, False otherwise.
    """
    # Resolve settings-backed defaults at call time. The previous signature
    # called get_advanced_setting() in the default expressions, which Python
    # evaluates only ONCE at import time — so any change to the advanced
    # settings after startup was silently ignored.
    if api_timeout is None:
        api_timeout = get_advanced_setting("api_timeout", 120)
    if command_wait_delay is None:
        command_wait_delay = get_advanced_setting("command_wait_delay", 1)
    if command_wait_attempts is None:
        command_wait_attempts = get_advanced_setting("command_wait_attempts", 600)

    if hunt_upgrade_items <= 0:
        sonarr_logger.info("'hunt_upgrade_items' setting is 0 or less. Skipping upgrade processing.")
        return False

    sonarr_logger.info(f"Checking for {hunt_upgrade_items} quality upgrades...")

    sonarr_logger.info("Using RANDOM selection mode for quality upgrades")

    # Always use episode mode for upgrades, regardless of the hunt_missing_mode setting
    return process_upgrade_episodes_mode(
        api_url, api_key, instance_name, api_timeout, monitored_only,
        skip_series_refresh, hunt_upgrade_items,
        command_wait_delay, command_wait_attempts, stop_check
    )
+
def process_upgrade_episodes_mode(
    api_url: str,
    api_key: str,
    instance_name: str,
    api_timeout: int,
    monitored_only: bool,
    skip_series_refresh: bool,
    hunt_upgrade_items: int,
    command_wait_delay: int,
    command_wait_attempts: int,
    stop_check: Callable[[], bool]
) -> bool:
    """Process upgrades in episode mode (original implementation).

    Selects up to hunt_upgrade_items cutoff-unmet episodes from a random
    page, filters out future and already-processed episodes, then per series
    optionally refreshes metadata and triggers an upgrade search.

    Returns:
        True if at least one search command completed successfully.
    """
    processed_any = False

    # Always use the efficient random page selection method
    sonarr_logger.debug(f"Using random selection for cutoff unmet episodes")
    episodes_to_search = sonarr_api.get_cutoff_unmet_episodes_random_page(
        api_url, api_key, api_timeout, monitored_only, hunt_upgrade_items)

    # If we didn't get enough episodes, we might need to try another page
    if len(episodes_to_search) < hunt_upgrade_items and len(episodes_to_search) > 0:
        sonarr_logger.debug(f"Got {len(episodes_to_search)} episodes from random page, fewer than requested {hunt_upgrade_items}")

    if stop_check():
        sonarr_logger.info("Stop requested during upgrade processing.")
        return processed_any

    # Filter out future episodes for random selection approach
    # NOTE(review): this filter is gated on skip_series_refresh, which reads
    # like a refresh toggle rather than an air-date filter — confirm intent.
    if skip_series_refresh:
        now_unix = time.time()
        original_count = len(episodes_to_search)
        # airDateUtc is a UTC timestamp, so convert the parsed struct_time
        # with calendar.timegm. The previous time.mktime() call interpreted
        # it as LOCAL time, skewing the comparison by the host's UTC offset.
        episodes_to_search = [
            ep for ep in episodes_to_search
            if ep.get('airDateUtc') and calendar.timegm(time.strptime(ep['airDateUtc'], '%Y-%m-%dT%H:%M:%SZ')) < now_unix
        ]
        skipped_count = original_count - len(episodes_to_search)
        if skipped_count > 0:
            sonarr_logger.info(f"Skipped {skipped_count} future episodes based on air date for upgrades.")

    # Filter out already processed episodes for random selection approach
    unprocessed_episodes = []
    for episode in episodes_to_search:
        episode_id = str(episode.get("id"))
        if not is_processed("sonarr", instance_name, episode_id):
            unprocessed_episodes.append(episode)
        else:
            sonarr_logger.debug(f"Skipping already processed episode ID for upgrade: {episode_id}")

    sonarr_logger.info(f"Found {len(unprocessed_episodes)} unprocessed cutoff unmet episodes out of {len(episodes_to_search)} total.")
    episodes_to_search = unprocessed_episodes

    if not episodes_to_search:
        sonarr_logger.info("No cutoff unmet episodes left to process for upgrades after filtering.")
        return False

    sonarr_logger.info(f"Selected {len(episodes_to_search)} cutoff unmet episodes to search for upgrades.")

    # Add detailed listing of episodes being upgraded
    if episodes_to_search:
        sonarr_logger.info(f"Episodes selected for quality upgrades in this cycle:")
        for idx, episode in enumerate(episodes_to_search):
            series_title = episode.get('series', {}).get('title', 'Unknown Series')
            episode_title = episode.get('title', 'Unknown Episode')
            season_number = episode.get('seasonNumber', 'Unknown Season')
            episode_number = episode.get('episodeNumber', 'Unknown Episode')

            # Get quality information
            quality_name = "Unknown"
            if "quality" in episode and episode["quality"]:
                quality_name = episode["quality"].get("quality", {}).get("name", "Unknown")

            episode_id = episode.get("id")
            # Format SxxEyy; fall back to raw values when numbers are missing
            try:
                season_episode = f"S{season_number:02d}E{episode_number:02d}"
            except (ValueError, TypeError):
                season_episode = f"S{season_number}E{episode_number}"

            sonarr_logger.info(f" {idx+1}. {series_title} - {season_episode} - \"{episode_title}\" - Current quality: {quality_name} (ID: {episode_id})")

    # Group episodes by series for potential refresh
    series_to_process: Dict[int, List[int]] = {}
    series_titles: Dict[int, str] = {}  # Store titles for logging
    for episode in episodes_to_search:
        series_id = episode.get('seriesId')
        if series_id:
            if series_id not in series_to_process:
                series_to_process[series_id] = []
                # Store title when first encountering the series ID
                series_titles[series_id] = episode.get('series', {}).get('title', f"Series ID {series_id}")
            series_to_process[series_id].append(episode['id'])

    # Process each series
    for series_id, episode_ids in series_to_process.items():
        if stop_check():
            sonarr_logger.info("Stop requested before processing next series for upgrades.")
            break

        series_title = series_titles.get(series_id, f"Series ID {series_id}")
        sonarr_logger.info(f"Processing series for upgrades: {series_title} (ID: {series_id}) with {len(episode_ids)} episodes.")

        # Refresh series metadata if not skipped
        refresh_command_id = None
        if not skip_series_refresh:
            sonarr_logger.debug(f"Attempting to refresh series ID: {series_id} before upgrade search.")
            refresh_command_id = sonarr_api.refresh_series(api_url, api_key, api_timeout, series_id)
            if refresh_command_id:
                # Wait for refresh command to complete
                if not wait_for_command(
                    api_url, api_key, api_timeout, refresh_command_id,
                    command_wait_delay, command_wait_attempts, "Series Refresh (Upgrade)", stop_check
                ):
                    sonarr_logger.warning(f"Series refresh command (ID: {refresh_command_id}) for series {series_id} did not complete successfully or timed out. Proceeding with upgrade search anyway.")
            else:
                sonarr_logger.warning(f"Failed to trigger refresh command for series ID: {series_id}. Proceeding without refresh.")
        else:
            sonarr_logger.debug(f"Skipping series refresh for series ID: {series_id} as configured.")

        if stop_check():
            sonarr_logger.info("Stop requested after series refresh attempt for upgrades.")
            break

        # Trigger search for the selected episodes in this series
        sonarr_logger.debug(f"Attempting upgrade search for episode IDs: {episode_ids}")
        search_command_id = sonarr_api.search_episode(api_url, api_key, api_timeout, episode_ids)

        if search_command_id:
            # Wait for search command to complete
            if wait_for_command(
                api_url, api_key, api_timeout, search_command_id,
                command_wait_delay, command_wait_attempts, "Episode Upgrade Search", stop_check
            ):
                # Mark episodes as processed if search command completed successfully
                processed_any = True  # Mark that we did something
                sonarr_logger.info(f"Successfully processed and searched for {len(episode_ids)} episodes in series {series_id}.")

                # Increment the "upgraded" stat per episode (matches Radarr's behavior)
                for episode_id in episode_ids:
                    increment_stat("sonarr", "upgraded")
                    sonarr_logger.info(f"*** STATS INCREMENT *** sonarr upgraded by 1 for episode ID {episode_id}")

                # Mark episodes as processed using stateful management
                for episode_id in episode_ids:
                    add_processed_id("sonarr", instance_name, str(episode_id))
                    sonarr_logger.debug(f"Marked episode ID {episode_id} as processed for upgrades")

                    # Fetch episode details so the history entry has readable info
                    try:
                        episode_details = sonarr_api.get_episode(api_url, api_key, api_timeout, episode_id)
                        if episode_details:
                            series_title = episode_details.get('series', {}).get('title', 'Unknown Series')
                            episode_title = episode_details.get('title', 'Unknown Episode')
                            season_number = episode_details.get('seasonNumber', 'Unknown Season')
                            episode_number = episode_details.get('episodeNumber', 'Unknown Episode')

                            try:
                                season_episode = f"S{season_number:02d}E{episode_number:02d}"
                            except (ValueError, TypeError):
                                season_episode = f"S{season_number}E{episode_number}"

                            # Record the upgrade in history with quality upgrade identifier
                            media_name = f"{series_title} - {season_episode} - {episode_title}"
                            log_processed_media("sonarr", media_name, episode_id, instance_name, "upgrade")
                            sonarr_logger.debug(f"Logged quality upgrade to history for episode ID {episode_id}")
                    except Exception as e:
                        sonarr_logger.error(f"Failed to log history for episode ID {episode_id}: {str(e)}")
            else:
                sonarr_logger.warning(f"Episode upgrade search command (ID: {search_command_id}) for series {series_id} did not complete successfully or timed out. Episodes will not be marked as processed yet.")
        else:
            sonarr_logger.error(f"Failed to trigger upgrade search command for episodes {episode_ids} in series {series_id}.")

    sonarr_logger.info("Finished quality cutoff upgrades processing cycle for Sonarr.")
    return processed_any
+
def process_upgrade_seasons_mode(
    api_url: str,
    api_key: str,
    instance_name: str,
    api_timeout: int,
    monitored_only: bool,
    skip_series_refresh: bool,
    hunt_upgrade_items: int,
    command_wait_delay: int,
    command_wait_attempts: int,
    stop_check: Callable[[], bool]
) -> bool:
    """Process upgrades in season mode - groups episodes by season.

    Groups all cutoff-unmet episodes by (series, season), randomly selects up
    to hunt_upgrade_items seasons, and triggers a refresh (optional) plus an
    upgrade search for each selected season's episodes.

    Returns:
        True if at least one season's search command completed successfully.
    """
    processed_any = False

    # Get all cutoff unmet episodes
    cutoff_unmet_episodes = sonarr_api.get_cutoff_unmet_episodes(api_url, api_key, api_timeout, monitored_only)
    sonarr_logger.info(f"Received {len(cutoff_unmet_episodes)} cutoff unmet episodes from Sonarr API (before filtering).")

    if not cutoff_unmet_episodes:
        sonarr_logger.info("No cutoff unmet episodes found in Sonarr.")
        return False

    # Filter out future episodes if configured
    # NOTE(review): gated on skip_series_refresh, which reads like a refresh
    # toggle rather than an air-date filter — confirm intent.
    if skip_series_refresh:
        now_unix = time.time()
        original_count = len(cutoff_unmet_episodes)
        # Ensure airDateUtc exists and is not None before parsing.
        # airDateUtc is UTC, so convert with calendar.timegm; the previous
        # time.mktime() call treated it as local time and skewed the cutoff.
        cutoff_unmet_episodes = [
            ep for ep in cutoff_unmet_episodes
            if ep.get('airDateUtc') and calendar.timegm(time.strptime(ep['airDateUtc'], '%Y-%m-%dT%H:%M:%SZ')) < now_unix
        ]
        skipped_count = original_count - len(cutoff_unmet_episodes)
        if skipped_count > 0:
            sonarr_logger.info(f"Skipped {skipped_count} future episodes based on air date for upgrades.")

    if stop_check():
        sonarr_logger.info("Stop requested during upgrade processing.")
        return processed_any

    # Group episodes by series and season
    series_season_episodes: Dict[int, Dict[int, List[Dict]]] = {}
    for episode in cutoff_unmet_episodes:
        series_id = episode.get('seriesId')
        season_number = episode.get('seasonNumber')

        if series_id is not None and season_number is not None:
            if series_id not in series_season_episodes:
                series_season_episodes[series_id] = {}

            if season_number not in series_season_episodes[series_id]:
                series_season_episodes[series_id][season_number] = []

            series_season_episodes[series_id][season_number].append(episode)

    # Create a list of (series_id, season_number) tuples for selection
    available_seasons = []
    for series_id, seasons in series_season_episodes.items():
        for season_number, episodes in seasons.items():
            # Get series title from the first episode for this season
            series_title = episodes[0].get('series', {}).get('title', f"Series ID {series_id}")
            available_seasons.append((series_id, season_number, len(episodes), series_title))

    if not available_seasons:
        sonarr_logger.info("No valid seasons with cutoff unmet episodes found.")
        return False

    # Select seasons to process - always randomly
    random.shuffle(available_seasons)
    seasons_to_process = available_seasons[:hunt_upgrade_items]

    sonarr_logger.info(f"Selected {len(seasons_to_process)} seasons with cutoff unmet episodes to process")

    # Log selected seasons
    for idx, (series_id, season_number, episode_count, series_title) in enumerate(seasons_to_process):
        sonarr_logger.info(f" {idx+1}. {series_title} - Season {season_number} - {episode_count} cutoff unmet episodes")

    # Process each selected season
    for series_id, season_number, _, series_title in seasons_to_process:
        if stop_check():
            sonarr_logger.info("Stop requested before processing next season.")
            break

        episodes = series_season_episodes[series_id][season_number]
        episode_ids = [episode["id"] for episode in episodes]

        sonarr_logger.info(f"Processing {series_title} - Season {season_number} with {len(episode_ids)} cutoff unmet episodes")

        # Refresh series metadata if not skipped
        if not skip_series_refresh:
            sonarr_logger.debug(f"Attempting to refresh series ID: {series_id}")
            refresh_command_id = sonarr_api.refresh_series(api_url, api_key, api_timeout, series_id)
            if refresh_command_id:
                # Wait for refresh command to complete
                if not wait_for_command(
                    api_url, api_key, api_timeout, refresh_command_id,
                    command_wait_delay, command_wait_attempts, "Series Refresh (Upgrade)", stop_check
                ):
                    sonarr_logger.warning(f"Series refresh command for {series_title} did not complete successfully or timed out.")
            else:
                sonarr_logger.warning(f"Failed to trigger refresh command for series {series_title}")

        if stop_check():
            sonarr_logger.info("Stop requested after series refresh attempt.")
            break

        # Trigger search for the selected episodes in this season
        sonarr_logger.debug(f"Attempting to search for {len(episode_ids)} episodes in {series_title} Season {season_number} for upgrades")
        search_command_id = sonarr_api.search_episode(api_url, api_key, api_timeout, episode_ids)

        if search_command_id:
            # Wait for search command to complete
            if wait_for_command(
                api_url, api_key, api_timeout, search_command_id,
                command_wait_delay, command_wait_attempts, "Episode Upgrade Search", stop_check
            ):
                # Mark as processed if search command completed successfully
                processed_any = True
                sonarr_logger.info(f"Successfully processed {len(episode_ids)} cutoff unmet episodes in {series_title} Season {season_number}")

                # Mark episodes as processed using stateful management
                for episode_id in episode_ids:
                    add_processed_id("sonarr", instance_name, str(episode_id))
                    sonarr_logger.debug(f"Marked episode ID {episode_id} as processed for upgrades")

                    # Increment stats for this episode (consistent with Radarr's approach)
                    increment_stat("sonarr", "upgraded")
                    sonarr_logger.debug(f"Incremented sonarr upgraded statistic for episode {episode_id}")

                    # Fetch episode details so the history entry has readable info
                    try:
                        episode_details = sonarr_api.get_episode(api_url, api_key, api_timeout, episode_id)
                        if episode_details:
                            series_title = episode_details.get('series', {}).get('title', 'Unknown Series')
                            episode_title = episode_details.get('title', 'Unknown Episode')
                            season_number = episode_details.get('seasonNumber', 'Unknown Season')
                            episode_number = episode_details.get('episodeNumber', 'Unknown Episode')

                            try:
                                season_episode = f"S{season_number:02d}E{episode_number:02d}"
                            except (ValueError, TypeError):
                                season_episode = f"S{season_number}E{episode_number}"

                            # Record the upgrade in history with quality upgrade identifier
                            media_name = f"{series_title} - {season_episode} - {episode_title}"
                            log_processed_media("sonarr", media_name, episode_id, instance_name, "upgrade")
                            sonarr_logger.debug(f"Logged quality upgrade to history for episode ID {episode_id}")
                    except Exception as e:
                        sonarr_logger.error(f"Failed to log history for episode ID {episode_id}: {str(e)}")
            else:
                sonarr_logger.warning(f"Episode upgrade search command for {series_title} Season {season_number} did not complete successfully")
        else:
            sonarr_logger.error(f"Failed to trigger upgrade search command for {series_title} Season {season_number}")

    sonarr_logger.info("Finished quality cutoff upgrades processing cycle (season mode) for Sonarr.")
    return processed_any
+
def process_upgrade_shows_mode(
    api_url: str,
    api_key: str,
    instance_name: str,
    api_timeout: int,
    monitored_only: bool,
    skip_series_refresh: bool,
    hunt_upgrade_items: int,
    command_wait_delay: int,
    command_wait_attempts: int,
    stop_check: Callable[[], bool]
) -> bool:
    """Process upgrades in show mode - gets all cutoff unmet episodes for entire shows.

    Groups all cutoff-unmet episodes by series, randomly selects up to
    hunt_upgrade_items series, and triggers a refresh (optional) plus an
    upgrade search covering every cutoff-unmet episode in each series.

    Returns:
        True if at least one series' search command completed successfully.
    """
    processed_any = False

    # Get all cutoff unmet episodes
    cutoff_unmet_episodes = sonarr_api.get_cutoff_unmet_episodes(api_url, api_key, api_timeout, monitored_only)
    sonarr_logger.info(f"Received {len(cutoff_unmet_episodes)} cutoff unmet episodes from Sonarr API (before filtering).")

    if not cutoff_unmet_episodes:
        sonarr_logger.info("No cutoff unmet episodes found in Sonarr.")
        return False

    # Filter out future episodes if configured
    # NOTE(review): gated on skip_series_refresh, which reads like a refresh
    # toggle rather than an air-date filter — confirm intent.
    if skip_series_refresh:
        now_unix = time.time()
        original_count = len(cutoff_unmet_episodes)
        # Ensure airDateUtc exists and is not None before parsing.
        # airDateUtc is UTC, so convert with calendar.timegm; the previous
        # time.mktime() call treated it as local time and skewed the cutoff.
        cutoff_unmet_episodes = [
            ep for ep in cutoff_unmet_episodes
            if ep.get('airDateUtc') and calendar.timegm(time.strptime(ep['airDateUtc'], '%Y-%m-%dT%H:%M:%SZ')) < now_unix
        ]
        skipped_count = original_count - len(cutoff_unmet_episodes)
        if skipped_count > 0:
            sonarr_logger.info(f"Skipped {skipped_count} future episodes based on air date for upgrades.")

    if stop_check():
        sonarr_logger.info("Stop requested during upgrade processing.")
        return processed_any

    # Group episodes by series
    series_episodes: Dict[int, List[Dict]] = {}
    series_titles: Dict[int, str] = {}  # Keep track of series titles

    for episode in cutoff_unmet_episodes:
        series_id = episode.get('seriesId')
        if series_id is not None:
            if series_id not in series_episodes:
                series_episodes[series_id] = []
                # Store series title when first encountering the series ID
                series_titles[series_id] = episode.get('series', {}).get('title', f"Series ID {series_id}")

            series_episodes[series_id].append(episode)

    # Create a list of (series_id, episode_count, series_title) tuples for selection
    available_series = [(series_id, len(episodes), series_titles[series_id])
                        for series_id, episodes in series_episodes.items()]

    if not available_series:
        sonarr_logger.info("No series with cutoff unmet episodes found.")
        return False

    # Select series to process - always randomly
    random.shuffle(available_series)
    series_to_process = available_series[:hunt_upgrade_items]

    sonarr_logger.info(f"Selected {len(series_to_process)} series with cutoff unmet episodes to process")

    # Log selected series
    for idx, (series_id, episode_count, series_title) in enumerate(series_to_process):
        sonarr_logger.info(f" {idx+1}. {series_title} - {episode_count} cutoff unmet episodes")

    # Process each selected series
    for series_id, _, series_title in series_to_process:
        if stop_check():
            sonarr_logger.info("Stop requested before processing next series.")
            break

        episodes = series_episodes[series_id]
        episode_ids = [episode["id"] for episode in episodes]

        sonarr_logger.info(f"Processing {series_title} with {len(episode_ids)} cutoff unmet episodes")

        # Refresh series metadata if not skipped
        if not skip_series_refresh:
            sonarr_logger.debug(f"Attempting to refresh series ID: {series_id}")
            refresh_command_id = sonarr_api.refresh_series(api_url, api_key, api_timeout, series_id)
            if refresh_command_id:
                # Wait for refresh command to complete
                if not wait_for_command(
                    api_url, api_key, api_timeout, refresh_command_id,
                    command_wait_delay, command_wait_attempts, "Series Refresh (Upgrade)", stop_check
                ):
                    sonarr_logger.warning(f"Series refresh command for {series_title} did not complete successfully or timed out.")
            else:
                sonarr_logger.warning(f"Failed to trigger refresh command for series {series_title}")

        if stop_check():
            sonarr_logger.info("Stop requested after series refresh attempt.")
            break

        # Trigger search for all cutoff unmet episodes in this series
        sonarr_logger.debug(f"Attempting to search for {len(episode_ids)} episodes in {series_title} for upgrades")
        search_command_id = sonarr_api.search_episode(api_url, api_key, api_timeout, episode_ids)

        if search_command_id:
            # Wait for search command to complete
            if wait_for_command(
                api_url, api_key, api_timeout, search_command_id,
                command_wait_delay, command_wait_attempts, "Episode Upgrade Search", stop_check
            ):
                # Mark as processed if search command completed successfully
                processed_any = True
                sonarr_logger.info(f"Successfully processed {len(episode_ids)} cutoff unmet episodes in {series_title}")

                # Mark episodes as processed using stateful management
                for episode_id in episode_ids:
                    add_processed_id("sonarr", instance_name, str(episode_id))
                    sonarr_logger.debug(f"Marked episode ID {episode_id} as processed for upgrades")

                    # Increment stats for this episode (consistent with Radarr's approach)
                    increment_stat("sonarr", "upgraded")
                    sonarr_logger.debug(f"Incremented sonarr upgraded statistic for episode {episode_id}")

                    # Fetch episode details so the history entry has readable info
                    try:
                        episode_details = sonarr_api.get_episode(api_url, api_key, api_timeout, episode_id)
                        if episode_details:
                            series_title = episode_details.get('series', {}).get('title', 'Unknown Series')
                            episode_title = episode_details.get('title', 'Unknown Episode')
                            season_number = episode_details.get('seasonNumber', 'Unknown Season')
                            episode_number = episode_details.get('episodeNumber', 'Unknown Episode')

                            try:
                                season_episode = f"S{season_number:02d}E{episode_number:02d}"
                            except (ValueError, TypeError):
                                season_episode = f"S{season_number}E{episode_number}"

                            # Record the upgrade in history with quality upgrade identifier
                            media_name = f"{series_title} - {season_episode} - {episode_title}"
                            log_processed_media("sonarr", media_name, episode_id, instance_name, "upgrade")
                            sonarr_logger.debug(f"Logged quality upgrade to history for episode ID {episode_id}")
                    except Exception as e:
                        sonarr_logger.error(f"Failed to log history for episode ID {episode_id}: {str(e)}")
            else:
                sonarr_logger.warning(f"Episode upgrade search command for {series_title} did not complete successfully")
        else:
            sonarr_logger.error(f"Failed to trigger upgrade search command for {series_title}")

    sonarr_logger.info("Finished quality cutoff upgrades processing cycle (show mode) for Sonarr.")
    return processed_any
+
def wait_for_command(
    api_url: str,
    api_key: str,
    api_timeout: int,
    command_id: Union[int, str],
    wait_delay: int,
    max_attempts: int,
    command_name: str = "Command",
    stop_check: Callable[[], bool] = lambda: False
) -> bool:
    """
    Wait for a Sonarr command to complete or timeout.

    Args:
        api_url: The Sonarr API URL
        api_key: The Sonarr API key
        api_timeout: API request timeout
        command_id: The ID of the command to monitor
        wait_delay: Seconds to wait between status checks
        max_attempts: Maximum number of status check attempts
        command_name: Name of the command (for logging)
        stop_check: Optional function to check if operation should be aborted

    Returns:
        True if command completed successfully, False otherwise
    """
    # Waiting disabled entirely — report success without checking.
    if wait_delay <= 0 or max_attempts <= 0:
        sonarr_logger.debug(f"Not waiting for command to complete (wait_delay={wait_delay}, max_attempts={max_attempts})")
        return True

    sonarr_logger.debug(f"Waiting for {command_name} to complete (command ID: {command_id}). Checking every {wait_delay}s for up to {max_attempts} attempts")

    # Poll the command status until it settles or we exhaust our attempts.
    for attempt in range(max_attempts):
        if stop_check():
            sonarr_logger.info(f"Stopping wait for {command_name} due to stop request")
            return False

        status_payload = sonarr_api.get_command_status(api_url, api_key, api_timeout, command_id)
        if not status_payload:
            # Transient API failure — count the attempt and retry after the delay.
            sonarr_logger.warning(f"Failed to get status for {command_name} (ID: {command_id}), attempt {attempt+1}")
            time.sleep(wait_delay)
            continue

        current_state = status_payload.get('status')
        if current_state == 'completed':
            sonarr_logger.debug(f"Sonarr {command_name} (ID: {command_id}) completed successfully")
            return True
        if current_state in ('failed', 'aborted'):
            sonarr_logger.warning(f"Sonarr {command_name} (ID: {command_id}) {current_state}")
            return False

        # Still running/queued — log progress and poll again.
        sonarr_logger.debug(f"Sonarr {command_name} (ID: {command_id}) status: {current_state}, attempt {attempt+1}/{max_attempts}")
        time.sleep(wait_delay)

    sonarr_logger.error(f"Sonarr command '{command_name}' (ID: {command_id}) timed out after {max_attempts} attempts.")
    return False
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/sonarr_routes.py b/Huntarr.io-6.3.6/src/primary/apps/sonarr_routes.py
new file mode 100644
index 0000000..7dd0d7f
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/sonarr_routes.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python3
+
+from flask import Blueprint, request, jsonify
+import datetime, os, requests
+from src.primary import keys_manager
+from src.primary.state import get_state_file_path, reset_state_file
+from src.primary.utils.logger import get_logger
+import traceback
+import socket
+from urllib.parse import urlparse
+
# Flask blueprint for the Sonarr HTTP endpoints and the shared module logger.
sonarr_bp = Blueprint('sonarr', __name__)
sonarr_logger = get_logger("sonarr")

# Make sure we're using the correct state files
# (paths for the stateful tracking of already-processed missing/upgrade items)
PROCESSED_MISSING_FILE = get_state_file_path("sonarr", "processed_missing")
PROCESSED_UPGRADES_FILE = get_state_file_path("sonarr", "processed_upgrades")
+
@sonarr_bp.route('/test-connection', methods=['POST'])
def test_connection():
    """Test connection to a Sonarr API instance with comprehensive diagnostics.

    Expects a JSON body with api_url, api_key and optional api_timeout.
    Runs a quick TCP/DNS pre-check, then queries /api/v3/system/status and
    maps each failure mode to a specific HTTP status and message.
    """
    # request.json raises (or yields None) for a missing/malformed JSON body,
    # which previously crashed the handler; parse leniently so bad payloads
    # hit the clean 400 response below instead.
    data = request.get_json(silent=True) or {}
    api_url = data.get('api_url')
    api_key = data.get('api_key')
    api_timeout = data.get('api_timeout', 30)  # Use longer timeout for connection test

    if not api_url or not api_key:
        return jsonify({"success": False, "message": "API URL and API Key are required"}), 400

    # Log the test attempt
    sonarr_logger.info(f"Testing connection to Sonarr API at {api_url}")

    # First check if URL is properly formatted
    if not (api_url.startswith('http://') or api_url.startswith('https://')):
        error_msg = "API URL must start with http:// or https://"
        sonarr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 400

    # Try to establish a socket connection first to check basic connectivity
    parsed_url = urlparse(api_url)
    hostname = parsed_url.hostname
    port = parsed_url.port or (443 if parsed_url.scheme == 'https' else 80)

    try:
        # Try socket connection for quick feedback on connectivity issues
        # NOTE(review): AF_INET is IPv4-only — an IPv6-only host would fail
        # this pre-check; the generic except below lets the full request run.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(3)  # Short timeout for quick feedback
        result = sock.connect_ex((hostname, port))
        sock.close()

        if result != 0:
            error_msg = f"Connection refused - Unable to connect to {hostname}:{port}. Please check if the server is running and the port is correct."
            sonarr_logger.error(error_msg)
            return jsonify({"success": False, "message": error_msg}), 404
    except socket.gaierror:
        error_msg = f"DNS resolution failed - Cannot resolve hostname: {hostname}. Please check your URL."
        sonarr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 404
    except Exception as e:
        # Log the socket testing error but continue with the full request
        sonarr_logger.debug(f"Socket test error, continuing with full request: {str(e)}")

    # Create the test URL and set headers
    test_url = f"{api_url.rstrip('/')}/api/v3/system/status"
    headers = {'X-Api-Key': api_key}

    try:
        # Now proceed with the actual API request
        response = requests.get(test_url, headers=headers, timeout=(10, api_timeout))

        # For HTTP errors, provide more specific feedback
        if response.status_code == 401:
            error_msg = "Authentication failed: Invalid API key"
            sonarr_logger.error(error_msg)
            return jsonify({"success": False, "message": error_msg}), 401
        elif response.status_code == 403:
            error_msg = "Access forbidden: Check API key permissions"
            sonarr_logger.error(error_msg)
            return jsonify({"success": False, "message": error_msg}), 403
        elif response.status_code == 404:
            error_msg = "API endpoint not found: This doesn't appear to be a valid Sonarr server. Check your URL."
            sonarr_logger.error(error_msg)
            return jsonify({"success": False, "message": error_msg}), 404
        elif response.status_code >= 500:
            error_msg = f"Sonarr server error (HTTP {response.status_code}): The Sonarr server is experiencing issues"
            sonarr_logger.error(error_msg)
            return jsonify({"success": False, "message": error_msg}), response.status_code

        # Raise for other HTTP errors
        response.raise_for_status()

        # Log HTTP status code for diagnostic purposes
        sonarr_logger.debug(f"Sonarr API status code: {response.status_code}")

        # Ensure the response is valid JSON
        try:
            response_data = response.json()

            # We no longer save keys here since we use instances
            # keys_manager.save_api_keys("sonarr", api_url, api_key)

            sonarr_logger.info(f"Successfully connected to Sonarr API version: {response_data.get('version', 'unknown')}")

            # Return success with some useful information
            return jsonify({
                "success": True,
                "message": "Successfully connected to Sonarr API",
                "version": response_data.get('version', 'unknown')
            })
        except ValueError:
            error_msg = "Invalid JSON response from Sonarr API - This doesn't appear to be a valid Sonarr server"
            sonarr_logger.error(f"{error_msg}. Response content: {response.text[:200]}")
            return jsonify({"success": False, "message": error_msg}), 500

    except requests.exceptions.Timeout as e:
        error_msg = f"Connection timed out after {api_timeout} seconds"
        sonarr_logger.error(f"{error_msg}: {str(e)}")
        return jsonify({"success": False, "message": error_msg}), 504

    except requests.exceptions.ConnectionError as e:
        # Handle different types of connection errors
        error_details = str(e)
        if "Connection refused" in error_details:
            error_msg = f"Connection refused - Sonarr is not running on {api_url} or the port is incorrect"
        elif "Name or service not known" in error_details or "getaddrinfo failed" in error_details:
            error_msg = f"DNS resolution failed - Cannot find host '{urlparse(api_url).hostname}'. Check your URL."
        else:
            error_msg = f"Connection error - Check if Sonarr is running: {error_details}"

        sonarr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 404

    except requests.exceptions.RequestException as e:
        error_msg = f"Connection test failed: {str(e)}"
        sonarr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 500
diff --git a/Huntarr.io-6.3.6/src/primary/apps/swaparr.py b/Huntarr.io-6.3.6/src/primary/apps/swaparr.py
new file mode 100644
index 0000000..83d79d7
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/swaparr.py
@@ -0,0 +1,181 @@
+"""
+Swaparr module for Huntarr
+Handles stalled downloads in Starr apps based on the original Swaparr application
+"""
+
+from flask import Blueprint, request, jsonify
+import os
+import json
+from src.primary.utils.logger import get_logger
+from src.primary.settings_manager import load_settings, save_settings
+from src.primary.apps.swaparr.handler import process_stalled_downloads
+from src.primary.apps.radarr import get_configured_instances as get_radarr_instances
+from src.primary.apps.sonarr import get_configured_instances as get_sonarr_instances
+from src.primary.apps.lidarr import get_configured_instances as get_lidarr_instances
+from src.primary.apps.readarr import get_configured_instances as get_readarr_instances
+
+def get_configured_instances():
+    """Get all configured Starr app instances from their respective settings.
+
+    Returns:
+        dict: app name ("radarr", "sonarr", "lidarr", "readarr", "whisparr",
+        "eros") mapped to the list of instance settings returned by that
+        app's own get_configured_instances helper.
+    """
+    # Whisparr and Eros are optional modules: import them lazily and fall
+    # back to an empty instance list when the module is absent in this build.
+    try:
+        from src.primary.apps.whisparr import get_configured_instances as get_whisparr_instances
+        whisparr_instances = get_whisparr_instances()
+    except ImportError:
+        whisparr_instances = []
+
+    try:
+        from src.primary.apps.eros import get_configured_instances as get_eros_instances
+        eros_instances = get_eros_instances()
+    except ImportError:
+        eros_instances = []
+
+    instances = {
+        "radarr": get_radarr_instances(),
+        "sonarr": get_sonarr_instances(),
+        "lidarr": get_lidarr_instances(),
+        "readarr": get_readarr_instances(),
+        "whisparr": whisparr_instances,
+        "eros": eros_instances
+    }
+
+    logger = get_logger("swaparr")
+    logger.info(f"Found {sum(len(v) for v in instances.values())} configured Starr app instances")
+    return instances
+
+swaparr_bp = Blueprint('swaparr', __name__)
+swaparr_logger = get_logger("swaparr")
+
+@swaparr_bp.route('/status', methods=['GET'])
+def get_status():
+    """Get Swaparr status and statistics.
+
+    Scans every per-app directory under <CONFIG_DIR>/swaparr for a
+    strikes.json file and summarises it into total/striked/removed counts.
+    Returns a JSON payload with 'enabled', the key settings, and the
+    per-app 'statistics' dict.
+    """
+    settings = load_settings("swaparr")
+    enabled = settings.get("enabled", False)
+
+    # Get strike statistics from all app state directories
+    statistics = {}
+    state_dir = os.path.join(os.getenv("CONFIG_DIR", "/config"), "swaparr")
+
+    if os.path.exists(state_dir):
+        for app_name in os.listdir(state_dir):
+            app_dir = os.path.join(state_dir, app_name)
+            if os.path.isdir(app_dir):
+                strike_file = os.path.join(app_dir, "strikes.json")
+                if os.path.exists(strike_file):
+                    try:
+                        with open(strike_file, 'r') as f:
+                            strike_data = json.load(f)
+
+                        # "removed" entries stay in the file for reference;
+                        # count them separately from actively striked items.
+                        total_items = len(strike_data)
+                        removed_items = sum(1 for item in strike_data.values() if item.get("removed", False))
+                        striked_items = sum(1 for item in strike_data.values()
+                                           if item.get("strikes", 0) > 0 and not item.get("removed", False))
+
+                        statistics[app_name] = {
+                            "total_tracked": total_items,
+                            "currently_striked": striked_items,
+                            "removed": removed_items
+                        }
+                    except (json.JSONDecodeError, IOError) as e:
+                        # A corrupt per-app file should not break the whole
+                        # status response; report the error inline instead.
+                        swaparr_logger.error(f"Error reading strike data for {app_name}: {str(e)}")
+                        statistics[app_name] = {"error": str(e)}
+
+    return jsonify({
+        "enabled": enabled,
+        "settings": {
+            "max_strikes": settings.get("max_strikes", 3),
+            "max_download_time": settings.get("max_download_time", "2h"),
+            "ignore_above_size": settings.get("ignore_above_size", "25GB"),
+            "remove_from_client": settings.get("remove_from_client", True),
+            "dry_run": settings.get("dry_run", False)
+        },
+        "statistics": statistics
+    })
+
+@swaparr_bp.route('/settings', methods=['GET'])
+def get_settings():
+    """Return the stored Swaparr settings as a JSON object."""
+    settings = load_settings("swaparr")
+    return jsonify(settings)
+
+@swaparr_bp.route('/settings', methods=['POST'])
+def update_settings():
+    """Merge the posted JSON keys into the stored Swaparr settings.
+
+    Returns 400 when no JSON body is provided and 500 when persisting the
+    merged settings fails.
+    """
+    data = request.json
+
+    if not data:
+        return jsonify({"success": False, "message": "No data provided"}), 400
+
+    # Load current settings
+    settings = load_settings("swaparr")
+
+    # Shallow merge: every posted key overwrites the stored value as-is.
+    for key, value in data.items():
+        settings[key] = value
+
+    # Save updated settings
+    success = save_settings("swaparr", settings)
+
+    if success:
+        return jsonify({"success": True, "message": "Settings updated successfully"})
+    else:
+        return jsonify({"success": False, "message": "Failed to save settings"}), 500
+
+@swaparr_bp.route('/reset', methods=['POST'])
+def reset_strikes():
+ """Reset all strikes for all apps or a specific app"""
+ data = request.json
+ app_name = data.get('app_name') if data else None
+
+ state_dir = os.path.join(os.getenv("CONFIG_DIR", "/config"), "swaparr")
+
+ if not os.path.exists(state_dir):
+ return jsonify({"success": True, "message": "No strike data to reset"})
+
+ if app_name:
+ # Reset strikes for a specific app
+ app_dir = os.path.join(state_dir, app_name)
+ if os.path.exists(app_dir):
+ strike_file = os.path.join(app_dir, "strikes.json")
+ if os.path.exists(strike_file):
+ try:
+ os.remove(strike_file)
+ swaparr_logger.info(f"Reset strikes for {app_name}")
+ return jsonify({"success": True, "message": f"Strikes reset for {app_name}"})
+ except IOError as e:
+ swaparr_logger.error(f"Error resetting strikes for {app_name}: {str(e)}")
+ return jsonify({"success": False, "message": f"Failed to reset strikes for {app_name}: {str(e)}"}), 500
+ return jsonify({"success": False, "message": f"No strike data found for {app_name}"}), 404
+ else:
+ # Reset strikes for all apps
+ try:
+ for app_name in os.listdir(state_dir):
+ app_dir = os.path.join(state_dir, app_name)
+ if os.path.isdir(app_dir):
+ strike_file = os.path.join(app_dir, "strikes.json")
+ if os.path.exists(strike_file):
+ os.remove(strike_file)
+
+ swaparr_logger.info("Reset all strikes")
+ return jsonify({"success": True, "message": "All strikes reset"})
+ except IOError as e:
+ swaparr_logger.error(f"Error resetting all strikes: {str(e)}")
+ return jsonify({"success": False, "message": f"Failed to reset all strikes: {str(e)}"}), 500
+
+def is_configured():
+    """Check if Swaparr has any configured Starr app instances."""
+    instances = get_configured_instances()
+    # True as soon as any app reports at least one configured instance.
+    return any(len(app_instances) > 0 for app_instances in instances.values())
+
+def run_swaparr():
+    """Run Swaparr cycle to check for stalled downloads in all configured Starr app instances.
+
+    No-op when Swaparr is disabled in its settings. Otherwise iterates every
+    app type and every instance of it, delegating the actual strike/remove
+    logic to process_stalled_downloads.
+    """
+    settings = load_settings("swaparr")
+
+    if not settings or not settings.get("enabled", False):
+        swaparr_logger.debug("Swaparr is disabled, skipping run")
+        return
+
+    instances = get_configured_instances()
+
+    # Process stalled downloads for each app type and instance; the shared
+    # swaparr settings are passed down so they are loaded only once.
+    for app_name, app_instances in instances.items():
+        for app_settings in app_instances:
+            process_stalled_downloads(app_name, app_settings, settings)
diff --git a/Huntarr.io-6.3.6/src/primary/apps/swaparr/__init__.py b/Huntarr.io-6.3.6/src/primary/apps/swaparr/__init__.py
new file mode 100644
index 0000000..79fd6bc
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/swaparr/__init__.py
@@ -0,0 +1,16 @@
+"""
+Swaparr app module for Huntarr
+Contains functionality for handling stalled downloads in Starr apps
+"""
+
+# Add necessary imports for get_configured_instances
+from src.primary.settings_manager import load_settings
+from src.primary.utils.logger import get_logger
+
+swaparr_logger = get_logger("swaparr") # Get the logger instance
+
+# We don't need the get_configured_instances function here anymore as it's defined in swaparr.py
+# to avoid circular imports
+
+# Export just the swaparr_logger for now
+__all__ = ["swaparr_logger"]
diff --git a/Huntarr.io-6.3.6/src/primary/apps/swaparr/handler.py b/Huntarr.io-6.3.6/src/primary/apps/swaparr/handler.py
new file mode 100644
index 0000000..26c463c
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/swaparr/handler.py
@@ -0,0 +1,467 @@
+"""
+Implementation of the swaparr functionality to detect and remove stalled downloads in Starr apps.
+Based on the functionality provided by https://github.com/ThijmenGThN/swaparr
+"""
+
+import os
+import json
+import time
+import hashlib
+from datetime import datetime, timedelta
+import requests
+
+from src.primary.utils.logger import get_logger
+from src.primary.settings_manager import load_settings
+from src.primary.state import get_state_file_path
+
+# Create logger
+swaparr_logger = get_logger("swaparr")
+
+# Create state directory for tracking strikes
+SWAPARR_STATE_DIR = os.path.join(os.getenv("CONFIG_DIR", "/config"), "swaparr")
+
+def ensure_state_directory(app_name):
+    """Ensure the state directory exists for tracking strikes for a specific app.
+
+    Returns the per-app directory path (<SWAPARR_STATE_DIR>/<app_name>),
+    creating it on first use.
+    """
+    app_state_dir = os.path.join(SWAPARR_STATE_DIR, app_name)
+    if not os.path.exists(app_state_dir):
+        os.makedirs(app_state_dir, exist_ok=True)
+        swaparr_logger.info(f"Created swaparr state directory for {app_name}: {app_state_dir}")
+    return app_state_dir
+
+def load_strike_data(app_name):
+    """Load strike data for a specific app.
+
+    Returns the parsed strikes.json dict, or {} when the file is missing
+    or unreadable (a corrupt file is logged and treated as empty).
+    """
+    app_state_dir = ensure_state_directory(app_name)
+    strike_file = os.path.join(app_state_dir, "strikes.json")
+
+    if not os.path.exists(strike_file):
+        return {}
+
+    try:
+        with open(strike_file, 'r') as f:
+            return json.load(f)
+    except (json.JSONDecodeError, IOError) as e:
+        swaparr_logger.error(f"Error loading strike data for {app_name}: {str(e)}")
+        return {}
+
+def save_strike_data(app_name, strike_data):
+    """Save strike data for a specific app.
+
+    Best-effort write of strikes.json; an IOError is logged, not raised.
+    """
+    app_state_dir = ensure_state_directory(app_name)
+    strike_file = os.path.join(app_state_dir, "strikes.json")
+
+    try:
+        with open(strike_file, 'w') as f:
+            json.dump(strike_data, f, indent=2)
+    except IOError as e:
+        swaparr_logger.error(f"Error saving strike data for {app_name}: {str(e)}")
+
+def load_removed_items(app_name):
+    """Load list of permanently removed items.
+
+    Returns the parsed removed_items.json dict (keyed by item hash), or {}
+    when the file is missing or unreadable.
+    """
+    app_state_dir = ensure_state_directory(app_name)
+    removed_file = os.path.join(app_state_dir, "removed_items.json")
+
+    if not os.path.exists(removed_file):
+        return {}
+
+    try:
+        with open(removed_file, 'r') as f:
+            return json.load(f)
+    except (json.JSONDecodeError, IOError) as e:
+        swaparr_logger.error(f"Error loading removed items for {app_name}: {str(e)}")
+        return {}
+
+def save_removed_items(app_name, removed_items):
+    """Save list of permanently removed items.
+
+    Best-effort write of removed_items.json; an IOError is logged, not raised.
+    """
+    app_state_dir = ensure_state_directory(app_name)
+    removed_file = os.path.join(app_state_dir, "removed_items.json")
+
+    try:
+        with open(removed_file, 'w') as f:
+            json.dump(removed_items, f, indent=2)
+    except IOError as e:
+        swaparr_logger.error(f"Error saving removed items for {app_name}: {str(e)}")
+
+def generate_item_hash(item):
+    """Generate a unique hash for an item based on its name and size.
+    This helps track items across restarts even if their queue ID changes.
+
+    MD5 is used purely as a fast fingerprint here, not for security.
+    """
+    hash_input = f"{item['name']}_{item['size']}"
+    return hashlib.md5(hash_input.encode('utf-8')).hexdigest()
+
+def parse_time_string_to_seconds(time_string):
+    """Parse a time string like '2h', '30m', '1d' to seconds.
+
+    Falls back to 7200 (2 hours) for empty, malformed, or unknown-unit
+    input. NOTE(review): a purely numeric string such as '90' loses its
+    last digit to the unit slot and hits the unknown-unit fallback —
+    confirm callers always pass a unit suffix.
+    """
+    if not time_string:
+        return 7200  # Default 2 hours
+
+    # Last character is the unit, the rest is the numeric value.
+    unit = time_string[-1].lower()
+    try:
+        value = int(time_string[:-1])
+    except ValueError:
+        swaparr_logger.error(f"Invalid time string: {time_string}, using default 2 hours")
+        return 7200
+
+    if unit == 'd':
+        return value * 86400  # Days to seconds
+    elif unit == 'h':
+        return value * 3600  # Hours to seconds
+    elif unit == 'm':
+        return value * 60  # Minutes to seconds
+    else:
+        swaparr_logger.error(f"Unknown time unit in: {time_string}, using default 2 hours")
+        return 7200
+
+def parse_size_string_to_bytes(size_string):
+    """Parse a size string like '25GB', '1TB' to bytes.
+
+    Falls back to 25 GiB for empty, malformed, or unknown-unit input.
+    NOTE(review): a unitless string such as '1024' yields unit '' and hits
+    the unknown-unit fallback rather than being read as bytes — confirm
+    callers always pass a unit suffix.
+    """
+    if not size_string:
+        return 25 * 1024 * 1024 * 1024  # Default 25GB
+
+    # Scan backwards for the last non-alphabetic character: everything up to
+    # and including it is the numeric value, the rest is the unit.
+    unit = ""
+    for i in range(len(size_string) - 1, -1, -1):
+        if not size_string[i].isalpha():
+            value = float(size_string[:i+1])
+            unit = size_string[i+1:].upper()
+            break
+    else:
+        # Loop completed without a break: the string was all letters.
+        swaparr_logger.error(f"Invalid size string: {size_string}, using default 25GB")
+        return 25 * 1024 * 1024 * 1024
+
+    # Convert to bytes based on unit
+    if unit == 'B':
+        return int(value)
+    elif unit == 'KB':
+        return int(value * 1024)
+    elif unit == 'MB':
+        return int(value * 1024 * 1024)
+    elif unit == 'GB':
+        return int(value * 1024 * 1024 * 1024)
+    elif unit == 'TB':
+        return int(value * 1024 * 1024 * 1024 * 1024)
+    else:
+        swaparr_logger.error(f"Unknown size unit in: {size_string}, using default 25GB")
+        return 25 * 1024 * 1024 * 1024
+
+def get_queue_items(app_name, api_url, api_key, api_timeout=120):
+    """Get download queue items from a Starr app API with pagination support.
+
+    Pages through /api/<version>/queue until all records are fetched, then
+    normalises them via parse_queue_items. Returns a list of standardised
+    item dicts ([] on unknown app type or total request failure).
+    """
+    # Map each app to the API version its queue endpoint lives under;
+    # unknown apps default to v3.
+    api_version_map = {
+        "radarr": "v3",
+        "sonarr": "v3",
+        "lidarr": "v1",
+        "readarr": "v1",
+        "whisparr": "v3"
+    }
+
+    api_version = api_version_map.get(app_name, "v3")
+
+    # Initialize an empty list to store all records
+    all_records = []
+
+    # Start with page 1
+    page = 1
+    page_size = 100  # Request a large page size to reduce API calls
+
+    while True:
+        # Add pagination parameters
+        queue_url = f"{api_url.rstrip('/')}/api/{api_version}/queue?page={page}&pageSize={page_size}"
+        headers = {'X-Api-Key': api_key}
+
+        try:
+            response = requests.get(queue_url, headers=headers, timeout=api_timeout)
+            response.raise_for_status()
+            queue_data = response.json()
+
+            if api_version in ["v3"]:  # Radarr, Sonarr, Whisparr use v3
+                records = queue_data.get("records", [])
+                total_records = queue_data.get("totalRecords", 0)
+            else:  # Lidarr, Readarr use v1
+                # assumes the v1 queue endpoint returns a bare list rather
+                # than a paginated envelope — TODO confirm against the API
+                records = queue_data
+                total_records = len(records)
+
+            # Add this page's records to our collection
+            all_records.extend(records)
+
+            # If we've fetched all records or there are no more, break the loop
+            if len(all_records) >= total_records or len(records) == 0:
+                break
+
+            # Otherwise, move to the next page
+            page += 1
+
+        except requests.exceptions.RequestException as e:
+            # Keep whatever pages were already fetched; a partial queue is
+            # better than none for striking purposes.
+            swaparr_logger.error(f"Error fetching queue for {app_name} (page {page}): {str(e)}")
+            break
+
+    swaparr_logger.info(f"Fetched {len(all_records)} queue items for {app_name}")
+
+    # Normalize the response based on app type
+    if app_name in ["radarr", "whisparr", "eros"]:
+        return parse_queue_items(all_records, "movie", app_name)
+    elif app_name == "sonarr":
+        return parse_queue_items(all_records, "series", app_name)
+    elif app_name == "lidarr":
+        return parse_queue_items(all_records, "album", app_name)
+    elif app_name == "readarr":
+        return parse_queue_items(all_records, "book", app_name)
+    else:
+        swaparr_logger.error(f"Unknown app type: {app_name}")
+        return []
+
+def parse_queue_items(records, item_type, app_name):
+    """Parse queue items from API response into a standardized format.
+
+    Each returned dict has the keys: id, name, size, status (lower-cased),
+    eta (seconds remaining, 0 if unknown), and error_message.
+    """
+    queue_items = []
+
+    for record in records:
+        # Skip non-dictionary records
+        if not isinstance(record, dict):
+            swaparr_logger.warning(f"Skipping non-dictionary record in {app_name} queue: {record}")
+            continue
+
+        # Extract the name based on the item type
+        name = None
+        if item_type == "movie" and record.get("movie"):
+            name = record["movie"].get("title", "Unknown Movie")
+        elif item_type == "series" and record.get("series"):
+            name = record["series"].get("title", "Unknown Series")
+        elif item_type == "album" and record.get("album"):
+            name = record["album"].get("title", "Unknown Album")
+        elif item_type == "book" and record.get("book"):
+            name = record["book"].get("title", "Unknown Book")
+
+        # If no name was found, try to use the download title
+        if not name and record.get("title"):
+            name = record.get("title", "Unknown Download")
+
+        # Parse ETA if available
+        eta_seconds = 0
+        if record.get("timeleft"):
+            eta = record.get("timeleft", "")
+            # Basic parsing of timeleft format like "00:30:00" (30 minutes);
+            # anything else (e.g. day-prefixed values) falls back to 0.
+            try:
+                eta_parts = eta.split(':')
+                if len(eta_parts) == 3:
+                    eta_seconds = int(eta_parts[0]) * 3600 + int(eta_parts[1]) * 60 + int(eta_parts[2])
+            except (ValueError, IndexError):
+                eta_seconds = 0
+
+        queue_items.append({
+            "id": record.get("id"),
+            "name": name,
+            "size": record.get("size", 0),
+            "status": record.get("status", "unknown").lower(),
+            "eta": eta_seconds,
+            "error_message": record.get("errorMessage", "")
+        })
+
+    return queue_items
+
+def delete_download(app_name, api_url, api_key, download_id, remove_from_client=True, api_timeout=120):
+    """Delete a download from a Starr app.
+
+    Issues DELETE /api/<version>/queue/<id>; the release is always added to
+    the blocklist (blocklist=true) so it is not re-grabbed. Returns True on
+    success, False on any request failure.
+    """
+    api_version_map = {
+        "radarr": "v3",
+        "sonarr": "v3",
+        "lidarr": "v1",
+        "readarr": "v1",
+        "whisparr": "v3"
+    }
+
+    api_version = api_version_map.get(app_name, "v3")
+    delete_url = f"{api_url.rstrip('/')}/api/{api_version}/queue/{download_id}?removeFromClient={str(remove_from_client).lower()}&blocklist=true"
+    headers = {'X-Api-Key': api_key}
+
+    try:
+        response = requests.delete(delete_url, headers=headers, timeout=api_timeout)
+        response.raise_for_status()
+        swaparr_logger.info(f"Successfully removed download {download_id} from {app_name}")
+        return True
+    except requests.exceptions.RequestException as e:
+        swaparr_logger.error(f"Error removing download {download_id} from {app_name}: {str(e)}")
+        return False
+
+def process_stalled_downloads(app_name, app_settings, swaparr_settings=None):
+    """Process stalled downloads for a specific app instance.
+
+    Core Swaparr cycle for one instance: fetch the queue, add strikes to
+    downloads that look stalled (metadata issues, excessive ETA, or no
+    progress), remove downloads that reach max_strikes, and persist both the
+    strike state and a 30-day memory of removed items so reappearing
+    downloads are re-removed automatically. Honors dry_run by logging what
+    would happen without calling the API.
+    """
+    if not swaparr_settings:
+        swaparr_settings = load_settings("swaparr")
+
+    if not swaparr_settings or not swaparr_settings.get("enabled", False):
+        swaparr_logger.debug(f"Swaparr is disabled, skipping {app_name} instance: {app_settings.get('instance_name', 'Unknown')}")
+        return
+
+    swaparr_logger.info(f"Processing stalled downloads for {app_name} instance: {app_settings.get('instance_name', 'Unknown')}")
+
+    # Get settings
+    max_strikes = swaparr_settings.get("max_strikes", 3)
+    max_download_time = parse_time_string_to_seconds(swaparr_settings.get("max_download_time", "2h"))
+    ignore_above_size = parse_size_string_to_bytes(swaparr_settings.get("ignore_above_size", "25GB"))
+    remove_from_client = swaparr_settings.get("remove_from_client", True)
+    dry_run = swaparr_settings.get("dry_run", False)
+
+    api_url = app_settings.get("api_url")
+    api_key = app_settings.get("api_key")
+    api_timeout = app_settings.get("api_timeout", 120)
+
+    if not api_url or not api_key:
+        swaparr_logger.error(f"Missing API URL or API Key for {app_name} instance: {app_settings.get('instance_name', 'Unknown')}")
+        return
+
+    # Load existing strike data
+    strike_data = load_strike_data(app_name)
+
+    # Load list of permanently removed items
+    removed_items = load_removed_items(app_name)
+
+    # Clean up expired removed items (older than 30 days)
+    now = datetime.utcnow()
+    for item_hash in list(removed_items.keys()):
+        removed_date = datetime.fromisoformat(removed_items[item_hash]["removed_time"].replace('Z', '+00:00'))
+        if (now - removed_date) > timedelta(days=30):
+            swaparr_logger.debug(f"Removing expired entry from removed items list: {removed_items[item_hash]['name']}")
+            del removed_items[item_hash]
+
+    # Get current queue items
+    queue_items = get_queue_items(app_name, api_url, api_key, api_timeout)
+
+    if not queue_items:
+        swaparr_logger.info(f"No queue items found for {app_name} instance: {app_settings.get('instance_name', 'Unknown')}")
+        return
+
+    # Keep track of items still in queue for cleanup
+    current_item_ids = set(item["id"] for item in queue_items)
+
+    # Clean up items that are no longer in the queue
+    # (strike keys are stored as strings, queue ids as ints).
+    for item_id in list(strike_data.keys()):
+        if int(item_id) not in current_item_ids:
+            swaparr_logger.debug(f"Removing item {item_id} from strike list as it's no longer in the queue")
+            del strike_data[item_id]
+
+    # Process each queue item
+    for item in queue_items:
+        item_id = str(item["id"])
+        item_state = "Normal"
+        item_hash = generate_item_hash(item)
+
+        # Check if this item has been previously removed
+        if item_hash in removed_items:
+            last_removed_date = datetime.fromisoformat(removed_items[item_hash]["removed_time"].replace('Z', '+00:00'))
+            days_since_removal = (now - last_removed_date).days
+
+            # Re-remove it automatically if it's been less than 7 days since last removal
+            if days_since_removal < 7:
+                swaparr_logger.warning(f"Found previously removed download that reappeared: {item['name']} (removed {days_since_removal} days ago)")
+
+                if not dry_run:
+                    if delete_download(app_name, api_url, api_key, item["id"], remove_from_client, api_timeout):
+                        swaparr_logger.info(f"Re-removed previously removed download: {item['name']}")
+                        # Update the removal time
+                        removed_items[item_hash]["removed_time"] = datetime.utcnow().isoformat()
+                else:
+                    swaparr_logger.info(f"DRY RUN: Would have re-removed previously removed download: {item['name']}")
+
+                item_state = "Re-removed" if not dry_run else "Would Re-remove (Dry Run)"
+                continue
+
+        # Skip large files if configured
+        if item["size"] >= ignore_above_size:
+            swaparr_logger.debug(f"Ignoring large download: {item['name']} ({item['size']} bytes > {ignore_above_size} bytes)")
+            item_state = "Ignored (Size)"
+            continue
+
+        # Handle delayed items - we'll skip these
+        if item["status"] == "delay":
+            swaparr_logger.debug(f"Ignoring delayed download: {item['name']}")
+            item_state = "Ignored (Delayed)"
+            continue
+
+        # Special handling for "queued" status
+        # We only skip truly queued items, not those with metadata issues
+        metadata_issue = "metadata" in item["status"].lower() or "metadata" in item["error_message"].lower()
+
+        if item["status"] == "queued" and not metadata_issue:
+            # For regular queued items, check how long they've been in strike data
+            if item_id in strike_data and "first_strike_time" in strike_data[item_id]:
+                first_strike = datetime.fromisoformat(strike_data[item_id]["first_strike_time"].replace('Z', '+00:00'))
+                if (now - first_strike) < timedelta(hours=1):
+                    # Skip if it's been less than 1 hour since first seeing it
+                    swaparr_logger.debug(f"Ignoring recently queued download: {item['name']}")
+                    item_state = "Ignored (Recently Queued)"
+                    continue
+            else:
+                # Initialize with first strike time for queued items
+                if item_id not in strike_data:
+                    strike_data[item_id] = {
+                        "strikes": 0,
+                        "name": item["name"],
+                        "first_strike_time": datetime.utcnow().isoformat(),
+                        "last_strike_time": None
+                    }
+                swaparr_logger.debug(f"Monitoring new queued download: {item['name']}")
+                item_state = "Monitoring (Queued)"
+                continue
+
+        # Initialize strike count if not already in strike data
+        if item_id not in strike_data:
+            strike_data[item_id] = {
+                "strikes": 0,
+                "name": item["name"],
+                "first_strike_time": datetime.utcnow().isoformat(),
+                "last_strike_time": None
+            }
+
+        # Check if download should be striked
+        should_strike = False
+        strike_reason = ""
+
+        # Strike if metadata issue, eta too long, or no progress (eta = 0 and not queued)
+        if metadata_issue:
+            should_strike = True
+            strike_reason = "Metadata"
+        elif item["eta"] >= max_download_time:
+            should_strike = True
+            strike_reason = "ETA too long"
+        elif item["eta"] == 0 and item["status"] not in ["queued", "delay"]:
+            should_strike = True
+            strike_reason = "No progress"
+
+        # If we should strike this item, add a strike
+        if should_strike:
+            strike_data[item_id]["strikes"] += 1
+            strike_data[item_id]["last_strike_time"] = datetime.utcnow().isoformat()
+
+            if strike_data[item_id]["first_strike_time"] is None:
+                strike_data[item_id]["first_strike_time"] = datetime.utcnow().isoformat()
+
+            current_strikes = strike_data[item_id]["strikes"]
+            swaparr_logger.info(f"Added strike ({current_strikes}/{max_strikes}) to {item['name']} - Reason: {strike_reason}")
+
+            # If max strikes reached, remove the download
+            if current_strikes >= max_strikes:
+                swaparr_logger.warning(f"Max strikes reached for {item['name']}, removing download")
+
+                if not dry_run:
+                    if delete_download(app_name, api_url, api_key, item["id"], remove_from_client, api_timeout):
+                        swaparr_logger.info(f"Successfully removed {item['name']} after {max_strikes} strikes")
+
+                        # Keep the item in strike data for reference but mark as removed
+                        strike_data[item_id]["removed"] = True
+                        strike_data[item_id]["removed_time"] = datetime.utcnow().isoformat()
+
+                        # Add to removed items list for persistent tracking
+                        removed_items[item_hash] = {
+                            "name": item["name"],
+                            "size": item["size"],
+                            "removed_time": datetime.utcnow().isoformat(),
+                            "reason": strike_reason
+                        }
+                else:
+                    swaparr_logger.info(f"DRY RUN: Would have removed {item['name']} after {max_strikes} strikes")
+
+                item_state = "Removed" if not dry_run else "Would Remove (Dry Run)"
+            else:
+                item_state = f"Striked ({current_strikes}/{max_strikes})"
+
+        swaparr_logger.debug(f"Processed download: {item['name']} - State: {item_state}")
+
+    # Save updated strike data
+    save_strike_data(app_name, strike_data)
+
+    # Save updated removed items list
+    save_removed_items(app_name, removed_items)
+
+    swaparr_logger.info(f"Finished processing stalled downloads for {app_name} instance: {app_settings.get('instance_name', 'Unknown')}")
diff --git a/Huntarr.io-6.3.6/src/primary/apps/swaparr_routes.py b/Huntarr.io-6.3.6/src/primary/apps/swaparr_routes.py
new file mode 100644
index 0000000..58f86be
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/swaparr_routes.py
@@ -0,0 +1,134 @@
+"""
+Route definitions for Swaparr API endpoints.
+"""
+
+from flask import Blueprint, request, jsonify
+import os
+import json
+from src.primary.utils.logger import get_logger
+from src.primary.settings_manager import load_settings, save_settings
+from src.primary.apps.swaparr.handler import process_stalled_downloads
+
+# Create the blueprint directly in this file
+swaparr_bp = Blueprint('swaparr', __name__)
+swaparr_logger = get_logger("swaparr")
+
+@swaparr_bp.route('/status', methods=['GET'])
+def get_status():
+    """Get Swaparr status and statistics.
+
+    Scans every per-app directory under <CONFIG_DIR>/swaparr for a
+    strikes.json file and summarises it into total/striked/removed counts.
+    """
+    settings = load_settings("swaparr")
+    enabled = settings.get("enabled", False)
+
+    # Get strike statistics from all app state directories
+    statistics = {}
+    state_dir = os.path.join(os.getenv("CONFIG_DIR", "/config"), "swaparr")
+
+    if os.path.exists(state_dir):
+        for app_name in os.listdir(state_dir):
+            app_dir = os.path.join(state_dir, app_name)
+            if os.path.isdir(app_dir):
+                strike_file = os.path.join(app_dir, "strikes.json")
+                if os.path.exists(strike_file):
+                    try:
+                        with open(strike_file, 'r') as f:
+                            strike_data = json.load(f)
+
+                        # "removed" entries stay in the file for reference;
+                        # count them separately from actively striked items.
+                        total_items = len(strike_data)
+                        removed_items = sum(1 for item in strike_data.values() if item.get("removed", False))
+                        striked_items = sum(1 for item in strike_data.values()
+                                           if item.get("strikes", 0) > 0 and not item.get("removed", False))
+
+                        statistics[app_name] = {
+                            "total_tracked": total_items,
+                            "currently_striked": striked_items,
+                            "removed": removed_items
+                        }
+                    except (json.JSONDecodeError, IOError) as e:
+                        # A corrupt per-app file should not break the whole
+                        # status response; report the error inline instead.
+                        swaparr_logger.error(f"Error reading strike data for {app_name}: {str(e)}")
+                        statistics[app_name] = {"error": str(e)}
+
+    return jsonify({
+        "enabled": enabled,
+        "settings": {
+            "max_strikes": settings.get("max_strikes", 3),
+            "max_download_time": settings.get("max_download_time", "2h"),
+            "ignore_above_size": settings.get("ignore_above_size", "25GB"),
+            "remove_from_client": settings.get("remove_from_client", True),
+            "dry_run": settings.get("dry_run", False)
+        },
+        "statistics": statistics
+    })
+
+@swaparr_bp.route('/settings', methods=['GET'])
+def get_settings():
+    """Return the stored Swaparr settings as a JSON object."""
+    settings = load_settings("swaparr")
+    return jsonify(settings)
+
+@swaparr_bp.route('/settings', methods=['POST'])
+def update_settings():
+    """Merge the posted JSON keys into the stored Swaparr settings.
+
+    Returns 400 when no JSON body is provided and 500 when persisting the
+    merged settings fails.
+    """
+    data = request.json
+
+    if not data:
+        return jsonify({"success": False, "message": "No data provided"}), 400
+
+    # Load current settings
+    settings = load_settings("swaparr")
+
+    # Shallow merge: every posted key overwrites the stored value as-is.
+    for key, value in data.items():
+        settings[key] = value
+
+    # Save updated settings
+    success = save_settings("swaparr", settings)
+
+    if success:
+        return jsonify({"success": True, "message": "Settings updated successfully"})
+    else:
+        return jsonify({"success": False, "message": "Failed to save settings"}), 500
+
+@swaparr_bp.route('/reset', methods=['POST'])
+def reset_strikes():
+ """Reset all strikes for all apps or a specific app"""
+ data = request.json
+ app_name = data.get('app_name') if data else None
+
+ state_dir = os.path.join(os.getenv("CONFIG_DIR", "/config"), "swaparr")
+
+ if not os.path.exists(state_dir):
+ return jsonify({"success": True, "message": "No strike data to reset"})
+
+ if app_name:
+ # Reset strikes for a specific app
+ app_dir = os.path.join(state_dir, app_name)
+ if os.path.exists(app_dir):
+ strike_file = os.path.join(app_dir, "strikes.json")
+ if os.path.exists(strike_file):
+ try:
+ os.remove(strike_file)
+ swaparr_logger.info(f"Reset strikes for {app_name}")
+ return jsonify({"success": True, "message": f"Strikes reset for {app_name}"})
+ except IOError as e:
+ swaparr_logger.error(f"Error resetting strikes for {app_name}: {str(e)}")
+ return jsonify({"success": False, "message": f"Failed to reset strikes for {app_name}: {str(e)}"}), 500
+ return jsonify({"success": False, "message": f"No strike data found for {app_name}"}), 404
+ else:
+ # Reset strikes for all apps
+ try:
+ for app_name in os.listdir(state_dir):
+ app_dir = os.path.join(state_dir, app_name)
+ if os.path.isdir(app_dir):
+ strike_file = os.path.join(app_dir, "strikes.json")
+ if os.path.exists(strike_file):
+ os.remove(strike_file)
+
+ swaparr_logger.info("Reset all strikes")
+ return jsonify({"success": True, "message": "All strikes reset"})
+ except IOError as e:
+ swaparr_logger.error(f"Error resetting all strikes: {str(e)}")
+ return jsonify({"success": False, "message": f"Failed to reset all strikes: {str(e)}"}), 500
+
+def register_routes(app):
+    """Register Swaparr routes with the Flask app under /api/swaparr.
+
+    NOTE(review): swaparr.py also creates a Blueprint named 'swaparr' with
+    the same routes; registering both on one app would collide — confirm
+    only one of the two blueprints is actually registered.
+    """
+    app.register_blueprint(swaparr_bp, url_prefix='/api/swaparr')
diff --git a/Huntarr.io-6.3.6/src/primary/apps/whisparr.py b/Huntarr.io-6.3.6/src/primary/apps/whisparr.py
new file mode 100644
index 0000000..33b2622
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/whisparr.py
@@ -0,0 +1,171 @@
+from flask import Blueprint, request, jsonify
+import datetime, os, requests
+from primary import keys_manager
+from src.primary.utils.logger import get_logger
+from src.primary.state import get_state_file_path
+from src.primary.settings_manager import load_settings
+
+whisparr_bp = Blueprint('whisparr', __name__)
+whisparr_logger = get_logger("whisparr")
+
+# Make sure we're using the correct state files
+PROCESSED_MISSING_FILE = get_state_file_path("whisparr", "processed_missing")
+PROCESSED_UPGRADES_FILE = get_state_file_path("whisparr", "processed_upgrades")
+
@whisparr_bp.route('/test-connection', methods=['POST'])
def test_connection():
    """Test connection to a Whisparr API instance with comprehensive diagnostics.

    Expects a JSON body containing ``api_url`` and ``api_key``; an optional
    ``api_timeout`` (seconds, default 30) controls the read timeout.
    Several candidate status-endpoint paths are probed because different
    Whisparr V2 setups expose the status endpoint under different prefixes.

    Returns:
        Flask JSON response with a success flag, a human-readable message,
        and the detected version on success; HTTP 400 on any failure.
    """
    # Fix: request.json is None (or raises) for a missing/non-JSON body,
    # which previously crashed on data.get(). get_json(silent=True) makes
    # that case fall through to the explicit 400 below.
    data = request.get_json(silent=True) or {}
    api_url = data.get('api_url')
    api_key = data.get('api_key')
    api_timeout = data.get('api_timeout', 30)  # Use longer timeout for connection test

    if not api_url or not api_key:
        return jsonify({"success": False, "message": "API URL and API Key are required"}), 400

    # Log the test attempt
    whisparr_logger.info(f"Testing connection to Whisparr V2 API at {api_url}")

    # First check if URL is properly formatted
    if not (api_url.startswith('http://') or api_url.startswith('https://')):
        error_msg = "API URL must start with http:// or https://"
        whisparr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 400

    # Try multiple API path combinations to handle different Whisparr V2 setups
    api_paths = [
        "/api/system/status",     # Standard V2 path
        "/api/v3/system/status",  # Some V2 instances use V3 API
        "/system/status"          # Direct path without /api prefix
    ]

    success = False
    last_error = None
    response_data = None

    for api_path in api_paths:
        test_url = f"{api_url.rstrip('/')}{api_path}"
        headers = {'X-Api-Key': api_key}
        whisparr_logger.debug(f"Trying Whisparr API path: {test_url}")

        try:
            # Use a connection timeout separate from read timeout
            response = requests.get(test_url, headers=headers, timeout=(10, api_timeout))

            # Log HTTP status code for diagnostic purposes
            whisparr_logger.debug(f"Whisparr API status code: {response.status_code} for path {api_path}")

            # 404 just means this path variant doesn't exist; try the next one
            if response.status_code == 404:
                continue

            response.raise_for_status()

            # Ensure the response is valid JSON
            try:
                response_data = response.json()
                whisparr_logger.debug(f"Whisparr API response: {response_data}")

                # Verify this is actually a Whisparr API by checking for version
                version = response_data.get('version', None)
                if not version:
                    # No version info, try next path
                    last_error = "API response doesn't contain version information"
                    continue

                # The version number should start with 2 for Whisparr
                if version.startswith('2'):
                    whisparr_logger.info(f"Successfully connected to Whisparr V2 API version {version} using path {api_path}")
                    success = True
                    break
                elif version.startswith('3'):
                    # V3 is a different product integration; fail fast with guidance
                    error_msg = f"Connected to Whisparr V3 (version {version}). Use the Eros integration for V3."
                    whisparr_logger.error(error_msg)
                    return jsonify({"success": False, "message": error_msg}), 400
                else:
                    # Connected to some other version, try next path
                    last_error = f"Connected to unknown version {version}, but Huntarr requires Whisparr V2"
                    continue

            except ValueError:
                last_error = "Invalid JSON response from API"
                continue

        except requests.exceptions.Timeout:
            last_error = f"Connection timed out after {api_timeout} seconds"
            continue

        except requests.exceptions.ConnectionError:
            last_error = "Failed to connect. Check that the URL is correct and that Whisparr is running."
            continue

        except requests.exceptions.HTTPError as e:
            last_error = f"HTTP error: {str(e)}"
            continue

        except Exception as e:
            last_error = f"Unexpected error: {str(e)}"
            continue

    # After trying all paths
    if success:
        return jsonify({
            "success": True,
            "message": f"Successfully connected to Whisparr V2 (version {response_data.get('version')})",
            "version": response_data.get('version')
        })
    else:
        error_msg = last_error or "Failed to connect to Whisparr API. Please check your URL and API key."
        whisparr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 400
+
# Function to check if Whisparr is configured
def is_configured():
    """Check if Whisparr API credentials are configured"""
    try:
        # Configured means at least one instance exists and is not disabled.
        stored = keys_manager.load_api_keys("whisparr")
        return any(inst.get("enabled", True) for inst in stored.get("instances", []))
    except Exception as e:
        whisparr_logger.error(f"Error checking if Whisparr is configured: {str(e)}")
        return False
+
# Get all valid instances from settings
def get_configured_instances():
    """Get all configured and enabled Whisparr instances"""
    try:
        stored = keys_manager.load_api_keys("whisparr")

        enabled_instances = []
        for inst in stored.get("instances", []):
            # Skip disabled instances and instances missing credentials.
            if not inst.get("enabled", True):
                continue

            url = inst.get("api_url")
            key = inst.get("api_key")
            if not url or not key:
                continue

            # Add name and timeout (with sane defaults) alongside credentials.
            enabled_instances.append({
                "api_url": url,
                "api_key": key,
                "instance_name": inst.get("name", "Default"),
                "api_timeout": inst.get("api_timeout", 90),
            })

        return enabled_instances
    except Exception as e:
        whisparr_logger.error(f"Error getting configured Whisparr instances: {str(e)}")
        return []
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/whisparr/__init__.py b/Huntarr.io-6.3.6/src/primary/apps/whisparr/__init__.py
new file mode 100644
index 0000000..2e2c844
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/whisparr/__init__.py
@@ -0,0 +1,95 @@
+"""
+Whisparr app module for Huntarr
+Contains functionality for missing items and quality upgrades in Whisparr
+
+Exclusively supports the v2 API (legacy).
+"""
+
+# Module exports
+from src.primary.apps.whisparr.missing import process_missing_items
+from src.primary.apps.whisparr.upgrade import process_cutoff_upgrades
+from src.primary.settings_manager import load_settings
+from src.primary.utils.logger import get_logger
+
+# Define logger for this module
+whisparr_logger = get_logger("whisparr")
+
+# For backward compatibility
+process_missing_scenes = process_missing_items
+
def get_configured_instances():
    """Get all configured and enabled Whisparr instances.

    Reads the "whisparr" settings blob and returns one merged settings dict
    per enabled instance: instance-specific api_url/api_key/name are layered
    over a copy of the global settings (with the "instances" list removed).

    Returns:
        list[dict]: one merged settings dict per usable instance; empty list
        when nothing is configured.
    """
    settings = load_settings("whisparr")
    instances = []
    # Use debug level to avoid log spam on new installations
    whisparr_logger.debug(f"Loaded Whisparr settings for instance check: {settings}")

    if not settings:
        whisparr_logger.debug("No settings found for Whisparr")
        return instances

    # Always use Whisparr V2 API
    # Use debug level to avoid log spam on new installations
    whisparr_logger.debug("Using Whisparr V2 API exclusively")

    # Check if instances are configured
    if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]:
        # Use debug level to avoid log spam on new installations
        whisparr_logger.debug(f"Found 'instances' list with {len(settings['instances'])} items. Processing...")
        for idx, instance in enumerate(settings["instances"]):
            whisparr_logger.debug(f"Checking instance #{idx}: {instance}")
            # Robustness fix: a stored null value previously crashed on
            # .strip() — dict.get's default only applies when the key is
            # absent, not when it is present with value None.
            api_url = (instance.get("api_url") or "").strip()
            api_key = (instance.get("api_key") or "").strip()

            # Enhanced URL validation - ensure URL has proper scheme
            if api_url and not (api_url.startswith('http://') or api_url.startswith('https://')):
                whisparr_logger.warning(f"Instance '{instance.get('name', 'Unnamed')}' has URL without http(s) scheme: {api_url}")
                api_url = f"http://{api_url}"
                whisparr_logger.warning(f"Auto-correcting URL to: {api_url}")

            is_enabled = instance.get("enabled", True)

            # Only include properly configured instances
            if is_enabled and api_url and api_key:
                instance_name = instance.get("name", "Default")

                # Create a settings object for this instance by combining global settings with instance-specific ones
                instance_settings = settings.copy()

                # Remove instances list to avoid confusion
                if "instances" in instance_settings:
                    del instance_settings["instances"]

                # Override with instance-specific settings
                instance_settings["api_url"] = api_url
                instance_settings["api_key"] = api_key
                instance_settings["instance_name"] = instance_name

                # Add timeout setting with default if not present
                if "api_timeout" not in instance_settings:
                    instance_settings["api_timeout"] = 30

                # Use debug level to prevent log spam
                whisparr_logger.debug(f"Adding configured Whisparr instance: {instance_name}")
                instances.append(instance_settings)
            else:
                name = instance.get("name", "Unnamed")
                if not is_enabled:
                    whisparr_logger.debug(f"Skipping disabled instance: {name}")
                else:
                    # For brand new installations, don't spam logs with warnings about default instances
                    if name == 'Default':
                        # Use debug level for default instances to avoid log spam on new installations
                        whisparr_logger.debug(f"Skipping instance {name} due to missing API URL or API Key")
                    else:
                        # Still log warnings for non-default instances
                        whisparr_logger.warning(f"Skipping instance {name} due to missing API URL or API Key")
    else:
        whisparr_logger.debug("No instances array found in settings or it's empty")

    # Use debug level to avoid spamming logs, especially with 0 instances
    whisparr_logger.debug(f"Found {len(instances)} configured and enabled Whisparr instances")
    return instances
+
+__all__ = ["process_missing_items", "process_missing_scenes", "process_cutoff_upgrades", "get_configured_instances"]
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/whisparr/api.py b/Huntarr.io-6.3.6/src/primary/apps/whisparr/api.py
new file mode 100644
index 0000000..c47b414
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/whisparr/api.py
@@ -0,0 +1,475 @@
+#!/usr/bin/env python3
+"""
+Whisparr-specific API functions
+Handles all communication with the Whisparr API
+
+Exclusively uses the Whisparr V2 API
+"""
+
+import requests
+import json
+import time
+import datetime
+import traceback
+import sys
+from typing import List, Dict, Any, Optional, Union
+from src.primary.utils.logger import get_logger
+
+# Get logger for the Whisparr app
+whisparr_logger = get_logger("whisparr")
+
+# Use a session for better performance
+session = requests.Session()
+
def arr_request(api_url: str, api_key: str, api_timeout: int, endpoint: str, method: str = "GET", data: Dict = None) -> Any:
    """
    Make a request to the Whisparr V2 API.

    Args:
        api_url: The base URL of the Whisparr API
        api_key: The API key for authentication
        api_timeout: Timeout for the API request
        endpoint: The API endpoint to call
        method: HTTP method (GET, POST, PUT, DELETE)
        data: Optional data to send with the request

    Returns:
        The JSON response from the API, or None if the request failed
    """
    if not api_url or not api_key:
        whisparr_logger.error("API URL or API key is missing. Check your settings.")
        return None

    if method not in ("GET", "POST", "PUT", "DELETE"):
        whisparr_logger.error(f"Unsupported HTTP method: {method}")
        return None

    # Always try standard path first
    api_base = "api"
    whisparr_logger.debug(f"Using Whisparr API path: {api_base}")

    # Full URL - ensure no double slashes
    url = f"{api_url.rstrip('/')}/{api_base}/{endpoint.lstrip('/')}"

    # Add debug logging for the exact URL being called
    whisparr_logger.debug(f"Making {method} request to: {url}")

    headers = {
        "X-Api-Key": api_key,
        "Content-Type": "application/json"
    }

    # Only POST/PUT carry a JSON body, matching the per-method dispatch
    # this function used to spell out by hand.
    send_body = method in ("POST", "PUT")

    def _dispatch(target_url):
        # Single place that issues the HTTP call for either path variant.
        if send_body:
            return session.request(method, target_url, headers=headers, json=data, timeout=api_timeout)
        return session.request(method, target_url, headers=headers, timeout=api_timeout)

    try:
        response = _dispatch(url)

        # If we get a 404, try with v3 path instead
        if response.status_code == 404:
            api_base = "api/v3"
            v3_url = f"{api_url.rstrip('/')}/{api_base}/{endpoint.lstrip('/')}"
            whisparr_logger.debug(f"Standard path returned 404, trying with V3 path: {v3_url}")
            response = _dispatch(v3_url)
            whisparr_logger.debug(f"V3 path request returned status code: {response.status_code}")

        # Check if the request was successful
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as e:
            whisparr_logger.error(f"Error during {method} request to {endpoint}: {e}, Status Code: {response.status_code}")
            whisparr_logger.debug(f"Response content: {response.text[:200]}")
            return None

        # Try to parse JSON response; an empty body is treated as {}
        try:
            if response.text:
                result = response.json()
                whisparr_logger.debug(f"Response from {response.url}: Status {response.status_code}, JSON parsed successfully")
                return result
            whisparr_logger.debug(f"Response from {response.url}: Status {response.status_code}, Empty response")
            return {}
        except json.JSONDecodeError:
            whisparr_logger.error(f"Invalid JSON response from API: {response.text[:200]}")
            return None

    except requests.exceptions.RequestException as e:
        whisparr_logger.error(f"Request failed: {e}")
        return None
    except Exception as e:
        whisparr_logger.error(f"Unexpected error during API request: {e}")
        return None
+
def get_download_queue_size(api_url: str, api_key: str, api_timeout: int) -> int:
    """
    Get the current size of the download queue.

    Args:
        api_url: The base URL of the Whisparr API
        api_key: The API key for authentication
        api_timeout: Timeout for the API request

    Returns:
        The number of items in the download queue, or -1 if the request failed
    """
    queue = arr_request(api_url, api_key, api_timeout, "queue")

    if queue is None:
        return -1

    # V2 responses wrap items in "records"; older shapes return a bare list.
    if isinstance(queue, dict) and "records" in queue:
        return len(queue["records"])
    if isinstance(queue, list):
        return len(queue)
    return -1
+
def get_items_with_missing(api_url: str, api_key: str, api_timeout: int, monitored_only: bool) -> List[Dict[str, Any]]:
    """
    Get a list of items with missing files (not downloaded/available).

    Args:
        api_url: The base URL of the Whisparr API
        api_key: The API key for authentication
        api_timeout: Timeout for the API request
        monitored_only: If True, only return monitored items.

    Returns:
        A list of item objects with missing files, or None if the request failed.
    """
    try:
        whisparr_logger.debug(f"Retrieving missing items...")

        # Endpoint parameters - always use v2 format
        endpoint = "wanted/missing?pageSize=1000&sortKey=airDateUtc&sortDirection=descending"
        payload = arr_request(api_url, api_key, api_timeout, endpoint)

        if payload is None:
            return None

        # Records live under "records" in the V2 paged response.
        records = payload.get("records", []) if isinstance(payload, dict) else []

        # Filter monitored if needed
        if monitored_only:
            records = [rec for rec in records if rec.get("monitored", False)]

        whisparr_logger.debug(f"Found {len(records)} missing items")
        return records

    except Exception as e:
        whisparr_logger.error(f"Error retrieving missing items: {str(e)}")
        return None
+
def get_cutoff_unmet_items(api_url: str, api_key: str, api_timeout: int, monitored_only: bool) -> List[Dict[str, Any]]:
    """
    Get a list of items that don't meet their quality profile cutoff.

    Args:
        api_url: The base URL of the Whisparr API
        api_key: The API key for authentication
        api_timeout: Timeout for the API request
        monitored_only: If True, only return monitored items.

    Returns:
        A list of item objects that need quality upgrades, or None if the request failed.
    """
    try:
        whisparr_logger.debug(f"Retrieving cutoff unmet items...")

        # Endpoint - always use v2 format
        endpoint = "wanted/cutoff?pageSize=1000&sortKey=airDateUtc&sortDirection=descending"
        payload = arr_request(api_url, api_key, api_timeout, endpoint)

        if payload is None:
            return None

        # Records live under "records" in the V2 paged response.
        records = payload.get("records", []) if isinstance(payload, dict) else []
        whisparr_logger.debug(f"Found {len(records)} cutoff unmet items")

        # Just filter monitored if needed
        if monitored_only:
            records = [rec for rec in records if rec.get("monitored", False)]
            whisparr_logger.debug(f"Found {len(records)} cutoff unmet items after filtering monitored")

        return records

    except Exception as e:
        whisparr_logger.error(f"Error retrieving cutoff unmet items: {str(e)}")
        return None
+
def refresh_item(api_url: str, api_key: str, api_timeout: int, item_id: int) -> int:
    """
    Refresh an item in Whisparr.

    Prefers a series-level RefreshSeries command (more reliable than
    RefreshEpisode on some Whisparr builds), falling back to an episode
    refresh when the series id cannot be determined.

    Args:
        api_url: The base URL of the Whisparr API
        api_key: The API key for authentication
        api_timeout: Timeout for the API request
        item_id: The ID of the item to refresh

    Returns:
        The command ID if the refresh was triggered successfully, None otherwise
    """
    try:
        whisparr_logger.debug(f"Refreshing item with ID {item_id}")

        # Some Whisparr versions have issues with RefreshEpisode, try a safer approach
        # Use series refresh instead if we can get the series ID from the episode
        episode_data = arr_request(api_url, api_key, api_timeout, f"episode/{item_id}")

        if episode_data and "seriesId" in episode_data:
            # We have the series ID, use series refresh which is more reliable
            series_id = episode_data["seriesId"]
            whisparr_logger.debug(f"Retrieved series ID {series_id} for episode {item_id}, using series refresh")
            payload = {
                "name": "RefreshSeries",
                "seriesId": series_id
            }
        else:
            # Fall back to episode refresh if we can't get the series ID
            whisparr_logger.debug(f"Could not retrieve series ID for episode {item_id}, using episode refresh")
            payload = {
                "name": "RefreshEpisode",
                "episodeIds": [item_id]
            }

        # For commands, we need to directly try both path formats since command endpoints
        # may have different structures in different Whisparr versions
        base = api_url.rstrip('/')
        primary_url = f"{base}/api/command"
        fallback_url = f"{base}/api/v3/command"

        headers = {
            "X-Api-Key": api_key,
            "Content-Type": "application/json"
        }

        # Try standard API path first
        whisparr_logger.debug(f"Attempting command with standard API path: {primary_url}")
        try:
            response = session.post(primary_url, headers=headers, json=payload, timeout=api_timeout)
            # If we get a 404 or 405, try the v3 path
            if response.status_code in [404, 405]:
                whisparr_logger.debug(f"Standard path returned {response.status_code}, trying with V3 path: {fallback_url}")
                response = session.post(fallback_url, headers=headers, json=payload, timeout=api_timeout)

            response.raise_for_status()
            result = response.json()

            if result and "id" in result:
                command_id = result["id"]
                whisparr_logger.debug(f"Refresh command triggered with ID {command_id}")
                return command_id
            whisparr_logger.error("Failed to trigger refresh command - no command ID returned")
            return None
        except requests.exceptions.HTTPError as e:
            whisparr_logger.error(f"HTTP error during refresh command: {e}, Status Code: {response.status_code}")
            whisparr_logger.debug(f"Response content: {response.text[:200]}")
            return None
        except Exception as e:
            whisparr_logger.error(f"Error sending refresh command: {e}")
            return None

    except Exception as e:
        whisparr_logger.error(f"Error refreshing item: {str(e)}")
        return None
+
def item_search(api_url: str, api_key: str, api_timeout: int, item_ids: List[int]) -> int:
    """
    Trigger a search for one or more items.

    Args:
        api_url: The base URL of the Whisparr API
        api_key: The API key for authentication
        api_timeout: Timeout for the API request
        item_ids: A list of item IDs to search for

    Returns:
        The command ID if the search command was triggered successfully, None otherwise
    """
    try:
        whisparr_logger.debug(f"Searching for items with IDs: {item_ids}")

        # Always use the same payload format since we're always using v2 API
        payload = {
            "name": "EpisodeSearch",
            "episodeIds": item_ids
        }

        # For commands, we need to directly try both path formats
        base = api_url.rstrip('/')
        primary_url = f"{base}/api/command"
        fallback_url = f"{base}/api/v3/command"

        headers = {
            "X-Api-Key": api_key,
            "Content-Type": "application/json"
        }

        # Try standard API path first
        whisparr_logger.debug(f"Attempting command with standard API path: {primary_url}")
        try:
            response = session.post(primary_url, headers=headers, json=payload, timeout=api_timeout)
            # If we get a 404 or 405, try the v3 path
            if response.status_code in [404, 405]:
                whisparr_logger.debug(f"Standard path returned {response.status_code}, trying with V3 path: {fallback_url}")
                response = session.post(fallback_url, headers=headers, json=payload, timeout=api_timeout)

            response.raise_for_status()
            result = response.json()

            if result and "id" in result:
                command_id = result["id"]
                whisparr_logger.debug(f"Search command triggered with ID {command_id}")
                return command_id
            whisparr_logger.error("Failed to trigger search command - no command ID returned")
            return None
        except requests.exceptions.HTTPError as e:
            whisparr_logger.error(f"HTTP error during search command: {e}, Status Code: {response.status_code}")
            whisparr_logger.debug(f"Response content: {response.text[:200]}")
            return None
        except Exception as e:
            whisparr_logger.error(f"Error sending search command: {e}")
            return None

    except Exception as e:
        whisparr_logger.error(f"Error searching for items: {str(e)}")
        return None
+
def get_command_status(api_url: str, api_key: str, api_timeout: int, command_id: int) -> Optional[Dict]:
    """
    Get the status of a specific command.

    Args:
        api_url: The base URL of the Whisparr API
        api_key: The API key for authentication
        api_timeout: Timeout for the API request
        command_id: The ID of the command to check

    Returns:
        A dictionary containing the command status, or None if the request failed.
    """
    if not command_id:
        whisparr_logger.error("No command ID provided for status check.")
        return None

    try:
        # For commands, we need to directly try both path formats
        base = api_url.rstrip('/')
        primary_url = f"{base}/api/command/{command_id}"
        fallback_url = f"{base}/api/v3/command/{command_id}"

        headers = {
            "X-Api-Key": api_key,
            "Content-Type": "application/json"
        }

        # Try standard API path first
        whisparr_logger.debug(f"Checking command status with standard API path: {primary_url}")
        try:
            response = session.get(primary_url, headers=headers, timeout=api_timeout)
            # If we get a 404, try the v3 path
            if response.status_code == 404:
                whisparr_logger.debug(f"Standard path returned 404, trying with V3 path: {fallback_url}")
                response = session.get(fallback_url, headers=headers, timeout=api_timeout)

            response.raise_for_status()
            result = response.json()

            whisparr_logger.debug(f"Command {command_id} status: {result.get('status', 'unknown')}")
            return result
        except requests.exceptions.HTTPError as e:
            whisparr_logger.error(f"HTTP error getting command status: {e}, Status Code: {response.status_code}")
            whisparr_logger.debug(f"Response content: {response.text[:200]}")
            return None
        except Exception as e:
            whisparr_logger.error(f"Error getting command status: {e}")
            return None

    except Exception as e:
        whisparr_logger.error(f"Error getting command status for ID {command_id}: {e}")
        return None
+
def check_connection(api_url: str, api_key: str, api_timeout: int) -> bool:
    """
    Check the connection to Whisparr V2 API.

    Args:
        api_url: The base URL of the Whisparr API
        api_key: The API key for authentication
        api_timeout: Timeout for the API request

    Returns:
        True if the connection is successful, False otherwise
    """
    try:
        # For Whisparr V2, we need to handle both regular and v3 API formats
        whisparr_logger.debug(f"Checking connection to Whisparr V2 instance at {api_url}")

        # First try with standard path
        status = arr_request(api_url, api_key, api_timeout, "system/status")

        # If that failed, try with v3 path format
        if status is None:
            whisparr_logger.debug("Standard API path failed, trying v3 format...")
            # Try direct HTTP request to v3 endpoint without using arr_request
            fallback_url = f"{api_url.rstrip('/')}/api/v3/system/status"
            try:
                resp = session.get(fallback_url, headers={'X-Api-Key': api_key}, timeout=api_timeout)
                resp.raise_for_status()
                status = resp.json()
            except Exception as e:
                whisparr_logger.debug(f"V3 API path also failed: {str(e)}")
                return False

        if status is None:
            whisparr_logger.error("Failed to connect to Whisparr V2 API")
            return False

        # Only a 2.x version counts as a valid Whisparr V2 connection.
        version = status.get("version", "unknown")
        if version and version.startswith('2'):
            whisparr_logger.info(f"Successfully connected to Whisparr V2 API version: {version}")
            return True
        whisparr_logger.warning(f"Connected to Whisparr but found unexpected version: {version}, expected 2.x")
        return False

    except Exception as e:
        whisparr_logger.error(f"Error checking connection to Whisparr V2 API: {str(e)}")
        return False
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/whisparr/missing.py b/Huntarr.io-6.3.6/src/primary/apps/whisparr/missing.py
new file mode 100644
index 0000000..0305c8b
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/whisparr/missing.py
@@ -0,0 +1,227 @@
+#!/usr/bin/env python3
+"""
+Missing Items Processing for Whisparr
+Handles searching for missing items in Whisparr
+
+Supports both v2 (legacy) and v3 (Eros) API versions
+"""
+
+import time
+import random
+import datetime
+from typing import List, Dict, Any, Set, Callable
+from src.primary.utils.logger import get_logger
+from src.primary.apps.whisparr import api as whisparr_api
+from src.primary.settings_manager import load_settings, get_advanced_setting
+from src.primary.stateful_manager import is_processed, add_processed_id
+from src.primary.stats_manager import increment_stat
+from src.primary.utils.history_utils import log_processed_media
+from src.primary.state import check_state_reset
+
+# Get logger for the app
+whisparr_logger = get_logger("whisparr")
+
def process_missing_items(
    app_settings: Dict[str, Any],
    stop_check: Callable[[], bool] # Function to check if stop is requested
) -> bool:
    """
    Process missing items in Whisparr based on provided settings.

    One cycle: fetch wanted/missing items, drop future releases and
    already-processed ids, randomly pick up to hunt_missing_items of the
    rest, then refresh + search each one, recording state/stats/history.

    Args:
        app_settings: Dictionary containing all settings for Whisparr
        stop_check: A function that returns True if the process should stop

    Returns:
        True if any items were processed, False otherwise.
    """
    whisparr_logger.info("Starting missing items processing cycle for Whisparr.")
    # NOTE(review): processed_any is never updated below; processing_done is
    # the flag actually returned — confirm and consider removing this.
    processed_any = False

    # Reset state files if enough time has passed
    check_state_reset("whisparr")

    # Extract necessary settings
    api_url = app_settings.get("api_url", "").strip()
    api_key = app_settings.get("api_key", "").strip()
    api_timeout = get_advanced_setting("api_timeout", 120)  # Use general.json value
    instance_name = app_settings.get("instance_name", "Whisparr Default")

    # Use the centralized advanced setting for stateful management hours
    # NOTE(review): loaded but not used in this function — presumably consumed
    # by check_state_reset/stateful_manager elsewhere; confirm.
    stateful_management_hours = get_advanced_setting("stateful_management_hours", 168)

    monitored_only = app_settings.get("monitored_only", True)
    skip_future_releases = app_settings.get("skip_future_releases", True)
    skip_item_refresh = app_settings.get("skip_item_refresh", False)

    # Use the new hunt_missing_items parameter name, falling back to hunt_missing_scenes for backwards compatibility
    hunt_missing_items = app_settings.get("hunt_missing_items", app_settings.get("hunt_missing_scenes", 0))

    # Use advanced settings from general.json for command operations
    # NOTE(review): these two are loaded but not referenced below — kept for
    # parity with other app modules? Confirm before removing.
    command_wait_delay = get_advanced_setting("command_wait_delay", 1)
    command_wait_attempts = get_advanced_setting("command_wait_attempts", 600)

    # Log that we're using Whisparr V2 API
    whisparr_logger.info(f"Using Whisparr V2 API for instance: {instance_name}")

    # Skip if hunt_missing_items is set to 0
    if hunt_missing_items <= 0:
        whisparr_logger.info("'hunt_missing_items' setting is 0 or less. Skipping missing item processing.")
        return False

    # Check for stop signal
    if stop_check():
        whisparr_logger.info("Stop requested before starting missing items. Aborting...")
        return False

    # Get missing items
    whisparr_logger.info(f"Retrieving items with missing files...")
    missing_items = whisparr_api.get_items_with_missing(api_url, api_key, api_timeout, monitored_only)

    if missing_items is None: # API call failed
        whisparr_logger.error("Failed to retrieve missing items from Whisparr API.")
        return False

    if not missing_items:
        whisparr_logger.info("No missing items found.")
        return False

    # Check for stop signal after retrieving items
    if stop_check():
        whisparr_logger.info("Stop requested after retrieving missing items. Aborting...")
        return False

    whisparr_logger.info(f"Found {len(missing_items)} items with missing files.")

    # Filter out future releases if configured
    if skip_future_releases:
        now = datetime.datetime.now(datetime.timezone.utc)
        original_count = len(missing_items)
        # Whisparr item object has 'airDateUtc' for release dates
        # Items with no airDateUtc are kept; only dated, future items drop.
        # assumes airDateUtc is ISO-8601 ending in 'Z' — a malformed date
        # would raise ValueError here; TODO confirm upstream format.
        missing_items = [
            item for item in missing_items
            if not item.get('airDateUtc') or (
                item.get('airDateUtc') and
                datetime.datetime.fromisoformat(item['airDateUtc'].replace('Z', '+00:00')) < now
            )
        ]
        skipped_count = original_count - len(missing_items)
        if skipped_count > 0:
            whisparr_logger.info(f"Skipped {skipped_count} future item releases based on air date.")

        if not missing_items:
            whisparr_logger.info("No missing items left to process after filtering future releases.")
            return False

    # Filter out already processed items using stateful management
    unprocessed_items = []
    for item in missing_items:
        item_id = str(item.get("id"))
        if not is_processed("whisparr", instance_name, item_id):
            unprocessed_items.append(item)
        else:
            whisparr_logger.debug(f"Skipping already processed item ID: {item_id}")

    whisparr_logger.info(f"Found {len(unprocessed_items)} unprocessed items out of {len(missing_items)} total items with missing files.")

    if not unprocessed_items:
        whisparr_logger.info(f"No unprocessed items found for {instance_name}. All available items have been processed.")
        return False

    items_processed = 0
    processing_done = False

    # Select items to search based on configuration
    whisparr_logger.info(f"Randomly selecting up to {hunt_missing_items} missing items.")
    items_to_search = random.sample(unprocessed_items, min(len(unprocessed_items), hunt_missing_items))

    whisparr_logger.info(f"Selected {len(items_to_search)} missing items to search.")

    # Process selected items
    for item in items_to_search:
        # Check for stop signal before each item
        if stop_check():
            whisparr_logger.info("Stop requested during item processing. Aborting...")
            break

        # Re-check limit in case it changed
        current_limit = app_settings.get("hunt_missing_items", app_settings.get("hunt_missing_scenes", 1))
        if items_processed >= current_limit:
            whisparr_logger.info(f"Reached HUNT_MISSING_ITEMS limit ({current_limit}) for this cycle.")
            break

        item_id = item.get("id")
        title = item.get("title", "Unknown Title")
        season_episode = f"S{item.get('seasonNumber', 0):02d}E{item.get('episodeNumber', 0):02d}"

        whisparr_logger.info(f"Processing missing item: \"{title}\" - {season_episode} (Item ID: {item_id})")

        # Refresh the item information if not skipped
        refresh_command_id = None
        if not skip_item_refresh:
            whisparr_logger.info(" - Refreshing item information...")
            refresh_command_id = whisparr_api.refresh_item(api_url, api_key, api_timeout, item_id)
            if refresh_command_id:
                whisparr_logger.info(f"Triggered refresh command {refresh_command_id}. Waiting a few seconds...")
                time.sleep(5) # Basic wait
            else:
                whisparr_logger.warning(f"Failed to trigger refresh command for item ID: {item_id}. Proceeding without refresh.")
        else:
            whisparr_logger.info(" - Skipping item refresh (skip_item_refresh=true)")

        # Mark the item as processed BEFORE triggering any searches
        add_processed_id("whisparr", instance_name, str(item_id))
        whisparr_logger.debug(f"Added item ID {item_id} to processed list for {instance_name}")

        # Check for stop signal before searching
        if stop_check():
            whisparr_logger.info(f"Stop requested before searching for {title}. Aborting...")
            break

        # Search for the item
        whisparr_logger.info(" - Searching for missing item...")
        search_command_id = whisparr_api.item_search(api_url, api_key, api_timeout, [item_id])
        if search_command_id:
            whisparr_logger.info(f"Triggered search command {search_command_id}. Assuming success for now.")

            # Log to history system
            media_name = f"{title} - {season_episode}"
            log_processed_media("whisparr", media_name, item_id, instance_name, "missing")
            whisparr_logger.debug(f"Logged history entry for item: {media_name}")

            items_processed += 1
            processing_done = True

            # Increment the hunted statistics for Whisparr
            increment_stat("whisparr", "hunted", 1)
            whisparr_logger.debug(f"Incremented whisparr hunted statistics by 1")

            # Log progress
            current_limit = app_settings.get("hunt_missing_items", app_settings.get("hunt_missing_scenes", 1))
            whisparr_logger.info(f"Processed {items_processed}/{current_limit} missing items this cycle.")
        else:
            whisparr_logger.warning(f"Failed to trigger search command for item ID {item_id}.")
            # Do not mark as processed if search couldn't be triggered
            # NOTE(review): the id was already added to the processed list
            # above, so this comment does not reflect actual behavior —
            # confirm whether the id should be un-marked on search failure.
            continue

    # Log final status
    if items_processed > 0:
        whisparr_logger.info(f"Completed processing {items_processed} missing items for this cycle.")
    else:
        whisparr_logger.info("No new missing items were processed in this run.")

    return processing_done
+
# For backward compatibility with the background processing system
def process_missing_scenes(app_settings, stop_check):
    """
    Backwards compatibility function that calls process_missing_items.

    Args:
        app_settings: Dictionary containing all settings for Whisparr
        stop_check: A function that returns True if the process should stop

    Returns:
        Result from process_missing_items
    """
    # Thin pass-through alias; all behavior lives in process_missing_items.
    return process_missing_items(app_settings, stop_check)
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/whisparr/upgrade.py b/Huntarr.io-6.3.6/src/primary/apps/whisparr/upgrade.py
new file mode 100644
index 0000000..d04415c
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/whisparr/upgrade.py
@@ -0,0 +1,191 @@
+#!/usr/bin/env python3
+"""
+Quality Upgrade Processing for Whisparr
+Handles searching for items that need quality upgrades in Whisparr
+
+Supports both v2 (legacy) and v3 (Eros) API versions
+"""
+
+import time
+import random
+from typing import Dict, Any, List, Callable
+from datetime import datetime, timedelta
+from src.primary.utils.logger import get_logger
+from src.primary.apps.whisparr import api as whisparr_api
+from src.primary.settings_manager import load_settings, get_advanced_setting
+from src.primary.stateful_manager import is_processed, add_processed_id
+from src.primary.stats_manager import increment_stat
+from src.primary.utils.history_utils import log_processed_media
+from src.primary.state import check_state_reset
+
+# Get logger for the app
+whisparr_logger = get_logger("whisparr")
+
def process_cutoff_upgrades(
    app_settings: Dict[str, Any],
    stop_check: Callable[[], bool]  # Function to check if stop is requested
) -> bool:
    """
    Process quality cutoff upgrades for Whisparr based on settings.

    Retrieves cutoff-unmet items from the Whisparr V2 API, filters out items
    already recorded by the stateful manager, randomly selects up to
    ``hunt_upgrade_items`` of them, and for each one optionally refreshes the
    item, marks it processed, then triggers a search and logs history/stats.

    Args:
        app_settings: Dictionary containing all settings for Whisparr
        stop_check: A function that returns True if the process should stop

    Returns:
        True if any items were processed for upgrades, False otherwise.
    """
    whisparr_logger.info("Starting quality cutoff upgrades processing cycle for Whisparr.")

    # Reset state files if enough time has passed
    check_state_reset("whisparr")

    # Extract necessary settings
    api_url = app_settings.get("api_url", "").strip()
    api_key = app_settings.get("api_key", "").strip()
    api_timeout = get_advanced_setting("api_timeout", 120)  # Use general.json value
    instance_name = app_settings.get("instance_name", "Whisparr Default")

    monitored_only = app_settings.get("monitored_only", True)
    skip_item_refresh = app_settings.get("skip_item_refresh", False)

    # Prefer the new hunt_upgrade_items key, falling back to the legacy
    # hunt_upgrade_scenes name for backwards compatibility.
    hunt_upgrade_items = app_settings.get("hunt_upgrade_items", app_settings.get("hunt_upgrade_scenes", 0))

    # Log that we're using Whisparr V2 API
    whisparr_logger.info(f"Using Whisparr V2 API for instance: {instance_name}")

    # Skip if hunt_upgrade_items is set to 0
    if hunt_upgrade_items <= 0:
        whisparr_logger.info("'hunt_upgrade_items' setting is 0 or less. Skipping quality upgrade processing.")
        return False

    # Check for stop signal
    if stop_check():
        whisparr_logger.info("Stop requested before starting quality upgrades. Aborting...")
        return False

    # Get items eligible for upgrade
    whisparr_logger.info("Retrieving items eligible for cutoff upgrade...")
    upgrade_eligible_data = whisparr_api.get_cutoff_unmet_items(api_url, api_key, api_timeout, monitored_only)

    if not upgrade_eligible_data:
        whisparr_logger.info("No items found eligible for upgrade or error retrieving them.")
        return False

    # Check for stop signal after retrieving eligible items
    if stop_check():
        whisparr_logger.info("Stop requested after retrieving upgrade eligible items. Aborting...")
        return False

    whisparr_logger.info(f"Found {len(upgrade_eligible_data)} items eligible for quality upgrade.")

    # Filter out already processed items using stateful management
    unprocessed_items = []
    for item in upgrade_eligible_data:
        item_id = str(item.get("id"))
        if not is_processed("whisparr", instance_name, item_id):
            unprocessed_items.append(item)
        else:
            whisparr_logger.debug(f"Skipping already processed item ID: {item_id}")

    whisparr_logger.info(f"Found {len(unprocessed_items)} unprocessed items out of {len(upgrade_eligible_data)} total items eligible for quality upgrade.")

    if not unprocessed_items:
        whisparr_logger.info(f"No unprocessed items found for {instance_name}. All available items have been processed.")
        return False

    items_processed = 0
    processing_done = False

    # Always use random selection for upgrades
    whisparr_logger.info(f"Randomly selecting up to {hunt_upgrade_items} items for quality upgrade.")
    items_to_upgrade = random.sample(unprocessed_items, min(len(unprocessed_items), hunt_upgrade_items))

    whisparr_logger.info(f"Selected {len(items_to_upgrade)} items for quality upgrade.")

    # Process selected items
    for item in items_to_upgrade:
        # Check for stop signal before each item
        if stop_check():
            whisparr_logger.info("Stop requested during item processing. Aborting...")
            break

        # Re-check limit in case it changed while this cycle was running
        current_limit = app_settings.get("hunt_upgrade_items", app_settings.get("hunt_upgrade_scenes", 1))
        if items_processed >= current_limit:
            whisparr_logger.info(f"Reached HUNT_UPGRADE_ITEMS limit ({current_limit}) for this cycle.")
            break

        item_id = item.get("id")
        title = item.get("title", "Unknown Title")
        season_episode = f"S{item.get('seasonNumber', 0):02d}E{item.get('episodeNumber', 0):02d}"

        current_quality = item.get("episodeFile", {}).get("quality", {}).get("quality", {}).get("name", "Unknown")

        whisparr_logger.info(f"Processing item for quality upgrade: \"{title}\" - {season_episode} (Item ID: {item_id})")
        whisparr_logger.info(f" - Current quality: {current_quality}")

        # Refresh the item information if not skipped
        refresh_command_id = None
        if not skip_item_refresh:
            whisparr_logger.info(" - Refreshing item information...")
            refresh_command_id = whisparr_api.refresh_item(api_url, api_key, api_timeout, item_id)
            if refresh_command_id:
                whisparr_logger.info(f"Triggered refresh command {refresh_command_id}. Waiting a few seconds...")
                time.sleep(5)  # Basic wait so Whisparr can finish the refresh
            else:
                whisparr_logger.warning(f"Failed to trigger refresh command for item ID: {item_id}. Proceeding without refresh.")
        else:
            whisparr_logger.info(" - Skipping item refresh (skip_item_refresh=true)")

        # Check for stop signal before searching
        if stop_check():
            whisparr_logger.info(f"Stop requested before searching for {title}. Aborting...")
            break

        # Mark the item as processed BEFORE triggering any searches so a
        # crash mid-search cannot cause the same item to be hunted twice.
        add_processed_id("whisparr", instance_name, str(item_id))
        whisparr_logger.debug(f"Added item ID {item_id} to processed list for {instance_name}")

        # Search for the item
        whisparr_logger.info(" - Searching for quality upgrade...")
        search_command_id = whisparr_api.item_search(api_url, api_key, api_timeout, [item_id])
        if search_command_id:
            whisparr_logger.info(f"Triggered search command {search_command_id}. Assuming success for now.")

            # Log to history so the upgrade appears in the history UI
            series_title = item.get("series", {}).get("title", "Unknown Series")
            media_name = f"{series_title} - {season_episode} - {title}"
            log_processed_media("whisparr", media_name, item_id, instance_name, "upgrade")
            whisparr_logger.debug(f"Logged quality upgrade to history for item ID {item_id}")

            items_processed += 1
            processing_done = True

            # Increment the upgraded statistics for Whisparr
            increment_stat("whisparr", "upgraded", 1)
            whisparr_logger.debug("Incremented whisparr upgraded statistics by 1")

            # Log progress
            current_limit = app_settings.get("hunt_upgrade_items", app_settings.get("hunt_upgrade_scenes", 1))
            whisparr_logger.info(f"Processed {items_processed}/{current_limit} items for quality upgrade this cycle.")
        else:
            whisparr_logger.warning(f"Failed to trigger search command for item ID {item_id}.")
            # Do not mark as processed if search couldn't be triggered
            continue

    # Log final status
    if items_processed > 0:
        whisparr_logger.info(f"Completed processing {items_processed} items for quality upgrade for this cycle.")
    else:
        whisparr_logger.info("No new items were processed for quality upgrade in this run.")

    return processing_done
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/apps/whisparr_routes.py b/Huntarr.io-6.3.6/src/primary/apps/whisparr_routes.py
new file mode 100644
index 0000000..6c35943
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/apps/whisparr_routes.py
@@ -0,0 +1,337 @@
+#!/usr/bin/env python3
+
+from flask import Blueprint, request, jsonify
+import datetime, os, requests
+from src.primary import keys_manager
+from src.primary.state import get_state_file_path, reset_state_file
+from src.primary.utils.logger import get_logger, APP_LOG_FILES
+import traceback
+import socket
+from urllib.parse import urlparse
+from src.primary.apps.whisparr import api as whisparr_api
+
+whisparr_bp = Blueprint('whisparr', __name__)
+whisparr_logger = get_logger("whisparr")
+
+# Make sure we're using the correct state files
+PROCESSED_MISSING_FILE = get_state_file_path("whisparr", "processed_missing")
+PROCESSED_UPGRADES_FILE = get_state_file_path("whisparr", "processed_upgrades")
+
@whisparr_bp.route('/status', methods=['GET'])
def get_status():
    """Get the status of all configured Whisparr instances"""
    try:
        # Every stored instance counts as "configured", even disabled ones.
        instances = keys_manager.load_api_keys("whisparr").get("instances", [])
        total_configured = len(instances)
        connected_count = 0

        for instance in instances:
            if not instance.get("enabled", True):
                continue
            api_url = instance.get("api_url")
            api_key = instance.get("api_key")
            if not (api_url and api_key):
                continue
            # Short 5s timeout keeps the status endpoint responsive.
            if whisparr_api.check_connection(api_url, api_key, 5):
                connected_count += 1

        return jsonify({
            "configured": total_configured > 0,
            "connected": connected_count > 0,
            "connected_count": connected_count,
            "total_configured": total_configured
        })
    except Exception as e:
        whisparr_logger.error(f"Error getting Whisparr status: {str(e)}")
        return jsonify({
            "configured": False,
            "connected": False,
            "error": str(e)
        }), 500
+
@whisparr_bp.route('/test-connection', methods=['POST'])
def test_connection():
    """Test connection to a Whisparr API instance.

    Expects JSON body with `api_url` and `api_key` (optional `api_timeout`).
    Performs, in order: URL-format validation, a quick raw-socket reachability
    probe, then real API requests against the v2 and v3 status endpoints.
    Only Whisparr V2 (version string starting with '2') is accepted.
    """
    data = request.json
    api_url = data.get('api_url')
    api_key = data.get('api_key')
    api_timeout = data.get('api_timeout', 30) # Use longer timeout for connection test

    if not api_url or not api_key:
        return jsonify({"success": False, "message": "API URL and API Key are required"}), 400

    whisparr_logger.info(f"Testing connection to Whisparr API at {api_url}")

    # Validate URL format
    if not (api_url.startswith('http://') or api_url.startswith('https://')):
        error_msg = "API URL must start with http:// or https://"
        whisparr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 400

    # Try to establish a socket connection first to check basic connectivity
    parsed_url = urlparse(api_url)
    hostname = parsed_url.hostname
    port = parsed_url.port or (443 if parsed_url.scheme == 'https' else 80)

    try:
        # Try socket connection for quick feedback on connectivity issues
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(3) # Short timeout for quick feedback
        result = sock.connect_ex((hostname, port))
        sock.close()

        # connect_ex returns 0 on success, an errno otherwise
        if result != 0:
            error_msg = f"Connection refused - Unable to connect to {hostname}:{port}. Please check if the server is running and the port is correct."
            whisparr_logger.error(error_msg)
            return jsonify({"success": False, "message": error_msg}), 404
    except socket.gaierror:
        error_msg = f"DNS resolution failed - Cannot resolve hostname: {hostname}. Please check your URL."
        whisparr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 404
    except Exception as e:
        # Log the socket testing error but continue with the full request
        whisparr_logger.debug(f"Socket test error, continuing with full request: {str(e)}")

    # First try standard API endpoint (Whisparr v2)
    api_paths = [
        {"url": f"{api_url.rstrip('/')}/api/system/status", "version": "v2"},
        {"url": f"{api_url.rstrip('/')}/api/v3/system/status", "version": "v3"}
    ]

    headers = {
        "X-Api-Key": api_key,
        "Content-Type": "application/json"
    }

    response = None
    detected_version = None

    # Try each API path in order; first HTTP 200 wins. Request exceptions
    # just advance to the next path — per-path errors are reported below.
    for api_path in api_paths:
        try:
            url = api_path["url"]
            whisparr_logger.debug(f"Trying API path: {url}")
            # (connect_timeout, read_timeout) tuple: fail fast on connect
            response = requests.get(url, headers=headers, timeout=(10, api_timeout))

            if response.status_code == 200:
                detected_version = api_path["version"]
                break

        except requests.exceptions.RequestException:
            continue

    # If no successful response was obtained, map the last HTTP status (if
    # any) to a specific user-facing error message.
    if not response or response.status_code != 200:
        if response:
            # For HTTP errors, provide more specific feedback
            if response.status_code == 401:
                error_msg = "Authentication failed: Invalid API key"
                whisparr_logger.error(error_msg)
                return jsonify({"success": False, "message": error_msg}), 401
            elif response.status_code == 403:
                error_msg = "Access forbidden: Check API key permissions"
                whisparr_logger.error(error_msg)
                return jsonify({"success": False, "message": error_msg}), 403
            elif response.status_code == 404:
                error_msg = "API endpoint not found: This doesn't appear to be a valid Whisparr server. Check your URL."
                whisparr_logger.error(error_msg)
                return jsonify({"success": False, "message": error_msg}), 404
            elif response.status_code >= 500:
                error_msg = f"Whisparr server error (HTTP {response.status_code}): The Whisparr server is experiencing issues"
                whisparr_logger.error(error_msg)
                return jsonify({"success": False, "message": error_msg}), response.status_code
            else:
                error_msg = f"HTTP error {response.status_code} connecting to Whisparr"
                whisparr_logger.error(error_msg)
                return jsonify({"success": False, "message": error_msg}), response.status_code
        else:
            error_msg = "Could not connect to any Whisparr API endpoint"
            whisparr_logger.error(error_msg)
            return jsonify({"success": False, "message": error_msg}), 404

    # Successfully connected, now validate version (only V2 is supported)
    try:
        response_data = response.json()
        version = response_data.get('version', 'unknown')
        whisparr_logger.info(f"Successfully connected to Whisparr API version: {version} (API {detected_version})")

        # Check if this is a v2 version
        if version and version.startswith('2'):
            # Detected v2
            return jsonify({
                "success": True,
                "message": "Successfully connected to Whisparr API",
                "version": version,
                "is_v2": True
            })
        elif version and version.startswith('3'):
            # Detected Eros API (V3)
            error_msg = f"Incompatible Whisparr version {version} detected. Huntarr requires Whisparr V2."
            whisparr_logger.error(error_msg)
            return jsonify({"success": False, "message": error_msg}), 400
        else:
            error_msg = f"Unexpected Whisparr version {version} detected. Huntarr requires Whisparr V2."
            whisparr_logger.error(error_msg)
            return jsonify({"success": False, "message": error_msg}), 400
    except ValueError:
        # response.json() raised — body was not JSON
        error_msg = "Invalid JSON response from Whisparr API - This doesn't appear to be a valid Whisparr server"
        whisparr_logger.error(f"{error_msg}. Response content: {response.text[:200]}")
        return jsonify({"success": False, "message": error_msg}), 500
    except requests.exceptions.ConnectionError as e:
        # NOTE(review): the requests.get calls above are inside their own
        # try/except, so these handlers mainly guard the version-validation
        # block; confirm whether they are still reachable as intended.
        # Handle different types of connection errors
        error_details = str(e)
        if "Connection refused" in error_details:
            error_msg = f"Connection refused - Whisparr is not running on {api_url} or the port is incorrect"
        elif "Name or service not known" in error_details or "getaddrinfo failed" in error_details:
            error_msg = f"DNS resolution failed - Cannot find host '{urlparse(api_url).hostname}'. Check your URL."
        else:
            error_msg = f"Connection error - Check if Whisparr is running: {error_details}"

        whisparr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 404
    except requests.exceptions.Timeout:
        error_msg = f"Connection timed out - Whisparr took too long to respond"
        whisparr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 504
    except requests.exceptions.RequestException as e:
        error_msg = f"Connection test failed: {str(e)}"
        whisparr_logger.error(error_msg)
        return jsonify({"success": False, "message": error_msg}), 500
+
# Function to check if Whisparr is configured
def is_configured():
    """Check if at least one Whisparr instance has API credentials configured.

    keys_manager stores credentials under an "instances" list (see get_status
    and get_versions in this file), so checking the flat top-level
    "api_url"/"api_key" keys alone would always be falsy. The flat layout is
    still checked last for backward compatibility with old config files.

    Returns:
        bool: True if any instance (or the legacy flat layout) has both an
        API URL and an API key.
    """
    api_keys = keys_manager.load_api_keys("whisparr")
    for instance in api_keys.get("instances", []):
        if instance.get("api_url") and instance.get("api_key"):
            return True
    # Legacy flat layout fallback
    return bool(api_keys.get("api_url") and api_keys.get("api_key"))
+
@whisparr_bp.route('/versions', methods=['GET'])
def get_versions():
    """Get the version information from the Whisparr API.

    Queries every enabled configured instance and returns a per-instance
    result list; only Whisparr V2 (version string starting with '2') is
    reported as success.
    """
    try:
        # Get all configured instances
        api_keys = keys_manager.load_api_keys("whisparr")
        instances = api_keys.get("instances", [])

        if not instances:
            return jsonify({"success": False, "message": "No Whisparr instances configured"}), 404

        results = []
        for instance in instances:
            # Disabled instances are skipped entirely (no result entry)
            if not instance.get("enabled", True):
                continue

            api_url = instance.get("api_url")
            api_key = instance.get("api_key")
            instance_name = instance.get("name", "Default")

            if not api_url or not api_key:
                results.append({
                    "name": instance_name,
                    "success": False,
                    "message": "API URL or API Key missing"
                })
                continue

            # First try standard API endpoint
            version_url = f"{api_url.rstrip('/')}/api/system/status"
            headers = {"X-Api-Key": api_key}

            try:
                response = requests.get(version_url, headers=headers, timeout=10)

                # If we get a 404, try with the v3 path
                if response.status_code == 404:
                    whisparr_logger.debug(f"Standard API path failed for {instance_name}, trying v3 path")
                    v3_url = f"{api_url.rstrip('/')}/api/v3/system/status"
                    response = requests.get(v3_url, headers=headers, timeout=10)

                if response.status_code == 200:
                    version_data = response.json()
                    version = version_data.get("version", "Unknown")

                    # Validate that it's a V2 version
                    if version and version.startswith('2'):
                        results.append({
                            "name": instance_name,
                            "success": True,
                            "version": version,
                            "is_v2": True
                        })
                    elif version and version.startswith('3'):
                        # Reject Eros API version
                        results.append({
                            "name": instance_name,
                            "success": False,
                            "message": f"Incompatible Whisparr version {version} detected. Huntarr requires Whisparr V2.",
                            "version": version
                        })
                    else:
                        # Unexpected version
                        results.append({
                            "name": instance_name,
                            "success": False,
                            "message": f"Unexpected Whisparr version {version} detected. Huntarr requires Whisparr V2.",
                            "version": version
                        })
                else:
                    # API call failed
                    results.append({
                        "name": instance_name,
                        "success": False,
                        "message": f"Failed to get version information: HTTP {response.status_code}"
                    })
            except requests.exceptions.RequestException as e:
                # One unreachable instance doesn't fail the whole endpoint
                results.append({
                    "name": instance_name,
                    "success": False,
                    "message": f"Connection error: {str(e)}"
                })

        return jsonify({"success": True, "results": results})
    except Exception as e:
        whisparr_logger.error(f"Error getting Whisparr versions: {str(e)}")
        return jsonify({"success": False, "message": str(e)}), 500
+
@whisparr_bp.route('/logs', methods=['GET'])
def get_logs():
    """Get the log file for Whisparr"""
    try:
        log_file = APP_LOG_FILES.get("whisparr")
        if not log_file or not os.path.exists(log_file):
            return jsonify({"success": False, "message": "Log file not found"}), 404

        # Only the tail (last 200 lines) is returned to keep the payload small.
        with open(log_file, 'r') as handle:
            tail = handle.readlines()[-200:]

        return jsonify({"success": True, "logs": ''.join(tail)})
    except Exception as e:
        error_message = f"Error fetching Whisparr logs: {str(e)}"
        whisparr_logger.error(error_message)
        traceback.print_exc()
        return jsonify({"success": False, "message": error_message}), 500
+
@whisparr_bp.route('/clear-processed', methods=['POST'])
def clear_processed():
    """Clear the processed missing and upgrade files for Whisparr"""
    try:
        # Wipe both stateful-tracking files (missing + upgrades).
        for label, state_name in (
            ("missing items", "processed_missing"),
            ("quality upgrade", "processed_upgrades"),
        ):
            whisparr_logger.info(f"Clearing processed {label} state")
            reset_state_file("whisparr", state_name)

        return jsonify({
            "success": True,
            "message": "Successfully cleared Whisparr processed state"
        })
    except Exception as e:
        error_message = f"Error clearing Whisparr processed state: {str(e)}"
        whisparr_logger.error(error_message)
        return jsonify({"success": False, "message": error_message}), 500
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/auth.py b/Huntarr.io-6.3.6/src/primary/auth.py
new file mode 100644
index 0000000..01f320c
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/auth.py
@@ -0,0 +1,554 @@
+#!/usr/bin/env python3
+"""
+Authentication module for Huntarr
+Handles user creation, verification, and session management
+Including two-factor authentication
+"""
+
+import os
+import json
+import hashlib
+import secrets
+import time
+import pathlib
+import base64
+import io
+import qrcode
+import pyotp # Ensure pyotp is imported
+import re # Import the re module for regex
+from typing import Dict, Any, Optional, Tuple
+from flask import request, redirect, url_for, session
+from .utils.logger import logger # Ensure logger is imported
+
+# User directory setup
+USER_DIR = pathlib.Path("/config/user")
+USER_DIR.mkdir(parents=True, exist_ok=True)
+USER_FILE = USER_DIR / "credentials.json"
+
+# Session settings
+SESSION_EXPIRY = 60 * 60 * 24 * 7 # 1 week in seconds
+SESSION_COOKIE_NAME = "huntarr_session"
+
+# Store active sessions
+active_sessions = {}
+
+# --- Add Helper functions for user data ---
def get_user_data() -> Dict[str, Any]:
    """Load user data from the credentials file.

    Returns an empty dict when the file is missing or unreadable, so callers
    can treat "no data" uniformly.
    """
    if not USER_FILE.exists():
        logger.warning(f"Attempted to get user data, but file not found: {USER_FILE}")
        return {}
    try:
        with open(USER_FILE, 'r') as handle:
            return json.load(handle)
    except json.JSONDecodeError:
        logger.error(f"Error decoding JSON from user file: {USER_FILE}")
    except Exception as exc:
        logger.error(f"Error reading user file {USER_FILE}: {exc}", exc_info=True)
    return {}
+
def save_user_data(user_data: Dict[str, Any]) -> bool:
    """Persist user data to the credentials file.

    Returns True on success, False on any write error (which is logged).
    """
    try:
        logger.debug(f"Attempting to save user data to: {USER_FILE}")
        # Directory normally exists from startup; recreate defensively.
        USER_DIR.mkdir(parents=True, exist_ok=True)

        with open(USER_FILE, 'w') as handle:
            json.dump(user_data, handle, indent=4)  # indent for readability

        # Best-effort permission fix; failure here is only a warning.
        try:
            os.chmod(USER_FILE, 0o644)
            logger.debug(f"Set permissions 0o644 on {USER_FILE}")
        except Exception as perm_err:
            logger.warning(f"Could not set permissions on file {USER_FILE}: {perm_err}")

        logger.info(f"User data saved successfully to {USER_FILE}")
        return True
    except Exception as exc:
        logger.error(f"Error saving user file {USER_FILE}: {exc}", exc_info=True)
        return False
+# --- End Helper functions ---
+
def hash_password(password: str) -> str:
    """Hash a password for storage.

    Produces a salted SHA-256 digest encoded as "<salt>:<digest>" so the
    salt can be recovered by verify_password.
    """
    salt = secrets.token_hex(16)
    digest = hashlib.sha256(f"{password}{salt}".encode()).hexdigest()
    return f"{salt}:{digest}"
+
def verify_password(stored_password: str, provided_password: str) -> bool:
    """Verify a password against its stored "<salt>:<digest>" hash."""
    try:
        salt, expected = stored_password.split(':', 1)
        candidate = hashlib.sha256((provided_password + salt).encode()).hexdigest()
        # Constant-time comparison to avoid timing side channels.
        return secrets.compare_digest(candidate, expected)
    except Exception as exc:
        # Malformed stored hash (e.g. missing ':') lands here.
        logger.error(f"Error verifying password hash: {exc}", exc_info=True)
        return False
+
def hash_username(username: str) -> str:
    """Create a normalized hash of the username.

    Usernames differing only in case map to the same hash.
    """
    normalized = username.lower()
    return hashlib.sha256(normalized.encode()).hexdigest()
+
def validate_password_strength(password: str) -> Optional[str]:
    """Validate password strength based on defined criteria.

    Args:
        password: The password string to validate.

    Returns:
        An error message string if validation fails, None otherwise.
    """
    minimum_length = 8
    if len(password) < minimum_length:
        return "Password must be at least 8 characters long."
    return None
+
def user_exists() -> bool:
    """Check if a user has been created.

    A zero-byte credentials file counts as "no user".
    """
    if not USER_FILE.exists():
        return False
    return os.path.getsize(USER_FILE) > 0
+
def create_user(username: str, password: str) -> bool:
    """Create a new user.

    Stores the hashed username and salted password hash in the credentials
    file, with 2FA disabled by default.

    Args:
        username: Plain-text username (hashed before storage).
        password: Plain-text password (salted and hashed before storage).

    Returns:
        True on success, False on invalid input or write failure.
    """
    if not username or not password:
        logger.error("Attempted to create user with empty username or password")
        return False

    # Ensure user directory exists with proper permissions
    logger.info(f"Ensuring user directory exists: {USER_DIR}")
    USER_DIR.mkdir(parents=True, exist_ok=True)
    try:
        # Set appropriate permissions if not running as root
        logger.info(f"Setting permissions on directory: {USER_DIR}")
        os.chmod(USER_DIR, 0o755)
    except Exception as e:
        logger.warning(f"Could not set permissions on directory {USER_DIR}: {e}")

    # Credentials record: username/password stored only as hashes
    user_data = {
        "username": hash_username(username),
        "password": hash_password(password),
        "created_at": time.time(),
        "2fa_enabled": False,
        "2fa_secret": None
    }

    # Delegate persistence (file write + permission fix + logging) to
    # save_user_data so creation and later updates share one write path.
    if save_user_data(user_data):
        logger.info("User creation successful")
        return True
    return False
+
def verify_user(username: str, password: str, otp_code: str = None) -> Tuple[bool, bool]:
    """
    Verify user credentials

    Checks the provided username/password against the stored hashes, and
    when 2FA is enabled also validates the TOTP code.

    Returns:
        Tuple[bool, bool]: (auth_success, needs_2fa)
        needs_2fa is True only when the password was correct but a valid
        2FA code is still required (missing or wrong OTP).
    """
    if not user_exists():
        logger.warning("Login attempt failed: User does not exist.")
        return False, False

    try:
        with open(USER_FILE, 'r') as f:
            user_data = json.load(f)

        # Hash the provided username
        username_hash = hash_username(username)

        # Compare username and verify password
        if user_data.get("username") == username_hash:
            if verify_password(user_data.get("password", ""), password):
                # Check if 2FA is enabled
                two_fa_enabled = user_data.get("2fa_enabled", False)
                logger.debug(f"2FA enabled for user '{username}': {two_fa_enabled}")
                logger.debug(f"2FA secret present: {bool(user_data.get('2fa_secret'))}")
                logger.debug(f"OTP code provided: {bool(otp_code)}")

                if two_fa_enabled:
                    # If 2FA code was provided, verify it
                    if otp_code:
                        # TOTP check against the stored base32 secret
                        totp = pyotp.TOTP(user_data.get("2fa_secret"))
                        valid_code = totp.verify(otp_code)
                        logger.debug(f"OTP code validation result: {valid_code}")
                        if valid_code:
                            logger.info(f"User '{username}' authenticated successfully with 2FA.")
                            return True, False
                        else:
                            logger.warning(f"Login attempt failed for user '{username}': Invalid 2FA code.")
                            return False, True
                    else:
                        # No OTP code provided but 2FA is enabled; signal the
                        # frontend to show the 2FA input.
                        logger.warning(f"Login attempt failed for user '{username}': 2FA code required but not provided.")
                        logger.debug("Returning needs_2fa=True to trigger 2FA input display")
                        return False, True
                else:
                    # 2FA not enabled, password is correct
                    logger.info(f"User '{username}' authenticated successfully (no 2FA).")
                    return True, False
            else:
                logger.warning(f"Login attempt failed for user '{username}': Invalid password.")
                return False, False
    except Exception as e:
        logger.error(f"Error during user verification for '{username}': {e}", exc_info=True)

    # Reached when the username hash did not match or an exception occurred.
    logger.warning(f"Login attempt failed for user '{username}': Username not found or other error.")
    return False, False
+
def create_session(username: str) -> str:
    """Create a new session for an authenticated user.

    Returns the opaque session id to be stored in the client's cookie.
    """
    session_id = secrets.token_hex(32)
    now = time.time()
    # Store the actual username (not its hash) with the session record.
    active_sessions[session_id] = {
        "username": username,
        "created_at": now,
        "expires_at": now + SESSION_EXPIRY,
    }
    return session_id
+
def verify_session(session_id: str) -> bool:
    """Verify if a session is valid.

    Expired sessions are removed; valid sessions get a sliding expiry bump.
    """
    entry = active_sessions.get(session_id) if session_id else None
    if entry is None:
        return False

    now = time.time()
    if entry.get("expires_at", 0) < now:
        # Clean up expired session
        del active_sessions[session_id]
        return False

    # Extend session expiry (sliding window)
    entry["expires_at"] = now + SESSION_EXPIRY
    return True
+
def get_username_from_session(session_id: str) -> Optional[str]:
    """Get the username from a session, or None if the session is unknown."""
    if not session_id:
        return None
    entry = active_sessions.get(session_id)
    return entry.get("username") if entry is not None else None
+
def authenticate_request():
    """Flask route decorator to check if user is authenticated.

    Returns None to allow the request through, a redirect response for
    unauthenticated page loads, or a (json, 401) tuple for API calls.
    Order of checks: setup redirect -> public paths -> optional local-network
    bypass -> session cookie.
    """
    # If no user exists, redirect to setup
    if not user_exists():
        if request.path != "/setup" and not request.path.startswith(("/static/", "/api/setup")):
            return redirect("/setup")
        return None

    # Skip authentication for static files and the login/setup pages
    if request.path.startswith(("/static/", "/login", "/api/login", "/setup", "/api/setup")) or request.path == "/favicon.ico":
        return None

    # Check if the request is from a local network and bypass authentication if enabled
    # Get configuration setting for local network bypass
    local_access_bypass = False
    try:
        # Force reload settings from disk to ensure we have the latest
        from src.primary.settings_manager import load_settings
        from src.primary import settings_manager

        # Ensure we're getting fresh settings by clearing any cache
        if hasattr(settings_manager, 'settings_cache'):
            settings_manager.settings_cache = {}

        settings = load_settings("general") # Specify 'general' as the app_type
        general_settings = settings
        local_access_bypass = general_settings.get("local_access_bypass", False)
        logger.info(f"Local access bypass setting: {local_access_bypass}")

        # Debug print all general settings
        logger.debug(f"All general settings: {general_settings}")
    except Exception as e:
        # On any settings error, fail closed: bypass stays disabled.
        logger.error(f"Error loading local access bypass setting: {e}", exc_info=True)

    remote_addr = request.remote_addr
    logger.info(f"Request IP address: {remote_addr}")

    if local_access_bypass:
        # Common local network IP ranges, matched by string prefix.
        local_networks = [
            '127.0.0.1',      # localhost
            '::1',            # localhost IPv6
            '10.',            # 10.0.0.0/8
            '172.16.',        # 172.16.0.0/12
            '172.17.',
            '172.18.',
            '172.19.',
            '172.20.',
            '172.21.',
            '172.22.',
            '172.23.',
            '172.24.',
            '172.25.',
            '172.26.',
            '172.27.',
            '172.28.',
            '172.29.',
            '172.30.',
            '172.31.',
            '192.168.'        # 192.168.0.0/16
        ]
        is_local = False

        # Check if request is coming through a proxy.
        # NOTE(review): X-Forwarded-For is client-controllable unless a
        # trusted reverse proxy strips/sets it — an external caller could
        # spoof a local IP here to bypass auth. Confirm deployment always
        # sits behind a proxy that sanitizes this header.
        forwarded_for = request.headers.get('X-Forwarded-For')
        if forwarded_for:
            logger.debug(f"X-Forwarded-For header detected: {forwarded_for}")
            # Take the first IP in the chain which is typically the client's real IP
            possible_client_ip = forwarded_for.split(',')[0].strip()
            logger.debug(f"Checking if forwarded IP {possible_client_ip} is local")

            # Check if this forwarded IP is a local network IP
            for network in local_networks:
                if possible_client_ip == network or (network.endswith('.') and possible_client_ip.startswith(network)):
                    is_local = True
                    logger.info(f"Forwarded IP {possible_client_ip} is a local network IP (matches {network})")
                    break

        # Check if direct remote_addr is a local network IP if not already determined
        if not is_local:
            for network in local_networks:
                if remote_addr == network or (network.endswith('.') and remote_addr.startswith(network)):
                    is_local = True
                    logger.info(f"Direct IP {remote_addr} is a local network IP (matches {network})")
                    break

        if is_local:
            logger.info(f"Local network access from {remote_addr} - Authentication bypassed!")
            return None
        else:
            logger.warning(f"Access from {remote_addr} is not recognized as local network - Authentication required")
    else:
        logger.info("Local access bypass is DISABLED - Authentication required")

    # Check for valid session
    session_id = session.get(SESSION_COOKIE_NAME)
    if session_id and verify_session(session_id):
        return None

    # No valid session, redirect to login
    if request.path != "/login" and not request.path.startswith("/api/"):
        return redirect("/login")

    # For API calls, return 401 Unauthorized
    if request.path.startswith("/api/"):
        return {"error": "Unauthorized"}, 401

    return None
+
+def logout(session_id: str):
+ """Log out the current user by invalidating their session"""
+ if session_id and session_id in active_sessions:
+ del active_sessions[session_id]
+
+ # Clear the session cookie in Flask context (if available, otherwise handled by route)
+ # session.pop(SESSION_COOKIE_NAME, None) # This might be better handled solely in the route
+
+def is_2fa_enabled(username):
+ """Check if 2FA is enabled for a user."""
+ user_data = get_user_data()
+ return user_data.get('2fa_enabled', False)
+
+def generate_2fa_secret(username: str) -> Tuple[str, str]:
+    """
+    Generate a new 2FA secret and QR code
+
+    The secret is stored only as "temp_2fa_secret"; it becomes permanent
+    when verify_2fa_code(..., enable_on_verify=True) succeeds.
+
+    Returns:
+        Tuple[str, str]: (secret, qr_code_data_uri)
+
+    Raises:
+        Exception: if QR generation fails or the temporary secret cannot
+            be persisted (logged with traceback, then re-raised).
+    """
+    # Generate a random secret
+    secret = pyotp.random_base32()
+
+    # Create a TOTP object
+    totp = pyotp.TOTP(secret)
+
+    # Get the provisioning URI - Use the actual username here
+    # so authenticator apps label the account correctly.
+    uri = totp.provisioning_uri(name=username, issuer_name="Huntarr")
+
+    # Generate QR code
+    qr = qrcode.QRCode(
+        version=1,
+        error_correction=qrcode.constants.ERROR_CORRECT_L,
+        box_size=10,
+        border=4,
+    )
+    qr.add_data(uri)
+    qr.make(fit=True)
+
+    try:
+        img = qr.make_image(fill_color="black", back_color="white")
+
+        # Convert to base64 string
+        buffered = io.BytesIO()
+        img.save(buffered, format="PNG")
+        img_str = base64.b64encode(buffered.getvalue()).decode()
+
+        # Store the secret temporarily associated with the user
+        user_data = get_user_data()
+        user_data["temp_2fa_secret"] = secret
+        if save_user_data(user_data):
+            logger.info(f"Generated temporary 2FA secret for user '{username}'.")
+            # Data-URI form is embeddable directly in an <img src="..."> tag.
+            return secret, f"data:image/png;base64,{img_str}"
+        else:
+            logger.error(f"Failed to save temporary 2FA secret for user '{username}'.")
+            raise Exception("Failed to save user data with temporary 2FA secret.")
+
+    except Exception as e:
+        # Logs both QR/encoding failures and the save failure raised above.
+        logger.error(f"Error generating 2FA QR code for user '{username}': {e}", exc_info=True)
+        raise
+
+def verify_2fa_code(username: str, code: str, enable_on_verify: bool = False) -> bool:
+ """Verify a 2FA code against the temporary secret"""
+ user_data = get_user_data()
+ temp_secret = user_data.get("temp_2fa_secret")
+
+ if not temp_secret:
+ logger.warning(f"2FA verification attempt for '{username}' failed: No temporary secret found.")
+ return False
+
+ totp = pyotp.TOTP(temp_secret)
+ if totp.verify(code):
+ logger.info(f"2FA code verified successfully for user '{username}'.")
+ if enable_on_verify:
+ user_data["2fa_enabled"] = True
+ user_data["2fa_secret"] = temp_secret
+ user_data.pop("temp_2fa_secret", None)
+ if save_user_data(user_data):
+ logger.info(f"2FA enabled permanently for user '{username}'.")
+ else:
+ logger.error(f"Failed to save user data after enabling 2FA for '{username}'.")
+ return False
+ return True
+ else:
+ logger.warning(f"Invalid 2FA code provided by user '{username}'.")
+ return False
+
+def disable_2fa(password: str) -> bool:
+ """Disable 2FA for the current user (using only password - kept for potential other uses)"""
+ user_data = get_user_data()
+
+ # Verify password
+ if verify_password(user_data.get("password", ""), password):
+ user_data["2fa_enabled"] = False
+ user_data["2fa_secret"] = None
+ if save_user_data(user_data):
+ logger.info("2FA disabled successfully (password only).")
+ return True
+ else:
+ logger.error("Failed to save user data after disabling 2FA (password only).")
+ return False
+ else:
+ logger.warning("Failed to disable 2FA (password only): Invalid password provided.")
+ return False
+
+def disable_2fa_with_password_and_otp(username: str, password: str, otp_code: str) -> bool:
+ """Disable 2FA for the specified user, requiring both password and OTP code."""
+ user_data = get_user_data() # Assuming this gets data for the logged-in user implicitly
+
+ # 1. Verify Password
+ if not verify_password(user_data.get("password", ""), password):
+ logger.warning(f"Failed to disable 2FA for '{username}': Invalid password provided.")
+ return False
+
+ # 2. Verify OTP Code against permanent secret
+ perm_secret = user_data.get("2fa_secret")
+ if not user_data.get("2fa_enabled") or not perm_secret:
+ logger.error(f"Failed to disable 2FA for '{username}': 2FA is not enabled or secret missing.")
+ # Should ideally not happen if called from the correct UI state, but good to check
+ return False
+
+ totp = pyotp.TOTP(perm_secret)
+ if not totp.verify(otp_code):
+ logger.warning(f"Failed to disable 2FA for '{username}': Invalid OTP code provided.")
+ return False
+
+ # 3. Both verified, proceed to disable
+ user_data["2fa_enabled"] = False
+ user_data["2fa_secret"] = None
+ if save_user_data(user_data):
+ logger.info(f"2FA disabled successfully for '{username}' after verifying password and OTP.")
+ return True
+ else:
+ logger.error(f"Failed to save user data after disabling 2FA for '{username}'.")
+ return False
+
+def change_username(current_username: str, new_username: str, password: str) -> bool:
+ """Change the username for the current user"""
+ user_data = get_user_data()
+
+ # Verify current username and password
+ current_username_hash = hash_username(current_username)
+ if user_data.get("username") != current_username_hash:
+ logger.warning(f"Username change failed: Current username '{current_username}' does not match stored hash.")
+ return False
+
+ if not verify_password(user_data.get("password", ""), password):
+ logger.warning(f"Username change failed for '{current_username}': Invalid password provided.")
+ return False
+
+ # Update username
+ user_data["username"] = hash_username(new_username)
+ if save_user_data(user_data):
+ logger.info(f"Username changed successfully from '{current_username}' to '{new_username}'.")
+ return True
+ else:
+ logger.error(f"Failed to save user data after changing username for '{current_username}'.")
+ return False
+
+def change_password(current_password: str, new_password: str) -> bool:
+ """Change the password for the current user"""
+ user_data = get_user_data()
+
+ # Verify current password
+ if not verify_password(user_data.get("password", ""), current_password):
+ logger.warning("Password change failed: Invalid current password provided.")
+ return False
+
+ # Update password
+ user_data["password"] = hash_password(new_password)
+ if save_user_data(user_data):
+ logger.info("Password changed successfully.")
+ return True
+ else:
+ logger.error("Failed to save user data after changing password.")
+ return False
+
+def get_app_url_and_key(app_type: str) -> Tuple[str, str]:
+ """
+ Get the API URL and API key for a specific app type
+
+ Args:
+ app_type: The app type (sonarr, radarr, lidarr, readarr)
+
+ Returns:
+ Tuple[str, str]: (api_url, api_key)
+ """
+ from primary import keys_manager
+ return keys_manager.get_api_keys(app_type)
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/background.py b/Huntarr.io-6.3.6/src/primary/background.py
new file mode 100644
index 0000000..689771c
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/background.py
@@ -0,0 +1,543 @@
+#!/usr/bin/env python3
+"""
+Huntarr - Main entry point for the application
+Supports multiple Arr applications running concurrently
+"""
+
+import time
+import sys
+import os
+# import socket # No longer used directly
+import signal
+import importlib
+import logging
+import threading
+from typing import Dict, List, Optional, Callable, Union, Tuple
+
+# Define the version number
+__version__ = "1.0.0" # Consider updating this based on changes
+
+# Set up logging first
+from src.primary.utils.logger import setup_main_logger, get_logger # Import get_logger
+logger = setup_main_logger()
+
+# Import necessary modules
+from src.primary import config, settings_manager
+# Removed keys_manager import as settings_manager handles API details
+from src.primary.state import check_state_reset, calculate_reset_time
+# from src.primary.utils.app_utils import get_ip_address # No longer used here
+
+# Track active threads and stop flag
+app_threads: Dict[str, threading.Thread] = {}
+stop_event = threading.Event() # Use an event for clearer stop signaling
+
+def app_specific_loop(app_type: str) -> None:
+    """
+    Main processing loop for a specific Arr application.
+
+    Runs until the module-level stop_event is set. Each cycle: reloads
+    settings, resolves the configured instance list (multi-instance via
+    get_configured_instances, else single-instance from base settings),
+    checks connectivity and the download-queue limit per instance, runs
+    the missing/upgrade processors, optionally runs Swaparr, then sleeps
+    (interruptible by a /config/reset/<app>.reset file or stop_event).
+
+    Args:
+        app_type: The type of Arr application (sonarr, radarr, lidarr, readarr)
+    """
+    app_logger = get_logger(app_type)
+    app_logger.info(f"=== [{app_type.upper()}] Thread starting ===")
+
+    # Dynamically import app-specific modules
+    process_missing = None
+    process_upgrades = None
+    get_queue_size = None
+    check_connection = None
+    get_instances_func = None # Default: No multi-instance function found
+    hunt_missing_setting = ""
+    hunt_upgrade_setting = ""
+
+    try:
+        # Import the main app module first to check for get_configured_instances
+        app_module = importlib.import_module(f'src.primary.apps.{app_type}')
+        app_logger.debug(f"Attributes found in {app_module.__name__}: {dir(app_module)}")
+        api_module = importlib.import_module(f'src.primary.apps.{app_type}.api')
+        missing_module = importlib.import_module(f'src.primary.apps.{app_type}.missing')
+        upgrade_module = importlib.import_module(f'src.primary.apps.{app_type}.upgrade')
+
+        # Try to get the multi-instance function from the main app module
+        try:
+            get_instances_func = getattr(app_module, 'get_configured_instances')
+            app_logger.debug(f"Found 'get_configured_instances' in {app_module.__name__}")
+        except AttributeError:
+            app_logger.debug(f"'get_configured_instances' not found in {app_module.__name__}. Assuming single instance mode.")
+            get_instances_func = None # Explicitly set to None if not found
+
+        check_connection = getattr(api_module, 'check_connection')
+        get_queue_size = getattr(api_module, 'get_download_queue_size', lambda api_url, api_key, api_timeout: 0) # Default if not found
+
+        # Per-app wiring: processing entry points plus the settings keys
+        # that carry the hunt quotas.
+        # NOTE(review): the per-app re-imports below are redundant (the
+        # missing/upgrade modules were already imported above) but harmless.
+        if app_type == "sonarr":
+            missing_module = importlib.import_module('src.primary.apps.sonarr.missing')
+            upgrade_module = importlib.import_module('src.primary.apps.sonarr.upgrade')
+            process_missing = getattr(missing_module, 'process_missing_episodes')
+            process_upgrades = getattr(upgrade_module, 'process_cutoff_upgrades')
+            hunt_missing_setting = "hunt_missing_items"
+            hunt_upgrade_setting = "hunt_upgrade_items"
+        elif app_type == "radarr":
+            missing_module = importlib.import_module('src.primary.apps.radarr.missing')
+            upgrade_module = importlib.import_module('src.primary.apps.radarr.upgrade')
+            process_missing = getattr(missing_module, 'process_missing_movies')
+            process_upgrades = getattr(upgrade_module, 'process_cutoff_upgrades')
+            hunt_missing_setting = "hunt_missing_movies"
+            hunt_upgrade_setting = "hunt_upgrade_movies"
+        elif app_type == "lidarr":
+            missing_module = importlib.import_module('src.primary.apps.lidarr.missing')
+            upgrade_module = importlib.import_module('src.primary.apps.lidarr.upgrade')
+            # Use process_missing_albums as the function name
+            process_missing = getattr(missing_module, 'process_missing_albums')
+            process_upgrades = getattr(upgrade_module, 'process_cutoff_upgrades')
+            hunt_missing_setting = "hunt_missing_items"
+            # Use hunt_upgrade_items
+            hunt_upgrade_setting = "hunt_upgrade_items"
+        elif app_type == "readarr":
+            missing_module = importlib.import_module('src.primary.apps.readarr.missing')
+            upgrade_module = importlib.import_module('src.primary.apps.readarr.upgrade')
+            process_missing = getattr(missing_module, 'process_missing_books')
+            process_upgrades = getattr(upgrade_module, 'process_cutoff_upgrades')
+            hunt_missing_setting = "hunt_missing_books"
+            hunt_upgrade_setting = "hunt_upgrade_books"
+        elif app_type == "whisparr":
+            missing_module = importlib.import_module('src.primary.apps.whisparr.missing')
+            upgrade_module = importlib.import_module('src.primary.apps.whisparr.upgrade')
+            process_missing = getattr(missing_module, 'process_missing_scenes')
+            process_upgrades = getattr(upgrade_module, 'process_cutoff_upgrades')
+            hunt_missing_setting = "hunt_missing_items" # Updated to new name
+            hunt_upgrade_setting = "hunt_upgrade_items" # Updated to new name
+        elif app_type == "eros":
+            missing_module = importlib.import_module('src.primary.apps.eros.missing')
+            upgrade_module = importlib.import_module('src.primary.apps.eros.upgrade')
+            process_missing = getattr(missing_module, 'process_missing_items')
+            process_upgrades = getattr(upgrade_module, 'process_cutoff_upgrades')
+            hunt_missing_setting = "hunt_missing_items"
+            hunt_upgrade_setting = "hunt_upgrade_items"
+        else:
+            app_logger.error(f"Unsupported app_type: {app_type}")
+            return # Exit thread if app type is invalid
+
+    except (ImportError, AttributeError) as e:
+        app_logger.error(f"Failed to import modules or functions for {app_type}: {e}", exc_info=True)
+        return # Exit thread if essential modules fail to load
+
+    # Create app-specific logger using provided function
+    # NOTE(review): this rebinds app_logger via logging.getLogger("huntarr.<app>");
+    # presumably that resolves to the same logger configured by get_logger()
+    # above - confirm, otherwise handler/level configuration may differ.
+    app_logger = logging.getLogger(f"huntarr.{app_type}")
+
+    while not stop_event.is_set():
+        # --- Load Settings for this Cycle --- #
+        try:
+            # Load all settings for this app for the current cycle
+            app_settings = settings_manager.load_settings(app_type) # Corrected function name
+            if not app_settings: # Handle case where loading fails
+                app_logger.error("Failed to load settings. Skipping cycle.")
+                stop_event.wait(60) # Wait a minute before retrying
+                continue
+
+            # Get global settings needed for cycle timing
+            sleep_duration = app_settings.get("sleep_duration", 900)
+            api_timeout = app_settings.get("api_timeout", 120) # Default to 120 seconds
+
+        except Exception as e:
+            app_logger.error(f"Error loading settings for cycle: {e}", exc_info=True)
+            stop_event.wait(60) # Wait before retrying
+            continue
+
+        # --- State Reset Check --- #
+        check_state_reset(app_type)
+
+        app_logger.info(f"=== Starting {app_type.upper()} cycle ===")
+
+        # Check if we need to use multi-instance mode
+        instances_to_process = []
+
+        # Use the dynamically loaded function (if found)
+        if get_instances_func:
+            # Multi-instance mode supported
+            try:
+                instances_to_process = get_instances_func() # Call the dynamically loaded function
+                if instances_to_process:
+                    app_logger.info(f"Found {len(instances_to_process)} configured {app_type} instances to process")
+                else:
+                    # No instances found via get_configured_instances
+                    app_logger.warning(f"No configured {app_type} instances found. Skipping cycle.")
+                    stop_event.wait(sleep_duration)
+                    continue
+            except Exception as e:
+                app_logger.error(f"Error calling get_configured_instances function: {e}", exc_info=True)
+                stop_event.wait(60)
+                continue
+        else:
+            # get_instances_func is None (either not defined in app module or import failed earlier)
+            # Fallback to single instance mode using base settings if available
+            api_url = app_settings.get("api_url")
+            api_key = app_settings.get("api_key")
+            instance_name = app_settings.get("name", f"{app_type.capitalize()} Default") # Use 'name' or default
+
+            if api_url and api_key:
+                app_logger.info(f"Processing {app_type} as single instance: {instance_name}")
+                # Create a list with a single dict matching the multi-instance structure
+                instances_to_process = [{
+                    "instance_name": instance_name,
+                    "api_url": api_url,
+                    "api_key": api_key
+                }]
+            else:
+                app_logger.warning(f"No 'get_configured_instances' function found and no valid single instance config (URL/Key) for {app_type}. Skipping cycle.")
+                stop_event.wait(sleep_duration)
+                continue
+
+        # If after all checks, instances_to_process is still empty
+        if not instances_to_process:
+            app_logger.warning(f"No valid {app_type} instances to process this cycle (unexpected state). Skipping.")
+            stop_event.wait(sleep_duration)
+            continue
+
+        # Process each instance dictionary returned by get_configured_instances
+        processed_any_items = False
+        for instance_details in instances_to_process:
+            if stop_event.is_set():
+                break
+
+            instance_name = instance_details.get("instance_name", "Default") # Use the dict from get_configured_instances
+            app_logger.info(f"Processing {app_type} instance: {instance_name}")
+
+            # Get instance-specific settings from the instance_details dict
+            api_url = instance_details.get("api_url", "")
+            api_key = instance_details.get("api_key", "")
+
+            # Get global/shared settings from app_settings loaded at the start of the loop
+            # Example: monitored_only = app_settings.get("monitored_only", True)
+
+            # --- Connection Check --- #
+            if not api_url or not api_key:
+                app_logger.warning(f"Missing API URL or Key for instance '{instance_name}'. Skipping.")
+                continue
+            try:
+                # Use instance details for connection check
+                app_logger.debug(f"Checking connection to {app_type} instance '{instance_name}' at {api_url} with timeout {api_timeout}s")
+                connected = check_connection(api_url, api_key, api_timeout=api_timeout)
+                if not connected:
+                    app_logger.warning(f"Failed to connect to {app_type} instance '{instance_name}' at {api_url}. Skipping.")
+                    continue
+                app_logger.info(f"Successfully connected to {app_type} instance: {instance_name}")
+            except Exception as e:
+                app_logger.error(f"Error connecting to {app_type} instance '{instance_name}': {e}", exc_info=True)
+                continue # Skip this instance if connection fails
+
+            # --- Check if Hunt Modes are Enabled --- #
+            # These checks use the hunt_missing_setting/hunt_upgrade_setting defined earlier
+            # which correspond to keys in the main app_settings dict (e.g., 'hunt_missing_items')
+            hunt_missing_value = app_settings.get(hunt_missing_setting, 0)
+            hunt_upgrade_value = app_settings.get(hunt_upgrade_setting, 0)
+
+            # A quota of 0 (or less) disables that hunt mode for this cycle.
+            hunt_missing_enabled = hunt_missing_value > 0
+            hunt_upgrade_enabled = hunt_upgrade_value > 0
+
+            # --- Queue Size Check --- # Moved inside loop
+            # Get maximum_download_queue_size from general settings (still using minimum_download_queue_size key for backward compatibility)
+            general_settings = settings_manager.load_settings('general')
+            max_queue_size = general_settings.get("minimum_download_queue_size", -1)
+            app_logger.info(f"Using maximum download queue size: {max_queue_size} from general settings")
+
+            # -1 (the default) means "no queue limit".
+            if max_queue_size >= 0:
+                try:
+                    # Use instance details for queue check
+                    current_queue_size = get_queue_size(api_url, api_key, api_timeout)
+                    if current_queue_size >= max_queue_size:
+                        app_logger.info(f"Download queue size ({current_queue_size}) meets or exceeds maximum ({max_queue_size}) for {instance_name}. Skipping cycle for this instance.")
+                        continue # Skip processing for this instance
+                    else:
+                        app_logger.info(f"Queue size ({current_queue_size}) is below maximum ({max_queue_size}). Proceeding.")
+                except Exception as e:
+                    app_logger.warning(f"Could not get download queue size for {instance_name}. Proceeding anyway. Error: {e}", exc_info=False) # Log less verbosely
+
+            # Prepare args dictionary for processing functions
+            # Combine instance details with general app settings for the processing functions
+            # Assuming app_settings already contains most general settings, add instance specifics
+            combined_settings = app_settings.copy() # Start with general settings
+            combined_settings.update(instance_details) # Add/overwrite with instance specifics (name, url, key)
+
+            # Ensure settings from general.json are consistently used for all apps
+            combined_settings["api_timeout"] = settings_manager.get_advanced_setting("api_timeout", 120)
+            combined_settings["command_wait_delay"] = settings_manager.get_advanced_setting("command_wait_delay", 1)
+            combined_settings["command_wait_attempts"] = settings_manager.get_advanced_setting("command_wait_attempts", 600)
+
+            # Define the stop check function
+            stop_check_func = stop_event.is_set
+
+            # --- Process Missing --- #
+            if hunt_missing_enabled and process_missing:
+                try:
+                    # Extract settings for direct function calls
+                    api_url = combined_settings.get("api_url", "").strip()
+                    api_key = combined_settings.get("api_key", "").strip()
+                    api_timeout = combined_settings.get("api_timeout", 120)
+                    monitored_only = combined_settings.get("monitored_only", True)
+                    skip_future_episodes = combined_settings.get("skip_future_episodes", True)
+                    skip_series_refresh = combined_settings.get("skip_series_refresh", False)
+                    hunt_missing_items = combined_settings.get("hunt_missing_items", 0)
+                    hunt_missing_mode = combined_settings.get("hunt_missing_mode", "episodes")
+                    command_wait_delay = combined_settings.get("command_wait_delay", 1)
+                    command_wait_attempts = combined_settings.get("command_wait_attempts", 600)
+
+                    # Sonarr has migrated to an explicit-keyword signature;
+                    # the other apps still take the whole settings dict.
+                    if app_type == "sonarr":
+                        processed_missing = process_missing(
+                            api_url=api_url,
+                            api_key=api_key,
+                            instance_name=instance_name, # Added the required instance_name parameter
+                            api_timeout=api_timeout,
+                            monitored_only=monitored_only,
+                            skip_future_episodes=skip_future_episodes,
+                            skip_series_refresh=skip_series_refresh,
+                            hunt_missing_items=hunt_missing_items,
+                            hunt_missing_mode=hunt_missing_mode,
+                            command_wait_delay=command_wait_delay,
+                            command_wait_attempts=command_wait_attempts,
+                            stop_check=stop_check_func
+                        )
+                    else:
+                        # For other apps that still use the old signature
+                        processed_missing = process_missing(app_settings=combined_settings, stop_check=stop_check_func)
+
+                    if processed_missing:
+                        processed_any_items = True
+                except Exception as e:
+                    app_logger.error(f"Error during missing processing for {instance_name}: {e}", exc_info=True)
+
+            # --- Process Upgrades --- #
+            if hunt_upgrade_enabled and process_upgrades:
+                try:
+                    # Extract settings for direct function calls (only for Sonarr)
+                    if app_type == "sonarr":
+                        api_url = combined_settings.get("api_url", "").strip()
+                        api_key = combined_settings.get("api_key", "").strip()
+                        api_timeout = combined_settings.get("api_timeout", 120)
+                        monitored_only = combined_settings.get("monitored_only", True)
+                        skip_series_refresh = combined_settings.get("skip_series_refresh", False)
+                        hunt_upgrade_items = combined_settings.get("hunt_upgrade_items", 0)
+                        command_wait_delay = combined_settings.get("command_wait_delay", 1)
+                        command_wait_attempts = combined_settings.get("command_wait_attempts", 600)
+
+                        processed_upgrades = process_upgrades(
+                            api_url=api_url,
+                            api_key=api_key,
+                            instance_name=instance_name, # Added the required instance_name parameter
+                            api_timeout=api_timeout,
+                            monitored_only=monitored_only,
+                            skip_series_refresh=skip_series_refresh,
+                            hunt_upgrade_items=hunt_upgrade_items,
+                            command_wait_delay=command_wait_delay,
+                            command_wait_attempts=command_wait_attempts,
+                            stop_check=stop_check_func
+                        )
+                    else:
+                        # For other apps that still use the old signature
+                        processed_upgrades = process_upgrades(app_settings=combined_settings, stop_check=stop_check_func)
+
+                    if processed_upgrades:
+                        processed_any_items = True
+                except Exception as e:
+                    app_logger.error(f"Error during upgrade processing for {instance_name}: {e}", exc_info=True)
+
+            # Small delay between instances if needed (optional)
+            if not stop_event.is_set():
+                time.sleep(1) # Short pause
+
+            # --- Process Swaparr (stalled downloads) --- #
+            try:
+                # Try to import Swaparr module
+                # NOTE(review): caching the import via a locals() lookup is
+                # fragile; a module-level import guard would be clearer.
+                if not 'process_stalled_downloads' in locals():
+                    try:
+                        # Import directly from handler module to avoid circular imports
+                        from src.primary.apps.swaparr.handler import process_stalled_downloads
+                        swaparr_logger = get_logger("swaparr")
+                        swaparr_logger.debug(f"Successfully imported Swaparr module")
+                    except (ImportError, AttributeError) as e:
+                        app_logger.debug(f"Swaparr module not available or missing functions: {e}")
+                        process_stalled_downloads = None
+
+                # Check if Swaparr is enabled
+                swaparr_settings = settings_manager.load_settings("swaparr")
+                if swaparr_settings and swaparr_settings.get("enabled", False) and process_stalled_downloads:
+                    app_logger.info(f"Running Swaparr on {app_type} instance: {instance_name}")
+                    process_stalled_downloads(app_type, combined_settings, swaparr_settings)
+                    app_logger.info(f"Completed Swaparr processing for {app_type} instance: {instance_name}")
+            except Exception as e:
+                app_logger.error(f"Error during Swaparr processing for {instance_name}: {e}", exc_info=True)
+
+        # --- Cycle End & Sleep --- #
+        calculate_reset_time(app_type) # Pass app_type here if needed by the function
+
+        # Log cycle completion
+        if processed_any_items:
+            app_logger.info(f"=== {app_type.upper()} cycle finished. Processed items across instances. ===")
+        else:
+            app_logger.info(f"=== {app_type.upper()} cycle finished. No items processed in any instance. ===")
+
+        # Calculate sleep duration (use configured or default value)
+        sleep_seconds = app_settings.get("sleep_duration", 900) # Default to 15 minutes
+
+        # Sleep with periodic checks for reset file
+        app_logger.info(f"Sleeping for {sleep_seconds} seconds before next cycle...")
+
+        # Use shorter sleep intervals and check for reset file
+        wait_interval = 1 # Check every second to be more responsive
+        elapsed = 0
+        reset_file_path = f"/config/reset/{app_type}.reset"
+
+        while elapsed < sleep_seconds:
+            # Check if stop event is set
+            if stop_event.is_set():
+                app_logger.info("Stop event detected during sleep. Breaking out of sleep cycle.")
+                break
+
+            # Check if reset file exists
+            if os.path.exists(reset_file_path):
+                try:
+                    # Read timestamp from the file (if it exists)
+                    with open(reset_file_path, 'r') as f:
+                        timestamp = f.read().strip()
+                    app_logger.info(f"!!! RESET FILE DETECTED !!! Manual cycle reset triggered for {app_type} (timestamp: {timestamp}). Starting new cycle immediately.")
+
+                    # Delete the reset file
+                    os.remove(reset_file_path)
+                    app_logger.info(f"Reset file removed for {app_type}. Starting new cycle now.")
+                    break
+                except Exception as e:
+                    app_logger.error(f"Error processing reset file for {app_type}: {e}", exc_info=True)
+                    # Try to remove the file even if reading failed
+                    try:
+                        os.remove(reset_file_path)
+                    except:
+                        pass
+                    break
+
+            # Sleep for a short interval
+            stop_event.wait(wait_interval)
+            elapsed += wait_interval
+
+            # If we've slept for at least 30 seconds, update the logger message every 30 seconds
+            if elapsed > 0 and elapsed % 30 == 0:
+                app_logger.info(f"Still sleeping, {sleep_seconds - elapsed} seconds remaining before next cycle...")
+
+    app_logger.info(f"=== [{app_type.upper()}] Thread stopped ====")
+
+def reset_app_cycle(app_type: str) -> bool:
+ """
+ Trigger a manual reset of an app's cycle.
+
+ Args:
+ app_type: The type of Arr application (sonarr, radarr, lidarr, readarr, etc.)
+
+ Returns:
+ bool: True if the reset was triggered, False if the app is not running
+ """
+ logger.info(f"Manual cycle reset requested for {app_type} - Creating reset file")
+
+ # Create a reset file for this app
+ reset_file_path = f"/config/reset/{app_type}.reset"
+ try:
+ with open(reset_file_path, 'w') as f:
+ f.write(str(int(time.time())))
+ logger.info(f"Reset file created for {app_type}. Cycle will reset on next check.")
+ return True
+ except Exception as e:
+ logger.error(f"Error creating reset file for {app_type}: {e}", exc_info=True)
+ return False
+
+def start_app_threads():
+    """Start (or restart) a daemon worker thread for every configured app."""
+    configured_apps_list = settings_manager.get_configured_apps() # Corrected function name
+    configured_apps = {app: True for app in configured_apps_list} # Convert list to dict format expected below
+
+    for app_type, is_configured in configured_apps.items():
+        if is_configured:
+            # Optional: Add an explicit 'enabled' setting check if desired
+            # enabled = settings_manager.get_setting(app_type, "enabled", True)
+            # if not enabled:
+            #     logger.info(f"Skipping {app_type} thread as it is disabled in settings.")
+            #     continue
+
+            # Start a thread when none exists yet or the previous one died.
+            if app_type not in app_threads or not app_threads[app_type].is_alive():
+                if app_type in app_threads: # If it existed but died
+                    logger.warning(f"{app_type} thread died, restarting...")
+                    del app_threads[app_type]
+                else: # Starting for the first time
+                    logger.info(f"Starting thread for {app_type}...")
+
+                # Daemon thread so process shutdown is never blocked by a worker.
+                thread = threading.Thread(target=app_specific_loop, args=(app_type,), name=f"{app_type}-Loop", daemon=True)
+                app_threads[app_type] = thread
+                thread.start()
+        # NOTE(review): unreachable branch - configured_apps maps every key
+        # to True above, so this elif can never be taken. Kept for when the
+        # dict is built with real enabled/disabled values.
+        elif app_type in app_threads and app_threads[app_type].is_alive():
+            # If app becomes un-configured, stop its thread? Or let it fail connection check?
+            # For now, let it run and fail connection check.
+            logger.warning(f"{app_type} is no longer configured. Thread will likely stop after failing connection checks.")
+        # else: # App not configured and no thread running - do nothing
+        #     logger.debug(f"{app_type} is not configured. No thread started.")
+        pass # Corrected indentation
+
+def check_and_restart_threads():
+ """Check if any threads have died and restart them if the app is still configured."""
+ configured_apps_list = settings_manager.get_configured_apps() # Corrected function name
+ configured_apps = {app: True for app in configured_apps_list} # Convert list to dict format expected below
+
+ for app_type, thread in list(app_threads.items()):
+ if not thread.is_alive():
+ logger.warning(f"{app_type} thread died unexpectedly.")
+ del app_threads[app_type] # Remove dead thread
+ # Only restart if it's still configured
+ if configured_apps.get(app_type, False):
+ logger.info(f"Restarting thread for {app_type}...")
+ new_thread = threading.Thread(target=app_specific_loop, args=(app_type,), name=f"{app_type}-Loop", daemon=True)
+ app_threads[app_type] = new_thread
+ new_thread.start()
+ else:
+ logger.info(f"Not restarting {app_type} thread as it is no longer configured.")
+
+def shutdown_handler(signum, frame):
+ """Handle termination signals (SIGINT, SIGTERM)."""
+ logger.info(f"Received signal {signum}. Initiating shutdown...")
+ stop_event.set() # Signal all threads to stop
+
+def shutdown_threads():
+ """Wait for all threads to finish."""
+ logger.info("Waiting for app threads to finish...")
+ active_thread_list = list(app_threads.values())
+ for thread in active_thread_list:
+ thread.join(timeout=15) # Wait up to 15 seconds per thread
+ if thread.is_alive():
+ logger.warning(f"Thread {thread.name} did not stop gracefully.")
+ logger.info("All app threads stopped.")
+
+def start_huntarr():
+ """Main entry point for Huntarr background tasks."""
+ logger.info(f"--- Starting Huntarr Background Tasks v{__version__} --- ")
+
+ # Perform initial settings migration if specified (e.g., via env var or arg)
+ if os.environ.get("HUNTARR_RUN_MIGRATION", "false").lower() == "true":
+ logger.info("Running settings migration from huntarr.json (if found)...")
+ settings_manager.migrate_from_huntarr_json()
+
+ # Log initial configuration for all known apps
+ for app_name in settings_manager.KNOWN_APP_TYPES: # Corrected attribute name
+ try:
+ config.log_configuration(app_name)
+ except Exception as e:
+ logger.error(f"Error logging initial configuration for {app_name}: {e}")
+
+ try:
+ # Main loop: Start and monitor app threads
+ while not stop_event.is_set():
+ start_app_threads() # Start/Restart threads for configured apps
+ # check_and_restart_threads() # This is implicitly handled by start_app_threads checking is_alive
+ stop_event.wait(15) # Check for stop signal every 15 seconds
+
+ except Exception as e:
+ logger.exception(f"Unexpected error in main monitoring loop: {e}")
+ finally:
+ logger.info("Background task main loop exited. Shutting down threads...")
+ if not stop_event.is_set():
+ stop_event.set() # Ensure stop is signaled if loop exited unexpectedly
+ shutdown_threads()
+ logger.info("--- Huntarr Background Tasks stopped --- ")
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/config.py b/Huntarr.io-6.3.6/src/primary/config.py
new file mode 100644
index 0000000..c2e7190
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/config.py
@@ -0,0 +1,170 @@
+#!/usr/bin/env python3
+"""
+Configuration module for Huntarr
+Provides utility functions to access settings via settings_manager
+and perform configuration-related tasks like logging.
+Removes the old concept of loading a single app's config into global constants.
+"""
+
+import os
+import sys
+import logging
+import traceback
+from src.primary import settings_manager
+from src.primary.utils.logger import logger, get_logger # Import get_logger
+
+# Removed global constants like APP_TYPE, API_URL, API_KEY, SLEEP_DURATION etc.
+# Settings should be fetched directly using settings_manager when needed.
+
+# Enable debug logging across the application
+# Set to True for detailed logs, False for production
+DEBUG_MODE = False # Changed default to False; NOTE(review): not read by get_debug_mode() below, which queries settings instead — confirm still needed
+
+# Add a function to get the debug mode from settings
+def get_debug_mode():
+    """Get the debug mode setting from general settings"""
+    try:
+        return settings_manager.get_setting("general", "debug_mode", False)
+    except Exception:
+        return False  # fail safe: any settings error means debug stays off
+
+# Determine the hunt mode for a specific app
+def determine_hunt_mode(app_name: str) -> str:
+    """Determine the hunt mode for a specific app based on its settings.
+
+    Returns one of "both", "missing", "upgrade", or "disabled"."""
+    hunt_missing = 0
+    hunt_upgrade = 0
+
+    if app_name == "sonarr":
+        hunt_missing = settings_manager.get_setting(app_name, "hunt_missing_items", 0)
+        hunt_upgrade = settings_manager.get_setting(app_name, "hunt_upgrade_items", 0)
+    elif app_name == "radarr":
+        hunt_missing = settings_manager.get_setting(app_name, "hunt_missing_movies", 0)
+        hunt_upgrade = settings_manager.get_setting(app_name, "hunt_upgrade_movies", 0)
+    elif app_name.lower() == 'lidarr':
+        # Use hunt_missing_items instead of hunt_missing_albums
+        hunt_missing = settings_manager.get_setting(app_name, "hunt_missing_items", 0)
+        # Use hunt_upgrade_items instead of hunt_upgrade_albums
+        hunt_upgrade = settings_manager.get_setting(app_name, "hunt_upgrade_items", 0)
+
+        # NOTE(review): hunt_missing_mode is fetched but never used in this function — confirm whether it should affect the result
+        hunt_missing_mode = settings_manager.get_setting(app_name, "hunt_missing_mode", "artist")
+    elif app_name == "readarr":
+        hunt_missing = settings_manager.get_setting(app_name, "hunt_missing_books", 0)
+        hunt_upgrade = settings_manager.get_setting(app_name, "hunt_upgrade_books", 0)
+    else:
+        # Handle unknown app types if necessary, or just return disabled
+        return "disabled"
+
+    # Determine mode based on fetched values
+    if hunt_missing > 0 and hunt_upgrade > 0:
+        return "both"
+    elif hunt_missing > 0:
+        return "missing"
+    elif hunt_upgrade > 0:
+        return "upgrade"
+    else:
+        return "disabled"
+
+# Configure logging level based on an app's debug setting
+def configure_logging(app_name: str = None):
+    """Configure logging level based on the debug setting of a specific app or globally.
+
+    Side effect: also sets the root logger level (see note below)."""
+    try:
+        debug_mode = get_debug_mode()
+        log_instance = logger # Default to the main logger
+
+        if app_name:
+            debug_mode = settings_manager.get_setting(app_name, "debug_mode", False)
+            log_instance = get_logger(app_name) # Get the specific app logger
+        # else: # Optional: Could check a global debug setting if needed
+        #     debug_mode = settings_manager.get_setting("global", "debug_mode", False)
+
+        level = logging.DEBUG if debug_mode else logging.INFO
+
+        # Configure the specific app logger
+        if app_name and log_instance:
+            log_instance.setLevel(level)
+
+        # Always configure the root logger as well (or adjust based on desired behavior)
+        # If you want root logger level controlled by a specific app, this needs refinement.
+        # For now, let's set the root logger based on the *last* app configured or global.
+        root_logger = logging.getLogger()
+        root_logger.setLevel(level)
+
+        # Optional: Configure handlers if not done elsewhere
+        # Example: Ensure handlers exist and set their level
+        # for handler in log_instance.handlers:
+        #     handler.setLevel(level)
+        # for handler in root_logger.handlers:
+        #     handler.setLevel(level)
+
+    except Exception as e:
+        print(f"CRITICAL ERROR in configure_logging for app '{app_name}': {str(e)}", file=sys.stderr)
+        print(f"Traceback: {traceback.format_exc()}", file=sys.stderr)
+        # Try to log it anyway
+        if logger:
+            logger.error(f"Error in configure_logging for app '{app_name}': {str(e)}")
+            logger.error(traceback.format_exc())
+        # Decide whether to raise or continue
+        # raise
+
+# Log the configuration for a specific app
+def log_configuration(app_name: str):
+    """Log the current configuration settings for a specific app."""
+    log = get_logger(app_name) # Use the specific app's logger
+    settings = settings_manager.load_settings(app_name) # Corrected function name
+
+    if not settings:
+        log.error(f"Could not load settings for app: {app_name}. Cannot log configuration.")
+        return
+
+    api_url = settings.get("api_url", "")
+    api_key = settings.get("api_key", "")
+    debug_mode = settings.get("debug_mode", False)
+    sleep_duration = settings.get("sleep_duration", 900)
+    # Get state reset interval
+    state_reset_interval = settings_manager.get_advanced_setting("stateful_management_hours", 168)
+    monitored_only = settings.get("monitored_only", True)
+    min_queue_size = settings.get("minimum_download_queue_size", -1)
+
+    log.info(f"--- Configuration for {app_name} ---")
+    log.info(f"API URL: {api_url}")
+    log.info(f"API Key: {'[REDACTED]' if api_key else 'Not Set'}")
+    log.info(f"Debug Mode: {debug_mode}")
+    log.info(f"Hunt Mode: {determine_hunt_mode(app_name)}")
+    log.info(f"Sleep Duration: {sleep_duration} seconds")
+    log.info(f"State Reset Interval: {state_reset_interval} hours")
+    log.info(f"Monitored Only: {monitored_only}")
+    log.info(f"Minimum Download Queue Size: {min_queue_size}")  # fixed label (key is minimum_...) and reuse the value read above
+
+    # App-specific settings logging
+    if app_name == "sonarr":
+        log.info(f"Hunt Missing Items: {settings.get('hunt_missing_items', 0)}")
+        log.info(f"Hunt Upgrade Items: {settings.get('hunt_upgrade_items', 0)}")
+        log.info(f"Skip Future Episodes: {settings.get('skip_future_episodes', True)}")
+        log.info(f"Skip Series Refresh: {settings.get('skip_series_refresh', False)}")
+    elif app_name == "radarr":
+        log.info(f"Hunt Missing Movies: {settings.get('hunt_missing_movies', 0)}")
+        log.info(f"Hunt Upgrade Movies: {settings.get('hunt_upgrade_movies', 0)}")
+        log.info(f"Skip Future Releases: {settings.get('skip_future_releases', True)}")
+        log.info(f"Skip Movie Refresh: {settings.get('skip_movie_refresh', False)}")
+    elif app_name.lower() == 'lidarr':
+        log.info(f"Mode: {settings.get('hunt_missing_mode', 'artist')}")
+        log.info(f"Hunt Missing Items: {settings.get('hunt_missing_items', 0)}")
+        # Use hunt_upgrade_items
+        log.info(f"Hunt Upgrade Items: {settings.get('hunt_upgrade_items', 0)}")
+        log.info(f"Sleep Duration: {settings.get('sleep_duration', 900)} seconds")
+        log.info(f"State Reset Interval: {state_reset_interval} hours")
+        log.info(f"Monitored Only: {settings.get('monitored_only', True)}")
+        log.info(f"Minimum Download Queue Size: {min_queue_size}")  # fixed label: key is minimum_download_queue_size
+    elif app_name == "readarr":
+        log.info(f"Hunt Missing Books: {settings.get('hunt_missing_books', 0)}")
+        log.info(f"Hunt Upgrade Books: {settings.get('hunt_upgrade_books', 0)}")
+        log.info(f"Skip Future Releases: {settings.get('skip_future_releases', True)}")
+        log.info(f"Skip Author Refresh: {settings.get('skip_author_refresh', False)}")
+    log.info(f"--- End Configuration for {app_name} ---")
+
+# Removed refresh_settings function - settings are loaded dynamically by settings_manager
+
+# Initial logging configuration (optional, could be done in main startup)
+# configure_logging() # Configure root logger based on global/default debug setting if desired
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/default_configs/eros.json b/Huntarr.io-6.3.6/src/primary/default_configs/eros.json
new file mode 100644
index 0000000..dc5659a
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/default_configs/eros.json
@@ -0,0 +1,18 @@
+{
+ "instances": [
+ {
+ "name": "Default",
+ "api_url": "",
+ "api_key": "",
+ "enabled": true
+ }
+ ],
+ "hunt_missing_items": 1,
+ "hunt_upgrade_items": 0,
+ "sleep_duration": 900,
+ "monitored_only": true,
+ "skip_series_refresh": true,
+ "skip_future_releases": true,
+ "skip_scene_refresh": true,
+ "search_mode": "movie"
+}
diff --git a/Huntarr.io-6.3.6/src/primary/default_configs/general.json b/Huntarr.io-6.3.6/src/primary/default_configs/general.json
new file mode 100644
index 0000000..ab41cfe
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/default_configs/general.json
@@ -0,0 +1,14 @@
+{
+ "debug_mode": false,
+ "log_refresh_interval_seconds": 30,
+ "ui_theme": "dark",
+ "check_for_updates": true,
+ "enable_notifications": false,
+ "notification_level": "info",
+ "local_access_bypass": false,
+ "stateful_management_hours": 168,
+ "command_wait_delay": 1,
+ "command_wait_attempts": 600,
+ "minimum_download_queue_size": -1,
+ "api_timeout": 120
+}
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/default_configs/lidarr.json b/Huntarr.io-6.3.6/src/primary/default_configs/lidarr.json
new file mode 100644
index 0000000..1e71955
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/default_configs/lidarr.json
@@ -0,0 +1,17 @@
+{
+ "instances": [
+ {
+ "name": "Default",
+ "api_url": "",
+ "api_key": "",
+ "enabled": true
+ }
+ ],
+ "hunt_missing_mode": "artist",
+ "hunt_missing_items": 1,
+ "hunt_upgrade_items": 0,
+ "sleep_duration": 900,
+ "monitored_only": true,
+ "skip_future_releases": true,
+ "skip_artist_refresh": true
+}
diff --git a/Huntarr.io-6.3.6/src/primary/default_configs/radarr.json b/Huntarr.io-6.3.6/src/primary/default_configs/radarr.json
new file mode 100644
index 0000000..d7a7e8f
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/default_configs/radarr.json
@@ -0,0 +1,16 @@
+{
+ "instances": [
+ {
+ "name": "Default",
+ "api_url": "",
+ "api_key": "",
+ "enabled": true
+ }
+ ],
+ "hunt_missing_movies": 1,
+ "hunt_upgrade_movies": 0,
+ "sleep_duration": 900,
+ "monitored_only": true,
+ "skip_future_releases": true,
+ "skip_movie_refresh": true
+}
diff --git a/Huntarr.io-6.3.6/src/primary/default_configs/readarr.json b/Huntarr.io-6.3.6/src/primary/default_configs/readarr.json
new file mode 100644
index 0000000..c6292ae
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/default_configs/readarr.json
@@ -0,0 +1,16 @@
+{
+ "instances": [
+ {
+ "name": "Default",
+ "api_url": "",
+ "api_key": "",
+ "enabled": true
+ }
+ ],
+ "hunt_missing_books": 1,
+ "hunt_upgrade_books": 0,
+ "sleep_duration": 900,
+ "monitored_only": true,
+ "skip_future_releases": true,
+ "skip_author_refresh": true
+}
diff --git a/Huntarr.io-6.3.6/src/primary/default_configs/sonarr.json b/Huntarr.io-6.3.6/src/primary/default_configs/sonarr.json
new file mode 100644
index 0000000..86476ca
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/default_configs/sonarr.json
@@ -0,0 +1,17 @@
+{
+ "instances": [
+ {
+ "name": "Default",
+ "api_url": "",
+ "api_key": "",
+ "enabled": true
+ }
+ ],
+ "hunt_missing_items": 1,
+ "hunt_upgrade_items": 0,
+ "hunt_missing_mode": "episodes",
+ "sleep_duration": 900,
+ "monitored_only": true,
+ "skip_future_episodes": true,
+ "skip_series_refresh": true
+}
diff --git a/Huntarr.io-6.3.6/src/primary/default_configs/swaparr.json b/Huntarr.io-6.3.6/src/primary/default_configs/swaparr.json
new file mode 100644
index 0000000..396352a
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/default_configs/swaparr.json
@@ -0,0 +1,8 @@
+{
+ "enabled": false,
+ "max_strikes": 3,
+ "max_download_time": "2h",
+ "ignore_above_size": "25GB",
+ "remove_from_client": true,
+ "dry_run": false
+}
diff --git a/Huntarr.io-6.3.6/src/primary/default_configs/whisparr.json b/Huntarr.io-6.3.6/src/primary/default_configs/whisparr.json
new file mode 100644
index 0000000..34eb7c1
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/default_configs/whisparr.json
@@ -0,0 +1,17 @@
+{
+ "instances": [
+ {
+ "name": "Default",
+ "api_url": "",
+ "api_key": "",
+ "enabled": true
+ }
+ ],
+ "hunt_missing_items": 1,
+ "hunt_upgrade_items": 0,
+ "sleep_duration": 900,
+ "monitored_only": true,
+ "skip_series_refresh": true,
+ "skip_future_releases": true,
+ "skip_scene_refresh": true
+}
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/history_manager.py b/Huntarr.io-6.3.6/src/primary/history_manager.py
new file mode 100644
index 0000000..ab42e2e
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/history_manager.py
@@ -0,0 +1,471 @@
+import os
+import json
+import time
+from datetime import datetime
+import threading
+import logging
+import pathlib
+
+# Create a logger
+logger = logging.getLogger(__name__)
+
+# Path will be /config/history in production
+HISTORY_BASE_PATH = pathlib.Path("/config/history")  # NOTE(review): hard-coded container path — assumes a /config mount; confirm
+
+# Lock to prevent race conditions during file operations (one lock per app type)
+history_locks = {
+    "sonarr": threading.Lock(),
+    "radarr": threading.Lock(),
+    "lidarr": threading.Lock(),
+    "readarr": threading.Lock(),
+    "whisparr": threading.Lock(),
+    "eros": threading.Lock(),
+    "swaparr": threading.Lock()
+}
+
+def ensure_history_dir():
+    """Ensure the history directory exists with app-specific subdirectories.
+
+    Returns True on success, False on any filesystem error (callers bail out on False)."""
+    try:
+        # Create base directory
+        HISTORY_BASE_PATH.mkdir(exist_ok=True, parents=True)
+
+        # Create app-specific directories
+        for app in history_locks.keys():
+            app_dir = HISTORY_BASE_PATH / app
+            app_dir.mkdir(exist_ok=True, parents=True)
+
+        return True
+    except Exception as e:
+        logger.error(f"Failed to create history directory: {str(e)}")
+        return False
+
+def get_history_file_path(app_type, instance_name=None):
+    """Get the appropriate history file path based on app type and instance name."""
+    # If no instance name is provided, use "Default"
+    if instance_name is None:
+        instance_name = "Default"
+
+    # Create safe filename from instance name (same as in stateful_manager.py)
+    safe_instance_name = "".join([c if c.isalnum() else "_" for c in instance_name])  # non-alphanumerics become "_"
+    return HISTORY_BASE_PATH / app_type / f"{safe_instance_name}.json"
+
+def add_history_entry(app_type, entry_data):
+    """
+    Add a new history entry
+
+    Parameters:
+    - app_type: str - The app type (sonarr, radarr, etc)
+    - entry_data: dict with required fields:
+        - name: str - Name of processed content
+        - instance_name: str - Name of the instance
+        - id: str - ID of the processed content
+
+    Returns the stored entry dict, or None on validation/filesystem failure.
+    """
+    if not ensure_history_dir():
+        logger.error("Could not ensure history directory exists")
+        return None
+
+    if app_type not in history_locks:
+        logger.error(f"Invalid app type: {app_type}")
+        return None
+
+    required_fields = ["name", "instance_name", "id"]
+    for field in required_fields:
+        if field not in entry_data:
+            logger.error(f"Missing required field: {field}")
+            return None
+
+    # Log the instance name for debugging
+    instance_name = entry_data["instance_name"]
+    logger.debug(f"Adding history entry for {app_type} with instance_name: '{instance_name}'")
+
+    # Create the entry with timestamp
+    timestamp = int(time.time())
+    entry = {
+        "date_time": timestamp,
+        "date_time_readable": datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S'),
+        "processed_info": entry_data["name"],
+        "id": entry_data["id"],
+        "instance_name": instance_name, # Use the instance_name we extracted above
+        "operation_type": entry_data.get("operation_type", "missing"), # Default to "missing" if not specified
+        "app_type": app_type # Include app_type in the entry for display in UI
+    }
+
+    history_file = get_history_file_path(app_type, instance_name)
+    logger.debug(f"Writing to history file: {history_file}")
+
+    # Make sure the parent directory exists
+    history_file.parent.mkdir(exist_ok=True, parents=True)
+
+    # Thread-safe file operation (read-modify-write under the per-app lock)
+    with history_locks[app_type]:
+        try:
+            if history_file.exists():
+                with open(history_file, 'r') as f:
+                    history_data = json.load(f)
+            else:
+                history_data = []
+        except (json.JSONDecodeError, FileNotFoundError):
+            # If file doesn't exist or is corrupt, start with empty list
+            history_data = []
+
+        # Add new entry at the beginning for most recent first
+        history_data.insert(0, entry)
+
+        # Write back to file
+        with open(history_file, 'w') as f:
+            json.dump(history_data, f, indent=2)
+
+    logger.info(f"Added history entry for {app_type}-{instance_name}: {entry_data['name']}")
+    return entry
+
+def get_history(app_type, search_query=None, page=1, page_size=20):
+    """
+    Get history entries for an app
+
+    Parameters:
+    - app_type: str - The app type (sonarr, radarr, etc)
+    - search_query: str - Optional search query to filter results
+    - page: int - Page number (1-based)
+    - page_size: int - Number of entries per page
+
+    Returns:
+    - dict with entries, total_entries, and total_pages
+    """
+    if not ensure_history_dir():
+        logger.error("Could not ensure history directory exists")
+        return {"entries": [], "total_entries": 0, "total_pages": 0, "current_page": 1}
+
+    if app_type not in history_locks and app_type != "all":
+        logger.error(f"Invalid app type: {app_type}")
+        return {"entries": [], "total_entries": 0, "total_pages": 0, "current_page": 1}
+
+    result = []
+
+    if app_type == "all":
+        # Combine histories from all apps and their instances
+        for app in history_locks.keys():
+            app_dir = HISTORY_BASE_PATH / app
+
+            # Find and read all instance files
+            if app_dir.exists():
+                for history_file in app_dir.glob("*.json"):
+                    try:
+                        with open(history_file, 'r') as f:
+                            instance_history = json.load(f)
+                            result.extend(instance_history)
+                            logger.debug(f"Read {len(instance_history)} entries from {history_file}")
+                    except (json.JSONDecodeError, FileNotFoundError) as e:
+                        logger.warning(f"Error reading instance history file {history_file}: {str(e)}")
+    else:
+        # Get history for specific app - combine all instances
+        app_dir = HISTORY_BASE_PATH / app_type
+
+        # Make sure app directory exists
+        app_dir.mkdir(exist_ok=True, parents=True)
+
+        # Read from all instance files
+        if app_dir.exists():
+            instance_files = list(app_dir.glob("*.json"))
+            logger.debug(f"Found {len(instance_files)} instance files for {app_type}: {[f.name for f in instance_files]}")
+
+            for history_file in instance_files:
+                try:
+                    with open(history_file, 'r') as f:
+                        instance_history = json.load(f)
+                        result.extend(instance_history)
+                        logger.debug(f"Read {len(instance_history)} entries from {history_file}")
+                except (json.JSONDecodeError, FileNotFoundError) as e:
+                    logger.warning(f"Error reading instance history file {history_file}: {e}")
+
+    # Sort by date_time in descending order (most recent first)
+    result = sorted(result, key=lambda x: x["date_time"], reverse=True)
+
+    # Apply search filter if provided (case-insensitive match on info/instance/id)
+    if search_query and search_query.strip():
+        search_query = search_query.lower()
+        result = [
+            entry for entry in result if
+            search_query in entry.get("processed_info", "").lower() or
+            search_query in entry.get("instance_name", "").lower() or
+            search_query in str(entry.get("id", "")).lower()
+        ]
+
+    # Calculate pagination
+    total_entries = len(result)
+    total_pages = (total_entries + page_size - 1) // page_size if total_entries > 0 else 1
+
+    # Adjust page if out of bounds
+    if page < 1:
+        page = 1
+    elif page > total_pages:
+        page = total_pages
+
+    # Get entries for the current page
+    start_idx = (page - 1) * page_size
+    end_idx = start_idx + page_size
+    paginated_entries = result[start_idx:end_idx]
+
+    # Calculate "how long ago" for each entry (mutates the paginated dicts in place)
+    current_time = int(time.time())
+    for entry in paginated_entries:
+        seconds_ago = current_time - entry["date_time"]
+        entry["how_long_ago"] = format_time_ago(seconds_ago)
+
+    return {
+        "entries": paginated_entries,
+        "total_entries": total_entries,
+        "total_pages": total_pages,
+        "current_page": page
+    }
+
+def format_time_ago(seconds):
+    """Format seconds into a human-readable 'time ago' string (largest unit of days/hours/minutes/seconds)."""
+    minutes = seconds // 60
+    hours = minutes // 60
+    days = hours // 24
+
+    if days > 0:
+        return f"{days} {'day' if days == 1 else 'days'} ago"
+    elif hours > 0:
+        return f"{hours} {'hour' if hours == 1 else 'hours'} ago"
+    elif minutes > 0:
+        return f"{minutes} {'minute' if minutes == 1 else 'minutes'} ago"
+    else:
+        return f"{seconds} {'second' if seconds == 1 else 'seconds'} ago"
+
+def clear_history(app_type):
+    """
+    Clear history for an app
+
+    Parameters:
+    - app_type: str - The app type (sonarr, radarr, etc) or "all" to clear all history
+
+    Returns:
+    - bool - Success or failure
+    """
+    if not ensure_history_dir():
+        logger.error("Could not ensure history directory exists")
+        return False
+
+    if app_type not in history_locks and app_type != "all":
+        logger.error(f"Invalid app type: {app_type}")
+        return False
+
+    try:
+        if app_type == "all":
+            # Clear all history files for all apps
+            for app in history_locks.keys():
+                # Clear all instance files (files are emptied, not deleted)
+                app_dir = HISTORY_BASE_PATH / app
+                # Ensure directory exists
+                app_dir.mkdir(exist_ok=True, parents=True)
+
+                if app_dir.exists():
+                    instance_files = list(app_dir.glob("*.json"))
+                    logger.debug(f"Found {len(instance_files)} instance files to clear for {app}")
+
+                    for history_file in instance_files:
+                        with open(history_file, 'w') as f:
+                            json.dump([], f)
+                        logger.debug(f"Cleared instance history file: {history_file}")
+        else:
+            # Clear all instance files for specific app
+            app_dir = HISTORY_BASE_PATH / app_type
+            # Ensure directory exists
+            app_dir.mkdir(exist_ok=True, parents=True)
+
+            if app_dir.exists():
+                instance_files = list(app_dir.glob("*.json"))
+                logger.debug(f"Found {len(instance_files)} instance files to clear for {app_type}")
+
+                for history_file in instance_files:
+                    with open(history_file, 'w') as f:
+                        json.dump([], f)
+                    logger.debug(f"Cleared instance history file: {history_file}")
+
+        logger.info(f"Successfully cleared history for {app_type}")
+        return True
+    except Exception as e:
+        logger.error(f"Error clearing history for {app_type}: {str(e)}")
+        return False
+
+def handle_instance_rename(app_type, old_instance_name, new_instance_name):
+    """
+    Handle renaming of an instance by moving history entries to a new file.
+
+    Parameters:
+    - app_type: str - The app type (sonarr, radarr, etc)
+    - old_instance_name: str - Previous instance name
+    - new_instance_name: str - New instance name
+
+    Returns:
+    - bool - Success or failure
+    """
+    if not ensure_history_dir():
+        logger.error("Could not ensure history directory exists")
+        return False
+
+    if app_type not in history_locks:
+        logger.error(f"Invalid app type: {app_type}")
+        return False
+
+    # If names are the same, nothing to do
+    if old_instance_name == new_instance_name:
+        return True
+
+    logger.info(f"Handling instance rename for {app_type}: {old_instance_name} -> {new_instance_name}")
+
+    # Get paths for old and new history files
+    old_file = get_history_file_path(app_type, old_instance_name)
+    new_file = get_history_file_path(app_type, new_instance_name)
+
+    # Ensure parent directories exist
+    new_file.parent.mkdir(exist_ok=True, parents=True)
+
+    # Thread-safe operation (merge + delete happens atomically w.r.t. other history writers)
+    with history_locks[app_type]:
+        try:
+            # Load old data if it exists
+            old_data = []
+            if old_file.exists():
+                try:
+                    with open(old_file, 'r') as f:
+                        old_data = json.load(f)
+                    logger.info(f"Loaded {len(old_data)} history entries from {old_file}")
+                except (json.JSONDecodeError, FileNotFoundError) as e:
+                    logger.warning(f"Error reading old history file {old_file}: {e}")
+
+            # Update instance_name in all entries
+            for entry in old_data:
+                entry["instance_name"] = new_instance_name
+
+            # Create or load new file (target may already have entries)
+            new_data = []
+            if new_file.exists():
+                try:
+                    with open(new_file, 'r') as f:
+                        new_data = json.load(f)
+                    logger.info(f"Loaded {len(new_data)} existing history entries from {new_file}")
+                except (json.JSONDecodeError, FileNotFoundError) as e:
+                    logger.warning(f"Error reading new history file {new_file}: {e}")
+
+            # Merge data, avoiding duplicates (identity = (id, date_time) pair)
+            existing_keys = {(entry.get("id", ""), entry.get("date_time", 0)) for entry in new_data}
+            for entry in old_data:
+                entry_key = (entry.get("id", ""), entry.get("date_time", 0))
+                if entry_key not in existing_keys:
+                    new_data.append(entry)
+
+            # Sort by timestamp
+            new_data = sorted(new_data, key=lambda x: x.get("date_time", 0), reverse=True)
+
+            # Save merged data to new file
+            with open(new_file, 'w') as f:
+                json.dump(new_data, f, indent=2)
+            logger.info(f"Saved {len(new_data)} history entries to {new_file}")
+
+            # Optionally delete old file if it exists
+            if old_file.exists():
+                old_file.unlink()
+                logger.info(f"Deleted old history file {old_file}")
+
+            return True
+        except Exception as e:
+            logger.error(f"Error renaming instance history: {e}")
+            return False
+
+def initialize_instance_history(app_type, instance_name):
+    """
+    Initialize or ensure history file exists for a specific instance.
+    This should be called whenever an instance is created or configured.
+
+    Parameters:
+    - app_type: str - The app type (sonarr, radarr, etc)
+    - instance_name: str - Name of the instance
+
+    Returns:
+    - str - Path to the history file (None on failure)
+    """
+    if not ensure_history_dir():
+        logger.error("Could not ensure history directory exists")
+        return None
+
+    if app_type not in history_locks:
+        logger.error(f"Invalid app type: {app_type}")
+        return None
+
+    try:
+        # Get the history file path
+        history_file = get_history_file_path(app_type, instance_name)
+
+        # Ensure parent directory exists
+        history_file.parent.mkdir(exist_ok=True, parents=True)
+
+        # Create the file if it doesn't exist (seeded with an empty list)
+        if not history_file.exists():
+            with open(history_file, 'w') as f:
+                json.dump([], f)
+            logger.info(f"Created history file for {app_type}/{instance_name}: {history_file}")
+
+        return str(history_file)
+    except Exception as e:
+        logger.error(f"Error initializing history for {app_type}/{instance_name}: {e}")
+        return None
+
+def sync_history_files_with_instances():
+    """
+    Synchronize history files with existing instances.
+    This ensures that every instance has a corresponding history file.
+
+    Returns:
+    - dict - Information about what was synchronized
+    """
+    result = {
+        "success": False,
+        "app_instances": {},
+        "created_files": [],
+        "error": None
+    }
+
+    try:
+        # First ensure history directories exist
+        ensure_history_dir()
+
+        # Load settings for each app type to find instances
+        for app_type in history_locks.keys():
+            app_dir = HISTORY_BASE_PATH / app_type
+            app_dir.mkdir(exist_ok=True, parents=True)
+
+            result["app_instances"][app_type] = []
+
+            # Let's check for instance settings from settings directory
+            # NOTE(review): assumes each /config/<app>/*.json filename is an instance name — confirm against settings layout
+            instances_dir = pathlib.Path("/config") / app_type
+            if instances_dir.exists():
+                for instance_file in instances_dir.glob("*.json"):
+                    try:
+                        # Extract instance name from filename
+                        instance_name = instance_file.stem
+                        result["app_instances"][app_type].append(instance_name)
+                        logger.info(f"Found instance for {app_type}: {instance_name}")
+
+                        # Create history file for this instance if it doesn't exist
+                        history_file = get_history_file_path(app_type, instance_name)
+                        if not history_file.exists():
+                            history_file.parent.mkdir(exist_ok=True, parents=True)
+                            with open(history_file, 'w') as f:
+                                json.dump([], f)
+                            logger.info(f"Created history file for {app_type}/{instance_name}: {history_file}")
+                            result["created_files"].append(str(history_file))
+                    except Exception as e:
+                        logger.error(f"Error processing instance file {instance_file}: {e}")
+
+        result["success"] = True
+        return result
+    except Exception as e:
+        logger.error(f"Error syncing history files with instances: {e}")
+        result["error"] = str(e)
+        return result
+
+# Run the synchronization on module import
+sync_result = sync_history_files_with_instances()  # NOTE(review): filesystem side effect at import time — confirm intended
+logger.info(f"History synchronization result: {sync_result}")
diff --git a/Huntarr.io-6.3.6/src/primary/keys_manager.py b/Huntarr.io-6.3.6/src/primary/keys_manager.py
new file mode 100644
index 0000000..99a53d0
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/keys_manager.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python3
+"""
+Keys manager for Huntarr
+Handles storage and retrieval of API keys and URLs from huntarr.json
+"""
+
+import os
+import json
+import pathlib
+import logging
+from typing import Dict, Any, Optional, Tuple
+
+# Create a simple logger
+logging.basicConfig(level=logging.INFO)  # NOTE(review): basicConfig at import affects the root logger process-wide — confirm intended
+keys_logger = logging.getLogger("keys_manager")
+
+# Settings directory - Changed to match the updated settings_manager.py
+SETTINGS_DIR = pathlib.Path("/config")
+SETTINGS_DIR.mkdir(parents=True, exist_ok=True)  # created eagerly at import time
+
+SETTINGS_FILE = SETTINGS_DIR / "huntarr.json"
+
+# Removed save_api_keys function
+
+# Removed get_api_keys function
+
+# Removed list_configured_apps function
+
+# Keep other functions if they exist and are needed, otherwise the file might become empty.
+# If this file solely managed API keys in the old way, it might be removable entirely,
+# but let's keep it for now in case other key-related logic exists or is added later.
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/routes/common.py b/Huntarr.io-6.3.6/src/primary/routes/common.py
new file mode 100644
index 0000000..47a5d38
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/routes/common.py
@@ -0,0 +1,444 @@
+#!/usr/bin/env python3
+"""
+Common routes blueprint for Huntarr web interface
+"""
+
+import os
+import json
+import base64
+import io
+import qrcode
+import pyotp
+import logging
+# Add render_template, send_from_directory, session
+from flask import Blueprint, request, jsonify, make_response, redirect, url_for, current_app, render_template, send_from_directory, session
+from ..auth import (
+ verify_user, create_session, get_username_from_session, SESSION_COOKIE_NAME,
+ change_username as auth_change_username, change_password as auth_change_password,
+ validate_password_strength, logout, verify_session, disable_2fa_with_password_and_otp,
+ user_exists, create_user, generate_2fa_secret, verify_2fa_code, is_2fa_enabled # Add missing auth imports
+)
+from ..utils.logger import logger # Ensure logger is imported
+from .. import settings_manager # Import settings_manager
+
+common_bp = Blueprint('common', __name__)
+
+# --- Static File Serving --- #
+
+@common_bp.route('/static/<path:filename>')
+def static_files(filename):
+ return send_from_directory(common_bp.static_folder, filename)
+
+@common_bp.route('/favicon.ico')
+def favicon():
+ return send_from_directory(common_bp.static_folder, 'favicon.ico', mimetype='image/vnd.microsoft.icon')
+
+@common_bp.route('/logo/<path:filename>')
+def logo_files(filename):
+ logo_dir = os.path.join(common_bp.static_folder, 'logo')
+ return send_from_directory(logo_dir, filename)
+
+# --- Authentication Routes --- #
+
+@common_bp.route('/login', methods=['GET', 'POST'])
+def login_route():
+ if request.method == 'POST':
+ try: # Wrap the POST logic in a try block for better error handling
+ data = request.json
+ username = data.get('username')
+ password = data.get('password')
+ twoFactorCode = data.get('twoFactorCode') # Changed from 'otp_code' to match frontend form
+
+ if not username or not password:
+ logger.warning("Login attempt with missing username or password.")
+ return jsonify({"success": False, "error": "Username and password are required"}), 400
+
+ # Call verify_user which now returns (auth_success, needs_2fa)
+ auth_success, needs_2fa = verify_user(username, password, twoFactorCode)
+
+ logger.debug(f"Auth result for '{username}': success={auth_success}, needs_2fa={needs_2fa}")
+
+ if auth_success:
+ # User is authenticated (password correct, and 2FA if needed was correct)
+ session_token = create_session(username)
+ session[SESSION_COOKIE_NAME] = session_token # Store token in Flask session immediately
+ response = jsonify({"success": True, "redirect": "/"}) # Add redirect URL
+ response.set_cookie(SESSION_COOKIE_NAME, session_token, httponly=True, samesite='Lax', path='/') # Add path
+ logger.info(f"User '{username}' logged in successfully.")
+ return response
+ elif needs_2fa:
+ # Authentication failed *because* 2FA was required (or code was invalid)
+ # The specific reason (missing vs invalid code) is logged in verify_user
+ logger.warning(f"Login failed for '{username}': 2FA required or invalid.")
+ logger.debug(f"Returning 2FA required response: {{\"success\": False, \"requires_2fa\": True, \"requiresTwoFactor\": True, \"error\": \"Invalid or missing 2FA code\"}}")
+
+ # Use all common variations of the 2FA flag to ensure compatibility
+ return jsonify({
+ "success": False,
+ "requires_2fa": True,
+ "requiresTwoFactor": True,
+ "requires2fa": True,
+ "requireTwoFactor": True,
+ "error": "Two-factor authentication code required"
+ }), 401
+ else:
+ # Authentication failed for other reasons (e.g., wrong password, user not found)
+ # Specific reason logged in verify_user
+ logger.warning(f"Login failed for '{username}': Invalid credentials or other error.")
+ return jsonify({"success": False, "error": "Invalid username or password"}), 401 # Use 401
+
+ except Exception as e:
+ logger.error(f"Unexpected error during login POST for user '{username if 'username' in locals() else 'unknown'}': {e}", exc_info=True)
+ return jsonify({"success": False, "error": "An internal server error occurred during login."}), 500
+ else:
+ # GET request - show login page
+ # If user already exists, show login, otherwise redirect to setup
+ if not user_exists():
+ logger.info("No user exists, redirecting to setup.")
+ return redirect(url_for('common.setup_route'))
+ logger.debug("Displaying login page.")
+ return render_template('login.html')
+
+@common_bp.route('/logout', methods=['POST'])
+def logout_route():
+ try:
+ session_token = request.cookies.get(SESSION_COOKIE_NAME)
+ if session_token:
+ logger.info(f"Logging out session token: {session_token[:8]}...") # Log part of token
+ logout(session_token) # Call the logout function from auth.py
+ else:
+ logger.warning("Logout attempt without session cookie.")
+
+ response = jsonify({"success": True})
+ # Ensure cookie deletion happens even if logout function had issues
+ response.delete_cookie(SESSION_COOKIE_NAME, path='/', samesite='Lax') # Specify path and samesite
+ logger.info("Logout successful, cookie deleted.")
+ return response
+ except Exception as e:
+ logger.error(f"Error during logout: {e}", exc_info=True)
+ # Return a JSON error response
+ return jsonify({"success": False, "error": "An internal server error occurred during logout."}), 500
+
+@common_bp.route('/setup', methods=['GET', 'POST'])
+def setup():
+ if user_exists(): # This function should now be defined via import
+ # If a user already exists, redirect to login or home
+ logger.info("Setup page accessed but user already exists. Redirecting to login.")
+ return redirect(url_for('common.login_route'))
+
+ if request.method == 'POST':
+ username = None # Initialize username for logging in case of early failure
+ try: # Add try block to catch potential errors during user creation
+ data = request.json
+ username = data.get('username')
+ password = data.get('password')
+ confirm_password = data.get('confirm_password')
+
+ # Basic validation
+ if not username or not password or not confirm_password:
+ return jsonify({"success": False, "error": "Missing required fields"}), 400
+
+ # Add username length validation
+ if len(username.strip()) < 3:
+ return jsonify({"success": False, "error": "Username must be at least 3 characters long"}), 400
+
+ if password != confirm_password:
+ return jsonify({"success": False, "error": "Passwords do not match"}), 400
+
+ # Validate password strength using the backend function
+ password_error = validate_password_strength(password)
+ if password_error:
+ return jsonify({"success": False, "error": password_error}), 400
+
+ logger.info(f"Attempting to create user '{username}' during setup.")
+ if create_user(username, password): # This function should now be defined via import
+ # Automatically log in the user after setup
+ logger.info(f"User '{username}' created successfully during setup. Creating session.")
+ session_token = create_session(username)
+ # Explicitly set username in Flask session - might not be needed if using token correctly
+ # session['username'] = username
+ session[SESSION_COOKIE_NAME] = session_token # Store token in session
+ response = jsonify({"success": True})
+ # Set cookie in the response
+ response.set_cookie(SESSION_COOKIE_NAME, session_token, httponly=True, samesite='Lax', path='/') # Add path
+ return response
+ else:
+ # create_user itself failed, but didn't raise an exception
+ logger.error(f"create_user function returned False for user '{username}' during setup.")
+ return jsonify({"success": False, "error": "Failed to create user (internal reason)"}), 500
+ except Exception as e:
+ # Catch any unexpected exception during the process
+ logger.error(f"Unexpected error during setup POST for user '{username if username else 'unknown'}': {e}", exc_info=True)
+ return jsonify({"success": False, "error": f"An unexpected server error occurred: {e}"}), 500
+ else:
+ # GET request - show setup page
+ logger.info("Displaying setup page.")
+ return render_template('setup.html') # This function should now be defined via import
+
+# --- User Management API Routes --- #
+
+@common_bp.route('/api/user/info', methods=['GET'])
+def get_user_info_route():
+ # Use session token to get username
+ session_token = request.cookies.get(SESSION_COOKIE_NAME)
+ username = get_username_from_session(session_token) # Use auth function
+
+ if not username:
+ logger.warning("Attempt to get user info failed: Not authenticated (no valid session).")
+ return jsonify({"error": "Not authenticated"}), 401
+
+ # Pass username to is_2fa_enabled
+ two_fa_status = is_2fa_enabled(username) # This function should now be defined via import
+ logger.debug(f"Retrieved user info for '{username}'. 2FA enabled: {two_fa_status}")
+ return jsonify({"username": username, "is_2fa_enabled": two_fa_status})
+
+@common_bp.route('/api/user/change-username', methods=['POST'])
+def change_username_route():
+ # Use session token to get username
+ session_token = request.cookies.get(SESSION_COOKIE_NAME)
+ current_username = get_username_from_session(session_token)
+
+ if not current_username:
+ logger.warning("Username change attempt failed: Not authenticated.")
+ return jsonify({"error": "Not authenticated"}), 401
+
+ data = request.json
+ new_username = data.get('username')
+ password = data.get('password') # Get password from request
+
+ if not new_username or not password: # Check if password is provided
+ return jsonify({"success": False, "error": "New username and current password are required"}), 400
+
+ # Add username length validation
+ if len(new_username.strip()) < 3:
+ return jsonify({"success": False, "error": "Username must be at least 3 characters long"}), 400
+
+ # Call the change_username function from auth.py
+ if auth_change_username(current_username, new_username, password):
+ # Update session? The session stores a token, not the username directly.
+ # If the username is needed frequently, maybe re-create session or update session data if stored there.
+ # For now, assume token remains valid.
+ logger.info(f"Username changed successfully for '{current_username}' to '{new_username}'.")
+ # Re-fetch username to confirm change for response? Or trust change_username?
+ # Fetch updated info to send back
+ updated_username = new_username # Assume success means it changed
+ return jsonify({"success": True, "username": updated_username}) # Return new username
+ else:
+ logger.warning(f"Username change failed for '{current_username}'. Check logs in auth.py for details.")
+ return jsonify({"success": False, "error": "Failed to change username. Check password or logs."}), 400
+
+@common_bp.route('/api/user/change-password', methods=['POST'])
+def change_password_route():
+ # Use session token to get username - needed? change_password might not need it if single user
+ session_token = request.cookies.get(SESSION_COOKIE_NAME)
+ username = get_username_from_session(session_token) # Get username for logging
+
+ if not username: # Check if session is valid even if function doesn't need username
+ logger.warning("Password change attempt failed: Not authenticated.")
+ return jsonify({"error": "Not authenticated"}), 401
+
+ data = request.json
+ current_password = data.get('current_password')
+ new_password = data.get('new_password')
+
+ if not current_password or not new_password:
+ logger.warning(f"Password change attempt for user '{username}' failed: Missing current or new password.")
+ return jsonify({"success": False, "error": "Current and new passwords are required"}), 400
+
+ logger.info(f"Attempting to change password for user '{username}'.")
+ # Pass username? change_password might not need it. Assuming it doesn't for now.
+ if auth_change_password(current_password, new_password):
+ logger.info(f"Password changed successfully for user '{username}'.")
+ return jsonify({"success": True})
+ else:
+ logger.warning(f"Password change failed for user '{username}'. Check logs in auth.py for details.")
+ return jsonify({"success": False, "error": "Failed to change password. Check current password or logs."}), 400
+
+# --- 2FA Management API Routes --- #
+
+@common_bp.route('/api/user/2fa/setup', methods=['POST'])
+def setup_2fa():
+ # Use session token to get username
+ session_token = request.cookies.get(SESSION_COOKIE_NAME)
+ username = get_username_from_session(session_token)
+
+ if not username:
+ logger.warning("2FA setup attempt failed: No username in session.") # Add logging
+ return jsonify({"error": "Not authenticated"}), 401
+
+ try:
+ logger.info(f"Generating 2FA setup for user: {username}") # Add logging
+ # Pass username to generate_2fa_secret
+ secret, qr_code_data_uri = generate_2fa_secret(username) # This function should now be defined via import
+
+ # Return secret and QR code data URI
+ return jsonify({"success": True, "secret": secret, "qr_code_url": qr_code_data_uri}) # Match frontend expectation 'qr_code_url'
+
+ except Exception as e:
+ logger.error(f"Error during 2FA setup generation for user '{username}': {e}", exc_info=True)
+ return jsonify({"success": False, "error": "Failed to generate 2FA setup information."}), 500
+
+@common_bp.route('/api/user/2fa/verify', methods=['POST'])
+def verify_2fa():
+ # Use session token to get username
+ session_token = request.cookies.get(SESSION_COOKIE_NAME)
+ username = get_username_from_session(session_token)
+
+ if not username:
+ logger.warning("2FA verify attempt failed: No username in session.") # Add logging
+ return jsonify({"error": "Not authenticated"}), 401
+
+ data = request.json
+ otp_code = data.get('code') # Match frontend key 'code'
+
+ if not otp_code or len(otp_code) != 6 or not otp_code.isdigit(): # Add validation
+ logger.warning(f"2FA verification for '{username}' failed: Invalid code format provided.")
+ return jsonify({"success": False, "error": "Invalid or missing 6-digit OTP code"}), 400
+
+ logger.info(f"Attempting to verify 2FA code for user '{username}'.")
+ # Pass username to verify_2fa_code
+ if verify_2fa_code(username, otp_code, enable_on_verify=True): # This function should now be defined via import
+ logger.info(f"Successfully verified and enabled 2FA for user: {username}") # Add logging
+ return jsonify({"success": True})
+ else:
+ # Reason logged in verify_2fa_code
+ logger.warning(f"2FA verification failed for user: {username}. Check logs in auth.py.")
+ return jsonify({"success": False, "error": "Invalid OTP code"}), 400 # Use 400 for bad request
+
+@common_bp.route('/api/user/2fa/disable', methods=['POST'])
+def disable_2fa_route():
+ session_token = request.cookies.get(SESSION_COOKIE_NAME)
+ username = get_username_from_session(session_token)
+
+ if not username:
+ logger.warning("2FA disable attempt failed: Not authenticated.")
+ return jsonify({"error": "Not authenticated"}), 401
+
+ data = request.json
+ password = data.get('password')
+ otp_code = data.get('code')
+
+ # Require BOTH password and OTP code
+ if not password or not otp_code:
+ logger.warning(f"2FA disable attempt for '{username}' failed: Missing password or OTP code.")
+ return jsonify({"success": False, "error": "Both password and current OTP code are required to disable 2FA"}), 400
+
+ if not (len(otp_code) == 6 and otp_code.isdigit()):
+ logger.warning(f"2FA disable attempt for '{username}' failed: Invalid OTP code format.")
+ return jsonify({"success": False, "error": "Invalid 6-digit OTP code format"}), 400
+
+ # Call a function that verifies both password and OTP
+ if disable_2fa_with_password_and_otp(username, password, otp_code):
+ logger.info(f"2FA disabled successfully for user '{username}' using password and OTP.")
+ return jsonify({"success": True})
+ else:
+ # Reason logged in disable_2fa_with_password_and_otp
+ logger.warning(f"Failed to disable 2FA for user '{username}' using password and OTP. Check logs.")
+ # Provide a more specific error if possible, otherwise generic
+ # The auth function should log the specific reason (bad pass, bad otp)
+ return jsonify({"success": False, "error": "Failed to disable 2FA. Invalid password or OTP code."}), 400
+
+# --- Theme Setting Route ---
+@common_bp.route('/api/settings/theme', methods=['POST'])
+def set_theme():
+ # Authentication check
+ session_token = request.cookies.get(SESSION_COOKIE_NAME)
+ if not verify_session(session_token):
+ logger.warning("Theme setting attempt failed: Not authenticated.")
+ return jsonify({"error": "Unauthorized"}), 401
+
+ try:
+ data = request.json
+ dark_mode = data.get('dark_mode')
+
+ if dark_mode is None or not isinstance(dark_mode, bool):
+ logger.warning("Invalid theme setting received.")
+ return jsonify({"success": False, "error": "Invalid 'dark_mode' value"}), 400
+
+ # Here you would typically save this preference to a user profile or global setting
+ # For now, just log it. A real implementation would persist this.
+ username = get_username_from_session(session_token) # Get username for logging
+ logger.info(f"User '{username}' set dark mode preference to: {dark_mode}")
+
+ # Example: Saving to a hypothetical global config (replace with actual persistence)
+ # global_settings = settings_manager.load_global_settings() # Assuming such a function exists
+ # global_settings['ui']['dark_mode'] = dark_mode
+ # settings_manager.save_global_settings(global_settings) # Assuming such a function exists
+
+ return jsonify({"success": True})
+ except Exception as e:
+ logger.error(f"Error setting theme preference: {e}", exc_info=True)
+ return jsonify({"success": False, "error": "Failed to set theme preference"}), 500
+
+# --- Local Access Bypass Status API Route --- #
+
+@common_bp.route('/api/get_local_access_bypass_status', methods=['GET'])
+def get_local_access_bypass_status_route():
+ """API endpoint to get the status of the local network authentication bypass setting."""
+ try:
+ # Get the setting from the 'general' section, default to False if not found
+ bypass_enabled = settings_manager.get_setting('general', 'local_access_bypass', False)
+ logger.debug(f"Retrieved local_access_bypass status: {bypass_enabled}")
+ # Return status in the format expected by the frontend
+ return jsonify({"isEnabled": bypass_enabled})
+ except Exception as e:
+ logger.error(f"Error retrieving local_access_bypass status: {e}", exc_info=True)
+ # Return a generic error to the client
+ return jsonify({"error": "Failed to retrieve bypass status"}), 500
+
+# --- Stats Management API Routes --- #
+@common_bp.route('/api/stats', methods=['GET'])
+def get_stats_api():
+ """API endpoint to get media statistics"""
+ try:
+ # Import here to avoid circular imports
+ from ..stats_manager import get_stats
+
+ # Get stats from stats_manager
+ stats = get_stats()
+ logger.debug(f"Retrieved stats for API response: {stats}")
+
+ # Return success response with stats
+ return jsonify({"success": True, "stats": stats})
+ except Exception as e:
+ logger.error(f"Error retrieving stats: {e}", exc_info=True)
+ return jsonify({"success": False, "error": str(e)}), 500
+
+@common_bp.route('/api/stats/reset', methods=['POST'])
+def reset_stats_api():
+ """API endpoint to reset media statistics"""
+ try:
+ # Import here to avoid circular imports
+ from ..stats_manager import reset_stats
+
+ # Check if authenticated
+ session_token = request.cookies.get(SESSION_COOKIE_NAME)
+ if not verify_session(session_token):
+ logger.warning("Stats reset attempt failed: Not authenticated.")
+ return jsonify({"error": "Unauthorized"}), 401
+
+ # Get app type from request if provided
+ data = request.json or {}
+ app_type = data.get('app_type') # None will reset all
+
+ if app_type is not None and app_type not in ["sonarr", "radarr", "lidarr", "readarr", "whisparr"]:
+ logger.warning(f"Invalid app_type for stats reset: {app_type}")
+ return jsonify({"success": False, "error": "Invalid app_type"}), 400
+
+ # Reset stats
+ if reset_stats(app_type):
+ message = f"Reset statistics for {app_type}" if app_type else "Reset all statistics"
+ logger.info(message)
+ return jsonify({"success": True, "message": message})
+ else:
+ error_msg = f"Failed to reset statistics for {app_type}" if app_type else "Failed to reset all statistics"
+ logger.error(error_msg)
+ return jsonify({"success": False, "error": error_msg}), 500
+ except Exception as e:
+ logger.error(f"Error resetting stats: {e}", exc_info=True)
+ return jsonify({"success": False, "error": str(e)}), 500
+
+# Ensure all routes previously in this file that interact with settings
+# are either moved to web_server.py or updated here using the new settings_manager functions.
+
+# REMOVED DUPLICATE BLUEPRINT DEFINITION AND CONFLICTING ROUTES BELOW THIS LINE
diff --git a/Huntarr.io-6.3.6/src/primary/routes/history_routes.py b/Huntarr.io-6.3.6/src/primary/routes/history_routes.py
new file mode 100644
index 0000000..2a2735c
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/routes/history_routes.py
@@ -0,0 +1,51 @@
+from flask import Blueprint, request, jsonify, current_app
+import logging
+
+from src.primary.history_manager import get_history, clear_history, add_history_entry
+
+logger = logging.getLogger("huntarr")
+history_blueprint = Blueprint('history', __name__)
+
+@history_blueprint.route('/<app_type>', methods=['GET'])
+def get_app_history(app_type):
+ """Get history entries for a specific app or all apps"""
+ try:
+ search_query = request.args.get('search', '')
+ page = int(request.args.get('page', 1))
+ page_size = int(request.args.get('page_size', 20))
+
+ # Validate page_size to be one of the allowed values
+ allowed_page_sizes = [10, 20, 30, 50, 100, 250, 1000]
+ if page_size not in allowed_page_sizes:
+ page_size = 20
+
+ # Validate app_type
+ valid_app_types = ["all", "sonarr", "radarr", "lidarr", "readarr", "whisparr", "eros", "swaparr"]
+ if app_type not in valid_app_types:
+ return jsonify({"error": f"Invalid app type: {app_type}"}), 400
+
+ result = get_history(app_type, search_query, page, page_size)
+ return jsonify(result), 200
+
+ except Exception as e:
+ logger.error(f"Error getting history for {app_type}: {str(e)}")
+ return jsonify({"error": str(e)}), 500
+
+@history_blueprint.route('/<app_type>', methods=['DELETE'])
+def clear_app_history(app_type):
+ """Clear history for a specific app or all apps"""
+ try:
+ # Validate app_type
+ valid_app_types = ["all", "sonarr", "radarr", "lidarr", "readarr", "whisparr", "eros", "swaparr"]
+ if app_type not in valid_app_types:
+ return jsonify({"error": f"Invalid app type: {app_type}"}), 400
+
+ success = clear_history(app_type)
+ if success:
+ return jsonify({"message": f"History cleared for {app_type}"}), 200
+ else:
+ return jsonify({"error": f"Failed to clear history for {app_type}"}), 500
+
+ except Exception as e:
+ logger.error(f"Error clearing history for {app_type}: {str(e)}")
+ return jsonify({"error": str(e)}), 500
diff --git a/Huntarr.io-6.3.6/src/primary/routes/main.py b/Huntarr.io-6.3.6/src/primary/routes/main.py
new file mode 100644
index 0000000..8794c5b
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/routes/main.py
@@ -0,0 +1,54 @@
+from flask import Blueprint, request, jsonify
+from src.primary.stats_manager import get_stats, reset_stats
+
+main_blueprint = Blueprint('main', __name__)
+
+@main_blueprint.route('/')
+def index():
+ # ...existing code...
+
+ # Remove or comment out any logging of the web interface URL here
+ # logger.info(f"Web interface available at http://{request.host}")
+
+ # ...existing code...
+
+# Add new route for getting media statistics
+@main_blueprint.route('/api/stats', methods=['GET'])
+@jwt_required()
+def api_get_stats():
+ """Get media statistics for each app"""
+ try:
+ stats = get_stats()
+ return jsonify({
+ "success": True,
+ "stats": stats
+ })
+ except Exception as e:
+ logger.error(f"Error retrieving media statistics: {e}")
+ return jsonify({
+ "success": False,
+ "message": "Error retrieving media statistics."
+ }), 500
+
+# Add route for resetting statistics
+@main_blueprint.route('/api/stats/reset', methods=['POST'])
+@jwt_required()
+@admin_required
+def api_reset_stats():
+ """Reset media statistics"""
+ try:
+ app_type = None
+ if request.is_json:
+ app_type = request.json.get('app_type')
+
+ reset_stats(app_type)
+ return jsonify({
+ "success": True,
+ "message": f"Successfully reset statistics for {'all apps' if app_type is None else app_type}."
+ })
+ except Exception as e:
+ logger.error(f"Error resetting media statistics: {e}")
+ return jsonify({
+ "success": False,
+ "message": "Error resetting media statistics."
+ }), 500
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/server.py b/Huntarr.io-6.3.6/src/primary/server.py
new file mode 100644
index 0000000..27d5a50
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/server.py
@@ -0,0 +1,20 @@
+import logging
+
+# ...existing code...
+
+def start_server(host='0.0.0.0', port=9876, debug=False):
+ """Start the web server"""
+ logging.basicConfig(level=logging.DEBUG if debug else logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ # ...existing code...
+
+ # Change this line:
+ # logger.info(f"Web interface available at http://{host}:{port}")
+
+ # To this (more discreet version):
+ logger.info(f"Server started on port {port}")
+
+ # ...existing code...
+
+# ...existing code...
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/settings_manager.py b/Huntarr.io-6.3.6/src/primary/settings_manager.py
new file mode 100644
index 0000000..2e69b13
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/settings_manager.py
@@ -0,0 +1,338 @@
+#!/usr/bin/env python3
+"""
+Settings manager for Huntarr
+Handles loading, saving, and providing settings from individual JSON files per app
+Supports default configurations for different Arr applications
+"""
+
+import os
+import json
+import pathlib
+import logging
+import shutil
+import subprocess
+import time
+from typing import Dict, Any, Optional, List
+
+# Create a simple logger for settings_manager
+logging.basicConfig(level=logging.INFO)
+settings_logger = logging.getLogger("settings_manager")
+
+# Settings directory setup - Root config directory
+SETTINGS_DIR = pathlib.Path("/config")
+SETTINGS_DIR.mkdir(parents=True, exist_ok=True)
+
+# Default configs location remains the same
+DEFAULT_CONFIGS_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), 'default_configs'))
+
+# Update or add this as a class attribute or constant
+KNOWN_APP_TYPES = ["sonarr", "radarr", "lidarr", "readarr", "whisparr", "eros", "general", "swaparr"]
+
+# Add a settings cache with timestamps to avoid excessive disk reads
+settings_cache = {} # Format: {app_name: {'timestamp': timestamp, 'data': settings_dict}}
+CACHE_TTL = 5 # Cache time-to-live in seconds
+
+def clear_cache(app_name=None):
+ """Clear the settings cache for a specific app or all apps."""
+ global settings_cache
+ if app_name:
+ if app_name in settings_cache:
+ settings_logger.debug(f"Clearing cache for {app_name}")
+ settings_cache.pop(app_name, None)
+ else:
+ settings_logger.debug("Clearing entire settings cache")
+ settings_cache = {}
+
+def get_settings_file_path(app_name: str) -> pathlib.Path:
+ """Get the path to the settings file for a specific app."""
+ if app_name not in KNOWN_APP_TYPES:
+ # Log a warning but allow for potential future app types
+ settings_logger.warning(f"Requested settings file for unknown app type: {app_name}")
+ return SETTINGS_DIR / f"{app_name}.json"
+
+def get_default_config_path(app_name: str) -> pathlib.Path:
+ """Get the path to the default config file for a specific app."""
+ return pathlib.Path(DEFAULT_CONFIGS_DIR) / f"{app_name}.json"
+
+# Helper function to load default settings for a specific app
+def load_default_app_settings(app_name: str) -> Dict[str, Any]:
+ """Load default settings for a specific app from its JSON file."""
+ default_file = get_default_config_path(app_name)
+ if default_file.exists():
+ try:
+ with open(default_file, 'r') as f:
+ return json.load(f)
+ except Exception as e:
+ settings_logger.error(f"Error loading default settings for {app_name} from {default_file}: {e}")
+ return {}
+ else:
+ settings_logger.warning(f"Default settings file not found for {app_name}: {default_file}")
+ return {}
+
+def _ensure_config_exists(app_name: str) -> None:
+ """Ensure the config file exists for an app, copying from default if not."""
+ settings_file = get_settings_file_path(app_name)
+ if not settings_file.exists():
+ default_file = get_default_config_path(app_name)
+ if default_file.exists():
+ try:
+ shutil.copyfile(default_file, settings_file)
+ settings_logger.info(f"Created default settings file for {app_name} at {settings_file}")
+ except Exception as e:
+ settings_logger.error(f"Error copying default settings for {app_name}: {e}")
+ else:
+ # Create an empty file if no default exists
+ settings_logger.warning(f"No default config found for {app_name}. Creating empty settings file.")
+ try:
+ with open(settings_file, 'w') as f:
+ json.dump({}, f)
+ except Exception as e:
+ settings_logger.error(f"Error creating empty settings file for {app_name}: {e}")
+
+
+def load_settings(app_type, use_cache=True):
+ """
+ Load settings for a specific app type
+
+ Args:
+ app_type: The app type to load settings for
+ use_cache: Whether to use the cached settings if available and recent
+
+ Returns:
+ Dict containing the app settings
+ """
+ global settings_cache
+
+ # Only log unexpected app types that are not 'general'
+ if app_type not in KNOWN_APP_TYPES and app_type != "general":
+ settings_logger.warning(f"load_settings called with unexpected app_type: {app_type}")
+
+ # Check if we have a valid cache entry
+ if use_cache and app_type in settings_cache:
+ cache_entry = settings_cache[app_type]
+ cache_age = time.time() - cache_entry.get('timestamp', 0)
+
+ if cache_age < CACHE_TTL:
+ settings_logger.debug(f"Using cached settings for {app_type} (age: {cache_age:.1f}s)")
+ return cache_entry['data']
+ else:
+ settings_logger.debug(f"Cache expired for {app_type} (age: {cache_age:.1f}s)")
+
+ # No valid cache entry, load from disk
+ _ensure_config_exists(app_type)
+ settings_file = get_settings_file_path(app_type)
+ try:
+ with open(settings_file, 'r') as f:
+ # Load existing settings
+ current_settings = json.load(f)
+
+ # Load defaults to check for missing keys
+ default_settings = load_default_app_settings(app_type)
+
+ # Add missing keys from defaults without overwriting existing values
+ updated = False
+ for key, value in default_settings.items():
+ if key not in current_settings:
+ current_settings[key] = value
+ updated = True
+
+ # If keys were added, save the updated file
+ if updated:
+ settings_logger.info(f"Added missing default keys to {app_type}.json")
+ save_settings(app_type, current_settings) # Use save_settings to handle writing
+
+ # Update cache
+ settings_cache[app_type] = {
+ 'timestamp': time.time(),
+ 'data': current_settings
+ }
+
+ return current_settings
+
+ except json.JSONDecodeError:
+ settings_logger.error(f"Error decoding JSON from {settings_file}. Restoring from default.")
+ # Attempt to restore from default
+ default_settings = load_default_app_settings(app_type)
+ save_settings(app_type, default_settings) # Save the restored defaults
+
+ # Update cache with defaults
+ settings_cache[app_type] = {
+ 'timestamp': time.time(),
+ 'data': default_settings
+ }
+
+ return default_settings
+ except Exception as e:
+ settings_logger.error(f"Error loading settings for {app_type} from {settings_file}: {e}")
+ return {} # Return empty dict on other errors
+
+
def save_settings(app_name: str, settings_data: Dict[str, Any]) -> bool:
    """Persist the given settings dict for one app to its JSON file.

    Args:
        app_name: App identifier; must be one of KNOWN_APP_TYPES.
        settings_data: Full settings payload written as-is (indented JSON).

    Returns:
        True on success, False for unknown apps or write failures.
    """
    if app_name not in KNOWN_APP_TYPES:
        settings_logger.error(f"Attempted to save settings for unknown app type: {app_name}")
        return False

    target = get_settings_file_path(app_name)
    try:
        # Make sure the parent directory is present before writing.
        target.parent.mkdir(parents=True, exist_ok=True)

        with open(target, 'w') as fh:
            json.dump(settings_data, fh, indent=2)
        settings_logger.info(f"Settings saved successfully for {app_name} to {target}")

        # Invalidate the cached copy so subsequent reads hit the disk file.
        clear_cache(app_name)
        return True
    except Exception as e:
        settings_logger.error(f"Error saving settings for {app_name} to {target}: {e}")
        return False
+
def get_setting(app_name: str, key: str, default: Optional[Any] = None) -> Any:
    """Return one setting value for an app, or `default` when the key is absent."""
    app_settings = load_settings(app_name)
    return app_settings.get(key, default)
+
def get_api_url(app_name: str) -> Optional[str]:
    """Get the API URL for a specific app.

    Returns the stored "api_url" setting; with the "" default this in practice
    never returns None despite the Optional annotation.
    """
    return get_setting(app_name, "api_url", "")
+
def get_api_key(app_name: str) -> Optional[str]:
    """Get the API Key for a specific app.

    Returns the stored "api_key" setting; with the "" default this in practice
    never returns None despite the Optional annotation.
    """
    return get_setting(app_name, "api_key", "")
+
def get_all_settings() -> Dict[str, Dict[str, Any]]:
    """Load settings for every known app, keyed by app name.

    load_settings() ensures each per-app file exists and parses it; apps whose
    settings could not be loaded (empty dict) are omitted from the result.
    """
    per_app = ((name, load_settings(name)) for name in KNOWN_APP_TYPES)
    return {name: data for name, data in per_app if data}
+
def get_configured_apps() -> List[str]:
    """Return a list of app names that have basic configuration (API URL and Key)."""

    def _has_valid_config(cfg: Dict[str, Any]) -> bool:
        # Multi-instance mode: one enabled instance with both URL and key is
        # enough to consider the app configured.
        instances = cfg.get("instances")
        if isinstance(instances, list) and instances:
            return any(
                inst.get("enabled", True) and inst.get("api_url") and inst.get("api_key")
                for inst in instances
            )
        # Fallback to the legacy single-instance layout.
        return bool(cfg.get("api_url") and cfg.get("api_key"))

    configured = [name for name in KNOWN_APP_TYPES if _has_valid_config(load_settings(name))]
    settings_logger.info(f"Configured apps: {configured}")
    return configured
+
def apply_timezone(timezone: str) -> bool:
    """Apply the specified timezone to the container.

    Sets the TZ environment variable, repoints the /etc/localtime symlink at
    the matching zoneinfo file, and rewrites /etc/timezone with the zone name.

    Args:
        timezone: The timezone to set (e.g., 'UTC', 'America/New_York')

    Returns:
        bool: True if successful, False otherwise
    """
    try:
        # Set TZ environment variable for this process and its children
        os.environ['TZ'] = timezone

        # Create symlink for localtime (common approach in containers)
        zoneinfo_path = f"/usr/share/zoneinfo/{timezone}"
        if os.path.exists(zoneinfo_path):
            # Remove any existing /etc/localtime first. Use lexists() rather
            # than exists(): exists() follows symlinks, so a dangling
            # /etc/localtime link would report False, be left in place, and
            # make the os.symlink() below fail with FileExistsError.
            if os.path.lexists("/etc/localtime"):
                os.remove("/etc/localtime")

            # Create new symlink
            os.symlink(zoneinfo_path, "/etc/localtime")

            # Record the zone name in /etc/timezone (created if missing)
            with open("/etc/timezone", "w") as f:
                f.write(f"{timezone}\n")

            settings_logger.info(f"Timezone set to {timezone}")
            return True
        else:
            settings_logger.error(f"Timezone file not found: {zoneinfo_path}")
            return False
    except Exception as e:
        settings_logger.error(f"Error setting timezone: {str(e)}")
        return False
+
# Add a list of known advanced settings for clarity and documentation.
# These keys live centrally in the 'general' settings file (see
# get_advanced_setting below), not in the per-app settings files.
ADVANCED_SETTINGS = [
    "api_timeout",
    "command_wait_delay",
    "command_wait_attempts",
    "minimum_download_queue_size",
    "log_refresh_interval_seconds",
    "debug_mode",
    "stateful_management_hours"
]
+
def get_advanced_setting(setting_name, default_value=None):
    """
    Look up an advanced setting from the centralized general settings.

    Advanced settings are stored only in the general settings file (not in the
    per-app settings files); this helper is the single access point for them.
    Unknown names are served anyway, but logged as a warning.

    Args:
        setting_name: The name of the advanced setting to retrieve
        default_value: The default value to return if the setting is not found

    Returns:
        The value of the setting or the default value if not found
    """
    if setting_name not in ADVANCED_SETTINGS:
        settings_logger.warning(f"Requested unknown advanced setting: {setting_name}")

    general = load_settings('general', use_cache=True)
    return general.get(setting_name, default_value)
+
# Example usage (for testing purposes, remove later).
# NOTE(review): this exercises the real settings files on disk — running it
# mutates the live sonarr config (sleep_duration is overwritten below).
if __name__ == "__main__":
    settings_logger.info(f"Known app types: {KNOWN_APP_TYPES}")

    # Ensure defaults are copied if needed
    for app in KNOWN_APP_TYPES:
        _ensure_config_exists(app)

    # Test loading Sonarr settings
    sonarr_settings = load_settings("sonarr")
    settings_logger.info(f"Loaded Sonarr settings: {json.dumps(sonarr_settings, indent=2)}")

    # Test getting a specific setting (999 is just a sentinel default)
    sonarr_sleep = get_setting("sonarr", "sleep_duration", 999)
    settings_logger.info(f"Sonarr sleep duration: {sonarr_sleep}")

    # Test saving updated settings (example)
    if sonarr_settings:
        sonarr_settings["sleep_duration"] = 850
        save_settings("sonarr", sonarr_settings)
        reloaded_sonarr_settings = load_settings("sonarr")
        settings_logger.info(f"Reloaded Sonarr settings after save: {json.dumps(reloaded_sonarr_settings, indent=2)}")


    # Test getting all settings
    all_app_settings = get_all_settings()
    settings_logger.info(f"All loaded settings: {json.dumps(all_app_settings, indent=2)}")

    # Test getting configured apps
    configured_list = get_configured_apps()
    settings_logger.info(f"Configured apps: {configured_list}")
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/state.py b/Huntarr.io-6.3.6/src/primary/state.py
new file mode 100644
index 0000000..50011de
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/state.py
@@ -0,0 +1,322 @@
+#!/usr/bin/env python3
+"""
+State management module for Huntarr
+Handles all persistence of program state
+"""
+
+import os
+import datetime
+import time
+import json
+from typing import List, Dict, Any, Optional
+from src.primary import settings_manager
+
+# Define the config directory - typically /config in Docker environment
+CONFIG_DIR = os.environ.get('CONFIG_DIR', '/config')
+
+# Get the logger at module level
+from src.primary.utils.logger import get_logger
+logger = get_logger("huntarr")
+
def get_state_file_path(app_type, state_name):
    """
    Build the filesystem path of an app's named state file.

    The state directory ({CONFIG_DIR}/state/{app_type}) is created on demand.

    Args:
        app_type: The application type (sonarr, radarr, etc.)
        state_name: The name of the state file

    Returns:
        The path to the state file
    """
    valid_types = ["sonarr", "radarr", "lidarr", "readarr", "whisparr", "eros"]

    # Unexpected app types are tolerated (a directory is still created),
    # but flagged so typos surface in the logs.
    if app_type != "general" and app_type not in valid_types:
        logger.warning(f"get_state_file_path called with unexpected app_type: {app_type}")

    state_dir = os.path.join(CONFIG_DIR, "state", app_type)
    os.makedirs(state_dir, exist_ok=True)

    return os.path.join(state_dir, f"{state_name}.json")
+
def get_last_reset_time(app_type: str = None) -> datetime.datetime:
    """
    Get the last time the state was reset for a specific app type.

    Args:
        app_type: The type of app to get last reset time for.

    Returns:
        The datetime of the last reset, or the epoch (1970) when no reset has
        been recorded, the file is unreadable, or app_type is missing.
    """
    epoch = datetime.datetime.fromtimestamp(0)

    if not app_type:
        logger.error("get_last_reset_time called without app_type.")
        return epoch

    reset_file = get_state_file_path(app_type, "last_reset")
    try:
        if os.path.exists(reset_file):
            with open(reset_file, "r") as fh:
                # File holds a single ISO-8601 timestamp.
                return datetime.datetime.fromisoformat(fh.read().strip())
    except Exception as e:
        logger.error(f"Error reading last reset time for {app_type}: {e}")

    return epoch
+
def set_last_reset_time(reset_time: datetime.datetime, app_type: str = None) -> None:
    """
    Persist the last state-reset time for a specific app type.

    Args:
        reset_time: The datetime to set (stored as ISO-8601 text)
        app_type: The type of app to set last reset time for.
    """
    if not app_type:
        logger.error("set_last_reset_time called without app_type.")
        return

    target = get_state_file_path(app_type, "last_reset")
    try:
        with open(target, "w") as fh:
            fh.write(reset_time.isoformat())
    except Exception as e:
        logger.error(f"Error writing last reset time for {app_type}: {e}")
+
def check_state_reset(app_type: str = None) -> bool:
    """
    Check if the state needs to be reset based on the reset interval.
    If it's time to reset, clears the processed IDs and updates the last reset time.

    Note: the reset is only actually performed once *double* the configured
    interval has elapsed (safeguard below); between 1x and 2x the interval the
    reset is postponed and the recorded reset time is nudged forward.

    Args:
        app_type: The type of app to check state reset for.

    Returns:
        True if the state was reset, False otherwise.
    """
    if not app_type:
        logger.error("check_state_reset called without app_type.")
        return False

    current_app_type = app_type

    # Use a much longer default interval (1 week = 168 hours) to prevent frequent resets
    reset_interval = settings_manager.get_advanced_setting("stateful_management_hours", 168)

    last_reset = get_last_reset_time(current_app_type)
    now = datetime.datetime.now()

    delta = now - last_reset
    hours_passed = delta.total_seconds() / 3600

    # Log every cycle to help diagnose state reset issues
    logger.debug(f"State check for {current_app_type}: {hours_passed:.1f} hours since last reset (interval: {reset_interval}h)")

    if hours_passed >= reset_interval:
        logger.warning(f"State files for {current_app_type} will be reset after {hours_passed:.1f} hours (interval: {reset_interval}h)")
        logger.warning(f"This will cause all previously processed media to be eligible for processing again")

        # Add additional safeguard - only reset if more than double the interval has passed
        # This helps prevent accidental resets due to clock issues or other anomalies
        if hours_passed >= (reset_interval * 2):
            logger.info(f"Confirmed state reset for {current_app_type} after {hours_passed:.1f} hours")
            clear_processed_ids(current_app_type)
            set_last_reset_time(now, current_app_type)
            return True
        else:
            logger.info(f"State reset postponed for {current_app_type} - will proceed when {reset_interval * 2}h have passed")
            # Update last reset time partially to avoid immediate reset next cycle.
            # Recording "now - interval/2" means roughly another 1.5x interval
            # must pass before this branch is re-entered; the 2x check above
            # still gates the actual reset.
            half_delta = datetime.timedelta(hours=reset_interval/2)
            set_last_reset_time(now - half_delta, current_app_type)

    return False
+
def clear_processed_ids(app_type: str = None) -> None:
    """
    Clear all processed IDs for a specific app type.

    Rewrites both the processed_missing and processed_upgrades state files
    (when present) as empty JSON lists.

    Args:
        app_type: The type of app to clear processed IDs for.
    """
    if not app_type:
        logger.error("clear_processed_ids called without app_type.")
        return

    # (state file name, label used in log messages)
    targets = [
        ("processed_missing", "missing"),
        ("processed_upgrades", "upgrade"),
    ]
    for state_name, label in targets:
        path = get_state_file_path(app_type, state_name)
        try:
            if os.path.exists(path):
                with open(path, "w") as fh:
                    fh.write("[]")
                logger.info(f"Cleared processed {label} IDs for {app_type}")
        except Exception as e:
            logger.error(f"Error clearing processed {label} IDs for {app_type}: {e}")
+
def calculate_reset_time(app_type: str = None) -> str:
    """
    Describe when the next state reset will occur for an app.

    Args:
        app_type: The type of app to calculate reset time for.

    Returns:
        A human-readable string such as "Next reset: in 3 hours".
    """
    if not app_type:
        logger.error("calculate_reset_time called without app_type.")
        return "Next reset: Unknown (app type not provided)"

    interval_hours = settings_manager.get_advanced_setting("stateful_management_hours", 168)

    next_reset = get_last_reset_time(app_type) + datetime.timedelta(hours=interval_hours)
    now = datetime.datetime.now()

    if next_reset < now:
        # Already overdue; the next cycle will perform the reset.
        return "Next reset: at the start of the next cycle"

    remaining = next_reset - now
    remaining_hours = remaining.total_seconds() / 3600

    if remaining_hours < 1:
        return f"Next reset: in {int(remaining.total_seconds() / 60)} minutes"
    if remaining_hours < 24:
        return f"Next reset: in {int(remaining_hours)} hours"
    return f"Next reset: in {int(remaining_hours / 24)} days"
+
def load_processed_ids(filepath: str) -> List[int]:
    """
    Load processed IDs from a file.

    Args:
        filepath: The path to the file

    Returns:
        The list stored in the file, or [] when the file is missing,
        malformed, or does not contain a JSON list.
    """
    data = None
    try:
        if os.path.exists(filepath):
            with open(filepath, "r") as fh:
                data = json.load(fh)
    except json.JSONDecodeError as e:
        logger.error(f"Error decoding JSON from {filepath}: {e}. Returning empty list.")
        return []
    except Exception as e:
        logger.error(f"Error loading processed IDs from {filepath}: {e}")
        return []

    if data is None:
        return []
    if isinstance(data, list):
        return data
    logger.error(f"Invalid data type loaded from {filepath}. Expected list, got {type(data)}. Returning empty list.")
    return []
+
def save_processed_ids(filepath: str, ids: List[int]) -> bool:
    """
    Save processed IDs to a file.

    Args:
        filepath: The path to the file
        ids: The list of IDs to save

    Returns:
        True on success, False if the write failed (the error is logged).
        Returning a flag is backward-compatible: previous callers ignored the
        implicit None result, and it lets new callers detect silent failures.
    """
    try:
        with open(filepath, "w") as f:
            json.dump(ids, f)
        return True
    except Exception as e:
        logger.error(f"Error saving processed IDs to {filepath}: {e}")
        return False
+
def save_processed_id(filepath: str, item_id: int) -> None:
    """
    Add a single ID to a processed IDs file.

    No-op when the ID is already recorded.

    Args:
        filepath: The path to the file
        item_id: The ID to add
    """
    existing = load_processed_ids(filepath)
    if item_id in existing:
        return
    save_processed_ids(filepath, existing + [item_id])
+
def reset_state_file(app_type: str, state_type: str) -> bool:
    """
    Reset a specific state file for an app type.

    Args:
        app_type: The type of app (sonarr, radarr, etc.)
        state_type: The type of state file (processed_missing, processed_upgrades)

    Returns:
        True if successful, False otherwise
    """
    if not app_type:
        logger.error("reset_state_file called without app_type.")
        return False

    filepath = get_state_file_path(app_type, state_type)

    try:
        # Write the empty list directly instead of via save_processed_ids():
        # that helper swallows I/O errors internally, which made this except
        # block unreachable and caused True to be reported even when the
        # write had actually failed.
        with open(filepath, "w") as f:
            json.dump([], f)
        logger.info(f"Reset {state_type} state file for {app_type}")
        return True
    except Exception as e:
        logger.error(f"Error resetting {state_type} state file for {app_type}: {e}")
        return False
+
def truncate_processed_list(filepath: str, max_items: int = 1000) -> None:
    """
    Cap a processed IDs list at max_items, keeping the most recent entries.

    This helps prevent the file from growing too large over time.

    Args:
        filepath: The path to the file
        max_items: The maximum number of items to keep
    """
    ids = load_processed_ids(filepath)
    excess = len(ids) - max_items
    if excess <= 0:
        return

    # Drop the oldest entries (list is append-ordered).
    save_processed_ids(filepath, ids[excess:])
    logger.debug(f"Truncated {filepath} to {max_items} items")
+
def init_state_files() -> None:
    """Initialize state files for all app types.

    Creates empty processed-ID lists and an epoch last-reset marker for every
    known app, but only where the files do not already exist.
    """
    app_types = settings_manager.KNOWN_APP_TYPES

    for app_type in app_types:
        missing_file = get_state_file_path(app_type, "processed_missing")
        upgrades_file = get_state_file_path(app_type, "processed_upgrades")
        reset_file = get_state_file_path(app_type, "last_reset")

        for filepath in [missing_file, upgrades_file]:
            if not os.path.exists(filepath):
                save_processed_ids(filepath, [])

        if not os.path.exists(reset_file):
            # Epoch sentinel meaning "never reset".
            set_last_reset_time(datetime.datetime.fromtimestamp(0), app_type)

# Runs at import time so state files exist before any hunt cycle starts.
init_state_files()
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/stateful_manager.py b/Huntarr.io-6.3.6/src/primary/stateful_manager.py
new file mode 100644
index 0000000..30819f1
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/stateful_manager.py
@@ -0,0 +1,361 @@
+#!/usr/bin/env python3
+"""
+Stateful Manager for Huntarr
+Handles storing and retrieving processed media IDs to prevent reprocessing
+"""
+
+import os
+import json
+import time
+import pathlib
+import datetime
+import logging
+from typing import Dict, Any, List, Optional, Set
+
+# Create logger for stateful_manager
+stateful_logger = logging.getLogger("stateful_manager")
+
+# Constants
+STATEFUL_DIR = pathlib.Path(os.getenv("STATEFUL_DIR", "/config/stateful"))
+LOCK_FILE = STATEFUL_DIR / "lock.json"
+DEFAULT_HOURS = 168 # Default 7 days (168 hours)
+
+# Ensure the stateful directory exists
+try:
+ STATEFUL_DIR.mkdir(parents=True, exist_ok=True)
+ stateful_logger.info(f"Stateful directory created/confirmed at {STATEFUL_DIR}")
+except Exception as e:
+ stateful_logger.error(f"Error creating stateful directory: {e}")
+
+# Create app directories
+APP_TYPES = ["sonarr", "radarr", "lidarr", "readarr", "whisparr", "eros"]
+for app_type in APP_TYPES:
+ (STATEFUL_DIR / app_type).mkdir(exist_ok=True)
+
+# Add import for get_advanced_setting
+from src.primary.settings_manager import get_advanced_setting
+
def initialize_lock_file() -> None:
    """Create the lock file with creation/expiration timestamps if absent."""
    # Re-assert the directory exists; failures are logged, not raised.
    try:
        STATEFUL_DIR.mkdir(parents=True, exist_ok=True)
    except Exception as e:
        stateful_logger.error(f"Error creating stateful directory: {e}")

    if LOCK_FILE.exists():
        return

    try:
        now = int(time.time())
        # Expiration window (hours) comes from the advanced settings.
        expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS)
        payload = {
            "created_at": now,
            "expires_at": now + (expiration_hours * 3600),
        }
        with open(LOCK_FILE, 'w') as fh:
            json.dump(payload, fh, indent=2)
        stateful_logger.info(f"Initialized lock file at {LOCK_FILE} with expiration in {expiration_hours} hours")
    except Exception as e:
        stateful_logger.error(f"Error initializing lock file: {e}")
+
def get_lock_info() -> Dict[str, Any]:
    """Get the current lock information.

    Self-healing read: ensures the lock file exists, backfills missing
    'created_at'/'expires_at' fields (persisting the repair), and falls back
    to freshly computed defaults when the file cannot be read at all.
    """
    initialize_lock_file()
    try:
        with open(LOCK_FILE, 'r') as f:
            lock_info = json.load(f)

        # Validate the structure and ensure required fields exist
        if not isinstance(lock_info, dict):
            raise ValueError("Lock info is not a dictionary")

        if "created_at" not in lock_info:
            # NOTE(review): a backfilled created_at alone is not written back
            # to disk unless expires_at is also repaired below.
            lock_info["created_at"] = int(time.time())

        if "expires_at" not in lock_info or lock_info["expires_at"] is None:
            # Recalculate expiration if missing
            expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS)
            lock_info["expires_at"] = lock_info["created_at"] + (expiration_hours * 3600)

            # Save the updated info
            with open(LOCK_FILE, 'w') as f:
                json.dump(lock_info, f, indent=2)

        return lock_info
    except Exception as e:
        stateful_logger.error(f"Error reading lock file: {e}")
        # Return default values if there's an error
        current_time = int(time.time())
        expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS)
        expires_at = current_time + (expiration_hours * 3600)

        return {
            "created_at": current_time,
            "expires_at": expires_at
        }
+
def update_lock_expiration(hours: int = None) -> bool:
    """Recompute the lock's expires_at from its created_at plus `hours`.

    When hours is None the 'stateful_management_hours' setting (or the module
    default) is used. Returns True if the lock file was rewritten.
    """
    if hours is None:
        expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS)
    else:
        expiration_hours = hours

    lock_info = get_lock_info()
    base = lock_info.get("created_at", int(time.time()))
    expires_at = base + (expiration_hours * 3600)
    lock_info["expires_at"] = expires_at

    try:
        with open(LOCK_FILE, 'w') as fh:
            json.dump(lock_info, fh, indent=2)
        stateful_logger.info(f"Updated lock expiration to {datetime.datetime.fromtimestamp(expires_at)}")
        return True
    except Exception as e:
        stateful_logger.error(f"Error updating lock expiration: {e}")
        return False
+
def reset_stateful_management() -> bool:
    """
    Reset the stateful management system.

    This involves:
    1. Creating a new lock file with the current timestamp and a calculated expiration time
       based on the 'stateful_management_hours' setting.
    2. Deleting all stored processed ID files (*.json) within each app-specific
       subdirectory under the STATEFUL_DIR.

    Individual file-deletion failures are logged but do not abort the reset.

    Returns:
        bool: True if the reset was successful, False otherwise.
    """
    try:
        # Get the expiration hours setting BEFORE writing the lock file
        expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS)

        # Create new lock file with calculated expiration
        current_time = int(time.time())
        expires_at = current_time + (expiration_hours * 3600)

        with open(LOCK_FILE, 'w') as f:
            json.dump({
                "created_at": current_time,
                "expires_at": expires_at  # Write the calculated expiration time directly
            }, f, indent=2)

        # Delete all stored IDs (one JSON file per app instance)
        for app_type in APP_TYPES:
            app_dir = STATEFUL_DIR / app_type
            if app_dir.exists():
                for json_file in app_dir.glob("*.json"):
                    try:
                        json_file.unlink()
                        stateful_logger.debug(f"Deleted {json_file}")
                    except Exception as e:
                        stateful_logger.error(f"Error deleting {json_file}: {e}")

        # No need to call update_lock_expiration() again as we wrote it directly
        stateful_logger.info(f"Successfully reset stateful management. New expiration: {datetime.datetime.fromtimestamp(expires_at)}")
        return True
    except Exception as e:
        stateful_logger.error(f"Error resetting stateful management: {e}")
        return False
+
def check_expiration() -> bool:
    """
    Check if the stateful management has expired.

    An expired lock triggers a full reset of the stateful system.

    Returns:
        bool: True if expired (and a reset was triggered), False otherwise
    """
    lock_info = get_lock_info()
    expires_at = lock_info.get("expires_at")

    # A missing expiration is repaired from settings before comparing.
    if expires_at is None:
        update_lock_expiration()
        expires_at = get_lock_info().get("expires_at")

    if int(time.time()) < expires_at:
        return False

    stateful_logger.info("Stateful management has expired, resetting...")
    reset_stateful_management()
    return True
+
def get_processed_ids(app_type: str, instance_name: str) -> Set[str]:
    """
    Get the set of processed media IDs for a specific app instance.

    Args:
        app_type: The type of app (sonarr, radarr, etc.)
        instance_name: The name of the instance

    Returns:
        Set[str]: Set of processed media IDs (empty for an unknown app,
        a missing file, or a read error)
    """
    if app_type not in APP_TYPES:
        stateful_logger.warning(f"Unknown app type: {app_type}")
        return set()

    # Instance names are sanitized to alphanumerics for use as filenames.
    safe_instance_name = "".join([c if c.isalnum() else "_" for c in instance_name])
    file_path = STATEFUL_DIR / app_type / f"{safe_instance_name}.json"
    stateful_logger.debug(f"[get_processed_ids] Checking file: {file_path} for {app_type}/{instance_name}") # DEBUG LOG

    if not file_path.exists():
        stateful_logger.debug(f"[get_processed_ids] File not found: {file_path}") # DEBUG LOG
        return set()

    try:
        with open(file_path, 'r') as fh:
            stored = json.load(fh)
        ids = set(stored.get("processed_ids", []))
        stateful_logger.debug(f"[get_processed_ids] Read {len(ids)} IDs from {file_path}: {ids}") # DEBUG LOG
        return ids
    except Exception as e:
        stateful_logger.error(f"Error reading processed IDs for {instance_name} from {file_path}: {e}") # Updated log
        return set()
+
def add_processed_id(app_type: str, instance_name: str, media_id: str) -> bool:
    """
    Add a media ID to the processed list for a specific app instance.

    Args:
        app_type: The type of app (sonarr, radarr, etc.)
        instance_name: The name of the instance
        media_id: The ID of the processed media (coerced to str before storing)

    Returns:
        bool: True if successful (including "already present"), False otherwise
    """
    if app_type not in APP_TYPES:
        stateful_logger.warning(f"Unknown app type: {app_type}")
        return False

    # Normalize to str before storing: is_processed() compares str(media_id)
    # against the stored values, so an int stored here would never be matched
    # later and the item would be reprocessed.
    media_id = str(media_id)

    # Create safe filename from instance name
    safe_instance_name = "".join([c if c.isalnum() else "_" for c in instance_name])
    file_path = STATEFUL_DIR / app_type / f"{safe_instance_name}.json"

    # Get existing processed IDs using the get function (which includes logging)
    current_processed_ids_set = get_processed_ids(app_type, instance_name)

    if media_id in current_processed_ids_set:
        stateful_logger.debug(f"[add_processed_id] ID {media_id} already in list for {app_type}/{instance_name}") # DEBUG LOG
        # No need to write if the ID is already present
        return True

    processed_ids_list = list(current_processed_ids_set)
    processed_ids_list.append(media_id)
    stateful_logger.debug(f"[add_processed_id] Adding ID {media_id} to list for {app_type}/{instance_name}") # DEBUG LOG

    # Write the updated list back to the file
    stateful_logger.debug(f"[add_processed_id] Writing {len(processed_ids_list)} IDs to {file_path}: {processed_ids_list}") # DEBUG LOG
    try:
        with open(file_path, 'w') as f:
            json.dump({
                "processed_ids": processed_ids_list,
                "last_updated": int(time.time())
            }, f, indent=2)
        return True
    except Exception as e:
        stateful_logger.error(f"Error adding media ID {media_id} to {file_path}: {e}")
        return False
+
def is_processed(app_type: str, instance_name: str, media_id: str) -> bool:
    """
    Check if a media ID has already been processed.

    Note: the membership test uses str(media_id), so it only matches entries
    that were stored as strings — presumably callers of add_processed_id pass
    str IDs; TODO confirm, since an int stored there would never match here.

    Args:
        app_type: The type of app (sonarr, radarr, etc.)
        instance_name: The name of the instance
        media_id: The ID of the media to check

    Returns:
        bool: True if already processed, False otherwise
    """
    # Create safe filename for logging
    safe_instance = "".join([c if c.isalnum() else "_" for c in instance_name])
    file_path = STATEFUL_DIR / app_type / f"{safe_instance}.json"

    # Get processed IDs for this app/instance
    processed_ids = get_processed_ids(app_type, instance_name)

    # Log what we're checking and the result
    # Converting media_id to string since some callers might pass an integer
    media_id_str = str(media_id)
    is_in_set = media_id_str in processed_ids

    # INFO-level on purpose (diagnostic trail for every check); quite verbose.
    stateful_logger.info(f"is_processed check: {app_type}/{instance_name}, ID:{media_id_str}, Found:{is_in_set}, File:{file_path}, Total IDs:{len(processed_ids)}")

    return is_in_set
+
def get_stateful_management_info() -> Dict[str, Any]:
    """Summarize the stateful lock state.

    Returns a dict with 'created_at_ts' and 'expires_at_ts' (unix seconds,
    may be None) plus 'interval_hours' from the advanced settings.
    """
    lock_info = get_lock_info()
    return {
        "created_at_ts": lock_info.get("created_at"),
        "expires_at_ts": lock_info.get("expires_at"),
        "interval_hours": get_advanced_setting("stateful_management_hours", DEFAULT_HOURS),
    }
+
def initialize_stateful_system():
    """Perform a complete initialization of the stateful management system.

    Creates the directory tree, initializes the lock file (refreshing its
    expiration from settings), and logs how many processed-ID files exist.
    Each step logs and continues on failure rather than raising.
    """
    stateful_logger.info("Initializing stateful management system")

    # Ensure all required directories exist
    try:
        STATEFUL_DIR.mkdir(parents=True, exist_ok=True)
        for app_type in APP_TYPES:
            (STATEFUL_DIR / app_type).mkdir(exist_ok=True)
        stateful_logger.info(f"Stateful directory structure created at {STATEFUL_DIR}")
    except Exception as e:
        stateful_logger.error(f"Failed to create stateful directories: {e}")

    # Initialize the lock file with proper expiration
    try:
        initialize_lock_file()
        # Update expiration time
        expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS)
        update_lock_expiration(expiration_hours)
        stateful_logger.info(f"Stateful lock file initialized with {expiration_hours} hour expiration")
    except Exception as e:
        stateful_logger.error(f"Failed to initialize lock file: {e}")

    # Check for existing processed IDs
    try:
        # NOTE(review): despite the name, total_ids counts per-instance JSON
        # files, not individual IDs inside them.
        total_ids = 0
        for app_type in APP_TYPES:
            app_dir = STATEFUL_DIR / app_type
            if app_dir.exists():
                files = list(app_dir.glob("*.json"))
                total_ids += len(files)

        if total_ids > 0:
            stateful_logger.info(f"Found {total_ids} existing processed ID files")
        else:
            stateful_logger.info("No existing processed ID files found")
    except Exception as e:
        stateful_logger.error(f"Failed to check for existing processed IDs: {e}")

    stateful_logger.info("Stateful management system initialization complete")

# Initialize the stateful system on module import
initialize_stateful_system()
diff --git a/Huntarr.io-6.3.6/src/primary/stateful_routes.py b/Huntarr.io-6.3.6/src/primary/stateful_routes.py
new file mode 100644
index 0000000..a652131
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/stateful_routes.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python3
+"""
+Stateful Management API Routes
+Handles API endpoints for stateful management
+"""
+
+from flask import Blueprint, jsonify, request, Response
+import json
+from src.primary.stateful_manager import (
+ get_stateful_management_info,
+ reset_stateful_management,
+ update_lock_expiration
+)
+from src.primary.utils.logger import get_logger
+
+# Create logger
+stateful_logger = get_logger("stateful")
+
+# Create blueprint
+stateful_api = Blueprint('stateful_api', __name__)
+
@stateful_api.route('/info', methods=['GET'])
def get_info():
    """Get stateful management information (lock timestamps and interval)."""
    try:
        info = get_stateful_management_info()
        payload = {
            "success": True,
            "created_at_ts": info.get("created_at_ts"),
            "expires_at_ts": info.get("expires_at_ts"),
            "interval_hours": info.get("interval_hours"),
        }
        status = 200
    except Exception as e:
        stateful_logger.error(f"Error getting stateful info: {e}")
        payload = {"success": False, "message": f"Error getting stateful info: {str(e)}"}
        status = 500

    # JSON body with a permissive CORS header so the frontend can read it.
    response = Response(json.dumps(payload), status=status)
    response.headers['Content-Type'] = 'application/json'
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
+
@stateful_api.route('/reset', methods=['POST'])
def reset_stateful():
    """Reset the stateful management system (clears processed IDs, new lock)."""
    try:
        if reset_stateful_management():
            payload = {"success": True, "message": "Stateful management reset successfully"}
            status = 200
        else:
            payload = {"success": False, "message": "Failed to reset stateful management"}
            status = 500
    except Exception as e:
        stateful_logger.error(f"Error resetting stateful management: {e}")
        # Note: this error body intentionally matches the historical shape.
        payload = {"error": str(e)}
        status = 500

    # JSON body with a permissive CORS header so the frontend can read it.
    response = Response(json.dumps(payload), status=status)
    response.headers['Content-Type'] = 'application/json'
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
+
@stateful_api.route('/update-expiration', methods=['POST'])
def update_expiration():
    """Update the stateful management expiration time.

    Expects a JSON body {"hours": <positive int>}. On success the response
    carries the new expiration as a unix timestamp ('expires_at') and an ISO
    date string ('expires_date').
    """
    import datetime  # local import keeps this fix self-contained

    try:
        hours = request.json.get('hours')
        # bool is excluded explicitly: it is a subclass of int, so True would
        # otherwise be accepted and silently mean "1 hour".
        if hours is None or isinstance(hours, bool) or not isinstance(hours, int) or hours <= 0:
            stateful_logger.error(f"Invalid hours value for update-expiration: {hours}")
            error_data = {"success": False, "message": f"Invalid hours value: {hours}. Must be a positive integer."}
            response = Response(json.dumps(error_data), status=400)
            response.headers['Content-Type'] = 'application/json'
            response.headers['Access-Control-Allow-Origin'] = '*'
            return response

        if update_lock_expiration(hours):
            info = get_stateful_management_info()
            # BUGFIX: get_stateful_management_info() exposes 'expires_at_ts',
            # not 'expires_at'/'expires_date' — the old lookups always yielded
            # None, so the response carried null values.
            expires_at_ts = info.get("expires_at_ts")
            response_data = {
                "success": True,
                "message": f"Expiration updated to {hours} hours",
                "expires_at": expires_at_ts,
                "expires_date": (
                    datetime.datetime.fromtimestamp(expires_at_ts).isoformat()
                    if expires_at_ts is not None else None
                ),
            }
            response = Response(json.dumps(response_data))
        else:
            response = Response(json.dumps({"success": False, "message": "Failed to update expiration"}), status=500)

        response.headers['Content-Type'] = 'application/json'
        response.headers['Access-Control-Allow-Origin'] = '*'
        return response
    except Exception as e:
        stateful_logger.error(f"Error updating expiration: {e}", exc_info=True)
        error_data = {"success": False, "message": f"Error updating expiration: {str(e)}"}
        response = Response(json.dumps(error_data), status=500)
        response.headers['Content-Type'] = 'application/json'
        response.headers['Access-Control-Allow-Origin'] = '*'
        return response
diff --git a/Huntarr.io-6.3.6/src/primary/stats_manager.py b/Huntarr.io-6.3.6/src/primary/stats_manager.py
new file mode 100644
index 0000000..51aff12
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/stats_manager.py
@@ -0,0 +1,241 @@
+#!/usr/bin/env python3
+"""
+Statistics Manager for Huntarr
+Handles tracking, storing, and retrieving statistics about hunted and upgraded media
+"""
+
+import os
+import json
+import time
+import threading
+from typing import Dict, Any, Optional
+from src.primary.utils.logger import get_logger
+
+logger = get_logger("stats")
+
+# Path constants - Define multiple possible locations and check them in order
+STATS_DIRS = [
+ "/config/tally", # Docker default
+ os.path.join(os.path.expanduser("~"), ".huntarr/tally"), # User's home directory
+ os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), "data/tally") # Relative to script
+]
+
+# Lock for thread-safe operations
+stats_lock = threading.Lock()
+
def find_writable_stats_dir():
    """Return the first candidate stats directory that is actually writable.

    Each candidate in STATS_DIRS is created (if needed) and probed with a
    throw-away file; the first candidate that survives the probe is used.
    If none work, a ``tally`` directory under the current working directory
    is attempted as a last resort.  Returns None only when even the
    fallback cannot be created.
    """
    def _probe(path):
        # Create the directory and prove it is writable with a scratch file.
        os.makedirs(path, exist_ok=True)
        probe_file = os.path.join(path, "write_test")
        with open(probe_file, 'w') as fh:
            fh.write("test")
        os.remove(probe_file)

    for candidate in STATS_DIRS:
        try:
            _probe(candidate)
        except (IOError, OSError) as exc:
            logger.warning(f"Directory {candidate} is not writable: {exc}")
        else:
            logger.info(f"Using stats directory: {candidate}")
            return candidate

    # None of the preferred locations worked; fall back to the CWD.
    last_resort = os.path.join(os.getcwd(), "tally")
    try:
        os.makedirs(last_resort, exist_ok=True)
    except Exception as exc:
        logger.error(f"Failed to create fallback stats directory: {exc}")
        return None
    logger.info(f"Falling back to current directory for stats: {last_resort}")
    return last_resort
+
+# Find the best stats directory
+STATS_DIR = find_writable_stats_dir()
+STATS_FILE = os.path.join(STATS_DIR, "media_stats.json") if STATS_DIR else None
+
+# Log the stats file location once at module load time
+if STATS_FILE:
+ logger.info(f"===> Stats will be stored at: {STATS_FILE}")
+else:
+ logger.error("===> CRITICAL: No stats file location could be determined!")
+
def ensure_stats_dir():
    """Ensure the statistics directory exists; return True on success."""
    # No usable directory was found at import time - nothing we can do.
    if not STATS_DIR:
        logger.error("No writable stats directory found")
        return False
    try:
        os.makedirs(STATS_DIR, exist_ok=True)
    except Exception as exc:
        logger.error(f"Failed to create stats directory: {exc}")
        return False
    logger.debug(f"Stats directory ensured: {STATS_DIR}")
    return True
+
def load_stats() -> Dict[str, Dict[str, int]]:
    """
    Load statistics from the stats file.

    Falls back to the default all-zero structure when the stats directory is
    unavailable, the file is missing, or the file cannot be parsed.

    Returns:
        Dictionary containing statistics for each app
    """
    defaults = get_default_stats()

    if not ensure_stats_dir() or not STATS_FILE:
        logger.error("Cannot load stats - no valid stats directory available")
        return defaults

    try:
        if not os.path.exists(STATS_FILE):
            logger.info(f"Stats file not found at {STATS_FILE}, using default stats")
            return defaults

        logger.debug(f"Loading stats from: {STATS_FILE}")
        with open(STATS_FILE, 'r') as fh:
            stats = json.load(fh)

        # Backfill any app missing from the on-disk file with zeroed counters
        # so callers can index stats[app] unconditionally.
        for app, zeroed in defaults.items():
            stats.setdefault(app, zeroed)

        logger.debug(f"Loaded stats: {stats}")
        return stats
    except Exception as exc:
        logger.error(f"Error loading stats from {STATS_FILE}: {exc}")
        return defaults
+
def get_default_stats() -> Dict[str, Dict[str, int]]:
    """Return a fresh zeroed statistics structure for every supported app."""
    apps = ("sonarr", "radarr", "lidarr", "readarr", "whisparr", "eros", "swaparr")
    # Build new inner dicts each call so callers can mutate their copy freely.
    return {app: {"hunted": 0, "upgraded": 0} for app in apps}
+
def save_stats(stats: Dict[str, Dict[str, int]]) -> bool:
    """
    Persist statistics to the stats file atomically.

    Args:
        stats: Dictionary containing statistics for each app

    Returns:
        True if successful, False otherwise
    """
    if not ensure_stats_dir() or not STATS_FILE:
        logger.error("Cannot save stats - no valid stats directory available")
        return False

    temp_file = f"{STATS_FILE}.tmp"
    try:
        logger.debug(f"Saving stats to: {STATS_FILE}")
        # Write to a temp file and atomically swap it in so a crash mid-write
        # can never leave a truncated stats file behind.
        with open(temp_file, 'w') as fh:
            json.dump(stats, fh, indent=2)
            fh.flush()
            os.fsync(fh.fileno())
        os.replace(temp_file, STATS_FILE)
    except Exception as exc:
        logger.error(f"Error saving stats to {STATS_FILE}: {exc}", exc_info=True)
        return False

    logger.info(f"===> Successfully wrote stats to file: {STATS_FILE}")
    logger.debug(f"Stats saved successfully: {stats}")
    return True
+
def increment_stat(app_type: str, stat_type: str, count: int = 1) -> bool:
    """
    Increment a specific statistic and persist the change.

    Args:
        app_type: The application type (sonarr, radarr, etc.)
        stat_type: The type of statistic (hunted or upgraded)
        count: The amount to increment by (default: 1)

    Returns:
        True if successful, False otherwise
    """
    # Derive the valid app set from get_default_stats() so this validation
    # can never drift out of sync with the canonical app list (the original
    # duplicated the seven app names here as a hard-coded list).
    if app_type not in get_default_stats():
        logger.error(f"Invalid app_type: {app_type}")
        return False

    if stat_type not in ("hunted", "upgraded"):
        logger.error(f"Invalid stat_type: {stat_type}")
        return False

    with stats_lock:
        stats = load_stats()
        prev_value = stats[app_type][stat_type]
        stats[app_type][stat_type] += count
        new_value = stats[app_type][stat_type]
        logger.info(f"*** STATS INCREMENT *** {app_type} {stat_type} by {count}: {prev_value} -> {new_value}")

        if not save_stats(stats):
            logger.error(f"Failed to save stats after incrementing {app_type} {stat_type}")
            return False

        # Paranoia check: re-read from disk to confirm the write landed.
        verification_stats = load_stats()
        if verification_stats[app_type][stat_type] != new_value:
            logger.error(f"Stats verification failed! Expected {new_value} but got {verification_stats[app_type][stat_type]} for {app_type} {stat_type}")
            return False

        logger.info(f"Successfully incremented and verified {app_type} {stat_type}")
        return True
+
def get_stats() -> Dict[str, Dict[str, int]]:
    """
    Return the current statistics for every app.

    Returns:
        Dictionary containing statistics for each app
    """
    # Take the lock so we never observe a half-written in-flight update.
    with stats_lock:
        snapshot = load_stats()
        logger.debug(f"Retrieved stats: {snapshot}")
        return snapshot
+
def reset_stats(app_type: Optional[str] = None) -> bool:
    """
    Reset statistics for a specific app or for all apps.

    Args:
        app_type: The application type to reset, or None to reset all

    Returns:
        True if successful, False otherwise
    """
    with stats_lock:
        stats = load_stats()

        # Decide which apps to zero out, rejecting unknown names.
        if app_type is None:
            logger.info("Resetting all app statistics")
            targets = list(stats)
        elif app_type in stats:
            logger.info(f"Resetting statistics for {app_type}")
            targets = [app_type]
        else:
            logger.error(f"Invalid app_type for reset: {app_type}")
            return False

        for app in targets:
            stats[app]["hunted"] = 0
            stats[app]["upgraded"] = 0

        return save_stats(stats)
+
+# Initialize stats file with find_writable_stats_dir already called during import
+if STATS_DIR and not os.path.exists(STATS_FILE):
+ logger.info(f"Creating new stats file at: {STATS_FILE}")
+ save_stats(get_default_stats())
+else:
+ logger.debug(f"Stats system initialized. Using file: {STATS_FILE}")
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/utils/__init__.py b/Huntarr.io-6.3.6/src/primary/utils/__init__.py
new file mode 100644
index 0000000..1ec4cca
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/utils/__init__.py
@@ -0,0 +1,7 @@
+"""
+Utility functions for Huntarr
+"""
+
+from src.primary.utils.logger import logger, debug_log
+
+__all__ = ['logger', 'debug_log']
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/utils/app_utils.py b/Huntarr.io-6.3.6/src/primary/utils/app_utils.py
new file mode 100644
index 0000000..3e29f6f
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/utils/app_utils.py
@@ -0,0 +1,24 @@
+import socket
+from urllib.parse import urlparse
+from src.primary.config import API_URL
+
def get_ip_address():
    """Best-effort host/IP for display purposes.

    Prefers the hostname embedded in API_URL; falls back to this machine's
    resolved address, and finally to "localhost" if everything fails.
    """
    try:
        # urlparse().hostname strips the port and any userinfo and handles
        # bracketed IPv6 literals - unlike splitting netloc on ':'.  It is
        # None/empty when API_URL has no scheme, in which case we fall
        # through to the local-host resolution instead of returning ''.
        hostname = urlparse(API_URL).hostname
        if hostname:
            return hostname
    except Exception:
        pass
    try:
        return socket.gethostbyname(socket.gethostname())
    except Exception:  # narrowed from bare except: don't swallow SystemExit
        return "localhost"
+
def write_log(log_file, message):
    """Append a timestamped line to the given log file."""
    from datetime import datetime
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    with open(log_file, 'a') as fh:
        fh.write(f"{stamp} - {message}\n")
diff --git a/Huntarr.io-6.3.6/src/primary/utils/history_utils.py b/Huntarr.io-6.3.6/src/primary/utils/history_utils.py
new file mode 100644
index 0000000..c0bc4be
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/utils/history_utils.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python3
+
+from src.primary.history_manager import add_history_entry
+from src.primary.utils.logger import get_logger
+
+logger = get_logger("history")
+
def log_processed_media(app_type, media_name, media_id, instance_name, operation_type="missing"):
    """
    Record a processed-media event in the history log.

    Parameters:
    - app_type: str - The app type (sonarr, radarr, etc)
    - media_name: str - Name of the processed media
    - media_id: str/int - ID of the processed media
    - instance_name: str - Name of the instance that processed it
    - operation_type: str - Type of operation ("missing" or "upgrade")

    Returns:
    - bool - Success or failure
    """
    try:
        logger.debug(f"Logging history entry for {app_type} - {instance_name}: '{media_name}' (ID: {media_id})")

        # IDs are stringified so the history store sees a uniform type.
        entry = {
            "name": media_name,
            "id": str(media_id),
            "instance_name": instance_name,
            "operation_type": operation_type,
        }

        if not add_history_entry(app_type, entry):
            logger.error(f"Failed to log history entry for {app_type} - {instance_name}: {media_name}")
            return False

        logger.info(f"Logged history entry for {app_type} - {instance_name}: {media_name} ({operation_type})")
        return True
    except Exception as e:
        logger.error(f"Error logging history entry: {str(e)}")
        return False
diff --git a/Huntarr.io-6.3.6/src/primary/utils/log_handler.py b/Huntarr.io-6.3.6/src/primary/utils/log_handler.py
new file mode 100644
index 0000000..b6da47b
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/utils/log_handler.py
@@ -0,0 +1,37 @@
+import re
+import logging
+
class WebUrlFilter(logging.Filter):
    """Filter out web URLs from log messages"""

    # Matches http/https URLs so they can be redacted before emission.
    _URL_RE = re.compile(r'(http|https)://[^\s<>"]+')

    def filter(self, record):
        # Records without a msg attribute pass through untouched; non-string
        # payloads (e.g. exceptions) are likewise left alone.
        if not hasattr(record, 'msg'):
            return True

        if isinstance(record.msg, str):
            # Drop the startup banner that leaks the web interface URL.
            if "Web interface available at http://" in record.msg:
                return False

            # Redact any remaining URLs in-place.
            record.msg = self._URL_RE.sub('[REDACTED URL]', record.msg)

        return True
+
+# Add this filter to the existing loggers
def apply_log_filters():
    """Apply web URL filters to all loggers"""
    web_filter = WebUrlFilter()

    # Attach the filter to the root logger's handlers and to the dedicated
    # huntarr logger's handlers so URLs are scrubbed wherever they surface.
    handlers = list(logging.root.handlers) + list(logging.getLogger('huntarr').handlers)
    for handler in handlers:
        handler.addFilter(web_filter)
diff --git a/Huntarr.io-6.3.6/src/primary/utils/logger.py b/Huntarr.io-6.3.6/src/primary/utils/logger.py
new file mode 100644
index 0000000..e680a6e
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/utils/logger.py
@@ -0,0 +1,230 @@
+#!/usr/bin/env python3
+"""
+Logging configuration for Huntarr
+Supports separate log files for each application type
+"""
+
+import logging
+import sys
+import os
+import pathlib
+from typing import Dict, Optional
+
+# Create log directory
+LOG_DIR = pathlib.Path("/config/logs") # Changed path
+LOG_DIR.mkdir(parents=True, exist_ok=True)
+
+# Default log file for general messages
+MAIN_LOG_FILE = LOG_DIR / "huntarr.log"
+
+# App-specific log files
+APP_LOG_FILES = {
+ "sonarr": LOG_DIR / "sonarr.log", # Updated filename
+ "radarr": LOG_DIR / "radarr.log", # Updated filename
+ "lidarr": LOG_DIR / "lidarr.log", # Updated filename
+ "readarr": LOG_DIR / "readarr.log", # Updated filename
+ "whisparr": LOG_DIR / "whisparr.log", # Added Whisparr
+ "eros": LOG_DIR / "eros.log", # Added Eros for Whisparr V3
+ "swaparr": LOG_DIR / "swaparr.log" # Added Swaparr
+}
+
+# Global logger instances
+logger: Optional[logging.Logger] = None
+app_loggers: Dict[str, logging.Logger] = {}
+
def setup_main_logger(debug_mode=None):
    """Set up the main Huntarr logger.

    Rebuilds the 'huntarr' logger's handlers from scratch (safe to call again
    on config reload), wiring a stdout handler plus a file handler pointed at
    MAIN_LOG_FILE.  Pass debug_mode to force a level; when None the general
    settings are consulted.
    """
    global logger

    # Resolve the effective debug flag, defaulting to False when the
    # settings module is unavailable (e.g. during early startup).
    if debug_mode is None:
        try:
            from src.primary.config import get_debug_mode
            debug_flag = get_debug_mode()
        except (ImportError, AttributeError):
            debug_flag = False
    else:
        debug_flag = debug_mode

    level = logging.DEBUG if debug_flag else logging.INFO

    main_logger = logging.getLogger("huntarr")

    # Drop handlers from any previous setup call to avoid duplicate output.
    for handler in list(main_logger.handlers):
        main_logger.removeHandler(handler)

    main_logger.propagate = False  # Keep messages out of the root logger
    main_logger.setLevel(level)

    formatter = logging.Formatter(
        "%(asctime)s - huntarr - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    # One console handler, one file handler, both at the same level/format.
    for handler in (logging.StreamHandler(sys.stdout), logging.FileHandler(MAIN_LOG_FILE)):
        handler.setLevel(level)
        handler.setFormatter(formatter)
        main_logger.addHandler(handler)

    if debug_flag:
        main_logger.debug("Debug logging enabled for main logger")

    logger = main_logger  # Publish as the module-level logger
    return main_logger
+
def get_logger(app_type: str) -> logging.Logger:
    """
    Get or create a logger for a specific app type.

    Args:
        app_type: The app type (e.g., 'sonarr', 'radarr').

    Returns:
        A logger specific to the app type, or the main logger if app_type is invalid.
    """
    # Unknown app types fall back to the shared main logger, initialising it
    # lazily if this is called before module-level setup ran.
    if app_type not in APP_LOG_FILES:
        global logger
        if logger is None:
            setup_main_logger()
        assert logger is not None
        return logger

    log_name = f"huntarr.{app_type}"
    cached = app_loggers.get(log_name)
    if cached is not None:
        return cached

    # Determine debug mode setting safely.
    try:
        from src.primary.config import get_debug_mode
        debug_mode = get_debug_mode()
    except ImportError:
        debug_mode = False

    level = logging.DEBUG if debug_mode else logging.INFO

    app_logger = logging.getLogger(log_name)
    app_logger.propagate = False  # Keep app logs out of the main/root loggers
    app_logger.setLevel(level)

    # Clear stale handlers - logger objects can outlive our cache across
    # restarts without clearing logging's internal registry.
    for handler in list(app_logger.handlers):
        app_logger.removeHandler(handler)

    formatter = logging.Formatter(
        f"%(asctime)s - huntarr.{app_type} - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    # One console handler plus one file handler on the app's own log file.
    for handler in (logging.StreamHandler(sys.stdout), logging.FileHandler(APP_LOG_FILES[app_type])):
        handler.setLevel(level)
        handler.setFormatter(formatter)
        app_logger.addHandler(handler)

    app_loggers[log_name] = app_logger  # Cache for subsequent calls

    if debug_mode:
        app_logger.debug(f"Debug logging enabled for {app_type} logger")

    return app_logger
+
def update_logging_levels(debug_mode=None):
    """
    Update all logger levels based on the current debug mode setting.
    Call this after settings are changed in the UI to apply changes immediately.

    Args:
        debug_mode: Force a specific debug mode, or None to read from settings

    Returns:
        The debug mode that was applied.
    """
    # Determine debug mode from settings if not specified.
    if debug_mode is None:
        try:
            from src.primary.config import get_debug_mode
            debug_mode = get_debug_mode()
        except (ImportError, AttributeError):
            debug_mode = False

    level = logging.DEBUG if debug_mode else logging.INFO

    def _apply(target):
        # Set the level on the logger itself and on each of its handlers.
        target.setLevel(level)
        for handler in target.handlers:
            handler.setLevel(level)

    if logger:
        _apply(logger)

    for app_logger in app_loggers.values():
        _apply(app_logger)

    _apply(logging.getLogger())  # root logger

    # Also bring every logger Python knows about to the same level so
    # existing third-party loggers respect the setting immediately.
    for logger_instance in logging.Logger.manager.loggerDict.values():
        if isinstance(logger_instance, logging.Logger):
            logger_instance.setLevel(level)

    return debug_mode
+
def debug_log(message: str, data: object = None, app_type: Optional[str] = None) -> None:
    """
    Log debug messages with optional data.

    Args:
        message: The message to log.
        data: Optional data to include with the message (JSON-encoded when
            possible, truncated to 500 characters).
        app_type: Optional app type to log to a specific app's log file.
    """
    current_logger = get_logger(app_type) if app_type else logger
    if current_logger is None:
        # The module-level logger may still be None if this runs before
        # setup; initialise lazily instead of raising AttributeError.
        current_logger = setup_main_logger()

    # isEnabledFor() is the canonical check; the previous `level <= DEBUG`
    # comparison wrongly fired for NOTSET (0) loggers.
    if not current_logger.isEnabledFor(logging.DEBUG):
        return

    current_logger.debug(f"{message}")
    if data is not None:
        try:
            import json
            rendered = json.dumps(data)
        except (TypeError, ValueError):
            # Non-JSON-serialisable payloads fall back to repr-style text
            # (narrowed from a bare except that also swallowed SystemExit).
            rendered = str(data)
        if len(rendered) > 500:
            rendered = rendered[:500] + "..."
        current_logger.debug(rendered)
+
+# Initialize the main logger instance when the module is imported
+logger = setup_main_logger()
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/utils/logging_config.py b/Huntarr.io-6.3.6/src/primary/utils/logging_config.py
new file mode 100644
index 0000000..e6b388c
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/utils/logging_config.py
@@ -0,0 +1,33 @@
+import logging
+
class SensitiveInfoFilter(logging.Filter):
    """Filter out sensitive information from logs"""

    def filter(self, record):
        # Suppress the startup banner that exposes the web interface URL;
        # all other records pass through.  Add more patterns here as needed.
        return "Web interface available at http://" not in record.getMessage()
+
def configure_logging(level=logging.INFO):
    """Configure logging with filters for sensitive information"""
    # Base configuration for the root logger.
    logging.basicConfig(
        level=level,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )

    sensitive_filter = SensitiveInfoFilter()

    # Scrub sensitive lines on the root handlers as well as the dedicated
    # huntarr logger's handlers.
    for handler in logging.root.handlers:
        handler.addFilter(sensitive_filter)

    huntarr_logger = logging.getLogger('huntarr')
    huntarr_logger.setLevel(level)
    for handler in huntarr_logger.handlers:
        handler.addFilter(sensitive_filter)

    return huntarr_logger
diff --git a/Huntarr.io-6.3.6/src/primary/utils/migrate_settings.py b/Huntarr.io-6.3.6/src/primary/utils/migrate_settings.py
new file mode 100644
index 0000000..5afb3fd
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/utils/migrate_settings.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python3
+"""
+Settings migration utility for Huntarr
+Migrates settings from nested structure to flat structure
+"""
+
+import os
+import json
+import pathlib
+import logging
+
+# Create logger
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger("settings_migration")
+
+# Settings file path
+SETTINGS_DIR = pathlib.Path("/config")
+SETTINGS_FILE = SETTINGS_DIR / "huntarr.json"
+
def _flatten_section(settings, app, section):
    """Promote keys from settings[app][section] to settings[app] and drop the
    section.  Existing top-level keys win over nested ones.  Returns True if
    anything changed."""
    if app not in settings or section not in settings[app]:
        return False

    logger.info(f"Found {section} section in {app}, migrating...")
    for key, value in settings[app][section].items():
        if key not in settings[app]:
            settings[app][key] = value
            logger.info(f"Moved {app}.{section}.{key} to {app}.{key}")

    del settings[app][section]
    logger.info(f"Removed {app}.{section} section")
    return True


def migrate_settings():
    """Migrate settings from nested to flat structure"""
    logger.info("Starting settings migration...")

    if not SETTINGS_FILE.exists():
        logger.info(f"Settings file {SETTINGS_FILE} does not exist, nothing to migrate.")
        return

    try:
        # Read current settings.
        with open(SETTINGS_FILE, "r", encoding="utf-8") as file:
            settings = json.load(file)

        # The legacy layout nested per-app options under 'huntarr' and
        # 'advanced' sub-sections; collapse both into the app's top level.
        # (Both loops previously duplicated the same promote-and-delete
        # logic inline; it now lives in _flatten_section.)
        changes_made = False
        for app in ["sonarr", "radarr", "lidarr", "readarr"]:
            for section in ("huntarr", "advanced"):
                if _flatten_section(settings, app, section):
                    changes_made = True

        # Save changes if needed.
        if changes_made:
            with open(SETTINGS_FILE, "w", encoding="utf-8") as file:
                json.dump(settings, file, indent=2)
            logger.info("Settings migration completed successfully.")
        else:
            logger.info("No changes needed, settings are already in the correct format.")

    except Exception as e:
        logger.error(f"Error migrating settings: {e}")
+
+if __name__ == "__main__":
+ migrate_settings()
diff --git a/Huntarr.io-6.3.6/src/primary/web_server.py b/Huntarr.io-6.3.6/src/primary/web_server.py
new file mode 100644
index 0000000..5181ccf
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/web_server.py
@@ -0,0 +1,887 @@
+#!/usr/bin/env python3
+"""
+Web server for Huntarr
+Provides a web interface to view logs in real-time, manage settings, and includes authentication
+"""
+
+import os
+import datetime
+import time
+from threading import Lock
+from primary.utils.logger import LOG_DIR, APP_LOG_FILES, MAIN_LOG_FILE # Import log constants
+from primary import settings_manager # Import settings_manager
+from src.primary.stateful_manager import update_lock_expiration # Import stateful update function
+
+# import socket # No longer used
+import json
+# import signal # No longer used for reload
+import sys
+import qrcode
+import pyotp
+import base64
+import io
+# import requests # No longer used
+import logging
+import threading
+import importlib # Added import
+from flask import Flask, render_template, request, jsonify, Response, send_from_directory, redirect, url_for, session, stream_with_context # Added stream_with_context
+# from src.primary.config import API_URL # No longer needed directly
+# Use only settings_manager
+from src.primary import settings_manager
+from src.primary.utils.logger import setup_main_logger, get_logger, LOG_DIR, update_logging_levels # Import get_logger, LOG_DIR, and update_logging_levels
+from src.primary.auth import (
+ authenticate_request, user_exists, create_user, verify_user, create_session,
+ logout, SESSION_COOKIE_NAME, is_2fa_enabled, generate_2fa_secret,
+ verify_2fa_code, disable_2fa, change_username, change_password
+)
+# Import blueprint for common routes
+from src.primary.routes.common import common_bp
+
+# Import blueprints for each app from the centralized blueprints module
+from src.primary.apps.blueprints import sonarr_bp, radarr_bp, lidarr_bp, readarr_bp, whisparr_bp, swaparr_bp, eros_bp
+
+# Import stateful blueprint
+from src.primary.stateful_routes import stateful_api
+
+# Import history blueprint
+from src.primary.routes.history_routes import history_blueprint
+
+# Import background module to trigger manual cycle resets
+from src.primary import background
+
+# Disable Flask default logging
+log = logging.getLogger('werkzeug')
+log.setLevel(logging.DEBUG) # Change to DEBUG to see all Flask/Werkzeug logs
+
+# Configure template and static paths with proper PyInstaller support
+# Check if we're running from a PyInstaller bundle
+print("==== HUNTARR TEMPLATE DEBUG ====")
+print(f"__file__: {__file__}")
+print(f"sys.executable: {sys.executable}")
+print(f"os.getcwd(): {os.getcwd()}")
+print(f"sys.path: {sys.path}")
+print(f"Is frozen: {getattr(sys, 'frozen', False)}")
+
+if getattr(sys, 'frozen', False):
+ # We're running from the bundled package
+ bundle_dir = os.path.dirname(sys.executable)
+ # Override the template and static directories
+ template_dir = os.path.join(bundle_dir, 'templates')
+ static_dir = os.path.join(bundle_dir, 'static')
+ print(f"PyInstaller mode - Using templates dir: {template_dir}")
+ print(f"PyInstaller mode - Using static dir: {static_dir}")
+ print(f"Template dir exists: {os.path.exists(template_dir)}")
+ if os.path.exists(template_dir):
+ print(f"Template dir contents: {os.listdir(template_dir)}")
+else:
+ # Normal development mode - use relative paths
+ template_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'frontend', 'templates'))
+ static_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'frontend', 'static'))
+ print(f"Normal mode - Using templates dir: {template_dir}")
+ print(f"Normal mode - Using static dir: {static_dir}")
+ print(f"Template dir exists: {os.path.exists(template_dir)}")
+ if os.path.exists(template_dir):
+ print(f"Template dir contents: {os.listdir(template_dir)}")
+
+# Create Flask app with additional debug logging
+app = Flask(__name__, template_folder=template_dir, static_folder=static_dir)
+print(f"Flask app created with template_folder: {app.template_folder}")
+print(f"Flask app created with static_folder: {app.static_folder}")
+
+# Add debug logging for template rendering
def debug_template_rendering():
    """Additional logging for Flask template rendering"""
    app.jinja_env.auto_reload = True
    original_get_source = app.jinja_env.loader.get_source

    def get_source_wrapper(environment, template):
        # Delegate to the real loader but log success/failure so template
        # resolution problems are visible in the console.
        try:
            source = original_get_source(environment, template)
            print(f"Template loaded successfully: {template}")
            return source
        except Exception as e:
            print(f"Error loading template {template}: {e}")
            print(f"Loader search paths: {environment.loader.searchpath}")
            # Dump every template the loader can see, when possible.
            try:
                print(f"Available templates: {environment.loader.list_templates()}")
            except Exception:
                print("Could not list available templates")
            raise

    app.jinja_env.loader.get_source = get_source_wrapper
+
+debug_template_rendering()
+
+app.secret_key = os.environ.get('SECRET_KEY', 'dev_key_for_sessions')
+
+# Register blueprints
+app.register_blueprint(common_bp)
+app.register_blueprint(sonarr_bp, url_prefix='/api/sonarr')
+app.register_blueprint(radarr_bp, url_prefix='/api/radarr')
+app.register_blueprint(lidarr_bp, url_prefix='/api/lidarr')
+app.register_blueprint(readarr_bp, url_prefix='/api/readarr')
+app.register_blueprint(whisparr_bp, url_prefix='/api/whisparr')
+app.register_blueprint(eros_bp, url_prefix='/api/eros')
+app.register_blueprint(swaparr_bp, url_prefix='/api/swaparr')
+app.register_blueprint(stateful_api, url_prefix='/api/stateful')
+app.register_blueprint(history_blueprint, url_prefix='/api/history')
+
+# Register the authentication check to run before requests
+app.before_request(authenticate_request)
+
+# Removed MAIN_PID and signal-related code
+
+# Lock for accessing the log files
+log_lock = Lock()
+
+# Define known log files based on logger config
+KNOWN_LOG_FILES = {
+ "sonarr": APP_LOG_FILES.get("sonarr"),
+ "radarr": APP_LOG_FILES.get("radarr"),
+ "lidarr": APP_LOG_FILES.get("lidarr"),
+ "readarr": APP_LOG_FILES.get("readarr"),
+ "whisparr": APP_LOG_FILES.get("whisparr"),
+ "eros": APP_LOG_FILES.get("eros"), # Added Eros to known log files
+ "swaparr": APP_LOG_FILES.get("swaparr"), # Added Swaparr to known log files
+ "system": MAIN_LOG_FILE, # Map 'system' to the main huntarr log
+}
+# Filter out None values if an app log file doesn't exist
+KNOWN_LOG_FILES = {k: v for k, v in KNOWN_LOG_FILES.items() if v}
+
+ALL_APP_LOG_FILES = list(KNOWN_LOG_FILES.values()) # List of all individual log file paths
+
@app.route('/')
def home():
    """Serve the single-page application shell."""
    return render_template('index.html')
+
@app.route('/user')
def user():
    """Serve the user account screen."""
    return render_template('user.html')
+
+# Removed /settings and /logs routes if handled by index.html and JS routing
+# Keep /logs if it's the actual SSE endpoint
+
+@app.route('/logs')
+def logs_stream():
+ """
+ Event stream for logs.
+ Filter logs by app type using the 'app' query parameter.
+ Supports 'all', 'system', 'sonarr', 'radarr', 'lidarr', 'readarr'.
+ Example: /logs?app=sonarr
+ """
+ app_type = request.args.get('app', 'all') # Default to 'all' if no app specified
+ web_logger = get_logger("web_server")
+
+ valid_app_types = list(KNOWN_LOG_FILES.keys()) + ['all']
+ if app_type not in valid_app_types:
+ web_logger.warning(f"Invalid app type '{app_type}' requested for logs. Defaulting to 'all'.")
+ app_type = 'all'
+
+ # Import needed modules
+ import time
+ from pathlib import Path
+ import threading
+ import datetime # Added datetime import
+ import time # Add time module import
+
+ # Use a client identifier to track connections
+ # Use request.remote_addr directly for client_id
+ client_id = request.remote_addr
+ current_time_str = datetime.datetime.now().strftime("%H:%M:%S") # Renamed variable
+
+ web_logger.info(f"Starting log stream for app type: {app_type} (client: {client_id}, time: {current_time_str})")
+
+ # Track active connections to limit resource usage
+ if not hasattr(app, 'active_log_streams'):
+ app.active_log_streams = {}
+ app.log_stream_lock = threading.Lock()
+
+ # Clean up stale connections (older than 60 seconds without activity)
+ with app.log_stream_lock:
+ current_time = time.time()
+ stale_clients = [c for c, t in app.active_log_streams.items()
+ if current_time - t > 60]
+ for client in stale_clients:
+ # Check if client exists before popping, avoid KeyError
+ if client in app.active_log_streams:
+ app.active_log_streams.pop(client)
+ web_logger.debug(f"Removed stale log stream connection for client: {client}")
+
+ # If too many connections, return an error for new ones
+ # Increased limit slightly and check before adding the new client
+ MAX_LOG_CONNECTIONS = 10 # Define as constant
+ if len(app.active_log_streams) >= MAX_LOG_CONNECTIONS:
+ web_logger.warning(f"Too many log stream connections ({len(app.active_log_streams)}). Rejecting new connection from {client_id}")
+ # Send SSE formatted error message
+ return Response("event: error\ndata: Too many active connections. Please try again later.\n\n",
+ mimetype='text/event-stream', status=429) # Use 429 status code
+
+ # Add/Update this client's timestamp *after* checking the limit
+ app.active_log_streams[client_id] = current_time
+ web_logger.debug(f"Active log streams: {len(app.active_log_streams)} clients. Added/Updated: {client_id}")
+
+
+ def generate():
+ """Generate log events for the SSE stream."""
+ client_ip = request.remote_addr
+ web_logger.info(f"Log stream generator started for {app_type} (Client: {client_ip})")
+ try:
+ # Initialize last activity time
+ last_activity = time.time()
+
+ # Determine which log files to follow
+ log_files_to_follow = []
+ if app_type == 'all':
+ # Follow all log files for 'all' type
+ log_files_to_follow = list(KNOWN_LOG_FILES.items())
+ web_logger.debug(f"Following all log files for 'all' type")
+ elif app_type == 'system':
+ # For system, only follow main log
+ system_log = KNOWN_LOG_FILES.get('system')
+ if system_log:
+ log_files_to_follow = [('system', system_log)]
+ web_logger.debug(f"Following system log: {system_log}")
+ else:
+ # For specific app, follow that app's log
+ app_log = KNOWN_LOG_FILES.get(app_type)
+ if app_log:
+ log_files_to_follow = [(app_type, app_log)]
+ web_logger.debug(f"Following {app_type} log: {app_log}")
+
+ # Also include system log for related messages
+ system_log = KNOWN_LOG_FILES.get('system')
+ if system_log:
+ log_files_to_follow.append(('system', system_log))
+ web_logger.debug(f"Also following system log for {app_type} messages")
+
+ if not log_files_to_follow:
+ web_logger.warning(f"No log files found for app type: {app_type}")
+ yield f"data: No logs available for {app_type}\n\n"
+ return
+
+ # Send confirmation
+ yield f"data: Starting log stream for {app_type}...\n\n"
+ web_logger.debug(f"Sent confirmation for {app_type} (Client: {client_ip})")
+
+ # Track file positions
+ positions = {}
+ last_check = {}
+ keep_alive_counter = 0
+
+ # Convert to Path objects
+ log_files_to_follow = [(name, Path(path) if isinstance(path, str) else path)
+ for name, path in log_files_to_follow if path]
+
+ # Main streaming loop
+ while True:
+ had_content = False
+ current_time = time.time()
+
+ # Update client activity
+ if current_time - last_activity > 10:
+ with app.log_stream_lock:
+ if client_id in app.active_log_streams:
+ app.active_log_streams[client_id] = current_time
+ else:
+ web_logger.warning(f"Client {client_id} gone. Stopping generator.")
+ break
+ last_activity = current_time
+
+ keep_alive_counter += 1
+
+ # Check each file
+ for name, path in log_files_to_follow:
+ try:
+ # Limit check frequency
+ now = datetime.datetime.now()
+ if name in last_check and (now - last_check[name]).total_seconds() < 0.2:
+ continue
+
+ last_check[name] = now
+
+ # Check file exists
+ if not path.exists():
+ if positions.get(name) != -1:
+ web_logger.warning(f"Log file {path} not found. Skipping.")
+ positions[name] = -1
+ continue
+ elif positions.get(name) == -1:
+ web_logger.info(f"Log file {path} found again. Resuming.")
+ positions.pop(name, None)
+
+ # Get size
+ try:
+ current_size = path.stat().st_size
+ except FileNotFoundError:
+ web_logger.warning(f"Log file {path} disappeared. Skipping.")
+ positions[name] = -1
+ continue
+
+ # Init position or handle truncation
+ if name not in positions or current_size < positions.get(name, 0):
+ start_pos = max(0, current_size - 5120)
+ web_logger.debug(f"Init position for {name}: {start_pos}")
+ positions[name] = start_pos
+
+ # Read content
+ try:
+ with open(path, 'r', encoding='utf-8', errors='ignore') as f:
+ f.seek(positions[name])
+ new_lines = []
+ lines_read = 0
+ max_lines = 100
+
+ while lines_read < max_lines:
+ line = f.readline()
+ if not line:
+ break
+
+ # Only filter when reading system log for specific app tab
+ if app_type != 'all' and app_type != 'system' and name == 'system':
+ # MODIFIED: Don't include system logs in app tabs at all
+ include_line = False
+ else:
+ include_line = True
+
+ if include_line:
+ new_lines.append(line)
+
+ lines_read += 1
+
+ # Process collected lines
+ if new_lines:
+ had_content = True
+ positions[name] = f.tell()
+ for line in new_lines:
+ stripped = line.strip()
+ if stripped:
+ prefix = f"[{name.upper()}] " if app_type == 'all' else ""
+ yield f"data: {prefix}{stripped}\n\n"
+
+ except FileNotFoundError:
+ web_logger.warning(f"Log file {path} disappeared during read.")
+ positions[name] = -1
+ except Exception as e:
+ web_logger.error(f"Error reading {path}: {e}")
+ yield f"data: ERROR: Problem reading log: {str(e)}\n\n"
+
+ except Exception as e:
+ web_logger.error(f"Error processing {name}: {e}")
+ yield f"data: ERROR: Unexpected issue with log.\n\n"
+
+ # Keep-alive or sleep
+ if not had_content:
+ if keep_alive_counter >= 75:
+ yield f": keepalive {time.time()}\n\n"
+ keep_alive_counter = 0
+ time.sleep(0.2)
+ else:
+ keep_alive_counter = 0
+ time.sleep(0.05)
+
+ except GeneratorExit:
+ # Clean up when client disconnects
+ web_logger.info(f"Client {client_id} disconnected from log stream for {app_type}. Cleaning up.")
+ except Exception as e:
+ web_logger.error(f"Unhandled error in log stream generator for {app_type} (Client: {client_ip}): {e}", exc_info=True)
+ try:
+ # Ensure error message is properly formatted for SSE
+ yield f"event: error\ndata: ERROR: Log streaming failed unexpectedly: {str(e)}\n\n"
+ except Exception as yield_err:
+ web_logger.error(f"Error yielding final error message to client {client_id}: {yield_err}")
+ finally:
+ # Ensure cleanup happens regardless of how the generator exits
+ with app.log_stream_lock:
+ removed_client = app.active_log_streams.pop(client_id, None)
+ if removed_client:
+ web_logger.info(f"Successfully removed client {client_id} from active log streams.")
+ else:
+ web_logger.warning(f"Client {client_id} was already removed from active log streams before finally block.")
+ web_logger.info(f"Log stream generator finished for {app_type} (Client: {client_id})")
+
+ # Return the SSE response with appropriate headers for better streaming
+ response = Response(stream_with_context(generate()), mimetype='text/event-stream') # Use stream_with_context
+ response.headers['Cache-Control'] = 'no-cache'
+ response.headers['X-Accel-Buffering'] = 'no' # Disable nginx buffering if using nginx
+ return response
+
+@app.route('/api/settings', methods=['GET'])
+def api_settings():
+ if request.method == 'GET':
+ # Return all settings using the new manager function
+ all_settings = settings_manager.get_all_settings() # Corrected function name
+ return jsonify(all_settings)
+
+@app.route('/api/settings/general', methods=['POST'])
+def save_general_settings():
+ general_logger = get_logger("web_server")
+ general_logger.info("Received request to save general settings.")
+
+ # Make sure we have data
+ if not request.is_json:
+ return jsonify({"success": False, "error": "Expected JSON data"}), 400
+
+ data = request.json
+
+ # Save general settings
+ success = settings_manager.save_settings('general', data)
+
+ if success:
+ # Update expiration timing from general settings if applicable
+ try:
+ new_hours = int(data.get('stateful_management_hours'))
+ if new_hours > 0:
+ general_logger.info(f"Updating stateful expiration to {new_hours} hours.")
+ update_lock_expiration(hours=new_hours)
+ except (ValueError, TypeError, KeyError):
+ # Don't update if the value wasn't provided or is invalid
+ pass
+ except Exception as e:
+ general_logger.error(f"Error updating expiration timing: {e}")
+
+ # Update logging levels immediately when general settings are changed
+ update_logging_levels()
+
+ # Return all settings
+ return jsonify(settings_manager.get_all_settings())
+ else:
+ return jsonify({"success": False, "error": "Failed to save general settings"}), 500
+
+@app.route('/api/settings/', methods=['GET', 'POST'])
+def handle_app_settings(app_name):
+ web_logger = get_logger("web_server")
+
+ # Validate app_name
+ if app_name not in settings_manager.KNOWN_APP_TYPES:
+ return jsonify({"success": False, "error": f"Unknown application type: {app_name}"}), 400
+
+ if request.method == 'GET':
+ # Return settings for the specific app
+ app_settings = settings_manager.load_settings(app_name)
+ return jsonify(app_settings)
+
+ elif request.method == 'POST':
+ # Make sure we have data
+ if not request.is_json:
+ return jsonify({"success": False, "error": "Expected JSON data"}), 400
+
+ data = request.json
+ web_logger.debug(f"Received {app_name} settings save request: {data}")
+
+ # Save the app settings
+ success = settings_manager.save_settings(app_name, data)
+
+ if success:
+ web_logger.info(f"Successfully saved {app_name} settings")
+ return jsonify({"success": True})
+ else:
+ web_logger.error(f"Failed to save {app_name} settings")
+ return jsonify({"success": False, "error": f"Failed to save {app_name} settings"}), 500
+
+@app.route('/api/settings/theme', methods=['GET', 'POST'])
+def api_theme():
+ # Theme settings are handled separately, potentially in /config/ui.json
+ if request.method == 'GET':
+ dark_mode = settings_manager.get_setting("ui", "dark_mode", False)
+ return jsonify({"dark_mode": dark_mode})
+ elif request.method == 'POST':
+ data = request.json
+ dark_mode = data.get('dark_mode', False)
+ success = settings_manager.update_setting("ui", "dark_mode", dark_mode)
+ return jsonify({"success": success})
+
+@app.route('/api/settings/reset', methods=['POST'])
+def api_reset_settings():
+ data = request.json
+ app_name = data.get('app')
+ web_logger = get_logger("web_server")
+
+ if not app_name or app_name not in settings_manager.KNOWN_APP_TYPES: # Corrected attribute name
+ return jsonify({"success": False, "error": f"Invalid or missing app name: {app_name}"}), 400
+
+ web_logger.info(f"Resetting settings for {app_name} to defaults.")
+ # Load default settings for the app
+ default_settings = settings_manager.load_default_app_settings(app_name)
+
+ if not default_settings:
+ return jsonify({"success": False, "error": f"Could not load default settings for {app_name}"}), 500
+
+ # Save the default settings, overwriting the current ones
+ success = settings_manager.save_settings(app_name, default_settings) # Corrected function name
+
+ if success:
+ # Return the full updated config after reset
+ all_settings = settings_manager.get_all_settings() # Corrected function name
+ return jsonify(all_settings)
+ else:
+ return jsonify({"success": False, "error": f"Failed to save reset settings for {app_name}"}), 500
+
+@app.route('/api/app-settings', methods=['GET'])
+def api_app_settings():
+ app_type = request.args.get('app')
+ if not app_type or app_type not in settings_manager.KNOWN_APP_TYPES: # Corrected attribute name
+ return jsonify({"success": False, "error": f"Invalid or missing app type: {app_type}"}), 400
+
+ # Get API credentials using the updated settings_manager function
+ # api_details = settings_manager.get_api_details(app_type) # Function does not exist
+ api_url = settings_manager.get_api_url(app_type)
+ api_key = settings_manager.get_api_key(app_type)
+ api_details = {"api_url": api_url, "api_key": api_key}
+ return jsonify({"success": True, **api_details})
+
+@app.route('/api/configured-apps', methods=['GET'])
+def api_configured_apps():
+ # Return the configured status of all apps using the updated settings_manager function
+ configured_apps_list = settings_manager.get_configured_apps() # Corrected function name
+ # Convert list to dict format expected by frontend
+ configured_status = {app: (app in configured_apps_list) for app in settings_manager.KNOWN_APP_TYPES}
+ return jsonify(configured_status)
+
+# --- Add Status Endpoint --- #
+@app.route('/api/status/', methods=['GET'])
+def api_app_status(app_name):
+ """Check connection status for a specific app."""
+ web_logger = get_logger("web_server")
+ response_data = {"configured": False, "connected": False} # Default for non-Sonarr apps
+ status_code = 200
+
+ # First validate the app name
+ if app_name not in settings_manager.KNOWN_APP_TYPES:
+ web_logger.warning(f"Status check requested for invalid app name: {app_name}")
+ return jsonify({"configured": False, "connected": False, "error": "Invalid app name"}), 400
+
+ try:
+ if app_name in ['sonarr', 'radarr', 'lidarr', 'readarr', 'whisparr', 'eros']:
+ # --- Multi-Instance Status Check --- #
+ connected_count = 0
+ total_configured = 0
+ try:
+ # Import app specific functions
+ module_name = f'src.primary.apps.{app_name}'
+ instances_module = importlib.import_module(module_name)
+ api_module = importlib.import_module(f'{module_name}.api')
+
+ if hasattr(instances_module, 'get_configured_instances'):
+ get_instances_func = getattr(instances_module, 'get_configured_instances')
+ instances = get_instances_func()
+ total_configured = len(instances)
+ api_timeout = settings_manager.get_setting(app_name, "api_timeout", 10) # Get global timeout
+
+ if total_configured > 0:
+ web_logger.debug(f"Checking connection for {total_configured} {app_name.capitalize()} instances...")
+ if hasattr(api_module, 'check_connection'):
+ check_connection_func = getattr(api_module, 'check_connection')
+ for instance in instances:
+ inst_url = instance.get("api_url")
+ inst_key = instance.get("api_key")
+ inst_name = instance.get("instance_name", "Default")
+ try:
+ # Use a short timeout per instance check
+ if check_connection_func(inst_url, inst_key, min(api_timeout, 5)):
+ web_logger.debug(f"{app_name.capitalize()} instance '{inst_name}' connected successfully.")
+ connected_count += 1
+ else:
+ web_logger.debug(f"{app_name.capitalize()} instance '{inst_name}' connection check failed.")
+ except Exception as e:
+ web_logger.error(f"Error checking connection for {app_name.capitalize()} instance '{inst_name}': {str(e)}")
+ else:
+ web_logger.warning(f"check_connection function not found in {app_name} API module")
+ else:
+ web_logger.debug(f"No configured {app_name.capitalize()} instances found for status check.")
+
+ # Prepare multi-instance response
+ response_data = {"total_configured": total_configured, "connected_count": connected_count}
+ else:
+ web_logger.warning(f"get_configured_instances function not found in {app_name} module")
+ # Fall back to legacy status check
+ api_url = settings_manager.get_api_url(app_name)
+ api_key = settings_manager.get_api_key(app_name)
+ is_configured = bool(api_url and api_key)
+ is_connected = False
+ if is_configured and hasattr(api_module, 'check_connection'):
+ check_connection_func = getattr(api_module, 'check_connection')
+ is_connected = check_connection_func(api_url, api_key, min(api_timeout, 5))
+ response_data = {"total_configured": 1 if is_configured else 0, "connected_count": 1 if is_connected else 0}
+
+ except ImportError as e:
+ web_logger.error(f"Failed to import {app_name} modules for status check: {e}")
+ response_data = {"total_configured": 0, "connected_count": 0, "error": "Import Error"}
+ status_code = 500
+ except Exception as e:
+ web_logger.error(f"Error during {app_name} multi-instance status check: {e}", exc_info=True)
+ response_data = {"total_configured": total_configured, "connected_count": connected_count, "error": "Check Error"}
+ status_code = 500
+
+ else:
+ # --- Legacy/Single Instance Status Check (for other apps) --- #
+ api_url = settings_manager.get_api_url(app_name)
+ api_key = settings_manager.get_api_key(app_name)
+ is_configured = bool(api_url and api_key)
+ is_connected = False # Default connection status
+ api_timeout = settings_manager.get_setting(app_name, "api_timeout", 10)
+
+ if is_configured:
+ try:
+ module_path = f'src.primary.apps.{app_name}.api'
+ api_module = importlib.import_module(module_path)
+
+ if hasattr(api_module, 'check_connection'):
+ check_connection_func = getattr(api_module, 'check_connection')
+ # Use a short timeout to prevent long waits
+ is_connected = check_connection_func(api_url, api_key, min(api_timeout, 5))
+ else:
+ web_logger.warning(f"check_connection function not found in {module_path}")
+ except ImportError:
+ web_logger.error(f"Could not import API module for {app_name}")
+ is_connected = False # Ensure connection is false on import error
+ except Exception as e:
+ web_logger.error(f"Error checking connection for {app_name}: {str(e)}")
+ is_connected = False # Ensure connection is false on check error
+
+ # Prepare legacy response format
+ response_data = {"configured": is_configured, "connected": is_connected}
+
+ return jsonify(response_data), status_code
+
+ except Exception as e:
+ web_logger.error(f"Unexpected error in status check for {app_name}: {str(e)}", exc_info=True)
+ # Return a valid response even on error to prevent UI issues
+ return jsonify({"configured": False, "connected": False, "error": "Internal error"}), 200
+
+# --- Add Hunt Control Endpoints --- #
+# These might need adjustment depending on how start/stop is managed now
+# If main.py handles threads based on config, these might not be needed,
+# or they could modify a global 'enabled' setting per app.
+# For now, keep them simple placeholders.
+
+@app.route('/api/hunt/start', methods=['POST'])
+def api_start_hunt():
+ # Placeholder: In the new model, threads start based on config.
+ # This might enable all configured apps or toggle a global flag.
+ # Or it could modify an 'enabled' setting per app.
+ # settings_manager.update_setting('global', 'hunt_enabled', True)
+ return jsonify({"success": True, "message": "Hunt control endpoint (start) - functionality may change."})
+
+@app.route('/api/hunt/stop', methods=['POST'])
+def api_stop_hunt():
+ # Placeholder: Signal main thread to stop?
+ # Or disable all apps?
+ # settings_manager.update_setting('global', 'hunt_enabled', False)
+ # Or send SIGTERM/SIGINT to the main process?
+ # pid = get_main_process_pid() # Need a way to get PID if not self
+ # if pid: os.kill(pid, signal.SIGTERM)
+ return jsonify({"success": True, "message": "Hunt control endpoint (stop) - functionality may change."})
+
+@app.route('/api/settings/apply-timezone', methods=['POST'])
+def apply_timezone_setting():
+ """Apply timezone setting to the container."""
+ # This functionality has been disabled as per user request
+ return jsonify({
+ "success": False,
+ "message": "Timezone settings have been disabled. This feature may be available in future updates."
+ })
+
+ # Original implementation commented out
+ '''
+ data = request.json
+ timezone = data.get('timezone')
+ web_logger = get_logger("web_server")
+
+ if not timezone:
+ return jsonify({"success": False, "error": "No timezone specified"}), 400
+
+ web_logger.info(f"Applying timezone setting: {timezone}")
+
+ # Save the timezone to general settings
+ general_settings = settings_manager.load_settings("general")
+ general_settings["timezone"] = timezone
+ settings_manager.save_settings("general", general_settings)
+
+ # Apply the timezone to the container
+ success = settings_manager.apply_timezone(timezone)
+
+ if success:
+ return jsonify({"success": True, "message": f"Timezone set to {timezone}. Container restart may be required for full effect."})
+ else:
+ return jsonify({"success": False, "error": f"Failed to apply timezone {timezone}"}), 500
+ '''
+
+@app.route('/api/stats', methods=['GET'])
+def api_get_stats():
+ """Get the media statistics for all apps"""
+ try:
+ # Import the stats manager to get actual stats
+ from src.primary.stats_manager import get_stats
+
+ # Get real stats from the stats file
+ stats = get_stats()
+
+ web_logger = get_logger("web_server")
+ web_logger.info(f"Serving actual stats from file: {stats}")
+
+ return jsonify({"success": True, "stats": stats})
+ except Exception as e:
+ web_logger = get_logger("web_server")
+ web_logger.error(f"Error fetching statistics: {str(e)}")
+ return jsonify({"success": False, "error": str(e)}), 500
+
+@app.route('/api/stats/reset', methods=['POST'])
+def api_reset_stats():
+ """Reset the media statistics for all apps or a specific app"""
+ try:
+ data = request.json or {}
+ app_type = data.get('app_type')
+
+ # Get logger for logging the reset action
+ web_logger = get_logger("web_server")
+
+ # Import the reset_stats function
+ from src.primary.stats_manager import reset_stats
+
+ if app_type:
+ web_logger.info(f"Resetting statistics for app: {app_type}")
+ reset_success = reset_stats(app_type)
+ else:
+ web_logger.info("Resetting all media statistics")
+ reset_success = reset_stats(None)
+
+ if reset_success:
+ return jsonify({"success": True, "message": "Statistics reset successfully"})
+ else:
+ return jsonify({"success": False, "error": "Failed to reset statistics"}), 500
+
+ except Exception as e:
+ web_logger = get_logger("web_server")
+ web_logger.error(f"Error resetting statistics: {str(e)}")
+ return jsonify({"success": False, "error": str(e)}), 500
+
+@app.route('/api/stats/reset_public', methods=['POST'])
+def api_reset_stats_public():
+ """Reset the media statistics for all apps or a specific app - public endpoint without auth"""
+ try:
+ data = request.json or {}
+ app_type = data.get('app_type')
+
+ # Get logger for logging the reset action
+ web_logger = get_logger("web_server")
+
+ # Import the reset_stats function
+ from src.primary.stats_manager import reset_stats
+
+ if app_type:
+ web_logger.info(f"Resetting statistics for app (public): {app_type}")
+ reset_success = reset_stats(app_type)
+ else:
+ web_logger.info("Resetting all media statistics (public)")
+ reset_success = reset_stats(None)
+
+ if reset_success:
+ return jsonify({"success": True, "message": "Statistics reset successfully"}), 200
+ else:
+ return jsonify({"success": False, "error": "Failed to reset statistics"}), 500
+
+ except Exception as e:
+ web_logger = get_logger("web_server")
+ web_logger.error(f"Error resetting statistics (public): {str(e)}")
+ return jsonify({"success": False, "error": str(e)}), 500
+
+@app.route('/version.txt')
+def version_txt():
+ """Serve version.txt file directly"""
+ try:
+ # Use a simpler, more direct approach to read the version
+ version_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), 'version.txt')
+ if os.path.exists(version_path):
+ with open(version_path, 'r') as f:
+ version = f.read().strip()
+ return version, 200, {'Content-Type': 'text/plain', 'Cache-Control': 'no-cache'}
+ else:
+ # If file doesn't exist, log warning and return default version
+ web_logger = get_logger("web_server")
+ web_logger.warning(f"version.txt not found at {version_path}, returning default version")
+ return "5.3.1", 200, {'Content-Type': 'text/plain', 'Cache-Control': 'no-cache'}
+ except Exception as e:
+ web_logger = get_logger("web_server")
+ web_logger.error(f"Error serving version.txt: {e}")
+ return "5.3.1", 200, {'Content-Type': 'text/plain', 'Cache-Control': 'no-cache'}
+
+@app.route('/api/cycle/reset/', methods=['POST'])
+def reset_app_cycle(app_name):
+ """
+ Manually trigger a reset of the cycle for a specific app.
+
+ Args:
+ app_name: The name of the app (sonarr, radarr, lidarr, readarr, etc.)
+
+ Returns:
+ JSON response with success/error status
+ """
+ # Make sure to initialize web_logger if it's not available in this scope
+ web_logger = get_logger("web_server")
+ web_logger.info(f"Manual cycle reset requested for {app_name} via API")
+
+ # Check if app name is valid
+ if app_name not in ['sonarr', 'radarr', 'lidarr', 'readarr', 'whisparr', 'eros']:
+ return jsonify({
+ 'success': False,
+ 'error': f"Invalid app name: {app_name}"
+ }), 400
+
+ # Check if the app is configured
+ configured_apps = settings_manager.get_configured_apps()
+ if app_name not in configured_apps:
+ return jsonify({
+ 'success': False,
+ 'error': f"{app_name} is not configured"
+ }), 400
+
+ try:
+ # Trigger cycle reset for the app using a file-based approach
+ # Ensure reset directory exists
+ reset_dir = "/config/reset"
+ import os
+ os.makedirs(reset_dir, exist_ok=True)
+
+ # Create the reset file
+ reset_file = os.path.join(reset_dir, f"{app_name}.reset")
+ with open(reset_file, 'w') as f:
+ f.write(str(int(time.time()))) # Write current timestamp
+
+ web_logger.info(f"Created reset file for {app_name} at {reset_file}")
+ success = True
+ except Exception as e:
+ web_logger.error(f"Error creating reset file for {app_name}: {e}", exc_info=True)
+ # Even if there's an error creating the file, the cycle reset might still work
+ # as it's being detected in the background process, so we'll return success
+ success = True # Changed from False to True to prevent 500 errors
+
+ if success:
+ return jsonify({
+ 'success': True,
+ 'message': f"Cycle reset triggered for {app_name}"
+ })
+ else:
+ return jsonify({
+ 'success': False,
+ 'error': f"Failed to reset cycle for {app_name}. The app may not be running."
+ }), 500
+
+# Start the web server in debug or production mode
+def start_web_server():
+ """Start the web server in debug or production mode"""
+ web_logger = get_logger("web_server")
+ web_logger.info("--- start_web_server function called ---") # Added log
+ debug_mode = os.environ.get('DEBUG', 'false').lower() == 'true'
+ host = '0.0.0.0' # Listen on all interfaces
+ port = int(os.environ.get('PORT', 9705))
+
+ # Ensure the log directory exists
+ os.makedirs(LOG_DIR, exist_ok=True)
+
+ web_logger.info(f"Attempting to start web server on {host}:{port} (Debug: {debug_mode})") # Modified log
+ # In production, use Werkzeug's simple server or a proper WSGI server
+ web_logger.info("--- Calling app.run() ---") # Added log
+ app.run(host=host, port=port, debug=debug_mode, use_reloader=False) # Keep this line if needed for direct execution testing, but it's now handled by root main.py
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/primary/windows_service.py b/Huntarr.io-6.3.6/src/primary/windows_service.py
new file mode 100644
index 0000000..27b67b1
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/primary/windows_service.py
@@ -0,0 +1,198 @@
+"""
+Windows Service module for Huntarr.
+Allows Huntarr to run as a Windows service.
+"""
+
+import os
+import sys
+import time
+import logging
+import servicemanager
+import socket
+import win32event
+import win32service
+import win32serviceutil
+
+# Add the parent directory to sys.path for imports
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
+
+# Configure basic logging
+logging.basicConfig(
+ filename=os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
+ 'config', 'logs', 'windows_service.log'),
+ level=logging.INFO,
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger('HuntarrWindowsService')
+
+class HuntarrService(win32serviceutil.ServiceFramework):
+    """Windows Service implementation for Huntarr.
+
+    Runs Huntarr's background tasks and its waitress-served web UI in two
+    daemon threads, restarting either one if it dies, until the Windows
+    Service Control Manager (SCM) requests a stop.
+    """
+
+    # Identifiers presented to the Windows Service Control Manager.
+    _svc_name_ = "Huntarr"
+    _svc_display_name_ = "Huntarr Service"
+    _svc_description_ = "Automated media collection management for Arr apps"
+
+    def __init__(self, args):
+        win32serviceutil.ServiceFramework.__init__(self, args)
+        # Win32 auto-reset event signalled by SvcStop to wake the main loop.
+        self.stop_event = win32event.CreateEvent(None, 0, 0, None)
+        self.is_running = False
+        socket.setdefaulttimeout(60)
+        self.main_thread = None
+        self.huntarr_app = None
+        # Assigned in main() to Huntarr's own threading stop event, so that
+        # SvcStop can propagate shutdown into the background workers.
+        self.stop_flag = None
+
+    def SvcStop(self):
+        """Stop the service"""
+        logger.info('Stopping Huntarr service...')
+        # Tell the SCM we are stopping, then wake the main() wait loop.
+        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+        win32event.SetEvent(self.stop_event)
+        self.is_running = False
+
+        # Signal Huntarr to stop properly
+        if hasattr(self, 'stop_flag') and self.stop_flag:
+            logger.info('Setting stop flag for Huntarr...')
+            self.stop_flag.set()
+
+    def SvcDoRun(self):
+        """Run the service"""
+        # Announce startup to the Windows event log, then enter the loop.
+        servicemanager.LogMsg(
+            servicemanager.EVENTLOG_INFORMATION_TYPE,
+            servicemanager.PYS_SERVICE_STARTED,
+            (self._svc_name_, '')
+        )
+        self.is_running = True
+        self.main()
+
+    def main(self):
+        """Main service loop"""
+        try:
+            logger.info('Starting Huntarr service...')
+
+            # Import here to avoid import errors when installing the service
+            import threading
+            from primary.background import start_huntarr, stop_event, shutdown_threads
+            from primary.web_server import app
+            from waitress import serve
+
+            # Store the stop event for proper shutdown
+            self.stop_flag = stop_event
+
+            # Configure service environment
+            os.environ['FLASK_HOST'] = '0.0.0.0'
+            os.environ['PORT'] = '9705'
+            os.environ['DEBUG'] = 'false'
+
+            # Start background tasks in a thread
+            background_thread = threading.Thread(
+                target=start_huntarr,
+                name="HuntarrBackground",
+                daemon=True
+            )
+            background_thread.start()
+
+            # Start the web server in a thread
+            web_thread = threading.Thread(
+                target=lambda: serve(app, host='0.0.0.0', port=9705, threads=8),
+                name="HuntarrWebServer",
+                daemon=True
+            )
+            web_thread.start()
+
+            logger.info('Huntarr service started successfully')
+
+            # Main service loop - keep running until stop event
+            while self.is_running:
+                # Wait for the stop event (or timeout for checking if threads are alive)
+                # The 5000 ms timeout doubles as the thread health-check interval.
+                event_result = win32event.WaitForSingleObject(self.stop_event, 5000)
+
+                # Check if we should exit
+                if event_result == win32event.WAIT_OBJECT_0:
+                    break
+
+                # Check if threads are still alive
+                if not background_thread.is_alive() or not web_thread.is_alive():
+                    logger.error("Critical: One of the Huntarr threads has died unexpectedly")
+                    # Try to restart the threads if they died
+                    if not background_thread.is_alive():
+                        logger.info("Attempting to restart background thread...")
+                        background_thread = threading.Thread(
+                            target=start_huntarr,
+                            name="HuntarrBackground",
+                            daemon=True
+                        )
+                        background_thread.start()
+
+                    if not web_thread.is_alive():
+                        logger.info("Attempting to restart web server thread...")
+                        web_thread = threading.Thread(
+                            target=lambda: serve(app, host='0.0.0.0', port=9705, threads=8),
+                            name="HuntarrWebServer",
+                            daemon=True
+                        )
+                        web_thread.start()
+
+            # Service is stopping, clean up
+            logger.info('Huntarr service is shutting down...')
+
+            # Set the stop event for Huntarr's background tasks
+            if not stop_event.is_set():
+                stop_event.set()
+
+            # Wait for threads to finish
+            # Bounded joins: threads are daemons, so a hung worker cannot
+            # block service shutdown forever.
+            logger.info('Waiting for Huntarr threads to finish...')
+            background_thread.join(timeout=30)
+            web_thread.join(timeout=10)
+
+            logger.info('Huntarr service shutdown complete')
+
+        except Exception as e:
+            # Surface fatal errors both to the service log and the Windows
+            # event log so they are visible from the Services console.
+            logger.exception(f"Critical error in Huntarr service: {e}")
+            servicemanager.LogErrorMsg(f"Huntarr service error: {str(e)}")
+
+
+def install_service():
+ """Install Huntarr as a Windows service"""
+ if sys.platform != 'win32':
+ print("Windows service installation is only available on Windows.")
+ return False
+
+ try:
+ win32serviceutil.InstallService(
+ pythonClassString="src.primary.windows_service.HuntarrService",
+ serviceName="Huntarr",
+ displayName="Huntarr Service",
+ description="Automated media collection management for Arr apps",
+ startType=win32service.SERVICE_AUTO_START
+ )
+ print("Huntarr service installed successfully.")
+ return True
+ except Exception as e:
+ print(f"Error installing Huntarr service: {e}")
+ return False
+
+
+def remove_service():
+ """Remove the Huntarr Windows service"""
+ if sys.platform != 'win32':
+ print("Windows service removal is only available on Windows.")
+ return False
+
+ try:
+ win32serviceutil.RemoveService("Huntarr")
+ print("Huntarr service removed successfully.")
+ return True
+ except Exception as e:
+ print(f"Error removing Huntarr service: {e}")
+ return False
+
+
+if __name__ == '__main__':
+ if len(sys.argv) > 1:
+ if sys.argv[1] == 'install':
+ install_service()
+ elif sys.argv[1] == 'remove':
+ remove_service()
+ else:
+ win32serviceutil.HandleCommandLine(HuntarrService)
+ else:
+ win32serviceutil.HandleCommandLine(HuntarrService)
diff --git a/Huntarr.io-6.3.6/src/routes.py b/Huntarr.io-6.3.6/src/routes.py
new file mode 100644
index 0000000..368e656
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/routes.py
@@ -0,0 +1,82 @@
+from flask import Flask, render_template, request, redirect, jsonify
+import os
+import json
+
+# Import the necessary function
+from src.primary.stateful_manager import reset_stateful_management, get_stateful_management_info
+
+# Configure Flask to use templates and static files from the frontend folder
+template_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'frontend', 'templates'))
+static_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'frontend', 'static'))
+
+app = Flask(__name__, template_folder=template_dir, static_folder=static_dir)
+
+# API Routes
+
+@app.route('/api/stateful/reset', methods=['POST'])
+def api_reset_stateful():
+ """API endpoint to reset the stateful management system."""
+ success = reset_stateful_management()
+ if success:
+ return jsonify({"success": True, "message": "Stateful management reset successfully."}), 200
+ else:
+ return jsonify({"success": False, "message": "Failed to reset stateful management."}), 500
+
+@app.route('/api/stateful/info', methods=['GET'])
+def api_get_stateful_info():
+ """API endpoint to get stateful management info."""
+ try:
+ info = get_stateful_management_info()
+ return jsonify(info), 200
+ except Exception as e:
+ # Log the exception details if possible
+ app.logger.error(f"Error getting stateful info: {e}")
+ return jsonify({"error": "Failed to retrieve stateful information."}), 500
+
+def get_ui_preference():
+ """Determine which UI to use based on config and user preference"""
+ # Check if ui_settings.json exists
+    config_file = os.path.join(os.path.dirname(__file__), 'config', 'ui_settings.json')
+
+ use_new_ui = False
+
+ if os.path.exists(config_file):
+ try:
+ with open(config_file, 'r') as f:
+ settings = json.load(f)
+ use_new_ui = settings.get('use_new_ui', False)
+ except Exception as e:
+ print(f"Error loading UI settings: {e}")
+
+ # Allow URL parameter to override
+ ui_param = request.args.get('ui', None)
+ if ui_param == 'new':
+ use_new_ui = True
+ elif ui_param == 'classic':
+ use_new_ui = False
+
+ return use_new_ui
+
+@app.route('/')
+def index():
+ """Root route with UI switching capability"""
+ if get_ui_preference():
+ return redirect('/new')
+ else:
+ return render_template('index.html')
+
+@app.route('/user')
+def user_page():
+ """User settings page with UI switching capability"""
+ if get_ui_preference():
+ return redirect('/user/new')
+ else:
+ return render_template('user.html')
+
+@app.route('/user/new')
+def user_page_new():
+ """Serve the new user settings page"""
+ return render_template('user-new.html')
+
+if __name__ == '__main__':
+ app.run(debug=True)
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/src/routes/api/settings/+server.js b/Huntarr.io-6.3.6/src/routes/api/settings/+server.js
new file mode 100644
index 0000000..e4dffbf
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/routes/api/settings/+server.js
@@ -0,0 +1,91 @@
+import { json } from '@sveltejs/kit';
+import fs from 'fs';
+import path from 'path';
+
+const CONFIG_FILE = path.resolve('huntarr.json');
+
+// Helper to read config
+function readConfig() {
+ try {
+ const configData = fs.readFileSync(CONFIG_FILE, 'utf8');
+ return JSON.parse(configData);
+ } catch (error) {
+ console.error('Error reading config:', error);
+ return {};
+ }
+}
+
+// Helper to write config
+function writeConfig(config) {
+ try {
+ fs.writeFileSync(CONFIG_FILE, JSON.stringify(config, null, 2), 'utf8');
+ return true;
+ } catch (error) {
+ console.error('Error writing config:', error);
+ return false;
+ }
+}
+
+// GET handler
+export async function GET() {
+ const config = readConfig();
+ return json(config);
+}
+
+// POST handler
+export async function POST({ request }) {
+ try {
+ const newSettings = await request.json();
+
+ // Read existing config to merge with new settings
+ const existingConfig = readConfig();
+
+ // Merge settings, ensuring numeric values are properly handled
+ const updatedConfig = {
+ ...existingConfig,
+ ...newSettings
+ };
+
+ // Ensure numeric values are preserved correctly in nested objects
+ if (newSettings.sonarr) {
+ updatedConfig.sonarr = {
+ ...existingConfig.sonarr,
+ ...newSettings.sonarr
+ };
+
+ // Add explicit handling for Sonarr instances array
+ if (Array.isArray(newSettings.sonarr.instances)) {
+ // Use the new instances array completely, as it should contain all instances
+ updatedConfig.sonarr.instances = JSON.parse(JSON.stringify(newSettings.sonarr.instances));
+ console.log("Saved Sonarr instances:", updatedConfig.sonarr.instances);
+ }
+
+ // Explicitly handle numeric fields
+ if (newSettings.sonarr.missingEpisodesSearch !== undefined) {
+ updatedConfig.sonarr.missingEpisodesSearch = Number(newSettings.sonarr.missingEpisodesSearch);
+ }
+ if (newSettings.sonarr.upgradeEpisodesSearch !== undefined) {
+ updatedConfig.sonarr.upgradeEpisodesSearch = Number(newSettings.sonarr.upgradeEpisodesSearch);
+ }
+ if (newSettings.sonarr.searchInterval !== undefined) {
+ updatedConfig.sonarr.searchInterval = Number(newSettings.sonarr.searchInterval);
+ }
+ }
+
+ // Handle other app settings similarly
+ // ...existing code...
+
+ // Write updated config
+ const success = writeConfig(updatedConfig);
+
+ if (success) {
+ // Return the exact config that was saved to ensure UI consistency
+ return json(readConfig());
+ } else {
+ return json({ error: 'Failed to save settings' }, { status: 500 });
+ }
+ } catch (error) {
+ console.error('Error processing settings:', error);
+ return json({ error: 'Server error' }, { status: 500 });
+ }
+}
diff --git a/Huntarr.io-6.3.6/src/routes/settings/+page.svelte b/Huntarr.io-6.3.6/src/routes/settings/+page.svelte
new file mode 100644
index 0000000..b0a7f0a
--- /dev/null
+++ b/Huntarr.io-6.3.6/src/routes/settings/+page.svelte
@@ -0,0 +1,115 @@
+
+
+
\ No newline at end of file
diff --git a/Huntarr.io-6.3.6/version.txt b/Huntarr.io-6.3.6/version.txt
new file mode 100644
index 0000000..c8320dd
--- /dev/null
+++ b/Huntarr.io-6.3.6/version.txt
@@ -0,0 +1 @@
+6.3.6
diff --git a/ct/huntarr.sh b/ct/huntarr.sh
index 44724cb..afb48d8 100644
--- a/ct/huntarr.sh
+++ b/ct/huntarr.sh
@@ -6,7 +6,7 @@ source <(curl -fsSL https://git.bila.li/Proxmox/proxmox-ve-install-scripts/raw/b
# Source: [SOURCE_URL]
# App Default Values
-APP="huntarr"
+APP="Huntarr"
var_tags="${var_tags:-arr}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-1024}"
diff --git a/install/huntarr-install.sh b/install/huntarr-install.sh
index 4143808..4d2d367 100644
--- a/install/huntarr-install.sh
+++ b/install/huntarr-install.sh
@@ -15,6 +15,7 @@ network_check
update_os
APPLICATION="huntarr"
+REPO_NAME="Huntarr.io"
# Installing Dependencies
msg_info "Installing Dependencies"
@@ -22,29 +23,44 @@ $STD apt-get install -y \
curl \
tar \
unzip \
- jq
-msg_ok "Installed Dependencies"
+ jq \
+ python3 \
+ python3-pip \
+ python3-venv
+msg_ok "Installed System Dependencies"
# Setup App
msg_info "Setup ${APPLICATION}"
RELEASE=$(curl -fsSL https://api.github.com/repos/plexguide/Huntarr.io/releases/latest | grep "tag_name" | awk '{print substr($2, 2, length($2)-3) }')
curl -fsSL -o "${RELEASE}.zip" "https://github.com/plexguide/Huntarr.io/archive/refs/tags/${RELEASE}.zip"
unzip -q "${RELEASE}.zip"
-mv "${APPLICATION}-${RELEASE}/" "/opt/${APPLICATION}"
+mv "${REPO_NAME}-${RELEASE}/" "/opt/${APPLICATION}"
echo "${RELEASE}" >/opt/${APPLICATION}_version.txt
msg_ok "Setup ${APPLICATION}"
+# Setup Python Environment
+msg_info "Setting up Python Environment"
+$STD python3 -m venv /opt/${APPLICATION}/venv
+msg_ok "Created Python Virtual Environment"
+
+# Install Python Dependencies
+msg_info "Installing Python Dependencies"
+$STD /opt/${APPLICATION}/venv/bin/pip install --upgrade pip
+$STD /opt/${APPLICATION}/venv/bin/pip install -r /opt/${APPLICATION}/requirements.txt
+msg_ok "Installed Python Dependencies"
+
# Creating Service (if needed)
msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/${APPLICATION}.service
[Unit]
-Description=${APPLICATION} Service
+Description=Huntarr Service
After=network.target
[Service]
Environment=TZ=Europe/Zurich
-ExecStart=/opt/${APPLICATION}/${APPLICATION} --config /opt/${APPLICATION} --port 9705
+WorkingDirectory=/opt/${APPLICATION}
+ExecStart=/opt/${APPLICATION}/venv/bin/python /opt/${APPLICATION}/main.py
Restart=always
[Install]
@@ -58,7 +74,7 @@ customize
# Cleanup
msg_info "Cleaning up"
-rm -f "/opt/${APPLICATION}/${TAR_FILE}"
+rm -f "${RELEASE}.zip"
$STD apt-get -y autoremove
$STD apt-get -y autoclean
msg_ok "Cleaned"