Compare commits

...

2 commits

Author SHA1 Message Date
Cody Cook
f7a919ebf2 Docker and more calendar work 2025-06-17 16:00:46 -07:00
Cody Cook
4527504c80 Add podgrab featureset 2025-06-16 23:07:36 -07:00
24 changed files with 2093 additions and 76 deletions

39
Dockerfile Normal file
View file

@ -0,0 +1,39 @@
# Use Python 3.8 slim as the base image
FROM python:3.8-slim

# Set working directory
WORKDIR /app

# Set environment variables:
#   PYTHONDONTWRITEBYTECODE - don't write .pyc files into the image
#   PYTHONUNBUFFERED        - stream stdout/stderr directly (better docker logs)
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    FLASK_ENV=production

# Install system build dependencies (gcc for any C-extension wheels),
# then clean the apt cache to keep the layer small
RUN apt-get update && apt-get install -y --no-install-recommends \
    gcc \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Copy only requirements first so dependency installation is cached
# independently of application-code changes
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application code (this already includes docker-entrypoint.sh,
# so a separate COPY of the entrypoint is unnecessary)
COPY . .

# Create runtime directories and set permissions in a single layer;
# chmod -R 755 grants execute on docker-entrypoint.sh as well, but the
# explicit chmod +x documents the intent and survives a stricter umask
RUN mkdir -p instance downloads \
    && chmod -R 755 . \
    && chmod +x docker-entrypoint.sh

# Expose the port the app runs on
EXPOSE 5000

# Set the entrypoint
ENTRYPOINT ["./docker-entrypoint.sh"]

99
README_DOCKER.md Normal file
View file

@ -0,0 +1,99 @@
# Docker Setup for Podcastrr
This document explains how to run Podcastrr using Docker and Docker Compose.
## Prerequisites
- [Docker](https://docs.docker.com/get-docker/)
- [Docker Compose](https://docs.docker.com/compose/install/)
## Quick Start
1. Clone the repository:
```
git clone https://github.com/yourusername/podcastrr.git
cd podcastrr
```
2. Start the application using Docker Compose:
```
docker-compose up -d
```
3. Access the application at http://localhost:5000
## Configuration
The application is configured using environment variables in the `docker-compose.yml` file. You can modify these variables to customize the application:
- `FLASK_ENV`: Set to `development` for development mode or `production` for production mode
- `SECRET_KEY`: A secret key for securing the Flask application (change this to a secure random string)
- `DATABASE_URI`: The URI for the SQLite database
- `DOWNLOAD_PATH`: The path where podcast episodes will be downloaded
- `LOG_LEVEL`: The logging level (INFO, DEBUG, WARNING, ERROR, CRITICAL)
## Persistent Data
The Docker Compose configuration creates two volumes for persistent data:
1. `./downloads:/app/downloads`: Stores downloaded podcast episodes
2. `./instance:/app/instance`: Stores the SQLite database
These directories will be created in your project folder and will persist data between container restarts.
## Building the Docker Image
If you've made changes to the application code and want to rebuild the Docker image:
```
docker-compose build
```
## Stopping the Application
To stop the application:
```
docker-compose down
```
## Viewing Logs
To view the application logs:
```
docker-compose logs -f
```
## Troubleshooting
### Database Issues
If you encounter database issues, you can try:
1. Stopping the application:
```
docker-compose down
```
2. Removing the instance directory:
```
rm -rf instance
```
3. Starting the application again:
```
docker-compose up -d
```
This will recreate the database from scratch.
### Permission Issues
If you encounter permission issues with the downloads or instance directories, ensure that they are writable by the Docker container:
```
chmod -R 777 downloads instance
```
Note: This is not recommended for production environments. Instead, configure proper user permissions.

View file

@ -110,6 +110,8 @@ class Episode(db.Model):
downloaded = db.Column(db.Boolean, default=False) downloaded = db.Column(db.Boolean, default=False)
file_path = db.Column(db.String(512)) file_path = db.Column(db.String(512))
explicit = db.Column(db.Boolean, nullable=True) # Whether the episode is marked as explicit explicit = db.Column(db.Boolean, nullable=True) # Whether the episode is marked as explicit
download_error = db.Column(db.String(255), nullable=True) # Error message if download failed
status_code = db.Column(db.Integer, nullable=True) # HTTP status code from last download attempt
def __repr__(self): def __repr__(self):
return f'<Episode {self.title}>' return f'<Episode {self.title}>'
@ -133,5 +135,7 @@ class Episode(db.Model):
'guid': self.guid, 'guid': self.guid,
'downloaded': self.downloaded, 'downloaded': self.downloaded,
'file_path': self.file_path, 'file_path': self.file_path,
'explicit': self.explicit 'explicit': self.explicit,
'download_error': self.download_error,
'status_code': self.status_code
} }

View file

@ -15,6 +15,8 @@ class Settings(db.Model):
auto_download = db.Column(db.Boolean, default=False) auto_download = db.Column(db.Boolean, default=False)
max_downloads = db.Column(db.Integer, default=5) max_downloads = db.Column(db.Integer, default=5)
delete_after_days = db.Column(db.Integer, default=30) delete_after_days = db.Column(db.Integer, default=30)
calendar_first_day = db.Column(db.String(10), default="Monday") # "Monday" or "Sunday"
calendar_show_monitored_only = db.Column(db.Boolean, default=False) # False = show all, True = monitored only
def __repr__(self): def __repr__(self):
return f'<Settings id={self.id}>' return f'<Settings id={self.id}>'
@ -29,5 +31,7 @@ class Settings(db.Model):
'naming_format': self.naming_format, 'naming_format': self.naming_format,
'auto_download': self.auto_download, 'auto_download': self.auto_download,
'max_downloads': self.max_downloads, 'max_downloads': self.max_downloads,
'delete_after_days': self.delete_after_days 'delete_after_days': self.delete_after_days,
'calendar_first_day': self.calendar_first_day,
'calendar_show_monitored_only': self.calendar_show_monitored_only
} }

View file

@ -22,16 +22,16 @@ def parse_opml(opml_content):
""" """
try: try:
root = ET.fromstring(opml_content) root = ET.fromstring(opml_content)
# Find all outline elements that represent podcasts # Find all outline elements that represent podcasts
podcasts = [] podcasts = []
# Look for outlines in the body # Look for outlines in the body
body = root.find('body') body = root.find('body')
if body is None: if body is None:
logger.error("OPML file has no body element") logger.error("OPML file has no body element")
return [] return []
# Process all outline elements # Process all outline elements
for outline in body.findall('.//outline'): for outline in body.findall('.//outline'):
# Check if this is a podcast outline (has xmlUrl attribute) # Check if this is a podcast outline (has xmlUrl attribute)
@ -44,7 +44,7 @@ def parse_opml(opml_content):
'html_url': outline.get('htmlUrl', '') 'html_url': outline.get('htmlUrl', '')
} }
podcasts.append(podcast) podcasts.append(podcast)
logger.info(f"Parsed OPML file and found {len(podcasts)} podcasts") logger.info(f"Parsed OPML file and found {len(podcasts)} podcasts")
return podcasts return podcasts
except Exception as e: except Exception as e:
@ -65,17 +65,17 @@ def generate_opml(podcasts):
# Create the root element # Create the root element
root = ET.Element('opml') root = ET.Element('opml')
root.set('version', '2.0') root.set('version', '2.0')
# Create the head element # Create the head element
head = ET.SubElement(root, 'head') head = ET.SubElement(root, 'head')
title = ET.SubElement(head, 'title') title = ET.SubElement(head, 'title')
title.text = 'Podcastrr Subscriptions' title.text = 'Podcastrr Subscriptions'
date_created = ET.SubElement(head, 'dateCreated') date_created = ET.SubElement(head, 'dateCreated')
date_created.text = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') date_created.text = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
# Create the body element # Create the body element
body = ET.SubElement(root, 'body') body = ET.SubElement(root, 'body')
# Add each podcast as an outline element # Add each podcast as an outline element
for podcast in podcasts: for podcast in podcasts:
outline = ET.SubElement(body, 'outline') outline = ET.SubElement(body, 'outline')
@ -85,24 +85,25 @@ def generate_opml(podcasts):
outline.set('xmlUrl', podcast.feed_url) outline.set('xmlUrl', podcast.feed_url)
if podcast.description: if podcast.description:
outline.set('description', podcast.description) outline.set('description', podcast.description)
# Convert to pretty-printed XML # Convert to pretty-printed XML
xml_str = ET.tostring(root, encoding='utf-8') xml_str = ET.tostring(root, encoding='utf-8')
parsed_xml = minidom.parseString(xml_str) parsed_xml = minidom.parseString(xml_str)
pretty_xml = parsed_xml.toprettyxml(indent=" ") pretty_xml = parsed_xml.toprettyxml(indent=" ")
logger.info(f"Generated OPML file with {len(podcasts)} podcasts") logger.info(f"Generated OPML file with {len(podcasts)} podcasts")
return pretty_xml return pretty_xml
except Exception as e: except Exception as e:
logger.error(f"Error generating OPML file: {str(e)}") logger.error(f"Error generating OPML file: {str(e)}")
return "" return ""
def import_podcasts_from_opml(opml_content): def import_podcasts_from_opml(opml_content, progress_callback=None):
""" """
Import podcasts from OPML content into the database. Import podcasts from OPML content into the database.
Args: Args:
opml_content (str): OPML file content. opml_content (str): OPML file content.
progress_callback (callable, optional): Function to call with progress updates.
Returns: Returns:
dict: Statistics about the import process. dict: Statistics about the import process.
@ -110,46 +111,59 @@ def import_podcasts_from_opml(opml_content):
from app.models.podcast import Podcast from app.models.podcast import Podcast
from app.models.database import db from app.models.database import db
from app.services.podcast_updater import update_podcast from app.services.podcast_updater import update_podcast
podcasts = parse_opml(opml_content) podcasts = parse_opml(opml_content)
stats = { stats = {
'total': len(podcasts), 'total': len(podcasts),
'imported': 0, 'imported': 0,
'skipped': 0, 'skipped': 0,
'errors': 0 'errors': 0
} }
for podcast_data in podcasts: # Initial progress update
if progress_callback:
progress_callback(0, f"Starting import of {len(podcasts)} podcasts")
for i, podcast_data in enumerate(podcasts):
try: try:
# Check if podcast already exists # Check if podcast already exists
existing = Podcast.query.filter_by(feed_url=podcast_data['feed_url']).first() existing = Podcast.query.filter_by(feed_url=podcast_data['feed_url']).first()
if existing: if existing:
logger.info(f"Podcast already exists: {podcast_data['title']}") logger.info(f"Podcast already exists: {podcast_data['title']}")
stats['skipped'] += 1 stats['skipped'] += 1
continue continue
# Create new podcast # Create new podcast
podcast = Podcast( podcast = Podcast(
title=podcast_data['title'], title=podcast_data['title'],
description=podcast_data.get('description', ''), description=podcast_data.get('description', ''),
feed_url=podcast_data['feed_url'] feed_url=podcast_data['feed_url']
) )
db.session.add(podcast) db.session.add(podcast)
db.session.commit() db.session.commit()
# Update podcast to fetch episodes # Update podcast to fetch episodes
try: try:
update_podcast(podcast.id) update_podcast(podcast.id)
except Exception as e: except Exception as e:
logger.error(f"Error updating podcast {podcast.title}: {str(e)}") logger.error(f"Error updating podcast {podcast.title}: {str(e)}")
stats['imported'] += 1 stats['imported'] += 1
logger.info(f"Imported podcast: {podcast.title}") logger.info(f"Imported podcast: {podcast.title}")
except Exception as e: except Exception as e:
stats['errors'] += 1 stats['errors'] += 1
logger.error(f"Error importing podcast: {str(e)}") logger.error(f"Error importing podcast: {str(e)}")
return stats # Update progress during the loop
if progress_callback and len(podcasts) > 0:
progress = int((i + 1) / len(podcasts) * 100)
progress_callback(progress, f"Processed {i + 1}/{len(podcasts)} podcasts")
# Final progress update
if progress_callback:
progress_callback(100, f"Import completed. Imported: {stats['imported']}, Skipped: {stats['skipped']}, Errors: {stats['errors']}")
return stats

View file

@ -88,12 +88,12 @@ def get_podcast_episodes(feed_url):
feed_url (str): URL of the podcast RSS feed. feed_url (str): URL of the podcast RSS feed.
Returns: Returns:
list: List of episode dictionaries. tuple: (list of episode dictionaries, podcast metadata dictionary)
""" """
try: try:
if not feed_url: if not feed_url:
logger.error("Empty feed URL provided") logger.error("Empty feed URL provided")
return [] return [], {}
logger.info(f"Fetching episodes from feed: {feed_url}") logger.info(f"Fetching episodes from feed: {feed_url}")
@ -130,6 +130,27 @@ def get_podcast_episodes(feed_url):
logger.info(f"Found {len(feed.entries)} entries in feed") logger.info(f"Found {len(feed.entries)} entries in feed")
# Extract podcast metadata
podcast_metadata = {
'title': feed.feed.get('title', ''),
'description': feed.feed.get('description', feed.feed.get('subtitle', '')),
'author': feed.feed.get('author', feed.feed.get('itunes_author', '')),
'image_url': None # Default to None, will try to extract below
}
# Try to get podcast image URL from various locations in the feed
if hasattr(feed.feed, 'image') and hasattr(feed.feed.image, 'href'):
podcast_metadata['image_url'] = feed.feed.image.href
logger.debug(f"Found podcast image in feed.image.href: {podcast_metadata['image_url']}")
elif hasattr(feed.feed, 'itunes_image') and hasattr(feed.feed.itunes_image, 'href'):
podcast_metadata['image_url'] = feed.feed.itunes_image.href
logger.debug(f"Found podcast image in feed.itunes_image.href: {podcast_metadata['image_url']}")
elif 'image' in feed.feed and 'href' in feed.feed.image:
podcast_metadata['image_url'] = feed.feed.image.href
logger.debug(f"Found podcast image in feed.image['href']: {podcast_metadata['image_url']}")
logger.info(f"Extracted podcast metadata: title='{podcast_metadata['title']}', image_url={podcast_metadata['image_url']}")
episodes = [] episodes = []
for entry in feed.entries: for entry in feed.entries:
# Log entry details for debugging # Log entry details for debugging
@ -344,38 +365,33 @@ def get_podcast_episodes(feed_url):
logger.warning(f"Invalid audio URL format: {episode['audio_url']}") logger.warning(f"Invalid audio URL format: {episode['audio_url']}")
continue continue
# Try to validate the URL without downloading the file # Skip validation for now - we'll validate when downloading
import requests # This prevents the import process from getting stuck on slow HEAD requests
head_response = requests.head(episode['audio_url'], timeout=5, allow_redirects=True) # The previous implementation made a HEAD request for each episode, which could
# cause timeouts or hanging connections with feeds containing many episodes
# Validation will happen when the episode is actually downloaded instead
logger.debug(f"Skipping audio URL validation for {episode['title']}")
episode['download_error'] = None
episode['status_code'] = 200 # Assume success
# Check if the URL is accessible # Add the episode regardless of status code
if head_response.status_code >= 400:
logger.warning(f"Audio URL returned status code {head_response.status_code}: {episode['audio_url']}")
continue
# Check if the content type is audio
content_type = head_response.headers.get('Content-Type', '')
if not content_type.startswith('audio/') and 'application/octet-stream' not in content_type:
logger.warning(f"Audio URL has non-audio content type: {content_type}")
# Don't skip here as some servers might not report the correct content type
# If we got here, the audio URL is probably valid
episodes.append(episode) episodes.append(episode)
logger.debug(f"Added episode with valid audio URL: {episode['title']}") logger.debug(f"Added episode: {episode['title']} (Status: {episode.get('status_code')})")
except Exception as e: except Exception as e:
# If we can't validate the URL, still add the episode but log a warning # If we can't validate the URL, still add the episode but log a warning
logger.warning(f"Could not validate audio URL: {str(e)}") logger.warning(f"Could not validate audio URL: {str(e)}")
episode['download_error'] = f"Could not validate URL: {str(e)}"
episodes.append(episode) episodes.append(episode)
logger.debug(f"Added episode with unvalidated audio URL: {episode['title']}") logger.debug(f"Added episode with unvalidated audio URL: {episode['title']}")
else: else:
logger.warning(f"Skipping episode without audio URL: {episode['title']}") logger.warning(f"Skipping episode without audio URL: {episode['title']}")
logger.info(f"Processed {len(episodes)} valid episodes") logger.info(f"Processed {len(episodes)} valid episodes")
return episodes return episodes, podcast_metadata
except Exception as e: except Exception as e:
logger.error(f"Error getting podcast episodes: {str(e)}") logger.error(f"Error getting podcast episodes: {str(e)}")
return [] return [], {}
def _parse_date(date_str): def _parse_date(date_str):
""" """

View file

@ -13,14 +13,18 @@ from app.services.podcast_downloader import download_episode
# Set up logging # Set up logging
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def update_all_podcasts(): def update_all_podcasts(progress_callback=None):
""" """
Update all podcasts in the database. Update all podcasts in the database.
Args:
progress_callback (callable, optional): Callback function for progress updates.
Returns: Returns:
dict: Statistics about the update process. dict: Statistics about the update process.
""" """
podcasts = Podcast.query.all() podcasts = Podcast.query.all()
total_podcasts = len(podcasts)
stats = { stats = {
'podcasts_updated': 0, 'podcasts_updated': 0,
@ -29,16 +33,32 @@ def update_all_podcasts():
'errors': 0 'errors': 0
} }
for podcast in podcasts: if progress_callback:
progress_callback(0, f"Starting update of {total_podcasts} podcasts")
for i, podcast in enumerate(podcasts):
try: try:
if progress_callback:
progress = int((i / total_podcasts) * 100)
progress_callback(progress, f"Updating podcast {i+1}/{total_podcasts}: {podcast.title}")
result = update_podcast(podcast.id) result = update_podcast(podcast.id)
stats['podcasts_updated'] += 1 stats['podcasts_updated'] += 1
stats['new_episodes'] += result['new_episodes'] stats['new_episodes'] += result['new_episodes']
stats['episodes_downloaded'] += result['episodes_downloaded'] stats['episodes_downloaded'] += result['episodes_downloaded']
if progress_callback:
progress_callback(progress, f"Updated podcast {i+1}/{total_podcasts}: {podcast.title} - Found {result['new_episodes']} new episodes")
except Exception as e: except Exception as e:
logger.error(f"Error updating podcast {podcast.title}: {str(e)}") logger.error(f"Error updating podcast {podcast.title}: {str(e)}")
stats['errors'] += 1 stats['errors'] += 1
if progress_callback:
progress_callback(progress, f"Error updating podcast {i+1}/{total_podcasts}: {podcast.title} - {str(e)}")
if progress_callback:
progress_callback(100, f"Update complete. Updated {stats['podcasts_updated']} podcasts, found {stats['new_episodes']} new episodes.")
return stats return stats
def update_podcast(podcast_id, progress_callback=None): def update_podcast(podcast_id, progress_callback=None):
@ -67,18 +87,56 @@ def update_podcast(podcast_id, progress_callback=None):
if progress_callback: if progress_callback:
progress_callback(10, f"Fetching episodes for {podcast.title}") progress_callback(10, f"Fetching episodes for {podcast.title}")
# Get episodes from feed # Get episodes and podcast metadata from feed
episodes = get_podcast_episodes(podcast.feed_url) episodes, podcast_metadata = get_podcast_episodes(podcast.feed_url)
# Update podcast last_checked timestamp # Update podcast last_checked timestamp
podcast.last_checked = datetime.utcnow() podcast.last_checked = datetime.utcnow()
# Update podcast metadata if available
updated = False
# Update image URL if available
if podcast_metadata.get('image_url'):
if podcast.image_url != podcast_metadata['image_url']:
logger.info(f"Updating podcast image URL from {podcast.image_url} to {podcast_metadata['image_url']}")
podcast.image_url = podcast_metadata['image_url']
updated = True
# Update author if available
if podcast_metadata.get('author'):
if podcast.author != podcast_metadata['author']:
logger.info(f"Updating podcast author from '{podcast.author}' to '{podcast_metadata['author']}'")
podcast.author = podcast_metadata['author']
updated = True
# Update description if available
if podcast_metadata.get('description'):
if podcast.description != podcast_metadata['description']:
logger.info(f"Updating podcast description")
podcast.description = podcast_metadata['description']
updated = True
# Commit changes if any updates were made
if updated:
db.session.commit()
if progress_callback: if progress_callback:
progress_callback(30, f"Found {len(episodes)} episodes") progress_callback(30, f"Found {len(episodes)} episodes")
if not episodes: if not episodes:
logger.warning(f"No episodes found for podcast: {podcast.title}") logger.warning(f"No episodes found for podcast: {podcast.title}")
stats['feed_status'] = 'no_episodes' stats['feed_status'] = 'no_episodes'
else:
# Check if all episodes have download errors
error_episodes = [ep for ep in episodes if ep.get('download_error')]
if len(error_episodes) == len(episodes):
logger.warning(f"All {len(episodes)} episodes have download errors for podcast: {podcast.title}")
stats['feed_status'] = 'all_episodes_have_errors'
# Store the most common error for reporting
if error_episodes:
stats['error_message'] = error_episodes[0].get('download_error', 'Unknown error')
stats['status_code'] = error_episodes[0].get('status_code')
# Check if we need to refresh the feed URL from iTunes # Check if we need to refresh the feed URL from iTunes
if podcast.external_id: if podcast.external_id:
@ -93,8 +151,36 @@ def update_podcast(podcast_id, progress_callback=None):
db.session.commit() db.session.commit()
# Try again with the new feed URL # Try again with the new feed URL
episodes = get_podcast_episodes(podcast.feed_url) episodes, updated_metadata = get_podcast_episodes(podcast.feed_url)
logger.info(f"Found {len(episodes)} episodes with updated feed URL") logger.info(f"Found {len(episodes)} episodes with updated feed URL")
# Update podcast metadata with the new feed
updated_from_new_feed = False
# Update image URL if available
if updated_metadata.get('image_url'):
if podcast.image_url != updated_metadata['image_url']:
logger.info(f"Updating podcast image URL from new feed: {updated_metadata['image_url']}")
podcast.image_url = updated_metadata['image_url']
updated_from_new_feed = True
# Update author if available
if updated_metadata.get('author'):
if podcast.author != updated_metadata['author']:
logger.info(f"Updating podcast author from new feed: '{updated_metadata['author']}'")
podcast.author = updated_metadata['author']
updated_from_new_feed = True
# Update description if available
if updated_metadata.get('description'):
if podcast.description != updated_metadata['description']:
logger.info(f"Updating podcast description from new feed")
podcast.description = updated_metadata['description']
updated_from_new_feed = True
# Commit changes if any updates were made
if updated_from_new_feed:
db.session.commit()
except Exception as e: except Exception as e:
logger.error(f"Error refreshing feed URL: {str(e)}") logger.error(f"Error refreshing feed URL: {str(e)}")
@ -132,7 +218,9 @@ def update_podcast(podcast_id, progress_callback=None):
episode_number=episode_data.get('episode_number'), episode_number=episode_data.get('episode_number'),
guid=episode_data['guid'], guid=episode_data['guid'],
downloaded=False, downloaded=False,
explicit=episode_data.get('explicit') # Explicit flag explicit=episode_data.get('explicit'), # Explicit flag
download_error=episode_data.get('download_error'), # Error message if download failed
status_code=episode_data.get('status_code') # HTTP status code
) )
db.session.add(episode) db.session.add(episode)

View file

@ -93,9 +93,34 @@ def add_podcast():
logger.info(f"Fetching episodes for podcast: {podcast.title} (ID: {podcast.id})") logger.info(f"Fetching episodes for podcast: {podcast.title} (ID: {podcast.id})")
logger.info(f"Feed URL: {podcast.feed_url}") logger.info(f"Feed URL: {podcast.feed_url}")
episodes_data = get_podcast_episodes(podcast.feed_url) episodes_data, podcast_metadata = get_podcast_episodes(podcast.feed_url)
logger.info(f"Found {len(episodes_data)} episodes in feed") logger.info(f"Found {len(episodes_data)} episodes in feed")
# Update podcast metadata if available from feed
updated = False
# Update image URL if available
if podcast_metadata.get('image_url') and not podcast.image_url:
logger.info(f"Updating podcast image URL from feed: {podcast_metadata['image_url']}")
podcast.image_url = podcast_metadata['image_url']
updated = True
# Update author if available
if podcast_metadata.get('author') and (not podcast.author or podcast.author == "Unknown Author"):
logger.info(f"Updating podcast author from feed: '{podcast_metadata['author']}'")
podcast.author = podcast_metadata['author']
updated = True
# Update description if available
if podcast_metadata.get('description') and not podcast.description:
logger.info(f"Updating podcast description from feed")
podcast.description = podcast_metadata['description']
updated = True
# Commit changes if any updates were made
if updated:
db.session.commit()
episodes_added = 0 episodes_added = 0
for episode_data in episodes_data: for episode_data in episodes_data:
# Check if episode has required fields # Check if episode has required fields
@ -236,3 +261,27 @@ def delete_episode(episode_id):
return jsonify({'error': str(e)}), 500 return jsonify({'error': str(e)}), 500
else: else:
return jsonify({'error': 'Episode file not found'}), 404 return jsonify({'error': 'Episode file not found'}), 404
# Update all podcasts API
@api_bp.route('/podcasts/update-all', methods=['POST'])
def update_all_podcasts_api():
"""
Update all podcasts to fetch new episodes.
"""
from app.services.podcast_updater import update_all_podcasts
from app.services.task_manager import task_manager
try:
# Create a background task for updating all podcasts
task_id = task_manager.create_task(
'update_all',
"Updating all podcasts",
update_all_podcasts
)
return jsonify({
'message': 'Update of all podcasts started in the background. Check the status in the tasks panel.',
'task_id': task_id
})
except Exception as e:
return jsonify({'error': str(e)}), 500

287
app/web/routes/calendar.py Normal file
View file

@ -0,0 +1,287 @@
"""
Calendar routes for the Podcastrr application.
"""
from flask import Blueprint, render_template, request, jsonify, current_app, url_for
from app.models.podcast import Podcast, Episode
from app.models.settings import Settings
from app.models.database import db
from datetime import datetime, timedelta
import calendar
import json
calendar_bp = Blueprint('calendar', __name__)
@calendar_bp.route('/')
def index():
    """
    Display the calendar view of podcast episodes.

    Query parameters:
        view:  'month' (default), 'week', or 'day'.
        year, month, day: date to center the view on; defaults to today.

    Renders calendar/index.html with episodes grouped by day for the
    selected range, colored by download/monitoring status.
    """
    # Get current settings; create defaults on first use so the attribute
    # accesses below (calendar_first_day, calendar_show_monitored_only)
    # are always safe.
    settings = Settings.query.first()
    if not settings:
        settings = Settings(
            download_path=current_app.config['DOWNLOAD_PATH'],
            naming_format="{podcast_title}/{episode_title}",
            auto_download=False,
            max_downloads=5,
            delete_after_days=30,
            calendar_first_day="Monday",
            calendar_show_monitored_only=False
        )
        db.session.add(settings)
        db.session.commit()

    # Get view type (month, week, day, agenda)
    view_type = request.args.get('view', 'month')

    # Get current date or use the one from the query parameters
    today = datetime.today()
    year = int(request.args.get('year', today.year))
    month = int(request.args.get('month', today.month))
    day = int(request.args.get('day', today.day))

    # Create a date object for the selected date
    selected_date = datetime(year, month, day)

    # First and last day of the selected month
    first_day = datetime(year, month, 1)
    if month == 12:
        last_day = datetime(year + 1, 1, 1) - timedelta(days=1)
    else:
        last_day = datetime(year, month + 1, 1) - timedelta(days=1)

    # All days in the month
    days_in_month = []
    current_day = first_day
    while current_day <= last_day:
        days_in_month.append(current_day)
        current_day += timedelta(days=1)

    # Determine the date range for the selected view
    if view_type == 'week':
        # 0 for Sunday-first, 1 for Monday-first per user settings.
        # NOTE(review): datetime.weekday() is 0=Monday..6=Sunday; for the
        # Sunday-first case this offset arithmetic starts the week on Monday
        # for most dates — confirm intended behavior against the template.
        first_day_of_week = 0 if settings.calendar_first_day == 'Sunday' else 1
        start_of_week = selected_date - timedelta(
            days=(selected_date.weekday() - first_day_of_week) % 7)
        end_of_week = start_of_week + timedelta(days=6)
    elif view_type == 'day':
        # For day view, just use the selected date
        start_of_week = selected_date
        end_of_week = selected_date
    else:
        # For month view, use the first and last day of the month
        start_of_week = first_day
        end_of_week = last_day

    # Get episodes for the selected view (month, week, or day)
    query = Episode.query.filter(
        Episode.published_date >= start_of_week,
        Episode.published_date <= end_of_week
    ).order_by(Episode.published_date)

    # Apply filter for monitored podcasts only if setting is enabled
    if settings.calendar_show_monitored_only:
        query = query.join(Podcast).filter(Podcast.auto_download == True)

    episodes = query.all()

    # Batch-load all referenced podcasts in one query instead of issuing
    # Podcast.query.get() per episode inside the loop (avoids N+1 queries).
    podcast_ids = {ep.podcast_id for ep in episodes}
    podcasts_by_id = (
        {p.id: p for p in Podcast.query.filter(Podcast.id.in_(podcast_ids)).all()}
        if podcast_ids else {}
    )

    # Determine which days to include based on the view type
    if view_type == 'week':
        days_to_include = []
        current_day = start_of_week
        while current_day <= end_of_week:
            days_to_include.append(current_day)
            current_day += timedelta(days=1)
    elif view_type == 'day':
        days_to_include = [selected_date]
    else:
        days_to_include = days_in_month

    # Initialize the episodes_by_day dictionary with every visible day so
    # the template can render empty days too
    episodes_by_day = {}
    for day in days_to_include:
        day_str = day.strftime('%Y-%m-%d')
        episodes_by_day[day_str] = []

    for episode in episodes:
        day_str = episode.published_date.strftime('%Y-%m-%d')
        if day_str in episodes_by_day:
            podcast = podcasts_by_id.get(episode.podcast_id)

            # Determine status color class for the template
            status_class = 'status-unmonitored'
            if podcast and podcast.auto_download:
                if episode.downloaded:
                    status_class = 'status-downloaded'
                elif episode.download_error:
                    status_class = 'status-error'
                else:
                    status_class = 'status-downloading'

            # Format air time; end time uses duration when known,
            # otherwise defaults to a 30-minute slot
            air_time = episode.published_date.strftime('%I:%M%p').lower()
            if episode.duration:
                end_time = (episode.published_date
                            + timedelta(seconds=episode.duration)).strftime('%I:%M%p').lower()
            else:
                end_time = (episode.published_date
                            + timedelta(minutes=30)).strftime('%I:%M%p').lower()

            episode_info = {
                'id': episode.id,
                'podcast_id': episode.podcast_id,
                'podcast_title': podcast.title if podcast else 'Unknown',
                'title': episode.title,
                'season': episode.season,
                'episode_number': episode.episode_number,
                'air_time': f"{air_time} - {end_time}",
                'status_class': status_class,
                'url': url_for('podcasts.view', podcast_id=episode.podcast_id)
            }
            episodes_by_day[day_str].append(episode_info)

    return render_template('calendar/index.html',
                           title='Calendar',
                           settings=settings,
                           view_type=view_type,
                           selected_date=selected_date,
                           days_in_month=days_in_month,
                           episodes_by_day=episodes_by_day,
                           start_of_week=start_of_week,
                           end_of_week=end_of_week,
                           days_to_include=days_to_include,
                           today=today,
                           first_day=first_day)
@calendar_bp.route('/events')
def events():
    """
    Return podcast episodes in the requested date range as JSON calendar events.

    Query parameters:
        start, end: ISO-8601 datetimes (a trailing 'Z' is accepted).
                    Invalid or missing dates fall back to the current month.

    Returns:
        JSON list of event dicts with id, title, start, url, color and
        description, colored by download/monitoring status.
    """
    # Settings row may not exist yet (index() creates it lazily), so it is
    # guarded before attribute access below.
    settings = Settings.query.first()

    # Get date range from query parameters
    start_date_str = request.args.get('start', '')
    end_date_str = request.args.get('end', '')
    try:
        start_date = datetime.fromisoformat(start_date_str.replace('Z', '+00:00'))
        end_date = datetime.fromisoformat(end_date_str.replace('Z', '+00:00'))
    except ValueError:
        # Fall back to the current month. Roll the year over in December so
        # end_date is Dec 31 of the *current* year — the previous expression
        # produced Dec 31 of last year, making end_date precede start_date.
        today = datetime.today()
        start_date = datetime(today.year, today.month, 1)
        if today.month == 12:
            end_date = datetime(today.year + 1, 1, 1) - timedelta(days=1)
        else:
            end_date = datetime(today.year, today.month + 1, 1) - timedelta(days=1)

    # Query episodes within the date range
    query = Episode.query.filter(
        Episode.published_date >= start_date,
        Episode.published_date <= end_date
    )

    # Apply filter for monitored podcasts only if setting is enabled;
    # guard against a missing settings row (previously raised AttributeError).
    if settings and settings.calendar_show_monitored_only:
        query = query.join(Podcast).filter(Podcast.auto_download == True)

    episodes = query.all()

    # Batch-load all referenced podcasts in one query instead of a
    # Podcast.query.get() per episode (avoids N+1 queries).
    podcast_ids = {ep.podcast_id for ep in episodes}
    podcasts_by_id = (
        {p.id: p for p in Podcast.query.filter(Podcast.id.in_(podcast_ids)).all()}
        if podcast_ids else {}
    )

    # Convert episodes to calendar events
    events = []
    for episode in episodes:
        podcast = podcasts_by_id.get(episode.podcast_id)

        # Determine color based on status
        color = '#999999'  # Default gray for unmonitored
        if podcast and podcast.auto_download:
            if episode.downloaded:
                color = '#28a745'  # Green for downloaded
            elif episode.download_error:
                color = '#dc3545'  # Red for error/missing
            else:
                color = '#6f42c1'  # Purple for downloading/pending

        events.append({
            'id': episode.id,
            'title': episode.title,
            'start': episode.published_date.isoformat(),
            'url': url_for('podcasts.view', podcast_id=episode.podcast_id),
            'color': color,
            'description': f"Podcast: {podcast.title if podcast else 'Unknown'}"
        })

    return jsonify(events)
@calendar_bp.route('/ical')
def ical():
    """
    Generate an iCalendar (RFC 5545) feed of podcast episodes.

    Covers episodes published within a rolling 60-day window (past 30 /
    next 30 days) and serves the result as a text/calendar attachment.

    Returns:
        A (body, status, headers) tuple understood by Flask.
    """
    from datetime import timezone  # local import; top-of-file imports are outside this view

    def _escape(text):
        # RFC 5545 section 3.3.11: backslash, semicolon, comma and
        # newlines must be escaped in TEXT property values.
        return (text.replace('\\', '\\\\')
                    .replace(';', '\\;')
                    .replace(',', '\\,')
                    .replace('\r\n', '\\n')
                    .replace('\n', '\\n'))

    # Current settings (may be None on a fresh install with no settings row).
    settings = Settings.query.first()

    # Query episodes (limit to recent and upcoming).
    start_date = datetime.today() - timedelta(days=30)  # Past 30 days
    end_date = datetime.today() + timedelta(days=30)    # Next 30 days
    query = Episode.query.filter(
        Episode.published_date >= start_date,
        Episode.published_date <= end_date
    )
    # Apply filter for monitored podcasts only if setting is enabled.
    if settings and settings.calendar_show_monitored_only:
        query = query.join(Podcast).filter(Podcast.auto_download == True)
    episodes = query.all()

    # Generate iCal content.
    ical_content = [
        "BEGIN:VCALENDAR",
        "VERSION:2.0",
        "PRODID:-//Podcastrr//EN",
        "CALSCALE:GREGORIAN",
        "METHOD:PUBLISH",
        "X-WR-CALNAME:Podcastrr Episodes",
        "X-WR-CALDESC:Podcast episodes from Podcastrr"
    ]

    # DTSTAMP must be UTC when the 'Z' suffix is used; the previous code
    # stamped *local* time with a 'Z'. Computed once for the whole feed.
    dt_stamp = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")

    for episode in episodes:
        podcast = Podcast.query.get(episode.podcast_id)
        # published_date is assumed to be stored in UTC — TODO confirm
        # against the feed parser before relying on the 'Z' suffix.
        dt_start = episode.published_date.strftime("%Y%m%dT%H%M%SZ")

        podcast_title = podcast.title if podcast else 'Unknown podcast'
        summary = episode.description[:100] if episode.description else ''
        # Only append an ellipsis when the description was truncated.
        if episode.description and len(episode.description) > 100:
            summary += '...'

        ical_content.extend([
            "BEGIN:VEVENT",
            f"UID:{episode.id}@podcastrr",
            f"DTSTAMP:{dt_stamp}",
            f"DTSTART:{dt_start}",
            f"SUMMARY:{_escape(episode.title)}",
            f"DESCRIPTION:{_escape(f'{podcast_title}: {summary}')}",
            "END:VEVENT"
        ])

    ical_content.append("END:VCALENDAR")
    response = "\r\n".join(ical_content)
    return response, 200, {
        'Content-Type': 'text/calendar; charset=utf-8',
        'Content-Disposition': 'attachment; filename=podcastrr.ics'
    }

View file

@ -1,7 +1,7 @@
""" """
Main routes for the Podcastrr application. Main routes for the Podcastrr application.
""" """
from flask import Blueprint, render_template, current_app from flask import Blueprint, render_template, current_app, redirect, url_for, request
from app.models.podcast import Podcast from app.models.podcast import Podcast
main_bp = Blueprint('main', __name__) main_bp = Blueprint('main', __name__)
@ -9,14 +9,23 @@ main_bp = Blueprint('main', __name__)
@main_bp.route('/') @main_bp.route('/')
def index(): def index():
""" """
Render the home page. Redirect to the podcasts page or handle specific views.
""" """
# Get recent podcasts view = request.args.get('view')
recent_podcasts = Podcast.query.order_by(Podcast.last_updated.desc()).limit(5).all()
return render_template('index.html', if view in ['history', 'wanted', 'status']:
title='Home', # Get recent podcasts for the template
recent_podcasts=recent_podcasts) recent_podcasts = Podcast.query.order_by(Podcast.last_updated.desc()).limit(5).all()
# For now, just render the index template with the view parameter
# In the future, these could be implemented as separate views
return render_template('index.html',
title=view.capitalize(),
view=view,
recent_podcasts=recent_podcasts)
# Default: redirect to podcasts page
return redirect(url_for('podcasts.index'))
@main_bp.route('/about') @main_bp.route('/about')
def about(): def about():

View file

@ -90,6 +90,12 @@ def add(podcast_id):
if stats and stats.get('new_episodes', 0) > 0: if stats and stats.get('new_episodes', 0) > 0:
flash(f'Podcast added successfully! Found {stats["new_episodes"]} episodes.', 'success') flash(f'Podcast added successfully! Found {stats["new_episodes"]} episodes.', 'success')
elif stats and stats.get('feed_status') == 'all_episodes_have_errors':
error_msg = stats.get('error_message', 'Unknown error')
status_code = stats.get('status_code', '')
status_info = f" (Status code: {status_code})" if status_code else ""
flash(f'Podcast added successfully! Found episodes but all have download issues: {error_msg}{status_info}. You can try updating later.', 'warning')
logger.warning(f"All episodes have download errors for podcast: {podcast.title}")
else: else:
flash('Podcast added successfully! No episodes found yet. The feed might be empty or inaccessible.', 'info') flash('Podcast added successfully! No episodes found yet. The feed might be empty or inaccessible.', 'info')
logger.warning(f"No episodes found for podcast: {podcast.title}") logger.warning(f"No episodes found for podcast: {podcast.title}")
@ -399,8 +405,8 @@ def add_by_url():
return redirect(url_for('podcasts.view', podcast_id=existing.id)) return redirect(url_for('podcasts.view', podcast_id=existing.id))
try: try:
# Try to get podcast episodes to validate the feed # Try to get podcast episodes and metadata to validate the feed
episodes = get_podcast_episodes(feed_url) episodes, podcast_metadata = get_podcast_episodes(feed_url)
if not episodes: if not episodes:
flash('No episodes found in the feed. Please check the URL and try again.', 'error') flash('No episodes found in the feed. Please check the URL and try again.', 'error')
@ -409,9 +415,12 @@ def add_by_url():
# Get the first episode to extract podcast info # Get the first episode to extract podcast info
first_episode = episodes[0] first_episode = episodes[0]
# Create podcast record with basic info # Create podcast record with info from feed
podcast = Podcast( podcast = Podcast(
title=first_episode.get('podcast_title', 'Unknown Podcast'), title=podcast_metadata.get('title', first_episode.get('podcast_title', 'Unknown Podcast')),
description=podcast_metadata.get('description', ''),
author=podcast_metadata.get('author', ''),
image_url=podcast_metadata.get('image_url', ''),
feed_url=feed_url feed_url=feed_url
) )

View file

@ -35,6 +35,8 @@ def index():
auto_download = 'auto_download' in request.form auto_download = 'auto_download' in request.form
max_downloads = int(request.form.get('max_downloads', 5)) max_downloads = int(request.form.get('max_downloads', 5))
delete_after_days = int(request.form.get('delete_after_days', 30)) delete_after_days = int(request.form.get('delete_after_days', 30))
calendar_first_day = request.form.get('calendar_first_day', 'Monday')
calendar_show_monitored_only = 'calendar_show_monitored_only' in request.form
# Validate download path # Validate download path
if not os.path.exists(download_path): if not os.path.exists(download_path):
@ -52,6 +54,8 @@ def index():
settings.auto_download = auto_download settings.auto_download = auto_download
settings.max_downloads = max_downloads settings.max_downloads = max_downloads
settings.delete_after_days = delete_after_days settings.delete_after_days = delete_after_days
settings.calendar_first_day = calendar_first_day
settings.calendar_show_monitored_only = calendar_show_monitored_only
db.session.commit() db.session.commit()

View file

@ -43,12 +43,14 @@ def create_app(config=None):
from app.web.routes.settings import settings_bp from app.web.routes.settings import settings_bp
from app.web.routes.api import api_bp from app.web.routes.api import api_bp
from app.web.routes.tasks import tasks_bp from app.web.routes.tasks import tasks_bp
from app.web.routes.calendar import calendar_bp
app.register_blueprint(main_bp) app.register_blueprint(main_bp)
app.register_blueprint(podcasts_bp, url_prefix='/podcasts') app.register_blueprint(podcasts_bp, url_prefix='/podcasts')
app.register_blueprint(settings_bp, url_prefix='/settings') app.register_blueprint(settings_bp, url_prefix='/settings')
app.register_blueprint(api_bp, url_prefix='/api') app.register_blueprint(api_bp, url_prefix='/api')
app.register_blueprint(tasks_bp) app.register_blueprint(tasks_bp)
app.register_blueprint(calendar_bp, url_prefix='/calendar')
# Ensure the download directory exists # Ensure the download directory exists
os.makedirs(app.config['DOWNLOAD_PATH'], exist_ok=True) os.makedirs(app.config['DOWNLOAD_PATH'], exist_ok=True)

26
check_db.py Normal file
View file

@ -0,0 +1,26 @@
"""
Script to check the structure of the settings table in the database.
"""
import sqlite3
import os
# Get the database path
db_path = os.path.join(os.path.dirname(__file__), 'instance', 'podcastrr.db')
print(f"Database path: {db_path}")
# Connect to the database
conn = sqlite3.connect(db_path)
cursor = conn.cursor()
# Check the structure of the settings table
print("Checking settings table structure...")
cursor.execute("PRAGMA table_info(settings)")
columns = cursor.fetchall()
# Print the columns
print("Settings table columns:")
for column in columns:
print(f" {column[1]} ({column[2]})")
# Close the connection
conn.close()

19
docker-compose.yml Normal file
View file

@ -0,0 +1,19 @@
# Docker Compose definition for the Podcastrr web application.
# NOTE(review): the top-level `version` key is obsolete in Compose v2 — confirm before removing.
version: '3.8'

services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      # Host port 5000 -> container port 5000 (Flask default).
      - "5000:5000"
    volumes:
      # Persist downloaded episodes and the SQLite database on the host.
      - ./downloads:/app/downloads
      - ./instance:/app/instance
    environment:
      - FLASK_ENV=production
      # Change this to a secure random string before deploying.
      - SECRET_KEY=change_this_to_a_secure_random_string
      - DATABASE_URI=sqlite:///instance/podcastrr.db
      - DOWNLOAD_PATH=/app/downloads
      - LOG_LEVEL=INFO
    restart: unless-stopped

17
docker-entrypoint.sh Normal file
View file

@ -0,0 +1,17 @@
#!/bin/bash
# Container entrypoint: prepare runtime state, then launch the app.
set -e

# Ensure the runtime directories exist before anything touches them.
mkdir -p instance downloads

# First run: create the database. Subsequent runs: apply pending migrations.
if [[ -f instance/podcastrr.db ]]; then
    echo "Database already exists, checking for migrations..."
    python run_migrations.py
else
    echo "Initializing database..."
    python init_db.py
fi

# Replace this shell with the application process so signals reach it.
exec python main.py

View file

@ -0,0 +1,37 @@
"""
Migration script to add calendar-related fields to the settings table.
"""
import sqlite3
from flask import current_app
def run_migration():
    """
    Add calendar-related columns to the ``settings`` table.

    Adds ``calendar_first_day`` (TEXT, default 'Monday') and
    ``calendar_show_monitored_only`` (BOOLEAN, default 0) when they are
    missing. Idempotent: safe to run repeatedly.

    Returns:
        bool: True when the migration completes.
    """
    # Derive the SQLite file path from the SQLAlchemy URI in app config.
    db_path = current_app.config['SQLALCHEMY_DATABASE_URI'].replace('sqlite:///', '')

    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()

        # Inspect existing columns so re-running the migration is a no-op.
        cursor.execute("PRAGMA table_info(settings)")
        columns = [column[1] for column in cursor.fetchall()]

        # Add the calendar_first_day column if it doesn't exist.
        if 'calendar_first_day' not in columns:
            print("Adding 'calendar_first_day' column to settings table...")
            cursor.execute("ALTER TABLE settings ADD COLUMN calendar_first_day TEXT DEFAULT 'Monday'")

        # Add the calendar_show_monitored_only column if it doesn't exist.
        if 'calendar_show_monitored_only' not in columns:
            print("Adding 'calendar_show_monitored_only' column to settings table...")
            cursor.execute("ALTER TABLE settings ADD COLUMN calendar_show_monitored_only BOOLEAN DEFAULT 0")

        conn.commit()
    finally:
        # Close the connection even if a statement fails (the previous
        # version leaked it on error).
        conn.close()

    print("Calendar settings migration completed successfully!")
    return True

View file

@ -0,0 +1,37 @@
"""
Migration script to add download_error and status_code fields to the Episode model.
"""
import sqlite3
from flask import current_app
def run_migration():
    """
    Add download-error tracking columns to the ``episodes`` table.

    Adds ``download_error`` (TEXT) and ``status_code`` (INTEGER) when they
    are missing. Idempotent: safe to run repeatedly.

    Returns:
        bool: True when the migration completes.
    """
    # Derive the SQLite file path from the SQLAlchemy URI in app config.
    db_path = current_app.config['SQLALCHEMY_DATABASE_URI'].replace('sqlite:///', '')

    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()

        # Inspect existing columns so re-running the migration is a no-op.
        cursor.execute("PRAGMA table_info(episodes)")
        columns = [column[1] for column in cursor.fetchall()]

        # Add the download_error column if it doesn't exist.
        if 'download_error' not in columns:
            print("Adding 'download_error' column to episodes table...")
            cursor.execute("ALTER TABLE episodes ADD COLUMN download_error TEXT")

        # Add the status_code column if it doesn't exist.
        if 'status_code' not in columns:
            print("Adding 'status_code' column to episodes table...")
            cursor.execute("ALTER TABLE episodes ADD COLUMN status_code INTEGER")

        conn.commit()
    finally:
        # Close the connection even if a statement fails (the previous
        # version leaked it on error).
        conn.close()

    print("Migration completed successfully!")
    return True

View file

@ -84,6 +84,38 @@ html, body {
border-left-color: #58a6ff; border-left-color: #58a6ff;
} }
/* Section Headers in Sidebar */
.sidebar-nav .section-header {
padding: 12px 16px 4px;
margin-top: 8px;
}
.sidebar-nav .section-header span {
font-size: 11px;
font-weight: 600;
color: #7d8590;
text-transform: uppercase;
letter-spacing: 0.5px;
}
/* Sub-items in Sidebar */
.sidebar-nav .sub-item a {
padding-left: 24px;
font-size: 12px;
color: #c9d1d9;
}
.sidebar-nav .sub-item a:hover {
background-color: #21262d;
color: #f0f6fc;
}
.sidebar-nav .sub-item a.active {
background-color: #1f6feb;
color: #ffffff;
border-left-color: #58a6ff;
}
/* Main Content Area */ /* Main Content Area */
.main-area { .main-area {
flex: 1; flex: 1;

View file

@ -16,37 +16,70 @@
</div> </div>
<div class="sidebar-nav"> <div class="sidebar-nav">
<ul> <ul>
<li> <!-- Podcasts Section -->
<a href="{{ url_for('main.index') }}"
class="{% if request.endpoint == 'main.index' %}active{% endif %}">
Home
</a>
</li>
<li> <li>
<a href="{{ url_for('podcasts.index') }}" <a href="{{ url_for('podcasts.index') }}"
class="{% if request.endpoint in ['podcasts.index', 'podcasts.view'] %}active{% endif %}"> class="{% if request.endpoint in ['podcasts.index', 'podcasts.view'] %}active{% endif %}">
Podcasts Podcasts
</a> </a>
</li> </li>
<li> <!-- Add New option appears only when on Podcasts page -->
{% if request.endpoint in ['podcasts.index', 'podcasts.view', 'podcasts.search'] %}
<li class="sub-item">
<a href="{{ url_for('podcasts.search') }}" <a href="{{ url_for('podcasts.search') }}"
class="{% if request.endpoint == 'podcasts.search' %}active{% endif %}"> class="{% if request.endpoint == 'podcasts.search' %}active{% endif %}">
Add New Add New
</a> </a>
</li> </li>
{% endif %}
<!-- Calendar Section -->
<li> <li>
<a href="{{ url_for('main.dashboard') }}" <a href="{{ url_for('calendar.index') }}"
class="{% if request.endpoint == 'main.dashboard' %}active{% endif %}"> class="{% if request.endpoint == 'calendar.index' %}active{% endif %}">
Dashboard Calendar
</a> </a>
</li> </li>
<li>
<!-- Activity Section -->
<li class="section-header">
<span>Activity</span>
</li>
<li class="sub-item">
<a href="{{ url_for('main.index') }}?view=history"
class="{% if request.endpoint == 'main.index' and request.args.get('view') == 'history' %}active{% endif %}">
History
</a>
</li>
<li class="sub-item">
<a href="{{ url_for('main.index') }}?view=wanted"
class="{% if request.endpoint == 'main.index' and request.args.get('view') == 'wanted' %}active{% endif %}">
Wanted
</a>
</li>
<!-- Settings Section -->
<li class="section-header">
<span>Settings</span>
</li>
<li class="sub-item">
<a href="{{ url_for('settings.index') }}" <a href="{{ url_for('settings.index') }}"
class="{% if request.endpoint == 'settings.index' %}active{% endif %}"> class="{% if request.endpoint == 'settings.index' %}active{% endif %}">
Settings Media Management
</a> </a>
</li> </li>
<li>
<!-- System Section -->
<li class="section-header">
<span>System</span>
</li>
<li class="sub-item">
<a href="{{ url_for('main.index') }}?view=status"
class="{% if request.endpoint == 'main.index' and request.args.get('view') == 'status' %}active{% endif %}">
Status
</a>
</li>
<li class="sub-item">
<a href="{{ url_for('tasks.view_tasks') }}" <a href="{{ url_for('tasks.view_tasks') }}"
class="{% if request.endpoint == 'tasks.view_tasks' %}active{% endif %}"> class="{% if request.endpoint == 'tasks.view_tasks' %}active{% endif %}">
Tasks Tasks

File diff suppressed because it is too large Load diff

View file

@ -100,8 +100,27 @@
<script> <script>
function updateAllPodcasts() { function updateAllPodcasts() {
// This would trigger an update for all podcasts // Call the API to update all podcasts
alert('Update All functionality would be implemented here'); fetch('/api/podcasts/update-all', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
}
})
.then(response => {
if (!response.ok) {
throw new Error('Network response was not ok');
}
return response.json();
})
.then(data => {
// Show success message
alert(data.message);
})
.catch(error => {
// Show error message
alert('Error updating podcasts: ' + error.message);
});
} }
function refreshPage() { function refreshPage() {

View file

@ -188,6 +188,8 @@
<td class="cell-center"> <td class="cell-center">
{% if episode.downloaded %} {% if episode.downloaded %}
<span class="status-badge status-active">Downloaded</span> <span class="status-badge status-active">Downloaded</span>
{% elif episode.download_error %}
<span class="status-badge status-error" title="{{ episode.download_error }}">Error {{ episode.status_code }}</span>
{% else %} {% else %}
<span class="status-badge status-pending">Available</span> <span class="status-badge status-pending">Available</span>
{% endif %} {% endif %}
@ -203,6 +205,9 @@
{% if episode.audio_url %} {% if episode.audio_url %}
<a href="{{ episode.audio_url }}" target="_blank" class="btn btn-sm btn-secondary" style="margin-left: 4px;">Stream</a> <a href="{{ episode.audio_url }}" target="_blank" class="btn btn-sm btn-secondary" style="margin-left: 4px;">Stream</a>
{% endif %} {% endif %}
{% if episode.download_error %}
<span class="tooltip-icon" title="{{ episode.download_error }}"></span>
{% endif %}
</td> </td>
</tr> </tr>
{% endfor %} {% endfor %}
@ -259,6 +264,8 @@
<td class="cell-center"> <td class="cell-center">
{% if episode.downloaded %} {% if episode.downloaded %}
<span class="status-badge status-active">Downloaded</span> <span class="status-badge status-active">Downloaded</span>
{% elif episode.download_error %}
<span class="status-badge status-error" title="{{ episode.download_error }}">Error {{ episode.status_code }}</span>
{% else %} {% else %}
<span class="status-badge status-pending">Available</span> <span class="status-badge status-pending">Available</span>
{% endif %} {% endif %}
@ -274,6 +281,9 @@
{% if episode.audio_url %} {% if episode.audio_url %}
<a href="{{ episode.audio_url }}" target="_blank" class="btn btn-sm btn-secondary" style="margin-left: 4px;">Stream</a> <a href="{{ episode.audio_url }}" target="_blank" class="btn btn-sm btn-secondary" style="margin-left: 4px;">Stream</a>
{% endif %} {% endif %}
{% if episode.download_error %}
<span class="tooltip-icon" title="{{ episode.download_error }}"></span>
{% endif %}
</td> </td>
</tr> </tr>
{% endfor %} {% endfor %}
@ -293,6 +303,26 @@
{% endblock %} {% endblock %}
{% block scripts %} {% block scripts %}
<style>
.status-error {
background-color: #f85149;
color: white;
}
.tooltip-icon {
display: inline-block;
width: 16px;
height: 16px;
border-radius: 50%;
background-color: #58a6ff;
color: white;
text-align: center;
line-height: 16px;
font-size: 12px;
margin-left: 4px;
cursor: help;
}
</style>
<script> <script>
function toggleSeason(seasonId) { function toggleSeason(seasonId) {
// Find the clicked header element // Find the clicked header element

View file

@ -38,6 +38,21 @@
<small>Set to 0 to never delete</small> <small>Set to 0 to never delete</small>
</div> </div>
<h3>Calendar Settings</h3>
<div class="form-group">
<label for="calendar_first_day">First Day of Week</label>
<select name="calendar_first_day" id="calendar_first_day">
<option value="Monday">Monday</option>
<option value="Sunday">Sunday</option>
</select>
</div>
<div class="form-group checkbox">
<input type="checkbox" name="calendar_show_monitored_only" id="calendar_show_monitored_only" {% if settings.calendar_show_monitored_only %}checked{% endif %}>
<label for="calendar_show_monitored_only">Show monitored podcasts only</label>
</div>
<div class="form-actions"> <div class="form-actions">
<button type="submit" class="btn">Save Settings</button> <button type="submit" class="btn">Save Settings</button>
</div> </div>