Erster Wurf
This commit is contained in:
@@ -1,66 +1,83 @@
|
||||
import pandas as pd
|
||||
import requests
|
||||
import markdown
|
||||
from io import StringIO
|
||||
from datetime import datetime, timedelta
|
||||
from .config_loader import config
|
||||
|
||||
_cache = {"events": None, "remarks": None}
|
||||
_cache = {"events": None, "remarks": None, "timestamp": None}
|
||||
HEADERS = {"User-Agent": "Mozilla/5.0"}
|
||||
|
||||
def invalidate_cache():
    """Manually clear the module cache.

    Resets events, remarks and the refresh timestamp so the next call to
    get_upcoming_events()/get_remarks() re-fetches from the sheet.
    (Diff residue previously assigned two conflicting reset dicts; the
    first one was missing the `timestamp` key.)

    Returns:
        str: short confirmation message shown to the caller.
    """
    global _cache
    _cache = {"events": None, "remarks": None, "timestamp": None}
    print("DEBUG: Cache wurde manuell geleert.")
    return "Cache gelöscht"
||||
# NOTE(review): stale pre-refactor version of get_upcoming_events(), left
# behind as diff residue. It is shadowed by the later definition in this
# file and its body is truncated here — safe to delete once confirmed.
def get_upcoming_events():
|
||||
if _cache["events"] is not None:
|
||||
return _cache["events"]
|
||||
|
||||
url = config['links']['times_csv']
|
||||
print(f"DEBUG: Lade Zeiten von URL: {url}")
|
||||
|
||||
response = requests.get(url, headers=HEADERS)
|
||||
df = pd.read_csv(StringIO(response.text))
|
||||
|
||||
# Debug: check column names
|
||||
print(f"DEBUG: Spalten in Zeiten-Sheet gefunden: {df.columns.tolist()}")
|
||||
def _is_cache_valid():
    """Return True while the cached sheet data is less than one hour old."""
    stamp = _cache["timestamp"]
    if stamp is None:
        return False
    age = datetime.now() - stamp
    return age < timedelta(hours=1)
||||
def get_upcoming_events(days_to_show=None, limit=None):
    """Return upcoming events from the times CSV, using a one-hour cache.

    Args:
        days_to_show: Horizon in days; falsy values (None/0) fall back to
            config['processing']['days_to_show'].
        limit: If truthy and > 0, return at most this many events starting
            today. Takes precedence over the day horizon.

    Returns:
        list[dict]: one record per sheet row, sorted by date, including
        the derived 'Wochentag' (weekday abbreviation) column.

    Raises:
        requests.HTTPError: if the CSV download fails.
    """
    # Fall back to the configured default horizon (also covers 0).
    if not days_to_show:
        days_to_show = config['processing']['days_to_show']

    # 1. Refresh the cache when it is missing or older than one hour.
    if not _is_cache_valid() or _cache["events"] is None:
        url = config['links']['times_csv']
        response = requests.get(url, headers=HEADERS)
        response.raise_for_status()
        response.encoding = 'utf-8'

        df = pd.read_csv(StringIO(response.text))

        # Typography: replace plain hyphens in the time ranges with
        # en-dashes (–).
        for col in ('Morgen', 'Nachmittag'):
            if col in df.columns:  # guard against a missing sheet column
                df[col] = df[col].fillna('').astype(str).str.replace('-', '–', regex=False)

        date_col = config['google_sheet']['date_column']
        df = df.dropna(subset=[date_col])
        df[date_col] = pd.to_datetime(df[date_col], dayfirst=True, errors='coerce')
        # BUGFIX: errors='coerce' can leave NaT rows; drop them so the
        # date comparisons in the filters below cannot fail.
        df = df.dropna(subset=[date_col])

        wt_map = {0: 'Mo', 1: 'Di', 2: 'Mi', 3: 'Do', 4: 'Fr', 5: 'Sa', 6: 'So'}
        df['Wochentag'] = df[date_col].dt.weekday.map(wt_map)

        _cache["events"] = df.sort_values(by=date_col).to_dict(orient='records')
        _cache["timestamp"] = datetime.now()

    # 2. Filter the cached records.
    heute = pd.Timestamp(datetime.now().date())
    date_col = config['google_sheet']['date_column']

    # PRIORITY 1: row limit (always wins).
    if limit and limit > 0:
        return [e for e in _cache["events"] if e[date_col] >= heute][:limit]

    # PRIORITY 2: day horizon.
    ende = heute + timedelta(days=int(days_to_show))
    return [e for e in _cache["events"] if heute <= e[date_col] <= ende]
|
||||
def get_remarks():
    """Return the remark lines from the remarks CSV as HTML snippets.

    Each non-empty cell of the first column is rendered from Markdown to
    HTML; a single wrapping <p>…</p> is stripped so snippets can be
    embedded inline. Results are cached for one hour.

    Returns:
        list[str]: processed remark snippets (possibly empty).

    Raises:
        requests.HTTPError: if the CSV download fails.
    """
    if _is_cache_valid() and _cache["remarks"] is not None:
        return _cache["remarks"]

    url = config['links']['remarks_csv']
    print(f"DEBUG: Lade Bemerkungen von URL: {url}")

    response = requests.get(url, headers=HEADERS)

    # Debug: peek at the first 100 characters of the raw payload.
    print(f"DEBUG: Raw Data Bemerkungen (Anfang): {response.text[:100]}...")
    response.raise_for_status()
    response.encoding = 'utf-8'

    # skiprows=2: the sheet export carries two header rows we do not need.
    df = pd.read_csv(StringIO(response.text), skiprows=2, header=None)

    if not df.empty:
        print(f"DEBUG: Bemerkungen-DF Head:\n{df.head(3)}")
        raw_remarks = df[0].dropna().astype(str).tolist()
        processed = []
        for r in raw_remarks:
            text = r.strip()
            if not text:
                continue  # BUGFIX: skip blank rows instead of emitting empty snippets
            html = markdown.markdown(text)
            # Unwrap the single enclosing paragraph markdown adds.
            if html.startswith("<p>") and html.endswith("</p>"):
                html = html[3:-4]
            processed.append(html)
        _cache["remarks"] = processed
        _cache["timestamp"] = datetime.now()
    else:
        _cache["remarks"] = []

    # BUGFIX: the fresh-fetch path previously fell through without
    # returning, so callers got None right after the cache was filled.
    return _cache["remarks"]
||||
Reference in New Issue
Block a user