This commit is contained in:
2026-02-21 11:45:58 +01:00
commit 70ba4a0279
10 changed files with 270 additions and 0 deletions

23
core/config_loader.py Normal file
View File

@@ -0,0 +1,23 @@
import yaml
from pathlib import Path
# Absolute path to the repository-level config.yaml (two levels above this module).
CONFIG_PATH = Path(__file__).parent.parent / "config.yaml"
def load_config():
    """Parse config.yaml and attach derived Google-Sheets URLs.

    Returns the parsed configuration dict, extended with a 'links' section
    holding CSV-export and edit URLs for the times and remarks worksheets.
    """
    with open(CONFIG_PATH, "r", encoding="utf-8") as handle:
        settings = yaml.safe_load(handle)

    sheet = settings['google_sheet']
    base = f"https://docs.google.com/spreadsheets/d/{sheet['sheet_id']}"
    # We use exactly the URL format that worked in the very first test.
    settings['links'] = {
        'times_csv': f"{base}/export?format=csv&gid={sheet['gid_times']}",
        'remarks_csv': f"{base}/export?format=csv&gid={sheet['gid_remarks']}",
        'times_edit': f"{base}/edit#gid={sheet['gid_times']}",
        'remarks_edit': f"{base}/edit#gid={sheet['gid_remarks']}",
    }
    return settings
# Module-level singleton: the configuration is loaded once, at import time.
config = load_config()

67
core/data_processor.py Normal file
View File

@@ -0,0 +1,67 @@
import pandas as pd
import requests
from io import StringIO
from datetime import datetime, timedelta
from .config_loader import config
_cache = {"events": None, "remarks": None}
HEADERS = {"User-Agent": "Mozilla/5.0"}
def invalidate_cache():
global _cache
_cache = {"events": None, "remarks": None}
print("DEBUG: Cache wurde manuell geleert.")
return "Cache gelöscht"
def get_upcoming_events():
    """Download the times sheet and return upcoming rows as a list of dicts.

    Rows are filtered to [today, today + days_to_show], sorted by date, and
    enriched with a German weekday abbreviation column ('Wochentag').  The
    result is memoized in _cache["events"] until invalidate_cache() is called.
    """
    if _cache["events"] is not None:
        return _cache["events"]

    url = config['links']['times_csv']
    print(f"DEBUG: Lade Zeiten von URL: {url}")
    # timeout keeps a stalled download from blocking the app forever;
    # raise_for_status surfaces HTTP errors instead of parsing an error page as CSV.
    response = requests.get(url, headers=HEADERS, timeout=30)
    response.raise_for_status()
    df = pd.read_csv(StringIO(response.text))

    # Debug: check which columns actually arrived
    print(f"DEBUG: Spalten in Zeiten-Sheet gefunden: {df.columns.tolist()}")

    date_col = config['google_sheet']['date_column']
    df = df.dropna(subset=[date_col])
    # dayfirst: the sheet uses day-first dates; unparseable cells become NaT
    df[date_col] = pd.to_datetime(df[date_col], dayfirst=True, errors='coerce')
    df = df.dropna(subset=[date_col]).fillna('')

    heute = pd.Timestamp(datetime.now().date())
    ende = heute + timedelta(days=config['processing']['days_to_show'])
    mask = (df[date_col] >= heute) & (df[date_col] <= ende)
    gefilterte = df.loc[mask].sort_values(by=date_col).copy()

    wt_map = {0: 'Mo', 1: 'Di', 2: 'Mi', 3: 'Do', 4: 'Fr', 5: 'Sa', 6: 'So'}
    gefilterte['Wochentag'] = gefilterte[date_col].dt.weekday.map(wt_map)

    _cache["events"] = gefilterte.to_dict(orient='records')
    return _cache["events"]
def get_remarks():
    """Download the remarks sheet and return non-empty remark strings.

    The first two rows of the sheet are skipped (title rows); the remarks
    live in the first column.  The result is memoized in _cache["remarks"]
    until invalidate_cache() is called.
    """
    if _cache["remarks"] is not None:
        return _cache["remarks"]

    url = config['links']['remarks_csv']
    print(f"DEBUG: Lade Bemerkungen von URL: {url}")
    # timeout keeps a stalled download from blocking forever;
    # raise_for_status surfaces HTTP errors instead of parsing an error page.
    response = requests.get(url, headers=HEADERS, timeout=30)
    response.raise_for_status()

    # Debug: peek at the start of the raw payload
    print(f"DEBUG: Raw Data Bemerkungen (Anfang): {response.text[:100]}...")

    try:
        # Skip the two title rows; there is no header row after them.
        df = pd.read_csv(StringIO(response.text), skiprows=2, header=None)
    except pd.errors.EmptyDataError:
        # Sheet has fewer than three rows -> there are no remarks at all.
        df = pd.DataFrame()

    if not df.empty:
        print(f"DEBUG: Bemerkungen-DF Head:\n{df.head(3)}")
        remarks = df[0].dropna().astype(str).tolist()
        _cache["remarks"] = [r.strip() for r in remarks if r.strip()]
    else:
        _cache["remarks"] = []
    return _cache["remarks"]