diff --git a/web/app.py b/web/app.py index 1b0961de4..583de100f 100644 --- a/web/app.py +++ b/web/app.py @@ -450,7 +450,7 @@ def main(): page = st.sidebar.selectbox( "切换功能模块", - ["📊 股票分析", "⚙️ 配置管理", "💾 缓存管理", "💰 Token统计", "📈 历史记录", "🔧 系统状态"], + ["📊 股票分析","📰 新闻概况", "⚙️ 配置管理", "💾 缓存管理", "💰 Token统计", "📈 历史记录", "🔧 系统状态"], label_visibility="collapsed" ) @@ -458,6 +458,14 @@ def main(): st.sidebar.markdown("---") # 根据选择的页面渲染不同内容 + if page == "📰 新闻概况": + try: + from modules.news_display import render_news_display + render_news_display() + except ImportError as e: + st.error(f"新闻概况模块加载失败: {e}") + st.info("请确保已安装所有依赖包") + return if page == "⚙️ 配置管理": try: from modules.config_management import render_config_management diff --git a/web/modules/news_display.py b/web/modules/news_display.py new file mode 100644 index 000000000..0b653d2b1 --- /dev/null +++ b/web/modules/news_display.py @@ -0,0 +1,255 @@ +import streamlit as st +import os +import json +import hashlib +from datetime import datetime, timedelta +import akshare as ak +import pandas as pd +from typing import List, Dict, Any + +# ====================================================================== +# 1. 配置与样式 +# ====================================================================== +st.set_page_config(page_title="市场动态看板", layout="wide") + +def load_css(): + st.markdown(""" + + """, unsafe_allow_html=True) + +# ====================================================================== +# 2. 
数据获取与处理 +# ====================================================================== + +@st.cache_resource +def get_news_fetcher(): + return NewsFetcher() + +class NewsFetcher: + def __init__(self, save_dir="data/news"): + self.save_dir = save_dir + os.makedirs(self.save_dir, exist_ok=True) + self.news_hashes = self._load_existing_hashes() + def _load_existing_hashes(self) -> set: + hashes = set() + today = datetime.now() + for i in range(3): + filename = self._get_news_filename(today - timedelta(days=i)) + if os.path.exists(filename): + try: + with open(filename, 'r', encoding='utf-8') as f: + news_data = json.load(f) + for item in news_data: + h = item.get("hash") or self._calculate_hash(item.get("content", "")) + hashes.add(h) + except (json.JSONDecodeError, IOError): + continue + return hashes + def _calculate_hash(self, content: str) -> str: + return hashlib.md5(str(content).encode('utf-8')).hexdigest() + def _get_news_filename(self, date: datetime = None) -> str: + dt_str = (date or datetime.now()).strftime('%Y%m%d') + return os.path.join(self.save_dir, f"news_{dt_str}.json") + def fetch_and_save(self) -> bool: + try: + stock_info_global_cls_df = ak.stock_info_global_cls(symbol="全部") + except Exception as e: + st.error(f"获取新闻时出错: {e}") + return False + if stock_info_global_cls_df.empty: return True + news_list = [] + for _, row in stock_info_global_cls_df.iterrows(): + content = str(row.get("内容", "")) + content_hash = self._calculate_hash(content) + if content_hash in self.news_hashes: continue + self.news_hashes.add(content_hash) + pub_date = str(row.get("发布日期", "")) + pub_time = str(row.get("发布时间", "")) + news_item = { + "title": str(row.get("标题", "")), + "content": content, + "datetime": f"{pub_date} {pub_time}", + "hash": content_hash, + } + news_list.append(news_item) + if not news_list: return True + filename = self._get_news_filename() + existing_data = [] + if os.path.exists(filename): + try: + with open(filename, 'r', encoding='utf-8') as f: + 
existing_data = json.load(f) + except (json.JSONDecodeError, IOError): pass + merged_news = sorted(existing_data + news_list, key=lambda x: x.get('datetime', '0'), reverse=True) + with open(filename, 'w', encoding='utf-8') as f: + json.dump(merged_news, f, ensure_ascii=False, indent=2) + return True + def get_latest_news(self, days: int = 1) -> List[Dict[str, Any]]: + news_data = [] + today = datetime.now() + for i in range(days): + filename = self._get_news_filename(today - timedelta(days=i)) + if os.path.exists(filename): + try: + with open(filename, 'r', encoding='utf-8') as f: + news_data.extend(json.load(f)) + except (json.JSONDecodeError, IOError): continue + seen_hashes = set() + unique_news = [] + for item in sorted(news_data, key=lambda x: x.get('datetime', '0'), reverse=True): + item_hash = item.get('hash') + if item_hash not in seen_hashes: + unique_news.append(item) + seen_hashes.add(item_hash) + return unique_news + +@st.cache_data(ttl=600) +def get_fund_flow_data(data_type: str) -> pd.DataFrame: + try: + if data_type == "industry": + return ak.stock_fund_flow_industry() + elif data_type == "concept": + return ak.stock_fund_flow_concept() + else: + return pd.DataFrame() + except Exception as e: + st.error(f"获取 {data_type} 资金流数据失败: {e}", icon="🚨") + return pd.DataFrame() + +# ====================================================================== +# 3. UI 渲染函数 +# ====================================================================== + +def display_news_timeline(news_list: List[Dict[str, Any]], limit: int): + st.header("实时新闻") + if not news_list: + st.info("暂无新闻数据。请尝试调整侧边栏天数。") + return + + for news in news_list[:limit]: + try: + dt_obj = datetime.strptime(news['datetime'], "%Y-%m-%d %H:%M:%S") + time_str = dt_obj.strftime("%H:%M") + except (ValueError, KeyError): + time_str = "N/A" + + col_time, col_content = st.columns([1, 9]) + with col_time: + st.markdown(f'
<div class="news-time">{time_str}</div>
', unsafe_allow_html=True) + with col_content: + st.markdown(f"{news.get('title', '无标题')}
", unsafe_allow_html=True) + st.markdown(f"{news.get('content', '无内容')}
", unsafe_allow_html=True) + + st.divider() + +def display_fund_flow_card(title: str, df: pd.DataFrame, name_col: str, value_col: str): + html_parts = [f'暂无数据
") + else: + if value_col in df.columns: + df_sorted = df.sort_values(by=value_col, ascending=False).head(15) + else: + df_sorted = df.head(15) + st.warning(f"资金流数据中未找到列 '{value_col}' 用于排序。") + for _, row in df_sorted.iterrows(): + name = row.get(name_col, '未知') + value = row.get(value_col, 'N/A') + value_str = f"{value}亿" if isinstance(value, (int, float)) else "N/A" + html_parts.append( + f'