# NOTE(review): recovered from a whitespace-mangled git patch; the original
# inline CSS and some HTML attributes were lost in transit and have been
# reconstructed to match the class names used below — confirm against design.
"""Changelog page for the Scrape-ML Streamlit web app.

Fetches merged pull requests, assigned open issues, and the contributor
count from the GitHub REST API and renders them as a changelog dashboard.
"""

import streamlit as st
import requests
from datetime import datetime
import pandas as pd  # kept: imported by the original module

# GitHub REST API root for the Scrape-ML repository.
REPO_URL = "https://api.github.com/repos/recodehive/Scrape-ML"

# Fail fast instead of hanging the page forever on a slow/unreachable API.
REQUEST_TIMEOUT = 10
# GitHub paginates at 30 items by default; 100 is the maximum page size.
PER_PAGE = 100


def fetch_repo_statistics():
    """Return headline repository stats as a dict of integer counts."""
    return {
        "total_prs": fetch_closed_pr_count(),
        "total_projects": fetch_total_projects(),
        "total_contributors": fetch_contributors_count(),
    }


def fetch_closed_pr_count():
    """Count closed PRs by walking the paginated ``/pulls`` endpoint."""
    base = f"{REPO_URL}/pulls?state=closed&per_page={PER_PAGE}"
    count = 0
    page = 1
    while True:
        try:
            response = requests.get(f"{base}&page={page}", timeout=REQUEST_TIMEOUT)
        except requests.RequestException:
            break  # network error: report the pages we did manage to count
        if response.status_code != 200 or not response.json():
            break
        count += len(response.json())
        page += 1
    return count


def fetch_total_projects():
    """Placeholder until a real project-count source exists."""
    return 0


def fetch_closed_prs():
    """Return merged PRs as dicts: title, url, merge date, author, avatar."""
    base = f"{REPO_URL}/pulls?state=closed&per_page={PER_PAGE}"
    merged = []
    page = 1
    while True:
        try:
            response = requests.get(f"{base}&page={page}", timeout=REQUEST_TIMEOUT)
        except requests.RequestException:
            break  # network error: return what was collected so far
        if response.status_code != 200 or not response.json():
            break
        for pr in response.json():
            # "closed" includes rejected PRs; only merged ones belong here.
            if pr["merged_at"]:
                merged.append(
                    {
                        "title": pr["title"],
                        "url": pr["html_url"],
                        "date": pr["merged_at"],
                        "user": pr["user"]["login"],
                        "avatar_url": pr["user"]["avatar_url"],
                    }
                )
        page += 1
    return merged


def fetch_upcoming_issues():
    """Return open, assigned issues (candidate upcoming work)."""
    upcoming = []
    try:
        response = requests.get(
            f"{REPO_URL}/issues?state=open", timeout=REQUEST_TIMEOUT
        )
    except requests.RequestException:
        return upcoming
    if response.status_code == 200:
        for issue in response.json():
            # BUGFIX: GitHub's /issues endpoint also returns pull requests;
            # skip them so only real issues are listed as upcoming work.
            if "pull_request" in issue:
                continue
            if issue.get("assignee"):
                upcoming.append(
                    {
                        "title": issue["title"],
                        "url": issue["html_url"],
                        "date": issue["created_at"],
                        "assignee": issue["assignee"]["login"],
                        "avatar_url": issue["assignee"]["avatar_url"],
                    }
                )
    return upcoming


def fetch_contributors_count():
    """Return the number of contributors, or 0 on any API failure."""
    try:
        response = requests.get(f"{REPO_URL}/contributors", timeout=REQUEST_TIMEOUT)
    except requests.RequestException:
        return 0
    return len(response.json()) if response.status_code == 200 else 0


# ---------------------------------------------------------------------------
# Page setup
# ---------------------------------------------------------------------------
st.set_page_config(
    page_title="Changelog - Scrape ML",
    page_icon="📝",
    layout="wide",
    initial_sidebar_state="expanded",
)

# Custom CSS for the dashboard cards and timeline.
# NOTE(review): original CSS was lost in the mangled patch; reconstructed.
st.markdown(
    """
<style>
.gradient-title {
    background: linear-gradient(90deg, #7b2ff7, #f107a3);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    font-size: 2.5rem;
    font-weight: 700;
    text-align: center;
}
.stat-card {
    background: #1e1e2f;
    border-radius: 12px;
    padding: 1.25rem;
    text-align: center;
}
.stat-number { font-size: 2rem; font-weight: 700; color: #7b2ff7; }
.stat-label { color: #aaa; }
.timeline-item {
    border-left: 3px solid #7b2ff7;
    padding: 0.5rem 1rem;
    margin-bottom: 0.75rem;
}
.timeline-date { color: #888; font-size: 0.85rem; }
.feature-card {
    background: #1e1e2f;
    border-radius: 12px;
    padding: 1rem;
    margin-bottom: 0.5rem;
}
.footer { text-align: center; padding: 1rem; }
</style>
""",
    unsafe_allow_html=True,
)

# Title with gradient text.
st.markdown(
    """
<div class="gradient-title">
    Scrape ML Changelog 📝
</div>
""",
    unsafe_allow_html=True,
)

# Fetch all remote data up front so rendering below is purely local.
repo_stats = fetch_repo_statistics()
closed_prs = fetch_closed_prs()
upcoming_issues = fetch_upcoming_issues()

# --- Stats dashboard -------------------------------------------------------
st.markdown("### Project Statistics")
cols = st.columns(4)

with cols[0]:
    st.markdown(
        f"""
<div class="stat-card">
    <div class="stat-number">{repo_stats["total_prs"]}</div>
    <div class="stat-label">PRs Merged</div>
</div>
""",
        unsafe_allow_html=True,
    )

with cols[1]:
    st.markdown(
        f"""
<div class="stat-card">
    <div class="stat-number">{repo_stats["total_contributors"]}</div>
    <div class="stat-label">Contributors</div>
</div>
""",
        unsafe_allow_html=True,
    )

# --- Timeline --------------------------------------------------------------
st.markdown("### Recent Activity Timeline")

for pr in closed_prs[:5]:  # show only the 5 most recent merged PRs
    # GitHub timestamps are ISO-8601 UTC, e.g. "2024-10-24T03:43:42Z".
    date = datetime.strptime(pr["date"], "%Y-%m-%dT%H:%M:%SZ").strftime("%B %d, %Y")
    st.markdown(
        f"""
<div class="timeline-item">
    <img src="{pr['avatar_url']}" width="24" style="border-radius:50%;">
    <strong>{pr['user']}</strong> merged PR:
    <a href="{pr['url']}" target="_blank">{pr['title']}</a>
    <div class="timeline-date">{date}</div>
</div>
""",
        unsafe_allow_html=True,
    )

# --- Upcoming features -----------------------------------------------------
st.markdown("### 🚀 Upcoming Features")
cols = st.columns(3)

upcoming_features = [
    {
        "title": "Personalized Watchlist",
        "progress": 75,
        "desc": "Implementation of user preferences-based watchlist",
    },
    {
        "title": "External DB Integration",
        "progress": 45,
        "desc": "Integration with external movie databases",
    },
    {
        "title": "Advanced Filtering",
        "progress": 30,
        "desc": "Enhanced search and filter capabilities",
    },
]

for idx, feature in enumerate(upcoming_features):
    with cols[idx]:
        st.markdown(
            f"""
<div class="feature-card">
    <strong>{feature['title']}</strong>
    <p>{feature['desc']}</p>
</div>
""",
            unsafe_allow_html=True,
        )
        st.progress(feature["progress"] / 100)

# --- Footer ----------------------------------------------------------------
st.markdown("---")
st.markdown(
    """
<div class="footer">
    <strong>About Scrape ML</strong>
    <p>Scrape ML is a robust web scraping tool designed to simplify the extraction of data from various online sources.
    With its user-friendly interface and powerful features, Scrape ML allows users to collect, organize,
    and analyze data seamlessly. Ideal for developers, data scientists, and anyone interested in leveraging
    web data for their projects.</p>
    <p>Made with 💜 by the Scrape ML Team</p>
</div>
""",
    unsafe_allow_html=True,
)