Skip to content

Commit

Permalink
Function to get live matches
Browse files Browse the repository at this point in the history
  • Loading branch information
bkowshik committed Sep 14, 2024
1 parent 1c9324b commit 89a54b4
Show file tree
Hide file tree
Showing 7 changed files with 231 additions and 5 deletions.
3 changes: 2 additions & 1 deletion isl_2024/_modidx.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,4 +8,5 @@
'syms': { 'isl_2024.core': {'isl_2024.core.foo': ('core.html#foo', 'isl_2024/core.py')},
'isl_2024.scrape_live_stats': {},
'isl_2024.scrape_matches': {},
'isl_2024.scrape_wallstream': {}}}
'isl_2024.scrape_wallstream': {},
'isl_2024.utils': {'isl_2024.utils.get_live_matches': ('utils.html#get_live_matches', 'isl_2024/utils.py')}}}
2 changes: 1 addition & 1 deletion isl_2024/scrape_live_stats.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', filename=os.path.join(log_dir, 'scrape_live_stats.log'), filemode='a')

# %% ../nbs/02_scrape_live_stats.ipynb 5
match_id = 66795
match_id = 66796
url = f'https://www.indiansuperleague.com/football/live/india_sl_stats/json/{match_id}.json'
headers = {
'accept': '*/*',
Expand Down
2 changes: 1 addition & 1 deletion isl_2024/scrape_wallstream.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', filename=os.path.join(log_dir, 'scrape_wallstream.log'), filemode='a')

# %% ../nbs/03_scrape_wallstream.ipynb 5
match_id = 66795
match_id = 66796
url = f"https://www.indiansuperleague.com/functions/wallstream/?sport_id=2&client_id=5KEUfrMT/+2lgecJyh42zA==&match_id={match_id}"
headers = {
'accept': '*/*',
Expand Down
65 changes: 65 additions & 0 deletions isl_2024/utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
# AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/00_utils.ipynb.

# %% auto 0
__all__ = ['parent_dir', 'log_dir', 'data_dir', 'get_live_matches']

# %% ../nbs/00_utils.ipynb 2
import warnings
warnings.filterwarnings('ignore')

import json
import logging
import os
import requests
import datetime
import pytz

import pandas as pd

# %% ../nbs/00_utils.ipynb 4
# Resolve the directory this code runs from, working both as an installed
# module and interactively inside a notebook.
try:
    # This will work when running as a script
    script_dir = os.path.dirname(os.path.abspath(__file__))
except NameError:
    # This will work when running in a Jupyter notebook
    # (__file__ is undefined there, which raises NameError above)
    script_dir = os.getcwd()

# Project layout: logs/ and data/ live one level above this package
# directory (i.e. at the repository root).
parent_dir = os.path.abspath(os.path.join(script_dir, os.pardir))
log_dir = os.path.join(parent_dir, 'logs')
data_dir = os.path.join(parent_dir, 'data')

# Create the output directories on first run so later open()/logging
# calls don't fail with FileNotFoundError.
if not os.path.exists(log_dir):
    os.makedirs(log_dir)

if not os.path.exists(data_dir):
    os.makedirs(data_dir)

# Append-mode file logging; every import of this module logs to logs/utils.log.
# NOTE(review): basicConfig is a no-op if logging was already configured by
# an earlier import elsewhere — confirm that is the intended precedence.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', filename=os.path.join(log_dir, 'utils.log'), filemode='a')

# %% ../nbs/00_utils.ipynb 6
def get_live_matches(now=None):
    """Return the ``game_id`` values of matches that are currently live.

    Reads the most recent schedule snapshot from ``data/matches.txt``
    (one JSON document per line; only the last line is used) and treats a
    match as live from 15 minutes before its scheduled start until 60
    minutes after its scheduled end.

    Parameters
    ----------
    now : datetime.datetime, optional
        Reference time for the live-window check. Defaults to the current
        time in Asia/Kolkata, computed at *call* time.

        BUG FIX: the previous signature used
        ``now=datetime.datetime.now(pytz.timezone('Asia/Kolkata'))``,
        which Python evaluates once at import time — so a long-running
        process compared every call against a frozen timestamp. Moving
        the computation inside the function body fixes that.

    Returns
    -------
    list
        game_ids of live matches; empty list when none are live.
    """
    if now is None:
        now = datetime.datetime.now(pytz.timezone('Asia/Kolkata'))

    # Each line of matches.txt is one full scrape; the last line is the
    # freshest snapshot of the season schedule.
    with open(os.path.join(data_dir, 'matches.txt'), 'r') as f:
        logs = f.readlines()

    latest = logs[-1]
    matches = json.loads(latest)['matches']

    # Guard: an empty match list would produce a column-less DataFrame and
    # a KeyError on 'start_at' below.
    if not matches:
        logging.info('Live matches: 0 []')
        return []

    matches_df = pd.DataFrame([
        {
            'start_at': match['start_date'],
            'end_at': match['end_date'],
            'game_id': match['game_id'],
        }
        for match in matches
    ])
    matches_df['start_at'] = pd.to_datetime(matches_df['start_at'])
    matches_df['end_at'] = pd.to_datetime(matches_df['end_at'])

    # Live window: 15-minute pre-match buffer, 60-minute post-match buffer
    # (half-time, stoppage time, delays).
    live_matches = matches_df[
        (now >= matches_df['start_at'] - datetime.timedelta(minutes=15)) &
        (now <= matches_df['end_at'] + datetime.timedelta(minutes=60))
    ]
    game_ids = list(live_matches['game_id'].values)

    # map(str, ...): game_id may arrive as int or str depending on the feed;
    # str.join raises TypeError on ints.
    logging.info('Live matches: {} [{}]'.format(len(game_ids), ', '.join(map(str, game_ids))))
    return game_ids
160 changes: 160 additions & 0 deletions nbs/00_utils.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,160 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#| hide\n",
"#| default_exp utils"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Utils\n",
"\n",
"Utility functions to be used in other scripts."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#| export\n",
"\n",
"import warnings\n",
"warnings.filterwarnings('ignore')\n",
"\n",
"import json\n",
"import logging\n",
"import os\n",
"import requests\n",
"import datetime\n",
"import pytz\n",
"\n",
"import pandas as pd"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#| hide\n",
"from nbdev.showdoc import *"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#| export\n",
"try:\n",
" # This will work when running as a script\n",
" script_dir = os.path.dirname(os.path.abspath(__file__))\n",
"except NameError:\n",
" # This will work when running in a Jupyter notebook\n",
" script_dir = os.getcwd()\n",
"\n",
"parent_dir = os.path.abspath(os.path.join(script_dir, os.pardir))\n",
"log_dir = os.path.join(parent_dir, 'logs')\n",
"data_dir = os.path.join(parent_dir, 'data')\n",
" \n",
"if not os.path.exists(log_dir):\n",
" os.makedirs(log_dir)\n",
"\n",
"if not os.path.exists(data_dir):\n",
" os.makedirs(data_dir)\n",
"\n",
"logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', filename=os.path.join(log_dir, 'utils.log'), filemode='a')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#| export\n",
"def get_live_matches(now = datetime.datetime.now(pytz.timezone('Asia/Kolkata'))):\n",
" with open(os.path.join(data_dir, 'matches.txt'), 'r') as f:\n",
" logs = f.readlines()\n",
"\n",
" latest = logs[-1]\n",
" matches = json.loads(latest)['matches']\n",
"\n",
" matches_df = []\n",
" for match in matches:\n",
" matches_df.append({\n",
" 'start_at': match['start_date'],\n",
" 'end_at': match['end_date'],\n",
" 'game_id': match['game_id'],\n",
" })\n",
"\n",
" matches_df = pd.DataFrame(matches_df)\n",
" matches_df['start_at'] = pd.to_datetime(matches_df['start_at'])\n",
" matches_df['end_at'] = pd.to_datetime(matches_df['end_at'])\n",
"\n",
" live_matches = matches_df[\n",
" (now >= matches_df[\"start_at\"] - datetime.timedelta(minutes = 15)) &\n",
" (now <= matches_df[\"end_at\"] + datetime.timedelta(minutes = 60))\n",
" ]\n",
" game_ids = list(live_matches['game_id'].values)\n",
" logging.info('Live matches: {} [{}]'.format(len(game_ids), ', '.join(game_ids)))\n",
" return(game_ids)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['66795', '66796']"
]
},
"execution_count": null,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#| hide\n",
"get_live_matches()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "python3",
"language": "python",
"name": "python3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
2 changes: 1 addition & 1 deletion nbs/02_scrape_live_stats.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@
"outputs": [],
"source": [
"#| export\n",
"match_id = 66795\n",
"match_id = 66796\n",
"url = f'https://www.indiansuperleague.com/football/live/india_sl_stats/json/{match_id}.json'\n",
"headers = {\n",
" 'accept': '*/*',\n",
Expand Down
2 changes: 1 addition & 1 deletion nbs/03_scrape_wallstream.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@
"outputs": [],
"source": [
"#| export\n",
"match_id = 66795\n",
"match_id = 66796\n",
"url = f\"https://www.indiansuperleague.com/functions/wallstream/?sport_id=2&client_id=5KEUfrMT/+2lgecJyh42zA==&match_id={match_id}\"\n",
"headers = {\n",
" 'accept': '*/*',\n",
Expand Down

0 comments on commit 89a54b4

Please sign in to comment.