import sys
import time
import pytz
from datetime import datetime
from utils import get_daily_papers_by_keyword_with_retries, generate_table, back_up_files, \
    restore_files, remove_backups, get_daily_date
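
# Assumed contracts for the utils helpers, inferred from how they are used below
# (the actual implementations live in utils.py and are not shown in this file):
#   get_daily_papers_by_keyword_with_retries(keyword, columns, max_result, link)
#       -> list of per-paper dicts keyed by the requested columns, or None if the
#          query keeps failing after retries
#   generate_table(papers, ignore_keys=[]) -> Markdown table string
#   back_up_files() / restore_files() / remove_backups()
#       -> snapshot, roll back, and clean up README.md and ISSUE_TEMPLATE.md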
beijing_timezone = pytz.timezone('Asia/Shanghai')
# NOTE: the arXiv API sometimes returns an unexpected empty list.

# Get the current Beijing-time date in the format "2021-08-01".
current_date = datetime.now(beijing_timezone).strftime("%Y-%m-%d")
# Get the last update date from README.md.
with open("README.md", "r") as f:
    while True:
        line = f.readline()
        if not line:  # reached EOF without finding the marker; avoid an infinite loop
            sys.exit("No 'Last update:' line found in README.md!")
        if "Last update:" in line:
            break
    last_update_date = line.split(": ")[1].strip()

if last_update_date == current_date:
    sys.exit("Already updated today!")

# keywords = ["DNA", "Sequences", "Large", "Foundation", "DNA Sequences", "DNA Language Model", "Genomic Language Model"]  # TODO: add more keywords
keywords = ["DNA", "Sequences", "Large", "Foundation"]

max_result = 100    # maximum number of query results from the arXiv API per keyword
issues_result = 15  # maximum number of papers to include in the issue

# all columns: Title, Authors, Abstract, Link, Tags, Comment, Date
# fixed_columns = ["Title", "Link", "Date"]
column_names = ["Title", "Link", "Abstract", "Date", "Comment"]
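
# Illustrative shape of a single paper entry as consumed by generate_table below;
# the values here are made up, the real dicts come from the arXiv query in utils:
# {"Title": "A DNA Language Model", "Link": "https://arxiv.org/abs/2401.00001",
#  "Abstract": "...", "Date": "2024-01-01", "Comment": "10 pages"}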
back_up_files() # back up README.md and ISSUE_TEMPLATE.md
# write to README.md
f_rm = open("README.md", "w") # file for README.md
f_rm.write("# Daily Papers\n")
f_rm.write("The project automatically fetches the latest papers from arXiv based on keywords.\n\nThe subheadings in the README file represent the search keywords.\n\nOnly the most recent articles for each keyword are retained, up to a maximum of 100 papers.\n\nYou can click the 'Watch' button to receive daily email notifications.\n\nLast update: {0}\n\n".format(current_date))
# write to ISSUE_TEMPLATE.md
f_is = open(".github/ISSUE_TEMPLATE.md", "w") # file for ISSUE_TEMPLATE.md
f_is.write("---\n")
f_is.write("title: Latest {0} Papers - {1}\n".format(issues_result, get_daily_date()))
f_is.write("labels: documentation\n")
f_is.write("---\n")
f_is.write("**Please check the [Github](https://github.com/ychuest/BioArXiv) page for a better reading experience and more papers.**\n\n")
for keyword in keywords:
    f_rm.write("## {0}\n".format(keyword))
    f_is.write("## {0}\n".format(keyword))
    # For a single-word keyword, require a match in both the title and the abstract ("AND");
    # for a multi-word keyword, accept a match in either field ("OR").
    if len(keyword.split()) == 1:
        link = "AND"
    else:
        link = "OR"
    papers = get_daily_papers_by_keyword_with_retries(keyword, column_names, max_result, link)
    if papers is None:  # failed to get papers even after retries
        print("Failed to get papers!")
        f_rm.close()
        f_is.close()
        restore_files()  # roll back README.md and ISSUE_TEMPLATE.md from the backups
        sys.exit("Failed to get papers!")
    rm_table = generate_table(papers)
    # The issue keeps only the newest issues_result papers and drops the long Abstract column.
    is_table = generate_table(papers[:issues_result], ignore_keys=["Abstract"])
    f_rm.write(rm_table)
    f_rm.write("\n\n")
    f_is.write(is_table)
    f_is.write("\n\n")
    time.sleep(5)  # pause between keywords to avoid being blocked by the arXiv API

f_rm.close()
f_is.close()
remove_backups()
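
# Presumably this script is driven by a scheduled CI job (e.g., a GitHub Actions
# cron workflow) so the README and issue are refreshed once per day; the workflow
# definition is not part of this file, so that scheduling detail is an assumption.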