├── academics_scholar_scraper ├── __init__.py ├── openai_summarizer.py └── academics_scholar_scraper.py ├── LICENSE ├── README.md └── .gitignore /academics_scholar_scraper/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .academics_scholar_scraper import main, conduct_litreview -------------------------------------------------------------------------------- /academics_scholar_scraper/openai_summarizer.py: -------------------------------------------------------------------------------- 1 | import openai 2 | import os 3 | 4 | MAX_TOKENS = 512 5 | 6 | openai.api_key = os.environ.get("OPENAI_API_KEY") 7 | if not openai.api_key: 8 | raise ValueError("Environment variable OPENAI_API_KEY not set") 9 | 10 | def generate_summary(prompt): 11 | response = openai.ChatCompletion.create( 12 | model="gpt-4", # Replace with the GPT-4 model name when it is available 13 | messages=[ 14 | { 15 | "role": "system", 16 | "content": "You are a research assistant with expertise in conducting literature reviews." 
17 | }, 18 | {"role": "user", "content": prompt} 19 | ], 20 | max_tokens=MAX_TOKENS, 21 | n=1, 22 | stop=None, 23 | temperature=0.7, 24 | ) 25 | 26 | message_with_summary = response['choices'][0]['message']['content'] 27 | return message_with_summary -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 The Academic’s Field Guide to Writing Code 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Scholarly Article Summarizer 2 | 3 | This Python script allows you to search for scholarly articles using the Elsevier Scopus API and generate summaries of the articles using OpenAI's GPT-3 model. 

## Installation

1. Install the required Python packages using `pip`:

   ```
   pip install requests tqdm openai
   ```

   (`argparse` and `concurrent.futures` are part of Python's standard library and must not be installed from PyPI — the PyPI `concurrent.futures` package is a Python 2 backport that breaks on Python 3.)

2. Set up your Elsevier API key and OpenAI API key as environment variables:

   ```
   export ELSEVIER_API_KEY=your_elsevier_api_key
   export OPENAI_API_KEY=your_openai_api_key
   ```

## Usage

Run the script from the command line with the following arguments:

- `keyword`: The keyword to search for in the articles.
- `-n`, `--num_papers`: The number of papers to retrieve (default: 10).
- `-o`, `--output`: The output CSV file (default: papers.csv).
- `-s`, `--subject`: The subject area (e.g., AGRI, ARTS, BIOC, etc.) (optional).

Example:

```
python -m academics_scholar_scraper.academics_scholar_scraper "machine learning" -n 10 -o results.csv -s COMP
```

This will search for 10 papers related to machine learning in the computer science subject area and save the summaries in the `results.csv` file.

## Output

The output CSV file will contain the following columns:

- Title: The title of the paper.
- Authors: The authors of the paper.
- Publication Name: The name of the publication where the paper was published.
- Publication Date: The date when the paper was published.
- DOI: The DOI of the paper.
- Summary: A summary of the paper generated by the GPT-4 model.
- Hypotheses: Hypotheses in the paper as interpreted by the GPT-4 model.
- Methods: Methods used in the paper as interpreted by the GPT-4 model.
- Findings: Findings in the paper as interpreted by the GPT-4 model.

## Note

This script uses GPT-4 to generate summaries, which may not always be perfectly accurate. Use the generated summaries as a starting point for further investigation and always refer to the original articles for accurate information.
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 
92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /academics_scholar_scraper/academics_scholar_scraper.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import requests 4 | import json 5 | import csv 6 | from tqdm import tqdm 7 | from concurrent.futures import ThreadPoolExecutor 8 | from .openai_summarizer import generate_summary 9 | 10 | API_KEY = os.environ.get("ELSEVIER_API_KEY") 11 | if not API_KEY: 12 | raise ValueError("Environment variable ELSEVIER_API_KEY not set") 13 | 14 | SUMMARY_PROMPT = "can you provide the following for the content of this article? Give one sentence for each of the following: summary, hypotheses, methods, findings. Even if no abstract is provided, use what knowledge is openly available about the author and their works to estimate. There's no need to restate what's been given to you in the prompt. format your output as json." 
15 | NUM_THREADS = 8 16 | 17 | 18 | def parse_args(): 19 | """Parse command-line arguments.""" 20 | parser = argparse.ArgumentParser( 21 | description="Search for scholarly articles using the Elsevier Scopus API" 22 | ) 23 | parser.add_argument("keyword", help="Keyword to search for") 24 | parser.add_argument( 25 | "-n", 26 | "--num_papers", 27 | type=int, 28 | default=10, 29 | help="Number of papers to retrieve (default: 10)", 30 | ) 31 | parser.add_argument( 32 | "-o", 33 | "--output", 34 | type=str, 35 | default="papers.csv", 36 | help="Output CSV file (default: papers.csv)", 37 | ) 38 | parser.add_argument( 39 | "-s", 40 | "--subject", 41 | type=str, 42 | default="", 43 | help="Subject area (e.g., AGRI, ARTS, BIOC, etc.)", 44 | ) 45 | return parser.parse_args() 46 | 47 | 48 | def search_papers(keyword, num_papers, subject): 49 | """ 50 | Search for papers using the Elsevier Scopus API. 51 | 52 | Args: 53 | keyword (str): Keyword to search for. 54 | num_papers (int): Number of papers to retrieve. 55 | subject (str): Subject area (e.g., AGRI, ARTS, BIOC, etc.). 56 | 57 | Returns: 58 | dict: JSON response from the API. 59 | """ 60 | base_url = "https://api.elsevier.com/content/search/scopus" 61 | headers = { 62 | "Accept": "application/json", 63 | "X-ELS-APIKey": API_KEY, 64 | } 65 | query = f'TITLE-ABS-KEY("{keyword}")' 66 | if subject: 67 | query += f" AND SUBJAREA({subject})" 68 | params = { 69 | "query": query, 70 | "count": num_papers, 71 | "view": "STANDARD", 72 | "sort": "citedby-count", # Sort by the number of citations 73 | } 74 | 75 | response = requests.get(base_url, headers=headers, params=params) 76 | 77 | if response.status_code == 200: 78 | return response.json() 79 | else: 80 | raise Exception(f"API request failed with status code {response.status_code}") 81 | 82 | 83 | def process_paper(paper): 84 | """ 85 | Process a paper by generating a summary and updating the paper dictionary. 86 | 87 | Args: 88 | paper (dict): Paper data dictionary. 
89 | 90 | Returns: 91 | dict: Updated paper data dictionary with summary information. 92 | """ 93 | data_dict = { 94 | "Title": paper.get("dc:title", "N/A"), 95 | "Authors": paper.get("dc:creator", "N/A"), 96 | "Publication Name": paper.get("prism:publicationName", "N/A"), 97 | "Publication Date": paper.get("prism:coverDate", "N/A"), 98 | "DOI": paper.get("prism:doi"), 99 | } 100 | 101 | prompt = f"{SUMMARY_PROMPT}\n\n{data_dict}" 102 | summary = generate_summary(prompt) 103 | try: 104 | new_data = json.loads(summary) 105 | data_dict.update(new_data) 106 | except: 107 | Exception() 108 | 109 | return data_dict 110 | 111 | def export_to_csv(paper_data, output_file): 112 | """ 113 | Export paper data to a CSV file. 114 | 115 | Args: 116 | paper_data (dict): Paper data. 117 | output_file (str): Output CSV file path. 118 | """ 119 | fieldnames = [ 120 | "Title", 121 | "Authors", 122 | "Publication Name", 123 | "Publication Date", 124 | "DOI", 125 | "Summary", 126 | "Hypotheses", 127 | "Methods", 128 | "Findings", 129 | ] 130 | with open(output_file, "w", newline="", encoding="utf-8") as csvfile: 131 | writer = csv.DictWriter(csvfile, fieldnames=fieldnames) 132 | writer.writeheader() 133 | 134 | papers = paper_data["search-results"]["entry"] 135 | 136 | with ThreadPoolExecutor(max_workers=NUM_THREADS) as executor: 137 | results = list(tqdm(executor.map(process_paper, papers), desc="Processing papers", total=len(papers))) 138 | 139 | for result in results: 140 | writer.writerow(result) 141 | 142 | 143 | def main(): 144 | """Main function.""" 145 | args = parse_args() 146 | paper_data = search_papers(args.keyword, args.num_papers, args.subject) 147 | export_to_csv(paper_data, args.output) 148 | print(f"Data exported to {args.output}") 149 | 150 | def conduct_litreview(keyword, num_papers, subject, output): 151 | """Conducts a literature review on Google Scholar for the given keyword and subject, 152 | and exports the results to a CSV file with the given filename.""" 153 | 
154 | # Search for papers on Google Scholar 155 | paper_data = search_papers(keyword, num_papers, subject) 156 | 157 | # Export the paper data to a CSV file 158 | # export_to_csv(paper_data, output) 159 | 160 | # Print a message indicating the output filename 161 | # print(f"Data exported to {output}") 162 | return paper_data 163 | 164 | 165 | if __name__ == "__main__": 166 | main() --------------------------------------------------------------------------------