├── .gitattributes ├── LICENSE ├── app.py ├── movies_to_search.csv ├── .gitignore ├── requirements.txt ├── utils.py └── README.md /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Wen Jian 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
"""Batch-download movie metadata and export it as Obsidian-ready markdown.

Reads movie titles from movies_to_search.csv, looks each one up on IMDb via
utils.movie_search, saves the raw results (pickle + CSV), then exports them
as individual markdown pages and as one summary markdown table.
"""
import pandas as pd
from alive_progress import alive_bar

from utils import movie_search, create_markdown_page


def main():
    """Run the full search-and-export pipeline."""
    # Import the list of movie names to search (single column: 'Movies')
    to_search = pd.read_csv('movies_to_search.csv')
    my_search = to_search['Movies'].tolist()

    # Look up every title; each lookup hits the network, hence a progress bar
    results = []
    with alive_bar(len(my_search)) as bar:
        for m in my_search:
            results.append(movie_search(m))
            bar()

    myresults = pd.DataFrame(results)

    # Save a raw copy of the results so a failed export does not force
    # re-downloading everything
    myresults.to_pickle('download_results.pkl')
    myresults.to_csv('myresult.csv')

    ## OPTION 1: CREATE INDIVIDUAL MARKDOWN FILES
    create_markdown_page(myresults)

    ## OPTION 2: CREATE A MAIN MARKDOWN PAGE
    # Wrap titles in [[...]] so Obsidian renders them as internal links
    myresults['title'] = '[[' + myresults['title'] + ']]'

    ## OPTION 3: FILTER OUT JUST THE FACTS
    facts = ['title', 'year', 'rating', 'genre', 'country', 'director', 'cast']
    simpleresults = myresults[facts]

    ## SAVE OUT AS TEXT
    with open('movie_main.txt', 'w') as file_out:
        simpleresults.to_markdown(buf=file_out)


# Removed: unused `import pickle` and the notebook-only
# `importlib.reload(utils)` development leftover.
if __name__ == '__main__':
    main()
You Can 16 | Challenger the final flight 17 | The Trial of the Chicago 7 18 | Cloud Atlas 19 | Coach Carter 20 | Coco 21 | "Crack: Cocaine, Corruption & Conspiracy" 22 | Cuba and the Cameraman 23 | Enron: The Smartest Guys in the Room 24 | First Man 25 | Ford vs Ferrari 26 | Forest Gump 27 | Full Metal Jacket 28 | The Game Changers 29 | Gattaca 30 | Gone Girl 31 | Good will hunting 32 | Hacksaw Ridge 33 | Hamilton 34 | Hidden Figures 35 | Inception 36 | Inside the real narcos 37 | Interstellar 38 | Into the Wild 39 | Iron Man 40 | Jiro Dreams of Sushi 41 | Juno 42 | Jurassic Park 43 | La La Land 44 | Les miserables (2012) 45 | Les misérables (2019) 46 | Letters from Iwo Jima 47 | Minari 48 | Minority Report 49 | Music and Lyrics 50 | My Octopus Teacher 51 | Nomadland 52 | Parasite 53 | Pearl Harbour 54 | RBG 55 | Restrepo 56 | Saving Private Ryan 57 | Secret 58 | Shoplifters 59 | Sky Ladder 60 | Straight Outta Compton 61 | Tampopo 62 | Taste of China 63 | A Taxi Driver 64 | The Artist 65 | The Big Short 66 | The Dark Knight 67 | The Florida Project 68 | The Hurt Locker 69 | The Imitation game 70 | Joker 71 | The Man who knew Infinity 72 | The Martian 73 | The Matrix 74 | The Pianist 75 | The Schindler's List 76 | The Truman Show 77 | Ip Man (2008) 78 | Chasing the dragon 79 | The piano in the factory 80 | Farewell my concubine 81 | The Outpost 82 | A sun 83 | The breadwinner 84 | Osama 85 | Tiong Bahru Social Club 86 | I lost my body 87 | Black Coal Thin Ice 88 | 24 City 89 | Silenced 90 | Eternal Sunshine of the spotless mind 91 | Shershaah 92 | 13 Hour 93 | Bottle shock 94 | 1984 95 | Shiva baby 96 | Another round -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | 
build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .nox/ 42 | .coverage 43 | .coverage.* 44 | .cache 45 | nosetests.xml 46 | coverage.xml 47 | *.cover 48 | .hypothesis/ 49 | .pytest_cache/ 50 | 51 | # Translations 52 | *.mo 53 | *.pot 54 | 55 | # Django stuff: 56 | *.log 57 | local_settings.py 58 | db.sqlite3 59 | 60 | # Flask stuff: 61 | instance/ 62 | .webassets-cache 63 | 64 | # Scrapy stuff: 65 | .scrapy 66 | 67 | # Sphinx documentation 68 | docs/_build/ 69 | 70 | # PyBuilder 71 | target/ 72 | 73 | # Jupyter Notebook 74 | .ipynb_checkpoints 75 | 76 | # IPython 77 | profile_default/ 78 | ipython_config.py 79 | 80 | # pyenv 81 | .python-version 82 | 83 | # celery beat schedule file 84 | celerybeat-schedule 85 | 86 | # SageMath parsed files 87 | *.sage.py 88 | 89 | # Environments 90 | .env 91 | .venv 92 | env/ 93 | venv/ 94 | ENV/ 95 | env.bak/ 96 | venv.bak/ 97 | 98 | # Spyder project settings 99 | .spyderproject 100 | .spyproject 101 | 102 | # Rope project settings 103 | .ropeproject 104 | 105 | # mkdocs documentation 106 | /site 107 | 108 | # mypy 109 | .mypy_cache/ 110 | .dmypy.json 111 | dmypy.json 112 | 113 | # Pyre type checker 114 | .pyre/ 115 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | about-time==3.1.1 2 | alive-progress==2.1.0 3 | appnope @ 
file:///Users/runner/miniforge3/conda-bld/appnope_1635819660018/work 4 | argcomplete @ file:///home/conda/feedstock_root/build_artifacts/argcomplete_1619128689661/work 5 | backcall @ file:///home/conda/feedstock_root/build_artifacts/backcall_1592338393461/work 6 | backports.functools-lru-cache @ file:///home/conda/feedstock_root/build_artifacts/backports.functools_lru_cache_1618230623929/work 7 | Bottleneck==1.3.2 8 | certifi==2021.10.8 9 | debugpy @ file:///Users/runner/miniforge3/conda-bld/debugpy_1636043402768/work 10 | decorator @ file:///home/conda/feedstock_root/build_artifacts/decorator_1631346842025/work 11 | entrypoints @ file:///home/conda/feedstock_root/build_artifacts/entrypoints_1605121927639/work/dist/entrypoints-0.3-py2.py3-none-any.whl 12 | grapheme==0.6.0 13 | greenlet==1.1.2 14 | IMDbPY==2021.4.18 15 | importlib-metadata @ file:///Users/runner/miniforge3/conda-bld/importlib-metadata_1636431629957/work 16 | ipykernel @ file:///Users/runner/miniforge3/conda-bld/ipykernel_1636998649625/work/dist/ipykernel-6.5.0-py3-none-any.whl 17 | ipython @ file:///Users/runner/miniforge3/conda-bld/ipython_1637978887333/work 18 | jedi @ file:///Users/runner/miniforge3/conda-bld/jedi_1637175421629/work 19 | jupyter-client @ file:///home/conda/feedstock_root/build_artifacts/jupyter_client_1637611911738/work 20 | jupyter-core @ file:///Users/runner/miniforge3/conda-bld/jupyter_core_1636814373394/work 21 | lxml==4.6.4 22 | matplotlib-inline @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-inline_1631080358261/work 23 | mdutils==1.3.1 24 | mkl-fft==1.3.1 25 | mkl-random @ file:///opt/concourse/worker/volumes/live/133f1d0b-8fd8-4fa0-679e-7214401cbd75/volume/mkl_random_1626186083386/work 26 | mkl-service==2.4.0 27 | nest-asyncio @ file:///home/conda/feedstock_root/build_artifacts/nest-asyncio_1617163391303/work 28 | numexpr @ file:///opt/concourse/worker/volumes/live/6bbc9b0e-ade0-40db-6ee6-74c979385fbb/volume/numexpr_1618856531941/work 29 | numpy @ 
file:///opt/concourse/worker/volumes/live/046e897c-3622-47d5-79cf-02f72d341f0d/volume/numpy_and_numpy_base_1634106712173/work 30 | pandas==1.3.4 31 | parso @ file:///home/conda/feedstock_root/build_artifacts/parso_1617148930513/work 32 | pexpect @ file:///home/conda/feedstock_root/build_artifacts/pexpect_1602535608087/work 33 | pickleshare @ file:///home/conda/feedstock_root/build_artifacts/pickleshare_1602536217715/work 34 | prompt-toolkit @ file:///home/conda/feedstock_root/build_artifacts/prompt-toolkit_1636045889479/work 35 | ptyprocess @ file:///home/conda/feedstock_root/build_artifacts/ptyprocess_1609419310487/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl 36 | Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1629119114968/work 37 | python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1626286286081/work 38 | pytz==2021.3 39 | pyzmq @ file:///Users/runner/miniforge3/conda-bld/pyzmq_1635877593895/work 40 | six @ file:///home/conda/feedstock_root/build_artifacts/six_1620240208055/work 41 | SQLAlchemy==1.4.27 42 | tabulate==0.8.9 43 | tornado @ file:///Users/runner/miniforge3/conda-bld/tornado_1635819700605/work 44 | traitlets @ file:///home/conda/feedstock_root/build_artifacts/traitlets_1635260543454/work 45 | typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/typing_extensions_1637155965157/work 46 | wcwidth @ file:///home/conda/feedstock_root/build_artifacts/wcwidth_1600965781394/work 47 | zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1633302054558/work 48 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | 2 | from imdb import IMDb 3 | from mdutils.mdutils import MdUtils 4 | 5 | # Write a function to get intended results given a movie title 6 | def movie_search(my_title): 7 | """ 8 | Given a string (movie title), find the metadata for that movie 
def movie_search(my_title):
    """
    Given a string (movie title), find the metadata for that movie (or the
    closest one matching that name).

    Parameters
    ----------
    my_title : string
        Title to search on IMDb; the first search hit is used.

    Returns
    -------
    dict
        Metadata with keys: title, plot, genre, country, director, cast,
        rating, year, kind, 'cover url', synopsis.  Missing fields are ''
        (rating falls back to 'N/A').  All keys are always present so a
        DataFrame built from many results has consistent columns — the
        original not-found branch omitted 'cast'.
    """
    # create a dictionary to store data
    information = dict()

    # create an instance of the IMDb class
    ia = IMDb()

    # search for a movie (network call)
    lookups = ia.search_movie(my_title)

    # use the first search result to return the id
    try:
        # IndexError here means no movies were found; Exception is kept as
        # a boundary so one bad lookup cannot abort a whole batch run
        movie_id = lookups[0].movieID

        # get a movie (second network call for the full record)
        movie = ia.get_movie(movie_id)

        # RETRIEVE DATA
        # Get the movie title searched for
        t = movie['title']
        information['title'] = t
        print(f'Retrieving data for {t}')

        # Get the plot outline; fall back to the first full plot entry
        try:
            information['plot'] = movie['plot outline']
        except KeyError:
            print('plot outline not available')
            try:
                information['plot'] = movie['plot'][0]
            except (KeyError, IndexError):
                print('plot also not available')
                information['plot'] = ''

        # Get the genre
        try:
            information['genre'] = ','.join(movie['genre'])
        except KeyError:
            print('no information on genre')
            information['genre'] = ''

        # Get the country
        try:
            information['country'] = ','.join(movie['countries'])
        except KeyError:
            print('no information on country')
            information['country'] = ''

        # Get the director
        # Extract the names in each Person object
        try:
            director_names = [d['name'] for d in movie['directors']]
            information['director'] = ','.join(director_names)
        except KeyError:
            print('director information not available')
            information['director'] = ''

        # Get the cast
        # Extract the top 5 names in cast (if it exists)
        try:
            cast_names = [d['name'] for d in movie['cast']]
            if len(cast_names) > 5:
                cast_names = cast_names[0:5]
            information['cast'] = ','.join(cast_names)
        except KeyError:
            print('No information on cast')
            information['cast'] = ''

        # Get the rating
        try:
            information['rating'] = movie['rating']
        except KeyError:
            print('rating info is not available')
            information['rating'] = 'N/A'

        # Get the year
        try:
            information['year'] = movie['year']
        except KeyError:
            print('no information on year')
            information['year'] = ''

        # Get the type (movie / tv series / ...)
        try:
            information['kind'] = movie['kind']
        except KeyError:
            print('type not available')
            information['kind'] = ''

        # Get the cover url
        try:
            information['cover url'] = movie['cover url']
        except KeyError:
            print('cover url not available')
            information['cover url'] = ''

        # Get synopsis (stored as a list; take the first entry)
        try:
            information['synopsis'] = movie['synopsis'][0]
        except (KeyError, IndexError):
            print('no synopsis')
            information['synopsis'] = ''
    except Exception:
        # Fix: the original printed the literal '{my_title}' (missing f-prefix)
        print(f'NO RESULTS FOUND FOR {my_title}')
        information['title'] = my_title
        information['plot'] = ''
        information['genre'] = ''
        information['country'] = ''
        information['cover url'] = ''
        information['director'] = ''
        # Fix: 'cast' was missing from this fallback, giving inconsistent
        # dict keys (and a ragged DataFrame) for failed lookups
        information['cast'] = ''
        information['kind'] = ''
        information['rating'] = ''
        information['synopsis'] = ''
        information['year'] = ''

    return information
def create_markdown_page(myresults):
    """
    Given a dataframe containing all the information about movies,
    create one markdown page per movie in the working directory.

    Parameters
    ----------
    myresults : DataFrame
        Contains essential headers: title, year, rating, genre, country,
        director, cast, synopsis and plot.
    """
    # Create iterations
    for index, row in myresults.iterrows():
        # Create a file named after the title.
        # NOTE(review): titles containing filesystem-reserved characters
        # (e.g. '/', ':') will yield invalid file names — confirm and
        # sanitize upstream if this ever fails.
        mdFile = MdUtils(file_name=row['title'])

        # Create a metadata section ('---' fences, read by Obsidian)
        mdFile.new_line(text='---\n')
        facts = ['year', 'rating', 'genre', 'country', 'director', 'cast']
        for f in facts:
            mdFile.new_line(text=f'{f.title()}: {row[f]}')
        mdFile.new_line(text='Type: Review')
        mdFile.new_line(text='')
        mdFile.new_line(text='---\n')

        # Create a title
        mdFile.new_header(level=1, title=row['title'])

        # Create a cover
        # i could use new_inline_image but the text wraps at 20 characters
        # creating a break. Hence, I code my own image line and unwrap it
        cover_link = '![' + row['title'] + '](' + row['cover url'] + ')'
        mdFile.write(text=cover_link, wrap_width=0)

        # Create a section for the plot
        mdFile.new_header(level=1, title="Plot")

        # Clean up plot text: collapse whitespace, then drop backslash-space
        # sequences left in IMDb text.
        # Fix: the original wrote '\ ', an invalid escape sequence
        # (DeprecationWarning today, SyntaxError in future Pythons);
        # '\\ ' denotes the same two characters unambiguously.
        plot_text = row['plot']
        plot_text = ' '.join(plot_text.split())
        plot_text = plot_text.replace('\\ ', '')
        mdFile.new_line(text=plot_text, wrap_width=0)

        # Create my own review section
        mdFile.new_header(level=1, title="My own thoughts")

        # Create markdown (writes <title>.md to disk)
        mdFile.create_md_file()
The script makes use of the [`imdb` package](https://imdbpy.github.io/) to download the metadata from IMDB. This does not require you to have an IMDB api account. You can check out their documentations on what data is available to adjust as required. I created a function that allows me to down the information regarding: 10 | - title 11 | - plot 12 | - genre 13 | - country 14 | - cover url 15 | - director 16 | - kind 17 | - rating 18 | - synopsis 19 | - year 20 | 21 | ```python 22 | from imdb import IMDb 23 | 24 | def movie_search(my_title): 25 | """ 26 | Given a string (movie title), find the metadata for that movie (or the closest 27 | one matching that name). 28 | 29 | Parameters 30 | ---------- 31 | my_title : string 32 | 33 | returns a dictionary of metadata 34 | """ 35 | # create a dictionary to store data 36 | information = dict() 37 | 38 | # create an instance of the IMDb class 39 | ia = IMDb() 40 | 41 | # search for a movie 42 | lookups = ia.search_movie(my_title) 43 | 44 | # use the first search result to return the id 45 | try: 46 | # Test if any movies are found 47 | movie_id = lookups[0].movieID 48 | 49 | # get a movie 50 | movie = ia.get_movie(movie_id) 51 | 52 | # RETRIEVE DATA 53 | # Get the movie title searched for 54 | t = movie['title'] 55 | information['title'] = t 56 | print(f'Retrieving data for {t}') 57 | 58 | # Get the plot outline 59 | try: 60 | information['plot'] = movie['plot outline'] 61 | except: 62 | print('plot outline not available') 63 | try: 64 | information['plot'] = movie['plot'][0] 65 | except: 66 | print('plot also not available') 67 | information['plot'] = '' 68 | 69 | # Get the genre 70 | try: 71 | information['genre'] = ','.join(movie['genre']) 72 | except: 73 | print('no information on genre') 74 | information['genre'] = '' 75 | 76 | # Get the country 77 | try: 78 | information['country'] = ','.join(movie['countries']) 79 | except: 80 | print('no information on country') 81 | information['country'] = '' 82 | 83 | # Get the 
director 84 | # Extract the names in each Person object 85 | try: 86 | director_names = [d['name'] for d in movie['directors']] 87 | information['director'] = ','.join(director_names) 88 | except: 89 | print('director information not available') 90 | information['director'] = '' 91 | 92 | # Get the cast 93 | # Extract the top 5 names in cast (if it exists) 94 | try: 95 | cast_names = [d['name'] for d in movie['cast']] 96 | if len(cast_names) >5: 97 | cast_names = cast_names[0:5] 98 | information['cast'] = ','.join(cast_names) 99 | except: 100 | print('No information on cast') 101 | information['cast'] = '' 102 | 103 | # Get the rating 104 | try: 105 | information['rating'] = movie['rating'] 106 | except: 107 | print('rating info is not available') 108 | information['rating'] = 'N/A' 109 | 110 | # Get the year 111 | try: 112 | information['year'] = movie['year'] 113 | except: 114 | print('no information on year') 115 | information['year'] = '' 116 | 117 | # Get the type 118 | try: 119 | information['kind'] = movie['kind'] 120 | except: 121 | print('type not available') 122 | information['kind'] = '' 123 | 124 | # Get the cover url 125 | try: 126 | information['cover url'] = movie['cover url'] 127 | except: 128 | print('cover url not available') 129 | information['cover url'] = '' 130 | 131 | # Get sypnosis 132 | try: 133 | information['synopsis'] = movie['synopsis'][0] 134 | except: 135 | print('no synopsis') 136 | information['synopsis'] = '' 137 | except: 138 | print('NO RESULTS FOUND FOR {my_title}') 139 | information['title'] = my_title 140 | information['plot'] = '' 141 | information['genre'] = '' 142 | information['country'] = '' 143 | information['cover url'] = '' 144 | information['director'] = '' 145 | information['kind'] = '' 146 | information['rating'] = '' 147 | information['synopsis'] = '' 148 | information['year'] = '' 149 | 150 | return(information) 151 | ``` 152 | 153 | 3. 
Obsidian reads markdown pages so I automate the creation of markdown pages using python using [mdutils](https://pypi.org/project/mdutils/). I create a function to automate the creation of a standard template filled in with the new metadata: 154 | 155 | ```python 156 | def create_markdown_page(myresults): 157 | """ 158 | Given a dataframe containing all the information about movies, 159 | we can create individual pages 160 | 161 | Parameters 162 | ---------- 163 | myresults : DataFrame 164 | Contains essential headers: title, year, rating, genre, country, director 165 | cast, synopsis and plot 166 | """ 167 | # Create iterations 168 | for index, row in myresults.iterrows(): 169 | # Create a file 170 | mdFile = MdUtils(file_name=row['title']) 171 | 172 | # Create a metadata section 173 | mdFile.new_line(text='---\n') 174 | facts = ['year', 'rating','genre', 'country', 'director', 'cast'] 175 | for f in facts: 176 | mdFile.new_line(text=f'{f.title()}: {row[f]}') 177 | mdFile.new_line(text='Type: Review') 178 | mdFile.new_line(text='') 179 | mdFile.new_line(text='---\n') 180 | 181 | # Create a title 182 | mdFile.new_header(level=1, title=row['title']) 183 | 184 | # Create a cover 185 | # i could use new_inline_image but the text wraps at 20 characters 186 | # creating a break. 
Hence, I code my own image line and unwrap it 187 | cover_link = '!['+ row['title'] + '](' + row['cover url'] + ')' 188 | mdFile.write(text = cover_link, wrap_width=0) 189 | 190 | # Create a section for the plot 191 | mdFile.new_header(level=1, title="Plot") 192 | 193 | # Clean up plot text 194 | plot_text = row['plot'] 195 | plot_text = ' '.join(plot_text.split()) 196 | plot_text = plot_text.replace('\ ','') 197 | mdFile.new_line(text=plot_text, wrap_width=0) 198 | 199 | # Create my own review section 200 | mdFile.new_header(level=1, title="My own thoughts") 201 | 202 | # Create markdown 203 | mdFile.create_md_file() 204 | ``` 205 | 206 | You can drag and drop these newly created markdown sheets into Obsidian. 207 | --------------------------------------------------------------------------------