├── .gitignore ├── Air Pollution Tracker ├── .gitkeep ├── app.py └── requirements.txt ├── Blogging App with Flask ├── __pycache__ │ └── data.cpython-37.pyc ├── app.py ├── data.py └── templates │ ├── about.html │ ├── add_article.html │ ├── article.html │ ├── articles.html │ ├── dashboard.html │ ├── edit_article.html │ ├── home.html │ ├── includes │ ├── _formhelpers.html │ ├── _messages.html │ └── _navbar.html │ ├── layout.html │ ├── login.html │ └── register.html ├── Book Renting App ├── BookRental.py ├── README.md ├── main.py ├── requirements.txt ├── setup.py ├── test_BookRental.py └── tox.ini ├── Classification Model Simulator with Plotly-Dash ├── Data │ ├── Test.csv │ └── Train.csv ├── README.md ├── app.py ├── assets │ ├── dash-logo.png │ ├── resizing_script.js │ ├── s1.css │ ├── style1.css │ └── styles.css ├── defintion.py ├── fileUpload.py ├── home.png ├── models.py └── multiModel.py ├── Customer Lifetime Value ├── Home.png ├── README.md ├── app.py ├── assets │ ├── logo-plotly.svg │ └── style.css ├── layout.py └── pre_processing.py ├── Customer Loyalty Program ├── Images │ ├── .gitkeep │ ├── Home1.PNG │ └── Home2.PNG ├── README.md ├── RFM.py ├── app.py ├── assets │ ├── base.css │ ├── custom.css │ ├── dash-logo-new.png │ ├── resizing_script.js │ ├── s1.css │ ├── style1.css │ └── styles.css ├── data │ └── data.csv ├── dataPreprocess.py └── layout.py ├── Dependency_mapper ├── .gitkeep └── dependency_mapper.py ├── Games ├── .gitkeep ├── Card Deck.py ├── Pong_Game.py └── Turtle_Race.py ├── Generative AI ├── AI-Flowcharts │ ├── .gitkeep │ ├── ai-workflow.gif │ ├── app.py │ └── requirements.txt ├── Chatbot With OpenAI │ ├── .gitkeep │ └── app.py ├── Content Risk Analyzer │ ├── .gitkeep │ ├── app.py │ ├── helper.py │ └── requirements.txt ├── CrewAI │ ├── .gitkeep │ └── CrewAI.py ├── LLM_wrappers │ ├── .gitkeep │ ├── wrapper_for_API.py │ └── wrapper_for_app.py ├── NL2SQL APP │ ├── NL2SQL_APP.py │ └── requirements.txt ├── OCR App With Gemini and Dash │ ├── app.py │ └── requirements.txt ├── OCR App With Gemini and Streamlit │ ├── ContentExtractor.py │ └── requirements.txt └── Streamlit vs Gradio │ ├── .gitkeep │ ├── language_translation_gradio.py │ └── language_translation_streamlit.py ├── Interactive Modelling With R Shiny ├── Images │ ├── .gitkeep │ ├── Correlation.PNG │ ├── DataSummary.PNG │ ├── HomePage.PNG │ ├── Model.PNG │ └── prediction.PNG ├── README.md ├── server.R └── ui.R ├── LICENSE ├── Monte Carlo Simulation for Pipe Failure ├── README.md ├── Requirements.txt ├── app.py ├── assets │ ├── dash-logo.png │ ├── equation.png │ ├── home.png │ ├── hoopstress.jpg │ ├── logo-plotly.svg │ └── style.css ├── config.yaml ├── layout.py └── pre_processing.py ├── Portfolio Simulator Using Python ├── Preview.gif ├── README.md ├── app.py ├── dataload.py ├── layout.py └── requirements.txt ├── Price Optimization ├── Data │ └── price.csv ├── Home.png ├── Python │ ├── optimize_price.py │ └── optimize_quantity.py ├── README.md ├── app.py ├── assets │ ├── base.css │ ├── custom.css │ ├── dash-logo-new.png │ ├── resizing_script.js │ ├── s1.css │ ├── style1.css │ └── styles.css └── requirements.txt ├── Project Management With R Shiny ├── Global.R ├── Images │ ├── DIY.gif │ ├── Download.gif │ ├── Projects.gif │ └── Tasks.gif ├── Projects.rds ├── README.md ├── Tasks.rds └── report.Rmd ├── README.md ├── Shiny App Los Angeles Crash Analysis ├── App Preview.gif ├── Global.R ├── README.md ├── server.R └── ui.R ├── Shopping List Optimizer ├── .gitkeep ├── UI.py ├── app.py └── style.css ├── Text Analysis Using Azure AI 
Services ├── .gitkeep └── app.py └── Virtual Assistant └── main.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 98 | __pypackages__/ 99 | 100 | # Celery stuff 101 | celerybeat-schedule 102 | celerybeat.pid 103 | 104 | # SageMath parsed files 105 | *.sage.py 106 | 107 | # Environments 108 | .env 109 | .venv 110 | env/ 111 | venv/ 112 | ENV/ 113 | env.bak/ 114 | venv.bak/ 115 | 116 | # Spyder project settings 117 | .spyderproject 118 | .spyproject 119 | 120 | # Rope project settings 121 | .ropeproject 122 | 123 | # mkdocs documentation 124 | /site 125 | 126 | # mypy 127 | .mypy_cache/ 128 | .dmypy.json 129 | dmypy.json 130 | 131 | # Pyre type checker 132 | .pyre/ 133 | 134 | # pytype static type analyzer 135 | .pytype/ 136 | 137 | # Cython debug symbols 138 | cython_debug/ 139 | -------------------------------------------------------------------------------- /Air Pollution Tracker/.gitkeep: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Air Pollution Tracker/app.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import dash 3 | from dash import dcc, html, Output, Input 4 | import plotly.graph_objs as go 5 | 6 | # Load air quality data from a CSV file 7 | df = pd.read_csv('AQI and Lat Long of Countries.csv') 8 | 9 | # Extract unique countries 10 | countries = sorted(df['Country'].dropna().astype(str).unique()) 11 | 12 | 13 | app = dash.Dash(__name__) 14 | 15 | server = app.server 16 | 17 | app.title = 'Air Quality Dashboard' 18 | 19 | 20 | 21 | app.layout = html.Div([ 22 | html.H2('Air Quality Dashboard (CSV-based)'), 23 | 24 | html.Label('Select a Country:'), 25 | dcc.Dropdown( 26 | id='country-dropdown', 27 | options=[{'label': country, 'value': country} for country in sorted(countries)], 28 | value=sorted(countries)[0] 29 | ), 30 | 31 | html.Br(), 32 | html.Label('Select a City:'), 33 | dcc.Dropdown(id='city-dropdown'), 34 | 35 | html.Br(), 36 | html.Div(id='aqi-output'), 37 | 38 | html.Div([ 39 | dcc.Graph(id='pollution-graph', style={'width': '48%', 'display': 'inline-block'}), 40 | dcc.Graph(id='map-graph', style={'width': '48%', 'display': 'inline-block'}) 41 | ], style={'display': 'flex', 'justify-content': 'space-between'}) 42 | ], style={'width': '90%', 'margin': 'auto'}) 43 | 44 | # Callback to update city dropdown based on country 45 | @app.callback( 46 | Output('city-dropdown', 'options'), 47 | Output('city-dropdown', 'value'), 48 | Input('country-dropdown', 'value') 49 | ) 50 | def update_cities(selected_country): 51 | filtered_df = df[df['Country'] == selected_country] 52 | city_options = [{'label': city, 'value': city} for city in sorted(filtered_df['City'].unique())] 53 | first_city = city_options[0]['value'] if city_options else None 54 | return city_options, first_city 55 | 56 | # Callback to update visuals based on city selection 57 | @app.callback( 58 | Output('aqi-output', 'children'), 59 | Output('pollution-graph', 'figure'), 60 | Output('map-graph', 'figure'), 61 | Input('city-dropdown', 'value') 62 | ) 63 | def update_air_quality(city): 64 | city_data = df[df['City'] == city].iloc[0] 65 | aqi = city_data['AQI Value'] 66 | lat = city_data['lat'] 67 | lon = city_data['lng'] 68 | country = city_data['Country'] 69 | 70 | # Pollutant data 71 | components = city_data.drop(['City', 'AQI Value', 'lat', 'lng', 'Country']).to_dict() 72 | aqi_text = f"Air Quality Index for {city}, {country}: {aqi}" 73 | 74 | # Horizontal bar chart 75 | bar_fig = go.Figure(data=[ 76 | go.Bar( 
77 | x=list(components.values()), 78 | y=list(components.keys()), 79 | orientation='h', 80 | marker_color='skyblue' 81 | ) 82 | ]) 83 | bar_fig.update_layout(title=f'Pollutants in {city}', xaxis_title='Concentration (μg/m³)', yaxis_title='Pollutants') 84 | 85 | # Map figure 86 | map_fig = go.Figure(go.Scattergeo( 87 | lon=[lon], 88 | lat=[lat], 89 | text=f"{city}, {country}<br>
AQI: {aqi}", 90 | mode='markers', 91 | marker=dict(size=10, color='red', symbol='circle') 92 | )) 93 | map_fig.update_layout( 94 | title='City Location', 95 | geo=dict(showland=True, landcolor='lightgrey'), 96 | height=400 97 | ) 98 | 99 | return aqi_text, bar_fig, map_fig 100 | 101 | if __name__ == '__main__': 102 | app.run(debug=True) 103 | -------------------------------------------------------------------------------- /Air Pollution Tracker/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Air Pollution Tracker/requirements.txt -------------------------------------------------------------------------------- /Blogging App with Flask/__pycache__/data.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Blogging App with Flask/__pycache__/data.cpython-37.pyc -------------------------------------------------------------------------------- /Blogging App with Flask/data.py: -------------------------------------------------------------------------------- 1 | def Articles(): 2 | articles = [ 3 | { 4 | 'id': 1, 5 | 'title':'Article One', 6 | 'body':'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.', 7 | 'author':'Brad Traversy', 8 | 'create_date':'04-25-2017' 9 | }, 10 | { 11 | 'id': 2, 12 | 'title':'Article Two', 13 | 'body':'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.', 14 | 'author':'John Doe', 15 | 'create_date':'04-25-2017' 16 | }, 17 | { 18 | 'id': 3, 19 | 'title':'Article Three', 20 | 'body':'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.', 21 | 'author':'Brad Traversy', 22 | 'create_date':'04-25-2017' 23 | } 24 | ] 25 | return articles 26 | -------------------------------------------------------------------------------- /Blogging App with Flask/templates/about.html: -------------------------------------------------------------------------------- 1 | {% extends 'layout.html' %} 2 | 3 | {% block body%} 4 |

About Us
5 | Lorem, ipsum dolor sit amet consectetur adipisicing elit. Sed laudantium dolore nam voluptatum ipsum ullam minima, consequuntur aliquid quisquam aliquam amet alias vero voluptate sint eius recusandae quam, quidem error.
6 | {% endblock %} -------------------------------------------------------------------------------- /Blogging App with Flask/templates/add_article.html: -------------------------------------------------------------------------------- 1 | {% extends 'layout.html' %} 2 | 3 | {% block body %} 4 |

Add Article

5 | {% from "includes/_formhelpers.html" import render_field %} 6 |
7 |
8 | {{ render_field(form.title, class_="form-control") }} 9 |
10 |
11 | {{ render_field(form.body, class_="form-control", id="editor") }} 12 |
13 |

14 |

15 | {% endblock %} 16 | -------------------------------------------------------------------------------- /Blogging App with Flask/templates/article.html: -------------------------------------------------------------------------------- 1 | {% extends 'layout.html' %} 2 | 3 | {% block body %} 4 |

{{article.title}}

5 | Written by {{article.author}} on {{article.create_date}} 6 |
7 |
8 | {{article.body | safe}} 9 |
10 | {% endblock %} 11 | -------------------------------------------------------------------------------- /Blogging App with Flask/templates/articles.html: -------------------------------------------------------------------------------- 1 | {% extends 'layout.html' %} 2 | 3 | {% block body%} 4 |

Articles

5 | 10 | {% endblock %} -------------------------------------------------------------------------------- /Blogging App with Flask/templates/dashboard.html: -------------------------------------------------------------------------------- 1 | {% extends 'layout.html' %} 2 | 3 | {% block body %} 4 |

Dashboard Welcome {{session.username}}

5 | Add Article 6 |
7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | {% for article in articles %} 17 | 18 | 19 | 20 | 21 | 22 | 23 | 29 | 30 | {% endfor %} 31 |
IDTitleAuthorDate
{{article.ID}}{{article.title}}{{article.author}}{{article.create_date}}Edit 24 |
25 | 26 | 27 |
28 |
32 | {% endblock %} 33 | -------------------------------------------------------------------------------- /Blogging App with Flask/templates/edit_article.html: -------------------------------------------------------------------------------- 1 | {% extends 'layout.html' %} 2 | 3 | {% block body %} 4 |

Edit Article

5 | {% from "includes/_formhelpers.html" import render_field %} 6 |
7 |
8 | {{ render_field(form.title, class_="form-control") }} 9 |
10 |
11 | {{ render_field(form.body, class_="form-control", id="editor") }} 12 |
13 |

14 |

15 | {% endblock %} 16 | -------------------------------------------------------------------------------- /Blogging App with Flask/templates/home.html: -------------------------------------------------------------------------------- 1 | {% extends 'layout.html' %} 2 | 3 | {% block body %} 4 |
5 |

Welcome To FlaskApp

6 |

This application is built on the Python Flask framework and is the focus for the "Python Flask From Scratch" YouTube series

7 | {% if session.logged_in == NULL %} 8 | Register 9 | Login 10 | {% endif %} 11 |
12 |
13 |

This application is built on the Python Flask framework and is the focus for the "Python Flask From Scratch" YouTube series

14 |
15 | {% endblock %} 16 | -------------------------------------------------------------------------------- /Blogging App with Flask/templates/includes/_formhelpers.html: -------------------------------------------------------------------------------- 1 | {% macro render_field(field) %} 2 | {{ field.label }} 3 | {{ field(**kwargs)|safe }} 4 | {% if field.errors %} 5 | {% for error in field.errors %} 6 | {{ error }} 7 | {% endfor %} 8 | {% endif %} 9 | {% endmacro %} 10 | -------------------------------------------------------------------------------- /Blogging App with Flask/templates/includes/_messages.html: -------------------------------------------------------------------------------- 1 | {% with messages = get_flashed_messages(with_categories=true) %} 2 | {% if messages %} 3 | {% for category, message in messages %} 4 |
{{ message }}
5 | {% endfor %} 6 | {% endif %} 7 | {% endwith %} 8 | 9 | {% if error %} 10 |
{{error}}
11 | {% endif %} 12 | 13 | {% if msg %} 14 |
{{msg}}
15 | {% endif %} 16 | -------------------------------------------------------------------------------- /Blogging App with Flask/templates/includes/_navbar.html: -------------------------------------------------------------------------------- 1 | 31 | -------------------------------------------------------------------------------- /Blogging App with Flask/templates/layout.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | MyFlaskApp 6 | 7 | 8 | 9 | 10 | {% include 'includes/_navbar.html'%} 11 |
12 | {% include 'includes/_messages.html' %} 13 | {% block body %}{% endblock %} 14 |
15 | 16 | 17 | 18 | 19 | 22 | 23 | -------------------------------------------------------------------------------- /Blogging App with Flask/templates/login.html: -------------------------------------------------------------------------------- 1 | {% extends 'layout.html' %} 2 | 3 | {% block body %} 4 |

Login

5 |
6 |
7 | 8 | 9 |
10 |
11 | 12 | 13 |
14 | 15 |
16 | {% endblock %} 17 | -------------------------------------------------------------------------------- /Blogging App with Flask/templates/register.html: -------------------------------------------------------------------------------- 1 | {% extends 'layout.html' %} 2 | 3 | {% block body %} 4 |

Register

5 | {% from "includes/_formhelpers.html" import render_field %} 6 | 7 |
8 |
9 | {{render_field(form.name, class_="form-control")}} 10 |
11 |
12 | {{render_field(form.email, class_="form-control")}} 13 |
14 |
15 | {{render_field(form.username, class_="form-control")}} 16 |
17 |
18 | {{render_field(form.password, class_="form-control")}} 19 |
20 |
21 | {{render_field(form.confirm, class_="form-control")}} 22 |
23 |

24 |
25 | {% endblock %} 26 | -------------------------------------------------------------------------------- /Book Renting App/BookRental.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import sys 3 | 4 | # def logged(function): 5 | # def wrapper(*args, **kwargs): 6 | # value = function(*args, **kwargs) 7 | # with open('logfile.txt', '+a') as f: 8 | # fname = function.__name__ 9 | # # print(f'{fname} returned value {value}') 10 | # f.write(f'{fname} returned value {value}\n') 11 | # return value 12 | # return wrapper 13 | 14 | class BookRental: 15 | 16 | def __init__(self, stock:int = 0): 17 | self.__stock = stock 18 | 19 | @property 20 | def stock(self) -> int: 21 | """The stock is a property with default value set to zero 22 | 23 | Args: 24 | stock: Number of books in stock. The default value is zero 25 | 26 | 27 | Returns: 28 | Number of books currently in stock 29 | """ 30 | return self.__stock 31 | 32 | @stock.setter 33 | def stock(self, val: int) -> int: 34 | """_summary_ 35 | 36 | Args: 37 | val (int): Let user set new value of the book stock 38 | """ 39 | self.__stock = val 40 | 41 | # @logged 42 | def display_books_stock(self): 43 | """A method to display the books in stock 44 | 45 | Returns: 46 | int: Returns the update book stock 47 | """ 48 | print(f'There are {self.stock} books available for rent') 49 | return self.stock 50 | 51 | #@logged 52 | def rent_books(self, n: int): 53 | """ Specify the number of books to rent 54 | 55 | Args: 56 | param1 n (int): Specify the number of books to rent 57 | 58 | Returns: 59 | int: Returns the updated book stock 60 | """ 61 | 62 | if n <=0: 63 | print("Number of books should be at least one") 64 | return None 65 | elif n > self.stock: 66 | print(f'Sorry we only have {self.stock} books available to rent') 67 | return None 68 | else: 69 | now = datetime.datetime.now() 70 | print(f'You have successfully rented {n} books') 71 | self.stock -= n 72 | print(f'Current available stock is {self.stock} ') 73 | # return now 74 | return self.stock 75 | 76 | # @logged 77 | def return_books(self,n: int) -> int: 78 | """ Update the current stock and return the updated stock. 
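Example (illustrative, consistent with test_BookRental.py): with a current stock of 100, calling return_books(10) updates the stock to 110 and returns 110.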
79 | 80 | Args: 81 | n (int): Number of books to return 82 | 83 | Returns: 84 | stock (int): Returns the updated book stock 85 | """ 86 | self.stock += n 87 | print(f'You have successfully returned {n} books') 88 | print(f'Current available stock is {self.stock}') 89 | # return 0 90 | return self.stock 91 | 92 | 93 | 94 | class Customer: 95 | def __init__(self): 96 | self.__books = 0 97 | 98 | 99 | @property 100 | def books(self): 101 | """ A property 102 | 103 | Returns: 104 | int: returns the number of books 105 | """ 106 | return self.__books 107 | 108 | @books.setter 109 | def books(self, val): 110 | self.__books = val 111 | 112 | @staticmethod 113 | def validate_choice(val): 114 | try: 115 | val = int(val) 116 | return val 117 | except ValueError: 118 | print("Enter valid input") 119 | 120 | 121 | # @logged 122 | def request_books(self): 123 | """ A method to request books 124 | 125 | Returns: 126 | int : Returns the number of books requested 127 | """ 128 | 129 | books = input("How many books do you wish to rent?: ") 130 | 131 | books = Customer.validate_choice(books) 132 | 133 | if books < 1: 134 | print("Please rent at least one book") 135 | return None 136 | else: 137 | self.books = books 138 | return self.books 139 | 140 | # @logged 141 | def return_books(self): 142 | """ A method to return the books 143 | 144 | Returns: 145 | int: Number of books to return 146 | """ 147 | return_books = input("How many books do you wish to return?: ") 148 | 149 | return_books = Customer.validate_choice(return_books) 150 | 151 | if return_books < 1: 152 | print("You need at least one book to return") 153 | return None 154 | else: 155 | return return_books 156 | 157 | 158 | 159 | 160 | -------------------------------------------------------------------------------- /Book Renting App/README.md: -------------------------------------------------------------------------------- 1 | # Automate And Standardize Testing With Tox In Python 2 | Learn to automate test execution in different python environments with tools like Pytest and Tox. we look into Pytest and Tox libraries to automate the entire testing process. This will let developers focus more on the core functionalities and not worry about their compatibility with a wide range of past and present versions of libraries and environments. In case something fails, the tox would highlight the specific instance of failure for further investigation and fixes 3 | 4 | * Building Book Renting application 5 | * Learn and write test cases using pytest 6 | * Learn to configure Tox and set up multiple python environments 7 | * Executing test cases using Tox and analyzing the results 8 | 9 | 10 | ## [Read more on this on my blog](https://amitvkulkarni.medium.com/automate-and-standardize-testing-with-tox-in-python-6e4ea54a48ec) 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /Book Renting App/main.py: -------------------------------------------------------------------------------- 1 | from BookRental import BookRental, Customer 2 | 3 | 4 | def main(): 5 | """ 6 | The main function initiating the objects and the process 7 | """ 8 | 9 | book = BookRental(100) 10 | customer = Customer() 11 | 12 | 13 | while True: 14 | print(""" 15 | ====== Book Rental Service ======= 16 | 1. Display available books 17 | 2. Request books 18 | 3. Return books 19 | 4. 
Exit 20 | """) 21 | choice = input("Enter your choice: ") 22 | 23 | try: 24 | choice = int(choice) 25 | except ValueError: 26 | print("Error with data type") 27 | continue 28 | 29 | if choice == 1: 30 | book.display_books_stock() 31 | elif choice == 2: 32 | book.rent_books(customer.request_books()) 33 | elif choice == 3: 34 | book.return_books(customer.return_books()) 35 | else: 36 | break 37 | 38 | print("Thank you for using the book rental service.") 39 | 40 | 41 | 42 | if __name__ == '__main__': 43 | main() -------------------------------------------------------------------------------- /Book Renting App/requirements.txt: -------------------------------------------------------------------------------- 1 | attrs==22.1.0 2 | cachetools==5.2.0 3 | chardet==5.1.0 4 | colorama==0.4.6 5 | distlib==0.3.6 6 | exceptiongroup==1.0.4 7 | filelock==3.8.2 8 | iniconfig==1.1.1 9 | mypy==0.991 10 | mypy-extensions==0.4.3 11 | packaging==22.0 12 | platformdirs==2.6.0 13 | pluggy==1.0.0 14 | pyproject_api==1.2.1 15 | pytest==7.2.0 16 | tomli==2.0.1 17 | tox==4.0.8 18 | typing_extensions==4.4.0 19 | virtualenv==20.17.1 20 | pytest-cov == 4.0.0 21 | -------------------------------------------------------------------------------- /Book Renting App/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name='Book-renting-service', 5 | packages=find_packages() 6 | ) -------------------------------------------------------------------------------- /Book Renting App/test_BookRental.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from BookRental import Customer, BookRental 3 | 4 | STOCK = 100 5 | 6 | def test_display_books_stock(): 7 | """ Test for negative value 8 | There should be minimum one book to return. It cannot be less than one. 9 | 10 | """ 11 | 12 | book1 = BookRental(STOCK) 13 | assert STOCK == book1.display_books_stock() 14 | 15 | book2 = BookRental(0) 16 | assert 0 == book2.display_books_stock() 17 | 18 | book3 = BookRental(-1) 19 | assert -1 == book3.display_books_stock() 20 | 21 | 22 | def test_rent_books(): 23 | """ Test for negative value 24 | There should be minimum one book to return. It cannot be less than one. 25 | 26 | """ 27 | rent = BookRental(STOCK) 28 | InStock = STOCK - rent.rent_books(50) 29 | assert 50 == InStock 30 | 31 | 32 | def test_rent_books_negative(): 33 | """ Test for negative value 34 | There should be minimum one book to return. It cannot be less than one. 35 | 36 | """ 37 | rent = BookRental(STOCK) 38 | assert None == rent.rent_books(-5) 39 | 40 | def test_rent_books_zero(): 41 | """ Test for negative value 42 | There should be minimum one book to return. It cannot be less than one. 43 | 44 | """ 45 | rent = BookRental(STOCK) 46 | assert None == rent.rent_books(0) 47 | 48 | 49 | def test_return_books(): 50 | """ Test for negative value 51 | There should be minimum one book to return. It cannot be less than one. 52 | 53 | """ 54 | ret_book = BookRental(STOCK) 55 | assert 110 == ret_book.return_books(10) 56 | 57 | def test_return_books_negative(): 58 | """ Test for negative value 59 | There should be minimum one book to return. It cannot be less than one. 60 | 61 | """ 62 | ret_book = BookRental(STOCK) 63 | assert 95 == ret_book.return_books(-5) 64 | 65 | 66 | def test_return_books_zero(): 67 | """ Test for negative value 68 | There should be minimum one book to return. It cannot be less than one. 
69 | 70 | """ 71 | return_book = BookRental(STOCK) 72 | assert 100 == return_book.return_books(0) -------------------------------------------------------------------------------- /Book Renting App/tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = {py38, py311}-{pytest} 3 | 4 | [testenv] 5 | deps = 6 | -r requirements.txt 7 | 8 | [testenv:{py38, py311}-pytest] 9 | description = Run pytest 10 | deps = 11 | pytest 12 | {[testenv]deps} 13 | commands = 14 | pytest --cov 15 | 16 | -------------------------------------------------------------------------------- /Classification Model Simulator with Plotly-Dash/README.md: -------------------------------------------------------------------------------- 1 | # Classification Model Simulator Application Using Dash in Python 2 | This is an analytics web application built using plotly-dash. A simnulator for classification models with the following features. 3 | 4 | 1. Built with plotly-dash, boostrap, DAQ with various charts and widgets. 5 | 2. User can build classification models of given choice and see the results on screen instantly. 6 | 3. Refer the variable importance / model metrics and adjust inputs to visualize model performance. 7 | 8 | ## [Read more on my blog](https://www.analyticsvidhya.com/blog/2020/11/classification-model-simulator-application-using-dash-in-python/) 9 | 10 | ## Here is the preview of the app:
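Under the hood, models.py follows a standard scikit-learn train/score flow. A simplified, self-contained sketch of that flow is shown here; it uses synthetic data instead of the bundled Train.csv and only two of the supported classifiers, so treat it as an illustration rather than the app code itself:

```python
# Simplified sketch of the simulator's train/evaluate flow (not the full app code).
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score, precision_score, recall_score, accuracy_score

# Synthetic stand-in for the loan dataset shipped with the app.
X, y = make_classification(n_samples=500, n_features=10, random_state=2)

# The app's slider sets the train split percentage (e.g. 70 -> 70% train).
slider = 70
trainX, testX, trainy, testy = train_test_split(X, y, train_size=slider / 100, random_state=2)

# The model dropdown maps a label to a classifier; two of the options are shown here.
models = {"Logistic": LogisticRegression(max_iter=1000), "Random Forest": RandomForestClassifier()}
model = models["Random Forest"]
model.fit(trainX, trainy)

probs = model.predict_proba(testX)[:, 1]
preds = model.predict(testX)
print("AUC:", round(roc_auc_score(testy, probs), 2),
      "| Precision:", round(precision_score(testy, preds), 2),
      "| Recall:", round(recall_score(testy, preds), 2),
      "| Accuracy:", round(accuracy_score(testy, preds) * 100, 1), "%")
```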
11 | ![alt text](https://github.com/amitvkulkarni/Data-Apps/blob/main/Classification%20Model%20Simulator%20with%20Plotly-Dash/home.png) 12 | -------------------------------------------------------------------------------- /Classification Model Simulator with Plotly-Dash/assets/dash-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Classification Model Simulator with Plotly-Dash/assets/dash-logo.png -------------------------------------------------------------------------------- /Classification Model Simulator with Plotly-Dash/assets/resizing_script.js: -------------------------------------------------------------------------------- 1 | if (!window.dash_clientside) { 2 | window.dash_clientside = {}; 3 | } 4 | window.dash_clientside.clientside = { 5 | resize: function(value) { 6 | console.log("resizing..."); // for testing 7 | setTimeout(function() { 8 | window.dispatchEvent(new Event("resize")); 9 | console.log("fired resize"); 10 | }, 500); 11 | return null; 12 | } 13 | }; 14 | -------------------------------------------------------------------------------- /Classification Model Simulator with Plotly-Dash/assets/styles.css: -------------------------------------------------------------------------------- 1 | .js-plotly-plot .plotly .modebar { 2 | padding-top: 5%; 3 | margin-right: 3.5%; 4 | } 5 | 6 | body { 7 | background-color: #f2f2f2; 8 | margin: 5%; 9 | } 10 | 11 | .two.columns { 12 | width: 16.25%; 13 | } 14 | 15 | .column, 16 | .columns { 17 | margin-left: 0.5%; 18 | } 19 | 20 | .pretty_container { 21 | border-radius: 5px; 22 | background-color: #f9f9f9; 23 | margin: 10px; 24 | padding: 15px; 25 | position: relative; 26 | box-shadow: 2px 2px 2px lightgrey; 27 | } 28 | 29 | .bare_container { 30 | margin: 0 0 0 0; 31 | padding: 0 0 0 0; 32 | } 33 | 34 | .dcc_control { 35 | margin: 0; 36 | padding: 5px; 37 | width: calc(100%-40px); 38 | } 39 | 40 | .control_label { 41 | margin: 0; 42 | padding: 10px; 43 | padding-bottom: 0px; 44 | margin-bottom: 0px; 45 | width: calc(100%-40px); 46 | } 47 | 48 | .rc-slider { 49 | margin-left: 0px; 50 | padding-left: 0px; 51 | } 52 | 53 | .flex-display { 54 | display: flex; 55 | } 56 | 57 | .container-display { 58 | display: flex; 59 | } 60 | 61 | #individual_graph, 62 | #aggregate_graph { 63 | width: calc(100% - 30px); 64 | position: absolute; 65 | } 66 | 67 | #count_graph { 68 | position: absolute; 69 | height: calc(100% - 30px); 70 | width: calc(100% - 30px); 71 | } 72 | 73 | #countGraphContainer { 74 | flex: 5; 75 | position: relative; 76 | } 77 | 78 | #header { 79 | align-items: center; 80 | } 81 | 82 | #learn-more-button { 83 | text-align: center; 84 | height: 100%; 85 | padding: 0 20px; 86 | text-transform: none; 87 | font-size: 15px; 88 | float: right; 89 | margin-right: 10px; 90 | margin-top: 30px; 91 | } 92 | #title { 93 | text-align: center; 94 | } 95 | 96 | .mini_container { 97 | border-radius: 5px; 98 | background-color:#e4e7e7; 99 | margin: 10px; 100 | padding: 15px; 101 | position: relative; 102 | box-shadow: 2px 2px 2px lightgrey; 103 | } 104 | 105 | #right-column { 106 | display: flex; 107 | flex-direction: column; 108 | } 109 | 110 | #wells { 111 | flex: 1; 112 | } 113 | 114 | #gas { 115 | flex: 1; 116 | } 117 | 118 | #aggregate_data { 119 | align-items: center; 120 | } 121 | 122 | #oil { 123 | flex: 1; 124 | } 125 | 126 | #water { 127 | flex: 1; 128 | } 129 | 130 | #tripleContainer { 131 | display: 
flex; 132 | flex: 3; 133 | } 134 | 135 | #mainContainer { 136 | display: flex; 137 | flex-direction: column; 138 | } 139 | 140 | #pie_graph > div > div > svg:nth-child(3) > g.infolayer > g.legend { 141 | pointer-events: all; 142 | transform: translate(30px, 349px); 143 | } 144 | -------------------------------------------------------------------------------- /Classification Model Simulator with Plotly-Dash/defintion.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | import os 4 | import plotly.express as px 5 | 6 | class Data: 7 | def __init__(self): 8 | 9 | self.df_train = pd.read_csv('Classification Model Simulator with Plotly-Dash/Data/Train.csv') 10 | self.df_test = pd.read_csv('Classification Model Simulator with Plotly-Dash//Data/Test.csv') 11 | self.df_train_dummies = pd.get_dummies(self.df_train,columns=['Gender','Married','Education','Self_Employed','Property_Area'],drop_first=True) 12 | self.df_test_dummies = pd.get_dummies(self.df_train,columns=['Gender','Married','Education','Self_Employed','Property_Area'],drop_first=True) 13 | self.df = pd.read_csv('Classification Model Simulator with Plotly-Dash//Data/Train.csv') 14 | 15 | 16 | # creating data object 17 | obj_Data = Data() 18 | 19 | 20 | -------------------------------------------------------------------------------- /Classification Model Simulator with Plotly-Dash/fileUpload.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import datetime 3 | import io 4 | 5 | import dash 6 | from dash.dependencies import Input, Output, State 7 | import dash_core_components as dcc 8 | import dash_html_components as html 9 | import dash_table 10 | 11 | import pandas as pd 12 | 13 | 14 | external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css'] 15 | 16 | app = dash.Dash(__name__, external_stylesheets=external_stylesheets) 17 | 18 | upload_layout = html.Div([ 19 | dcc.Upload( 20 | id='upload-data', 21 | children=html.Div([ 22 | 'Drag and Drop or ', 23 | html.A('Select Files') 24 | ]), 25 | style={ 26 | 'width': '100%', 27 | 'height': '60px', 28 | 'lineHeight': '60px', 29 | 'borderWidth': '1px', 30 | 'borderStyle': 'dashed', 31 | 'borderRadius': '5px', 32 | 'textAlign': 'center', 33 | 'margin': '10px' 34 | }, 35 | # Allow multiple files to be uploaded 36 | multiple=True 37 | ), 38 | html.Div(id='output-data-upload'), 39 | ]) 40 | 41 | 42 | def parse_contents(contents, filename, date): 43 | content_type, content_string = contents.split(',') 44 | 45 | decoded = base64.b64decode(content_string) 46 | try: 47 | if 'csv' in filename: 48 | # Assume that the user uploaded a CSV file 49 | df = pd.read_csv( 50 | io.StringIO(decoded.decode('utf-8'))) 51 | elif 'xls' in filename: 52 | # Assume that the user uploaded an excel file 53 | df = pd.read_excel(io.BytesIO(decoded)) 54 | except Exception as e: 55 | print(e) 56 | return html.Div([ 57 | 'There was an error processing this file.' 
58 | ]) 59 | 60 | return html.Div([ 61 | html.H5(filename), 62 | html.H6(datetime.datetime.fromtimestamp(date)), 63 | 64 | dash_table.DataTable( 65 | data=df.to_dict('records'), 66 | columns=[{'name': i, 'id': i} for i in df.columns] 67 | ), 68 | 69 | html.Hr(), # horizontal line 70 | 71 | # For debugging, display the raw contents provided by the web browser 72 | html.Div('Raw Content'), 73 | html.Pre(contents[0:200] + '...', style={ 74 | 'whiteSpace': 'pre-wrap', 75 | 'wordBreak': 'break-all' 76 | }) 77 | ]) 78 | 79 | 80 | @app.callback(Output('output-data-upload', 'children'), 81 | [Input('upload-data', 'contents')], 82 | [State('upload-data', 'filename'), 83 | State('upload-data', 'last_modified')]) 84 | def update_output(list_of_contents, list_of_names, list_of_dates): 85 | if list_of_contents is not None: 86 | children = [ 87 | parse_contents(c, n, d) for c, n, d in 88 | zip(list_of_contents, list_of_names, list_of_dates)] 89 | return children 90 | 91 | 92 | # if __name__ == '__main__': 93 | # app.run_server(debug=True) -------------------------------------------------------------------------------- /Classification Model Simulator with Plotly-Dash/home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Classification Model Simulator with Plotly-Dash/home.png -------------------------------------------------------------------------------- /Classification Model Simulator with Plotly-Dash/models.py: -------------------------------------------------------------------------------- 1 | import lightgbm as lgb 2 | import logging 3 | import pandas as pd 4 | import numpy as np 5 | import plotly.express as px 6 | from defintion import * 7 | from sklearn import tree 8 | import plotly.figure_factory as ff 9 | from sklearn.naive_bayes import GaussianNB 10 | from sklearn.datasets import make_classification 11 | from sklearn.linear_model import LogisticRegression 12 | from sklearn.model_selection import train_test_split 13 | from sklearn.neighbors import KNeighborsClassifier 14 | from sklearn.model_selection import train_test_split 15 | from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier 16 | from sklearn.metrics import roc_curve, roc_auc_score, recall_score, precision_score,accuracy_score 17 | 18 | logging.basicConfig(filename= 'model_specific.log', level = logging.DEBUG,format='%(asctime)s:%(levelname)s:%(filename)s:%(funcName)s:%(message)s') 19 | 20 | def buildModel(target, independent, slider, bestModel): 21 | 22 | """[This function builds a classification model with relavant metrics/plots to measure the performance of the model. 
The model to be built passed as argument from getModel method ] 23 | 24 | Returns: 25 | [type]: [Returns various metrics such as accuracy, precision, recall for chosen model along with plots] 26 | """ 27 | 28 | try: 29 | X = pd.DataFrame(independent) 30 | y = pd.DataFrame(target) 31 | 32 | X = X.drop(['Loan_ID'], axis=1) 33 | trainX, testX, trainy, testy = train_test_split(X, y, train_size= slider/100, random_state=2) 34 | 35 | if bestModel == 'GNB': 36 | model = GaussianNB() 37 | elif bestModel == 'LGBM': 38 | model = lgb.LGBMClassifier() 39 | elif bestModel == 'Logistic': 40 | model = LogisticRegression() 41 | elif bestModel == 'KNN': 42 | model = KNeighborsClassifier() 43 | elif bestModel == 'Raondom Forest': 44 | model = RandomForestClassifier() 45 | elif bestModel == 'DT': 46 | model = tree.DecisionTreeClassifier() 47 | else: 48 | model = AdaBoostClassifier() 49 | 50 | model.fit(trainX, trainy) 51 | 52 | lr_probs = model.predict_proba(testX) 53 | yhat = model.predict(testX) 54 | 55 | 56 | lr_probs = lr_probs[:, 1] 57 | #ns_fpr, ns_tpr, _ = roc_curve(testy, ns_probs) 58 | lr_fpr, lr_tpr, thresholds = roc_curve(testy, lr_probs) 59 | 60 | lr_auc = round(roc_auc_score(testy, lr_probs),2) 61 | fig_ROC = px.area( 62 | x=lr_fpr, y=lr_tpr, 63 | title=f'ROC Curve (AUC={lr_auc:.4f})', 64 | 65 | labels=dict(x='False Positive Rate', y='True Positive Rate') 66 | 67 | ) 68 | fig_ROC.add_shape( 69 | type='line', line=dict(dash='dash'), 70 | x0=0, x1=1, y0=0, y1=1 71 | ) 72 | 73 | fig_ROC.update_yaxes(scaleanchor="x", scaleratio=1) 74 | fig_ROC.update_xaxes(constrain='domain') 75 | 76 | 77 | fig_precision = px.histogram( 78 | x = lr_probs, color=testy, nbins=50, 79 | labels=dict(color='True Labels', x='Score') 80 | ) 81 | 82 | 83 | # Evaluating model performance at various thresholds 84 | df_threshold = pd.DataFrame({ 85 | 'False Positive Rate': lr_fpr, 86 | 'True Positive Rate': lr_tpr 87 | }, index=thresholds) 88 | df_threshold.index.name = "Thresholds" 89 | df_threshold.columns.name = "Rate" 90 | 91 | fig_thresh = px.line( 92 | df_threshold, title='TPR and FPR at every threshold' 93 | 94 | ) 95 | 96 | fig_thresh.update_yaxes(scaleanchor="x", scaleratio=1) 97 | fig_threshold = fig_thresh.update_xaxes(range=[0, 1], constrain='domain') 98 | 99 | # precision tp / (tp + fp) 100 | precision = round(precision_score(testy, yhat),2) 101 | # recall: tp / (tp + fn) 102 | recall = round(recall_score(testy, yhat),2) 103 | accuracy = round(accuracy_score(testy, yhat)*100,1) 104 | 105 | 106 | logging.debug(accuracy) 107 | logging.debug(precision) 108 | logging.debug(recall) 109 | logging.debug(lr_auc) 110 | 111 | return fig_ROC, fig_precision, fig_threshold, precision, recall, accuracy, trainX.shape[0], testX.shape[0],lr_auc 112 | 113 | except: 114 | logging.exception('Something went wrong with AUC curve and Precision/Recall plot') 115 | 116 | -------------------------------------------------------------------------------- /Customer Lifetime Value/Home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Customer Lifetime Value/Home.png -------------------------------------------------------------------------------- /Customer Lifetime Value/README.md: -------------------------------------------------------------------------------- 1 | # Python App to Measure Customer Lifetime Value (CLV) 2 | 3 | Customer Lifetime Value is the profit that a business will make from a specific customer 
over the period of their association with the business. Every industry has its own set of metrics that are tracked and measured to help businesses target the right customer and forecast their customer base for the future. The CLV may change depending on the business model and its objective which means the definition and calculation need to be revisited regularly. The app helps businesses identify, segment, understand customers and products better. 4 | 5 | ## [Read more on my blog](https://www.analyticsvidhya.com/blog/2021/08/create-a-python-app-to-measure-customer-lifetime-value-clv/?) 6 | 7 | ## Here is the preview of the app:
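The CLV figure shown in the app comes from a few per-customer and portfolio-level aggregates. Here is a worked example of the formula implemented in pre_processing.py; all the numbers are made up for illustration, not taken from the dataset:

```python
# Worked CLV example mirroring pre_processing.py (illustrative numbers only).
spent_money = 5000.0        # total spend of one customer
num_transactions = 20       # that customer's number of invoices
avg_order_value = spent_money / num_transactions             # 250.0

purchase_frequency = 90000 / 4000   # total transactions / total customers (assumed)
repeat_rate = 0.65                  # share of customers with more than one transaction (assumed)
churn_rate = round(1 - repeat_rate, 2)                        # 0.35

clv = (avg_order_value * purchase_frequency) / churn_rate
print(round(clv, 2))  # 16071.43
```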
8 | ![ScreenShot](Home.png) 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /Customer Lifetime Value/app.py: -------------------------------------------------------------------------------- 1 | import dash 2 | import dash_core_components as dcc 3 | import dash_html_components as html 4 | from dash.dependencies import Input, Output, State 5 | import dash_daq as daq 6 | import dash_bootstrap_components as dbc 7 | import numpy as np 8 | import pandas as pd 9 | import logging 10 | import plotly.graph_objs as go 11 | import plotly.express as px 12 | import layout 13 | import pre_processing as pp 14 | # pd.options.display.float_format = '${:,.2f}'.format 15 | 16 | 17 | app = dash.Dash(__name__) 18 | 19 | server = app.server 20 | 21 | app.layout = layout.layout_all 22 | 23 | 24 | @app.callback( 25 | [ 26 | Output("id_total_customer", 'children'), 27 | Output("id_total_transactions", 'children'), 28 | Output("id_total_sales", 'children'), 29 | Output("id_order_value", 'children'), 30 | # Output("id_churn", 'children'), 31 | Output("id-results", 'data'), 32 | Output("fig-UnitPriceVsQuantity", 'figure'), 33 | Output("fig-ProductPie", 'figure'), 34 | ], 35 | [ 36 | Input("id-country-dropdown", "value") 37 | ] 38 | ) 39 | def update_output_All(country_selected): 40 | 41 | try: 42 | if (country_selected != 'All' and country_selected != None): 43 | df_selectedCountry = pp.filtered_data.loc[pp.filtered_data['Country'] == country_selected] 44 | df_selectedCountry_p = pp.filtered_data_group.loc[pp.filtered_data_group['Country'] == country_selected] 45 | 46 | cnt_transactions = df_selectedCountry.Country.shape[0] 47 | cnt_customers = len(df_selectedCountry.CustomerID.unique()) 48 | cnt_sales = round(df_selectedCountry.groupby('Country').agg({'TotalPurchase':'sum'})['TotalPurchase'].sum(),2) 49 | 50 | cnt_avgsales = round(df_selectedCountry_p.groupby('Country').agg({'avg_order_value': 'mean'})['avg_order_value'].mean()) 51 | 52 | 53 | repeat_rate = round(df_selectedCountry_p[df_selectedCountry_p.num_transactions > 1].shape[0]/df_selectedCountry_p.shape[0],2) 54 | churn_rate = round(1-repeat_rate,2) 55 | 56 | 57 | # scatter plot for purchase trend 58 | df2 = pp.df_plot.loc[pp.df_plot['Country'] == country_selected] 59 | fig_UnitPriceVsQuantity_country = px.scatter(df2[:25000], x="UnitPrice", y="TotalPurchase", color = 'Quantity', 60 | size='Quantity', size_max=20, log_y= True, log_x= True, title= "PURCHASE TRENDS FOR SELECTED COUNTRY") 61 | 62 | # Pie chart listing top products 63 | df_plot_bar = df_selectedCountry.groupby('Description').agg({'TotalPurchase':'sum'}).sort_values(by = 'TotalPurchase', ascending=False).reset_index().head(5) 64 | df_plot_bar['percent'] = round((df_plot_bar['TotalPurchase'] / df_plot_bar['TotalPurchase'].sum()) * 100,2) 65 | 66 | fir_plotbar = px.bar(df_plot_bar, y='percent', x='Description', title='Top selling products', 67 | text='percent', color='percent') 68 | fir_plotbar.update_traces(texttemplate='%{text:.2s}', textposition='inside') 69 | fir_plotbar.update_layout(uniformtext_minsize=8, uniformtext_mode='hide',showlegend=False) 70 | 71 | return [cnt_customers, cnt_transactions, cnt_sales, cnt_avgsales, df_selectedCountry_p.drop(['num_days','num_units'], axis = 1).to_dict('records'), 72 | fig_UnitPriceVsQuantity_country, fir_plotbar] 73 | 74 | else: 75 | 76 | cnt_transactions = pp.filtered_data.shape[0] 77 | cnt_customers = len(pp.filtered_data.CustomerID.unique()) 78 | cnt_sales = 
round(pp.filtered_data.groupby('Country').agg({'TotalPurchase':'sum'})['TotalPurchase'].sum(),2) 79 | cnt_avgsales = round(pp.filtered_data_group.groupby('Country').agg({'avg_order_value': 'mean'})['avg_order_value'].mean()) 80 | 81 | 82 | repeat_rate = round(pp.filtered_data_group[pp.filtered_data_group.num_transactions > 1].shape[0]/pp.filtered_data_group.shape[0],2) 83 | churn_rate = round(1-repeat_rate,2) 84 | 85 | # Bar chart listing top products 86 | df_plot_bar = pp.filtered_data.groupby('Description').agg({'TotalPurchase':'sum'}).sort_values(by = 'TotalPurchase', ascending=False).reset_index().head(5) 87 | df_plot_bar['percent'] = round((df_plot_bar['TotalPurchase'] / df_plot_bar['TotalPurchase'].sum()) * 100,2).apply(lambda x : "{:,}".format(x)) 88 | 89 | fir_plotbar = px.bar(df_plot_bar, y='percent', x='Description', title='TOP SELLING PRODUCTS', text='percent', color='percent',) 90 | fir_plotbar.update_traces(texttemplate='%{text:.2s}', textposition='inside') 91 | fir_plotbar.update_layout(uniformtext_minsize=8, uniformtext_mode='hide', showlegend=False) 92 | 93 | return [cnt_customers, cnt_transactions, cnt_sales,cnt_avgsales, pp.filtered_data_group.drop(['num_days','num_units'], axis = 1).to_dict('records'), 94 | pp.fig_UnitPriceVsQuantity, fir_plotbar] 95 | 96 | 97 | 98 | except Exception as e: 99 | logging.exception('Something went wrong with interaction logic:', e) 100 | 101 | 102 | 103 | 104 | if __name__ == '__main__': 105 | app.run_server(debug=True, use_reloader=False, dev_tools_ui=False) 106 | -------------------------------------------------------------------------------- /Customer Lifetime Value/assets/logo-plotly.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /Customer Lifetime Value/assets/style.css: -------------------------------------------------------------------------------- 1 | body { 2 | background-color:#EDEFEB; 3 | font-family: "Open Sans", verdana, arial, sans-serif; 4 | } 5 | 6 | .side_bar { 7 | height: 100%; 8 | width: 14%; 9 | position: fixed; 10 | top: 0; 11 | left: 0; 12 | background-color: #ADD8E6; 13 | padding-left: 1%; 14 | padding-right: 1%; 15 | color:rgb(7, 7, 7); 16 | } 17 | 18 | .main { 19 | font-size: 15px; 20 | padding: 0px 10px; 21 | padding-left: 16%; 22 | color:#2F4F4F; 23 | } 24 | 25 | .footer { 26 | position: relative; 27 | left: 0; 28 | bottom: 0; 29 | width: 100%; 30 | background-color: #4b9072a7; 31 | padding-left: 1%; 32 | } 33 | 34 | h1 { 35 | color:#000080; 36 | font-family: ui-monospace; 37 | } 38 | 39 | h3 { 40 | margin-block-start:0.3em; 41 | font-size: smaller; 42 | } 43 | 44 | h4 { 45 | margin-block-end:0.3em; 46 | font-size: smaller; 47 | } 48 | 49 | .text{ 50 | margin: 10px; 51 | padding: 25px; 52 | align-self: center; 53 | } 54 | 55 | .row{ 56 | display: flex; 57 | } 58 | 59 | /* Boxes */ 60 | 61 | .box { 62 | border-radius: 20px; 63 | background-color: #F9F9F8; 64 | margin: 10px; 65 | padding: 25px; 66 | box-shadow: 2px 2px 2px lightgrey; 67 | } 68 | 69 | .box_comment { 70 | border-radius: 20px; 71 | background-color: #d4f5c29a; 72 | margin: 10px; 73 | padding: 10px; 74 | box-shadow: 2px 2px 2px lightgrey; 75 | font-size: small; 76 | text-align: center; 77 | font-family: system-ui; 78 | } 79 | 80 | .box_emissions{ 81 | border-radius: 20px; 82 | background-color: #DCDCDC; 83 | margin: 2px; 84 | box-shadow: 2px 2px 2px lightgrey; 85 | justify-content: 
center; 86 | width: 50%; 87 | text-align: center; 88 | } 89 | /* Drop down */ 90 | #drop_map .VirtualizedSelectFocusedOption { 91 | background-color:#ebb36abf; 92 | } 93 | 94 | #drop_continent .VirtualizedSelectFocusedOption { 95 | background-color:#ebb36abf; 96 | } 97 | 98 | /* Radio buttons */ 99 | .radio { 100 | display:flex; 101 | justify-content: left; 102 | place-content:space-around; 103 | width: 88%; 104 | } 105 | 106 | .radio input[type="radio"] { 107 | opacity: 0; 108 | position: absolute; 109 | } 110 | 111 | .radio label { 112 | display: table-caption; 113 | background-color: #F9F9F8; 114 | padding: 15px 15px; 115 | font-size: 16px; 116 | border: 2px solid #e9a8538e; 117 | border-radius: 10px; 118 | width: 200%; 119 | text-align: center; 120 | } 121 | 122 | .radio input[type="radio"]:checked + label { 123 | background-color: #e98b1044; 124 | border: 2px solid #F9F9F8; 125 | } 126 | 127 | .radio label:hover { 128 | background-color: #e1e2df; 129 | border: 2px solid #e1e2df; 130 | } 131 | 132 | /* Scrollbars */ 133 | *::-webkit-scrollbar { 134 | width: 12px; 135 | } 136 | *::-webkit-scrollbar-track { 137 | background: #EDEFEB; 138 | } 139 | *::-webkit-scrollbar-thumb { 140 | background-color:#ebb36abf; 141 | border-radius: 20px; 142 | border: 3px solid #EDEFEB; 143 | } -------------------------------------------------------------------------------- /Customer Lifetime Value/pre_processing.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import matplotlib.pyplot as plt 3 | import seaborn as sns 4 | import datetime as dt 5 | import numpy as np 6 | import plotly.express as px 7 | # pd.options.display.float_format = '${:,.2f}'.format 8 | 9 | 10 | 11 | # Load the data 12 | data = pd.read_excel("./data/Online_Retail.xlsx") 13 | 14 | # remove duplicate rows 15 | filtered_data = data.drop_duplicates() 16 | 17 | filtered_data.columns 18 | 19 | # Plot the bar chart of countries 20 | filtered_data.Country.value_counts()[:10].plot(kind='bar') 21 | 22 | # Filter all quantities that are greater than zero 23 | filtered_data = filtered_data[(filtered_data['Quantity']>0)] 24 | 25 | # list(filtered_data.Country.unique()) 26 | 27 | filtered_data = filtered_data [['CustomerID','Description','InvoiceDate','InvoiceNo','Quantity','UnitPrice', 'Country']] 28 | 29 | # Calculate total purchase 30 | filtered_data['TotalPurchase'] = filtered_data['Quantity'] * filtered_data['UnitPrice'] 31 | 32 | filtered_data_group = filtered_data.groupby(['CustomerID','Country']).agg({'InvoiceDate': lambda date: (date.max() - date.min()).days, 33 | 'InvoiceNo': lambda num: len(num), 34 | 'Quantity': lambda quant: quant.sum(), 35 | 'TotalPurchase': lambda price: price.sum()}) 36 | 37 | 38 | # Change the name of columns 39 | filtered_data_group.columns=['num_days','num_transactions','num_units','spent_money'] 40 | 41 | # Average Order Value 42 | filtered_data_group['avg_order_value'] = filtered_data_group['spent_money']/filtered_data_group['num_transactions'] 43 | 44 | # Calculate purchase frequency 45 | purchase_frequency = sum(filtered_data_group['num_transactions'])/filtered_data_group.shape[0] 46 | 47 | # Repeat rate 48 | repeat_rate = round(filtered_data_group[filtered_data_group.num_transactions > 1].shape[0]/filtered_data_group.shape[0],2) 49 | 50 | # Churn Percentage 51 | churn_rate = round(1-repeat_rate,2) 52 | 53 | filtered_data_group.reset_index() 54 | 55 | filtered_data_group['profit_margin'] = filtered_data_group['spent_money']*0.05 56 | 57 | # 
Customer Value 58 | filtered_data_group['CLV'] = (filtered_data_group['avg_order_value']*purchase_frequency)/churn_rate 59 | 60 | # Resetting the index 61 | filtered_data_group.reset_index(inplace = True) 62 | 63 | # Formatting the currency fields 64 | # filtered_data_group['spent_money', 'avg_order_value','profit_margin'] = filtered_data_group.spent_money.apply(lambda x : "{:,}".format(x)) 65 | 66 | df_plot = filtered_data.groupby(['Country','Description','UnitPrice','Quantity']).agg({'TotalPurchase': 'sum'},{'Quantity':'sum'}).reset_index() 67 | # df2 = df1.loc[df1['Country'] == 'USA'] 68 | # px.scatter(df_plot[:25000], x="UnitPrice", y="TotalPurchase", color = 'Quantity', size='Quantity', title="Product Sales", size_max=20, log_y= True, log_x= True) 69 | fig_UnitPriceVsQuantity = px.scatter(df_plot[:25000], x="UnitPrice", y="Quantity", color = 'Country', 70 | size='TotalPurchase', size_max=20, log_y= True, log_x= True, title= "PURCHASE TREND ACROSS COUNTRIES") 71 | 72 | 73 | 74 | # formating the float fields 75 | var_float_filtered_group = [i for i in filtered_data_group.columns if filtered_data_group.dtypes[i]=='float64'] 76 | for i in var_float_filtered_group: 77 | filtered_data_group[i] = filtered_data_group[i].round(2) 78 | filtered_data_group[i].apply(lambda x : "{:,}".format(x)) 79 | 80 | 81 | var_float_filtered = [i for i in filtered_data.columns if filtered_data.dtypes[i]=='float64'] 82 | for i in var_float_filtered: 83 | filtered_data[i] = filtered_data[i].round(2) 84 | filtered_data[i].apply(lambda x : "{:,}".format(x)) 85 | 86 | 87 | 88 | 89 | 90 | -------------------------------------------------------------------------------- /Customer Loyalty Program/Images/.gitkeep: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Customer Loyalty Program/Images/Home1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Customer Loyalty Program/Images/Home1.PNG -------------------------------------------------------------------------------- /Customer Loyalty Program/Images/Home2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Customer Loyalty Program/Images/Home2.PNG -------------------------------------------------------------------------------- /Customer Loyalty Program/README.md: -------------------------------------------------------------------------------- 1 | # Customer Loyalty Program with Python 2 | The app helps businesses identify, segment, and understand customers better. The companies can provide their frequent customer’s free merchandise, rewards, coupons, or even advance released products to encourage loyalty. They can also identify potential & promising customers, make suitable engagement programs to encourage/improve loyalty and long-term business. 3 | The objective of the blog was to showcase the features of Python-Dash and how easy it is to build a bare-metal UI without HTML/CSS/JavaScript hassles. Add quality/consistent data to the mix and you have an app to make better business decisions. 4 | 5 | ## [Read more on this on my blog](https://www.analyticsvidhya.com/blog/2021/03/customer-loyalty-program-with-python/) 6 | 7 | ## Here is the preview of the app:
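The segmentation behind the app (see RFM.py below) scores Recency, Frequency and Monetary value into quartiles (1 to 4 each), sums them into an RFM score, and maps that score to a loyalty level. A tiny illustrative example of that mapping, with made-up quartile scores:

```python
# Illustrative RFM scoring, mirroring the rfm_level() thresholds in RFM.py (scores are made up).
R, F, M = 4, 3, 2            # quartile scores for Recency, Frequency, MonetaryValue
rfm_score = R + F + M        # 9

if rfm_score >= 9:
    level = "Can't Loose Them"
elif rfm_score >= 8:
    level = "Champions"
elif rfm_score >= 7:
    level = "Loyal"
elif rfm_score >= 6:
    level = "Potential"
elif rfm_score >= 5:
    level = "Promising"
elif rfm_score >= 4:
    level = "Needs Attention"
else:
    level = "Require Activation"

print(rfm_score, level)      # 9 Can't Loose Them
```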
8 | ![alt text](https://github.com/amitvkulkarni/Data-Apps/blob/254187b7d141518d6e7921c6fa5ce980d0f613e7/Customer%20Loyalty%20Program/Images/Home1.PNG) 9 | ![alt text](https://github.com/amitvkulkarni/Data-Apps/blob/254187b7d141518d6e7921c6fa5ce980d0f613e7/Customer%20Loyalty%20Program/Images/Home2.PNG) 10 | 11 | -------------------------------------------------------------------------------- /Customer Loyalty Program/RFM.py: -------------------------------------------------------------------------------- 1 | # Import libraries 2 | import pandas as pd 3 | from datetime import timedelta 4 | import matplotlib.pyplot as plt 5 | import seaborn as sns 6 | #import squarify 7 | 8 | 9 | def RFM(): 10 | 11 | # Read dataset 12 | # online = pd.read_csv('data.csv', encoding = "ISO-8859-1")# Convert InvoiceDate from object to datetime format 13 | # online['InvoiceDate'] = pd.to_datetime(online['InvoiceDate']) 14 | 15 | #print('{:,} rows; {:,} columns'.format(online.shape[0], online.shape[1])) 16 | #print('{:,} transactions don\'t have a customer id'.format(online[online.CustomerID.isnull()].shape[0])) 17 | #print('Transactions timeframe from {} to {}'.format(online['InvoiceDate'].min(),online['InvoiceDate'].max())) 18 | 19 | 20 | #online.dropna() 21 | 22 | #Create TotalSum column for online dataset 23 | online['TotalSum'] = online['Quantity'] * online['UnitPrice']# Create snapshot date 24 | snapshot_date = online['InvoiceDate'].max() + timedelta(days=1) 25 | print(snapshot_date)# Grouping by CustomerID 26 | data_process = online.groupby(['CustomerID']).agg({ 27 | 'InvoiceDate': lambda x: (snapshot_date - x.max()).days, 28 | 'InvoiceNo': 'count', 29 | 'TotalSum': 'sum'})# Rename the columns 30 | data_process.rename(columns={'InvoiceDate': 'Recency', 31 | 'InvoiceNo': 'Frequency', 32 | 'TotalSum': 'MonetaryValue'}, inplace=True) 33 | 34 | 35 | 36 | # print(data_process.head()) 37 | # print('{:,} rows; {:,} columns' 38 | # .format(data_process.shape[0], data_process.shape[1])) 39 | 40 | # Plot RFM distributions 41 | plt.figure(figsize=(12,10))# Plot distribution of R 42 | plt.subplot(3, 1, 1); sns.distplot(data_process['Recency'])# Plot distribution of F 43 | plt.subplot(3, 1, 2); sns.distplot(data_process['Frequency'])# Plot distribution of M 44 | plt.subplot(3, 1, 3); sns.distplot(data_process['MonetaryValue'])# Show the plot 45 | #plt.show() 46 | 47 | 48 | # --Calculate R and F groups--# Create labels for Recency and Frequency 49 | r_labels = range(4, 0, -1); f_labels = range(1, 5)# Assign these labels to 4 equal percentile groups 50 | r_groups = pd.qcut(data_process['Recency'], q=4, labels=r_labels)# Assign these labels to 4 equal percentile groups 51 | f_groups = pd.qcut(data_process['Frequency'], q=4, labels=f_labels)# Create new columns R and F 52 | data_process = data_process.assign(R = r_groups.values, F = f_groups.values) 53 | data_process.head() 54 | 55 | # Create labels for MonetaryValue 56 | m_labels = range(1, 5)# Assign these labels to three equal percentile groups 57 | m_groups = pd.qcut(data_process['MonetaryValue'], q=4, labels=m_labels)# Create new column M 58 | data_process = data_process.assign(M = m_groups.values) 59 | 60 | 61 | # Concat RFM quartile values to create RFM Segments 62 | def join_rfm(x): return str(x['R']) + str(x['F']) + str(x['M']) 63 | data_process['RFM_Segment_Concat'] = data_process.apply(join_rfm, axis=1) 64 | rfm = data_process 65 | rfm.head() 66 | 67 | 68 | # Count num of unique segments 69 | rfm_count_unique = 
rfm.groupby('RFM_Segment_Concat')['RFM_Segment_Concat'].nunique() 70 | print(rfm_count_unique.sum()) 71 | 72 | 73 | # Calculate RFM_Score 74 | rfm['RFM_Score'] = rfm[['R','F','M']].sum(axis=1) 75 | print(rfm['RFM_Score'].head()) 76 | 77 | 78 | # Define rfm_level function 79 | def rfm_level(df): 80 | if df['RFM_Score'] >= 9: 81 | return 'Can\'t Loose Them' 82 | elif ((df['RFM_Score'] >= 8) and (df['RFM_Score'] < 9)): 83 | return 'Champions' 84 | elif ((df['RFM_Score'] >= 7) and (df['RFM_Score'] < 8)): 85 | return 'Loyal' 86 | elif ((df['RFM_Score'] >= 6) and (df['RFM_Score'] < 7)): 87 | return 'Potential' 88 | elif ((df['RFM_Score'] >= 5) and (df['RFM_Score'] < 6)): 89 | return 'Promising' 90 | elif ((df['RFM_Score'] >= 4) and (df['RFM_Score'] < 5)): 91 | return 'Needs Attention' 92 | else: 93 | return 'Require Activation'# Create a new variable RFM_Level 94 | rfm['RFM_Level'] = rfm.apply(rfm_level, axis=1)# Print the header with top 5 rows to the console 95 | rfm.head() 96 | 97 | # Calculate average values for each RFM_Level, and return a size of each segment 98 | rfm_level_agg = rfm.groupby('RFM_Level').agg({ 99 | 'Recency': 'mean', 100 | 'Frequency': 'mean', 101 | 'MonetaryValue': ['mean', 'count'] 102 | }).round(1)# Print the aggregated dataset 103 | print(rfm_level_agg) 104 | 105 | # Heatmap 106 | rfm_level_agg.columns = rfm_level_agg.columns.droplevel() 107 | rfm_level_agg.columns = ['RecencyMean','FrequencyMean','MonetaryMean', 'Count']#Create our plot and resize it. 108 | fig = plt.gcf() 109 | ax = fig.add_subplot() 110 | fig.set_size_inches(16, 9) 111 | squarify.plot(sizes=rfm_level_agg['Count'], 112 | label=['Can\'t Loose Them', 113 | 'Champions', 114 | 'Loyal', 115 | 'Needs Attention', 116 | 'Potential', 117 | 'Promising', 118 | 'Require Activation'], alpha=.6 ) 119 | plt.title("RFM Segments",fontsize=18,fontweight="bold") 120 | plt.axis('off') 121 | #plt.show() 122 | return plt 123 | -------------------------------------------------------------------------------- /Customer Loyalty Program/assets/custom.css: -------------------------------------------------------------------------------- 1 | /*Fonts ––––––––––––––––––––––––––––––––––––––––––––––––––*/ 2 | @import url('https://fonts.googleapis.com/css?family=Roboto&display=swap'); 3 | 4 | body{ 5 | margin: 0px; 6 | padding: 0px; 7 | background-color: #fcfcfc; 8 | font-family: 'Roboto'; 9 | color: #203cb3; 10 | } 11 | 12 | .study-browser-banner { 13 | background: rgb(2,21,70); 14 | color: white; 15 | padding: 12px; 16 | padding-left: 2px; 17 | } 18 | 19 | .div-logo{ 20 | display: inline-block; 21 | float: right; 22 | } 23 | 24 | .logo{ 25 | height: 35px; 26 | padding: 6px; 27 | margin-top: 3px; 28 | } 29 | 30 | .h2-title, .h2-title-mobile{ 31 | font-family: 'Roboto'; 32 | display: inline-block; 33 | letter-spacing: 3.8px; 34 | font-weight: 800; 35 | font-size: 20px; 36 | } 37 | 38 | .h2-title-mobile{ 39 | display: none; 40 | } 41 | 42 | h5, h6{ 43 | font-family: 'Roboto'; 44 | font-weight: 600; 45 | font-size: 16px; 46 | } 47 | 48 | h5{ 49 | padding-left: 42px; 50 | } 51 | 52 | .alert { 53 | padding: 20px; 54 | background-color: #f44336; 55 | color: white; 56 | } 57 | 58 | .bg-white{ 59 | background-color: #fcfcfc; 60 | padding: 24px 32px; 61 | height: 400px; 62 | } 63 | 64 | .app-body{ 65 | margin-left: 4%; 66 | } 67 | 68 | .card{ 69 | padding:48px 24px 48px 24px; 70 | margin-left: 4%; 71 | } 72 | 73 | .card-left{ 74 | padding:48px 24px 48px 24px; 75 | margin-left: 0px; 76 | } 77 | 78 | .padding-top-bot{ 79 | padding-top: 2px; 80 
| padding-bottom: 2px; 81 | } 82 | 83 | .upload{ 84 | width: 100%; 85 | line-height: 60px; 86 | border-width: 1px; 87 | border-style: dashed; 88 | border-radius: 5px; 89 | text-align: center; 90 | } 91 | 92 | .upload p, .upload a { 93 | display: inline; 94 | } 95 | 96 | .Select-control{ 97 | border: 1px solid #203cb3; 98 | } 99 | 100 | @media only screen and (max-width: 320px){ 101 | .Select-menu-outer, .Select-value{ 102 | font-size: 10.5px; 103 | } 104 | .upload{ 105 | padding: 5px; 106 | } 107 | } 108 | 109 | /* mobile */ 110 | @media only screen and (max-width: 768px) { 111 | .upload{ 112 | line-height: 60px; 113 | border-width: 1px; 114 | border-style: dashed; 115 | border-radius: 5px; 116 | text-align: center; 117 | font-size: small; 118 | } 119 | 120 | .columns{ 121 | width: 100%; 122 | } 123 | 124 | .card, .card-left{ 125 | padding: 24px; 126 | margin: 0px; 127 | } 128 | 129 | .bg-white{ 130 | height: auto; 131 | } 132 | 133 | .study-browser-banner{ 134 | padding-left: 24px; 135 | } 136 | 137 | .logo{ 138 | height: 28px; 139 | padding-left:0px; 140 | padding-bottom:0px; 141 | } 142 | 143 | .div-logo{ 144 | float: left; 145 | display: block; 146 | width: 100%; 147 | } 148 | 149 | .h2-title{ 150 | display:none; 151 | } 152 | 153 | .h2-title-mobile{ 154 | display:block; 155 | float:left; 156 | } 157 | 158 | .app-body{ 159 | margin-left: 0px; 160 | } 161 | 162 | .four.columns { width: 100%; } 163 | .eight.columns { width: 100%; } 164 | 165 | .columns{ 166 | text-align: center; 167 | } 168 | 169 | .user-control{ 170 | padding-top: 24px; 171 | padding-bottom: 24px; 172 | 173 | 174 | } 175 | } 176 | -------------------------------------------------------------------------------- /Customer Loyalty Program/assets/dash-logo-new.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Customer Loyalty Program/assets/dash-logo-new.png -------------------------------------------------------------------------------- /Customer Loyalty Program/assets/resizing_script.js: -------------------------------------------------------------------------------- 1 | if (!window.dash_clientside) { 2 | window.dash_clientside = {}; 3 | } 4 | window.dash_clientside.clientside = { 5 | resize: function(value) { 6 | console.log("resizing..."); // for testing 7 | setTimeout(function() { 8 | window.dispatchEvent(new Event("resize")); 9 | console.log("fired resize"); 10 | }, 500); 11 | return null; 12 | } 13 | }; 14 | -------------------------------------------------------------------------------- /Customer Loyalty Program/assets/styles.css: -------------------------------------------------------------------------------- 1 | .js-plotly-plot .plotly .modebar { 2 | padding-top: 5%; 3 | margin-right: 3.5%; 4 | } 5 | 6 | body { 7 | background-color: #f2f2f2; 8 | margin: 5%; 9 | } 10 | 11 | .two.columns { 12 | width: 16.25%; 13 | } 14 | 15 | .column, 16 | .columns { 17 | margin-left: 0.5%; 18 | } 19 | 20 | .pretty_container { 21 | border-radius: 2px; 22 | background-color: #f9f9f9; 23 | margin: 10px; 24 | padding: 2px; 25 | position: relative; 26 | box-shadow: 2px 2px 2px lightgrey; 27 | } 28 | 29 | .bare_container { 30 | margin: 0 0 0 0; 31 | padding: 0 0 0 0; 32 | } 33 | 34 | .dcc_control { 35 | margin: 0; 36 | padding: 5px; 37 | width: calc(100%-40px); 38 | } 39 | 40 | .control_label { 41 | margin: 0; 42 | padding: 10px; 43 | padding-bottom: 0px; 44 | margin-bottom: 0px; 45 | width: 
calc(100%-40px); 46 | } 47 | 48 | .rc-slider { 49 | margin-left: 0px; 50 | padding-left: 0px; 51 | } 52 | 53 | .flex-display { 54 | display: flex; 55 | } 56 | 57 | .container-display { 58 | display: flex; 59 | } 60 | 61 | #individual_graph, 62 | #aggregate_graph { 63 | width: calc(100% - 30px); 64 | position: absolute; 65 | } 66 | 67 | #count_graph { 68 | position: absolute; 69 | height: calc(100% - 30px); 70 | width: calc(100% - 30px); 71 | } 72 | 73 | #countGraphContainer { 74 | flex: 5; 75 | position: relative; 76 | } 77 | 78 | #header { 79 | align-items: center; 80 | } 81 | 82 | #learn-more-button { 83 | text-align: center; 84 | height: 100%; 85 | padding: 0 20px; 86 | text-transform: none; 87 | font-size: 15px; 88 | float: right; 89 | margin-right: 10px; 90 | margin-top: 30px; 91 | } 92 | #title { 93 | text-align: center; 94 | } 95 | 96 | .mini_container { 97 | border-radius: 5px; 98 | background-color:#e4e7e7; 99 | margin: 10px; 100 | padding: 15px; 101 | position: relative; 102 | box-shadow: 2px 2px 2px lightgrey; 103 | } 104 | 105 | #right-column { 106 | display: flex; 107 | flex-direction: column; 108 | } 109 | 110 | #wells { 111 | flex: 1; 112 | } 113 | 114 | #gas { 115 | flex: 1; 116 | } 117 | 118 | #aggregate_data { 119 | align-items: center; 120 | } 121 | 122 | #oil { 123 | flex: 1; 124 | } 125 | 126 | #water { 127 | flex: 1; 128 | } 129 | 130 | #tripleContainer { 131 | display: flex; 132 | flex: 3; 133 | } 134 | 135 | #mainContainer { 136 | display: flex; 137 | flex-direction: column; 138 | } 139 | 140 | #pie_graph > div > div > svg:nth-child(3) > g.infolayer > g.legend { 141 | pointer-events: all; 142 | transform: translate(30px, 349px); 143 | } 144 | -------------------------------------------------------------------------------- /Customer Loyalty Program/data/data.csv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Customer Loyalty Program/data/data.csv -------------------------------------------------------------------------------- /Customer Loyalty Program/dataPreprocess.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import io 3 | import dash 4 | import dash_core_components as dcc 5 | import dash_html_components as html 6 | import plotly.graph_objs as go 7 | import pandas as pd 8 | import numpy as np 9 | import pathlib 10 | import logging 11 | import dash_table 12 | import dash_bootstrap_components as dbc 13 | from dash.dependencies import Input, Output, State 14 | from RFM import * 15 | import dash_daq as daq 16 | import plotly.figure_factory as ff 17 | 18 | 19 | # Logging in DEBUG mode in the file RFM.log 20 | logging.basicConfig(filename= 'RFM.log', level = logging.DEBUG,format='%(asctime)s:%(levelname)s:%(filename)s:%(funcName)s:%(message)s') 21 | 22 | 23 | def data_load(): 24 | 25 | try: 26 | # load dataset 27 | df = pd.read_csv('./data/data.csv', encoding = "ISO-8859-1")# Convert InvoiceDate from object to datetime format 28 | df['InvoiceDate'] = pd.to_datetime(df['InvoiceDate']) 29 | df.dropna() 30 | 31 | 32 | # prepare dataset and create TotalSum column for df dataset 33 | df['TotalSum'] = df['Quantity'] * df['UnitPrice']# Create snapshot date 34 | snapshot_date = df['InvoiceDate'].max() + timedelta(days=1) 35 | #print(snapshot_date)# Grouping by CustomerID 36 | data_process = df.groupby(['CustomerID']).agg({ 37 | 'InvoiceDate': lambda x: (snapshot_date - x.max()).days, 38 
| 'InvoiceNo': 'count', 39 | 'TotalSum': 'sum'})# Rename the columns 40 | data_process.rename(columns={'InvoiceDate': 'Recency', 41 | 'InvoiceNo': 'Frequency', 42 | 'TotalSum': 'MonetaryValue'}, inplace=True) 43 | 44 | 45 | 46 | # --Calculate R and F groups--# Create labels for Recency and Frequency 47 | r_labels = range(4, 0, -1); f_labels = range(1, 5)# Assign these labels to 4 equal percentile groups 48 | r_groups = pd.qcut(data_process['Recency'], q=4, labels=r_labels)# Assign these labels to 4 equal percentile groups 49 | f_groups = pd.qcut(data_process['Frequency'], q=4, labels=f_labels)# Create new columns R and F 50 | data_process = data_process.assign(R = r_groups.values, F = f_groups.values) 51 | data_process.head() 52 | 53 | # Create labels for MonetaryValue 54 | m_labels = range(1, 5)# Assign these labels to three equal percentile groups 55 | m_groups = pd.qcut(data_process['MonetaryValue'], q=4, labels=m_labels)# Create new column M 56 | data_process = data_process.assign(M = m_groups.values) 57 | 58 | 59 | # Concat RFM quartile values to create RFM Segments 60 | def join_rfm(x): return str(x['R']) + str(x['F']) + str(x['M']) 61 | data_process['RFM_Segment_Concat'] = data_process.apply(join_rfm, axis=1) 62 | rfm = data_process 63 | rfm.head() 64 | 65 | 66 | # Count num of unique segments 67 | rfm_count_unique = rfm.groupby('RFM_Segment_Concat')['RFM_Segment_Concat'].nunique() 68 | #print(rfm_count_unique.sum()) 69 | 70 | 71 | # Calculate RFM_Score 72 | rfm['RFM_Score'] = rfm[['R','F','M']].sum(axis=1) 73 | #print(rfm['RFM_Score'].head()) 74 | 75 | 76 | # Define rfm_level function 77 | def rfm_level(df): 78 | try: 79 | if df['RFM_Score'] >= 9: 80 | return 'Cant Loose Them' 81 | elif ((df['RFM_Score'] >= 8) and (df['RFM_Score'] < 9)): 82 | return 'Champions' 83 | elif ((df['RFM_Score'] >= 7) and (df['RFM_Score'] < 8)): 84 | return 'Loyal' 85 | elif ((df['RFM_Score'] >= 6) and (df['RFM_Score'] < 7)): 86 | return 'Potential' 87 | elif ((df['RFM_Score'] >= 5) and (df['RFM_Score'] < 6)): 88 | return 'Promising' 89 | elif ((df['RFM_Score'] >= 4) and (df['RFM_Score'] < 5)): 90 | return 'Needs Attention' 91 | else: 92 | return 'Require Activation'# Create a new variable RFM_Level 93 | except: 94 | logging.exception('Something went wrong with rfm_level segmentation logic') 95 | 96 | rfm['RFM_Level'] = rfm.apply(rfm_level, axis=1)# Print the header with top 5 rows to the console 97 | rfm.reset_index(inplace=True) 98 | 99 | 100 | # Calculate average values for each RFM_Level, and return a size of each segment 101 | rfm_level_agg = rfm.groupby('RFM_Level').agg({ 102 | 'Recency': 'mean', 103 | 'Frequency': 'mean', 104 | 'MonetaryValue': 'mean' 105 | #'Monetary': 'count' 106 | 107 | }).round(1)# Print the aggregated dataset 108 | 109 | rfm_level_agg.reset_index(inplace = True) 110 | #rfm_level_agg.set_index('RFM_Level') 111 | 112 | df_head = df.head(1500) 113 | df_rfm = rfm 114 | 115 | return [df_head, df_rfm, rfm_level_agg, df] 116 | 117 | except Exception as e: 118 | logging.exception('Something went wrong with data preprocessing: ', e) 119 | -------------------------------------------------------------------------------- /Customer Loyalty Program/layout.py: -------------------------------------------------------------------------------- 1 | import dash_table 2 | import dataPreprocess 3 | import pandas as pd 4 | from datetime import timedelta 5 | 6 | tmp_df = dataPreprocess.data_load() 7 | 8 | 9 | df9 = tmp_df[3][['CustomerID', 'Country']] 10 | df11 = tmp_df[1][['CustomerID', 'RFM_Score', 
'RFM_Level']] 11 | df9.drop_duplicates(subset = ['CustomerID'], inplace=True) 12 | df12 = pd.merge(df11, df9, on='CustomerID', how='left') 13 | 14 | 15 | def tbl_layout(): 16 | tbl_score = dash_table.DataTable( 17 | id='id_score', 18 | columns=[{"name": i, "id": i} for i in tmp_df[0].columns], 19 | data=tmp_df[0].to_dict('records'), 20 | 21 | style_data_conditional=[ 22 | { 23 | 'if': {'row_index': 'odd'}, 24 | 'backgroundColor': 'rgb(248, 248, 248)' 25 | } 26 | ], 27 | style_header={ 28 | 'backgroundColor': 'rgb(230, 230, 230)', 29 | 'fontWeight': 'bold' 30 | }, 31 | 32 | editable=True, 33 | filter_action="native", 34 | sort_action="native", 35 | sort_mode="multi", 36 | column_selectable="single", 37 | row_selectable="multi", 38 | row_deletable=True, 39 | selected_columns=[], 40 | selected_rows=[], 41 | page_action="native", 42 | page_current= 0, 43 | page_size= 10, 44 | ) 45 | 46 | tbl_RFMVal = dash_table.DataTable( 47 | id='id_RFMVal', 48 | #columns=[{"name": i, "id": i} for i in tmp_df[1].columns], 49 | columns=[{"name": i, "id": i} for i in df12.columns], 50 | #data=tmp_df[1].to_dict('records'), 51 | data = df12.to_dict('records'), 52 | style_data_conditional=[ 53 | { 54 | 'if': {'row_index': 'odd'}, 55 | 'backgroundColor': 'rgb(248, 248, 248)' 56 | } 57 | ], 58 | style_header={ 59 | 'backgroundColor': 'rgb(230, 230, 230)', 60 | 'fontWeight': 'bold', 61 | #'border': '1px solid black' 62 | }, 63 | 64 | # editable=True, 65 | filter_action="native", 66 | sort_action="native", 67 | # sort_mode="multi", 68 | # column_selectable="single", 69 | #row_selectable="multi", 70 | # row_deletable=True, 71 | # selected_columns=[], 72 | # selected_rows=[], 73 | # page_action="native", 74 | page_current= 0, 75 | page_size= 6, 76 | ) 77 | 78 | tbl_RFMAggregate = dash_table.DataTable( 79 | id='id_RFMAggregate', 80 | columns=[{"name": i, "id": i} for i in tmp_df[2].columns], 81 | data=tmp_df[2].to_dict('records'), 82 | 83 | style_data_conditional=[ 84 | { 85 | 'if': {'row_index': 'odd'}, 86 | 'backgroundColor': 'rgb(248, 248, 248)' 87 | } 88 | ], 89 | style_header={ 90 | 'backgroundColor': 'rgb(230, 230, 230)', 91 | 'fontWeight': 'bold' 92 | } 93 | 94 | # editable=True, 95 | # filter_action="native", 96 | # sort_action="native", 97 | # sort_mode="multi", 98 | # column_selectable="single", 99 | # row_selectable="multi", 100 | # row_deletable=True, 101 | # selected_columns=[], 102 | # selected_rows=[], 103 | # page_action="native", 104 | # page_current= 0, 105 | # page_size= 10, 106 | ) 107 | 108 | return [tbl_score, tbl_RFMVal, tbl_RFMAggregate] 109 | 110 | 111 | -------------------------------------------------------------------------------- /Dependency_mapper/.gitkeep: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Games/.gitkeep: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Games/Card Deck.py: -------------------------------------------------------------------------------- 1 | # print("Hello") 2 | import random 3 | 4 | class Card(object): 5 | def __init__(self, suit, val): 6 | self.suit = suit 7 | self.value = val 8 | 9 | def show(self): 10 | print(f'{self.value} of {self.suit}') 11 | 12 | class Deck(object): 13 | def __init__(self): 14 | self.cards = [] 15 | self.build() 16 | 17 | def build(self): 18 | for s in ['Spades', "Clubs"," Diamonds", "Hearts"]: 19 
| for v in range(1, 14): 20 | self.cards.append(Card(s, v)) 21 | 22 | def show(self): 23 | for c in self.cards: 24 | c.show() 25 | 26 | 27 | def shuffle(self): 28 | for i in range(len(self.cards)-1, 0, -1): 29 | r = random.randint(0, i) 30 | self.cards[i], self.cards[r] = self.cards[r], self.cards[i] 31 | 32 | def drawCard(self): 33 | return self.cards.pop() 34 | 35 | 36 | class Player(object): 37 | def __init__(self, name): 38 | self.hand = [] 39 | self.name = name 40 | 41 | def draw(self, deck): 42 | self.hand.append(deck.drawCard()) 43 | return self 44 | 45 | 46 | def showHand(self): 47 | for card in self.hand: 48 | card.show() 49 | 50 | def discard(self): 51 | return self.hand.pop() 52 | 53 | # card = Card("clubs", 6) 54 | # card.show() 55 | 56 | deck = Deck() 57 | 58 | deck.shuffle() 59 | # deck.show() 60 | 61 | 62 | bob = Player('Bob') 63 | bob.draw(deck).draw(deck).draw(deck) 64 | bob.showHand() 65 | 66 | bob.draw(deck) 67 | 68 | # card = deck.drawCard() 69 | # card.show() 70 | 71 | 72 | -------------------------------------------------------------------------------- /Games/Pong_Game.py: -------------------------------------------------------------------------------- 1 | import turtle 2 | 3 | wn = turtle.Screen() 4 | wn.title("The pong game") 5 | wn.bgcolor("black") 6 | wn.setup(width =800, height = 600) 7 | wn.tracer(0) 8 | 9 | # score 10 | 11 | score_a = 0 12 | score_b = 0 13 | 14 | 15 | 16 | # Paddle A 17 | paddle_a = turtle.Turtle() 18 | paddle_a.speed(0) 19 | paddle_a.shape("square") 20 | paddle_a.color("white") 21 | paddle_a.shapesize(stretch_wid=5, stretch_len=1) 22 | paddle_a.penup() 23 | paddle_a.goto(-350,0) 24 | 25 | #Paddle B 26 | paddle_b = turtle.Turtle() 27 | paddle_b.speed(0) 28 | paddle_b.shape("square") 29 | paddle_b.color("white") 30 | paddle_b.shapesize(stretch_wid=5, stretch_len=1) 31 | paddle_b.penup() 32 | paddle_b.goto(350,0) 33 | 34 | 35 | 36 | # Ball 37 | ball = turtle.Turtle() 38 | ball.speed(0) 39 | ball.shape("square") 40 | ball.color("white") 41 | ball.penup() 42 | ball.goto(0,0) 43 | ball.dx = 2 44 | ball.dy = 2 45 | 46 | 47 | pen = turtle.Turtle() 48 | pen.speed(0) 49 | pen.color("white") 50 | pen.penup() 51 | pen.hideturtle() 52 | pen.goto(0,260) 53 | pen.write("Player A: 0 Plyaer B: 0", align="center", font = ("Courier",24, "normal")) 54 | 55 | # Function to moe the paddles 56 | def paddle_a_up(): 57 | y = paddle_a.ycor() 58 | y += 20 59 | paddle_a.sety(y) 60 | 61 | def paddle_a_down(): 62 | y = paddle_a.ycor() 63 | y -= 20 64 | paddle_a.sety(y) 65 | 66 | 67 | def paddle_b_up(): 68 | y = paddle_b.ycor() 69 | y += 20 70 | paddle_b.sety(y) 71 | 72 | def paddle_b_down(): 73 | y = paddle_b.ycor() 74 | y -= 20 75 | paddle_b.sety(y) 76 | 77 | # Keyboard binding 78 | wn.listen() 79 | wn.onkeypress(paddle_a_up, "w") 80 | wn.onkeypress(paddle_a_down, "s") 81 | wn.onkeypress(paddle_b_up, "Up") 82 | wn.onkeypress(paddle_b_down, "Down") 83 | 84 | # Main game loop 85 | while True: 86 | wn.update() 87 | 88 | # Move the ball 89 | ball.setx(ball.xcor() + ball.dx) 90 | ball.sety(ball.ycor() + ball.dy) 91 | 92 | # Border checking 93 | if ball.ycor() > 290: 94 | ball.sety(290) 95 | ball.dy *= -1 96 | 97 | if ball.ycor() < -290: 98 | ball.sety(-290) 99 | ball.dy *= -1 100 | 101 | if ball.xcor() > 390: 102 | ball.goto(0,0) 103 | ball.dx *= -1 104 | score_a +=1 105 | pen.clear() 106 | pen.write("Player A: {} Plyaer B: {}".format(score_a, score_b), align="center", font = ("Courier",24, "normal")) 107 | 108 | if ball.xcor() < -390: 109 | ball.setx(-390) 110 | ball.dx *= -1 
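        # Editorial note: unlike the right-edge branch above, which resets the
        # ball to the center with ball.goto(0, 0) when player A scores, this
        # branch keeps the ball clamped at x = -390 before reversing direction.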
111 | score_b += 1 112 | pen.clear() 113 | pen.write("Player A: {} Plyaer B: {}".format(score_a, score_b), align="center", font = ("Courier",24, "normal")) 114 | 115 | # paddle and balls collusion 116 | 117 | if (ball.xcor() > 340 and ball.xcor() <350) and (ball.ycor() < paddle_b.ycor()+40 and ball.ycor() > paddle_b.ycor() -40): 118 | ball.setx(340) 119 | ball.dx *= -1 120 | 121 | 122 | if (ball.xcor() < -340 and ball.xcor() > -350) and (ball.ycor() < paddle_a.ycor()+40 and ball.ycor() > paddle_a.ycor() -40): 123 | ball.setx(-340) 124 | ball.dx *= -1 125 | 126 | 127 | -------------------------------------------------------------------------------- /Games/Turtle_Race.py: -------------------------------------------------------------------------------- 1 | import time 2 | import turtle 3 | from turtle import * 4 | from random import randint 5 | 6 | 7 | setup(800,500) 8 | title("Turtle Race") 9 | bgcolor("forestgreen") 10 | speed(0) 11 | 12 | penup() 13 | goto(-100,205) 14 | color("white") 15 | write("TURTLE RACE", font= ("Arial", 20, "bold")) 16 | 17 | #DIRT 18 | goto(-350, 200) 19 | pendown() 20 | color("chocolate") 21 | begin_fill() 22 | for i in range(2): 23 | forward(700) 24 | right(90) 25 | forward(400) 26 | right(90) 27 | end_fill() 28 | 29 | # FINISH LINE 30 | gap_size = 20 31 | shape("square") 32 | penup() 33 | 34 | color("white") 35 | for i in range(10): 36 | goto(250, (170-(i * gap_size *2))) 37 | stamp() 38 | 39 | for i in range(10): 40 | goto(250 + gap_size, ((210- gap_size) - (i * gap_size *2))) 41 | stamp() 42 | 43 | 44 | # BLACK squares 45 | color("black") 46 | for i in range(10): 47 | goto(250, (190-(i * gap_size *2))) 48 | stamp() 49 | 50 | for i in range(10): 51 | goto(251 + gap_size, ((190- gap_size) - (i * gap_size *2))) 52 | stamp() 53 | 54 | 55 | # TURTLE 1 - BLUE 56 | blue_turtle = Turtle() 57 | blue_turtle.color("cyan") 58 | blue_turtle.shape("turtle") 59 | blue_turtle.shapesize(1.5) 60 | blue_turtle.penup() 61 | blue_turtle.goto(-300,150) 62 | blue_turtle.pendown() 63 | 64 | 65 | # TURTLE 2 - PINK 66 | pink_turtle = Turtle() 67 | pink_turtle.color("magenta") 68 | pink_turtle.shape("turtle") 69 | pink_turtle.shapesize(1.5) 70 | pink_turtle.penup() 71 | pink_turtle.goto(-300,50) 72 | pink_turtle.pendown() 73 | 74 | 75 | # TURTLE 3 - YELLOW 76 | yellow_turtle = Turtle() 77 | yellow_turtle.color("yellow") 78 | yellow_turtle.shape("turtle") 79 | yellow_turtle.shapesize(1.5) 80 | yellow_turtle.penup() 81 | yellow_turtle.goto(-300,-50) 82 | yellow_turtle.pendown() 83 | 84 | 85 | # TURTLE 4 - GREEN 86 | green_turtle = Turtle() 87 | green_turtle.color("lime") 88 | green_turtle.shape("turtle") 89 | green_turtle.shapesize(1.5) 90 | green_turtle.penup() 91 | green_turtle.goto(-300,-150) 92 | green_turtle.pendown() 93 | 94 | 95 | time.sleep(1) 96 | 97 | while blue_turtle.xcor() <= 230 and blue_turtle.xcor() <= 230 and yellow_turtle.xcor() <= 230 and green_turtle.xcor() <= 230 : 98 | blue_turtle.forward(randint(1,10)) 99 | pink_turtle.forward(randint(1,10)) 100 | yellow_turtle.forward(randint(1,10)) 101 | green_turtle.forward(randint(1,10)) 102 | 103 | 104 | if blue_turtle.xcor() > pink_turtle.xcor() and blue_turtle.xcor() > yellow_turtle.xcor() and blue_turtle.xcor() > green_turtle.xcor(): 105 | print("Blue turtle wins!") 106 | for i in range(72): 107 | blue_turtle.right(5) 108 | blue_turtle.shapesize(2.5) 109 | elif pink_turtle.xcor() > blue_turtle.xcor() and pink_turtle.xcor() > yellow_turtle.xcor() and pink_turtle.xcor() > green_turtle.xcor(): 110 | print("Pink turtle wins!") 
111 | for i in range(72): 112 | pink_turtle.right(5) 113 | pink_turtle.shapesize(2.5) 114 | elif yellow_turtle.xcor() > blue_turtle.xcor() and yellow_turtle.xcor() > pink_turtle.xcor() and yellow_turtle.xcor() > green_turtle.xcor(): 115 | print("Yellow turtle wins!") 116 | for i in range(72): 117 | yellow_turtle.right(5) 118 | yellow_turtle.shapesize(2.5) 119 | else: 120 | print("Green turtle wins!") 121 | for i in range(72): 122 | green_turtle.right(5) 123 | green_turtle.shapesize(2.5) 124 | 125 | turtle.done() -------------------------------------------------------------------------------- /Generative AI/AI-Flowcharts/.gitkeep: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Generative AI/AI-Flowcharts/ai-workflow.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Generative AI/AI-Flowcharts/ai-workflow.gif -------------------------------------------------------------------------------- /Generative AI/AI-Flowcharts/app.py: -------------------------------------------------------------------------------- 1 | import dash 2 | from dash import dcc, html, Input, Output, State 3 | import dash_bootstrap_components as dbc 4 | import base64 5 | import os 6 | import google.generativeai as genai 7 | import requests 8 | 9 | # Initialize Dash app 10 | app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP]) 11 | 12 | # Layout of the app 13 | app.layout = dbc.Container( 14 | [ 15 | dbc.Row( 16 | [ 17 | dbc.Col( 18 | [ 19 | html.H2("Description your workflow:"), 20 | dcc.Textarea( 21 | id="description-input", 22 | style={"width": "100%", "height": 150}, 23 | ), 24 | dbc.Button( 25 | "Submit", 26 | id="submit-button", 27 | color="primary", 28 | className="mt-3", 29 | ), 30 | dcc.Loading( 31 | id="loading-spinner", 32 | type="default", 33 | children=[ 34 | html.Div(id="loading-output", style={"display": "none"}) 35 | ], 36 | ), 37 | ], 38 | width=12, 39 | ), 40 | ] 41 | ), 42 | dbc.Row( 43 | [ 44 | dbc.Col( 45 | [ 46 | html.H2("Generated Flowchart"), 47 | html.Img( 48 | id="flowchart-image", 49 | style={"width": "50%"}, 50 | width="auto", 51 | height="auto", 52 | ), 53 | ], 54 | width=12, 55 | ), 56 | ] 57 | ), 58 | ], 59 | fluid=True, 60 | ) 61 | 62 | 63 | def get_gemini_response(input): 64 | """ 65 | Generate flowchart DOT code based on the user input. 66 | 67 | Parameters: 68 | input (str): Description of the workflow. 69 | 70 | Returns: 71 | str: Generated flowchart DOT code. 72 | """ 73 | try: 74 | model = genai.GenerativeModel("gemini-1.5-flash-latest") 75 | response = model.generate_content(input) 76 | return response.text 77 | except Exception as e: 78 | print(f"Error generating flowchart: {e}") 79 | return None 80 | 81 | 82 | def get_image_graphviz(dot_code): 83 | """ 84 | Convert flowchart DOT code to PNG image. 85 | 86 | Parameters: 87 | dot_code (str): Flowchart DOT code. 88 | 89 | Returns: 90 | str: Base64-encoded PNG image. 
91 | """ 92 | try: 93 | # Remove triple backticks and 'dot' text 94 | formatted_out = dot_code.strip("```dot") 95 | # Remove leading and trailing whitespace 96 | formatted_out = formatted_out.strip() 97 | 98 | # Find the line containing the rankdir attribute 99 | lines = formatted_out.split("\n") 100 | for i, line in enumerate(lines): 101 | if "rankdir" in line: 102 | # Modify the rankdir attribute to display the workflow vertically 103 | lines[i] = " rankdir=TB;" 104 | 105 | # Reconstruct the DOT code with the modified rankdir attribute 106 | formatted_out = "\n".join(lines) 107 | 108 | quickchart_url = "https://quickchart.io/graphviz" 109 | 110 | dot_code = formatted_out 111 | # Prepare the payload for QuickChart 112 | post_data = { 113 | "graph": dot_code, 114 | "format": "png", 115 | } # Specify that we want a PNG image 116 | 117 | # Send the POST request to QuickChart API 118 | response = requests.post(quickchart_url, json=post_data, verify=False) 119 | response.raise_for_status() 120 | 121 | # Check if the response content is PNG 122 | content_type = response.headers.get("content-type", "").lower() 123 | if "image/png" in content_type: 124 | # Save PNG content to a file 125 | png_content = response.content 126 | with open("flowchart.png", "wb") as f: 127 | f.write(png_content) 128 | print("Flowchart saved as flowchart.png") 129 | 130 | # Encode PNG content to base64 131 | encoded_image = base64.b64encode(png_content).decode("ascii") 132 | return f"data:image/png;base64,{encoded_image}" 133 | else: 134 | print("Unexpected response content type:", content_type) 135 | return None 136 | except Exception as e: 137 | print(f"Error converting DOT code to PNG: {e}") 138 | return None 139 | 140 | 141 | # Define callback function to update flowchart image 142 | @app.callback( 143 | [Output("flowchart-image", "src"), Output("loading-output", "children")], 144 | Input("submit-button", "n_clicks"), 145 | State("description-input", "value"), 146 | ) 147 | def update_flowchart(n_clicks, description): 148 | """ 149 | Update the flowchart image when the Submit button is clicked. 150 | 151 | Parameters: 152 | n_clicks (int): Number of times the Submit button has been clicked. 153 | description (str): Description of the workflow. 154 | 155 | Returns: 156 | str: Base64-encoded PNG image. 157 | str: Empty string. 158 | """ 159 | if n_clicks is None or description is None: 160 | return dash.no_update, dash.no_update 161 | 162 | # Generate flowchart DOT code based on user input 163 | description += ". Please use Graphviz DOT Language. Try to make it as detailed as possible with all the steps involved in the process." 
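    # Illustrative note (editorial addition, not part of the original file): the
    # model is expected to return Graphviz DOT source along the lines of
    #     digraph Workflow { rankdir=TB; "Start" -> "Step 1" -> "End"; }
    # which is then cleaned up and rendered to PNG by the get_image_graphviz()
    # call below.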
164 | 165 | dot_code = get_gemini_response(description) 166 | 167 | if dot_code is None: 168 | return dash.no_update, "Error generating flowchart" 169 | 170 | # Convert DOT code to PNG image and get base64-encoded string 171 | encoded_image = get_image_graphviz(dot_code) 172 | 173 | if encoded_image is None: 174 | return dash.no_update, "Error converting DOT code to PNG" 175 | 176 | return encoded_image, "" 177 | 178 | 179 | if __name__ == "__main__": 180 | app.run_server(debug=True) 181 | -------------------------------------------------------------------------------- /Generative AI/AI-Flowcharts/requirements.txt: -------------------------------------------------------------------------------- 1 | dash 2 | dash_bootstrap_components 3 | langchain-google-genai 4 | requests -------------------------------------------------------------------------------- /Generative AI/Chatbot With OpenAI/.gitkeep: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Generative AI/Chatbot With OpenAI/app.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pandas as pd 3 | import openai 4 | from datetime import datetime 5 | from dash import Dash, dcc, html, Input, Output, State 6 | import dash_bootstrap_components as dbc 7 | from dash.exceptions import PreventUpdate 8 | 9 | # Set up OpenAI API credentials 10 | # Create .env file and insert your api key like so: 11 | OPENAI_API_KEY = "YOUR_OPENAI_API_KEY" 12 | # openai.api_key = os.getenv("OPENAI_API_KEY") # pip install python-dotenv 13 | openai.api_key = OPENAI_API_KEY 14 | # Initialize the ChatGPT model 15 | model_engine = "text-davinci-003" 16 | 17 | # Instantiate the Dash app 18 | app = Dash(__name__) 19 | 20 | answer_list = [] 21 | df = pd.DataFrame() 22 | 23 | app.layout = dbc.Container( 24 | [ 25 | dbc.Row( 26 | [ 27 | dbc.Col( 28 | [ 29 | html.H1( 30 | "CHAT WITH GENERATIVE AI", 31 | style={ 32 | "text-align": "center", 33 | "font-size": 30, 34 | "font-weight": "bold", 35 | "color": "#2E86C1", 36 | }, 37 | ), 38 | ] 39 | ), 40 | ] 41 | ), 42 | dbc.Row( 43 | [ 44 | dcc.Textarea( 45 | id="input-text", 46 | style={"width": "50%", "height": 75, "margin-left": "350px"}, 47 | ) 48 | ] 49 | ), 50 | dbc.Row( 51 | [ 52 | dbc.Button( 53 | "SEND", 54 | id="submit-button", 55 | n_clicks=0, 56 | style={ 57 | "backgroundColor": "blue", 58 | "font-size": 12, 59 | "font-weight": "bold", 60 | "color": "white", 61 | "margin-left": "350px", 62 | }, 63 | ) 64 | ] 65 | ), 66 | html.Br(), 67 | dbc.Row( 68 | [ 69 | dcc.Loading( 70 | [ 71 | html.Div( 72 | id="output-text", 73 | style={ 74 | "margin-left": "350px", 75 | "font-size": 17, 76 | "font-weight": "bold", 77 | "color": "Teal", 78 | "width": "50%", 79 | }, 80 | ) 81 | ], 82 | type="default", 83 | ) 84 | ] 85 | ), 86 | html.Br(), 87 | html.Br(), 88 | html.Div( 89 | id="output-chat-history", 90 | style={ 91 | "text-align": "left", 92 | "margin-left": "350px", 93 | "font-size": 12, 94 | "font-weight": "bold", 95 | "color": "#2E86C1", 96 | }, 97 | ), 98 | html.Div( 99 | id="output-text-track", 100 | style={ 101 | "whiteSpace": "pre-line", 102 | "margin-left": "350px", 103 | "width": "50%", 104 | "margin-top": "10px", 105 | }, 106 | ), 107 | ] 108 | ) 109 | 110 | 111 | # Define the callback function 112 | @app.callback( 113 | [ 114 | Output("output-text", "children"), 115 | Output("output-text-track", "children"), 116 | Output("output-chat-history", 
"children"), 117 | ], 118 | Input("submit-button", "n_clicks"), 119 | State("input-text", "value"), 120 | ) 121 | def update_output(n_clicks, input_text): 122 | if n_clicks > 0: 123 | # Get the response from ChatGPT 124 | response = openai.Completion.create( 125 | engine=model_engine, 126 | prompt=f"{input_text}\n", 127 | max_tokens=4000, 128 | n=1, 129 | stop=None, 130 | temperature=0.7, 131 | ) 132 | 133 | generated_text = response.choices[0].text 134 | answer_list_time = f'\n{datetime.now().strftime("%Y-%m-%d %H:%M:%S")}: {input_text}\n{generated_text}\n' 135 | answer_list.insert(0, answer_list_time) 136 | 137 | return ( 138 | generated_text, 139 | answer_list, 140 | f"RECENT CHAT HISTORY ( {len(answer_list)} )", 141 | ) 142 | 143 | else: 144 | raise PreventUpdate 145 | 146 | 147 | if __name__ == "__main__": 148 | app.run_server(debug=True) 149 | -------------------------------------------------------------------------------- /Generative AI/Content Risk Analyzer/.gitkeep: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Generative AI/Content Risk Analyzer/app.py: -------------------------------------------------------------------------------- 1 | import os 2 | import dash 3 | from dash import dcc, html, Input, Output, State 4 | import plotly.graph_objs as go 5 | import dash_bootstrap_components as dbc 6 | from helper import get_analysis, get_verdict_message, get_severity_label 7 | 8 | 9 | 10 | # --------------------------------------------------------------------------- 11 | # Initialize Dash app with Bootstrap theme for enhanced UI 12 | # --------------------------------------------------------------------------- 13 | app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP]) 14 | app.title = "Azure Content Safety Checker" 15 | 16 | 17 | # --------------------------------------------------------------------------- 18 | # App Layout: Main UI structure using Dash Bootstrap Components 19 | # --------------------------------------------------------------------------- 20 | app.layout = dbc.Container( 21 | html.Div([ 22 | dbc.Row([ 23 | dbc.Col(html.H2("🔍 Smart Content Risk Analyzer"), className="text-center my-4") 24 | ]), 25 | dbc.Row([ 26 | dbc.Col([ 27 | dbc.Textarea( 28 | id='input-text', 29 | placeholder='Enter your text here...', 30 | style={'height': '150px', 'resize': 'none'} 31 | ), 32 | dbc.Button('Analyze Text', id='analyze-btn', color="primary", className="mt-3"), 33 | html.Div(id='output-alert', className="mt-4"), 34 | html.Div(id='graph-container', className="mt-4") 35 | ], width=12) 36 | ]) 37 | ], 38 | style={ 39 | 'border': '1px solid #dcdcdc', 40 | 'borderRadius': '12px', 41 | 'boxShadow': '2px 2px 8px rgba(0,0,0,0.05)', 42 | 'padding': '30px', 43 | 'backgroundColor': 'white' 44 | }), 45 | style={ 46 | 'maxWidth': '700px', 47 | 'margin': '40px auto', 48 | 'padding': '10px' 49 | }, 50 | fluid=True 51 | ) 52 | 53 | # --------------------------------------------------------------------------- 54 | # Callback: Analyze text and update UI with verdict and severity chart 55 | # --------------------------------------------------------------------------- 56 | @app.callback( 57 | [Output('output-alert', 'children'), 58 | Output('graph-container', 'children')], 59 | [Input('analyze-btn', 'n_clicks')], 60 | [State('input-text', 'value')] 61 | ) 62 | def analyze(n_clicks, input_text): 63 | """ 64 | Callback to analyze the input text when the button is 
clicked. 65 | Displays a verdict alert and a severity bar chart if applicable. 66 | """ 67 | if not input_text: 68 | return dbc.Alert("❗ Please enter some text.", color="info"), None 69 | 70 | analysis = get_analysis(input_text) 71 | if "error" in analysis: 72 | return dbc.Alert(f"Error: {analysis['error']}", color="danger"), None 73 | 74 | verdict = get_verdict_message(analysis) 75 | 76 | # If content is safe in all categories, skip the chart 77 | if all(score == 0 for score in analysis.values()): 78 | return verdict, None 79 | 80 | # Prepare horizontal bar chart for severity by category 81 | bars = go.Bar( 82 | y=list(analysis.keys()), 83 | x=list(analysis.values()), 84 | orientation='h', 85 | marker=dict( 86 | color=[ 87 | 'green' if val == 0 else 88 | 'orange' if val <= 4 else 89 | 'red' for val in analysis.values() 90 | ] 91 | ), 92 | text=[f"Severity: {val}" for val in analysis.values()], 93 | textposition='inside' 94 | ) 95 | 96 | fig = go.Figure(data=[bars]) 97 | fig.update_layout( 98 | title="Content Severity by Category", 99 | xaxis=dict(title="Severity (0–7)", range=[0, 7]), 100 | yaxis=dict(title="Category"), 101 | height=300, 102 | margin=dict(l=60, r=30, t=40, b=40) 103 | ) 104 | 105 | return verdict, dcc.Graph(figure=fig) 106 | 107 | # --------------------------------------------------------------------------- 108 | # Main entry point: Run the Dash app 109 | # --------------------------------------------------------------------------- 110 | if __name__ == '__main__': 111 | app.run(debug=True) -------------------------------------------------------------------------------- /Generative AI/Content Risk Analyzer/helper.py: -------------------------------------------------------------------------------- 1 | import os 2 | from azure.ai.contentsafety import ContentSafetyClient 3 | from azure.ai.contentsafety.models import AnalyzeTextOptions 4 | from azure.core.credentials import AzureKeyCredential 5 | from dotenv import load_dotenv 6 | import dash_bootstrap_components as dbc 7 | 8 | # --------------------------------------------------------------------------- 9 | # Load environment variables and initialize Azure Content Safety client 10 | # --------------------------------------------------------------------------- 11 | load_dotenv() 12 | key = os.getenv("CONTENT_SAFETY_KEY") 13 | endpoint = os.getenv("CONTENT_SAFETY_ENDPOINT") 14 | 15 | client = None 16 | client_init_error = None 17 | try: 18 | if not key or not endpoint: 19 | raise ValueError("Azure Content Safety key or endpoint not set in environment variables.") 20 | client = ContentSafetyClient(endpoint, AzureKeyCredential(key)) 21 | except Exception as e: 22 | client_init_error = str(e) 23 | 24 | # --------------------------------------------------------------------------- 25 | # Helper function: Map severity score to human-readable label 26 | # --------------------------------------------------------------------------- 27 | def get_severity_label(score): 28 | """ 29 | Returns a label for the given severity score. 30 | """ 31 | if score == 0: 32 | return "✅ Safe" 33 | elif score <= 2: 34 | return "⚠️ Mild" 35 | elif score <= 4: 36 | return "⚠️ Moderate" 37 | else: 38 | return "🚫 High" 39 | 40 | # --------------------------------------------------------------------------- 41 | # Helper function: Analyze text using Azure Content Safety API 42 | # --------------------------------------------------------------------------- 43 | def get_analysis(text): 44 | """ 45 | Calls Azure Content Safety API to analyze the input text. 
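    Severity scores typically range from 0 (safe) to 7 (most severe) for each
    category returned by the service (e.g. Hate, SelfHarm, Sexual, Violence).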
46 | Returns a dictionary of category: severity or an error message. 47 | """ 48 | if client_init_error: 49 | return {"error": f"Client initialization failed: {client_init_error}"} 50 | if not text or not isinstance(text, str): 51 | return {"error": "Input text must be a non-empty string."} 52 | try: 53 | response = client.analyze_text(AnalyzeTextOptions(text=text)) 54 | return {cat.category: cat.severity for cat in response.categories_analysis} 55 | except Exception as e: 56 | return {"error": f"Analysis failed: {str(e)}"} 57 | 58 | # --------------------------------------------------------------------------- 59 | # Helper function: Generate verdict message based on analysis 60 | # --------------------------------------------------------------------------- 61 | def get_verdict_message(analysis): 62 | """ 63 | Returns a Dash Bootstrap Alert component with a verdict message 64 | based on the severity scores in the analysis. 65 | """ 66 | if "error" in analysis: 67 | return dbc.Alert(f"Error: {analysis['error']}", color="danger") 68 | if any(score >= 5 for score in analysis.values()): 69 | return dbc.Alert("🚫 Highly Harmful Content Detected", color="danger") 70 | elif any(score >= 3 for score in analysis.values()): 71 | return dbc.Alert("⚠️ Moderately Harmful Content", color="warning") 72 | else: 73 | return dbc.Alert("✅ Content is Safe", color="success") 74 | -------------------------------------------------------------------------------- /Generative AI/Content Risk Analyzer/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Generative AI/Content Risk Analyzer/requirements.txt -------------------------------------------------------------------------------- /Generative AI/CrewAI/.gitkeep: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Generative AI/CrewAI/CrewAI.py: -------------------------------------------------------------------------------- 1 | import os 2 | from langchain_google_genai import ChatGoogleGenerativeAI 3 | from crewai import Agent, Task, Crew, Process 4 | 5 | 6 | # Set the environment variable for the Google API key 7 | os.environ["GOOGLE_API_KEY"] = os.getenv("GOOGLE_API_KEY") 8 | 9 | 10 | # Set gemini pro as llm 11 | llm = ChatGoogleGenerativeAI(model="gemini-pro", verbose=True, temperature=0.6) 12 | 13 | 14 | researcher = Agent( 15 | role="Senior Finance Analyst", 16 | goal="Fetch the stock value of Tesla Inc.", 17 | backstory="""You work at a leading investment banking think tank. 18 | Your expertise lies analysis data and you have a knack for dissecting complex data and presenting 19 | actionable insights. Keep the report simple and easy to understand within 300 words""", 20 | verbose=True, 21 | allow_delegation=False, 22 | llm=llm, 23 | ) 24 | 25 | 26 | analyst = Agent( 27 | role="Investor", 28 | goal="Highlight the key metrics of Tesla Inc. stock value", 29 | backstory="""You are a renowned analyst and investor, known for 30 | your number crunching and insights from data. 
You transform complex concepts into compelling narratives.""", 31 | verbose=True, 32 | allow_delegation=False, 33 | llm=llm, 34 | ) 35 | 36 | designer = Agent( 37 | role="Visualizer", 38 | goal="Visualize the key metrics from task2 using a graph", 39 | backstory="""You are a renowned graphics designer known for amazing visualization of numbers and great story telling""", 40 | verbose=True, 41 | allow_delegation=False, 42 | llm=llm, 43 | ) 44 | 45 | 46 | # Create tasks for your agents 47 | task1 = Task( 48 | description="""Conduct a comprehensive analysis, get the key stock metrics and 49 | your final answer MUST be a paragraph not exceeding 100 words.""", 50 | expected_output="""A detailed report summarizing key findings highlighting information that could be relevant for the research.""", 51 | agent=researcher, 52 | ) 53 | 54 | 55 | task2 = Task( 56 | description="""Using the content from task1, highlight key metrics. 57 | Avoid complex words so it doesn't sound like AI. 58 | """, 59 | expected_output="""A blog highlighting findings from the research.""", 60 | agent=analyst, 61 | ) 62 | 63 | task3 = Task( 64 | description="""Visualize the key metrics from task2 using a graph""", 65 | expected_output="""write a python code and create a chart using the findings from the research.""", 66 | agent=designer, 67 | ) 68 | 69 | 70 | # Instantiate your crew with a sequential process 71 | crew = Crew( 72 | agents=[researcher, analyst, designer], 73 | tasks=[task1, task2, task3], 74 | verbose=1, 75 | ) 76 | 77 | 78 | # Get your crew to work! 79 | result = crew.kickoff() 80 | -------------------------------------------------------------------------------- /Generative AI/LLM_wrappers/.gitkeep: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Generative AI/LLM_wrappers/wrapper_for_API.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, HTTPException 2 | from pydantic import BaseModel 3 | from typing import Optional 4 | import os, openai 5 | 6 | # Initialize the FastAPI app 7 | app = FastAPI() 8 | 9 | 10 | # LLMWrapper class (reuse from above) 11 | class LLMWrapper: 12 | def __init__(self, m_name: str, api_key: str): 13 | self.m_name = m_name 14 | openai.api_key = api_key 15 | 16 | def _preprocess_input(self, user_input: str) -> list[dict[str, str]]: 17 | return [ 18 | {"role": "system", "content": "You are a helpful assistant."}, 19 | {"role": "user", "content": user_input}, 20 | ] 21 | 22 | def _postprocess_output(self, response: dict[str, str]) -> str: 23 | # return response["choices"][0]["message"]["content"].strip() 24 | return response 25 | 26 | def query(self, user_input: str) -> str: 27 | preprocessed_input = self._preprocess_input(user_input) 28 | try: 29 | response = openai.chat.completions.create( 30 | model=self.m_name, 31 | messages=preprocessed_input, 32 | max_tokens=100, 33 | ) 34 | return self._postprocess_output(response) 35 | except Exception as e: 36 | return f"Error occurred: {str(e)}" 37 | 38 | 39 | # Pydantic model for input validation 40 | class QueryRequest(BaseModel): 41 | query: str 42 | m_name: Optional[str] = "gpt-3.5-turbo" # Default model 43 | 44 | 45 | # Initialize the LLMWrapper (set your OpenAI API key) 46 | llm_wrapper = LLMWrapper(m_name="gpt-3.5-turbo", api_key=os.getenv("OPENAI_API_KEY")) 47 | 48 | 49 | @app.post("/query/") 50 | async def get_llm_response(request: QueryRequest): 51 | 
"""Endpoint to handle user queries.""" 52 | try: 53 | # Use the LLMWrapper to get the model's response 54 | response = llm_wrapper.query(request.query) 55 | return {"query": request.query, "response": response} 56 | except Exception as e: 57 | raise HTTPException(status_code=500, detail=str(e)) 58 | 59 | 60 | @app.get("/") 61 | async def read_root(): 62 | """Basic endpoint to check if the API is running.""" 63 | return { 64 | "message": "Welcome to the LLM API. Use POST /query/ to interact with the model." 65 | } 66 | -------------------------------------------------------------------------------- /Generative AI/LLM_wrappers/wrapper_for_app.py: -------------------------------------------------------------------------------- 1 | import openai 2 | import os 3 | import dash 4 | from dash import dcc, html 5 | from dash.dependencies import Input, Output 6 | from typing import List, Dict 7 | 8 | 9 | # Define the LLMWrapper class 10 | class LLMWrapper: 11 | def __init__(self, model_name: str, api_key: str): 12 | self.model_name = model_name 13 | openai.api_key = api_key 14 | 15 | def _preprocess_input(self, user_input: str) -> list[dict[str, str]]: 16 | return [ 17 | {"role": "system", "content": "You are a helpful assistant."}, 18 | {"role": "user", "content": user_input}, 19 | ] 20 | 21 | def _postprocess_output(self, response: dict[str, str]) -> str: 22 | return response.choices[0].message.content.strip() 23 | 24 | def query(self, user_input: str) -> str: 25 | preprocessed_input = self._preprocess_input(user_input) 26 | try: 27 | response = openai.chat.completions.create( 28 | model=self.model_name, messages=preprocessed_input, max_tokens=100 29 | ) 30 | return self._postprocess_output(response) 31 | except Exception as e: 32 | return f"Error occurred: {str(e)}" 33 | 34 | 35 | # Initialize the Dash app 36 | app = dash.Dash(__name__) 37 | 38 | # Instantiate LLMWrapper 39 | llm_wrapper = LLMWrapper( 40 | model_name="gpt-3.5-turbo", api_key=os.getenv("OPENAI_API_KEY") 41 | ) 42 | 43 | # Define the layout of the app 44 | app.layout = html.Div( 45 | [ 46 | html.H1("LLM Query Interface", style={"textAlign": "center"}), 47 | # User input text box 48 | dcc.Input( 49 | id="user-input", 50 | type="text", 51 | placeholder="Enter your query here...", 52 | style={"width": "80%", "padding": "10px"}, 53 | ), 54 | # Submit button 55 | html.Button( 56 | "Submit", 57 | id="submit-btn", 58 | n_clicks=0, 59 | style={"padding": "10px", "margin": "10px"}, 60 | ), 61 | # Display the response from the LLM 62 | html.Div( 63 | id="output-response", 64 | style={"padding": "10px", "marginTop": "20px", "fontSize": "18px"}, 65 | ), 66 | ] 67 | ) 68 | 69 | 70 | # Define callback to update output based on user input 71 | @app.callback( 72 | Output("output-response", "children"), 73 | [Input("submit-btn", "n_clicks")], 74 | [dash.dependencies.State("user-input", "value")], 75 | ) 76 | def update_output(n_clicks, user_input): 77 | if n_clicks > 0 and user_input: 78 | # Get the LLM response 79 | llm_response = llm_wrapper.query(user_input) 80 | return llm_response 81 | return "" 82 | 83 | 84 | # Run the app 85 | if __name__ == "__main__": 86 | app.run_server(debug=False) 87 | -------------------------------------------------------------------------------- /Generative AI/NL2SQL APP/requirements.txt: -------------------------------------------------------------------------------- 1 | dash 2 | dash-bootstrap-components 3 | langchain 4 | langchain-openai 5 | langchain-community 6 | openai 7 | pymysql 8 | 
-------------------------------------------------------------------------------- /Generative AI/OCR App With Gemini and Dash/app.py: -------------------------------------------------------------------------------- 1 | import dash 2 | from dash import dcc, html 3 | from dash.dependencies import Input, Output, State 4 | import base64 5 | from datetime import datetime 6 | from PIL import Image 7 | import io 8 | import os 9 | import google.generativeai as genai 10 | 11 | 12 | # Fetch the API key from the environment variable 13 | os.getenv("GOOGLE_API_KEY") 14 | genai.configure(api_key=os.getenv("GOOGLE_API_KEY")) 15 | 16 | 17 | response_list = [] 18 | 19 | # Initialize the Dash app 20 | app = dash.Dash(__name__) 21 | app.title = "Gemini Image Demo" 22 | 23 | external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"] 24 | app = dash.Dash(__name__, external_stylesheets=external_stylesheets) 25 | 26 | 27 | # Define style for the gray border 28 | section_style = { 29 | "border": "1px solid #dddddd", 30 | "border-radius": "5px", 31 | "padding": "10px", 32 | "margin": "10px auto", 33 | "width": "50%", 34 | "textAlign": "center", 35 | } 36 | 37 | # Define style for the button without border 38 | button_style = { 39 | "background-color": "#2E86C1", 40 | "color": "white", 41 | } 42 | 43 | app.layout = html.Div( 44 | [ 45 | html.H1( 46 | "MULTI-LANGUAGE OCR USING GEMINI & DASH", 47 | style={ 48 | "textAlign": "center", 49 | "color": "#2E86C1", 50 | "fontSize": "25px", 51 | "fontWeight": "bold", 52 | }, 53 | ), 54 | html.Div( 55 | [ 56 | html.H2( 57 | "UPLOAD IMAGE", 58 | style={ 59 | "color": "#F08080", 60 | "fontSize": "18px", 61 | "fontWeight": "bold", 62 | "textAlign": "left", 63 | }, 64 | ), 65 | dcc.Upload( 66 | id="upload-image", 67 | children=html.Div(["Drag and Drop or ", html.A("Select an Image")]), 68 | style={ 69 | "width": "100%", 70 | "height": "60px", 71 | "lineHeight": "60px", 72 | "borderWidth": "1px", 73 | "borderStyle": "dashed", 74 | "borderRadius": "5px", 75 | "textAlign": "center", 76 | "color": "#333333", 77 | }, 78 | # Allow multiple files to be uploaded 79 | multiple=False, 80 | ), 81 | ], 82 | style=section_style, 83 | ), 84 | html.Div( 85 | [ 86 | html.H2( 87 | "INPUT PROMPT", 88 | style={ 89 | "color": "#F08080", 90 | "textAlign": "left", 91 | "fontSize": "18px", 92 | "fontWeight": "bold", 93 | }, 94 | ), 95 | dcc.Input( 96 | id="input", 97 | type="text", 98 | placeholder="Enter Input Prompt", 99 | style={"width": "100%"}, 100 | ), 101 | ], 102 | style=section_style, 103 | ), 104 | html.Div( 105 | [ 106 | html.Button( 107 | "Fetch Information", 108 | id="submit-button", 109 | n_clicks=0, 110 | style={**button_style}, # Use button_style here 111 | ), 112 | ], 113 | style={ 114 | **section_style, 115 | "border": "none", 116 | }, # Exclude border from button section 117 | ), 118 | html.Div( 119 | [ 120 | html.H1( 121 | "RESPONSE FROM AI:", 122 | style={ 123 | "color": "#F08080", 124 | "fontSize": "18px", 125 | "textAlign": "left", 126 | "fontWeight": "bold", 127 | }, 128 | ), 129 | html.Div( 130 | id="output-container-button", 131 | children="\n".join(response_list), 132 | style={ 133 | "textAlign": "left", 134 | "color": "#333333", 135 | "whiteSpace": "pre-wrap", 136 | }, 137 | ), 138 | ], 139 | style=section_style, 140 | ), 141 | ] 142 | ) 143 | 144 | 145 | def get_gemini_response(input, image, prompt): 146 | 147 | model = genai.GenerativeModel("gemini-pro-vision") 148 | response = model.generate_content([input, image[0], prompt]) 149 | return response.text 150 | 
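# Illustrative note (editorial addition, not part of the original file): `image`
# is expected to be a list whose first element is a PIL.Image produced by
# process_image() below; the Dash callback passes the same text-box value as
# both `input` and `prompt`.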
151 | 152 | def process_image(contents, filename): 153 | content_type, content_string = contents.split(",") 154 | 155 | decoded = base64.b64decode(content_string) 156 | image = Image.open(io.BytesIO(decoded)) 157 | return image 158 | 159 | 160 | @app.callback( 161 | Output("output-container-button", "children"), 162 | [Input("submit-button", "n_clicks")], 163 | [ 164 | State("input", "value"), 165 | State("upload-image", "contents"), 166 | State("upload-image", "filename"), 167 | ], 168 | ) 169 | def update_output(n_clicks, input_value, contents, filename): 170 | if n_clicks > 0: 171 | if contents is not None: 172 | image = process_image(contents, filename) 173 | response = get_gemini_response(input_value, [image], input_value) 174 | response_list_time = ( 175 | f'\n{datetime.now().strftime("%Y-%m-%d %H:%M:%S")}: {response}\n' 176 | ) 177 | response_list.insert(0, response_list_time) 178 | return response_list 179 | 180 | 181 | if __name__ == "__main__": 182 | app.run_server(debug=True) 183 | -------------------------------------------------------------------------------- /Generative AI/OCR App With Gemini and Dash/requirements.txt: -------------------------------------------------------------------------------- 1 | Dash 2 | google-generativeai 3 | Pillow 4 | -------------------------------------------------------------------------------- /Generative AI/OCR App With Gemini and Streamlit/ContentExtractor.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import os 3 | from PIL import Image 4 | 5 | from dotenv import load_dotenv 6 | import google.generativeai as genai 7 | 8 | load_dotenv() 9 | 10 | os.getenv("GOOGLE_API_KEY") 11 | genai.configure(api_key=os.getenv("GOOGLE_API_KEY")) 12 | 13 | 14 | ## Function to load OpenAI model and get response 15 | def get_gemini_response(input, image, prompt): 16 | model = genai.GenerativeModel("gemini-pro-vision") 17 | response = model.generate_content([input, image[0], prompt]) 18 | return response.text 19 | 20 | 21 | def input_image_setup(uploaded_file): 22 | # Check if a file has been uploaded 23 | if uploaded_file is not None: 24 | # Read the file into bytes 25 | bytes_data = uploaded_file.getvalue() 26 | 27 | image_parts = [ 28 | { 29 | "mime_type": uploaded_file.type, 30 | "data": bytes_data, 31 | } 32 | ] 33 | return image_parts 34 | else: 35 | raise FileNotFoundError("No file uploaded") 36 | 37 | 38 | st.set_page_config(page_title="Document & Image Analyzer") 39 | st.sidebar.title("Upload Image") 40 | input = st.text_input("Input Prompt: ", key="input") 41 | uploaded_file = st.sidebar.file_uploader( 42 | "Choose an image...", type=["jpg", "png", "jpeg"] 43 | ) 44 | 45 | # Right portion to show uploaded image 46 | if uploaded_file is not None: 47 | st.image(uploaded_file, caption="Uploaded Image", use_column_width=True) 48 | 49 | submit = st.button("Fetch Information") 50 | input_prompt = """ 51 | You are an expert in understanding invoices. 
52 | You will receive input images as invoices & 53 | you will have to answer questions based on the input image 54 | """ 55 | 56 | ## If ask button is clicked 57 | if submit: 58 | if uploaded_file is not None: # Add this check to ensure file is uploaded 59 | image_data = input_image_setup(uploaded_file) 60 | response = get_gemini_response(input_prompt, image_data, input) 61 | st.subheader("The Response is") 62 | st.write(response) 63 | else: 64 | st.write("Please upload an image first.") 65 | -------------------------------------------------------------------------------- /Generative AI/OCR App With Gemini and Streamlit/requirements.txt: -------------------------------------------------------------------------------- 1 | Streamlit 2 | PIL 3 | google-generativeai 4 | dotenv -------------------------------------------------------------------------------- /Generative AI/Streamlit vs Gradio/.gitkeep: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Generative AI/Streamlit vs Gradio/language_translation_gradio.py: -------------------------------------------------------------------------------- 1 | import gradio as gr 2 | from transformers import MarianMTModel, MarianTokenizer 3 | 4 | # Load translation model and tokenizer 5 | model_name = "Helsinki-NLP/opus-mt-en-de" 6 | tokenizer = MarianTokenizer.from_pretrained(model_name) 7 | model = MarianMTModel.from_pretrained(model_name) 8 | 9 | 10 | # Define translation function 11 | def translate_to_german(text): 12 | translated = model.generate(**tokenizer(text, return_tensors="pt", padding=True)) 13 | return tokenizer.decode(translated[0], skip_special_tokens=True) 14 | 15 | 16 | # Gradio interface 17 | iface = gr.Interface( 18 | fn=translate_to_german, 19 | inputs="text", 20 | outputs="text", 21 | title="English to German Translator", 22 | description="Enter an English sentence, and get the German translation!", 23 | ) 24 | 25 | iface.launch(share=True) 26 | -------------------------------------------------------------------------------- /Generative AI/Streamlit vs Gradio/language_translation_streamlit.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from transformers import MarianMTModel, MarianTokenizer 3 | 4 | # Load translation model and tokenizer 5 | model_name = "Helsinki-NLP/opus-mt-en-de" 6 | tokenizer = MarianTokenizer.from_pretrained(model_name) 7 | model = MarianMTModel.from_pretrained(model_name) 8 | 9 | 10 | # Define translation function 11 | def translate_to_german(text): 12 | translated = model.generate(**tokenizer(text, return_tensors="pt", padding=True)) 13 | return tokenizer.decode(translated[0], skip_special_tokens=True) 14 | 15 | 16 | # Streamlit app layout 17 | st.title("English to German Translator") 18 | st.write("Enter an English sentence, and see the German translation on the right.") 19 | 20 | # Create two columns: one for input and one for output 21 | col1, col2 = st.columns(2) 22 | 23 | with col1: 24 | st.subheader("Input (English)") 25 | # Text input box 26 | english_text = st.text_area("Enter English text here", height=200) 27 | 28 | with col2: 29 | st.subheader("Output (German)") 30 | # Placeholder for the output text box 31 | translation_placeholder = st.empty() 32 | 33 | # Translate and display result when button is clicked 34 | if st.button("Translate"): 35 | if english_text: 36 | german_translation = 
translate_to_german(english_text) 37 | # Display the German translation in the output text box 38 | translation_placeholder.text_area( 39 | "Translated Text", value=german_translation, height=200 40 | ) 41 | else: 42 | st.write("Please enter some text.") 43 | -------------------------------------------------------------------------------- /Interactive Modelling With R Shiny/Images/.gitkeep: -------------------------------------------------------------------------------- 1 | temp file 2 | -------------------------------------------------------------------------------- /Interactive Modelling With R Shiny/Images/Correlation.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Interactive Modelling With R Shiny/Images/Correlation.PNG -------------------------------------------------------------------------------- /Interactive Modelling With R Shiny/Images/DataSummary.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Interactive Modelling With R Shiny/Images/DataSummary.PNG -------------------------------------------------------------------------------- /Interactive Modelling With R Shiny/Images/HomePage.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Interactive Modelling With R Shiny/Images/HomePage.PNG -------------------------------------------------------------------------------- /Interactive Modelling With R Shiny/Images/Model.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Interactive Modelling With R Shiny/Images/Model.PNG -------------------------------------------------------------------------------- /Interactive Modelling With R Shiny/Images/prediction.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Interactive Modelling With R Shiny/Images/prediction.PNG -------------------------------------------------------------------------------- /Interactive Modelling With R Shiny/README.md: -------------------------------------------------------------------------------- 1 | # Interactive-Modelling-with-Shiny 2 | R Shiny application for data exploration, interactive model building, identifying variable importance and predicting on test data. 3 | 4 | ## [**Read more on this in my blog**](https://www.analyticsvidhya.com/blog/2021/05/build-interactive-models-with-r-shiny/) 5 | 6 | Step1: Select the prefered train/test data split on the left panel.
7 | Step2: Select the independent and dependent variables from the dropdowns.<br>
8 | ![alt text](https://github.com/amitvkulkarni/Interactive-Modelling-with-Shiny/blob/main/Images/HomePage.PNG) 9 | 10 | Navigate to the respective tabs to view the results:<br>
11 | ![alt text](https://github.com/amitvkulkarni/Interactive-Modelling-with-Shiny/blob/main/Images/DataSummary.PNG) 12 | Correlation plot:
13 | ![alt text](https://github.com/amitvkulkarni/Interactive-Modelling-with-Shiny/blob/main/Images/Correlation.PNG) 14 | Select various combinations of X and Y variables to build the model:<br>
15 | ![alt text](https://github.com/amitvkulkarni/Interactive-Modelling-with-Shiny/blob/main/Images/Model.PNG) 16 | Prediction on test data:
17 | ![alt text](https://github.com/amitvkulkarni/Interactive-Modelling-with-Shiny/blob/main/Images/prediction.PNG) 18 | -------------------------------------------------------------------------------- /Interactive Modelling With R Shiny/server.R: -------------------------------------------------------------------------------- 1 | library(shiny) 2 | library(shinydashboard) 3 | library(maps) 4 | library(dplyr) 5 | library(leaflet) 6 | library(ggplot2) 7 | library(tidyverse) 8 | library(DT) 9 | library(plotly) 10 | library(corrplot) 11 | library(caret) 12 | library(stargazer) 13 | 14 | dd <- mtcars 15 | 16 | shinyServer(function(input, output, session) { 17 | 18 | InputDataset <- reactive({ 19 | mtcars 20 | }) 21 | 22 | 23 | InputDataset_model <- reactive({ 24 | if (is.null(input$SelectX)) { 25 | dt <- mtcars 26 | } 27 | else{ 28 | dt <- mtcars[, c(input$SelectX)] 29 | } 30 | 31 | }) 32 | 33 | 34 | observe({ 35 | lstname <- names(InputDataset()) 36 | updateSelectInput(session = session, 37 | inputId = "SelectY", 38 | choices = lstname) 39 | }) 40 | 41 | splitSlider <- reactive({ 42 | input$Slider1 / 100 43 | }) 44 | output$Summ <- 45 | renderPrint( 46 | stargazer( 47 | InputDataset(), 48 | type = "text", 49 | title = "Descriptive statistics", 50 | digits = 1, 51 | out = "table1.txt" 52 | ) 53 | ) 54 | output$Summ_old <- renderPrint(summary(InputDataset())) 55 | output$structure <- renderPrint(str(InputDataset())) 56 | 57 | set.seed(100) # setting seed to reproduce results of random sampling 58 | trainingRowIndex <- 59 | reactive({ 60 | sample(1:nrow(InputDataset_model()), 61 | splitSlider() * nrow(InputDataset_model())) 62 | })# row indices for training data 63 | 64 | trainingData <- reactive({ 65 | tmptraindt <- InputDataset_model() 66 | tmptraindt[trainingRowIndex(), ] 67 | }) 68 | 69 | testData <- reactive({ 70 | tmptestdt <- InputDataset_model() 71 | tmptestdt[-trainingRowIndex(),] 72 | }) 73 | 74 | 75 | 76 | output$cntTrain <- 77 | renderText(paste("Train Data:", NROW(trainingData()), "records")) 78 | output$cntTest <- 79 | renderText(paste("Test Data:", NROW(testData()), "records")) 80 | 81 | output$Data <- renderDT(InputDataset()) 82 | 83 | 84 | cormat <- reactive({ 85 | round(cor(InputDataset()), 1) 86 | }) 87 | output$Corr <- 88 | renderPlot(corrplot( 89 | cormat(), 90 | type = "lower", 91 | order = "hclust", 92 | method = "number" 93 | )) 94 | 95 | 96 | #Code section for Linear Regression----------------------------------------------------------------------------- 97 | 98 | f <- reactive({ 99 | as.formula(paste(input$SelectY, "~.")) 100 | }) 101 | 102 | 103 | Linear_Model <- reactive({ 104 | lm(f(), data = trainingData()) 105 | }) 106 | 107 | output$Model <- renderPrint(summary(Linear_Model())) 108 | output$Model_new <- 109 | renderPrint( 110 | stargazer( 111 | Linear_Model(), 112 | type = "text", 113 | title = "Model Results", 114 | digits = 1, 115 | out = "table1.txt" 116 | ) 117 | ) 118 | 119 | Importance <- reactive({ 120 | varImp(Linear_Model(), scale = FALSE) 121 | }) 122 | 123 | tmpImp <- reactive({ 124 | 125 | imp <- as.data.frame(varImp(Linear_Model())) 126 | imp <- data.frame(overall = imp$Overall, 127 | names = rownames(imp)) 128 | imp[order(imp$overall, decreasing = T),] 129 | 130 | }) 131 | 132 | output$ImpVar <- renderPrint(tmpImp()) 133 | 134 | price_predict <- reactive({ 135 | predict(Linear_Model(), testData()) 136 | }) 137 | 138 | tmp <- reactive({ 139 | tmp1 <- testData() 140 | tmp1[, c(input$SelectY)] 141 | }) 142 | 143 | 144 | actuals_preds <- 145 | reactive({ 
146 | data.frame(cbind(actuals = tmp(), predicted = price_predict())) 147 | }) 148 | 149 | Fit <- 150 | reactive({ 151 | ( 152 | plot( 153 | actuals_preds()$actuals, 154 | actuals_preds()$predicted, 155 | pch = 16, 156 | cex = 1.3, 157 | col = "blue", 158 | main = "Best Fit Line", 159 | xlab = "Actual", 160 | ylab = "Predicted" 161 | ) 162 | ) 163 | }) 164 | 165 | output$Prediction <- renderPlot(Fit()) 166 | 167 | output$residualPlots <- renderPlot({ 168 | par(mfrow = c(2, 2)) # Change the panel layout to 2 x 2 169 | plot(Linear_Model()) 170 | par(mfrow = c(1, 1)) # Change back to 1 x 1 171 | 172 | }) 173 | 174 | output$digest <- renderExplorer({ 175 | 176 | explorer(data = dd$data, demo = F) 177 | 178 | }) 179 | 180 | }) 181 | 182 | -------------------------------------------------------------------------------- /Interactive Modelling With R Shiny/ui.R: -------------------------------------------------------------------------------- 1 | 2 | 3 | library(shiny) 4 | library(shinydashboard) 5 | library(maps) 6 | library(dplyr) 7 | library(leaflet) 8 | library(shinycssloaders) 9 | library(shinythemes) 10 | library(datadigest) 11 | library(rio) 12 | library(DT) 13 | library(stargazer) 14 | 15 | 16 | dashboardPage( 17 | dashboardHeader(title = "Machine Learning", dropdownMenuOutput("msgOutput")), 18 | dashboardSidebar( 19 | sliderInput( 20 | "Slider1", 21 | label = h3("Train/Test Split %"), 22 | min = 0, 23 | max = 100, 24 | value = 75 25 | ), 26 | textOutput("cntTrain"), 27 | textOutput("cntTest"), 28 | br() 29 | 30 | # 31 | # menuItem( 32 | # "Generate Report", 33 | # tabName = "sectors", 34 | # icon = icon("download"), 35 | # radioButtons( 36 | # 'format', 37 | # 'Document format', 38 | # c('HTML', 'Word'), 39 | # inline = FALSE, 40 | # selected = 1 41 | # ), 42 | # downloadButton("report", "Download Report", class = "butt"), 43 | # tags$head(tags$style(".butt{color: blue !important;}")) 44 | # ) 45 | 46 | ), 47 | dashboardBody( 48 | fluidPage( 49 | box( 50 | selectInput( 51 | "SelectX", 52 | label = "Select variables:", 53 | choices = names(mtcars), 54 | multiple = TRUE, 55 | selected = names(mtcars) 56 | ), 57 | solidHeader = TRUE, 58 | width = "3", 59 | status = "primary", 60 | title = "X variable" 61 | ), 62 | box( 63 | selectInput("SelectY", label = "Select variable to predict:", choices = names(mtcars)), 64 | solidHeader = TRUE, 65 | width = "3", 66 | status = "primary", 67 | title = "Y variable" 68 | ) 69 | 70 | 71 | 72 | ), 73 | 74 | fluidPage( 75 | 76 | tabBox( 77 | id = "tabset1", 78 | height = "1000px", 79 | width = 12, 80 | 81 | tabPanel("Data", 82 | box(withSpinner(DTOutput( 83 | "Data" 84 | )), width = 12)), 85 | tabPanel( 86 | "Data Summary", 87 | box(withSpinner(verbatimTextOutput("Summ")), width = 6), 88 | box(withSpinner(verbatimTextOutput("Summ_old")), width = 6) 89 | ), 90 | 91 | # 92 | # tabPanel("Data Strucure", 93 | # # box( 94 | # # withSpinner(verbatimTextOutput("structure")), width = "100%" 95 | # # ), 96 | # explorerOutput("digest") 97 | # ), 98 | tabPanel("Plots", 99 | box(withSpinner(plotOutput( 100 | "Corr" 101 | )), width = 12)), 102 | #box(withSpinner(verbatimTextOutput("CorrMatrix")), width = 12), 103 | tabPanel( 104 | "Model", 105 | box( 106 | withSpinner(verbatimTextOutput("Model")), 107 | width = 6, 108 | title = "Model Summary" 109 | ), 110 | # box( 111 | # withSpinner(verbatimTextOutput("Model_new")), 112 | # width = 6, 113 | # title = "Model Summary" 114 | # ), 115 | # 116 | box( 117 | withSpinner(verbatimTextOutput("ImpVar")), 118 | width = 5, 119 | 
title = "Variable Importance" 120 | ) 121 | ), 122 | #textOutput("correlation_accuracy"), 123 | tabPanel( 124 | "Prediction", 125 | box(withSpinner(plotOutput("Prediction")), width = 6, title = "Best Fit Line"), 126 | box(withSpinner(plotOutput("residualPlots")), width = 6, title = "Diagnostic Plots") 127 | ) 128 | ) 129 | ) 130 | ) 131 | ) 132 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Amit Kulkarni 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Monte Carlo Simulation for Pipe Failure/README.md: -------------------------------------------------------------------------------- 1 | # Monte Carlo Simulation For Pipe Failure 2 | 3 | Monte Carlo Simulation is a technique that allows decision-makers to account for risk in quantitative analysis and decision making. It increases the variability by infusing a wide range of values that are part of the same distribution (values that are in a similar range as actual data) and present with various possible outcomes. Further, there are many iterations sometimes in thousands, and the outcome from each iteration is measured. Simulation outcomes that are backed by domain expertise can assist in making decisions that are more than a hunch or gut and are supported by the data that mimics the possible business scenarios. 4 | 5 | ## [Read more on my blog](https://amitvkulkarni.medium.com/monte-carlo-simulation-for-pipe-failure-in-python-3e9729728281) 6 | 7 | ## Here is the preview of the app:
8 | ![alt text](https://github.com/amitvkulkarni/Data-Apps/blob/main/Monte%20Carlo%20Simulation%20for%20Pipe%20Failure/assets/home.png) 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /Monte Carlo Simulation for Pipe Failure/Requirements.txt: -------------------------------------------------------------------------------- 1 | dash==2.3.1 2 | dash-bootstrap-components==1.1.0 3 | dash-core-components==2.0.0 4 | dash-daq==0.5.0 5 | dash-html-components==2.0.0 6 | dash-table==5.0.0 7 | matplotlib==3.5.2 8 | numpy==1.22.3 9 | pandas==1.4.2 10 | PyYAML==6.0 11 | -------------------------------------------------------------------------------- /Monte Carlo Simulation for Pipe Failure/app.py: -------------------------------------------------------------------------------- 1 | import dash 2 | import time 3 | import dash_core_components as dcc 4 | # from dash import dcc 5 | import dash_html_components as html 6 | # from dash import html,Input, Output, State 7 | from dash.dependencies import Input, Output, State 8 | import dash_daq as daq 9 | import dash_bootstrap_components as dbc 10 | import numpy as np 11 | import pandas as pd 12 | import plotly.graph_objs as go 13 | import plotly.express as px 14 | import layout 15 | import pre_processing as pp 16 | import dash_daq as daq 17 | 18 | 19 | app = dash.Dash(__name__,external_stylesheets=[dbc.themes.BOOTSTRAP]) 20 | 21 | server = app.server 22 | 23 | app.layout = layout.layout_all 24 | 25 | 26 | @app.callback( 27 | [ 28 | Output("plt-failure", 'figure'), 29 | Output('textarea-description', 'children') 30 | ], 31 | 32 | [Input('btn-run-simulation', 'n_clicks')], 33 | state = [State("val-diameter", "value"), 34 | State("val-diameter-cov", "value"), 35 | State("val-thickness", "value"), 36 | State("val-thickness-cov", "value"), 37 | State("val-strength", "value"), 38 | State("val-strength-cov", "value"), 39 | State("val-internal-pressure", "value")] 40 | 41 | ) 42 | def updatechart(n_clicks, diameter, diameter_cov, thickness, thickness_cov, strength, strength_cov, internal_pressue): 43 | """_summary_ 44 | 45 | Args: 46 | n_clicks (_type_): _description_ 47 | diameter (int): Diameter of the pipe 48 | diameter_cov (int): Covariance of the diameter 49 | thickness (int): Thickness of the pipe 50 | thickness_cov (int): Covariance of the thickness 51 | strength (int): Strength / Yield strength of the pipe 52 | strength_cov (int): Covariance of the yield strength 53 | internal_pressue (int): Inter Pressue in the pipe. 54 | 55 | Returns: 56 | Figure: Return a line chart fig 57 | str: Returns the description of the hoop stress. 58 | """ 59 | 60 | str_description = "The pipe will fail if the Hoop stress becomes greater than its yield strength. This app will help simulate this scenario hundreds or maybe a thousand times, and calculates the probability of failure for each iteration. Depending on the result, the decision can be made to either continue to the current design or to review and redesign the pipe. Here is the mathematical way to define the hoop stress." 
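    # Context for str_description above: for a thin-walled pipe the hoop stress is
    # commonly taken as sigma_h = P * D / (2 * t), i.e. internal pressure times diameter
    # divided by twice the wall thickness, and a sampled design counts as a failure when
    # sigma_h exceeds the sampled yield strength. The simulation itself is implemented in
    # pre_processing.initiate_simulation(), which is called below.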
61 | fig_linechart = pp.initiate_simulation(diameter, diameter_cov, thickness, thickness_cov, strength, strength_cov, internal_pressue) 62 | return fig_linechart,str_description 63 | 64 | 65 | if __name__ == '__main__': 66 | app.run_server(debug=True, use_reloader=False, dev_tools_ui=False) 67 | -------------------------------------------------------------------------------- /Monte Carlo Simulation for Pipe Failure/assets/dash-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Monte Carlo Simulation for Pipe Failure/assets/dash-logo.png -------------------------------------------------------------------------------- /Monte Carlo Simulation for Pipe Failure/assets/equation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Monte Carlo Simulation for Pipe Failure/assets/equation.png -------------------------------------------------------------------------------- /Monte Carlo Simulation for Pipe Failure/assets/home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Monte Carlo Simulation for Pipe Failure/assets/home.png -------------------------------------------------------------------------------- /Monte Carlo Simulation for Pipe Failure/assets/hoopstress.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Monte Carlo Simulation for Pipe Failure/assets/hoopstress.jpg -------------------------------------------------------------------------------- /Monte Carlo Simulation for Pipe Failure/assets/logo-plotly.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /Monte Carlo Simulation for Pipe Failure/assets/style.css: -------------------------------------------------------------------------------- 1 | body { 2 | background-color:#EDEFEB; 3 | font-family: "Open Sans", verdana, arial, sans-serif; 4 | font-size: 18px; 5 | color:#000080; 6 | } 7 | 8 | .side_bar { 9 | height: 100%; 10 | width: 14%; 11 | position: fixed; 12 | top: 0; 13 | left: 0; 14 | background-color: #ADD8E6; 15 | padding-left: 1%; 16 | padding-right: 1%; 17 | color:rgb(7, 7, 7); 18 | } 19 | 20 | .main { 21 | font-size: 15px; 22 | padding: 0px 10px; 23 | padding-left: 16%; 24 | color:#2F4F4F; 25 | } 26 | 27 | .footer { 28 | position: relative; 29 | left: 0; 30 | bottom: 0; 31 | width: 100%; 32 | background-color: #4b9072a7; 33 | padding-left: 1%; 34 | } 35 | 36 | h1 { 37 | color:#000080; 38 | font-family: ui-monospace; 39 | } 40 | 41 | h3 { 42 | margin-block-start:0.3em; 43 | font-size: smaller; 44 | } 45 | 46 | h4 { 47 | margin-block-end:0.3em; 48 | font-size: smaller; 49 | } 50 | 51 | .text{ 52 | margin: 10px; 53 | padding: 25px; 54 | align-self: center; 55 | } 56 | 57 | .row{ 58 | display: flex; 59 | } 60 | 61 | /* Boxes */ 62 | 63 | .box { 64 | border-radius: 10px; 65 | background-color: #F9F9F8; 66 | margin: 10px; 67 | padding: 25px; 68 | box-shadow: 2px 2px 8px lightgrey; 69 | } 70 | 71 | .boxImage { 72 | border-radius: 10px; 73 | 
background-color: #FFFFFF; 74 | margin: 10px; 75 | padding: 25px; 76 | box-shadow: 2px 2px 8px lightgrey; 77 | } 78 | 79 | 80 | .box_comment { 81 | border-radius: 20px; 82 | background-color: #d4f5c29a; 83 | margin: 10px; 84 | padding: 10px; 85 | box-shadow: 2px 2px 2px lightgrey; 86 | font-size: small; 87 | text-align: center; 88 | font-family: system-ui; 89 | } 90 | 91 | .box_emissions{ 92 | border-radius: 20px; 93 | background-color: #DCDCDC; 94 | margin: 2px; 95 | box-shadow: 2px 2px 2px lightgrey; 96 | justify-content: center; 97 | width: 50%; 98 | text-align: center; 99 | } 100 | /* Drop down */ 101 | #drop_map .VirtualizedSelectFocusedOption { 102 | background-color:#ebb36abf; 103 | } 104 | 105 | #drop_continent .VirtualizedSelectFocusedOption { 106 | background-color:#ebb36abf; 107 | } 108 | 109 | /* Radio buttons */ 110 | .radio { 111 | display:flex; 112 | justify-content: left; 113 | place-content:space-around; 114 | width: 88%; 115 | } 116 | 117 | .radio input[type="radio"] { 118 | opacity: 0; 119 | position: absolute; 120 | } 121 | 122 | .radio label { 123 | display: table-caption; 124 | background-color: #F9F9F8; 125 | padding: 15px 15px; 126 | font-size: 16px; 127 | border: 2px solid #e9a8538e; 128 | border-radius: 10px; 129 | width: 200%; 130 | text-align: center; 131 | } 132 | 133 | .radio input[type="radio"]:checked + label { 134 | background-color: #e98b1044; 135 | border: 2px solid #F9F9F8; 136 | } 137 | 138 | .radio label:hover { 139 | background-color: #e1e2df; 140 | border: 2px solid #e1e2df; 141 | } 142 | 143 | /* Scrollbars */ 144 | *::-webkit-scrollbar { 145 | width: 10px; 146 | } 147 | *::-webkit-scrollbar-track { 148 | background: #EDEFEB; 149 | } 150 | *::-webkit-scrollbar-thumb { 151 | background-color:#63615fbf; 152 | border-radius: 5px; 153 | border: 3px solid #EDEFEB; 154 | } 155 | 156 | .study-browser-banner { 157 | background: rgb(2,21,70); 158 | color: white; 159 | padding: 1px; 160 | padding-left: 2px; 161 | /* text-align: center; */ 162 | } 163 | 164 | .footer-banner { 165 | background: hwb(240 82% 17%); 166 | color: white; 167 | padding: 1px; 168 | padding-left: 2px; 169 | /* height: 15px; */ 170 | /* text-align: center; */ 171 | } 172 | 173 | .div-logo{ 174 | display: inline-block; 175 | float: right; 176 | /* text-align: center; */ 177 | } 178 | 179 | .logo{ 180 | height: 75px; 181 | padding: 6px; 182 | margin-top: 3px; 183 | } 184 | 185 | .logo1{ 186 | height: 200px; 187 | padding: 6px; 188 | margin-top: 3px; 189 | } 190 | 191 | -------------------------------------------------------------------------------- /Monte Carlo Simulation for Pipe Failure/config.yaml: -------------------------------------------------------------------------------- 1 | #INITIAL SETTINGS 2 | 3 | # Diameter 4 | diameter_mean: 150 5 | diameter_cov: 5 6 | 7 | # Thickness 8 | thickness_mean: 4 9 | thickness_cov: 5 10 | 11 | # Yield Strength 12 | yield_mean: 200 13 | yield_cov: 10 14 | 15 | # Internal Pressure 16 | internal_pressure: 10 17 | 18 | # Iterations 19 | iter_start: 1000 20 | iter_end: 20000 21 | iter_step: 1000 -------------------------------------------------------------------------------- /Portfolio Simulator Using Python/Preview.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Portfolio Simulator Using Python/Preview.gif -------------------------------------------------------------------------------- /Portfolio Simulator Using 
Python/README.md: -------------------------------------------------------------------------------- 1 | # Build A Personalized VaR Simulator App Using Python 2 | The objective of the app is to enable value-at-risk (VaR) assessment of a portfolio. 3 | 4 | * **Step 1**: Open the terminal and navigate to the project directory 5 | 6 | * **Step 2**: Type the command below: 7 | python app.py 8 | 9 | * **Step 3**: The terminal will run the application 10 | 11 | * **Step 4**: Open the localhost URL in the browser. The app will load as shown below; click the CREATE button 12 | 13 | ## Here is the preview of the app:<br>
14 | ![alt text](https://github.com/amitvkulkarni/Data-Apps/blob/1c78a4f77e0d690ec505423cd4d4d09d29f82bb3/Portfolio%20Simulator%20Using%20Python/Preview.gif) 15 | 16 | 17 | ## [Click to read more on this on my blog](https://amitvkulkarni.medium.com/build-a-personalized-var-simulator-app-using-python-476e95774554) 18 | 19 | 20 | -------------------------------------------------------------------------------- /Portfolio Simulator Using Python/dataload.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | import datetime 4 | from datetime import date 5 | from nsepy import get_history as gh 6 | 7 | 8 | 9 | ################################################################## 10 | # Data Loading 11 | ################################################################## 12 | def load_stock_data(start_date, end_date, ticker): 13 | """_summary_ 14 | 15 | Args: 16 | start_date (Date): Start data for stock selection 17 | end_date (Date): End date for stock selection 18 | ticker (List): List of stocks 19 | 20 | Returns: 21 | Dataframe: price data for selected stock for a selected period 22 | """ 23 | try: 24 | df = pd.DataFrame() 25 | 26 | for i in range(len(ticker)): 27 | data = gh(symbol=ticker[i],start= start_date, end=end_date)[['Symbol','Close']] 28 | data.rename(columns={'Close':data['Symbol'][0]},inplace=True) 29 | data.drop(['Symbol'], axis=1,inplace=True) 30 | if i == 0: 31 | df = data 32 | 33 | if i != 0: 34 | df = df.join(data) 35 | 36 | 37 | return df 38 | except Exception as e: 39 | print(f'An exception occurred while executing data load: {e}') 40 | 41 | 42 | 43 | 44 | ################################################################## 45 | # Data Loading 46 | ################################################################## 47 | 48 | def sim_portfolio(weights, dd_returns): 49 | """_summary_ 50 | 51 | Args: 52 | weights (List): Weights for each of the stock 53 | 54 | Returns: 55 | Float: Returns the risk at 95% percentile 56 | """ 57 | try: 58 | tmp_pp = (weights * dd_returns.values).sum(axis=1) 59 | var_sim_port = np.percentile(tmp_pp, 5, interpolation = 'lower') 60 | return var_sim_port 61 | except Exception as e: 62 | print(f'An exception occurred while executing sim_portfolio: {e}') 63 | 64 | 65 | def sim_bootstrap(dd_returns, company): 66 | """_summary_ 67 | 68 | Args: 69 | dd_returns (Dataframe): the dataframe which has stock returns. 
70 | company (List): List of stocks 71 | 72 | Returns: 73 | int: returns various metrics 74 | """ 75 | 76 | try: 77 | 78 | port_returns = [] 79 | port_volatility = [] 80 | port_weights = [] 81 | 82 | num_assets = len(company) 83 | num_portfolios = 100 84 | np.random.seed(1357) 85 | for port in range(num_portfolios): 86 | weights = np.random.random(num_assets) 87 | weights = weights/sum(weights) 88 | port_weights.append(weights) 89 | df_wts_returns = dd_returns.mean().dot(weights) 90 | port_returns.append(df_wts_returns*100) 91 | 92 | var_port_95 = sim_portfolio(weights, dd_returns) 93 | port_volatility.append(var_port_95) 94 | 95 | 96 | port_weights = [wt for wt in port_weights] 97 | dff = {'Returns': port_returns, 'Risk': port_volatility, 'Weights': port_weights} 98 | df_risk = pd.DataFrame(dff) 99 | 100 | min_risk = df_risk.iloc[df_risk['Risk'].idxmax()] 101 | 102 | # low_risk_return = f'{round(abs(min_risk[0]),4)*100:.2f}' 103 | # low_risk_volatility = f'{round(abs(min_risk[1]),4)*100:.2f}' 104 | low_risk_return = f'{round((min_risk[0]),4)*100:.2f}' 105 | low_risk_volatility = f'{round((min_risk[1]),4)*100:.2f}' 106 | low_risk_wts = min_risk[2] 107 | 108 | print(f'{low_risk_volatility} and {low_risk_return}') 109 | 110 | 111 | max_risk = df_risk.iloc[df_risk['Risk'].idxmin()] 112 | # high_risk_return = f'{round(abs(max_risk[0]),4)*100:.2f}' 113 | # high_risk_volatility = f'{round(abs(max_risk[1]),4)*100:.2f}' 114 | high_risk_return = f'{round((max_risk[0]),4)*100:.2f}' 115 | high_risk_volatility = f'{round((max_risk[1]),4)*100:.2f}' 116 | high_risk_wts = max_risk[2] 117 | 118 | print(f'{high_risk_volatility} and {high_risk_return}') 119 | 120 | return low_risk_wts, high_risk_wts, low_risk_return,low_risk_volatility, high_risk_return, high_risk_volatility, df_risk 121 | 122 | except Exception as e: 123 | print(f'An exception occurred while executing sim_bootstrap: {e}') -------------------------------------------------------------------------------- /Portfolio Simulator Using Python/requirements.txt: -------------------------------------------------------------------------------- 1 | beautifulsoup4==4.11.2 2 | certifi==2022.12.7 3 | charset-normalizer==3.1.0 4 | click==8.1.3 5 | colorama==0.4.6 6 | dash==2.8.1 7 | dash-bootstrap-components==1.4.0 8 | dash-core-components==2.0.0 9 | dash-daq==0.5.0 10 | dash-html-components==2.0.0 11 | dash-table==5.0.0 12 | Flask==2.2.3 13 | idna==3.4 14 | importlib-metadata==6.0.0 15 | itsdangerous==2.1.2 16 | Jinja2==3.1.2 17 | lxml==4.9.2 18 | MarkupSafe==2.1.2 19 | nsepy==0.8 20 | numpy==1.24.2 21 | pandas==1.5.3 22 | plotly==5.13.1 23 | python-dateutil==2.8.2 24 | pytz==2022.7.1 25 | requests==2.28.2 26 | six==1.16.0 27 | soupsieve==2.4 28 | tenacity==8.2.2 29 | urllib3==1.26.15 30 | Werkzeug==2.2.3 31 | zipp==3.15.0 32 | -------------------------------------------------------------------------------- /Price Optimization/Data/price.csv: -------------------------------------------------------------------------------- 1 | Year,Quarter,Quantity,Price 2 | 1977,1,22.9976,142.1667 3 | 1977,2,22.6131,143.9333 4 | 1977,3,23.4054,146.5 5 | 1977,4,22.7401,150.8 6 | 1978,1,22.0441,160 7 | 1978,2,21.7602,182.5333 8 | 1978,3,21.6064,186.2 9 | 1978,4,21.8814,186.4333 10 | 1979,1,20.5086,211.7 11 | 1979,2,19.0408,231.5 12 | 1979,3,19.1457,222.7 13 | 1979,4,19.3989,223.8333 14 | 1980,1,18.947,231.1667 15 | 1980,2,18.898,227.5 16 | 1980,3,19.2127,237.5333 17 | 1980,4,19.4988,238.1667 18 | 1981,1,19.3429,233.5 19 | 1981,2,18.9816,230.7 20 | 1981,3,19.6459,239.0333 21 
| 1981,4,19.2868,235.4333 22 | 1982,1,18.8193,233.3 23 | 1982,2,18.7968,242.9667 24 | 1982,3,19.9882,244.0333 25 | 1982,4,19.4242,233.1333 26 | 1983,1,19.2252,233.8667 27 | 1983,2,19.3361,240.9333 28 | 1983,3,20.4952,234.3667 29 | 1983,4,19.5995,227.1667 30 | 1984,1,19.3191,238.4667 31 | 1984,2,19.3744,238 32 | 1984,3,19.9738,232.2 33 | 1984,4,19.7597,233.2333 34 | 1985,1,19.0586,234.9 35 | 1985,2,20.0757,230.4333 36 | 1985,3,20.8175,222.7333 37 | 1985,4,19.2488,226.4333 38 | 1986,1,19.0212,229.2667 39 | 1986,2,20.1607,222.9 40 | 1986,3,20.7108,225.6333 41 | 1986,4,18.9416,229.3 42 | 1987,1,18.2746,230.6 43 | 1987,2,18.5266,239.1 44 | 1987,3,19.175,242.1667 45 | 1987,4,17.8807,241.6667 46 | 1988,1,18.1678,241.7333 47 | 1988,2,18.4639,250.1 48 | 1988,3,18.7622,254.5333 49 | 1988,4,17.2552,255 50 | 1989,1,16.9765,260.7 51 | 1989,2,17.5192,266.9667 52 | 1989,3,17.5975,268.0333 53 | 1989,4,17.2416,266.9333 54 | 1990,1,16.5516,272.6333 55 | 1990,2,17.3763,281.2 56 | 1990,3,17.4207,280.3667 57 | 1990,4,16.4315,289.8667 58 | 1991,1,15.9842,294.2667 59 | 1991,2,17.0449,295.2 60 | 1991,3,17.5591,284.6333 61 | 1991,4,16.2093,279.2 62 | 1992,1,16.3952,282.2667 63 | 1992,2,16.9538,286.8333 64 | 1992,3,17.227,282.6667 65 | 1992,4,15.9093,286.6667 66 | 1993,1,15.8915,292.1333 67 | 1993,2,16.2058,300.4 68 | 1993,3,17.043,292 69 | 1993,4,15.9513,289.2333 70 | 1994,1,16.2494,286.6667 71 | 1994,2,16.8868,286.1667 72 | 1994,3,17.3775,279.5 73 | 1994,4,16.4723,279.1667 74 | 1995,1,14.3097,283.8667 75 | 1995,2,15.0895,283.1 76 | 1995,3,15.647,285.1 77 | 1995,4,14.3476,285.2667 78 | 1996,1,15.0783,278.7 79 | 1996,2,15.6763,277.4 80 | 1996,3,15.1606,279.8 81 | 1996,4,14.2764,285.0333 82 | 1997,1,14.2921,278.8 83 | 1997,2,15.2994,278.9667 84 | 1997,3,15.0513,281.0667 85 | 1997,4,14.2354,279.3 86 | 1998,1,14.6884,273.4667 87 | 1998,2,15.1985,278.1 88 | 1998,3,15.5085,277.3667 89 | 1998,4,14.6475,279.5333 90 | 1999,1,14.6785,278 91 | 1999,2,15.7635,284.7667 92 | 1999,3,15.6689,289.2333 93 | -------------------------------------------------------------------------------- /Price Optimization/Home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Price Optimization/Home.png -------------------------------------------------------------------------------- /Price Optimization/Python/optimize_price.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | from pandas import DataFrame 4 | import matplotlib.pyplot as plt 5 | import seaborn as sns 6 | from statsmodels.formula.api import ols 7 | import plotly.express as px 8 | import plotly.graph_objects as go 9 | 10 | # Load the data 11 | df = pd.read_csv('Data/price.csv') 12 | 13 | def fun_optimize(var_opt, var_range, var_cost, df): 14 | """[summary] 15 | 16 | Args: 17 | var_opt ([string]): [The value will be either price or quantity based on the selection made from UI] 18 | var_range ([int]): [The value will be maximum & minimum price based on selection made from range slider from UI] 19 | var_cost ([type]): [This is the fixed cost entered from UI] 20 | df ([type]): [The data set for our usecase] 21 | 22 | Returns: 23 | [list]: [Returns a dataframe for table, 24 | chart for Price Vs Quantity, 25 | chart for optimized price set for maximum revenue, 26 | Optimized value of revenue] 27 | """ 28 | 29 | fig_PriceVsQuantity = px.scatter( 30 | df, x="Price", 
y="Quantity", color="Year", trendline="ols") 31 | 32 | # fit OLS model 33 | model = ols("Quantity ~ Price", data=df).fit() 34 | 35 | Price = list(range(var_range[0], var_range[1], 10)) 36 | cost = int(var_cost) 37 | quantity = [] 38 | Revenue = [] 39 | for i in Price: 40 | demand = model.params[0] + (model.params[1] * i) 41 | quantity.append(demand) 42 | Revenue.append((i-cost) * demand) 43 | 44 | profit = pd.DataFrame( 45 | {"Price": Price, "Revenue": Revenue, "Quantity": quantity}) 46 | 47 | max_val = profit.loc[(profit['Revenue'] == profit['Revenue'].max())] 48 | 49 | fig_PriceVsRevenue = go.Figure() 50 | fig_PriceVsRevenue.add_trace(go.Scatter( 51 | x=profit['Price'], y=profit['Revenue'])) 52 | fig_PriceVsRevenue.add_annotation(x=int(max_val['Price']), y=int(max_val['Revenue']), 53 | text="Maximum Revenue", 54 | showarrow=True, 55 | arrowhead=1) 56 | 57 | fig_PriceVsRevenue.update_layout( 58 | showlegend=False, 59 | xaxis_title="Price", 60 | yaxis_title="Revenue") 61 | 62 | fig_PriceVsRevenue.add_vline(x=int(max_val['Price']), line_width=2, line_dash="dash", 63 | line_color="red", opacity=0.25) 64 | 65 | # Identify the optimal price at which the revenue is maximum 66 | # profit[profit['Revenue'] == profit['Revenue'].max()] 67 | # pd.set_option('display.max_rows', profit.shape[0]+1) 68 | # profit.style.highlight_max(color = 'blue', axis = None) 69 | 70 | return [profit, fig_PriceVsRevenue, fig_PriceVsQuantity, round(max_val['Price'].values[0],2),round(max_val['Revenue'].values[0],3)] 71 | 72 | 73 | 74 | 75 | -------------------------------------------------------------------------------- /Price Optimization/Python/optimize_quantity.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | from pandas import DataFrame 4 | import matplotlib.pyplot as plt 5 | import seaborn as sns 6 | from statsmodels.formula.api import ols 7 | import plotly.express as px 8 | import plotly.graph_objects as go 9 | 10 | 11 | def fun_optimize(var_opt, var_range, var_cost, df): 12 | """[summary] 13 | 14 | Args: 15 | var_opt ([string]): [The value will be either price or quantity based on the selection made from UI] 16 | var_range ([int]): [The value will be maximum & minimum quantity based on selection made from range slider from UI] 17 | var_cost ([type]): [This is the fixed cost entered from UI] 18 | df ([type]): [The data set for our usecase] 19 | 20 | Returns: 21 | [list]: [Returns a dataframe for table, 22 | chart for Price Vs Quantity, 23 | chart for optimized quantity set for maximum revenue, 24 | Optimized value of revenue] 25 | """ 26 | 27 | fig_PriceVsQuantity = px.scatter( 28 | df, x="Price", y="Quantity", color="Year", trendline="ols") 29 | 30 | # fit OLS model 31 | model = ols("Price ~ Quantity ", data=df).fit() 32 | 33 | Quantity = list(range(var_range[0], var_range[1], 10)) 34 | cost = int(var_cost) 35 | Price = [] 36 | Revenue = [] 37 | for i in Quantity: 38 | demand = model.params[0] + (model.params[1] * i) 39 | Price.append(demand) 40 | Revenue.append((i) * (demand - cost)) 41 | 42 | # create data frame of price and revenue 43 | profit = pd.DataFrame( 44 | {"Price": Price, "Quantity": Quantity, "Revenue": Revenue}) 45 | 46 | max_val = profit.loc[(profit['Revenue'] == profit['Revenue'].max())] 47 | 48 | 49 | fig_QuantityVsRevenue = go.Figure() 50 | fig_QuantityVsRevenue.add_trace(go.Scatter( 51 | x=profit['Quantity'], y=profit['Revenue'])) 52 | fig_QuantityVsRevenue.add_annotation(x=int(max_val['Quantity']), 
y=int(max_val['Revenue']), 53 | text="Maximum Revenue", 54 | showarrow=True, 55 | arrowhead=1) 56 | 57 | fig_QuantityVsRevenue.update_layout( 58 | showlegend=False, 59 | xaxis_title="Quantity", 60 | yaxis_title="Revenue") 61 | 62 | fig_QuantityVsRevenue.add_vline(x=int(max_val['Quantity']), line_width=2, line_dash="dash", 63 | line_color="red", opacity=0.25) 64 | 65 | return [profit, fig_QuantityVsRevenue, fig_PriceVsQuantity, round(max_val['Quantity'].values[0],2), round(max_val['Revenue'].values[0],3)] 66 | -------------------------------------------------------------------------------- /Price Optimization/README.md: -------------------------------------------------------------------------------- 1 | # Product Price Optimization App build with Python & Dash 2 | This is a simulation app which helps businesses to quickly identify the optimal price or a quantity of a given product so that revenue is maximized. The user can set constraints, add additional inputs eg: current cost of production etc on the UI and instantly visualize the trends. The App not only recommends the optimized value but also shows various possible values both visually and also in the tabular form for ease of analysis. 3 | 4 | ## [Read more on my blog](https://www.analyticsvidhya.com/blog/2021/08/build-a-price-recommender-app-with-python/) 5 | 6 | ## Here is the preview of the app:
7 | ![alt text](https://github.com/amitvkulkarni/Data-Apps/blob/75accd84f0a89fadfb42cc62484a78d68c51ed6d/Price%20Optimization/Home.png) 8 | 9 | 10 | -------------------------------------------------------------------------------- /Price Optimization/assets/custom.css: -------------------------------------------------------------------------------- 1 | /*Fonts ––––––––––––––––––––––––––––––––––––––––––––––––––*/ 2 | @import url('https://fonts.googleapis.com/css?family=Roboto&display=swap'); 3 | 4 | body{ 5 | margin: 0px; 6 | padding: 0px; 7 | background-color: #fcfcfc; 8 | font-family: 'Roboto'; 9 | color: #203cb3; 10 | } 11 | 12 | .study-browser-banner { 13 | background: rgb(2,21,70); 14 | color: white; 15 | padding: 12px; 16 | padding-left: 2px; 17 | } 18 | 19 | .div-logo{ 20 | display: inline-block; 21 | float: right; 22 | } 23 | 24 | .logo{ 25 | height: 35px; 26 | padding: 6px; 27 | margin-top: 3px; 28 | } 29 | 30 | .h2-title, .h2-title-mobile{ 31 | font-family: 'Roboto'; 32 | display: inline-block; 33 | letter-spacing: 3.8px; 34 | font-weight: 800; 35 | font-size: 20px; 36 | } 37 | 38 | .h2-title-mobile{ 39 | display: none; 40 | } 41 | 42 | h5, h6{ 43 | font-family: 'Roboto'; 44 | font-weight: 600; 45 | font-size: 16px; 46 | } 47 | 48 | h5{ 49 | padding-left: 42px; 50 | } 51 | 52 | .alert { 53 | padding: 20px; 54 | background-color: #f44336; 55 | color: white; 56 | } 57 | 58 | .bg-white{ 59 | background-color: #fcfcfc; 60 | padding: 24px 32px; 61 | height: 400px; 62 | } 63 | 64 | .app-body{ 65 | margin-left: 4%; 66 | } 67 | 68 | .card{ 69 | padding:48px 24px 48px 24px; 70 | margin-left: 4%; 71 | } 72 | 73 | .card-left{ 74 | padding:48px 24px 48px 24px; 75 | margin-left: 0px; 76 | } 77 | 78 | .padding-top-bot{ 79 | padding-top: 2px; 80 | padding-bottom: 2px; 81 | } 82 | 83 | .upload{ 84 | width: 100%; 85 | line-height: 60px; 86 | border-width: 1px; 87 | border-style: dashed; 88 | border-radius: 5px; 89 | text-align: center; 90 | } 91 | 92 | .upload p, .upload a { 93 | display: inline; 94 | } 95 | 96 | .Select-control{ 97 | border: 1px solid #203cb3; 98 | } 99 | 100 | @media only screen and (max-width: 320px){ 101 | .Select-menu-outer, .Select-value{ 102 | font-size: 10.5px; 103 | } 104 | .upload{ 105 | padding: 5px; 106 | } 107 | } 108 | 109 | /* mobile */ 110 | @media only screen and (max-width: 768px) { 111 | .upload{ 112 | line-height: 60px; 113 | border-width: 1px; 114 | border-style: dashed; 115 | border-radius: 5px; 116 | text-align: center; 117 | font-size: small; 118 | } 119 | 120 | .columns{ 121 | width: 100%; 122 | } 123 | 124 | .card, .card-left{ 125 | padding: 24px; 126 | margin: 0px; 127 | } 128 | 129 | .bg-white{ 130 | height: auto; 131 | } 132 | 133 | .study-browser-banner{ 134 | padding-left: 24px; 135 | } 136 | 137 | .logo{ 138 | height: 28px; 139 | padding-left:0px; 140 | padding-bottom:0px; 141 | } 142 | 143 | .div-logo{ 144 | float: left; 145 | display: block; 146 | width: 100%; 147 | } 148 | 149 | .h2-title{ 150 | display:none; 151 | } 152 | 153 | .h2-title-mobile{ 154 | display:block; 155 | float:left; 156 | } 157 | 158 | .app-body{ 159 | margin-left: 0px; 160 | } 161 | 162 | .four.columns { width: 100%; } 163 | .eight.columns { width: 100%; } 164 | 165 | .columns{ 166 | text-align: center; 167 | } 168 | 169 | .user-control{ 170 | padding-top: 24px; 171 | padding-bottom: 24px; 172 | 173 | 174 | } 175 | } 176 | -------------------------------------------------------------------------------- /Price Optimization/assets/dash-logo-new.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Price Optimization/assets/dash-logo-new.png -------------------------------------------------------------------------------- /Price Optimization/assets/resizing_script.js: -------------------------------------------------------------------------------- 1 | if (!window.dash_clientside) { 2 | window.dash_clientside = {}; 3 | } 4 | window.dash_clientside.clientside = { 5 | resize: function(value) { 6 | console.log("resizing..."); // for testing 7 | setTimeout(function() { 8 | window.dispatchEvent(new Event("resize")); 9 | console.log("fired resize"); 10 | }, 500); 11 | return null; 12 | } 13 | }; 14 | -------------------------------------------------------------------------------- /Price Optimization/assets/styles.css: -------------------------------------------------------------------------------- 1 | .js-plotly-plot .plotly .modebar { 2 | padding-top: 5%; 3 | margin-right: 3.5%; 4 | } 5 | 6 | body { 7 | background-color: #f2f2f2; 8 | margin: 5%; 9 | } 10 | 11 | .two.columns { 12 | width: 16.25%; 13 | } 14 | 15 | .column, 16 | .columns { 17 | margin-left: 0.5%; 18 | } 19 | 20 | .pretty_container { 21 | border-radius: 2px; 22 | background-color: #f9f9f9; 23 | margin: 10px; 24 | padding: 2px; 25 | position: relative; 26 | box-shadow: 2px 2px 2px lightgrey; 27 | } 28 | 29 | .bare_container { 30 | margin: 0 0 0 0; 31 | padding: 0 0 0 0; 32 | } 33 | 34 | .dcc_control { 35 | margin: 0; 36 | padding: 5px; 37 | width: calc(100%-40px); 38 | } 39 | 40 | .control_label { 41 | margin: 0; 42 | padding: 10px; 43 | padding-bottom: 0px; 44 | margin-bottom: 0px; 45 | width: calc(100%-40px); 46 | } 47 | 48 | .rc-slider { 49 | margin-left: 0px; 50 | padding-left: 0px; 51 | } 52 | 53 | .flex-display { 54 | display: flex; 55 | } 56 | 57 | .container-display { 58 | display: flex; 59 | } 60 | 61 | #individual_graph, 62 | #aggregate_graph { 63 | width: calc(100% - 30px); 64 | position: absolute; 65 | } 66 | 67 | #count_graph { 68 | position: absolute; 69 | height: calc(100% - 30px); 70 | width: calc(100% - 30px); 71 | } 72 | 73 | #countGraphContainer { 74 | flex: 5; 75 | position: relative; 76 | } 77 | 78 | #header { 79 | align-items: center; 80 | } 81 | 82 | #learn-more-button { 83 | text-align: center; 84 | height: 100%; 85 | padding: 0 20px; 86 | text-transform: none; 87 | font-size: 15px; 88 | float: right; 89 | margin-right: 10px; 90 | margin-top: 30px; 91 | } 92 | #title { 93 | text-align: center; 94 | } 95 | 96 | .mini_container { 97 | border-radius: 5px; 98 | background-color:#e4e7e7; 99 | margin: 10px; 100 | padding: 15px; 101 | position: relative; 102 | box-shadow: 2px 2px 2px lightgrey; 103 | } 104 | 105 | #right-column { 106 | display: flex; 107 | flex-direction: column; 108 | } 109 | 110 | #wells { 111 | flex: 1; 112 | } 113 | 114 | #gas { 115 | flex: 1; 116 | } 117 | 118 | #aggregate_data { 119 | align-items: center; 120 | } 121 | 122 | #oil { 123 | flex: 1; 124 | } 125 | 126 | #water { 127 | flex: 1; 128 | } 129 | 130 | #tripleContainer { 131 | display: flex; 132 | flex: 3; 133 | } 134 | 135 | #mainContainer { 136 | display: flex; 137 | flex-direction: column; 138 | } 139 | 140 | #pie_graph > div > div > svg:nth-child(3) > g.infolayer > g.legend { 141 | pointer-events: all; 142 | transform: translate(30px, 349px); 143 | } 144 | -------------------------------------------------------------------------------- /Price 
Optimization/requirements.txt: -------------------------------------------------------------------------------- 1 | # local package install 2 | -e . 3 | 4 | # third party packages 5 | dash 6 | pandas 7 | numpy 8 | dash_table 9 | logging 10 | plotly.graph_objs 11 | plotly.express 12 | dash_core_components 13 | dash_html_components 14 | dash_bootstrap_components 15 | dash.dependencies 16 | Python.optimize_price 17 | Python.optimize_quantity 18 | dash_daq as daq 19 | matplotlib.pyplot 20 | seaborn 21 | statsmodels.formula.api 22 | -------------------------------------------------------------------------------- /Project Management With R Shiny/Global.R: -------------------------------------------------------------------------------- 1 | ############################################################################### 2 | # LOAD PACKAGES AND MODULES # 3 | ############################################################################### 4 | 5 | library(shiny) 6 | library(shinydashboard) 7 | library(dplyr) 8 | library(leaflet) 9 | library(ggplot2) 10 | library(tidyverse) 11 | library(DT) 12 | library(plotly) 13 | library(purrr) 14 | library(glue) 15 | library(rhandsontable) 16 | library(tidyr) 17 | library(shinyalert) 18 | library(shinyjs) 19 | library(supercaliheatmapwidget) 20 | library(lubridate) 21 | library(vistime) 22 | library(timevis) 23 | library(rmarkdown) 24 | 25 | 26 | 27 | ############################################################################### 28 | # LOAD DATA # 29 | ############################################################################### 30 | 31 | 32 | # raw_data_tasks <- read.csv("./Data/task_tracker.csv") 33 | #raw_data_projects <- read.csv("./Data/project_tracker.csv") 34 | # raw_data_team <- read.csv("./Data/team_tracker.csv") 35 | # raw_data_time <- read.csv("./Data/time_tracker.csv") 36 | # raw_data_time$day <- dmy(raw_data_time$day) 37 | 38 | 39 | #saveRDS(raw_data_projects,"Projects.rds") 40 | 41 | 42 | raw_data_projects <- readRDS("Projects.rds") 43 | raw_data_tasks <- readRDS("Tasks.rds") 44 | 45 | -------------------------------------------------------------------------------- /Project Management With R Shiny/Images/DIY.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Project Management With R Shiny/Images/DIY.gif -------------------------------------------------------------------------------- /Project Management With R Shiny/Images/Download.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Project Management With R Shiny/Images/Download.gif -------------------------------------------------------------------------------- /Project Management With R Shiny/Images/Projects.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Project Management With R Shiny/Images/Projects.gif -------------------------------------------------------------------------------- /Project Management With R Shiny/Images/Tasks.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Project Management With R Shiny/Images/Tasks.gif 
-------------------------------------------------------------------------------- /Project Management With R Shiny/Projects.rds: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Project Management With R Shiny/Projects.rds -------------------------------------------------------------------------------- /Project Management With R Shiny/README.md: -------------------------------------------------------------------------------- 1 | # ProjectManagement 2 | The application can be used for creation of projects and related tasks. The user will be able to carry out all the basic CRUD operations on the data and save the changes. 3 | 4 | # [Read more about this on my blog](https://www.analyticsvidhya.com/blog/2021/05/project-task-management-with-r-shiny/) 5 | 6 | 7 | Home page --> Showcases some of the metrics related to projects and tasks which are dynamically updated based on the changes that are made to the data. 8 | 9 | Projects & Tasks --> View all the projects and tasks on the same screen. You can add/delete/edit/update the data in the table in the same way you use excel sheets. 10 | ![alt text](https://github.com/amitvkulkarni/ProjectManagement/blob/master/Images/Tasks.gif) 11 | ![alt text](https://github.com/amitvkulkarni/ProjectManagement/blob/master/Images/Projects.gif) 12 | 13 | Do it Yourself --> The user will be able to select esither a projects or Tasks and carry out piviot operations on the data for data analysis. The user can generate the charts and also color code/ heatmap the data on the pivot. 14 | 15 | ![alt text](https://github.com/amitvkulkarni/ProjectManagement/blob/master/Images/DIY.gif) 16 | 17 | Help --> Allows users to take a quick look at various features of the application. 18 | 19 | Download reports --> Choose either a PDF or WORD document format and download the report. 
20 | ![alt text](https://github.com/amitvkulkarni/ProjectManagement/blob/master/Images/Download.gif) 21 | -------------------------------------------------------------------------------- /Project Management With R Shiny/Tasks.rds: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Project Management With R Shiny/Tasks.rds -------------------------------------------------------------------------------- /Project Management With R Shiny/report.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Project Management" 3 | date: "`r Sys.Date()`" 4 | output: 5 | html_document: 6 | fig_caption: yes 7 | fig_height: 6 8 | fig_width: 7 9 | highlight: tango 10 | number_sections: yes 11 | theme: cerulean 12 | toc: yes 13 | toc_depth: 2 14 | toc_float: yes 15 | pdf_document: default 16 | word_document: 17 | toc: yes 18 | toc_depth: '2' 19 | params: 20 | n: NA 21 | --- 22 | 23 | 24 | ```{r setup, include=FALSE, message=FALSE} 25 | knitr::opts_chunk$set(echo = FALSE) 26 | ``` 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | # **project share of research area pie chart** 63 | 64 | 65 | 66 | ```{r message=FALSE, warning=FALSE} 67 | 68 | ##---------------Home page / project share of research area pie chart------------------------------------------ 69 | 70 | df_ProjectType <- raw_data_projects %>% 71 | group_by(PROJECT.TYPE) %>% 72 | summarise("#Projects" = n()) 73 | 74 | fig <- plot_ly(type='pie', labels=df_ProjectType$PROJECT.TYPE, values=df_ProjectType$`#Projects`, 75 | textinfo='label+percent',insidetextorientation='radial') 76 | 77 | fig <- fig %>% layout(legend = list(orientation = 'h')) 78 | fig 79 | 80 | 81 | 82 | ``` 83 | 84 | # **Project status chart** 85 | ```{r message=FALSE, warning=FALSE} 86 | 87 | ##---------------Home page / Project status chart-------------------------------------------------------------- 88 | 89 | ggplot(raw_data_projects) + 90 | aes(x = PROJECT.TYPE, fill = PROJECT.STATUS) + 91 | geom_bar(position = "dodge") + 92 | scale_fill_hue() + 93 | coord_flip() + 94 | theme_bw() + 95 | theme(legend.position = "none") + 96 | facet_wrap(vars(PROJECT.STATUS)) 97 | 98 | 99 | 100 | ``` 101 | 102 | # **project complexity donut chart** 103 | ```{r message=FALSE, warning=FALSE} 104 | 105 | ##---------------Home page / project complexity donut chart------------------------------------------------- 106 | p1 <- raw_data_projects %>% 107 | group_by(COMPLEXITY) %>% 108 | summarise("Projects" = n()) 109 | 110 | fig <- p1 %>% plot_ly(labels = ~COMPLEXITY, values = ~Projects) 111 | fig <- fig %>% add_pie(hole = 0.6) 112 | fig <- fig %>% layout(showlegend = T,xaxis = list(showgrid = T),yaxis = list(showgrid = T)) 113 | fig 114 | 115 | ``` 116 | 117 | # **Overview of projects** 118 | ```{r message=FALSE, warning=FALSE} 119 | 120 | ##---------------Home page / Overview of projects------------------------------------------------- 121 | 122 | df_tmp_projects <-raw_data_projects 123 | 124 | df_tmp_projects[,-1] 125 | 126 | 127 | ``` 128 | # **List of upcoming tasks for delivery** 129 | 130 | ```{r message=FALSE, warning=FALSE} 131 | 132 | df_tmp_upcoming <- raw_data_tasks 133 | 134 | df_tmp_upcoming$END.DATE <- dmy(df_tmp_upcoming$END.DATE) 135 | df_tmp_upcoming$START.DATE <- 
dmy(df_tmp_upcoming$START.DATE) 136 | 137 | df_tmp_upcoming <- df_tmp_upcoming%>% 138 | filter((as.Date(END.DATE) > today()) & (STATUS != "Completed")) 139 | 140 | 141 | df_tmp_upcoming[,c("PROJECT.NAME", "TASK.NAME", "START.DATE","END.DATE", "STATUS")] 142 | 143 | ``` 144 | 145 | 146 | # **List of overdue tasks** 147 | ```{r message=FALSE, warning=FALSE} 148 | 149 | df_tmp_overdue <- raw_data_tasks 150 | df_tmp_overdue$END.DATE <- dmy(df_tmp_overdue$END.DATE) 151 | df_tmp_overdue$START.DATE <- dmy(df_tmp_overdue$START.DATE) 152 | 153 | df_tmp_overdue <- df_tmp_overdue %>% 154 | filter(as.Date(END.DATE) < today() & STATUS != "Completed") 155 | 156 | 157 | df_tmp_overdue[,c("PROJECT.NAME", "TASK.NAME", "START.DATE","END.DATE", "STATUS") ] 158 | 159 | 160 | 161 | ``` 162 | 163 | # **Status of audit for each of the projects** 164 | 165 | 166 | ```{r message=FALSE, warning=FALSE} 167 | 168 | p1 <- raw_data_projects %>% 169 | group_by(AUDIT) %>% 170 | summarise("count" = n()) 171 | 172 | fig <- p1 %>% plot_ly(labels = ~AUDIT, values = ~count) 173 | fig <- fig %>% add_pie(hole = 0.6) 174 | fig <- fig %>% layout(showlegend = T,xaxis = list(showgrid = T),yaxis = list(showgrid = T)) 175 | fig 176 | 177 | ``` 178 | 179 | # **List of all the tasks for each of the projects** 180 | 181 | ```{r message=FALSE, warning=FALSE} 182 | 183 | 184 | df_proj_task <- raw_data_tasks %>% 185 | group_by(PROJECT.NAME, TASK.NAME) %>% 186 | summarise("cnt" = n()) %>% 187 | arrange(desc(cnt)) 188 | 189 | 190 | ggplot(df_proj_task) + 191 | aes(x = PROJECT.NAME) + 192 | geom_bar(fill = "#6baed6") + 193 | labs(y = "Number of Tasks") + 194 | coord_flip() + 195 | theme_bw() 196 | 197 | ``` 198 | 199 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Build A Personalized VaR Simulator App Using Python:<br>
3 | ![alt text](https://github.com/amitvkulkarni/Data-Apps/blob/1c78a4f77e0d690ec505423cd4d4d09d29f82bb3/Portfolio%20Simulator%20Using%20Python/Preview.gif) 4 | 5 | ## Customer Loyalty Program App:
6 | ![alt text](https://github.com/amitvkulkarni/Data-Apps/blob/2437b0c2a49ccad29af4db634846238d480ea1f1/Customer%20Loyalty%20Program/Images/Home1.PNG) 7 | ![alt text](https://github.com/amitvkulkarni/Data-Apps/blob/2437b0c2a49ccad29af4db634846238d480ea1f1/Customer%20Loyalty%20Program/Images/Home2.PNG) 8 | 9 | ## Python App to Measure Customer Lifetime Value (CLV) 10 | ![alt text](https://github.com/amitvkulkarni/Data-Apps/blob/c48a6964033d92d1e7f31adf80bb08869c446af/Customer%20Lifetime%20Value/Home.png) 11 | 12 | ## Product Price Optimization App build with Python & Dash:
13 | ![alt text](https://github.com/amitvkulkarni/Data-Apps/blob/75accd84f0a89fadfb42cc62484a78d68c51ed6d/Price%20Optimization/Home.png) 14 | 15 | ## Describe to Draw: Building an AI-Powered Python App for Generating Flowcharts:<br>
16 | ![alt text](https://github.com/amitvkulkarni/Data-Apps/blob/e43212c4ece8892dbd8d1c2d0b751f586ba94e7b/Generative%20AI/AI-Flowcharts/ai-workflow.gif) 17 | 18 | ## Shiny App Los Angeles Crash Analysis:

19 | ![alt text](https://github.com/amitvkulkarni/Data-Apps/blob/main/Shiny%20App%20Los%20Angeles%20Crash%20Analysis/App%20Preview.gif) 20 | 21 | ## Monte Carlo Simulation for Pipe Failure:
22 | ![alt text](https://github.com/amitvkulkarni/Data-Apps/blob/main/Monte%20Carlo%20Simulation%20for%20Pipe%20Failure/assets/home.png) 23 | 24 | ## Classification Model Simulator:
25 | ![alt text](https://github.com/amitvkulkarni/Data-Apps/blob/main/Classification%20Model%20Simulator%20with%20Plotly-Dash/home.png) 26 | 27 | 28 | -------------------------------------------------------------------------------- /Shiny App Los Angeles Crash Analysis/App Preview.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitvkulkarni/Data-Apps/c7c9e14d98d40e7fae2acde475952a7a96029f8f/Shiny App Los Angeles Crash Analysis/App Preview.gif -------------------------------------------------------------------------------- /Shiny App Los Angeles Crash Analysis/Global.R: -------------------------------------------------------------------------------- 1 | ############################################################################### 2 | # LOAD PACKAGES AND MODULES # 3 | ############################################################################### 4 | 5 | library(shiny) 6 | library(shinydashboard) 7 | library(dplyr) 8 | library(ggplot2) 9 | library(tidyverse) 10 | library(shinycssloaders) 11 | library(shinythemes) 12 | library(lubridate) 13 | library(shinyWidgets) 14 | 15 | 16 | 17 | 18 | ############################################################################### 19 | # LOAD DATA # 20 | ############################################################################### 21 | 22 | # Download data set from the site -- https://hub.arcgis.com/datasets/66d96f15d4e14e039caa6134e6eab8e5_0 23 | 24 | #crash <- read.csv("Los_Angeles_Collisions_2012through2018.csv",stringsAsFactors = FALSE) 25 | 26 | crash$ymd <- crash$collision_date 27 | crash$month <- month(crash$ymd, label = TRUE) 28 | crash$year <- crash$accident_year 29 | crash$wday <- wday(crash$ymd, label = TRUE) 30 | crash$hour <- crash$Hours 31 | 32 | 33 | crash$day_of_week[crash$day_of_week == "1"] <- "Monday" 34 | crash$day_of_week[crash$day_of_week == "2"] <- "Tuesday" 35 | crash$day_of_week[crash$day_of_week == "3"] <- "Wednesday" 36 | crash$day_of_week[crash$day_of_week == "4"] <- "Thursday" 37 | crash$day_of_week[crash$day_of_week == "5"] <- "Friday" 38 | crash$day_of_week[crash$day_of_week == "6"] <- "Saturday" 39 | crash$day_of_week[crash$day_of_week == "7"] <- "Sunday" 40 | 41 | 42 | crash$alcohol_involved[crash$alcohol_involved == "Y"] <- TRUE 43 | crash$alcohol_involved[crash$alcohol_involved == "N" |crash$alcohol_involved == "" ] <- FALSE 44 | 45 | crash$intersection[crash$intersection == "Y"] <- TRUE 46 | crash$intersection[crash$intersection == "N" | crash$intersection == ""] <- FALSE 47 | 48 | crash$pedestrian_accident[crash$pedestrian_accident == "Y"] <- TRUE 49 | crash$pedestrian_accident[crash$pedestrian_accident == "N"] <- FALSE 50 | 51 | crash$bicycle_accident[crash$bicycle_accident == "Y"] <- TRUE 52 | #crash$bicycle_accident[crash$bicycle_accident == "N"] <- FALSE 53 | 54 | crash$motorcycle_accident[crash$motorcycle_accident == "Y"] <- TRUE 55 | crash$motorcycle_accident[crash$motorcycle_accident == "N"] <- FALSE 56 | 57 | crash$truck_accident[crash$truck_accident == "Y"] <- TRUE 58 | #crash$truck_accident[crash$truck_accident == "N"] <- FALSE 59 | 60 | 61 | 62 | df_crash <- crash %>% 63 | group_by(accident_year, month, day_of_week, collision_severity, number_injured,number_killed, hit_and_run, pedestrian_accident,bicycle_accident, motorcycle_accident,truck_accident) %>% 64 | summarise(cnt = n()) 65 | 66 | 67 | dataMap1 <- read.csv("collapse_tree.csv") 68 | LA_crash <- data.frame(lapply(dataMap1,as.character), stringsAsFactors = FALSE ) 69 | 70 | 71 
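# Note: the commented-out read.csv above is the only place the `crash` data frame would be created,
# yet the transformations above use it directly; a minimal load step, assuming the CSV name taken
# from that commented-out line, would be:
crash <- read.csv("Los_Angeles_Collisions_2012through2018.csv", stringsAsFactors = FALSE)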
| 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | -------------------------------------------------------------------------------- /Shiny App Los Angeles Crash Analysis/README.md: -------------------------------------------------------------------------------- 1 | This is a web application built using R Shiny as part of a Mobility Hackathon. The application lets users visualize crash patterns by changing the various factors that influenced a crash. 2 | 3 | Data: Historical crash data from the city of Los Angeles.<br>
4 | Source: Download the dataset from https://hub.arcgis.com/datasets/66d96f15d4e14e039caa6134e6eab8e5_0 5 | 6 | Preview of the app:<br>

7 | ![alt text](https://github.com/amitvkulkarni/Data-Apps/blob/main/Shiny%20App%20Los%20Angeles%20Crash%20Analysis/App%20Preview.gif) 8 | -------------------------------------------------------------------------------- /Shiny App Los Angeles Crash Analysis/ui.R: -------------------------------------------------------------------------------- 1 | 2 | dashboardPage( 3 | 4 | dashboardHeader( 5 | title = "Los Angeles Crash Analysis" 6 | ), 7 | 8 | dashboardSidebar( 9 | sidebarMenu( 10 | menuItem("Select Data", tabName = "Summary", icon = icon("dashboard"),selectInput("selectData", label = h4("Select DataSet"),choices = list("crash"), selected = 1) 11 | ) 12 | ) 13 | 14 | 15 | ), 16 | 17 | dashboardBody( 18 | h1("CRASH ANALYSIS FOR THE CITY OF LOS ANGELES YEARS: 2006 TO 2019", style="color:#3498DB; text-align:center"), 19 | 20 | fluidRow( 21 | tabBox(id = "tabset1", width = 12, height = "1400px", 22 | 23 | 24 | tabPanel(tags$b("Crash Analysis"), 25 | 26 | box( 27 | selectInput("selectYear", label = h5(tags$b("Select Year")),choices = list("2006","2007","2008","2009","2010","2011","2012","2013","2014","2015","2016","2017","2018","2019"), selected = 1, multiple = TRUE), 28 | 29 | radioButtons("days", label = h5(tags$b("Weekday / Weekend")),inline = TRUE, 30 | choices = list("All" = 1, "Weekdays" = 2, "Weekend" = 3), 31 | selected = 2), 32 | 33 | radioButtons("severity", label = h5(tags$b("Severity of collision")),inline = FALSE, 34 | choices = list("All" = 0, "Fatal" = 1, "Major" = 2, "Minor" = 3, "Injuries" = 4), 35 | selected = 0), 36 | 37 | h5(tags$b("Other Factors")), 38 | checkboxInput("chkAlcohol", label = "Alcohol Involved", value = FALSE), 39 | checkboxInput("chkIntersection", label = "Intersection", value = FALSE), 40 | 41 | solidHeader = TRUE, status = "primary", width = 3, title = "Motor Vehicle Collision Analysis", collapsible = TRUE 42 | ), 43 | box( 44 | valueBoxOutput("yearSelect", width = 5), 45 | 46 | valueBoxOutput("yearMean", width = 5), 47 | 48 | 49 | collapsible = TRUE, solidHeader = TRUE, status = "primary", width = 5, title = "Motor Vehicle Collision Analysis" 50 | ), 51 | box( 52 | withSpinner(plotOutput("timeDay")), 53 | withSpinner(plotOutput("yearMonth")), 54 | collapsible = TRUE, solidHeader = TRUE, status = "primary", width = 5, title = "Year / Month / hourly trends" 55 | ), 56 | box(collapsible = TRUE, solidHeader = TRUE, status = "primary", width = 4, title = "Collision trends over the years", 57 | 58 | withSpinner(plotOutput("TrendsWeeks")), 59 | withSpinner(plotOutput("TrendsAllYears")) 60 | ) 61 | 62 | 63 | ) 64 | 65 | ) 66 | 67 | ) 68 | ) 69 | ) 70 | 71 | 72 | 73 | 74 | -------------------------------------------------------------------------------- /Shopping List Optimizer/.gitkeep: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Shopping List Optimizer/app.py: -------------------------------------------------------------------------------- 1 | import dash 2 | from dash.dependencies import Input, Output, State 3 | import dash_bootstrap_components as dbc 4 | import pandas as pd 5 | import pulp 6 | import dash_table 7 | from dash import dcc 8 | from dash import html 9 | import UI 10 | 11 | 12 | app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP]) 13 | 14 | server = app.server 15 | 16 | app.layout = UI.Layout 17 | 18 | shopping_list = [] 19 | 20 | 21 | @app.callback( 22 | [Output("shopping_table", "figure"), 
Output("total_cost_initial", "children")], 23 | [Input("add_button", "n_clicks")], 24 | [ 25 | State("product_name", "value"), 26 | State("min_qty", "value"), 27 | State("max_qty", "value"), 28 | State("unit_price", "value"), 29 | State("discount_rate", "value"), 30 | ], 31 | ) 32 | def update_shopping_list( 33 | n_clicks, product_name, min_qty, max_qty, unit_price, discount_rate 34 | ): 35 | 36 | if n_clicks is None or n_clicks == 0: 37 | return { 38 | "data": [ 39 | { 40 | "type": "table", 41 | "header": { 42 | "values": [ 43 | "No Shopping List Created. Add items to the shopping list" 44 | ] 45 | }, 46 | # "cells": {"values": [["Add items to the shopping list"]]}, 47 | } 48 | ] 49 | }, None 50 | if n_clicks: 51 | actual_cost = unit_price * max_qty 52 | discount = (unit_price * discount_rate / 100) * max_qty 53 | total_cost = actual_cost - discount 54 | 55 | shopping_list.append( 56 | { 57 | "Product Name": product_name, 58 | "Min Qty": min_qty, 59 | "Max Qty": max_qty, 60 | "Unit Price": unit_price, 61 | "Discount Rate": discount_rate, 62 | "Actual Cost": actual_cost, 63 | "Total Cost": total_cost, 64 | } 65 | ) 66 | 67 | df = pd.DataFrame(shopping_list) 68 | 69 | total_cost_initial = df["Total Cost"].sum() 70 | 71 | return { 72 | "data": [ 73 | { 74 | "type": "table", 75 | "header": {"values": list(df.columns)}, 76 | "cells": {"values": [df[col] for col in df.columns]}, 77 | } 78 | ] 79 | }, f"Total Cost (Initial): ${total_cost_initial:.2f}" 80 | 81 | 82 | @app.callback( 83 | [ 84 | Output("optimization_results", "figure"), 85 | Output("total_cost_optimized", "children"), 86 | ], 87 | [Input("optimize_button", "n_clicks")], 88 | [State("budget", "value")], 89 | ) 90 | def optimize_shopping_list(n_clicks, budget): 91 | if n_clicks is None or n_clicks == 0: 92 | return ( 93 | { 94 | "data": [ 95 | { 96 | "type": "table", 97 | "header": { 98 | "values": [ 99 | [ 100 | 'If you have added items to list then click "Optimize" to see results' 101 | ] 102 | ] 103 | }, 104 | # "cells": {"values": [['Click "Optimize" to see results']]}, 105 | } 106 | ] 107 | }, 108 | None, 109 | ) 110 | 111 | df = pd.DataFrame(shopping_list) 112 | 113 | if df.empty: 114 | return ( 115 | { 116 | "data": [ 117 | { 118 | "type": "table", 119 | "header": {"values": ["No Data"]}, 120 | "cells": {"values": [["Add items to the list first"]]}, 121 | } 122 | ] 123 | }, 124 | "Total Cost (Optimized): N/A", 125 | ) 126 | 127 | prob = pulp.LpProblem("Shopping_Optimization", pulp.LpMinimize) 128 | 129 | qty_vars = { 130 | i: pulp.LpVariable( 131 | f"qty_{i}", 132 | lowBound=row["Min Qty"], 133 | upBound=row["Max Qty"], 134 | cat="Continuous", 135 | ) 136 | for i, row in df.iterrows() 137 | } 138 | 139 | total_cost_expr = pulp.lpSum( 140 | [ 141 | qty_vars[i] * row["Unit Price"] * (1 - row["Discount Rate"] / 100) 142 | for i, row in df.iterrows() 143 | ] 144 | ) 145 | 146 | prob += total_cost_expr == budget, "Budget Constraint" 147 | 148 | dummy_var = pulp.LpVariable("dummy", 0) 149 | prob += dummy_var 150 | 151 | prob.solve() 152 | 153 | if pulp.LpStatus[prob.status] != "Optimal": 154 | return { 155 | "data": [ 156 | { 157 | "type": "table", 158 | "header": {"values": ["Optimization Error"]}, 159 | "cells": {"values": [["No optimal solution found"]]}, 160 | } 161 | ] 162 | } 163 | 164 | df["Optimized Qty"] = [qty_vars[i].varValue for i in df.index] 165 | df["Optimized Total Cost"] = round( 166 | df["Optimized Qty"] * df["Unit Price"] * (1 - df["Discount Rate"] / 100), 1 167 | ) 168 | 169 | df["Optimized Qty"] = df[["Min Qty", 
"Optimized Qty", "Max Qty"]].apply( 170 | lambda x: min(max(x["Optimized Qty"], x["Min Qty"]), x["Max Qty"]), axis=1 171 | ) 172 | 173 | total_cost_optimized = df["Optimized Total Cost"].sum() 174 | 175 | return ( 176 | { 177 | "data": [ 178 | { 179 | "type": "table", 180 | "header": { 181 | "values": df[ 182 | ["Product Name", "Optimized Qty", "Optimized Total Cost"] 183 | ].columns.tolist() 184 | }, 185 | "cells": { 186 | "values": [ 187 | df[col] 188 | for col in df[ 189 | [ 190 | "Product Name", 191 | "Optimized Qty", 192 | "Optimized Total Cost", 193 | ] 194 | ].columns 195 | ] 196 | }, 197 | } 198 | ] 199 | }, 200 | f"Total Cost (Optimized): ${total_cost_optimized:.2f}", 201 | ) 202 | 203 | 204 | if __name__ == "__main__": 205 | app.run_server(debug=True, use_reloader=False, dev_tools_ui=False) 206 | -------------------------------------------------------------------------------- /Shopping List Optimizer/style.css: -------------------------------------------------------------------------------- 1 | body { 2 | background-color:#EDEFEB; 3 | font-family: "Open Sans", verdana, arial, sans-serif; 4 | font-size: 18px; 5 | color:#000080; 6 | } 7 | 8 | .side_bar { 9 | height: 100%; 10 | width: 14%; 11 | position: fixed; 12 | top: 0; 13 | left: 0; 14 | background-color: #ADD8E6; 15 | padding-left: 1%; 16 | padding-right: 1%; 17 | color:rgb(7, 7, 7); 18 | } 19 | 20 | .main { 21 | font-size: 15px; 22 | padding: 0px 10px; 23 | padding-left: 16%; 24 | color:#2F4F4F; 25 | } 26 | 27 | .footer { 28 | position: relative; 29 | left: 0; 30 | bottom: 0; 31 | width: 100%; 32 | background-color: #4b9072a7; 33 | padding-left: 1%; 34 | } 35 | 36 | h1 { 37 | color:#000080; 38 | font-family: ui-monospace; 39 | } 40 | 41 | h3 { 42 | margin-block-start:0.3em; 43 | font-size: smaller; 44 | } 45 | 46 | h4 { 47 | margin-block-end:0.3em; 48 | font-size: smaller; 49 | } 50 | 51 | .text{ 52 | margin: 10px; 53 | padding: 25px; 54 | align-self: center; 55 | } 56 | 57 | .row{ 58 | display: flex; 59 | } 60 | 61 | /* Boxes */ 62 | 63 | .box { 64 | border-radius: 10px; 65 | background-color: #F9F9F8; 66 | margin: 10px; 67 | padding: 25px; 68 | box-shadow: 2px 2px 8px lightgrey; 69 | } 70 | 71 | .boxImage { 72 | border-radius: 10px; 73 | background-color: #FFFFFF; 74 | margin: 10px; 75 | padding: 25px; 76 | box-shadow: 2px 2px 8px lightgrey; 77 | } 78 | 79 | 80 | .box_comment { 81 | border-radius: 20px; 82 | background-color: #d4f5c29a; 83 | margin: 10px; 84 | padding: 10px; 85 | box-shadow: 2px 2px 2px lightgrey; 86 | font-size: small; 87 | text-align: center; 88 | font-family: system-ui; 89 | } 90 | 91 | .box_emissions{ 92 | border-radius: 20px; 93 | background-color: #DCDCDC; 94 | margin: 2px; 95 | box-shadow: 2px 2px 2px lightgrey; 96 | justify-content: center; 97 | width: 50%; 98 | text-align: center; 99 | } 100 | /* Drop down */ 101 | #drop_map .VirtualizedSelectFocusedOption { 102 | background-color:#ebb36abf; 103 | } 104 | 105 | #drop_continent .VirtualizedSelectFocusedOption { 106 | background-color:#ebb36abf; 107 | } 108 | 109 | /* Radio buttons */ 110 | .radio { 111 | display:flex; 112 | justify-content: left; 113 | place-content:space-around; 114 | width: 88%; 115 | } 116 | 117 | .radio input[type="radio"] { 118 | opacity: 0; 119 | position: absolute; 120 | } 121 | 122 | .radio label { 123 | display: table-caption; 124 | background-color: #F9F9F8; 125 | padding: 15px 15px; 126 | font-size: 16px; 127 | border: 2px solid #e9a8538e; 128 | border-radius: 10px; 129 | width: 200%; 130 | text-align: center; 131 | } 132 
| 133 | .radio input[type="radio"]:checked + label { 134 | background-color: #e98b1044; 135 | border: 2px solid #F9F9F8; 136 | } 137 | 138 | .radio label:hover { 139 | background-color: #e1e2df; 140 | border: 2px solid #e1e2df; 141 | } 142 | 143 | /* Scrollbars */ 144 | *::-webkit-scrollbar { 145 | width: 10px; 146 | } 147 | *::-webkit-scrollbar-track { 148 | background: #EDEFEB; 149 | } 150 | *::-webkit-scrollbar-thumb { 151 | background-color:#63615fbf; 152 | border-radius: 5px; 153 | border: 3px solid #EDEFEB; 154 | } 155 | 156 | .study-browser-banner { 157 | background: rgb(2,21,70); 158 | color: white; 159 | padding: 1px; 160 | padding-left: 2px; 161 | /* text-align: center; */ 162 | } 163 | 164 | .footer-banner { 165 | background: hwb(240 82% 17%); 166 | color: white; 167 | padding: 1px; 168 | padding-left: 2px; 169 | /* height: 15px; */ 170 | /* text-align: center; */ 171 | } 172 | 173 | .div-logo{ 174 | display: inline-block; 175 | float: right; 176 | /* text-align: center; */ 177 | } 178 | 179 | .logo{ 180 | height: 75px; 181 | padding: 6px; 182 | margin-top: 3px; 183 | } 184 | 185 | .logo1{ 186 | height: 200px; 187 | padding: 6px; 188 | margin-top: 3px; 189 | } -------------------------------------------------------------------------------- /Text Analysis Using Azure AI Services/.gitkeep: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Virtual Assistant/main.py: -------------------------------------------------------------------------------- 1 | import pyttsx3 #pip install pyttsx3 2 | import speech_recognition as sr #pip install speechRecognition 3 | import datetime 4 | import wikipedia #pip install wikipedia 5 | import webbrowser 6 | import os 7 | 8 | 9 | engine = pyttsx3.init('sapi5') 10 | voices = engine.getProperty('voices') 11 | # print(voices[1].id) 12 | engine.setProperty('voice', voices[1].id) 13 | 14 | 15 | def speak(audio): 16 | engine.say(audio) 17 | engine.runAndWait() 18 | 19 | 20 | def wishMe(): 21 | hour = int(datetime.datetime.now().hour) 22 | if hour>=0 and hour<12: 23 | speak("Good Morning!") 24 | 25 | elif hour>=12 and hour<18: 26 | speak("Good Afternoon!!") 27 | 28 | else: 29 | speak("Good Evening!!") 30 | 31 | speak("I am your digital assistant sir. 
Please tell me how may I help you") 32 | 33 | def takeCommand(): 34 | #It takes microphone input from the user and returns string output 35 | 36 | r = sr.Recognizer() 37 | print(r) 38 | with sr.Microphone() as source: 39 | r.adjust_for_ambient_noise(source,duration=1) 40 | print("Listening...") 41 | r.pause_threshold = 1 42 | audio = r.listen(source) 43 | 44 | try: 45 | print("Recognizing...") 46 | query = r.recognize_google(audio, language='en-in') 47 | print(f"User said: {query}\n") 48 | 49 | except Exception as e: 50 | # print(e) 51 | speak("Say that again please...") 52 | return "None" 53 | return query 54 | 55 | 56 | if __name__ == "__main__": 57 | wishMe() 58 | while True: 59 | # if 1: 60 | query = takeCommand().lower() 61 | 62 | # Logic for executing tasks based on query 63 | if 'wikipedia' in query: 64 | speak('Searching Wikipedia...') 65 | query = query.replace("wikipedia", "") 66 | results = wikipedia.summary(query, sentences=2) 67 | speak("According to Wikipedia") 68 | print(results) 69 | speak(results) 70 | 71 | elif 'open youtube' in query: 72 | webbrowser.open("youtube.com") 73 | 74 | elif 'open google' in query: 75 | webbrowser.open("google.com") 76 | 77 | elif 'open stackoverflow' in query: 78 | webbrowser.open("stackoverflow.com") 79 | 80 | elif 'the time' in query: 81 | strTime = datetime.datetime.now().strftime("%H:%M:%S") 82 | speak(f"Sir, the time is {strTime}") 83 | 84 | 85 | --------------------------------------------------------------------------------