├── .devcontainer ├── devcontainer.json └── requirements.txt ├── .gitignore ├── CODE_OF_CONDUCT.md ├── LICENSE ├── README.md ├── SECURITY.md ├── SUPPORT.md ├── benchmarks ├── aspirin.xyz ├── geometries.tar.gz ├── input_spe-m062x.ipynb ├── input_spe-wb97x.ipynb ├── m062x_results.tar.gz ├── m062x_results_v2.tar.gz ├── wb97x_results.tar.gz └── wb97x_results_v2.tar.gz └── samples ├── build_example.ipynb ├── go.ipynb ├── properties ├── README.md ├── env_properties.yaml ├── load_qcschema_result_compute_CHELPG.py ├── load_qcschema_result_compute_Dipole_Moment_rks.py ├── load_qcschema_result_compute_Dipole_Moment_uks.py ├── load_qcschema_result_compute_Frequencies_and_Thermochemistry.py ├── load_qcschema_result_compute_HFC.py ├── load_qcschema_result_compute_IR.py ├── load_qcschema_result_compute_LocalizedOrbitals.py ├── load_qcschema_result_compute_MEP.py ├── load_qcschema_result_compute_Mulliken_Population.py ├── load_qcschema_result_compute_NMR.py ├── load_qcschema_result_compute_Polarizability.py ├── load_qcschema_result_compute_RESP.py ├── load_qcschema_result_compute_Spin.py ├── load_qcschema_result_compute_cubegen.py ├── load_qcschema_result_compute_gTensor.py └── tools │ ├── chelpg.py │ ├── espfit.py │ ├── heat_of_formation.py │ ├── libqcschema.py │ ├── resp.py │ ├── vdw_surface.py │ ├── visualize.py │ └── wavefunction_hdf5_to_qcschema.py ├── spe.ipynb └── spf.ipynb /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/devcontainer.json. For config options, see the 2 | // README at: https://github.com/devcontainers/templates/tree/main/src/python 3 | { 4 | "name": "Python 3", 5 | // Or use a Dockerfile or Docker Compose file. 
More info: https://containers.dev/guide/dockerfile 6 | "image": "mcr.microsoft.com/devcontainers/python:3.11", 7 | "features": { 8 | "ghcr.io/devcontainers/features/azure-cli:1": { 9 | "version": "latest" 10 | } 11 | }, 12 | "onCreateCommand": "pip3 install --user -r .devcontainer/requirements.txt", 13 | "customizations": { 14 | "codespaces": { 15 | "openFiles": [] 16 | }, 17 | "vscode": { 18 | "extensions": [ 19 | "ms-toolsai.jupyter", 20 | "ms-python.python" 21 | ] 22 | } 23 | }, 24 | "secrets": { 25 | } 26 | } -------------------------------------------------------------------------------- /.devcontainer/requirements.txt: -------------------------------------------------------------------------------- 1 | azure-quantum 2 | pyscf ==2.4 3 | pyscf-properties @ git+https://github.com/pyscf/properties@master 4 | numpy 5 | ase 6 | rdkit 7 | py3dmol 8 | jupyterlab_widgets 9 | pint # for converting the energy unit 10 | ipykernel # Make kernel discoverable in Jupyter lab 11 | ipywidgets # Provide interactive widgets; pinned to match the version of the `jupyterlab` base environment 12 | nglview # Visualizing atomic structures; pinned to match the version of the `jupyterlab` base environment 13 | matplotlib 14 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ## Ignore Visual Studio temporary files, build results, and 2 | ## files generated by popular Visual Studio add-ons. 
3 | ## 4 | ## Get latest from https://github.com/github/gitignore/blob/main/VisualStudio.gitignore 5 | 6 | # User-specific files 7 | *.rsuser 8 | *.suo 9 | *.user 10 | *.userosscache 11 | *.sln.docstates 12 | 13 | # User-specific files (MonoDevelop/Xamarin Studio) 14 | *.userprefs 15 | 16 | # Mono auto generated files 17 | mono_crash.* 18 | 19 | # Build results 20 | [Dd]ebug/ 21 | [Dd]ebugPublic/ 22 | [Rr]elease/ 23 | [Rr]eleases/ 24 | x64/ 25 | x86/ 26 | [Ww][Ii][Nn]32/ 27 | [Aa][Rr][Mm]/ 28 | [Aa][Rr][Mm]64/ 29 | bld/ 30 | [Bb]in/ 31 | [Oo]bj/ 32 | [Ll]og/ 33 | [Ll]ogs/ 34 | 35 | # Visual Studio 2015/2017 cache/options directory 36 | .vs/ 37 | # Uncomment if you have tasks that create the project's static files in wwwroot 38 | #wwwroot/ 39 | 40 | # Visual Studio 2017 auto generated files 41 | Generated\ Files/ 42 | 43 | # MSTest test Results 44 | [Tt]est[Rr]esult*/ 45 | [Bb]uild[Ll]og.* 46 | 47 | # NUnit 48 | *.VisualState.xml 49 | TestResult.xml 50 | nunit-*.xml 51 | 52 | # Build Results of an ATL Project 53 | [Dd]ebugPS/ 54 | [Rr]eleasePS/ 55 | dlldata.c 56 | 57 | # Benchmark Results 58 | BenchmarkDotNet.Artifacts/ 59 | 60 | # .NET Core 61 | project.lock.json 62 | project.fragment.lock.json 63 | artifacts/ 64 | 65 | # ASP.NET Scaffolding 66 | ScaffoldingReadMe.txt 67 | 68 | # StyleCop 69 | StyleCopReport.xml 70 | 71 | # Files built by Visual Studio 72 | *_i.c 73 | *_p.c 74 | *_h.h 75 | *.ilk 76 | *.meta 77 | *.obj 78 | *.iobj 79 | *.pch 80 | *.pdb 81 | *.ipdb 82 | *.pgc 83 | *.pgd 84 | *.rsp 85 | *.sbr 86 | *.tlb 87 | *.tli 88 | *.tlh 89 | *.tmp 90 | *.tmp_proj 91 | *_wpftmp.csproj 92 | *.log 93 | *.tlog 94 | *.vspscc 95 | *.vssscc 96 | .builds 97 | *.pidb 98 | *.svclog 99 | *.scc 100 | 101 | # Chutzpah Test files 102 | _Chutzpah* 103 | 104 | # Visual C++ cache files 105 | ipch/ 106 | *.aps 107 | *.ncb 108 | *.opendb 109 | *.opensdf 110 | *.sdf 111 | *.cachefile 112 | *.VC.db 113 | *.VC.VC.opendb 114 | 115 | # Visual Studio profiler 116 | *.psess 117 | *.vsp 
118 | *.vspx 119 | *.sap 120 | 121 | # Visual Studio Trace Files 122 | *.e2e 123 | 124 | # TFS 2012 Local Workspace 125 | $tf/ 126 | 127 | # Guidance Automation Toolkit 128 | *.gpState 129 | 130 | # ReSharper is a .NET coding add-in 131 | _ReSharper*/ 132 | *.[Rr]e[Ss]harper 133 | *.DotSettings.user 134 | 135 | # TeamCity is a build add-in 136 | _TeamCity* 137 | 138 | # DotCover is a Code Coverage Tool 139 | *.dotCover 140 | 141 | # AxoCover is a Code Coverage Tool 142 | .axoCover/* 143 | !.axoCover/settings.json 144 | 145 | # Coverlet is a free, cross platform Code Coverage Tool 146 | coverage*.json 147 | coverage*.xml 148 | coverage*.info 149 | 150 | # Visual Studio code coverage results 151 | *.coverage 152 | *.coveragexml 153 | 154 | # NCrunch 155 | _NCrunch_* 156 | .*crunch*.local.xml 157 | nCrunchTemp_* 158 | 159 | # MightyMoose 160 | *.mm.* 161 | AutoTest.Net/ 162 | 163 | # Web workbench (sass) 164 | .sass-cache/ 165 | 166 | # Installshield output folder 167 | [Ee]xpress/ 168 | 169 | # DocProject is a documentation generator add-in 170 | DocProject/buildhelp/ 171 | DocProject/Help/*.HxT 172 | DocProject/Help/*.HxC 173 | DocProject/Help/*.hhc 174 | DocProject/Help/*.hhk 175 | DocProject/Help/*.hhp 176 | DocProject/Help/Html2 177 | DocProject/Help/html 178 | 179 | # Click-Once directory 180 | publish/ 181 | 182 | # Publish Web Output 183 | *.[Pp]ublish.xml 184 | *.azurePubxml 185 | # Note: Comment the next line if you want to checkin your web deploy settings, 186 | # but database connection strings (with potential passwords) will be unencrypted 187 | *.pubxml 188 | *.publishproj 189 | 190 | # Microsoft Azure Web App publish settings. 
Comment the next line if you want to 191 | # checkin your Azure Web App publish settings, but sensitive information contained 192 | # in these scripts will be unencrypted 193 | PublishScripts/ 194 | 195 | # NuGet Packages 196 | *.nupkg 197 | # NuGet Symbol Packages 198 | *.snupkg 199 | # The packages folder can be ignored because of Package Restore 200 | **/[Pp]ackages/* 201 | # except build/, which is used as an MSBuild target. 202 | !**/[Pp]ackages/build/ 203 | # Uncomment if necessary however generally it will be regenerated when needed 204 | #!**/[Pp]ackages/repositories.config 205 | # NuGet v3's project.json files produces more ignorable files 206 | *.nuget.props 207 | *.nuget.targets 208 | 209 | # Microsoft Azure Build Output 210 | csx/ 211 | *.build.csdef 212 | 213 | # Microsoft Azure Emulator 214 | ecf/ 215 | rcf/ 216 | 217 | # Windows Store app package directories and files 218 | AppPackages/ 219 | BundleArtifacts/ 220 | Package.StoreAssociation.xml 221 | _pkginfo.txt 222 | *.appx 223 | *.appxbundle 224 | *.appxupload 225 | 226 | # Visual Studio cache files 227 | # files ending in .cache can be ignored 228 | *.[Cc]ache 229 | # but keep track of directories ending in .cache 230 | !?*.[Cc]ache/ 231 | 232 | # Others 233 | ClientBin/ 234 | ~$* 235 | *~ 236 | *.dbmdl 237 | *.dbproj.schemaview 238 | *.jfm 239 | *.pfx 240 | *.publishsettings 241 | orleans.codegen.cs 242 | 243 | # Including strong name files can present a security risk 244 | # (https://github.com/github/gitignore/pull/2483#issue-259490424) 245 | #*.snk 246 | 247 | # Since there are multiple workflows, uncomment next line to ignore bower_components 248 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) 249 | #bower_components/ 250 | 251 | # RIA/Silverlight projects 252 | Generated_Code/ 253 | 254 | # Backup & report files from converting an old project file 255 | # to a newer Visual Studio version. 
Backup files are not needed, 256 | # because we have git ;-) 257 | _UpgradeReport_Files/ 258 | Backup*/ 259 | UpgradeLog*.XML 260 | UpgradeLog*.htm 261 | ServiceFabricBackup/ 262 | *.rptproj.bak 263 | 264 | # SQL Server files 265 | *.mdf 266 | *.ldf 267 | *.ndf 268 | 269 | # Business Intelligence projects 270 | *.rdl.data 271 | *.bim.layout 272 | *.bim_*.settings 273 | *.rptproj.rsuser 274 | *- [Bb]ackup.rdl 275 | *- [Bb]ackup ([0-9]).rdl 276 | *- [Bb]ackup ([0-9][0-9]).rdl 277 | 278 | # Microsoft Fakes 279 | FakesAssemblies/ 280 | 281 | # GhostDoc plugin setting file 282 | *.GhostDoc.xml 283 | 284 | # Node.js Tools for Visual Studio 285 | .ntvs_analysis.dat 286 | node_modules/ 287 | 288 | # Visual Studio 6 build log 289 | *.plg 290 | 291 | # Visual Studio 6 workspace options file 292 | *.opt 293 | 294 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.) 295 | *.vbw 296 | 297 | # Visual Studio 6 auto-generated project file (contains which files were open etc.) 
298 | *.vbp 299 | 300 | # Visual Studio 6 workspace and project file (working project files containing files to include in project) 301 | *.dsw 302 | *.dsp 303 | 304 | # Visual Studio 6 technical files 305 | *.ncb 306 | *.aps 307 | 308 | # Visual Studio LightSwitch build output 309 | **/*.HTMLClient/GeneratedArtifacts 310 | **/*.DesktopClient/GeneratedArtifacts 311 | **/*.DesktopClient/ModelManifest.xml 312 | **/*.Server/GeneratedArtifacts 313 | **/*.Server/ModelManifest.xml 314 | _Pvt_Extensions 315 | 316 | # Paket dependency manager 317 | .paket/paket.exe 318 | paket-files/ 319 | 320 | # FAKE - F# Make 321 | .fake/ 322 | 323 | # CodeRush personal settings 324 | .cr/personal 325 | 326 | # Python Tools for Visual Studio (PTVS) 327 | __pycache__/ 328 | *.pyc 329 | 330 | # Cake - Uncomment if you are using it 331 | # tools/** 332 | # !tools/packages.config 333 | 334 | # Tabs Studio 335 | *.tss 336 | 337 | # Telerik's JustMock configuration file 338 | *.jmconfig 339 | 340 | # BizTalk build output 341 | *.btp.cs 342 | *.btm.cs 343 | *.odx.cs 344 | *.xsd.cs 345 | 346 | # OpenCover UI analysis results 347 | OpenCover/ 348 | 349 | # Azure Stream Analytics local run output 350 | ASALocalRun/ 351 | 352 | # MSBuild Binary and Structured Log 353 | *.binlog 354 | 355 | # NVidia Nsight GPU debugger configuration file 356 | *.nvuser 357 | 358 | # MFractors (Xamarin productivity tool) working folder 359 | .mfractor/ 360 | 361 | # Local History for Visual Studio 362 | .localhistory/ 363 | 364 | # Visual Studio History (VSHistory) files 365 | .vshistory/ 366 | 367 | # BeatPulse healthcheck temp database 368 | healthchecksdb 369 | 370 | # Backup folder for Package Reference Convert tool in Visual Studio 2017 371 | MigrationBackup/ 372 | 373 | # Ionide (cross platform F# VS Code tools) working folder 374 | .ionide/ 375 | 376 | # Fody - auto-generated XML schema 377 | FodyWeavers.xsd 378 | 379 | # VS Code files for those working on multiple tools 380 | .vscode/* 381 | 
!.vscode/settings.json 382 | !.vscode/tasks.json 383 | !.vscode/launch.json 384 | !.vscode/extensions.json 385 | *.code-workspace 386 | 387 | # Local History for Visual Studio Code 388 | .history/ 389 | 390 | # Windows Installer files from build outputs 391 | *.cab 392 | *.msi 393 | *.msix 394 | *.msm 395 | *.msp 396 | 397 | # JetBrains Rider 398 | *.sln.iml 399 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Microsoft Open Source Code of Conduct 2 | 3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 4 | 5 | Resources: 6 | 7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) 8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns 10 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Project 2 | 3 | Accelerated DFT is a cloud-native, GPU-accelerated DFT program for molecular systems engineered by Microsoft offered through [Azure Quantum Elements](https://quantum.microsoft.com/en-us/quantum-elements/product-overview). 4 | 5 | This repository contains benchmarking data and sample inputs for Accelerated DFT as detailed in this [preprint](https://arxiv.org/abs/2406.11185). 6 | 7 | - [benchmarks](./benchmarks) 8 | - Geometries of 329 molecules comprising the test set (geometries.tar.gz) 9 | - Accelerated-DFT input files and settings used to run calculations on the test set [input_spe-m062x.ipynb](./benchmarks/input_spe-m062x.ipynb) , [input_spe-wb97x.ipynb](./benchmarks/input_spe-wb97x.ipynb) 10 | - Accelerated-DFT output files for the test set using both M06-2X and wB97x functionals (m062x_results.tar.gz , wb97x_results.tar.gz) 11 | 12 | - [samples](./samples) 13 | - single point energy example input [spe.ipynb](./samples/spe.ipynb) 14 | - single point force example input [spf.ipynb](./samples/spf.ipynb) 15 | - geometry optimization example input [go.ipynb](./samples/go.ipynb) 16 | - example of how to build QCSchema inputs for Accelerated-DFT [build_example.ipynb](./samples/build_example.ipynb) 17 | 18 | The samples included in this project are for reference only. 
You need an active Accelerated DFT service (which supplies you the access key) to run the samples. 19 | 20 | If you are interested in trying out Accelerated DFT, please [sign up](https://smt.microsoft.com/en-US/AQEPrivatePreviewSignup/) for private preview of Azure Quantum Elements. 21 | 22 | ## Contributing 23 | 24 | This project welcomes contributions and suggestions. Most contributions require you to agree to a 25 | Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us 26 | the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com. 27 | 28 | When you submit a pull request, a CLA bot will automatically determine whether you need to provide 29 | a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions 30 | provided by the bot. You will only need to do this once across all repos using our CLA. 31 | 32 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 33 | For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or 34 | contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 35 | 36 | ## Trademarks 37 | 38 | This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft 39 | trademarks or logos is subject to and must follow 40 | [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). 41 | Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. 42 | Any use of third-party trademarks or logos is subject to those third parties' policies. 
43 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet) and [Xamarin](https://github.com/xamarin). 6 | 7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/security.md/definition), please report it to us as described below. 8 | 9 | ## Reporting Security Issues 10 | 11 | **Please do not report security vulnerabilities through public GitHub issues.** 12 | 13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/security.md/msrc/create-report). 14 | 15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/security.md/msrc/pgp). 16 | 17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc). 18 | 19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: 20 | 21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 
22 | * Full paths of source file(s) related to the manifestation of the issue 23 | * The location of the affected source code (tag/branch/commit or direct URL) 24 | * Any special configuration required to reproduce the issue 25 | * Step-by-step instructions to reproduce the issue 26 | * Proof-of-concept or exploit code (if possible) 27 | * Impact of the issue, including how an attacker might exploit the issue 28 | 29 | This information will help us triage your report more quickly. 30 | 31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/security.md/msrc/bounty) page for more details about our active programs. 32 | 33 | ## Preferred Languages 34 | 35 | We prefer all communications to be in English. 36 | 37 | ## Policy 38 | 39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/security.md/cvd). 40 | 41 | 42 | -------------------------------------------------------------------------------- /SUPPORT.md: -------------------------------------------------------------------------------- 1 | # Support 2 | 3 | ## How to file issues and get help 4 | 5 | This project uses GitHub Issues to track bugs and feature requests. Please search the existing 6 | issues before filing new issues to avoid duplicates. For new issues, file your bug or 7 | feature request as a new Issue. 8 | 9 | ## Microsoft Support Policy 10 | 11 | Support for this **PROJECT or PRODUCT** is limited to the resources listed above. 
12 | -------------------------------------------------------------------------------- /benchmarks/aspirin.xyz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/accelerated-dft/82fd7177db081fd6fb1e4b55248ed3ce7a516237/benchmarks/aspirin.xyz -------------------------------------------------------------------------------- /benchmarks/input_spe-m062x.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "nteract": { 7 | "transient": { 8 | "deleting": false 9 | } 10 | } 11 | }, 12 | "source": [ 13 | "# Submission of Calculation with Subsequent Results Query\n", 14 | "\n", 15 | "In this demo, we will submit a calculation, check the status of the job and query the results after it is finished." 16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": null, 21 | "metadata": { 22 | "collapsed": false, 23 | "jupyter": { 24 | "outputs_hidden": false, 25 | "source_hidden": false 26 | }, 27 | "nteract": { 28 | "transient": { 29 | "deleting": false 30 | } 31 | } 32 | }, 33 | "outputs": [], 34 | "source": [ 35 | "from azure.quantum import Workspace\n", 36 | "from azure.quantum.job import JobFailedWithResultsError\n", 37 | "\n", 38 | "# insert connection string form Azure Portal Workspace Access Keys\n", 39 | "connection_string = \"\" \n", 40 | "workspace = Workspace.from_connection_string(connection_string)" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": null, 46 | "metadata": { 47 | "collapsed": false, 48 | "jupyter": { 49 | "outputs_hidden": false, 50 | "source_hidden": false 51 | }, 52 | "nteract": { 53 | "transient": { 54 | "deleting": false 55 | } 56 | } 57 | }, 58 | "outputs": [], 59 | "source": [ 60 | "print(\"Verifying access to DFT target.\")\n", 61 | "\n", 62 | "# To submit DFT jobs, we will be using the microsoft.dft target in the workspace.\n", 
63 | "target = workspace.get_targets(\"microsoft.dft\")" 64 | ] 65 | }, 66 | { 67 | "cell_type": "code", 68 | "execution_count": null, 69 | "metadata": { 70 | "collapsed": false, 71 | "jupyter": { 72 | "outputs_hidden": false, 73 | "source_hidden": false 74 | }, 75 | "nteract": { 76 | "transient": { 77 | "deleting": false 78 | } 79 | } 80 | }, 81 | "outputs": [], 82 | "source": [ 83 | "# First, let's define the molecular structure, loaded from an xyz file.\n", 84 | "from pathlib import Path\n", 85 | "GeomFile = \"aspirin.xyz\"" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": null, 91 | "metadata": {}, 92 | "outputs": [], 93 | "source": [ 94 | "# Secondly, let's give a name for the job.\n", 95 | "job_name = 'aspirin_spe'" 96 | ] 97 | }, 98 | { 99 | "cell_type": "markdown", 100 | "metadata": { 101 | "nteract": { 102 | "transient": { 103 | "deleting": false 104 | } 105 | } 106 | }, 107 | "source": [ 108 | "Now we submit the calculations to MADFT service, looping over three different basis sets." 109 | ] 110 | }, 111 | { 112 | "cell_type": "code", 113 | "execution_count": null, 114 | "metadata": { 115 | "collapsed": false, 116 | "jupyter": { 117 | "outputs_hidden": false, 118 | "source_hidden": false 119 | }, 120 | "nteract": { 121 | "transient": { 122 | "deleting": false 123 | } 124 | } 125 | }, 126 | "outputs": [], 127 | "source": [ 128 | "# Next, we create a dictionary variable to specify the parameters for the DFT calculation. \n", 129 | "dft_input_params = {\n", 130 | " \"tasks\": [\n", 131 | " {\n", 132 | " \"taskType\": \"spe\", \n", 133 | " \"basisSet\": { \"name\": 'def2-tzvpp'},\n", 134 | " \"xcFunctional\": { \"name\": \"m06-2x\", \"gridLevel\": 4 },\n", 135 | " \"molecule\": { \"charge\": 0, \"multiplicity\": 1 },\n", 136 | " \"scf\": { \"method\": \"rks\", \"maxSteps\": 100, \"convergeThreshold\": 1e-8 }\n", 137 | " }\n", 138 | " ]\n", 139 | "}\n", 140 | "\n", 141 | "# We are now ready to submit the Job using the target.submit call. 
It takes three parameters-\n", 142 | "# 1. The input molecule in xyz format.\n", 143 | "# 2. The DFT parameters that we declared above.\n", 144 | "# 3. A friendly name to help identify the job in the Azure Portal later.\n", 145 | "\n", 146 | "print(\"Submitting DFT job.\")\n", 147 | "\n", 148 | "job = target.submit(\n", 149 | " input_data=Path(GeomFile).read_text(),\n", 150 | " input_params = dft_input_params,\n", 151 | " name= job_name)\n", 152 | " \n", 153 | "print(\"\\nDFT job has been submitted.\")\n", 154 | "print(f\"\\nJob name: {job_name}\")\n" 155 | ] 156 | }, 157 | { 158 | "cell_type": "markdown", 159 | "metadata": { 160 | "nteract": { 161 | "transient": { 162 | "deleting": false 163 | } 164 | } 165 | }, 166 | "source": [ 167 | "We can retrieve information about a job through [Workspace.get_job](https://learn.microsoft.com/en-us/azure/quantum/optimization-job-management), and also query the results by filtering the job name with [Workspace.list_jobs](https://learn.microsoft.com/en-us/azure/quantum/optimization-job-management)." 
168 | ] 169 | }, 170 | { 171 | "cell_type": "code", 172 | "execution_count": null, 173 | "metadata": { 174 | "collapsed": false, 175 | "jupyter": { 176 | "outputs_hidden": false, 177 | "source_hidden": false 178 | }, 179 | "nteract": { 180 | "transient": { 181 | "deleting": false 182 | } 183 | } 184 | }, 185 | "outputs": [], 186 | "source": [ 187 | "# query the latest job that match the given name\n", 188 | "job = workspace.list_jobs(name_match=job_name)[-1]\n", 189 | "\n", 190 | "# refresh the job SAS for using the API\n", 191 | "job.refresh()\n", 192 | "\n", 193 | "# show the status of the job\n", 194 | "print(f'Job: \"{job_name}\" is {job.details.status}')" 195 | ] 196 | }, 197 | { 198 | "cell_type": "code", 199 | "execution_count": null, 200 | "metadata": { 201 | "collapsed": false, 202 | "jupyter": { 203 | "outputs_hidden": false, 204 | "source_hidden": false 205 | }, 206 | "nteract": { 207 | "transient": { 208 | "deleting": false 209 | } 210 | } 211 | }, 212 | "outputs": [], 213 | "source": [ 214 | "# read the results of the job\n", 215 | "if job.details.status == 'Succeeded':\n", 216 | " results = job.get_results()\n", 217 | "else:\n", 218 | " results = f'\"{job_name}\" is still {job.details.status}...'\n", 219 | "\n", 220 | "# QCSchema Output\n", 221 | "qcschema = results[\"results\"][0]\n", 222 | "qcschema[\"return_result\"]" 223 | ] 224 | }, 225 | { 226 | "cell_type": "markdown", 227 | "metadata": {}, 228 | "source": [ 229 | "## Output to QCSchema json file" 230 | ] 231 | }, 232 | { 233 | "cell_type": "code", 234 | "execution_count": null, 235 | "metadata": {}, 236 | "outputs": [], 237 | "source": [ 238 | "import json\n", 239 | "n = job_name + \"_output\"\n", 240 | "with open(n+\".json\", \"w\") as fp:\n", 241 | " json.dump(qcschema, fp)" 242 | ] 243 | }, 244 | { 245 | "cell_type": "code", 246 | "execution_count": null, 247 | "metadata": {}, 248 | "outputs": [], 249 | "source": [] 250 | } 251 | ], 252 | "metadata": { 253 | "kernel_info": { 254 | "name": 
"python3" 255 | }, 256 | "kernelspec": { 257 | "display_name": "Python [conda env:.conda-dft]", 258 | "language": "python", 259 | "name": "conda-env-.conda-dft-py" 260 | }, 261 | "language_info": { 262 | "codemirror_mode": { 263 | "name": "ipython", 264 | "version": 3 265 | }, 266 | "file_extension": ".py", 267 | "mimetype": "text/x-python", 268 | "name": "python", 269 | "nbconvert_exporter": "python", 270 | "pygments_lexer": "ipython3", 271 | "version": "3.11.8" 272 | }, 273 | "nteract": { 274 | "version": "nteract-front-end@1.0.0" 275 | } 276 | }, 277 | "nbformat": 4, 278 | "nbformat_minor": 4 279 | } 280 | -------------------------------------------------------------------------------- /benchmarks/input_spe-wb97x.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "nteract": { 7 | "transient": { 8 | "deleting": false 9 | } 10 | } 11 | }, 12 | "source": [ 13 | "# Submission of Calculation with Subsequent Results Query\n", 14 | "\n", 15 | "In this demo, we will submit a calculation, check the status of the job and query the results after it is finished." 
16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": null, 21 | "metadata": { 22 | "collapsed": false, 23 | "jupyter": { 24 | "outputs_hidden": false, 25 | "source_hidden": false 26 | }, 27 | "nteract": { 28 | "transient": { 29 | "deleting": false 30 | } 31 | } 32 | }, 33 | "outputs": [], 34 | "source": [ 35 | "from azure.quantum import Workspace\n", 36 | "from azure.quantum.job import JobFailedWithResultsError\n", 37 | "\n", 38 | "# insert connection string form Azure Portal Workspace Access Keys\n", 39 | "connection_string = \"\" \n", 40 | "workspace = Workspace.from_connection_string(connection_string)" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": null, 46 | "metadata": { 47 | "collapsed": false, 48 | "jupyter": { 49 | "outputs_hidden": false, 50 | "source_hidden": false 51 | }, 52 | "nteract": { 53 | "transient": { 54 | "deleting": false 55 | } 56 | } 57 | }, 58 | "outputs": [], 59 | "source": [ 60 | "print(\"Verifying access to DFT target.\")\n", 61 | "\n", 62 | "# To submit DFT jobs, we will be using the microsoft.dft target in the workspace.\n", 63 | "target = workspace.get_targets(\"microsoft.dft\")" 64 | ] 65 | }, 66 | { 67 | "cell_type": "code", 68 | "execution_count": null, 69 | "metadata": { 70 | "collapsed": false, 71 | "jupyter": { 72 | "outputs_hidden": false, 73 | "source_hidden": false 74 | }, 75 | "nteract": { 76 | "transient": { 77 | "deleting": false 78 | } 79 | } 80 | }, 81 | "outputs": [], 82 | "source": [ 83 | "# First, let's define the molecular structure, loaded from an xyz file.\n", 84 | "from pathlib import Path\n", 85 | "GeomFile = \"aspirin.xyz\"" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": null, 91 | "metadata": {}, 92 | "outputs": [], 93 | "source": [ 94 | "# Secondly, let's give a name for the job.\n", 95 | "job_name = 'aspirin_spe'" 96 | ] 97 | }, 98 | { 99 | "cell_type": "markdown", 100 | "metadata": { 101 | "nteract": { 102 | "transient": { 103 | "deleting": 
false 104 | } 105 | } 106 | }, 107 | "source": [ 108 | "Now we submit the calculation to the MADFT service." 109 | ] 110 | }, 111 | { 112 | "cell_type": "code", 113 | "execution_count": null, 114 | "metadata": { 115 | "collapsed": false, 116 | "jupyter": { 117 | "outputs_hidden": false, 118 | "source_hidden": false 119 | }, 120 | "nteract": { 121 | "transient": { 122 | "deleting": false 123 | } 124 | } 125 | }, 126 | "outputs": [], 127 | "source": [ 128 | "# Next, we create a dictionary variable to specify the parameters for the DFT calculation. \n", 129 | "dft_input_params = {\n", 130 | " \"tasks\": [\n", 131 | " {\n", 132 | " \"taskType\": \"spe\", \n", 133 | " \"basisSet\": { \"name\": 'def2-tzvpp'},\n", 134 | " \"xcFunctional\": { \"name\": \"wb97x\", \"gridLevel\": 4 },\n", 135 | " \"molecule\": { \"charge\": 0, \"multiplicity\": 1 },\n", 136 | " \"scf\": { \"method\": \"rks\", \"maxSteps\": 100, \"convergeThreshold\": 1e-8 }\n", 137 | " }\n", 138 | " ]\n", 139 | "}\n", 140 | "\n", 141 | "# We are now ready to submit the Job using the target.submit call. It takes three parameters-\n", 142 | "# 1. The input molecule in xyz format.\n", 143 | "# 2. The DFT parameters that we declared above.\n", 144 | "# 3. 
A friendly name to help identify the job in the Azure Portal later.\n", 145 | "\n", 146 | "print(\"Submitting DFT job.\")\n", 147 | "\n", 148 | "job = target.submit(\n", 149 | " input_data=Path(GeomFile).read_text(),\n", 150 | " input_params = dft_input_params,\n", 151 | " name= job_name)\n", 152 | " \n", 153 | "print(\"\\nDFT job has been submitted.\")\n", 154 | "print(f\"\\nJob name: {job_name}\")\n" 155 | ] 156 | }, 157 | { 158 | "cell_type": "markdown", 159 | "metadata": { 160 | "nteract": { 161 | "transient": { 162 | "deleting": false 163 | } 164 | } 165 | }, 166 | "source": [ 167 | "We can retrieve information about a job through [Workspace.get_job](https://learn.microsoft.com/en-us/azure/quantum/optimization-job-management), and also query the results by filtering the job name with [Workspace.list_jobs](https://learn.microsoft.com/en-us/azure/quantum/optimization-job-management)." 168 | ] 169 | }, 170 | { 171 | "cell_type": "code", 172 | "execution_count": null, 173 | "metadata": { 174 | "collapsed": false, 175 | "jupyter": { 176 | "outputs_hidden": false, 177 | "source_hidden": false 178 | }, 179 | "nteract": { 180 | "transient": { 181 | "deleting": false 182 | } 183 | } 184 | }, 185 | "outputs": [], 186 | "source": [ 187 | "# query the latest job that match the given name\n", 188 | "job = workspace.list_jobs(name_match=job_name)[-1]\n", 189 | "\n", 190 | "# refresh the job SAS for using the API\n", 191 | "job.refresh()\n", 192 | "\n", 193 | "# show the status of the job\n", 194 | "print(f'Job: \"{job_name}\" is {job.details.status}')" 195 | ] 196 | }, 197 | { 198 | "cell_type": "code", 199 | "execution_count": null, 200 | "metadata": { 201 | "collapsed": false, 202 | "jupyter": { 203 | "outputs_hidden": false, 204 | "source_hidden": false 205 | }, 206 | "nteract": { 207 | "transient": { 208 | "deleting": false 209 | } 210 | } 211 | }, 212 | "outputs": [], 213 | "source": [ 214 | "# read the results of the job\n", 215 | "if job.details.status == 
'Succeeded':\n", 216 | " results = job.get_results()\n", 217 | "else:\n", 218 | " results = f'\"{job_name}\" is still {job.details.status}...'\n", 219 | "\n", 220 | "# QCSchema Output\n", 221 | "qcschema = results[\"results\"][0]\n", 222 | "qcschema[\"return_result\"]" 223 | ] 224 | }, 225 | { 226 | "cell_type": "markdown", 227 | "metadata": {}, 228 | "source": [ 229 | "## Output to QCSchema json file" 230 | ] 231 | }, 232 | { 233 | "cell_type": "code", 234 | "execution_count": null, 235 | "metadata": {}, 236 | "outputs": [], 237 | "source": [ 238 | "import json\n", 239 | "n = job_name + \"_output\"\n", 240 | "with open(n+\".json\", \"w\") as fp:\n", 241 | " json.dump(qcschema, fp)" 242 | ] 243 | }, 244 | { 245 | "cell_type": "code", 246 | "execution_count": null, 247 | "metadata": {}, 248 | "outputs": [], 249 | "source": [] 250 | } 251 | ], 252 | "metadata": { 253 | "kernel_info": { 254 | "name": "python3" 255 | }, 256 | "kernelspec": { 257 | "display_name": "Python [conda env:.conda-dft]", 258 | "language": "python", 259 | "name": "conda-env-.conda-dft-py" 260 | }, 261 | "language_info": { 262 | "codemirror_mode": { 263 | "name": "ipython", 264 | "version": 3 265 | }, 266 | "file_extension": ".py", 267 | "mimetype": "text/x-python", 268 | "name": "python", 269 | "nbconvert_exporter": "python", 270 | "pygments_lexer": "ipython3", 271 | "version": "3.11.8" 272 | }, 273 | "nteract": { 274 | "version": "nteract-front-end@1.0.0" 275 | } 276 | }, 277 | "nbformat": 4, 278 | "nbformat_minor": 4 279 | } 280 | -------------------------------------------------------------------------------- /samples/build_example.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "attachments": {}, 5 | "cell_type": "markdown", 6 | "metadata": { 7 | "nteract": { 8 | "transient": { 9 | "deleting": false 10 | } 11 | } 12 | }, 13 | "source": [ 14 | "# ADFT Input Examples\n", 15 | "\n", 16 | "In this demo, we submit a 
calculation, check the status of the job and query the results after it is finished. \n", 17 | "\n", 18 | "We demonstrate different options for building input for ADFT." 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 1, 24 | "metadata": { 25 | "collapsed": false, 26 | "jupyter": { 27 | "outputs_hidden": false, 28 | "source_hidden": false 29 | }, 30 | "nteract": { 31 | "transient": { 32 | "deleting": false 33 | } 34 | } 35 | }, 36 | "outputs": [], 37 | "source": [ 38 | "from azure.quantum import Workspace\n", 39 | "from azure.quantum.job import JobFailedWithResultsError\n", 40 | "\n", 41 | "# insert connection string form Azure Portal Workspace Access Keys\n", 42 | "connection_string = \"\"\n", 43 | "\n", 44 | "workspace = Workspace.from_connection_string(connection_string)" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": 2, 50 | "metadata": { 51 | "collapsed": false, 52 | "jupyter": { 53 | "outputs_hidden": false, 54 | "source_hidden": false 55 | }, 56 | "nteract": { 57 | "transient": { 58 | "deleting": false 59 | } 60 | } 61 | }, 62 | "outputs": [ 63 | { 64 | "name": "stdout", 65 | "output_type": "stream", 66 | "text": [ 67 | "Verifying access to Accelerated DFT target.\n", 68 | "Verification complete.\n" 69 | ] 70 | } 71 | ], 72 | "source": [ 73 | "# To submit Accelerated DFT jobs, we will be using the microsoft.dft target in the workspace.\n", 74 | "print(\"Verifying access to Accelerated DFT target.\")\n", 75 | "target = workspace.get_targets(\"microsoft.dft\")\n", 76 | "print(\"Verification complete.\")" 77 | ] 78 | }, 79 | { 80 | "attachments": {}, 81 | "cell_type": "markdown", 82 | "metadata": { 83 | "nteract": { 84 | "transient": { 85 | "deleting": false 86 | } 87 | } 88 | }, 89 | "source": [ 90 | "# Section 1: QCSchema Input" 91 | ] 92 | }, 93 | { 94 | "attachments": {}, 95 | "cell_type": "markdown", 96 | "metadata": {}, 97 | "source": [ 98 | "ADFT supports the QCSchema input format, an open standard for 
compatibility across computational chemistry software. \n", 99 | "An example of this format is shown below. Note the 'driver' here is 'energy', which will perform a single point energy calculation. \n", 100 | "The driver may also be 'gradient', 'hessian', 'go' or 'bomd', see the documentation for more detail. \n", 101 | "\n", 102 | "In this format the atom labels and coordinates are separated into separate arrays. Note that QCSchema coordinates are in Bohr. \n" 103 | ] 104 | }, 105 | { 106 | "cell_type": "code", 107 | "execution_count": 27, 108 | "metadata": {}, 109 | "outputs": [], 110 | "source": [ 111 | "qcschema_input = {\n", 112 | " \"driver\": \"energy\",\n", 113 | " \"model\": {\n", 114 | " \"method\": \"m06-2x\",\n", 115 | " \"basis\": \"def2-svpd\"\n", 116 | " },\n", 117 | " \"schema_name\": \"qcschema_input\",\n", 118 | " \"schema_version\": 1,\n", 119 | " \"molecule\": {\n", 120 | " \"extras\": {},\n", 121 | " \"symbols\": [\n", 122 | " \"O\",\n", 123 | " \"C\",\n", 124 | " \"C\",\n", 125 | " \"C\",\n", 126 | " \"C\",\n", 127 | " \"C\",\n", 128 | " \"C\",\n", 129 | " \"H\",\n", 130 | " \"H\",\n", 131 | " \"H\",\n", 132 | " \"H\",\n", 133 | " \"H\",\n", 134 | " \"H\"\n", 135 | " ],\n", 136 | " \"geometry\": [\n", 137 | " 4.730542147965709,\n", 138 | " 0.034826575331843086,\n", 139 | " 0.07810088784463559,\n", 140 | " 2.1361232242687977,\n", 141 | " 0.017709001458524106,\n", 142 | " 0.009088108672780787,\n", 143 | " 0.7996954919209014,\n", 144 | " 2.290483253979806,\n", 145 | " 0.10106814673106823,\n", 146 | " -1.8298562750208616,\n", 147 | " 2.2732950799384737,\n", 148 | " -0.04537958079912547,\n", 149 | " -3.1327572801516967,\n", 150 | " -0.00564083248182671,\n", 151 | " -0.28742004920350506,\n", 152 | " -1.790388872477789,\n", 153 | " -2.271959799458856,\n", 154 | " -0.38978844089184156,\n", 155 | " 0.8394687277399734,\n", 156 | " -2.2656284043593296,\n", 157 | " -0.24392044354214196,\n", 158 | " 5.279447115915874,\n", 159 | " -0.07938333158181043,\n", 
160 | " 1.8109098053069272,\n", 161 | " 1.8583211818406624,\n", 162 | " 4.051452964636673,\n", 163 | " 0.2691141588512759,\n", 164 | " -2.8675310249318393,\n", 165 | " 4.053900197762506,\n", 166 | " 0.0241508699472927,\n", 167 | " -5.190440656400895,\n", 168 | " -0.014523603513912258,\n", 169 | " -0.4052054313284032,\n", 170 | " -2.796624853566738,\n", 171 | " -4.060585444078858,\n", 172 | " -0.5909607661605761,\n", 173 | " 1.9285725820008635,\n", 174 | " -4.013248220398251,\n", 175 | " -0.3415529925897059\n", 176 | " ],\n", 177 | " \"molecular_charge\": 0, \n", 178 | " \"molecular_multiplicity\": 1\n", 179 | " }\n", 180 | "}" 181 | ] 182 | }, 183 | { 184 | "cell_type": "code", 185 | "execution_count": 28, 186 | "metadata": {}, 187 | "outputs": [ 188 | { 189 | "name": "stdout", 190 | "output_type": "stream", 191 | "text": [ 192 | "\n", 193 | "DFT job has been submitted.\n", 194 | ".............\n", 195 | "DFT job completed.\n" 196 | ] 197 | } 198 | ], 199 | "source": [ 200 | "job_1 = target.submit(input_data=[qcschema_input],name='phenol_spe_method_1')\n", 201 | "print(\"\\nDFT job has been submitted.\")\n", 202 | "job_1.wait_until_completed()\n", 203 | "print(\"\\nDFT job completed.\")" 204 | ] 205 | }, 206 | { 207 | "attachments": {}, 208 | "cell_type": "markdown", 209 | "metadata": {}, 210 | "source": [ 211 | "# Section 2: Submitting using an XYZ file and a partial Schema\n" 212 | ] 213 | }, 214 | { 215 | "attachments": {}, 216 | "cell_type": "markdown", 217 | "metadata": {}, 218 | "source": [ 219 | "ADFT calculations can also be submitted using an xyz file and a partial QCSchema input via use of the Azure-Quantum SDK. \n", 220 | "Note that the charge and mutliplicity cannot be changed from neutral singlet and it is suggested to use method 1 or 3. 
" 221 | ] 222 | }, 223 | { 224 | "cell_type": "code", 225 | "execution_count": 11, 226 | "metadata": {}, 227 | "outputs": [ 228 | { 229 | "name": "stdout", 230 | "output_type": "stream", 231 | "text": [ 232 | "\n", 233 | "DFT job has been submitted.\n", 234 | ".............\n", 235 | "DFT job completed.\n" 236 | ] 237 | } 238 | ], 239 | "source": [ 240 | "import glob\n", 241 | "\n", 242 | "# Next, we create a dictionary variable to specify the parameters for the DFT calculation. \n", 243 | "params = {\n", 244 | " \"driver\": \"energy\",\n", 245 | " \"model\": { \"method\": \"m06-2x\", \"basis\": \"def2-svpd\" },\n", 246 | " \"keywords\": {\n", 247 | " \"max_scf_steps\": 100,\n", 248 | " \"convergence_threshold\": 1e-8,\n", 249 | " },\n", 250 | "}\n", 251 | "\n", 252 | "# specify the molecule:\n", 253 | "xyz_file = glob.glob('molecules/phenol.xyz')\n", 254 | "\n", 255 | "# We are now ready to submit the Job using the target.submit call. It takes three parameters-\n", 256 | "# 1. The input molecule in xyz format.\n", 257 | "# 2. The DFT parameters that we declared above.\n", 258 | "# 3. A friendly name to help identify the job in the Azure Portal later.\n", 259 | "\n", 260 | "job_2 = target.submit(\n", 261 | " input_params=params,\n", 262 | " #input_data=['molecules/phenol.xyz'],\n", 263 | " input_data=xyz_file,\n", 264 | " name='phenol_spe_method_2',\n", 265 | ")\n", 266 | "\n", 267 | "print(\"\\nDFT job has been submitted.\")\n", 268 | "job_2.wait_until_completed() # this would wait until each ADFT calculation is completed\n", 269 | "print(\"\\nDFT job completed.\")" 270 | ] 271 | }, 272 | { 273 | "attachments": {}, 274 | "cell_type": "markdown", 275 | "metadata": {}, 276 | "source": [ 277 | "# Section 3: Building a QCSchema Input Using Azure_quantum SDK" 278 | ] 279 | }, 280 | { 281 | "attachments": {}, 282 | "cell_type": "markdown", 283 | "metadata": {}, 284 | "source": [ 285 | "The main input of ADFT is the QCSchema format. 
\n", 286 | "It is therefore advantageous to use this format and offers the most flexibility in input. \n", 287 | "Additionally, if the charge and multiplicity are to be altered from a neutral singlet the QCSchema is the way to do this." 288 | ] 289 | }, 290 | { 291 | "cell_type": "code", 292 | "execution_count": 14, 293 | "metadata": {}, 294 | "outputs": [], 295 | "source": [ 296 | "xyz_files = ['molecules/phenol.xyz']\n", 297 | "params = {\n", 298 | " \"driver\": \"energy\",\n", 299 | " \"model\": { \n", 300 | " \"method\": \"m06-2x\", \n", 301 | " \"basis\": \"def2-svpd\" \n", 302 | " }\n", 303 | "}\n", 304 | "\n", 305 | "# create input qcschema\n", 306 | "qcschema_input = target.assemble_qcschema_from_files(xyz_files, params)\n", 307 | "\n", 308 | "# view the fields using qcschema_input.keys()\n", 309 | "\n", 310 | "# specifiy charge and multiplicity\n", 311 | "# (note the index '0' as we only have a single qcschema) \n", 312 | "qcschema_input[0]['molecule']['molecular_charge'] = 0\n", 313 | "qcschema_input[0]['molecule']['molecular_multiplicity'] = 1 " 314 | ] 315 | }, 316 | { 317 | "cell_type": "code", 318 | "execution_count": 15, 319 | "metadata": {}, 320 | "outputs": [ 321 | { 322 | "name": "stdout", 323 | "output_type": "stream", 324 | "text": [ 325 | "\n", 326 | "DFT job has been submitted.\n", 327 | ".............\n", 328 | "DFT job completed.\n" 329 | ] 330 | } 331 | ], 332 | "source": [ 333 | "job_3 = target.submit(input_data=qcschema_input,name='phenol_spe_method_3')\n", 334 | "print(\"\\nDFT job has been submitted.\")\n", 335 | "job_3.wait_until_completed()\n", 336 | "print(\"\\nDFT job completed.\")" 337 | ] 338 | }, 339 | { 340 | "attachments": {}, 341 | "cell_type": "markdown", 342 | "metadata": {}, 343 | "source": [ 344 | "# Query Job Status and Retreive Results" 345 | ] 346 | }, 347 | { 348 | "cell_type": "markdown", 349 | "metadata": { 350 | "nteract": { 351 | "transient": { 352 | "deleting": false 353 | } 354 | } 355 | }, 356 | "source": [ 357 
| "We can retrieve information about a job through [Workspace.get_job](https://learn.microsoft.com/en-us/azure/quantum/optimization-job-management), and also query the results by filtering the job name with [Workspace.list_jobs](https://learn.microsoft.com/en-us/azure/quantum/optimization-job-management)." 358 | ] 359 | }, 360 | { 361 | "cell_type": "code", 362 | "execution_count": 29, 363 | "metadata": {}, 364 | "outputs": [], 365 | "source": [ 366 | "results_1 = job_1.get_results()\n", 367 | "results_2 = job_2.get_results()\n", 368 | "results_3 = job_3.get_results()\n", 369 | "\n", 370 | "# QCSchema Output\n", 371 | "qcschema_1 = results_1[\"results\"][0]\n", 372 | "qcschema_2 = results_2[\"results\"][0]\n", 373 | "qcschema_3 = results_3[\"results\"][0]" 374 | ] 375 | }, 376 | { 377 | "cell_type": "code", 378 | "execution_count": 30, 379 | "metadata": {}, 380 | "outputs": [ 381 | { 382 | "name": "stdout", 383 | "output_type": "stream", 384 | "text": [ 385 | "Method 1, Total Energy (Hartree): -307.12139308\n", 386 | "Method 2, Total Energy (Hartree): -307.12139308\n", 387 | "Method 3, Total Energy (Hartree): -307.12139308\n" 388 | ] 389 | } 390 | ], 391 | "source": [ 392 | "# The energy can be accessed:\n", 393 | "print(\"Method 1, Total Energy (Hartree): {:.8f}\".format(qcschema_1['properties']['return_energy']))\n", 394 | "# The energy can be accessed:\n", 395 | "print(\"Method 2, Total Energy (Hartree): {:.8f}\".format(qcschema_2['properties']['return_energy']))\n", 396 | "# The energy can be accessed:\n", 397 | "print(\"Method 3, Total Energy (Hartree): {:.8f}\".format(qcschema_3['properties']['return_energy']))" 398 | ] 399 | }, 400 | { 401 | "cell_type": "code", 402 | "execution_count": null, 403 | "metadata": {}, 404 | "outputs": [], 405 | "source": [] 406 | } 407 | ], 408 | "metadata": { 409 | "kernel_info": { 410 | "name": "python3" 411 | }, 412 | "kernelspec": { 413 | "display_name": "testtest", 414 | "language": "python", 415 | "name": "python3" 416 | 
}, 417 | "language_info": { 418 | "codemirror_mode": { 419 | "name": "ipython", 420 | "version": 3 421 | }, 422 | "file_extension": ".py", 423 | "mimetype": "text/x-python", 424 | "name": "python", 425 | "nbconvert_exporter": "python", 426 | "pygments_lexer": "ipython3", 427 | "version": "3.11.10" 428 | }, 429 | "nteract": { 430 | "version": "nteract-front-end@1.0.0" 431 | } 432 | }, 433 | "nbformat": 4, 434 | "nbformat_minor": 4 435 | } 436 | -------------------------------------------------------------------------------- /samples/go.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "nteract": { 7 | "transient": { 8 | "deleting": false 9 | } 10 | } 11 | }, 12 | "source": [ 13 | "# Submission of Calculation with Subsequent Results Query\n", 14 | "\n", 15 | "In this demo, we will submit a calculation, check the status of the job and query the results after it is finished." 16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": 14, 21 | "metadata": { 22 | "collapsed": false, 23 | "jupyter": { 24 | "outputs_hidden": false, 25 | "source_hidden": false 26 | }, 27 | "nteract": { 28 | "transient": { 29 | "deleting": false 30 | } 31 | } 32 | }, 33 | "outputs": [], 34 | "source": [ 35 | "from azure.quantum import Workspace\n", 36 | "from azure.quantum.job import JobFailedWithResultsError\n", 37 | "\n", 38 | "# insert connection string form Azure Portal Workspace Access Keys\n", 39 | "connection_string = \"\"\n", 40 | "workspace = Workspace.from_connection_string(connection_string)" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": 15, 46 | "metadata": { 47 | "collapsed": false, 48 | "jupyter": { 49 | "outputs_hidden": false, 50 | "source_hidden": false 51 | }, 52 | "nteract": { 53 | "transient": { 54 | "deleting": false 55 | } 56 | } 57 | }, 58 | "outputs": [ 59 | { 60 | "name": "stdout", 61 | "output_type": "stream", 62 
| "text": [ 63 | "Verifying access to Accelerated DFT target.\n", 64 | "Verification complete.\n" 65 | ] 66 | } 67 | ], 68 | "source": [ 69 | "# To submit Accelerated DFT jobs, we will be using the microsoft.dft target in the workspace.\n", 70 | "print(\"Verifying access to Accelerated DFT target.\")\n", 71 | "target = workspace.get_targets(\"microsoft.dft\")\n", 72 | "print(\"Verification complete.\")" 73 | ] 74 | }, 75 | { 76 | "attachments": {}, 77 | "cell_type": "markdown", 78 | "metadata": { 79 | "nteract": { 80 | "transient": { 81 | "deleting": false 82 | } 83 | } 84 | }, 85 | "source": [ 86 | "Now we submit the calculations to MADFT service.\n", 87 | "\n", 88 | "The QCSchema input below is for a DFT geometry optimization on the molecule phenol (with the geometry given in Bohr)" 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": 16, 94 | "metadata": {}, 95 | "outputs": [], 96 | "source": [ 97 | "# Note that for geometry optmimization, the input molecule field is 'initial_molecule'\n", 98 | "qcschema_input = {\n", 99 | " \"schema_name\": \"qcschema_optimization_input\",\n", 100 | " \"schema_version\": 1,\n", 101 | " \"initial_molecule\": {\n", 102 | " \"extras\": {},\n", 103 | " \"symbols\": [\n", 104 | " \"O\",\n", 105 | " \"C\",\n", 106 | " \"C\",\n", 107 | " \"C\",\n", 108 | " \"C\",\n", 109 | " \"C\",\n", 110 | " \"C\",\n", 111 | " \"H\",\n", 112 | " \"H\",\n", 113 | " \"H\",\n", 114 | " \"H\",\n", 115 | " \"H\",\n", 116 | " \"H\"\n", 117 | " ],\n", 118 | " \"geometry\": [\n", 119 | " 4.730542147965709,\n", 120 | " 0.034826575331843086,\n", 121 | " 0.07810088784463559,\n", 122 | " 2.1361232242687977,\n", 123 | " 0.017709001458524106,\n", 124 | " 0.009088108672780787,\n", 125 | " 0.7996954919209014,\n", 126 | " 2.290483253979806,\n", 127 | " 0.10106814673106823,\n", 128 | " -1.8298562750208616,\n", 129 | " 2.2732950799384737,\n", 130 | " -0.04537958079912547,\n", 131 | " -3.1327572801516967,\n", 132 | " -0.00564083248182671,\n", 133 | " 
-0.28742004920350506,\n", 134 | " -1.790388872477789,\n", 135 | " -2.271959799458856,\n", 136 | " -0.38978844089184156,\n", 137 | " 0.8394687277399734,\n", 138 | " -2.2656284043593296,\n", 139 | " -0.24392044354214196,\n", 140 | " 5.279447115915874,\n", 141 | " -0.07938333158181043,\n", 142 | " 1.8109098053069272,\n", 143 | " 1.8583211818406624,\n", 144 | " 4.051452964636673,\n", 145 | " 0.2691141588512759,\n", 146 | " -2.8675310249318393,\n", 147 | " 4.053900197762506,\n", 148 | " 0.0241508699472927,\n", 149 | " -5.190440656400895,\n", 150 | " -0.014523603513912258,\n", 151 | " -0.4052054313284032,\n", 152 | " -2.796624853566738,\n", 153 | " -4.060585444078858,\n", 154 | " -0.5909607661605761,\n", 155 | " 1.9285725820008635,\n", 156 | " -4.013248220398251,\n", 157 | " -0.3415529925897059\n", 158 | " ]\n", 159 | " },\n", 160 | " \"input_specification\": {\n", 161 | " \"driver\": \"gradient\",\n", 162 | " \"model\": {\n", 163 | " \"method\": \"m06-2x\",\n", 164 | " \"basis\": \"def2-svp\"\n", 165 | " }\n", 166 | " }\n", 167 | " }" 168 | ] 169 | }, 170 | { 171 | "cell_type": "code", 172 | "execution_count": 17, 173 | "metadata": {}, 174 | "outputs": [ 175 | { 176 | "data": { 177 | "text/plain": [ 178 | "" 179 | ] 180 | }, 181 | "execution_count": 17, 182 | "metadata": {}, 183 | "output_type": "execute_result" 184 | } 185 | ], 186 | "source": [ 187 | "# Submit jobs:\n", 188 | "job_name = 'phenol_go' \n", 189 | "target.submit(input_data=[qcschema_input],name=job_name)" 190 | ] 191 | }, 192 | { 193 | "attachments": {}, 194 | "cell_type": "markdown", 195 | "metadata": {}, 196 | "source": [ 197 | "# Query Job Status and Retreive Results" 198 | ] 199 | }, 200 | { 201 | "cell_type": "markdown", 202 | "metadata": { 203 | "nteract": { 204 | "transient": { 205 | "deleting": false 206 | } 207 | } 208 | }, 209 | "source": [ 210 | "We can retrieve information about a job through [Workspace.get_job](https://learn.microsoft.com/en-us/azure/quantum/optimization-job-management), and 
also query the results by filtering the job name with [Workspace.list_jobs](https://learn.microsoft.com/en-us/azure/quantum/optimization-job-management)." 211 | ] 212 | }, 213 | { 214 | "cell_type": "code", 215 | "execution_count": 19, 216 | "metadata": { 217 | "collapsed": false, 218 | "jupyter": { 219 | "outputs_hidden": false, 220 | "source_hidden": false 221 | }, 222 | "nteract": { 223 | "transient": { 224 | "deleting": false 225 | } 226 | } 227 | }, 228 | "outputs": [ 229 | { 230 | "name": "stdout", 231 | "output_type": "stream", 232 | "text": [ 233 | "Job: \"phenol_go\" is Succeeded\n" 234 | ] 235 | } 236 | ], 237 | "source": [ 238 | "# query the latest job that match the given name\n", 239 | "job = workspace.list_jobs(name_match=job_name)[-1]\n", 240 | "\n", 241 | "# refresh the job SAS for using the API\n", 242 | "job.refresh()\n", 243 | "\n", 244 | "# show the status of the job\n", 245 | "print(f'Job: \"{job_name}\" is {job.details.status}')" 246 | ] 247 | }, 248 | { 249 | "cell_type": "code", 250 | "execution_count": 20, 251 | "metadata": { 252 | "collapsed": false, 253 | "jupyter": { 254 | "outputs_hidden": false, 255 | "source_hidden": false 256 | }, 257 | "nteract": { 258 | "transient": { 259 | "deleting": false 260 | } 261 | } 262 | }, 263 | "outputs": [ 264 | { 265 | "name": "stdout", 266 | "output_type": "stream", 267 | "text": [ 268 | " Job Succeeded \n" 269 | ] 270 | } 271 | ], 272 | "source": [ 273 | "# read the results of the job\n", 274 | "if job.details.status == 'Succeeded':\n", 275 | " print(\" Job Succeeded \")\n", 276 | " results = job.get_results()\n", 277 | " # QCSchema Output\n", 278 | " qcschema = results[\"results\"][0]\n", 279 | "else:\n", 280 | " results = f'\"{job_name}\" is {job.details.status}...'\n" 281 | ] 282 | }, 283 | { 284 | "attachments": {}, 285 | "cell_type": "markdown", 286 | "metadata": {}, 287 | "source": [ 288 | "# Results" 289 | ] 290 | }, 291 | { 292 | "attachments": {}, 293 | "cell_type": "markdown", 294 | 
"metadata": {}, 295 | "source": [ 296 | "The results of the calculation are stored in the QCSchema format dict.\n", 297 | "\n", 298 | "We can print the energy of the optimized structure:" 299 | ] 300 | }, 301 | { 302 | "cell_type": "code", 303 | "execution_count": 21, 304 | "metadata": {}, 305 | "outputs": [ 306 | { 307 | "name": "stdout", 308 | "output_type": "stream", 309 | "text": [ 310 | "Total Energy of optimized geometry (Hartree): -307.1041205787441\n" 311 | ] 312 | } 313 | ], 314 | "source": [ 315 | "# The energy can be accessed:\n", 316 | "\n", 317 | "print(\"Total Energy of optimized geometry (Hartree): \", qcschema[\"energies\"][-1])" 318 | ] 319 | }, 320 | { 321 | "attachments": {}, 322 | "cell_type": "markdown", 323 | "metadata": {}, 324 | "source": [ 325 | "We can print the coordinates of the optimized geometry. Note that QCSchema output uses Bohr." 326 | ] 327 | }, 328 | { 329 | "cell_type": "code", 330 | "execution_count": 30, 331 | "metadata": {}, 332 | "outputs": [ 333 | { 334 | "name": "stdout", 335 | "output_type": "stream", 336 | "text": [ 337 | "Geometry in Bohr\n", 338 | "[[ 4.73022902 0.03386118 0.0777041 ]\n", 339 | " [ 2.13574875 0.01746496 0.00889083]\n", 340 | " [ 0.79944694 2.29028248 0.10084778]\n", 341 | " [-1.8301604 2.2730637 -0.04546647]\n", 342 | " [-3.13299875 -0.00582002 -0.28754239]\n", 343 | " [-1.7907981 -2.27225391 -0.39007255]\n", 344 | " [ 0.83898948 -2.26583795 -0.24409322]\n", 345 | " [ 5.27894218 -0.07729315 1.81079002]\n", 346 | " [ 1.85800432 4.05132768 0.26855208]\n", 347 | " [-2.86767161 4.0537677 0.02398202]\n", 348 | " [-5.19069024 -0.01466732 -0.40521116]\n", 349 | " [-2.79714373 -4.06082458 -0.59117622]\n", 350 | " [ 1.92810522 -4.01343886 -0.34167041]]\n", 351 | "\n", 352 | "\n", 353 | " Geometry in Angstrom\n", 354 | "[[ 2.5031284 0.01791856 0.04111922]\n", 355 | " [ 1.13018912 0.00924206 0.00470482]\n", 356 | " [ 0.42304893 1.21196481 0.05336633]\n", 357 | " [-0.96847879 1.20285303 -0.02405981]\n", 358 | " 
[-1.65791088 -0.00307982 -0.15216082]\n", 359 | " [-0.94764917 -1.20242451 -0.20641742]\n", 360 | " [ 0.44397394 -1.19902933 -0.12916852]\n", 361 | " [ 2.79349479 -0.04090176 0.95822843]\n", 362 | " [ 0.98321315 2.14386943 0.14211158]\n", 363 | " [-1.51750586 2.14516063 0.01269073]\n", 364 | " [-2.74679389 -0.00776161 -0.21442843]\n", 365 | " [-1.48018413 -2.14889497 -0.31283686]\n", 366 | " [ 1.02030894 -2.12381954 -0.18080412]]\n" 367 | ] 368 | } 369 | ], 370 | "source": [ 371 | "import numpy as np\n", 372 | "syms = np.array(qcschema[\"final_molecule\"][\"symbols\"])\n", 373 | "coords = np.array(qcschema[\"final_molecule\"][\"geometry\"] )\n", 374 | "NAtoms = len(syms)\n", 375 | "\n", 376 | "print(\"Geometry in Bohr\")\n", 377 | "coords_bohr = np.reshape(coords, (-1,3))\n", 378 | "print(coords_bohr)\n", 379 | "print(\"\")\n", 380 | "\n", 381 | "# Convert coordinates to Angstrom\n", 382 | "bohr_to_angstrom = 0.529177\n", 383 | "coords_angstrom = coords * bohr_to_angstrom\n", 384 | "coords_angstrom = np.reshape(coords_angstrom, (NAtoms,3))\n", 385 | "print(f'\\n Geometry in Angstrom')\n", 386 | "print(coords_angstrom)\n" 387 | ] 388 | }, 389 | { 390 | "cell_type": "markdown", 391 | "metadata": {}, 392 | "source": [ 393 | "## Output to QCSchema json file" 394 | ] 395 | }, 396 | { 397 | "cell_type": "code", 398 | "execution_count": 12, 399 | "metadata": {}, 400 | "outputs": [], 401 | "source": [ 402 | "import json\n", 403 | "n = job_name + \"_output\"\n", 404 | "with open(n+\".json\", \"w\") as fp:\n", 405 | " json.dump(qcschema, fp, indent=4)" 406 | ] 407 | } 408 | ], 409 | "metadata": { 410 | "kernel_info": { 411 | "name": "python3" 412 | }, 413 | "kernelspec": { 414 | "display_name": "testtest", 415 | "language": "python", 416 | "name": "python3" 417 | }, 418 | "language_info": { 419 | "codemirror_mode": { 420 | "name": "ipython", 421 | "version": 3 422 | }, 423 | "file_extension": ".py", 424 | "mimetype": "text/x-python", 425 | "name": "python", 426 | 
"nbconvert_exporter": "python", 427 | "pygments_lexer": "ipython3", 428 | "version": "3.11.10" 429 | }, 430 | "nteract": { 431 | "version": "nteract-front-end@1.0.0" 432 | } 433 | }, 434 | "nbformat": 4, 435 | "nbformat_minor": 4 436 | } 437 | -------------------------------------------------------------------------------- /samples/properties/README.md: -------------------------------------------------------------------------------- 1 | # Properties 2 | Accelerated DFT produces QCSchema formatted output. Energies (total energies, repulsions energies etc) and useful system information is output into the QCSchema json file, while orbitals (MO coefficients and basis information) are output to a QCSchema compliant hdf5 file. 3 | These, as well as the molecule information (and the hessian, if computed) can be read into PySCF, in order to use additional functionality and to compute properties, such as Mulliken population analysis, frequency calculations and thermochemistry. 4 | 5 | In order to use the scripts below, a conda environment must first be created. `conda env create --file env_properties.yaml` will create the environment 'adft_properties', which can be activated using `conda activate adft_properties`. 6 | 7 | This directory contains example scripts for loading information into PySCF and computing a range of properties. For example: 8 | 9 | 10 | [load_qcschema_result_compute_Dipole_Moment_uks.py](./load_qcschema_result_compute_Dipole_Moment_uks.py) - computes the molecular dipole moment. 11 | 12 | [load_qcschema_result_compute_Dipole_Moment_rks.py](./load_qcschema_result_compute_Dipole_Moment_rks.py) - computes the molecular dipole moment. 13 | 14 | [load_qcschema_result_compute_RESP.py](./load_qcschema_result_compute_RESP.py) - generates RESP atomic charges from the molecular electrostatic potential. 
15 | 16 | [load_qcschema_result_compute_CHELPG.py](./load_qcschema_result_compute_CHELPG.py) - generates CHELPG atomic charges from the molecular electrostatic potential. 17 | 18 | [load_qcschema_result_compute_Mulliken_Population.py](./load_qcschema_result_compute_Mulliken_Population.py) - performs Mulliken population analysis. Provides partitioning of charge to atoms (and spin for open-shell systems). 19 | 20 | [load_qcschema_result_compute_Spin.py](./load_qcschema_result_compute_Spin.py) - computes the spin values and spin density (via Mulliken population analysis). 21 | 22 | [load_qcschema_result_compute_LocalizedOrbitals.py](./load_qcschema_result_compute_LocalizedOrbitals.py) - loads MOs and generates localized orbitals using several approaches. 23 | 24 | [load_qcschema_result_compute_Frequencies_and_Thermochemistry.py](./load_qcschema_result_compute_Frequencies_and_Thermochemistry.py) - computes frequencies and normal modes as well as thermochemistry info. 25 | 26 | [load_qcschema_result_compute_IR.py](./load_qcschema_result_compute_IR.py) - computes the InfraRed spectrum. 27 | 28 | [load_qcschema_result_compute_NMR.py](./load_qcschema_result_compute_NMR.py) - computes the NMR chemical shifts. 29 | 30 | [load_qcschema_result_compute_MEP.py](./load_qcschema_result_compute_MEP.py) - computes the Molecular Electrostatic Potential (MEP/MESP) 31 | 32 | [load_qcschema_result_compute_Polarizability.py](./load_qcschema_result_compute_Polarizability.py) - computes the polarizability of the molecule. 33 | 34 | [load_qcschema_result_compute_gTensor.py](./load_qcschema_result_compute_gTensor.py) - computes the g-tensor. 35 | 36 | [load_qcschema_result_compute_HFC.py](./load_qcschema_result_compute_HFC.py) - computes the hyper-fine coupling (HFC). 37 | 38 | [load_qcschema_result_compute_cubegen.py](load_qcschema_result_compute_cubegen.py) - generates cube files for density and an MO, for external viewing. 
39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /samples/properties/env_properties.yaml: -------------------------------------------------------------------------------- 1 | name: adft_properties 2 | channels: 3 | - pyscf 4 | - conda-forge 5 | - anaconda 6 | dependencies: 7 | - python =3.11 8 | - pyscf = 2.4 9 | - nglview =3.0.6 10 | - jupyterlab_widgets =1.1.2 11 | - jinja2 12 | - qcelemental =0.29.0 13 | - pip: 14 | - git+https://github.com/pyscf/properties@master 15 | -------------------------------------------------------------------------------- /samples/properties/load_qcschema_result_compute_CHELPG.py: -------------------------------------------------------------------------------- 1 | ## Computation of CHELPG charges. 2 | ## Loads QCSchema format json and hdf5 result from Accelerated DFT 3 | ## and computes dipole moment. 4 | ## Accelerated DFT task can be energy, gradient, hessian, 5 | ## or geometry optimization 6 | ## with the wavefunction output e.g. 
"write_wavefunction": "last" 7 | import pyscf 8 | from pyscf import gto, dft 9 | import json 10 | import numpy as np 11 | from tools.chelpg import chelpg_charges 12 | from tools.libqcschema import * 13 | from tools.wavefunction_hdf5_to_qcschema import * 14 | import argparse 15 | 16 | def main(): 17 | parser = argparse.ArgumentParser(description="Compute CHELPG charges from QCSchema and wavefunction files.") 18 | parser.add_argument("qcschema_json", help="Path to the QCSchema JSON file.") 19 | parser.add_argument("qcwavefunction_h5", help="Path to the wavefunction HDF5 file.") 20 | args = parser.parse_args() 21 | 22 | qcschema_json = args.qcschema_json 23 | qcwavefunction_h5 = args.qcwavefunction_h5 24 | 25 | # Load Accelerated DFT output json 26 | qcschema_dict = load_qcschema_json(qcschema_json) 27 | 28 | # Load wavefunction from hdf5 29 | qcwavefunction = {} 30 | qcwavefunction['wavefunction'] = read_hdf5_wavefunction(qcwavefunction_h5) 31 | 32 | # add wfn info to the total qcschema output 33 | qcschema_dict.update(qcwavefunction) 34 | 35 | # Create DFT object 36 | mol, ks = recreate_scf_obj(qcschema_dict) 37 | 38 | print("") 39 | 40 | #### Compute CHELPG charges #### 41 | # we have multiple choices for Van der Waals radii schemes and settings. 42 | # Some examples uses: 43 | 44 | # 1. simplest use, does default VDW scheme 45 | q = chelpg_charges(ks) 46 | print("method 1, charges: ") 47 | print(q) 48 | 49 | # 2. Use 'VDW' radii scheme and set grid settings 50 | options = {'VDW_SCHEME': 'VDW', 'deltaR':0.3, 'Rhead':2.8} 51 | q = chelpg_charges(ks,options) 52 | print("method 2, charges: ") 53 | print(q) 54 | 55 | # 3. Use 'UFF' radii scheme 56 | options = {'VDW_SCHEME': 'UFF'} 57 | q = chelpg_charges(ks,options) 58 | print("method 3, charges: ") 59 | print(q) 60 | 61 | # 4. 
user defined radii scheme (a dictionary) 62 | RVDW_bondi = {1: 1.1, 2: 1.40, 63 | 3: 1.82, 6: 1.70, 7: 1.55, 8: 1.52, 9: 1.47, 10: 1.54, 64 | 11: 2.27, 12: 1.73, 14: 2.10, 15: 1.80, 16: 1.80, 17: 1.75, 18: 1.88, 65 | 19: 2.75, 35: 1.85} 66 | options = {'VDW_RADII': RVDW_bondi} 67 | q = chelpg_charges(ks,options) 68 | print("method 4, charges: ") 69 | print(q) 70 | ############# 71 | 72 | #### compare to Mulliken charges #### 73 | # First compute density matrix 74 | mo_occ = ks.mo_occ 75 | mo_coeff = ks.mo_coeff 76 | dm = ks.make_rdm1(mo_coeff, mo_occ) 77 | # add verbose=0 to turn off printing of pop stuff 78 | mpop = pyscf.scf.hf.mulliken_pop(mol, dm, s=None,verbose=0)[-1] 79 | print("Mulliken Population charges:",mpop) 80 | print("") 81 | ############# 82 | 83 | 84 | 85 | if __name__ == "__main__": 86 | main() 87 | -------------------------------------------------------------------------------- /samples/properties/load_qcschema_result_compute_Dipole_Moment_rks.py: -------------------------------------------------------------------------------- 1 | ## Computation of Dipole Moment. 2 | ## Loads QCSchema format json and hdf5 result from Accelerated DFT 3 | ## and computes dipole moment. 4 | ## Accelerated DFT task can be energy, gradient, hessian, 5 | ## or geometry optimization 6 | ## with the wavefunction output e.g. 
"write_wavefunction": "last" 7 | import pyscf 8 | from pyscf import gto, dft 9 | import json 10 | import numpy as np 11 | from tools.libqcschema import * 12 | from tools.wavefunction_hdf5_to_qcschema import * 13 | import argparse 14 | 15 | def main(): 16 | parser = argparse.ArgumentParser(description="Compute dipole moment (RKS) from QCSchema and wavefunction files.") 17 | parser.add_argument("qcschema_json", help="Path to the QCSchema JSON file.") 18 | parser.add_argument("qcwavefunction_h5", help="Path to the wavefunction HDF5 file.") 19 | args = parser.parse_args() 20 | 21 | qcschema_json = args.qcschema_json 22 | qcwavefunction_h5 = args.qcwavefunction_h5 23 | 24 | # Load Accelerated DFT output json 25 | qcschema_dict = load_qcschema_json(qcschema_json) 26 | 27 | # Load wavefunction from hdf5 28 | qcwavefunction = {} 29 | qcwavefunction['wavefunction'] = read_hdf5_wavefunction(qcwavefunction_h5) 30 | 31 | # add wfn info to the total qcschema output 32 | qcschema_dict.update(qcwavefunction) 33 | 34 | # Create DFT object 35 | mol, ks = recreate_scf_obj(qcschema_dict) 36 | 37 | #### Compute Molecular Dipole Moment #### 38 | # load density matrix 39 | dm = qcwavefunction['wavefunction']['scf_density_a'] 40 | # OR compute density matrix from MOs 41 | #mo_occ = ks.mo_occ 42 | #mo_coeff = ks.mo_coeff 43 | #dm = ks.make_rdm1(mo_coeff, mo_occ) 44 | # compute dipole 45 | DipMom = ks.dip_moment(ks.mol, dm, unit='Debye', verbose=3) 46 | # note: instead one can also use using single 'analyze' will compute dipole moment and mulliken population 47 | #an = pyscf.scf.hf.analyze(ks) 48 | ############# 49 | 50 | if __name__ == "__main__": 51 | main() 52 | -------------------------------------------------------------------------------- /samples/properties/load_qcschema_result_compute_Dipole_Moment_uks.py: -------------------------------------------------------------------------------- 1 | ## Computation of Dipole Moment. 
2 | ## Loads QCSchema format json and hdf5 result from Accelerated DFT 3 | ## and computes dipole moment. 4 | ## Accelerated DFT task can be energy, gradient, hessian, 5 | ## or geometry optimization 6 | ## with the wavefunction output e.g. "write_wavefunction": "last" 7 | import pyscf 8 | from pyscf import gto, dft 9 | import json 10 | import numpy as np 11 | from tools.libqcschema import * 12 | from tools.wavefunction_hdf5_to_qcschema import * 13 | import argparse 14 | 15 | def main(): 16 | parser = argparse.ArgumentParser(description="Compute dipole moment (UKS) from QCSchema and wavefunction files.") 17 | parser.add_argument("qcschema_json", help="Path to the QCSchema JSON file.") 18 | parser.add_argument("qcwavefunction_h5", help="Path to the wavefunction HDF5 file.") 19 | args = parser.parse_args() 20 | 21 | qcschema_json = args.qcschema_json 22 | qcwavefunction_h5 = args.qcwavefunction_h5 23 | 24 | # Load Accelerated DFT output json 25 | qcschema_dict = load_qcschema_json(qcschema_json) 26 | 27 | # Load wavefunction from hdf5 28 | qcwavefunction = {} 29 | qcwavefunction['wavefunction'] = read_hdf5_wavefunction(qcwavefunction_h5) 30 | 31 | # add wfn info to the total qcschema output 32 | qcschema_dict.update(qcwavefunction) 33 | 34 | # Create DFT object 35 | mol, ks = recreate_scf_obj(qcschema_dict) 36 | 37 | #### Compute Molecular Dipole Moment #### 38 | # load density matrix 39 | alpha_dm = qcwavefunction['wavefunction']['scf_density_a'] 40 | beta_dm = qcwavefunction['wavefunction']['scf_density_b'] 41 | # Combine them into the UKS density matrix with shape (2, n, n) 42 | dm = np.array([alpha_dm, beta_dm]) 43 | # OR compute density matrix from MOs 44 | #mo_occ = ks.mo_occ 45 | #mo_coeff = ks.mo_coeff 46 | #dm = ks.make_rdm1(mo_coeff, mo_occ) 47 | # compute dipole 48 | DipMom = ks.dip_moment(ks.mol, dm, unit='Debye', verbose=3) 49 | # note: instead one can also use using single 'analyze' will compute dipole moment and mulliken population 50 | #an = 
pyscf.scf.hf.analyze(ks) 51 | ############# 52 | 53 | if __name__ == "__main__": 54 | main() 55 | -------------------------------------------------------------------------------- /samples/properties/load_qcschema_result_compute_Frequencies_and_Thermochemistry.py: -------------------------------------------------------------------------------- 1 | ## Computation of Vibrational Frequencies 2 | ## Loads QCSchema format json and hdf5 result from Accelerated DFT 3 | ## and computes dipole moment. 4 | ## Accelerated DFT task must be 'hessian', 5 | ## with the wavefunction output e.g. "write_wavefunction": "last" 6 | import pyscf 7 | from pyscf import gto, dft 8 | from pyscf import hessian 9 | from pyscf.hessian.thermo import * 10 | import json 11 | import numpy as np 12 | from tools.libqcschema import * 13 | from tools.wavefunction_hdf5_to_qcschema import * 14 | import argparse 15 | 16 | def main(): 17 | parser = argparse.ArgumentParser(description="Compute frequencies and thermochemistry from QCSchema and wavefunction files.") 18 | parser.add_argument("qcschema_json", help="Path to the QCSchema JSON file.") 19 | parser.add_argument("qcwavefunction_h5", help="Path to the wavefunction HDF5 file.") 20 | args = parser.parse_args() 21 | 22 | qcschema_json = args.qcschema_json 23 | qcwavefunction_h5 = args.qcwavefunction_h5 24 | 25 | # Load Accelerated DFT output json 26 | qcschema_dict = load_qcschema_json(qcschema_json) 27 | 28 | # Load wavefunction from hdf5 29 | qcwavefunction = {} 30 | qcwavefunction['wavefunction'] = read_hdf5_wavefunction(qcwavefunction_h5) 31 | 32 | # add wfn info to the total qcschema output 33 | qcschema_dict.update(qcwavefunction) 34 | 35 | # Load Accelerated DFT qschema_output json and Create DFT object 36 | mol, ks = recreate_scf_obj(qcschema_dict) 37 | 38 | # Load Hessian from QcSchema output 39 | h = load_qcschema_hessian(qcschema_dict) 40 | 41 | # Compute Vibrational Frequencies 42 | freq = harmonic_analysis(mol,h) 43 | 
dump_normal_mode(mol,freq) 44 | 45 | # Compute Thermochemistry 46 | thermochem = thermo(ks,freq['freq_au'], 298.15) 47 | print("Thermochem:",thermochem) 48 | 49 | if __name__ == "__main__": 50 | main() 51 | -------------------------------------------------------------------------------- /samples/properties/load_qcschema_result_compute_HFC.py: -------------------------------------------------------------------------------- 1 | ## Computation of Hyper-Fine Coupling. 2 | ## Loads QCSchema format json and hdf5 result from Accelerated DFT 3 | ## and computes dipole moment. 4 | ## Accelerated DFT task can be energy, gradient, hessian, 5 | ## or geometry optimization 6 | ## with the wavefunction output e.g. "write_wavefunction": "last" 7 | import pyscf 8 | from tools.libqcschema import * 9 | from pyscf import gto, dft 10 | import json 11 | import numpy as np 12 | from tools.libqcschema import * 13 | from tools.wavefunction_hdf5_to_qcschema import * 14 | import argparse 15 | 16 | def main(): 17 | parser = argparse.ArgumentParser(description="Compute hyperfine coupling from QCSchema and wavefunction files.") 18 | parser.add_argument("qcschema_json", help="Path to the QCSchema JSON file.") 19 | parser.add_argument("qcwavefunction_h5", help="Path to the wavefunction HDF5 file.") 20 | args = parser.parse_args() 21 | 22 | qcschema_json = args.qcschema_json 23 | qcwavefunction_h5 = args.qcwavefunction_h5 24 | 25 | # Load Accelerated DFT output json 26 | qcschema_dict = load_qcschema_json(qcschema_json) 27 | 28 | # Load wavefunction from hdf5 29 | qcwavefunction = {} 30 | qcwavefunction['wavefunction'] = read_hdf5_wavefunction(qcwavefunction_h5) 31 | 32 | # add wfn info to the total qcschema output 33 | qcschema_dict.update(qcwavefunction) 34 | 35 | # Create DFT object 36 | mol, ks = recreate_scf_obj(qcschema_dict) 37 | 38 | #### HFC #### 39 | from pyscf.prop import hfc 40 | # first populate Grid that scf would normally populate 41 | ks.grids = dft.gen_grid.Grids(mol) 42 | 
ks.grids.build(with_non0tab=True) 43 | print("*** G-Tensor Ouput ***") 44 | # PySCF requires use of uhf not uks 45 | gobj = hfc.uhf.HFC(ks).set(verbose=4) 46 | 47 | # 2-electron SOC for para-magnetic term. 48 | # For settings see: https://github.com/pyscf/properties/blob/master/examples/02-g_tensor.py 49 | gobj.para_soc2e = 'SSO+SOO' 50 | 51 | # Disable Koseki effective SOC charge 52 | gobj.so_eff_charge = False 53 | 54 | gobj.kernel() 55 | ############# 56 | 57 | if __name__ == "__main__": 58 | main() 59 | -------------------------------------------------------------------------------- /samples/properties/load_qcschema_result_compute_IR.py: -------------------------------------------------------------------------------- 1 | ## Computation of Vibrational Frequencies and IR Spectrum. 2 | ## Loads QCSchema format json and hdf5 result from Accelerated DFT 3 | ## and computes dipole moment. 4 | ## Accelerated DFT task must be hessian, 5 | ## with the wavefunction output e.g. "write_wavefunction": "last" 6 | import pyscf 7 | from pyscf import gto, dft 8 | from pyscf import hessian 9 | from pyscf.hessian.thermo import * 10 | from pyscf.prop import infrared 11 | import json 12 | import numpy as np 13 | from tools.libqcschema import * 14 | from tools.wavefunction_hdf5_to_qcschema import * 15 | import argparse 16 | 17 | def main(): 18 | parser = argparse.ArgumentParser(description="Compute IR spectrum from QCSchema and wavefunction files.") 19 | parser.add_argument("qcschema_json", help="Path to the QCSchema JSON file.") 20 | parser.add_argument("qcwavefunction_h5", help="Path to the wavefunction HDF5 file.") 21 | args = parser.parse_args() 22 | 23 | qcschema_json = args.qcschema_json 24 | qcwavefunction_h5 = args.qcwavefunction_h5 25 | 26 | # Load Accelerated DFT output json 27 | qcschema_dict = load_qcschema_json(qcschema_json) 28 | 29 | # Load wavefunction from hdf5 30 | qcwavefunction = {} 31 | qcwavefunction['wavefunction'] = read_hdf5_wavefunction(qcwavefunction_h5) 
32 | 33 | # add wfn info to the total qcschema output 34 | qcschema_dict.update(qcwavefunction) 35 | 36 | # Load Accelerated DFT qschema_output json and Create DFT object 37 | mol, ks = recreate_scf_obj(qcschema_dict) 38 | 39 | # Form Hessian object and Load Hessian from QCSchema json 40 | hessian = ks.Hessian() 41 | hessian.de = load_qcschema_hessian(qcschema_dict) 42 | print("ks.hessian.de",hessian.de) 43 | 44 | # Compute Vibrational Frequencies 45 | freq = harmonic_analysis(mol,hessian.de) 46 | dump_normal_mode(mol,freq) 47 | 48 | # Compute Thermochemistry if desired 49 | #thermochem = thermo(ks,freq['freq_au'], 298.15) 50 | #print("Thermochem:",thermochem) 51 | 52 | ############################### 53 | ##### Compute IR Spectrum ##### 54 | ############################### 55 | # make IR object and populate with info 56 | ks_ir = prepare_ir(ks,hessian,freq) 57 | 58 | # compute IR intensities 59 | infrared.rhf.kernel_dipderiv(ks_ir) 60 | ir_intensity = infrared.rhf.kernel_ir(ks_ir) 61 | # summarise 62 | ks_ir.summary() 63 | # plot - uncomment to show 64 | #fig = ks_ir.plot_ir()[0] 65 | #fig.show() 66 | #fig.savefig("ir_spectrum_C2H4.png") 67 | ############################### 68 | 69 | if __name__ == "__main__": 70 | main() 71 | -------------------------------------------------------------------------------- /samples/properties/load_qcschema_result_compute_LocalizedOrbitals.py: -------------------------------------------------------------------------------- 1 | ## Generation fo Localized orbitals 2 | ## Loads QCSchema format json and hdf5 result from Accelerated DFT 3 | ## and computes dipole moment. 4 | ## Accelerated DFT task can be energy, gradient, hessian, 5 | ## or geometry optimization 6 | ## with the wavefunction output e.g. 
"write_wavefunction": "last" 7 | import pyscf 8 | from pyscf import gto, dft 9 | import json 10 | import numpy as np 11 | from tools.libqcschema import * 12 | from tools.wavefunction_hdf5_to_qcschema import * 13 | import argparse 14 | 15 | def main(): 16 | parser = argparse.ArgumentParser(description="Compute localized orbitals from QCSchema and wavefunction files.") 17 | parser.add_argument("qcschema_json", help="Path to the QCSchema JSON file.") 18 | parser.add_argument("qcwavefunction_h5", help="Path to the wavefunction HDF5 file.") 19 | args = parser.parse_args() 20 | 21 | qcschema_json = args.qcschema_json 22 | qcwavefunction_h5 = args.qcwavefunction_h5 23 | 24 | # Load Accelerated DFT output json 25 | qcschema_dict = load_qcschema_json(qcschema_json) 26 | 27 | # Load wavefunction from hdf5 28 | qcwavefunction = {} 29 | qcwavefunction['wavefunction'] = read_hdf5_wavefunction(qcwavefunction_h5) 30 | 31 | # add wfn info to the total qcschema output 32 | qcschema_dict.update(qcwavefunction) 33 | 34 | # Create DFT object 35 | mol, ks = recreate_scf_obj(qcschema_dict) 36 | 37 | #### Localize Orbitals #### 38 | from pyscf import lo 39 | from pyscf.tools import molden 40 | # Localize occupied orbitals only 41 | select_mo = ks.mo_occ>0 42 | # Localize ALL orbitals (note the significant number of virtual orbitals) 43 | #select_mo = len(ks.mo_occ) 44 | 45 | # Examples of 3 Different Localization methods 46 | # 1. Boys Localization 47 | loc_orb = lo.Boys(mol, ks.mo_coeff[:,select_mo]).kernel() 48 | # save orbitals in molden format 49 | #molden.from_mo(mol, 'boys.molden', loc_orb) 50 | 51 | # 2. Edmiston-Ruedenberg localization 52 | loc_orb = lo.ER(mol, ks.mo_coeff[:,select_mo]).kernel() 53 | # save orbitals in molden format 54 | #molden.from_mo(mol, 'edmiston.molden', loc_orb) 55 | 56 | # 3. 
Pipek-Mezey localization 57 | loc_orb = lo.PM(mol, ks.mo_coeff[:,select_mo]).kernel() 58 | # save orbitals in molden format 59 | #molden.from_mo(mol, 'pm.molden', loc_orb) 60 | 61 | # There are other localization options in PySCF! 62 | # see https://pyscf.org/pyscf_api_docs/pyscf.lo.html 63 | ############# 64 | 65 | if __name__ == "__main__": 66 | main() 67 | -------------------------------------------------------------------------------- /samples/properties/load_qcschema_result_compute_MEP.py: -------------------------------------------------------------------------------- 1 | ## Computation of Molecular Electrostatic Potential (MEP). 2 | ## Loads QCSchema format json and hdf5 result from Accelerated DFT 3 | ## and computes dipole moment. 4 | ## Accelerated DFT task can be energy, gradient, 5 | ## geometry optimization or hessian, 6 | ## with the wavefunction output e.g. "write_wavefunction": "last" 7 | import pyscf 8 | from pyscf import gto, dft 9 | import json 10 | import numpy as np 11 | from tools.libqcschema import * 12 | from tools.wavefunction_hdf5_to_qcschema import * 13 | import argparse 14 | 15 | def main(): 16 | parser = argparse.ArgumentParser(description="Compute MEP from QCSchema and wavefunction files.") 17 | parser.add_argument("qcschema_json", help="Path to the QCSchema JSON file.") 18 | parser.add_argument("qcwavefunction_h5", help="Path to the wavefunction HDF5 file.") 19 | args = parser.parse_args() 20 | 21 | qcschema_json = args.qcschema_json 22 | qcwavefunction_h5 = args.qcwavefunction_h5 23 | 24 | # Load Accelerated DFT output json 25 | qcschema_dict = load_qcschema_json(qcschema_json) 26 | 27 | # Load wavefunction from hdf5 28 | qcwavefunction = {} 29 | qcwavefunction['wavefunction'] = read_hdf5_wavefunction(qcwavefunction_h5) 30 | 31 | # add wfn info to the total qcschema output 32 | qcschema_dict.update(qcwavefunction) 33 | 34 | # Load Accelerated DFT qschema_output json and Create DFT object 35 | mol, ks = recreate_scf_obj(qcschema_dict) 
36 | 37 | #### Molecular Electrostatic Potential MEP/MESP #### 38 | from pyscf.tools import cubegen 39 | cubegen.mep(mol, 'C2H4_pot.cube', ks.make_rdm1()) 40 | ############# 41 | 42 | if __name__ == "__main__": 43 | main() 44 | -------------------------------------------------------------------------------- /samples/properties/load_qcschema_result_compute_Mulliken_Population.py: -------------------------------------------------------------------------------- 1 | ## Computation of Mulliken Population Analaysis. 2 | ## Loads QCSchema format json and hdf5 result from Accelerated DFT 3 | ## and computes dipole moment. 4 | ## Accelerated DFT task can be energy, gradient, hessian, 5 | ## or geometry optimization 6 | ## with the wavefunction output e.g. "write_wavefunction": "last" 7 | import pyscf 8 | from pyscf import gto, dft 9 | import json 10 | import numpy as np 11 | from tools.resp import resp 12 | from tools.libqcschema import * 13 | from tools.wavefunction_hdf5_to_qcschema import * 14 | import argparse 15 | 16 | def main(): 17 | parser = argparse.ArgumentParser(description="Compute Mulliken population analysis from QCSchema and wavefunction files.") 18 | parser.add_argument("qcschema_json", help="Path to the QCSchema JSON file.") 19 | parser.add_argument("qcwavefunction_h5", help="Path to the wavefunction HDF5 file.") 20 | args = parser.parse_args() 21 | 22 | qcschema_json = args.qcschema_json 23 | qcwavefunction_h5 = args.qcwavefunction_h5 24 | 25 | # Load Accelerated DFT output json 26 | qcschema_dict = load_qcschema_json(qcschema_json) 27 | 28 | # Load wavefunction from hdf5 29 | qcwavefunction = {} 30 | qcwavefunction['wavefunction'] = read_hdf5_wavefunction(qcwavefunction_h5) 31 | 32 | # add wfn info to the total qcschema output 33 | qcschema_dict.update(qcwavefunction) 34 | 35 | # Create DFT object 36 | mol, ks = recreate_scf_obj(qcschema_dict) 37 | 38 | # Perform SCF - comment out because we are loading result 39 | #ks.kernel() 40 | 41 | #### Compute 
Mulliken Population Analysis #### 42 | # multiple ways to do this, simplest is 'analyze' call that does Mulliken pop on meta-lowdin orthogonal AOs 43 | an = pyscf.scf.hf.analyze(ks) 44 | ############# 45 | 46 | if __name__ == "__main__": 47 | main() 48 | -------------------------------------------------------------------------------- /samples/properties/load_qcschema_result_compute_NMR.py: -------------------------------------------------------------------------------- 1 | ## Compute NMR shift. 2 | ## Loads QCSchema format json and hdf5 result from Accelerated DFT 3 | ## and computes dipole moment. 4 | ## Accelerated DFT task can be energy, gradient, hessian, 5 | ## or geometry optimization 6 | ## with the wavefunction output e.g. "write_wavefunction": "last" 7 | import pyscf 8 | from tools.libqcschema import * 9 | from pyscf import gto, dft 10 | import json 11 | import numpy as np 12 | import tools.resp 13 | from tools.resp import resp 14 | from tools.libqcschema import * 15 | from tools.wavefunction_hdf5_to_qcschema import * 16 | import argparse 17 | 18 | def main(): 19 | parser = argparse.ArgumentParser(description="Compute NMR shifts from QCSchema and wavefunction files.") 20 | parser.add_argument("qcschema_json", help="Path to the QCSchema JSON file.") 21 | parser.add_argument("qcwavefunction_h5", help="Path to the wavefunction HDF5 file.") 22 | args = parser.parse_args() 23 | 24 | qcschema_json = args.qcschema_json 25 | qcwavefunction_h5 = args.qcwavefunction_h5 26 | 27 | # Load Accelerated DFT output json 28 | qcschema_dict = load_qcschema_json(qcschema_json) 29 | 30 | # Load wavefunction from hdf5 31 | qcwavefunction = {} 32 | qcwavefunction['wavefunction'] = read_hdf5_wavefunction(qcwavefunction_h5) 33 | 34 | # add wfn info to the total qcschema output 35 | qcschema_dict.update(qcwavefunction) 36 | 37 | # Create DFT object 38 | mol, ks = recreate_scf_obj(qcschema_dict) 39 | 40 | #### NMR #### 41 | # first populate things that scf will normally populate 42 
| ks.grids = dft.gen_grid.Grids(mol) 43 | ks.grids.build(with_non0tab=True) 44 | # now compute NMR chemical shifts 45 | print("*** NMR Ouput ***") 46 | from pyscf.prop import nmr 47 | nmr.RKS(ks).kernel() 48 | ############# 49 | 50 | if __name__ == "__main__": 51 | main() 52 | -------------------------------------------------------------------------------- /samples/properties/load_qcschema_result_compute_Polarizability.py: -------------------------------------------------------------------------------- 1 | ## Computation of Polarizability. 2 | ## Loads QCSchema format json and hdf5 result from Accelerated DFT 3 | ## and computes dipole moment. 4 | ## Accelerated DFT task can be energy, gradient, hessian, 5 | ## or geometry optimization 6 | ## with the wavefunction output e.g. "write_wavefunction": "last" 7 | import pyscf 8 | from pyscf.prop import polarizability 9 | from pyscf import gto, dft 10 | import json 11 | import numpy as np 12 | from tools.libqcschema import * 13 | from tools.wavefunction_hdf5_to_qcschema import * 14 | import argparse 15 | 16 | def main(): 17 | parser = argparse.ArgumentParser(description="Compute polarizability from QCSchema and wavefunction files.") 18 | parser.add_argument("qcschema_json", help="Path to the QCSchema JSON file.") 19 | parser.add_argument("qcwavefunction_h5", help="Path to the wavefunction HDF5 file.") 20 | args = parser.parse_args() 21 | 22 | qcschema_json = args.qcschema_json 23 | qcwavefunction_h5 = args.qcwavefunction_h5 24 | 25 | # Load Accelerated DFT output json 26 | qcschema_dict = load_qcschema_json(qcschema_json) 27 | 28 | # Load wavefunction from hdf5 29 | qcwavefunction = {} 30 | qcwavefunction['wavefunction'] = read_hdf5_wavefunction(qcwavefunction_h5) 31 | 32 | # add wfn info to the total qcschema output 33 | qcschema_dict.update(qcwavefunction) 34 | 35 | # Create DFT object 36 | mol, ks = recreate_scf_obj(qcschema_dict) 37 | 38 | ### Compute Polarizability ### 39 | 
print(polarizability.rks.Polarizability(ks).polarizability()) 40 | 41 | if __name__ == "__main__": 42 | main() 43 | -------------------------------------------------------------------------------- /samples/properties/load_qcschema_result_compute_RESP.py: -------------------------------------------------------------------------------- 1 | ## Computation of RESP Charges. 2 | ## Loads QCSchema format json and hdf5 result from Accelerated DFT 3 | ## and computes dipole moment. 4 | ## Accelerated DFT task can be energy, gradient, hessian, 5 | ## or geometry optimization 6 | ## with the wavefunction output e.g. "write_wavefunction": "last" 7 | import pyscf 8 | from tools.libqcschema import * 9 | from pyscf import gto, dft 10 | import json 11 | import numpy as np 12 | import tools.resp 13 | from tools.resp import resp 14 | from tools.libqcschema import * 15 | from tools.wavefunction_hdf5_to_qcschema import * 16 | import argparse 17 | 18 | def main(): 19 | parser = argparse.ArgumentParser(description="Compute RESP charges from QCSchema and wavefunction files.") 20 | parser.add_argument("qcschema_json", help="Path to the QCSchema JSON file.") 21 | parser.add_argument("qcwavefunction_h5", help="Path to the wavefunction HDF5 file.") 22 | args = parser.parse_args() 23 | 24 | qcschema_json = args.qcschema_json 25 | qcwavefunction_h5 = args.qcwavefunction_h5 26 | 27 | # Load Accelerated DFT output json 28 | qcschema_dict = load_qcschema_json(qcschema_json) 29 | 30 | # Load wavefunction from hdf5 31 | qcwavefunction = {} 32 | qcwavefunction['wavefunction'] = read_hdf5_wavefunction(qcwavefunction_h5) 33 | 34 | # add wfn info to the total qcschema output 35 | qcschema_dict.update(qcwavefunction) 36 | 37 | # Create DFT object 38 | mol, ks = recreate_scf_obj(qcschema_dict) 39 | 40 | #### Compute RESP Charges #### 41 | # Assigns charge to each atom by fitting the electrostatic potential evaulated on a grid. 42 | # Function returns a list of charges. 
43 | # The sum of the charges is constrained to the molecular charge given. 44 | # Charges are restrained using a penlaty function so they don't grow too large . 45 | # By default, Hydrogen is the only atom unrestrained (i.e. it is FREE), but this can be changed by setting 'IHFREE': False. 46 | # Below are 4 separate examples of computing RESP charges. 47 | 48 | # 1. simplest case, default options. 49 | print("") 50 | print('1. partial charge with RESP') 51 | options = {} 52 | q = resp(ks,options) 53 | print(q) # default 54 | 55 | # 2. Adding options. Set VDW radii scheme. and save the results to file. 56 | # Note: generates new files. 57 | # Also turn off printing 58 | print("") 59 | print('2. partial charge with RESP') 60 | options = {'VDW_SCHEME': 'VDW_mod', 'SAVE': True} 61 | q = resp(ks,options) 62 | print(q) 63 | 64 | # 3. Hydrogens charge is not restricted by default, restricting here. 65 | # Also turning off printing in resp code and print here instead 66 | print("") 67 | print('3. partial charge with RESP') 68 | options = {'VDW_SCHEME': 'VDW', 'IHFREE': False, 'PRINTQ': False} 69 | q = resp(ks,options) 70 | print(q) 71 | print("") 72 | ############# 73 | 74 | 75 | if __name__ == "__main__": 76 | main() 77 | -------------------------------------------------------------------------------- /samples/properties/load_qcschema_result_compute_Spin.py: -------------------------------------------------------------------------------- 1 | ## Computation of RESP Charges. 2 | ## Loads QCSchema format json and hdf5 result from Accelerated DFT 3 | ## and computes dipole moment. 4 | ## Accelerated DFT task can be energy, gradient, hessian, 5 | ## or geometry optimization 6 | ## with the wavefunction output e.g. 
"write_wavefunction": "last" 7 | import pyscf 8 | from pyscf import gto, dft 9 | import json 10 | import numpy as np 11 | from tools.libqcschema import * 12 | from tools.wavefunction_hdf5_to_qcschema import * 13 | import argparse 14 | 15 | def main(): 16 | parser = argparse.ArgumentParser(description="Compute spin properties from QCSchema and wavefunction files.") 17 | parser.add_argument("qcschema_json", help="Path to the QCSchema JSON file.") 18 | parser.add_argument("qcwavefunction_h5", help="Path to the wavefunction HDF5 file.") 19 | args = parser.parse_args() 20 | 21 | qcschema_json = args.qcschema_json 22 | qcwavefunction_h5 = args.qcwavefunction_h5 23 | 24 | # Load Accelerated DFT output json 25 | qcschema_dict = load_qcschema_json(qcschema_json) 26 | 27 | # Load wavefunction from hdf5 28 | qcwavefunction = {} 29 | qcwavefunction['wavefunction'] = read_hdf5_wavefunction(qcwavefunction_h5) 30 | 31 | # add wfn info to the total qcschema output 32 | qcschema_dict.update(qcwavefunction) 33 | 34 | # Create DFT object 35 | mol, ks = recreate_scf_obj(qcschema_dict) 36 | 37 | # Compute Milliken charges, spin density and Dipole moment 38 | # note: need uhf or hf in analyze call 39 | an = pyscf.scf.uhf.analyze(ks) 40 | 41 | # Compute < \hat{S}^2 > amd 2S+1 using occupied MOs 42 | # note hf or uhf in call 43 | mo = (ks.mo_coeff[0][:,ks.mo_occ[0]>0], ks.mo_coeff[1][:,ks.mo_occ[1]>0]) 44 | print('S^2 = %.7f, 2S+1 = %.7f' % pyscf.scf.uhf.spin_square(mo, mol.intor('int1e_ovlp_sph'))) 45 | 46 | if __name__ == "__main__": 47 | main() 48 | -------------------------------------------------------------------------------- /samples/properties/load_qcschema_result_compute_cubegen.py: -------------------------------------------------------------------------------- 1 | ## Generation of Cube file. 2 | ## Loads QCSchema format json and hdf5 result from Accelerated DFT 3 | ## and computes dipole moment. 
4 | ## Accelerated DFT task can be energy, gradient, hessian, 5 | ## or geometry optimization 6 | ## with the wavefunction output e.g. "write_wavefunction": "last" 7 | import pyscf 8 | from tools.libqcschema import * 9 | from pyscf import gto, dft 10 | import json 11 | import numpy as np 12 | from tools.libqcschema import * 13 | from tools.wavefunction_hdf5_to_qcschema import * 14 | import argparse 15 | 16 | def main(): 17 | parser = argparse.ArgumentParser(description="Compute cube file from QCSchema and wavefunction files.") 18 | parser.add_argument("qcschema_json", help="Path to the QCSchema JSON file.") 19 | parser.add_argument("qcwavefunction_h5", help="Path to the wavefunction HDF5 file.") 20 | args = parser.parse_args() 21 | 22 | qcschema_json = args.qcschema_json 23 | qcwavefunction_h5 = args.qcwavefunction_h5 24 | 25 | # Load Accelerated DFT output json 26 | qcschema_dict = load_qcschema_json(qcschema_json) 27 | 28 | # Load wavefunction from hdf5 29 | qcwavefunction = {} 30 | qcwavefunction['wavefunction'] = read_hdf5_wavefunction(qcwavefunction_h5) 31 | 32 | # add wfn info to the total qcschema output 33 | qcschema_dict.update(qcwavefunction) 34 | 35 | # Create DFT object 36 | mol, ks = recreate_scf_obj(qcschema_dict) 37 | 38 | #### Molecular Electrostatic Potential MEP/MESP #### 39 | from pyscf.tools import cubegen 40 | cubegen.mep(mol, 'C2H4_pot.cube', ks.make_rdm1()) 41 | ############# 42 | 43 | if __name__ == "__main__": 44 | main() 45 | -------------------------------------------------------------------------------- /samples/properties/load_qcschema_result_compute_gTensor.py: -------------------------------------------------------------------------------- 1 | ## Compute g-Tensor. 2 | ## Loads QCSchema format json and hdf5 result from Accelerated DFT 3 | ## and computes dipole moment. 4 | ## Accelerated DFT task can be energy, gradient, hessian, 5 | ## or geometry optimization 6 | ## with the wavefunction output e.g. 
"write_wavefunction": "last" 7 | import pyscf 8 | from tools.libqcschema import * 9 | from pyscf import gto, dft 10 | import json 11 | import numpy as np 12 | import tools.resp 13 | from tools.resp import resp 14 | from tools.libqcschema import * 15 | from tools.wavefunction_hdf5_to_qcschema import * 16 | import argparse 17 | 18 | def main(): 19 | parser = argparse.ArgumentParser(description="Compute g-tensor from QCSchema and wavefunction files.") 20 | parser.add_argument("qcschema_json", help="Path to the QCSchema JSON file.") 21 | parser.add_argument("qcwavefunction_h5", help="Path to the wavefunction HDF5 file.") 22 | args = parser.parse_args() 23 | 24 | qcschema_json = args.qcschema_json 25 | qcwavefunction_h5 = args.qcwavefunction_h5 26 | 27 | # Load Accelerated DFT output json 28 | qcschema_dict = load_qcschema_json(qcschema_json) 29 | 30 | # Load wavefunction from hdf5 31 | qcwavefunction = {} 32 | qcwavefunction['wavefunction'] = read_hdf5_wavefunction(qcwavefunction_h5) 33 | 34 | # add wfn info to the total qcschema output 35 | qcschema_dict.update(qcwavefunction) 36 | 37 | # Create DFT object 38 | mol, ks = recreate_scf_obj(qcschema_dict) 39 | 40 | #### G-Tensor #### 41 | # first populate Grid that scf would normally populate 42 | ks.grids = dft.gen_grid.Grids(mol) 43 | ks.grids.build(with_non0tab=True) 44 | print("*** G-Tensor Ouput ***") 45 | from pyscf.prop import gtensor 46 | # create object 47 | gobj = gtensor.uks.GTensor(ks).set(verbose=4) 48 | # settings may be manipulated, for details see 49 | # https://github.com/pyscf/properties/blob/master/examples/02-g_tensor.py 50 | gobj.kernel() 51 | ############# 52 | 53 | if __name__ == "__main__": 54 | main() 55 | -------------------------------------------------------------------------------- /samples/properties/tools/chelpg.py: -------------------------------------------------------------------------------- 1 | ## CHarges from ELectrostatic Potentials using a Grid-based method ## 2 | ## CHELPG charges: 
## Breneman and Wiberg (J. Comp. Chem. 1990, 11, 361)
## Defaults for grid spacing and size are taken from this paper.
## For options see function definition below
#
## Description: compute ESP on grid points around the molecule.
#               grid points are arranged in a cuboid,
#               with any points falling inside the VDW radii being removed.
#               Hence VDW defs are important.
#
import pyscf
import numpy as np
import scipy
import scipy.spatial  # explicit: scipy.spatial.distance.cdist is used below
import ctypes
from pyscf import lib, gto
from pyscf.scf import _vhf
from pyscf.lib.parameters import BOHR
from pyscf import data

def chelpg_charges(mf, options=None):
    """Calculate CHELPG charges.

    Args:
        mf: mean field object in pyscf
        options (dict, optional):
            deltaR (float, optional): the grid spacing in the cube, Angstrom. Defaults to 0.3.
            Rhead (float, optional): the head length of the cube, Angstrom. Defaults to 2.8.
            ifqchem (bool, optional): whether to use the smoothing modification
                from Q-Chem. Defaults to True.
            VDW_SCHEME (str, optional): what VDW radii scheme to use.
                For details see https://github.com/pyscf/pyscf/blob/master/pyscf/data/radii.py
                Choices: VDW      - vdw from ASE a.k.a. Bondi
                         VDW_mod  - (default) VDW but with H modified to 1.1A. (recommended)
                         UFF      - Universal Force Field
                         MM3      - Allinger's MM3
                         BRAGG    - JCP 41, 3199 (1964)
                         COVALENT - covalent radius
            VDW_RADII (dict, optional): user defined vdw radii to use, in Angstrom
            printq (bool, optional): whether to print charges and scheme at the
                end of this function. Defaults to False.
                (docstring fixed: the code default has always been False)

    Returns:
        numpy.array: charges
    """
    # Check input options
    if options is None:
        options = {}

    # check print
    if 'printq' not in options:
        options['printq'] = False

    # VDW surface options
    deltaR = options.get('deltaR', 0.3)
    Rhead = options.get('Rhead', 2.8)
    # BUG FIX: original did `Rhead = options['ifqchem']`, which clobbered Rhead
    # AND left `ifqchem` undefined (NameError later) whenever the caller set it.
    ifqchem = options.get('ifqchem', True)

    radii = {}
    if 'VDW_RADII' in options:
        options['VDW_SCHEME'] = 'USER'
    # Use PySCF built-in VDW schemes from pyscf.data.radii
    if 'VDW_SCHEME' not in options:
        options['VDW_SCHEME'] = 'VDW_mod'

    scheme = options['VDW_SCHEME']
    if scheme == 'VDW_mod':
        # modified Bondi.
        # BUG FIX: copy before editing - the original modified pyscf's shared
        # data.radii.VDW table in place, corrupting later 'VDW' scheme calls.
        vdw_array = data.radii.VDW.copy()
        vdw_array[1] = 1.1/BOHR
    elif scheme == 'VDW':
        vdw_array = data.radii.VDW
    elif scheme == 'UFF':
        vdw_array = data.radii.UFF
    elif scheme == 'MM3':
        vdw_array = data.radii.MM3
    elif scheme == 'BRAGG':
        vdw_array = data.radii.BRAGG
    elif scheme == 'COVALENT':
        vdw_array = data.radii.COVALENT
    elif scheme != 'USER':
        # fail loudly instead of the original's NameError further down
        raise ValueError('Unknown VDW_SCHEME: %s' % scheme)

    if scheme != 'USER':
        # in all pyscf data tables element 0 is not useful. H is at 1
        vdw_array = vdw_array[1:]
        # convert to Dict keyed by atomic number
        radii = dict(enumerate(vdw_array.flatten(), 1))
    else:
        # user defined scheme, radii given in Angstrom.
        # BUG FIX: build a new dict - the original divided the caller's
        # options['VDW_RADII'] values in place, so a second call would
        # convert them to Bohr twice.
        radii = {key: val / BOHR for key, val in options['VDW_RADII'].items()}

    ##

    # define extra params (in Bohr)
    Roff = Rhead/BOHR
    Deltar = 0.1

    # smoothing function
    def tau_f(R, Rcut, Roff):
        return (R - Rcut)**2 * (3*Roff - Rcut - 2*R) / (Roff - Rcut)**3

    #### Check the atoms have a defined VDW radius in the chosen scheme
    elements = np.array(mf.mol._atm[:, 0])
    for i in elements:
        if i not in radii.keys() or radii[i] == 0.0:
            raise KeyError('%s is not a supported element; ' %i
                           + 'use the "VDW_RADII" option to add '
                           + 'its van der Waals radius.')
    #### atom check complete

    atomcoords = mf.mol.atom_coords(unit='B')
    # density matrix, needed for the electronic ESP below (computed once)
    dm = np.array(mf.make_rdm1())

    Rshort = np.array([radii[iatom] for iatom in mf.mol._atm[:, 0]])
    idxxmin = np.argmin(atomcoords[:, 0] - Rshort)
    idxxmax = np.argmax(atomcoords[:, 0] + Rshort)
    idxymin = np.argmin(atomcoords[:, 1] - Rshort)
    idxymax = np.argmax(atomcoords[:, 1] + Rshort)
    idxzmin = np.argmin(atomcoords[:, 2] - Rshort)
    idxzmax = np.argmax(atomcoords[:, 2] + Rshort)
    atomtypes = np.array(mf.mol._atm[:, 0])
    # Generate the grids in the cube
    xmin = atomcoords[:, 0].min() - Rhead/BOHR - radii[atomtypes[idxxmin]]
    xmax = atomcoords[:, 0].max() + Rhead/BOHR + radii[atomtypes[idxxmax]]
    ymin = atomcoords[:, 1].min() - Rhead/BOHR - radii[atomtypes[idxymin]]
    ymax = atomcoords[:, 1].max() + Rhead/BOHR + radii[atomtypes[idxymax]]
    zmin = atomcoords[:, 2].min() - Rhead/BOHR - radii[atomtypes[idxzmin]]
    zmax = atomcoords[:, 2].max() + Rhead/BOHR + radii[atomtypes[idxzmax]]
    x = np.arange(xmin, xmax, deltaR/BOHR)
    y = np.arange(ymin, ymax, deltaR/BOHR)
    z = np.arange(zmin, zmax, deltaR/BOHR)
    gridcoords = np.meshgrid(x, y, z)
    gridcoords = np.vstack(list(map(np.ravel, gridcoords))).T

    # [natom, ngrids] distance between an atom and a grid point
    r_pX = scipy.spatial.distance.cdist(atomcoords, gridcoords)
    # delete the grids inside the vdw surface and outside the Rhead surface.
    # the minimum distance to any atom
    Rkmin = (r_pX - np.expand_dims(Rshort, axis=1)).min(axis=0)
    Ron = Rshort + Deltar
    Rlong = Roff - Deltar
    AJk = np.ones(r_pX.shape)  # the short-range weight
    idx = r_pX < np.expand_dims(Rshort, axis=1)
    AJk[idx] = 0
    if ifqchem:
        idx2 = (r_pX < np.expand_dims(Ron, axis=1)) * \
            (r_pX >= np.expand_dims(Rshort, axis=1))
        AJk[idx2] = tau_f(r_pX, np.expand_dims(Rshort, axis=1),
                          np.expand_dims(Ron, axis=1))[idx2]
        wLR = 1 - tau_f(Rkmin, Rlong, Roff)  # the long-range weight
        idx1 = Rkmin < Rlong
        idx2 = Rkmin > Roff
        wLR[idx1] = 1
        wLR[idx2] = 0
    else:
        wLR = np.ones(r_pX.shape[-1])  # the long-range weight
        idx = Rkmin > Roff
        wLR[idx] = 0
    w = wLR*np.prod(AJk, axis=0)  # weight for a specific point
    idx = w <= 1.0E-14
    w = np.delete(w, idx)
    r_pX = np.delete(r_pX, idx, axis=1)
    gridcoords = np.delete(gridcoords, idx, axis=0)

    ngrids = gridcoords.shape[0]
    r_pX_potential = 1/r_pX
    # nuclear part of electrostatic potential (ESP)
    potential_real = np.dot(np.array(
        mf.mol.atom_charges()), r_pX_potential)

    # add in the electronic part of ESP..need to do in batches if large.
    # memory required: ngrids*(NBasis*NBasis)*8, divide by (1024**3) for GB
    try:
        ### define batch size based on available memory - if psutil installed
        import psutil
        NBasis = int(mf.mol.nao_nr())
        mem_avail = psutil.virtual_memory()[1]
        grid_avail = int(mem_avail/(8*(NBasis)*(NBasis)))
        # batch size, use 90% of available memory
        nbatch = min(int(grid_avail*0.90), ngrids)
    except Exception:
        # hard coded batch size - may fail, just adjust nbatch
        nbatch = 128*128

    try:
        for ibatch in range(0, ngrids, nbatch):
            max_grid = min(ibatch+nbatch, ngrids)
            grid_bit = gridcoords[ibatch:max_grid]
            potential_real[ibatch:max_grid] -= np.einsum(
                'pij,ij->p', mf.mol.intor('int1e_grids', grids=grid_bit), dm)
    except Exception:
        # preserved best-effort behaviour: report and bail out
        print("Out of memory in func chelpg_charges. Make batch size (nbatch) smaller")
        return ()

    # least-squares fit of point charges to the ESP with total-charge constraint
    w = np.array(w)
    r_pX_potential_omega = r_pX_potential*w
    GXA = r_pX_potential_omega@r_pX_potential.T
    eX = r_pX_potential_omega@potential_real
    GXA_inv = np.linalg.inv(GXA)
    g = GXA_inv@eX
    alpha = (g.sum() - mf.mol.charge)/(GXA_inv.sum())
    q = g - alpha*GXA_inv@np.ones((mf.mol.natm))

    # print output
    if options['printq']:
        print('VDW SCHEME', options['VDW_SCHEME'])
        print("charges:", q)

    return q

# -------------------------------------------------------------------------------- /samples/properties/tools/espfit.py:
"""
Fitting procedure for RESP charges.

Reference:
Equations taken from Bayly, J.Phys.Chem,97,10271 (1993)
"""
# Adapted and Extended from Psi4NumPy by Microsoft under BSD-3.
# Original credit:
#__copyright__ = "(c) 2014-2018, The Psi4NumPy Developers"
#__license__ = "BSD-3-Clause"
#
import copy
import warnings
import numpy as np


def esp_solve(A, B):
    """Solve the linear system A*q = B for the point charges.

    Parameters
    ----------
    A : ndarray
        array of matrix A
    B : ndarray
        array of matrix B

    Return
    ------
    q : ndarray
        array of charges
    """
    q = np.linalg.solve(A, B)
    # np.linalg.solve can quietly return garbage for a numerically singular
    # system, so flag ill-conditioned matrices explicitly.
    if np.linalg.cond(A) > 1/np.finfo(A.dtype).eps:
        warnings.warn("Possible fit problem; singular matrix")
    return q


def restraint(q, A_unrestrained, resp_a, resp_b, ihfree, symbols):
    """Add the hyperbolic restraint to matrix A.

    Parameters
    ----------
    q : ndarray
        array of charges
    A_unrestrained : ndarray
        array of unrestrained A matrix
    resp_a : float
        restraint scale a
    resp_b : float
        restraint parabola tightness b
    ihfree : bool
        if True, hydrogens (symbol '1') are excluded from the restraint
    symbols : ndarray
        array of element symbols (atomic numbers as strings)

    Returns
    -------
    ndarray
        restrained A array
    """
    # hyperbolic restraint, Bayly, J.Phys.Chem,97,10271 (1993) (Eqs. 10,13)
    restrained = copy.deepcopy(A_unrestrained)
    for i, sym in enumerate(symbols):
        # skip hydrogens when they are to be left unrestrained
        if ihfree and sym == '1':
            continue
        restrained[i, i] = A_unrestrained[i, i] + resp_a/np.sqrt(q[i]**2 + resp_b**2)
    return restrained


def iterate(q, A_unrestrained, B, resp_a, resp_b, ihfree, symbols, toler, maxit):
    """Iterate the RESP fitting procedure to self-consistency.

    Parameters
    ----------
    q : ndarray
        array of initial charges
    A_unrestrained : ndarray
        array of unrestrained A matrix
    B : ndarray
        array of matrix B
    resp_a : float
        restraint scale a
    resp_b : float
        restraint parabola tightness b
    ihfree : bool
        whether hydrogens are excluded or included in restraint
    symbols : ndarray
        array of element symbols
    toler : float
        tolerance for charges in the fitting
    maxit : int
        maximum number of iterations

    Returns
    -------
    (ndarray, str)
        fitted charges (atom entries only) and a note on convergence
    """
    natoms = len(symbols)
    q_last = copy.deepcopy(q)
    niter = 0
    dif = 2*toler
    note = ''
    while dif > toler and niter < maxit:
        niter += 1
        q = esp_solve(restraint(q, A_unrestrained, resp_a, resp_b, ihfree, symbols), B)
        # convergence is measured on the charge entries only,
        # not on the Lagrange-multiplier rows
        dif = np.sqrt(np.max((q[:natoms] - q_last[:natoms])**2))
        q_last = copy.deepcopy(q)

    if dif > toler:
        note += ('\nCharge fitting did not converge; ' +
                 'try increasing the maximum number of iterations to ' +
                 '> %i.' %maxit)
    return q[:natoms], note


def intramolecular_constraints(constraint_charge, constraint_groups):
    """Extract intramolecular constraints from the user constraint input.

    Parameters
    ----------
    constraint_charge : list
        list of [charge, [atom indices]] entries,
        e.g. [[0, [1, 2]], [1, [3, 4]]]:
        the charges on atoms 1+2 sum to 0, on atoms 3+4 to 1
    constraint_groups : list
        list of lists of indices of atoms to share an equal charge,
        e.g. [[1, 2], [3, 4]]

    Returns
    -------
    constrained_charges : list
        list of fixed charges
    constrained_indices : list
        list of lists of atom indices per constraint; a negative index
        means that atom's charge enters with a minus sign

    Notes
    -----
    Atom indices start with 1, not 0.
    The total charge constraint is added by default for the first molecule.
    """
    constrained_charges = []
    constrained_indices = []
    for target, members in constraint_charge:
        constrained_charges.append(target)
        constrained_indices.append(list(members))

    # equal-charge groups become pairwise "q_j - q_{j-1} = 0" constraints
    for grp in constraint_groups:
        for prev, cur in zip(grp, grp[1:]):
            constrained_charges.append(0)
            constrained_indices.append([-prev, cur])

    return constrained_charges, constrained_indices


def fit(options, data):
    """Perform the ESP fit and, optionally, the restrained (RESP) fit.

    Parameters
    ----------
    options : dict
        fitting options (CONSTRAINT_CHARGE, CONSTRAINT_GROUP, RESTRAINT,
        RESP_A, RESP_B, IHFREE, TOLER, MAX_IT)
    data : dict
        internal data (natoms, symbols, invr, esp_values, mol_charge)

    Returns
    -------
    qf : list
        list of ndarrays of fitted charges
    labelf : list
        list of strings of fitting methods i.e. ESP and RESP
    note : str
        string of notes on the fitting
    """
    qf = []
    labelf = []
    constraint_charges, constraint_indices = intramolecular_constraints(
        options['CONSTRAINT_CHARGE'], options['CONSTRAINT_GROUP'])
    natoms = data['natoms']
    # one row per atom + total-charge row + one row per extra constraint
    ndim = natoms + 1 + len(constraint_charges)
    A = np.zeros((ndim, ndim))
    B = np.zeros(ndim)

    # Bayly, J.Phys.Chem,97,10271 (1993) (Eqs. 12-14)
    for imol in range(len(data['invr'])):
        r_inverse = data['invr'][imol]
        V = data['esp_values'][imol]
        # a_jk = sum_i (1/r_ij)(1/r_ik); b_j = sum_i V_i/r_ij
        A[:natoms, :natoms] += np.einsum("ij, ik -> jk", r_inverse, r_inverse)
        B[:natoms] += np.einsum('i, ij->j', V, r_inverse)

    # total charge constraint
    A[:natoms, natoms] = 1
    A[natoms, :natoms] = 1
    B[natoms] = data['mol_charge']

    # user constraints; a negative index contributes with a minus sign
    for row, (charge, members) in enumerate(zip(constraint_charges,
                                                constraint_indices),
                                            start=natoms + 1):
        B[row] = charge
        for k in members:
            col = abs(k) - 1
            sign = 1 if k > 0 else -1
            A[row, col] = sign
            A[col, row] = sign

    labelf.append('ESP')
    q = esp_solve(A, B)
    qf.append(q[:natoms])
    if not options['RESTRAINT']:
        return qf, labelf, ''

    # Restrained ESP
    labelf.append('RESP')
    q, note = iterate(q, A, B, options['RESP_A'], options['RESP_B'],
                      options['IHFREE'], data['symbols'],
                      options['TOLER'], options['MAX_IT'])
    qf.append(q)
    return qf, labelf, note

# -------------------------------------------------------------------------------- /samples/properties/tools/heat_of_formation.py:
-------------------------------------------------------------------------------- 1 | from pyscf.hessian.thermo import harmonic_analysis, thermo 2 | from pyscf import dft 3 | from .libqcschema import * 4 | from pint import UnitRegistry 5 | import numpy as np 6 | 7 | 8 | # multiplicity of the atom 9 | 10 | # experimental value of heat of formation, kcal/mol 11 | # Experimental enthalpy values taken from Curtiss, et. al., J. Chem. Phys. 106, 1063 (1997). 12 | 13 | # the correction to enthalpy at 298K, kcal/mol 14 | # Calculated enthalpy values taken from J. Am. Chem. Soc. 117, 11299 (1995). 15 | 16 | # the entropy of the atom at 298K, cal/mol*k 17 | # Entropy values taken from JANAF Thermochemical Tables: M. W. Chase, Jr., C. A. Davies, J. R. Downey, Jr., D. J. Frurip, R. A. McDonald, and A. N. Syverud, J. Phys. Ref. Data 14 Suppl. No. 1 (1985). 18 | 19 | atom_info = {'H' :{'multiplicity':2, 'enthalpy_0K':51.63 , 'correction_298K': 1.01, 'entropy_298K': 27.418}, 20 | 'Li':{'multiplicity':2, 'enthalpy_0K':37.69 , 'correction_298K': 1.10, 'entropy_298K': 33.169}, 21 | 'Be':{'multiplicity':1, 'enthalpy_0K':76.48 , 'correction_298K': 0.46, 'entropy_298K': 32.570}, 22 | 'B' :{'multiplicity':2, 'enthalpy_0K':136.20, 'correction_298K': 0.29, 'entropy_298K': 36.672}, 23 | 'C' :{'multiplicity':3, 'enthalpy_0K':169.98, 'correction_298K': 0.25, 'entropy_298K': 37.787}, 24 | 'N' :{'multiplicity':4, 'enthalpy_0K':112.53, 'correction_298K': 1.04, 'entropy_298K': 36.640}, 25 | 'O' :{'multiplicity':3, 'enthalpy_0K':58.99 , 'correction_298K': 1.04, 'entropy_298K': 38.494}, 26 | 'F' :{'multiplicity':2, 'enthalpy_0K':18.47 , 'correction_298K': 1.05, 'entropy_298K': 37.942}, 27 | 'Na':{'multiplicity':2, 'enthalpy_0K':25.69 , 'correction_298K': 1.54, 'entropy_298K': 36.727}, 28 | 'Mg':{'multiplicity':1, 'enthalpy_0K':34.87 , 'correction_298K': 1.19, 'entropy_298K': 8.237 }, 29 | 'Al':{'multiplicity':2, 'enthalpy_0K':78.23 , 'correction_298K': 1.08, 'entropy_298K': 39.329}, 30 | 
'Si':{'multiplicity':3, 'enthalpy_0K':106.6 , 'correction_298K': 0.76, 'entropy_298K': 40.148}, 31 | 'P' :{'multiplicity':4, 'enthalpy_0K':75.42 , 'correction_298K': 1.28, 'entropy_298K': 39.005}, 32 | 'S' :{'multiplicity':3, 'enthalpy_0K':65.66 , 'correction_298K': 1.05, 'entropy_298K': 40.112}, 33 | 'Cl':{'multiplicity':2, 'enthalpy_0K':28.59 , 'correction_298K': 1.10, 'entropy_298K': 39.481}, 34 | } 35 | 36 | 37 | # define N as Avogadro constant 38 | u = UnitRegistry() 39 | N = u.Quantity(1, u.avogadro_constant) 40 | 41 | 42 | def convert_energy_au_to_kJmol(energy_au: float): 43 | 44 | energy_au = energy_au * u.hartree 45 | energy_kJmol = energy_au.to('kilojoule') * N.to_base_units() 46 | 47 | return energy_kJmol.magnitude 48 | 49 | 50 | def convert_energy_kcalmol_to_au(energy_kcalmol: float): 51 | 52 | energy_kcalmol = energy_kcalmol * u.kilocalorie 53 | energy_au = energy_kcalmol.to('hartree') / N.to_base_units() 54 | 55 | return energy_au.magnitude 56 | 57 | 58 | def get_optmized_xyz(json): 59 | 60 | BtoA = 0.5291772109 61 | 62 | syms = np.array(json["final_molecule"]["symbols"]) 63 | geo = np.array(json["final_molecule"]["geometry"])*BtoA 64 | NAtoms = len(syms) 65 | geo = np.reshape(geo, (NAtoms,3)) 66 | 67 | # Concatenate the symbols and coordinates along the second axis 68 | combined = np.concatenate([syms[:, np.newaxis], geo], axis=1) 69 | 70 | # Convert the combined array to a string with spaces as separators 71 | output = np.array2string(combined, separator=' ', max_line_width=np.inf) 72 | 73 | # Remove the brackets and quotes from the output string 74 | coords = output.replace('[', '').replace(']', '').replace("'", '') 75 | 76 | # Remove leading whitespace from each line 77 | coords = '\n'.join([line.strip() for line in coords.split('\n')]) 78 | 79 | xyz = f'{NAtoms}\n\n' 80 | 81 | return xyz+coords 82 | 83 | 84 | def get_composition(xyz): 85 | 86 | composition = {} 87 | for line in xyz.split('\n')[2:]: 88 | element = line.split()[0] 89 | if element 
in composition: 90 | composition[element] += 1 91 | else: 92 | composition[element] = 1 93 | 94 | return composition 95 | 96 | 97 | def get_cbs_result(e_0, e_1, e_2): 98 | return (e_0*e_2-e_1**2)/(e_0-2*e_1+e_2) 99 | 100 | 101 | def get_34cbs_result(e_3, e_4): 102 | # using def2 basis set, a=7.88, ref orca manual 5.0.3 eq8.1 103 | from math import exp, sqrt 104 | return (e_4*exp(-7.88*sqrt(3))-e_3*exp(-7.88*sqrt(4)))/(exp(-7.88*sqrt(3))-exp(-7.88*sqrt(4))) 105 | 106 | 107 | def get_thermo_dict(hessian_results, functional): 108 | 109 | # Create Pyscf Molecule 110 | scf_dict, mol = load_qcschema_mol_scf(hessian_results) 111 | mol.unit = 'B' # QCSchema outputs in Bohr AU 112 | mol.build() 113 | 114 | # Create DFT object 115 | ks = dft.RKS(mol) 116 | ks.xc = functional 117 | 118 | # optional 119 | ks.grids.level = 4 120 | ks.init_guess = 'minao' 121 | ks.conv_tol=1e-8 122 | ks.direct_scf = True 123 | ks.direct_scf_tol = 1e-12 124 | # REQUIRED:Load 4 key pieces of info we got from json into DFT object 125 | ks.mo_coeff = scf_dict["mo_coeff"] 126 | ks.mo_energy = scf_dict["mo_energy"] 127 | ks.mo_occ = scf_dict["mo_occ"] 128 | ks.e_tot = scf_dict["e_tot"] 129 | 130 | # Compute Hessian 131 | h = load_qcschema_hessian(hessian_results) 132 | 133 | # Compute Vibrational Frequencies 134 | freq = harmonic_analysis(mol,h) 135 | 136 | # Compute Thermochemistry 137 | thermochem = thermo(ks,freq['freq_au'], 298.15) 138 | 139 | return thermochem 140 | 141 | 142 | def get_heat_of_formation(energies, thermo, composition): 143 | 144 | heat_of_formation = {'unit': 'kJ/mol'} 145 | 146 | # atomization energy 147 | sum_atom_energy = 0 148 | for element in composition: 149 | atom_energy = energies[element] 150 | sum_atom_energy += composition[element] * atom_energy 151 | 152 | # internal energy is E_0K = E_elec + ZPE 153 | E_internal = thermo['E_0K'] 154 | atomization_energy = sum_atom_energy - E_internal 155 | 156 | # Heat of formation at 0K of the molecule 157 | sum_atom_hof_0K = 0 158 | 
for atom in composition: 159 | atom_hof_0K = atom_info[atom]['enthalpy_0K'] 160 | sum_atom_hof_0K += composition[atom] * atom_hof_0K 161 | 162 | sum_atom_hof_0K_au = convert_energy_kcalmol_to_au(sum_atom_hof_0K) 163 | hof_0K = sum_atom_hof_0K_au - atomization_energy 164 | 165 | hof_0K_kJmol = convert_energy_au_to_kJmol(hof_0K) 166 | 167 | heat_of_formation['0K'] = hof_0K_kJmol 168 | 169 | # sum of ethalpy correction at 298K for constituent atoms 170 | sum_atom_hof_298K_correction = 0 171 | for atom in composition: 172 | atom_hof_298K_correction = atom_info[atom]['correction_298K'] 173 | sum_atom_hof_298K_correction += composition[atom] * atom_hof_298K_correction 174 | 175 | sum_atom_hof_298K_correction_au = convert_energy_kcalmol_to_au(sum_atom_hof_298K_correction) 176 | 177 | hof_298K_correction = thermo['H_tot'] - E_internal 178 | hof_298K = hof_0K + hof_298K_correction - sum_atom_hof_298K_correction_au 179 | 180 | hof_298K_kJmol = convert_energy_au_to_kJmol(hof_298K) 181 | heat_of_formation['298K'] = hof_298K_kJmol 182 | 183 | return heat_of_formation 184 | 185 | 186 | -------------------------------------------------------------------------------- /samples/properties/tools/libqcschema.py: -------------------------------------------------------------------------------- 1 | import json 2 | import numpy as np 3 | import sys 4 | 5 | def load_qcschema_json( file_name ): 6 | # load qcschema output json file 7 | data = None 8 | with open(file_name,'r') as f: 9 | data = json.load(f) 10 | return data 11 | 12 | def load_qcschema_go_final_json( file_name ): 13 | # load qcschema GO output json file 14 | # and return last 'trajectory' point's entries 15 | # (this is the optimized molecule) 16 | data = None 17 | temp = None 18 | with open(file_name,'r') as f: 19 | temp = json.load(f) 20 | data = temp["trajectory"][-1] 21 | return data 22 | 23 | def combine_syms_coords(syms,coords,to_Angstrom = False, xyz=False): 24 | # QCSchema molecules are split into symbols and 
coordinates, 25 | # combine them here. 26 | # Optionally convert from Bohr to Angstrom. 27 | # Returns: either a geometry string (default) or 28 | # optionally it can return an xyz format string. 29 | if(to_Angstrom): 30 | # convert Bohr to Angstrom 31 | BtoA = 0.5291772109 32 | coords = coords*BtoA 33 | 34 | NAtoms = len(syms) 35 | geo = np.reshape(coords, (NAtoms,3)) 36 | 37 | # Concatenate the symbols and coordinates along the second axis 38 | combined = np.concatenate([syms[:, np.newaxis], geo], axis=1) 39 | 40 | # Convert the combined array to a string with spaces as separators 41 | output = np.array2string(combined, separator=' ', max_line_width=np.inf, threshold=np.inf) 42 | 43 | # Remove the brackets and quotes from the output string 44 | PySCF_atoms = output.replace('[', '').replace(']', '').replace("'", '') 45 | 46 | # Return as string or return as xyz-format string (i.e. top is NAtoms,blankline) 47 | if(xyz): 48 | xyz = f'{NAtoms}\n\n' 49 | PySCF_atoms = xyz+PySCF_atoms 50 | 51 | return PySCF_atoms 52 | 53 | def load_qcschema_molecule(qcschema_dict, to_Angstrom = False, xyz=False): 54 | # Load QCSchema molecule. 55 | # Optionally convert geometry to angstrom 56 | # Returns: either a geometry string (default) or 57 | # optionally it can return an xyz format string. 58 | 59 | syms = np.array(qcschema_dict["molecule"]["symbols"]) 60 | geo = np.array(qcschema_dict["molecule"]["geometry"]) 61 | 62 | # combine together the symbols and coordinates 63 | PySCF_atoms = combine_syms_coords(syms,geo,to_Angstrom,xyz) 64 | 65 | return PySCF_atoms 66 | 67 | def load_qcschema_final_molecule(qcschema_dict, to_Angstrom = False, xyz=False): 68 | # Load final molecule in QCSchema. 69 | # In GO job this is the optimized geometry. 70 | # Optionally convert geometry to angstrom 71 | # Returns: either a geometry string (default) or 72 | # optionally it can return an xyz format string. 
73 | 74 | syms = np.array(qcschema_dict["final_molecule"]["symbols"]) 75 | geo = np.array(qcschema_dict["final_molecule"]["geometry"]) 76 | 77 | # combine together the symbols and coordinates 78 | PySCF_atoms = combine_syms_coords(syms,geo,to_Angstrom,xyz) 79 | 80 | return PySCF_atoms 81 | 82 | def load_qcschema_hessian(qcschema_dict): 83 | # qcschema_dict: pass in dict that has the qcschema output json loaded into it 84 | 85 | # load qcschema hessian 86 | qc_h = [] 87 | qc_h = qcschema_dict["return_result"] 88 | 89 | # Get Number of atoms N 90 | syms = np.array(qcschema_dict["molecule"]["symbols"]) 91 | NAtom = len(syms) 92 | 93 | # reshape from (3N)**2 array to (N,N,3,3) 94 | hessian = np.array(qc_h).reshape(NAtom,NAtom,3,3) 95 | return hessian 96 | 97 | def load_qcschema_mol_scf(qcschema_dict,save_chk=False,chkfile="output.chk",to_Angstrom=False): 98 | # uses qcschema dict and re-creates PySCF mol and scf info 99 | # returns mol and scf and optionally saves to chkfile 100 | # qcschema_dict: dict containing the qcschema output loaded into it 101 | # save_chk: whether to save chkfile or not 102 | # chkfile: the name of the chkfile we may create 103 | try: 104 | import pyscf 105 | from pyscf.lib.chkfile import load_chkfile_key, load 106 | from pyscf.lib.chkfile import dump_chkfile_key, dump, save 107 | from pyscf.lib.chkfile import load_mol, save_mol 108 | except ImportError: 109 | raise ImportError( 110 | "Missing optional 'pyscf' dependencies. \ 111 | To install run: pip install pyscf" 112 | ) 113 | 114 | # Accelerated DFT service return scf_occupations_a only for R, so occ is 1 or 0. 
115 | # Need to double if RHF/RKS/ROHF 116 | hf_only = qcschema_dict["model"]["method"] == 'hf' 117 | unrestricted = qcschema_dict["keywords"]["unrestricted"] 118 | if hf_only: 119 | method = 'uhf' if unrestricted else 'rhf' 120 | else: 121 | method = 'uks' if unrestricted else 'rks' 122 | if(method == 'rks' or method == 'roks' or method == 'rhf' or method == 'rohf'): 123 | OccFactor = 2.0 124 | have_beta = False 125 | elif(method == 'uks' or method == 'uhf'): 126 | OccFactor = 1.0 127 | have_beta = True 128 | elif(method == 'gks' or method == 'ghf'): 129 | OccFactor = 1.0 130 | have_beta = False 131 | 132 | # Need to reshape MO coefficients for PySCF shape. 133 | # NOTE: assumes NMO=NAO which isn't the case if linear dependencies etc. 134 | nao = qcschema_dict["properties"]["calcinfo_nbasis"] 135 | nmo = qcschema_dict["properties"]["calcinfo_nmo"] 136 | if nao != nmo: 137 | print("Warning: nao and nmo are not the same") 138 | #sys.exit(1) # Exit the script with a non-zero status to indicate an error 139 | 140 | ## chkfile info ## 141 | # Get the 4 things that PySCF chkfile wants 142 | # ...remembering to reshape coeffs and scale occupancies. 
143 | e_tot = float( qcschema_dict["properties"]["return_energy"] ) 144 | mo_coeff = np.reshape(qcschema_dict["wavefunction"]["scf_orbitals_a"],(nao,nmo)) 145 | mo_occ = np.array( qcschema_dict["wavefunction"]["scf_occupations_a"] )*OccFactor 146 | mo_energy = np.array( qcschema_dict["wavefunction"]["scf_eigenvalues_a"] ) 147 | if(have_beta): 148 | # for each useful piece of info we need to combine alpha and beta into 2d array, with alpha first 149 | # MO occupations 150 | mo_occ_beta = qcschema_dict["wavefunction"]["scf_occupations_b"] 151 | mo_occ = np.vstack( (mo_occ, mo_occ_beta) ) 152 | # MO coefficients 153 | mo_coeff_beta = np.reshape(qcschema_dict["wavefunction"]["scf_orbitals_b"],(nao,nmo)) 154 | mo_coeff = np.vstack( (mo_coeff,mo_coeff_beta)) 155 | mo_coeff = np.reshape(mo_coeff,(2,nao,nmo)) 156 | # MO energies 157 | mo_energy_beta = np.array( qcschema_dict["wavefunction"]["scf_eigenvalues_b"] ) 158 | mo_energy = np.vstack( (mo_energy, mo_energy_beta) ) 159 | # etot obviously doesn't need manipulation 160 | 161 | # Convert to dictionary for PySCF 162 | scf_dic = {'e_tot' : e_tot, 163 | 'mo_energy': mo_energy, 164 | 'mo_occ' : mo_occ, 165 | 'mo_coeff' : mo_coeff} 166 | 167 | ## Mol info: ## 168 | PySCF_charge = int( qcschema_dict["molecule"]["molecular_charge"] ) 169 | # PySCF 'spin' is number of unpaired electrons, it will be mult-1 170 | PySCF_spin = int( qcschema_dict["molecule"]["molecular_multiplicity"] - 1 ) 171 | PySCF_basis = str( qcschema_dict["model"]["basis"] ) 172 | 173 | # Cartesian/Pure basis 174 | PySCF_cart = bool( qcschema_dict["keywords"]["cartesian_basis"] ) #changed from ["basisSet"]["cartesian"] 175 | 176 | # Get molecular structure. 177 | # QCSchema has separate atom symbols and coordinates 178 | syms = np.array(qcschema_dict["molecule"]["symbols"]) 179 | geo = np.array(qcschema_dict["molecule"]["geometry"]) 180 | PySCF_atoms = load_qcschema_molecule(qcschema_dict, to_Angstrom,False) 181 | 182 | # Unit Bohr or Angstrom. 
QCSchema default is Bohr but can change here. 183 | if(to_Angstrom): 184 | units='A' 185 | else: 186 | units='B' 187 | 188 | ## Create mol and save to chkfile ## 189 | mol = pyscf.gto.Mole(atom=PySCF_atoms,basis=PySCF_basis,ecp=PySCF_basis,charge=PySCF_charge,spin=PySCF_spin,cart=PySCF_cart,unit=units) 190 | 191 | ## Save scf info data into chk ## 192 | if(save_chk): 193 | save(chkfile, 'scf', scf_dic) 194 | save_mol(mol,chkfile) 195 | 196 | return scf_dic, mol 197 | 198 | import pyscf 199 | def recreate_scf_obj(qcschema_dict,save_chk=False,chkfile=""): 200 | 201 | # Create Pyscf Molecule 202 | scf_dict, mol = load_qcschema_mol_scf(qcschema_dict,save_chk,chkfile,False) 203 | mol.build() 204 | 205 | # Create DFT (or HF) object 206 | # no longer exists method = qcschema_dict["keywords"]["scf"]["method"] 207 | hf_only = qcschema_dict["model"]["method"] == 'hf' 208 | unrestricted = qcschema_dict["keywords"]["unrestricted"] 209 | if hf_only: 210 | method = 'uhf' if unrestricted else 'rhf' 211 | else: 212 | method = 'uks' if unrestricted else 'rks' 213 | 214 | if(method =='rks'): 215 | ks = pyscf.dft.RKS(mol) 216 | elif(method =='uks'): 217 | ks = pyscf.dft.UKS(mol) 218 | elif(method =='rhf'): 219 | ks = pyscf.hf.RHF(mol) 220 | elif(method =='uhf'): 221 | ks = pyscf.hf.UHF(mol) 222 | else: 223 | print("cannot determine method..exit") 224 | return 225 | 226 | #temp set functional...could get it from the json 227 | if(method == 'rks' or method == 'uks'): 228 | #functional = qcschema_dict["keywords"]["xcFunctional"]["name"] 229 | functional = qcschema_dict["model"]["method"] 230 | ks.xc = functional 231 | 232 | # Load 4 key pieces of info we got from json into DFT object 233 | ks.mo_coeff = scf_dict["mo_coeff"] 234 | ks.mo_energy = scf_dict["mo_energy"] 235 | ks.mo_occ = scf_dict["mo_occ"] 236 | ks.e_tot = scf_dict["e_tot"] 237 | return mol, ks 238 | 239 | def prepare_ir(ks,hessian,freq): 240 | import pyscf 241 | from pyscf import gto, dft 242 | from pyscf.prop import 
"""
Driver for the RESP code.
"""
# Adapted and Extended from Psi4NumPy by Microsoft under BSD-3.
# Original credit:
#__authors__ = "Asem Alenaizan"
#__credits__ = ["Asem Alenaizan"]
#
#__copyright__ = "(c) 2014-2018, The Psi4NumPy Developers"
#__license__ = "BSD-3-Clause"
#__date__ = "2018-04-28"
#
import os

import numpy as np
import scipy.spatial
from . import espfit
from . import vdw_surface

import pyscf
from pyscf.lib.parameters import BOHR
from pyscf import data

bohr_to_angstrom = BOHR  # 0.52917721092


def resp(mf, options=None):
    """RESP code driver.

    See: Bayly, J. Phys. Chem. 97, 10271 (1993).

    Assigns a charge to each atom by fitting the electrostatic potential
    evaluated on a grid. Returns a list of charges whose sum is constrained
    to the molecular charge. Charges are restrained with a penalty function
    so they don't grow too large. By default Hydrogen is the only atom left
    unrestrained (i.e. it is FREE); set 'IHFREE': False to restrain it too.

    Parameters
    ----------
    mf : converged PySCF SCF object (supplies mol, make_rdm1, atom data)
    options : dict, optional
        User options (keys are upper-cased internally):
        RESP_A : float — restraint scale a
        RESP_B : float — restraint parabola tightness b
        IHFREE : bool — whether hydrogens are excluded from the restraint
        PRINTQ : bool — Default False. Print charges and scheme at the end.
        SAVE : bool — Default False. Save results.dat, grid points and ESP
            values (at the grid points) to file.
        VDW_SCALE_FACTORS : list of float — Default [1.2]. Scale the VDW
            radii by these factors; one shell of grid points per factor.
        VDW_POINT_DENSITY : float — Default 1.0. Grid point density;
            computation time grows with more grid points.

    Returns
    -------
    charges : array
        RESP atomic charges.

    Note
    ----
    Output files (if 'SAVE' == True):
        results.out : fitting results
        grid.dat : grid points in Bohr
        grid_esp.dat : QM ESP values in a.u.
    """
    import pyscf
    from pyscf.lib.parameters import BOHR
    from pyscf import data

    if options is None:
        options = {}

    # RESP options use upper-case keys.
    options = {k.upper(): v for k, v in sorted(options.items())}

    # VDW surface options
    if 'ESP' not in options:
        options['ESP'] = []
    if 'GRID' not in options:
        options['GRID'] = []
    if 'VDW_SCALE_FACTORS' not in options:
        options['VDW_SCALE_FACTORS'] = [1.2]  # [1.2, 1.4, 1.6, 1.8, 2.0]
    if 'VDW_POINT_DENSITY' not in options:
        options['VDW_POINT_DENSITY'] = 1.0
    # Hyperbolic restraint options
    if 'RESTRAINT' not in options:
        options['RESTRAINT'] = True
    if options['RESTRAINT']:
        if 'RESP_A' not in options:
            options['RESP_A'] = 0.0005
        if 'RESP_B' not in options:
            options['RESP_B'] = 0.1
        if 'IHFREE' not in options:
            options['IHFREE'] = True
        if 'TOLER' not in options:
            options['TOLER'] = 1e-5
        if 'MAX_IT' not in options:
            options['MAX_IT'] = 25

    if 'PRINTQ' not in options:
        options['PRINTQ'] = False
    if 'SAVE' not in options:
        options['SAVE'] = False

    # VDW radii. The PySCF tables (pyscf.data.radii) are in Bohr and are used
    # as-is; user-supplied radii are expected in Angstrom and converted below.
    radii = {}
    if 'VDW_RADII' in options:
        options['VDW_SCHEME'] = 'USER'
    if 'VDW_SCHEME' not in options:
        options['VDW_SCHEME'] = 'VDW_mod'

    scheme = options['VDW_SCHEME']
    if scheme == 'VDW_mod':
        # Modified Bondi: H radius set to 1.1 Angstrom.
        # BUG FIX: copy() — the old code mutated pyscf's shared
        # data.radii.VDW table in place, corrupting later calls.
        vdw_array = data.radii.VDW.copy()
        vdw_array[1] = 1.1 / BOHR
    elif scheme == 'VDW':
        vdw_array = data.radii.VDW
    elif scheme == 'UFF':
        vdw_array = data.radii.UFF
    elif scheme == 'MM3':
        vdw_array = data.radii.MM3
    elif scheme == 'BRAGG':
        vdw_array = data.radii.BRAGG
    elif scheme == 'COVALENT':
        vdw_array = data.radii.COVALENT
    elif scheme != 'USER':
        # Previously an unknown scheme crashed later with a NameError.
        raise ValueError("Unknown VDW_SCHEME: %s" % scheme)

    if scheme != 'USER':
        # In all pyscf tables element 0 is a dummy; H is at index 1.
        # Values are kept in Bohr (grid points and coordinates are Bohr too).
        vdw_array = vdw_array[1:]
        # Convert to dict keyed by atomic number.
        radii = dict(enumerate(vdw_array.flatten(), 1))
    else:
        # User-defined radii, given in Angstrom; convert to Bohr.
        # BUG FIX: build a new dict — the old code divided the caller's
        # dict in place, so a second call re-divided the same values.
        radii = {key: val / BOHR for key, val in options['VDW_RADII'].items()}

    # Constraint options
    if 'CONSTRAINT_CHARGE' not in options:
        options['CONSTRAINT_CHARGE'] = []
    if 'CONSTRAINT_GROUP' not in options:
        options['CONSTRAINT_GROUP'] = []

    # Renamed from `data` — that name shadowed the pyscf.data module import.
    fit_data = {}
    fit_data['natoms'] = mf.mol.natm
    fit_data['symbols'] = mf.mol.elements
    fit_data['atom_types'] = np.array(mf.mol._atm[:, 0])  # atomic numbers
    fit_data['mol_charge'] = mf.mol.charge
    fit_data['esp_values'] = []
    fit_data['invr'] = []

    coordinates = mf.mol.atom_coords(unit='B')
    fit_data['coordinates'] = coordinates

    if options['GRID']:
        # Read grid points (in Bohr) from file.
        points = np.loadtxt('grid.dat')
        print("Reading Grid points (in Bohr) from file grid.dat")
    else:
        # Build the points at which we're going to calculate the ESP:
        # one VDW shell per scale factor.
        points = []
        for scale_factor in options['VDW_SCALE_FACTORS']:
            shell, radii_scaled = vdw_surface.vdw_surface(
                coordinates, fit_data['atom_types'], scale_factor,
                options['VDW_POINT_DENSITY'], radii)
            points.append(shell)
        points = np.concatenate(points)
        if options['SAVE']:
            np.savetxt('grid.dat', points, fmt='%15.10f')

    # Calculate ESP values at the grid
    if options['ESP']:
        print("Read ESP from file: Not Yet Implemented")
        return
    else:
        # Nuclear contribution to the ESP: sum_A Z_A / |R_A - r|.
        r_pX = np.array(scipy.spatial.distance.cdist(coordinates, points))
        potential_real = np.dot(np.array(mf.mol.atom_charges()), 1 / r_pX)

        # Electronic part of the ESP, batched to bound memory:
        # each batch needs ~nbatch*NBasis^2*8 bytes for int1e_grids.
        ngrids = len(points)
        try:
            # Size batches from available memory if psutil is installed.
            import psutil
            NBasis = int(mf.mol.nao_nr())
            mem_avail = psutil.virtual_memory()[1]
            grid_avail = int(mem_avail / (8 * NBasis * NBasis))
            # Batch size: use 90% of available memory.
            nbatch = min(int(grid_avail * 0.90), ngrids)
        except Exception:
            # Hard-coded fallback batch size — adjust if it is too large.
            nbatch = 128 * 128

        dm = mf.make_rdm1()
        try:
            for ibatch in range(0, ngrids, nbatch):
                max_grid = min(ibatch + nbatch, ngrids)
                grid_bit = points[ibatch:max_grid]
                potential_real[ibatch:max_grid] -= np.einsum(
                    'pij,ij->p',
                    mf.mol.intor('int1e_grids', grids=grid_bit), dm)
        except MemoryError:
            # Narrowed from a bare except so genuine bugs propagate.
            print("Out of memory in func resp. Make batch size (nbatch) smaller")
            return ()

        fit_data['esp_values'].append(potential_real)

        if options['SAVE']:
            np.savetxt('grid_esp.dat', fit_data['esp_values'], fmt='%15.10f')

    # Inverse distance from each ESP point to each nucleus; cdist replaces
    # the old O(n*m) Python double loop with identical values.
    invr = 1.0 / scipy.spatial.distance.cdist(points, coordinates)
    fit_data['invr'].append(invr)

    # Calculate charges
    qf, labelf, notes = espfit.fit(options, fit_data)

    # Write the results to disk
    if options['SAVE']:
        with open("results.out", "w") as f:
            f.write("Electrostatic potential parameters\n")
            if not options['GRID']:
                f.write(" van der Waals radii (Bohr):\n")
                for i, j in radii.items():
                    # NOTE(review): radii here are unscaled, so dividing by
                    # the (last) scale_factor looks wrong — verify intent.
                    f.write(" %8s%8.3f\n" % (i, j / scale_factor))
                f.write(" VDW scale factors: ")
                for i in options["VDW_SCALE_FACTORS"]:
                    f.write('%6.2f' % i)
                f.write('\n')
                f.write(" VDW point density: %.3f\n" % (options["VDW_POINT_DENSITY"]))

            # BUG FIX: old line was an un-substituted format string
            # ("%i_%s_grid.dat in %s" with no arguments).
            f.write("\nGrid information (see grid.dat)\n")
            # BUG FIX: old code printed len(esp_values) (always 1), not the
            # number of grid points.
            f.write(" Number of grid points: %d\n" % len(fit_data['esp_values'][0]))
            f.write("\nQuantum electrostatic potential (see grid_esp.dat)\n")

            f.write("\nConstraints\n")
            if options['CONSTRAINT_CHARGE']:
                f.write(" Charge constraints\n")
                for i in options['CONSTRAINT_CHARGE']:
                    f.write(" Total charge of %12.8f on the set" % i[0])
                    for j in i[1]:
                        f.write("%4d" % j)
                    f.write("\n")
            if options['CONSTRAINT_GROUP']:
                f.write(" Equality constraints\n")
                f.write(" Equal charges on atoms\n")
                for i in options['CONSTRAINT_GROUP']:
                    f.write(" ")
                    for j in i:
                        f.write("%4d" % j)
                    f.write("\n")

            f.write("\nRestraint\n")
            if options['RESTRAINT']:
                f.write(" Hyperbolic restraint to a charge of zero\n")
                if options['IHFREE']:
                    f.write(" Hydrogen atoms are not restrained\n")
                f.write(" resp_a: %.4f\n" % (options["RESP_A"]))
                f.write(" resp_b: %.4f\n" % (options["RESP_B"]))

            f.write("\nFit\n")
            f.write(notes)
            f.write("\nElectrostatic Potential Charges\n")
            f.write(" Center Symbol")
            for i in labelf:
                f.write("%10s" % i)
            f.write("\n")
            for i in range(fit_data['natoms']):
                f.write(" %5d %s " % (i + 1, fit_data['symbols'][i]))
                for j in qf:
                    f.write("%12.8f" % j[i])
                f.write("\n")
            f.write("Total Charge: ")
            for i in qf:
                f.write("%12.8f" % np.sum(i))
            f.write('\n')

    # print output
    if options['PRINTQ']:
        print('VDW SCHEME', options['VDW_SCHEME'])
        print("charges:", qf[1])

    # qf[0] are the unrestrained ESP charges; qf[1] are the RESP charges.
    return qf[1]
import numpy as np


def surface(n):
    """Compute approximately n points on the unit sphere. Adapted from GAMESS.

    Parameters
    ----------
    n : int
        Approximate number of requested surface points.

    Returns
    -------
    ndarray
        (k, 3) array of xyz coordinates of surface points, k <= n.
    """
    u = []
    eps = 1e-10
    nequat = int(np.sqrt(np.pi * n))
    # BUG FIX: guarantee at least one vertical band — for n <= 1 the old
    # nvert = int(nequat/2) was 0 and the fi division crashed.
    nvert = max(int(nequat / 2), 1)
    nu = 0
    for i in range(nvert + 1):
        # Latitude band i, from pole (fi=0) to pole (fi=pi).
        fi = np.pi * i / nvert
        z = np.cos(fi)
        xy = np.sin(fi)
        # Number of points around this band, at least one (the poles).
        nhor = int(nequat * xy + eps)
        if nhor < 1:
            nhor = 1
        for j in range(nhor):
            fj = 2 * np.pi * j / nhor
            x = np.cos(fj) * xy
            y = np.sin(fj) * xy
            if nu >= n:
                return np.array(u)
            nu += 1
            u.append([x, y, z])
    return np.array(u)


def vdw_surface(coordinates, elements, scale_factor, density, input_radii):
    """Compute points on (outside) the van der Waals surface of a molecule.

    Parameters
    ----------
    coordinates : ndarray
        Cartesian coordinates of the nuclei (same length unit as input_radii).
    elements : list
        Keys into input_radii for each atom (atomic numbers in this project).
    scale_factor : float
        Surface points sit at scale_factor * vdw_radius from each atom.
    density : float
        Approximate number of points per square length-unit of surface area.
        1.0 is the default recommended by Kollman & Singh.
    input_radii : dict
        PySCF (or user) defined VDW radii keyed like `elements`.

    Returns
    -------
    surface_points : ndarray
        Coordinates of the retained surface points.
    radii : dict
        The scaled VDW radii actually used.

    Raises
    ------
    KeyError
        If an element has no radius in input_radii.
    """
    radii = {}
    surface_points = []
    # Scale the radii once up front.
    for i in elements:
        if i in input_radii.keys():
            radii[i] = input_radii[i] * scale_factor
        else:
            raise KeyError('%s is not a supported element; ' % i
                           + 'use the "VDW_RADII" option to add '
                           + 'its van der Waals radius.')
    # One sphere of points per atom.
    for i in range(len(coordinates)):
        # Approximate number of ESP grid points for this atom's sphere area.
        n_points = int(density * 4.0 * np.pi * np.power(radii[elements[i]], 2))
        # n_points on a unit sphere, then scale by the radius and translate.
        dots = surface(n_points)
        dots = coordinates[i] + radii[elements[i]] * dots
        for j in range(len(dots)):
            save = True
            for k in range(len(coordinates)):
                if i == k:
                    continue
                # Exclude points inside the scaled VDW sphere of other atoms.
                d = np.linalg.norm(dots[j] - coordinates[k])
                if d < radii[elements[k]]:
                    save = False
                    break
            if save:
                surface_points.append(dots[j])
    return np.array(surface_points), radii


def niceview(view, data):
    """Add +/-0.05 isosurfaces of cube-format volumetric `data` to a py3Dmol
    view (blue positive lobe, orange negative lobe)."""
    view.addVolumetricData(
        data,
        "cube",
        {
            "isoval": 0.05,
            "smoothness": 5,
            "opacity": 0.8,
            "volformat": "cube",
            "color": "blue",
        },
    )
    view.addVolumetricData(
        data,
        "cube",
        {
            "isoval": -0.05,
            "smoothness": 5,
            "opacity": 0.8,
            "volformat": "cube",
            "color": "orange",
        },
    )
    return
"""
Reads a wavefunction from a HDF5 file and converts it to a QC-Schema JSON.
"""

import h5py
import numpy as np
import argparse
from typing import Any, Dict
import os
from qcelemental import periodictable
from qcelemental.models.basis import BasisCenter, BasisSet, ECPPotential, ElectronShell


def read_basis(h5f, qc_wavefunction):
    """Read the basis set from an open HDF5 file into qc_wavefunction["basis"].

    Also reorders the scf_* matrices in qc_wavefunction in place (except for
    hessian jobs) so the AO ordering matches the sorted shell ordering.
    """
    basis_dict = {}
    basis_dict["schema_name"] = "qcschema_basis"
    basis_dict["schema_version"] = 1
    basis_dict["name"] = h5f["/wavefunction/basis/name"][()].decode()
    basis_dict["nbf"] = h5f['wavefunction/basis/nbf'][()]

    symbols = [x.decode() for x in h5f["/molecule/symbols"][()]]
    numbers = [periodictable.to_Z(x) for x in symbols]
    atoms = list(range(len(numbers)))

    basis_dict["atom_map"] = [str(x) for x in atoms]

    #assert h5f['wavefunction/basis/schema_name'][()].decode() == "madft_basis"
    #assert h5f['wavefunction/basis/schema_version'][()] == 1

    # Read the raw shell data. Each entry carries (atom index, ElectronShell,
    # raw sort key, and the AO index range this shell occupies).
    electron_shells = []
    pure = bool(h5f['/wavefunction/basis/pure'][()])
    if "/wavefunction/basis/electron_shells" in h5f:
        bf_idx = 0
        for sh in h5f["/wavefunction/basis/electron_shells"][()]:
            L = sh[1]
            # Basis functions per shell: spherical 2L+1, cartesian (L+1)(L+2)/2.
            size = 2 * L + 1 if pure else (L + 1) * (L + 2) // 2
            electron_shells.append(
                (
                    sh[0],
                    ElectronShell(
                        angular_momentum=[sh[1]],
                        harmonic_type="spherical" if pure else "cartesian",
                        exponents=tuple(sh[3]),
                        coefficients=[tuple(sh[4])],
                    ),
                    sh[2],
                    np.arange(bf_idx, bf_idx + size),
                )
            )
            bf_idx += size

    restricted = qc_wavefunction['restricted'] = bool(h5f['/wavefunction/restricted'][()])
    # Hessian jobs don't require the AO reorder.
    if h5f['driver'][()].decode() == "hessian":
        pass
    else:
        # Sort shells by atom, angular momentum, then descending raw key /
        # exponent / leading coefficient, and permute the AO axes to match.
        electron_shells = sorted(electron_shells, key=lambda x: (
            x[0],
            x[1].angular_momentum[0],
            -x[2],
            -x[1].exponents[0], -x[1].coefficients[0][0]
        ))
        ao_order = np.hstack([sh[-1] for sh in electron_shells])
        qc_wavefunction['scf_fock_a'] = qc_wavefunction['scf_fock_a'][ao_order[:, None], ao_order]
        qc_wavefunction['scf_density_a'] = qc_wavefunction['scf_density_a'][ao_order[:, None], ao_order]
        # Orbitals are (nbf, nmo): only the AO (row) axis is permuted.
        qc_wavefunction['scf_orbitals_a'] = qc_wavefunction['scf_orbitals_a'][ao_order[:, None]]
        if not restricted:
            qc_wavefunction['scf_fock_b'] = qc_wavefunction['scf_fock_b'][ao_order[:, None], ao_order]
            qc_wavefunction['scf_density_b'] = qc_wavefunction['scf_density_b'][ao_order[:, None], ao_order]
            qc_wavefunction['scf_orbitals_b'] = qc_wavefunction['scf_orbitals_b'][ao_order[:, None]]
    electron_shells = [(x[0], x[1]) for x in electron_shells]

    ecp_potentials = None
    if "/wavefunction/basis/ecp_shells" in h5f:
        ecp_potentials = [
            (
                sh[0],
                ECPPotential(
                    ecp_type="scalar",
                    angular_momentum=tuple(sh[1]),
                    gaussian_exponents=tuple(sh[3]),
                    coefficients=[tuple(sh[4])],
                    r_exponents=tuple(sh[5]),
                )
            )
            for sh in h5f["/wavefunction/basis/ecp_shells"][()]
        ]

    ecp_cores = {}
    if "/wavefunction/basis/ecp_cores" in h5f:
        ecp_cores = {
            atom: ncores
            for atom, ncores in h5f["/wavefunction/basis/ecp_cores"][()]
        }

    # Group shells/ECPs by atom index into the QC-Schema center_data layout.
    center_data = {}
    for idx in atoms:
        electron_shells_idx = None
        if electron_shells is not None:
            electron_shells_idx = [
                sh for idx_, sh in electron_shells if idx_ == idx
            ]

        ecp_potentials_idx = None
        if ecp_potentials is not None:
            ecp_potentials_idx = [
                p for idx_, p in ecp_potentials if idx_ == idx
            ]

        center_data[str(idx)] = BasisCenter(
            electron_shells=electron_shells_idx,
            ecp_potentials=ecp_potentials_idx,
            ecp_electrons=ecp_cores.get(idx, 0),
        )

    basis_dict["center_data"] = center_data
    qc_wavefunction["basis"] = BasisSet(**basis_dict)


def read_hdf5_wavefunction(file_name: str) -> Dict[str, Any]:
    """Read an HDF5 wavefunction file into a QC-Schema style dict.

    Raises ValueError if the file has no /wavefunction group or the stored
    dimensions are inconsistent.
    """
    qc_wavefunction = {}
    with h5py.File(file_name, "r") as h5f:
        # Check that the wavefunction field is present.
        if "/wavefunction" not in h5f:
            raise ValueError("No wavefunction found in HDF5 file")
        # Restricted or unrestricted reference?
        restricted = qc_wavefunction["restricted"] = bool(
            h5f["/wavefunction/restricted"][()]
        )
        for key in [
            "fock_a",
            "fock_b",
            "density_a",
            "density_b",
            "orbitals_a",
            "orbitals_b",
            "eigenvalues_a",
            "eigenvalues_b",
            "occupations_a",
            "occupations_b",
            "scf_fock_a",
            "scf_fock_b",
            "scf_density_a",
            "scf_density_b",
            "scf_orbitals_a",
            "scf_orbitals_b",
            "scf_eigenvalues_a",
            "scf_eigenvalues_b",
            "scf_occupations_a",
            "scf_occupations_b",
        ]:
            if restricted and key.endswith("_b"):
                # QC-Schema convention: for restricted wavefunctions the beta
                # entry holds the NAME of the alpha dataset (a string
                # reference) instead of duplicating the data.
                key_a = key.replace("_b", "_a")
                qc_wavefunction[key] = key_a
            else:  # alpha values and unrestricted values
                qc_wavefunction[key] = h5f[f"/wavefunction/{key}"][()]

        # Sanity-check and reshape the flat matrices.
        nmo = h5f["/properties/calcinfo_nmo"][()]
        if len(qc_wavefunction["scf_eigenvalues_a"]) != nmo:
            raise ValueError("Inconsistent number of eigenvalues and nmo")
        if not restricted:
            if len(qc_wavefunction["scf_eigenvalues_a"]) != len(
                qc_wavefunction["scf_eigenvalues_b"]
            ):
                raise ValueError(
                    "Number of alpha and beta SCF eigenvalues do not match"
                )
        nbf = h5f["/wavefunction/basis/nbf"][()]
        qc_wavefunction["scf_fock_a"] = qc_wavefunction["scf_fock_a"].reshape(nbf, nbf)
        qc_wavefunction["scf_density_a"] = qc_wavefunction["scf_density_a"].reshape(nbf, nbf)
        qc_wavefunction["scf_orbitals_a"] = qc_wavefunction["scf_orbitals_a"].reshape(nbf, nmo)
        if not restricted:
            qc_wavefunction["scf_fock_b"] = qc_wavefunction["scf_fock_b"].reshape(nbf, nbf)
            qc_wavefunction["scf_density_b"] = qc_wavefunction["scf_density_b"].reshape(nbf, nbf)
            qc_wavefunction["scf_orbitals_b"] = qc_wavefunction["scf_orbitals_b"].reshape(nbf, nmo)

        read_basis(h5f, qc_wavefunction)
    return qc_wavefunction


if __name__ == "__main__":
    # Parse filename from command line.
    parser = argparse.ArgumentParser(
        description="Reads a wavefunction from a HDF5 file and converts it to a QC-Schema JSON."
    )
    parser.add_argument("filename", type=str, help="HDF5 file to read")
    args = parser.parse_args()
    filename = args.filename
    if not os.path.exists(filename):
        # BUG FIX: the message previously contained no placeholder and never
        # named the missing file.
        raise FileNotFoundError(f"File {filename} not found")
    qc_wavefunction = read_hdf5_wavefunction(filename)
    print(qc_wavefunction.keys())
    print(qc_wavefunction['basis'].dict().keys())
    print(qc_wavefunction)
16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": 21, 21 | "metadata": { 22 | "collapsed": false, 23 | "jupyter": { 24 | "outputs_hidden": false, 25 | "source_hidden": false 26 | }, 27 | "nteract": { 28 | "transient": { 29 | "deleting": false 30 | } 31 | } 32 | }, 33 | "outputs": [], 34 | "source": [ 35 | "from azure.quantum import Workspace\n", 36 | "from azure.quantum.job import JobFailedWithResultsError\n", 37 | "\n", 38 | "# insert connection string form Azure Portal Workspace Access Keys\n", 39 | "connection_string = \"\"\n", 40 | "workspace = Workspace.from_connection_string(connection_string)" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": 22, 46 | "metadata": { 47 | "collapsed": false, 48 | "jupyter": { 49 | "outputs_hidden": false, 50 | "source_hidden": false 51 | }, 52 | "nteract": { 53 | "transient": { 54 | "deleting": false 55 | } 56 | } 57 | }, 58 | "outputs": [ 59 | { 60 | "name": "stdout", 61 | "output_type": "stream", 62 | "text": [ 63 | "Verifying access to Accelerated DFT target.\n", 64 | "Verification complete.\n" 65 | ] 66 | } 67 | ], 68 | "source": [ 69 | "# To submit Accelerated DFT jobs, we will be using the microsoft.dft target in the workspace.\n", 70 | "print(\"Verifying access to Accelerated DFT target.\")\n", 71 | "target = workspace.get_targets(\"microsoft.dft\")\n", 72 | "print(\"Verification complete.\")" 73 | ] 74 | }, 75 | { 76 | "attachments": {}, 77 | "cell_type": "markdown", 78 | "metadata": { 79 | "nteract": { 80 | "transient": { 81 | "deleting": false 82 | } 83 | } 84 | }, 85 | "source": [ 86 | "Now we submit the calculations to MADFT service.\n", 87 | "\n", 88 | "The QCSchema input below is for a DFT energy calculation on phenol (with the geometry in Bohr)." 
89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": 23, 94 | "metadata": {}, 95 | "outputs": [], 96 | "source": [ 97 | "qcschema_input = {\n", 98 | " \"driver\": \"energy\",\n", 99 | " \"model\": {\n", 100 | " \"method\": \"m06-2x\",\n", 101 | " \"basis\": \"def2-svp\"\n", 102 | " },\n", 103 | " \"schema_name\": \"qcschema_input\",\n", 104 | " \"schema_version\": 1,\n", 105 | " \"molecule\": {\n", 106 | " \"extras\": {},\n", 107 | " \"symbols\": [\n", 108 | " \"O\",\n", 109 | " \"C\",\n", 110 | " \"C\",\n", 111 | " \"C\",\n", 112 | " \"C\",\n", 113 | " \"C\",\n", 114 | " \"C\",\n", 115 | " \"H\",\n", 116 | " \"H\",\n", 117 | " \"H\",\n", 118 | " \"H\",\n", 119 | " \"H\",\n", 120 | " \"H\"\n", 121 | " ],\n", 122 | " \"geometry\": [\n", 123 | " 4.730542147965709,\n", 124 | " 0.034826575331843086,\n", 125 | " 0.07810088784463559,\n", 126 | " 2.1361232242687977,\n", 127 | " 0.017709001458524106,\n", 128 | " 0.009088108672780787,\n", 129 | " 0.7996954919209014,\n", 130 | " 2.290483253979806,\n", 131 | " 0.10106814673106823,\n", 132 | " -1.8298562750208616,\n", 133 | " 2.2732950799384737,\n", 134 | " -0.04537958079912547,\n", 135 | " -3.1327572801516967,\n", 136 | " -0.00564083248182671,\n", 137 | " -0.28742004920350506,\n", 138 | " -1.790388872477789,\n", 139 | " -2.271959799458856,\n", 140 | " -0.38978844089184156,\n", 141 | " 0.8394687277399734,\n", 142 | " -2.2656284043593296,\n", 143 | " -0.24392044354214196,\n", 144 | " 5.279447115915874,\n", 145 | " -0.07938333158181043,\n", 146 | " 1.8109098053069272,\n", 147 | " 1.8583211818406624,\n", 148 | " 4.051452964636673,\n", 149 | " 0.2691141588512759,\n", 150 | " -2.8675310249318393,\n", 151 | " 4.053900197762506,\n", 152 | " 0.0241508699472927,\n", 153 | " -5.190440656400895,\n", 154 | " -0.014523603513912258,\n", 155 | " -0.4052054313284032,\n", 156 | " -2.796624853566738,\n", 157 | " -4.060585444078858,\n", 158 | " -0.5909607661605761,\n", 159 | " 1.9285725820008635,\n", 160 | " 
-4.013248220398251,\n", 161 | " -0.3415529925897059\n", 162 | " ]\n", 163 | " }\n", 164 | "}" 165 | ] 166 | }, 167 | { 168 | "cell_type": "code", 169 | "execution_count": 25, 170 | "metadata": {}, 171 | "outputs": [ 172 | { 173 | "data": { 174 | "text/plain": [ 175 | "" 176 | ] 177 | }, 178 | "execution_count": 25, 179 | "metadata": {}, 180 | "output_type": "execute_result" 181 | } 182 | ], 183 | "source": [ 184 | "# submit calculation using QCSchema input\n", 185 | "target.submit(input_data=[qcschema_input],name='phenol_energy')" 186 | ] 187 | }, 188 | { 189 | "attachments": {}, 190 | "cell_type": "markdown", 191 | "metadata": {}, 192 | "source": [ 193 | "# Query Job Status and Retreive Results" 194 | ] 195 | }, 196 | { 197 | "cell_type": "markdown", 198 | "metadata": { 199 | "nteract": { 200 | "transient": { 201 | "deleting": false 202 | } 203 | } 204 | }, 205 | "source": [ 206 | "We can retrieve information about a job through [Workspace.get_job](https://learn.microsoft.com/en-us/azure/quantum/optimization-job-management), and also query the results by filtering the job name with [Workspace.list_jobs](https://learn.microsoft.com/en-us/azure/quantum/optimization-job-management)." 
207 | ] 208 | }, 209 | { 210 | "cell_type": "code", 211 | "execution_count": 7, 212 | "metadata": { 213 | "collapsed": false, 214 | "jupyter": { 215 | "outputs_hidden": false, 216 | "source_hidden": false 217 | }, 218 | "nteract": { 219 | "transient": { 220 | "deleting": false 221 | } 222 | } 223 | }, 224 | "outputs": [ 225 | { 226 | "name": "stdout", 227 | "output_type": "stream", 228 | "text": [ 229 | "Job: \"phenol_spf\" is Succeeded\n" 230 | ] 231 | } 232 | ], 233 | "source": [ 234 | "# query the latest job that match the given name\n", 235 | "job = workspace.list_jobs(name_match=job_name)[-1]\n", 236 | "\n", 237 | "# refresh the job SAS for using the API\n", 238 | "job.refresh()\n", 239 | "\n", 240 | "# show the status of the job\n", 241 | "print(f'Job: \"{job_name}\" is {job.details.status}')" 242 | ] 243 | }, 244 | { 245 | "cell_type": "code", 246 | "execution_count": 14, 247 | "metadata": { 248 | "collapsed": false, 249 | "jupyter": { 250 | "outputs_hidden": false, 251 | "source_hidden": false 252 | }, 253 | "nteract": { 254 | "transient": { 255 | "deleting": false 256 | } 257 | } 258 | }, 259 | "outputs": [], 260 | "source": [ 261 | "# read the results of the job\n", 262 | "if job.details.status == 'Succeeded':\n", 263 | " results = job.get_results()\n", 264 | "else:\n", 265 | " results = f'\"{job_name}\" is still {job.details.status}...'\n", 266 | "\n", 267 | "# QCSchema Output\n", 268 | "qcschema = results[\"results\"][0]" 269 | ] 270 | }, 271 | { 272 | "attachments": {}, 273 | "cell_type": "markdown", 274 | "metadata": {}, 275 | "source": [ 276 | "# Results" 277 | ] 278 | }, 279 | { 280 | "attachments": {}, 281 | "cell_type": "markdown", 282 | "metadata": {}, 283 | "source": [ 284 | "The results of the calculation are stored in the QCSchema format dict.\n", 285 | "\n", 286 | "For an energy calculation we can see the energy by simply looking at the key \"return_result\".\n", 287 | "(For gradient calculations, this key returns the force.)" 288 | ] 289 | 
}, 290 | { 291 | "cell_type": "code", 292 | "execution_count": 12, 293 | "metadata": {}, 294 | "outputs": [ 295 | { 296 | "name": "stdout", 297 | "output_type": "stream", 298 | "text": [ 299 | "Total Energy (Hartree): -307.12139651020345\n", 300 | "Gradient: [-0.002197471210684823, 0.00029306318218704636, -0.0035252897749837586, 0.00572408645715828, -5.241742714710666e-05, 0.0007351063564943854, -0.001393434816155871, -0.0004873898896040757, 0.00034441088254433955, 0.0005297075000868651, -0.0004320963967661823, -0.00011560122742454871, 0.0006584705780644795, -4.913920814949386e-05, 0.00013926396159232024, 0.0004422087761030598, 0.0005081197250234315, -5.1402734105060466e-05, -0.0013281339139158133, 0.00043517193286637945, 0.0004488397827828762, -0.00166022348528376, -0.00020252486646704475, 0.002096644098159628, 0.0006021813745114152, 0.00043432123944728285, 0.00010817314382553182, -0.0005365342204302209, 0.000759994909718481, 4.284247488994808e-06, -0.00089960013447693, 1.479943188319524e-07, -5.7922095602737714e-05, -0.0005052994442765399, -0.0007700909735886552, -0.00012093111577044305, 0.0006024978649389612, -0.00044209916061244137, 2.7267532771421688e-05]\n" 301 | ] 302 | } 303 | ], 304 | "source": [ 305 | "# The energy can be accessed:\n", 306 | "print(\"Total Energy (Hartree): \", qcschema['properties']['return_energy'] )" 307 | ] 308 | }, 309 | { 310 | "attachments": {}, 311 | "cell_type": "markdown", 312 | "metadata": {}, 313 | "source": [ 314 | "Other useful information is stored in the output dict, for example:" 315 | ] 316 | }, 317 | { 318 | "cell_type": "code", 319 | "execution_count": 13, 320 | "metadata": {}, 321 | "outputs": [ 322 | { 323 | "name": "stdout", 324 | "output_type": "stream", 325 | "text": [ 326 | "Number of Basis Functions: 191\n", 327 | "Total Energy (Hartree): -307.12139651020345\n", 328 | "Nuclear Repulsion Energy (Hartree): 270.7004518261375\n", 329 | "Total Calculation Time (s): 6.6662819385528564\n" 330 | ] 331 | } 332 | ], 333 | 
"source": [ 334 | "print(\"Number of Basis Functions: \", qcschema[\"properties\"][\"calcinfo_nbasis\"])\n", 335 | "print(\"Total Energy (Hartree): \", qcschema[\"properties\"][\"return_energy\"])\n", 336 | "print(\"Nuclear Repulsion Energy (Hartree): \", qcschema[\"properties\"][\"nuclear_repulsion_energy\"])\n", 337 | "print(\"Total Calculation Time (s): \", qcschema[\"provenance\"][\"total_time_seconds\"])" 338 | ] 339 | }, 340 | { 341 | "cell_type": "markdown", 342 | "metadata": {}, 343 | "source": [ 344 | "## Output to QCSchema json file" 345 | ] 346 | }, 347 | { 348 | "cell_type": "code", 349 | "execution_count": null, 350 | "metadata": {}, 351 | "outputs": [], 352 | "source": [ 353 | "import json\n", 354 | "n = job_name + \"_output\"\n", 355 | "with open(n+\".json\", \"w\") as fp:\n", 356 | " json.dump(qcschema, fp)" 357 | ] 358 | } 359 | ], 360 | "metadata": { 361 | "kernel_info": { 362 | "name": "python3" 363 | }, 364 | "kernelspec": { 365 | "display_name": "testtest", 366 | "language": "python", 367 | "name": "python3" 368 | }, 369 | "language_info": { 370 | "codemirror_mode": { 371 | "name": "ipython", 372 | "version": 3 373 | }, 374 | "file_extension": ".py", 375 | "mimetype": "text/x-python", 376 | "name": "python", 377 | "nbconvert_exporter": "python", 378 | "pygments_lexer": "ipython3", 379 | "version": "3.11.10" 380 | }, 381 | "nteract": { 382 | "version": "nteract-front-end@1.0.0" 383 | } 384 | }, 385 | "nbformat": 4, 386 | "nbformat_minor": 4 387 | } 388 | -------------------------------------------------------------------------------- /samples/spf.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "nteract": { 7 | "transient": { 8 | "deleting": false 9 | } 10 | } 11 | }, 12 | "source": [ 13 | "# Submission of Calculation with Subsequent Results Query\n", 14 | "\n", 15 | "In this demo, we will submit a calculation, check the status of 
the job and query the results after it is finished." 16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": null, 21 | "metadata": { 22 | "collapsed": false, 23 | "jupyter": { 24 | "outputs_hidden": false, 25 | "source_hidden": false 26 | }, 27 | "nteract": { 28 | "transient": { 29 | "deleting": false 30 | } 31 | } 32 | }, 33 | "outputs": [], 34 | "source": [ 35 | "from azure.quantum import Workspace\n", 36 | "from azure.quantum.job import JobFailedWithResultsError\n", 37 | "\n", 38 | "# insert connection string form Azure Portal Workspace Access Keys\n", 39 | "connection_string = \"\"\n", 40 | "workspace = Workspace.from_connection_string(connection_string)" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": null, 46 | "metadata": { 47 | "collapsed": false, 48 | "jupyter": { 49 | "outputs_hidden": false, 50 | "source_hidden": false 51 | }, 52 | "nteract": { 53 | "transient": { 54 | "deleting": false 55 | } 56 | } 57 | }, 58 | "outputs": [], 59 | "source": [ 60 | "# To submit Accelerated DFT jobs, we will be using the microsoft.dft target in the workspace.\n", 61 | "print(\"Verifying access to Accelerated DFT target.\")\n", 62 | "target = workspace.get_targets(\"microsoft.dft\")\n", 63 | "print(\"Verification complete.\")" 64 | ] 65 | }, 66 | { 67 | "attachments": {}, 68 | "cell_type": "markdown", 69 | "metadata": { 70 | "nteract": { 71 | "transient": { 72 | "deleting": false 73 | } 74 | } 75 | }, 76 | "source": [ 77 | "Now we submit the calculations to MADFT service.\n", 78 | "\n", 79 | "The QCSchema input below is for a DFT gradient calculation on the molecule phenol (with the geometry given in Bohr)" 80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": null, 85 | "metadata": {}, 86 | "outputs": [], 87 | "source": [ 88 | "qcschema_input = {\n", 89 | " \"driver\": \"gradient\",\n", 90 | " \"model\": {\n", 91 | " \"method\": \"m06-2x\",\n", 92 | " \"basis\": \"def2-svpd\"\n", 93 | " },\n", 94 | " 
\"schema_name\": \"qcschema_input\",\n", 95 | " \"schema_version\": 1,\n", 96 | " \"molecule\": {\n", 97 | " \"extras\": {},\n", 98 | " \"symbols\": [\n", 99 | " \"O\",\n", 100 | " \"C\",\n", 101 | " \"C\",\n", 102 | " \"C\",\n", 103 | " \"C\",\n", 104 | " \"C\",\n", 105 | " \"C\",\n", 106 | " \"H\",\n", 107 | " \"H\",\n", 108 | " \"H\",\n", 109 | " \"H\",\n", 110 | " \"H\",\n", 111 | " \"H\"\n", 112 | " ],\n", 113 | " \"geometry\": [\n", 114 | " 4.730542147965709,\n", 115 | " 0.034826575331843086,\n", 116 | " 0.07810088784463559,\n", 117 | " 2.1361232242687977,\n", 118 | " 0.017709001458524106,\n", 119 | " 0.009088108672780787,\n", 120 | " 0.7996954919209014,\n", 121 | " 2.290483253979806,\n", 122 | " 0.10106814673106823,\n", 123 | " -1.8298562750208616,\n", 124 | " 2.2732950799384737,\n", 125 | " -0.04537958079912547,\n", 126 | " -3.1327572801516967,\n", 127 | " -0.00564083248182671,\n", 128 | " -0.28742004920350506,\n", 129 | " -1.790388872477789,\n", 130 | " -2.271959799458856,\n", 131 | " -0.38978844089184156,\n", 132 | " 0.8394687277399734,\n", 133 | " -2.2656284043593296,\n", 134 | " -0.24392044354214196,\n", 135 | " 5.279447115915874,\n", 136 | " -0.07938333158181043,\n", 137 | " 1.8109098053069272,\n", 138 | " 1.8583211818406624,\n", 139 | " 4.051452964636673,\n", 140 | " 0.2691141588512759,\n", 141 | " -2.8675310249318393,\n", 142 | " 4.053900197762506,\n", 143 | " 0.0241508699472927,\n", 144 | " -5.190440656400895,\n", 145 | " -0.014523603513912258,\n", 146 | " -0.4052054313284032,\n", 147 | " -2.796624853566738,\n", 148 | " -4.060585444078858,\n", 149 | " -0.5909607661605761,\n", 150 | " 1.9285725820008635,\n", 151 | " -4.013248220398251,\n", 152 | " -0.3415529925897059\n", 153 | " ]\n", 154 | " }\n", 155 | "}" 156 | ] 157 | }, 158 | { 159 | "cell_type": "code", 160 | "execution_count": null, 161 | "metadata": {}, 162 | "outputs": [], 163 | "source": [ 164 | "# Submit job:\n", 165 | "job_name = 'phenol_gradient'\n", 166 | 
"target.submit(input_data=[qcschema_input],name=job_name)" 167 | ] 168 | }, 169 | { 170 | "attachments": {}, 171 | "cell_type": "markdown", 172 | "metadata": {}, 173 | "source": [ 174 | "# Query Job Status and Retreive Results" 175 | ] 176 | }, 177 | { 178 | "cell_type": "markdown", 179 | "metadata": { 180 | "nteract": { 181 | "transient": { 182 | "deleting": false 183 | } 184 | } 185 | }, 186 | "source": [ 187 | "We can retrieve information about a job through [Workspace.get_job](https://learn.microsoft.com/en-us/azure/quantum/optimization-job-management), and also query the results by filtering the job name with [Workspace.list_jobs](https://learn.microsoft.com/en-us/azure/quantum/optimization-job-management)." 188 | ] 189 | }, 190 | { 191 | "cell_type": "code", 192 | "execution_count": null, 193 | "metadata": { 194 | "collapsed": false, 195 | "jupyter": { 196 | "outputs_hidden": false, 197 | "source_hidden": false 198 | }, 199 | "nteract": { 200 | "transient": { 201 | "deleting": false 202 | } 203 | } 204 | }, 205 | "outputs": [], 206 | "source": [ 207 | "# query the latest job that match the given name\n", 208 | "job = workspace.list_jobs(name_match=job_name)[-1]\n", 209 | "\n", 210 | "# refresh the job SAS for using the API\n", 211 | "job.refresh()\n", 212 | "\n", 213 | "# show the status of the job\n", 214 | "print(f'Job: \"{job_name}\" is {job.details.status}')" 215 | ] 216 | }, 217 | { 218 | "cell_type": "code", 219 | "execution_count": null, 220 | "metadata": { 221 | "collapsed": false, 222 | "jupyter": { 223 | "outputs_hidden": false, 224 | "source_hidden": false 225 | }, 226 | "nteract": { 227 | "transient": { 228 | "deleting": false 229 | } 230 | } 231 | }, 232 | "outputs": [], 233 | "source": [ 234 | "# read the results of the job\n", 235 | "if job.details.status == 'Succeeded':\n", 236 | " results = job.get_results()\n", 237 | "else:\n", 238 | " results = f'\"{job_name}\" is still {job.details.status}...'\n", 239 | "\n", 240 | "# QCSchema Output\n", 
241 | "qcschema = results[\"results\"][0]" 242 | ] 243 | }, 244 | { 245 | "attachments": {}, 246 | "cell_type": "markdown", 247 | "metadata": {}, 248 | "source": [ 249 | "# Results" 250 | ] 251 | }, 252 | { 253 | "attachments": {}, 254 | "cell_type": "markdown", 255 | "metadata": {}, 256 | "source": [ 257 | "The results of the calculation are stored in the QCSchema format dict.\n", 258 | "\n", 259 | "For an energy calculation we can see the energy by simply looking at the key \"return_result\".\n", 260 | "For gradient calculations, this key returns the force." 261 | ] 262 | }, 263 | { 264 | "cell_type": "code", 265 | "execution_count": null, 266 | "metadata": {}, 267 | "outputs": [], 268 | "source": [ 269 | "# The energy can be accessed:\n", 270 | "energy = qcschema['properties']['return_energy']\n", 271 | "\n", 272 | "# Since the job driver was 'gradient' the gradient is also returned \n", 273 | "gradient = qcschema[\"return_result\"]\n", 274 | "\n", 275 | "# or alternatively the gradient can be accessed via:\n", 276 | "#gradient = qcschema['properties']['scf_total_gradient']\n", 277 | "\n", 278 | "print(\"Total Energy (Hartree): \", energy)\n", 279 | "print(\"Gradient: \", gradient)" 280 | ] 281 | }, 282 | { 283 | "attachments": {}, 284 | "cell_type": "markdown", 285 | "metadata": {}, 286 | "source": [ 287 | "Other useful information is stored in the output dict, for example:" 288 | ] 289 | }, 290 | { 291 | "cell_type": "code", 292 | "execution_count": null, 293 | "metadata": {}, 294 | "outputs": [], 295 | "source": [ 296 | "print(\"Number of Basis Functions: \", qcschema[\"properties\"][\"calcinfo_nbasis\"])\n", 297 | "print(\"Total Energy (Hartree): \", qcschema[\"properties\"][\"return_energy\"])\n", 298 | "print(\"Nuclear Repulsion Energy (Hartree): \", qcschema[\"properties\"][\"nuclear_repulsion_energy\"])\n", 299 | "print(\"Total Calculation Time (s): \", qcschema[\"provenance\"][\"total_time_seconds\"])" 300 | ] 301 | }, 302 | { 303 | "cell_type": 
"markdown", 304 | "metadata": {}, 305 | "source": [ 306 | "## Output to QCSchema json file" 307 | ] 308 | }, 309 | { 310 | "cell_type": "code", 311 | "execution_count": null, 312 | "metadata": {}, 313 | "outputs": [], 314 | "source": [ 315 | "import json\n", 316 | "n = job_name + \"_output\"\n", 317 | "with open(n+\".json\", \"w\") as fp:\n", 318 | " json.dump(qcschema, fp, indent=4)\n" 319 | ] 320 | } 321 | ], 322 | "metadata": { 323 | "kernel_info": { 324 | "name": "python3" 325 | }, 326 | "kernelspec": { 327 | "display_name": "testtest", 328 | "language": "python", 329 | "name": "python3" 330 | }, 331 | "language_info": { 332 | "codemirror_mode": { 333 | "name": "ipython", 334 | "version": 3 335 | }, 336 | "file_extension": ".py", 337 | "mimetype": "text/x-python", 338 | "name": "python", 339 | "nbconvert_exporter": "python", 340 | "pygments_lexer": "ipython3", 341 | "version": "3.11.10" 342 | }, 343 | "nteract": { 344 | "version": "nteract-front-end@1.0.0" 345 | } 346 | }, 347 | "nbformat": 4, 348 | "nbformat_minor": 4 349 | } 350 | --------------------------------------------------------------------------------