├── LICENSE ├── LICENSE-CODE ├── README.md ├── ResourceTypes ├── KeyVault │ ├── Are-there-any-slow-requests.md │ ├── How-active-has-this-keyVault-been.md │ ├── How-fast-is-this-KeyVault-serving-requests.md │ ├── What-changes-occurred-last-month.md │ └── Who-is-calling-this-KeyVault.md └── StreamAnalytics │ ├── All-logs-with-level-error.txt │ ├── All-output-data-errors.txt │ ├── Events-that-arrived-early.txt │ ├── Events-that-arrived-late.txt │ ├── Events-that-arrived-out-of-order.txt │ ├── List-all-ColumnNameInvalid-errors.txt │ ├── List-all-DuplicateKey-errors.txt │ ├── List-all-InvalidInputTimeStamp-errors.txt │ ├── List-all-InvalidInputTimeStampKey-errors.txt │ ├── List-all-RecordExceededSizeLimit-errors.txt │ ├── List-all-RequiredColumnMissing-errors.txt │ ├── List-all-TypeConversionError-errors.txt │ ├── List-all-administrative-operations.txt │ ├── List-all-input-data-errors.txt │ ├── List-all-input-deserialization-errors.txt │ ├── Operations-that-have-Failed.txt │ ├── Output-Throttling-logs.txt │ ├── Reference-data-logs.txt │ ├── Summary-of-Failed-operations-in-the-last-7-days.txt │ ├── Summary-of-all-data-errors-in-the-last-7-days.txt │ ├── Summary-of-all-errors-in-the-last-7-days.txt │ └── Transient-input-and-output-errors.txt ├── application-insights.md ├── application-insights ├── custom-SLA-and-breaches.md ├── find-exceptions-that-failed-requests.md ├── percentiles-of-request-duration-in-the-last-24-hours.md ├── pie-chart-the-top-10-countries-by-traffic.md ├── requests-duration-compared-with-monthly-average.md └── top-10-custom-events.md ├── command-usage.md ├── command-usage ├── bar-chart-unstacked.md ├── calculate-multiple-percentiles-timechart.md ├── join-kind-inner.md ├── join-on-two-field-matches.md └── pie-chart-top-10-items.md ├── examples.md ├── github-repo.md ├── images ├── SLA.png ├── availability-hours.png ├── availability-rate.png ├── cohorts.png ├── current-duration-vs-monthly-average.png ├── percentiles.png ├── rolling-mau.png ├── top-10-countries-by-traffic.png ├── top-10-custom-events.png └── user-stickiness.png ├── log-analytics.md ├── log-analytics ├── calculate-state-duration.md ├── calculate-the-average-size-of-perf-usage-reports-per-computer.md ├── chart-a-week-over-week-view-of-the-number-of-computers-sending-data.md ├── chart-the-record-count-per-table-in-the-last-5-hours.md ├── computers-still-missing-updates.md ├── computers-with-non-reporting-protection-status-duration.md ├── computers-with-unhealthy-latency.md ├── correlate-computer-memory-and-cpu.md ├── count-all-logs-collected-over-the-last-hour-per-type.md ├── count-and-chart-alerts-severity-per-day.md ├── count-azure-diagnostics-records-per-category.md ├── count-security-events-by-activity-id.md ├── count-security-events-related-to-permissions.md ├── exclude-IP-range.md ├── find-accounts-that-failed-to-logon-from-computers-on-which-we-identify-a-security-detection.md ├── find-repeating-failed-login-attempts-by-the-same-account-from-different-ips.md ├── find-stale-computers.md ├── find-user-accounts-that-failed-to-login-over-5-times-in-1-day.md ├── get-a-numbered-list-of-the-latest-alerts.md ├── get-a-random-record-for-each-unique-category.md ├── get-latest-heartbeat-record-per-computer-ip.md ├── get-the-latest-record-per-category.md ├── is-my-security-data-available.md ├── match-protection-status-records-with-heartbeat-records.md ├── parse-activity-name-and-id.md ├── perf-cpu-utilization-graph-per-computer.md ├── pie-chart-explicit-credentials-processes.md ├── 
search-application-level-events-described-as-cryptographic.md ├── search-events-related-to-unmarshaling.md ├── server-availability-rate.md ├── timechart-latency-percentiles-50-and-95.md ├── top-5-running-processes-in-the-last-3-days.md └── usage-of-computers-today.md ├── smart-analytics.md ├── smart-analytics ├── automated-detector-for-service-disruptions-based-on-app-trace-logs.md ├── sliding-window-calculations-cohort-analysis.md ├── sliding-window-calculations-rolling-mau.md └── sliding-window-calculations-user-stickiness.md └── toc.md /LICENSE: -------------------------------------------------------------------------------- 1 | Attribution 4.0 International 2 | 3 | ======================================================================= 4 | 5 | Creative Commons Corporation ("Creative Commons") is not a law firm and 6 | does not provide legal services or legal advice. Distribution of 7 | Creative Commons public licenses does not create a lawyer-client or 8 | other relationship. Creative Commons makes its licenses and related 9 | information available on an "as-is" basis. Creative Commons gives no 10 | warranties regarding its licenses, any material licensed under their 11 | terms and conditions, or any related information. Creative Commons 12 | disclaims all liability for damages resulting from their use to the 13 | fullest extent possible. 14 | 15 | Using Creative Commons Public Licenses 16 | 17 | Creative Commons public licenses provide a standard set of terms and 18 | conditions that creators and other rights holders may use to share 19 | original works of authorship and other material subject to copyright 20 | and certain other rights specified in the public license below. The 21 | following considerations are for informational purposes only, are not 22 | exhaustive, and do not form part of our licenses. 23 | 24 | Considerations for licensors: Our public licenses are 25 | intended for use by those authorized to give the public 26 | permission to use material in ways otherwise restricted by 27 | copyright and certain other rights. Our licenses are 28 | irrevocable. Licensors should read and understand the terms 29 | and conditions of the license they choose before applying it. 30 | Licensors should also secure all rights necessary before 31 | applying our licenses so that the public can reuse the 32 | material as expected. Licensors should clearly mark any 33 | material not subject to the license. This includes other CC- 34 | licensed material, or material used under an exception or 35 | limitation to copyright. More considerations for licensors: 36 | wiki.creativecommons.org/Considerations_for_licensors 37 | 38 | Considerations for the public: By using one of our public 39 | licenses, a licensor grants the public permission to use the 40 | licensed material under specified terms and conditions. If 41 | the licensor's permission is not necessary for any reason--for 42 | example, because of any applicable exception or limitation to 43 | copyright--then that use is not regulated by the license. Our 44 | licenses grant only permissions under copyright and certain 45 | other rights that a licensor has authority to grant. Use of 46 | the licensed material may still be restricted for other 47 | reasons, including because others have copyright or other 48 | rights in the material. A licensor may make special requests, 49 | such as asking that all changes be marked or described. 50 | Although not required by our licenses, you are encouraged to 51 | respect those requests where reasonable. 
More_considerations 52 | for the public: 53 | wiki.creativecommons.org/Considerations_for_licensees 54 | 55 | ======================================================================= 56 | 57 | Creative Commons Attribution 4.0 International Public License 58 | 59 | By exercising the Licensed Rights (defined below), You accept and agree 60 | to be bound by the terms and conditions of this Creative Commons 61 | Attribution 4.0 International Public License ("Public License"). To the 62 | extent this Public License may be interpreted as a contract, You are 63 | granted the Licensed Rights in consideration of Your acceptance of 64 | these terms and conditions, and the Licensor grants You such rights in 65 | consideration of benefits the Licensor receives from making the 66 | Licensed Material available under these terms and conditions. 67 | 68 | 69 | Section 1 -- Definitions. 70 | 71 | a. Adapted Material means material subject to Copyright and Similar 72 | Rights that is derived from or based upon the Licensed Material 73 | and in which the Licensed Material is translated, altered, 74 | arranged, transformed, or otherwise modified in a manner requiring 75 | permission under the Copyright and Similar Rights held by the 76 | Licensor. For purposes of this Public License, where the Licensed 77 | Material is a musical work, performance, or sound recording, 78 | Adapted Material is always produced where the Licensed Material is 79 | synched in timed relation with a moving image. 80 | 81 | b. Adapter's License means the license You apply to Your Copyright 82 | and Similar Rights in Your contributions to Adapted Material in 83 | accordance with the terms and conditions of this Public License. 84 | 85 | c. Copyright and Similar Rights means copyright and/or similar rights 86 | closely related to copyright including, without limitation, 87 | performance, broadcast, sound recording, and Sui Generis Database 88 | Rights, without regard to how the rights are labeled or 89 | categorized. For purposes of this Public License, the rights 90 | specified in Section 2(b)(1)-(2) are not Copyright and Similar 91 | Rights. 92 | 93 | d. Effective Technological Measures means those measures that, in the 94 | absence of proper authority, may not be circumvented under laws 95 | fulfilling obligations under Article 11 of the WIPO Copyright 96 | Treaty adopted on December 20, 1996, and/or similar international 97 | agreements. 98 | 99 | e. Exceptions and Limitations means fair use, fair dealing, and/or 100 | any other exception or limitation to Copyright and Similar Rights 101 | that applies to Your use of the Licensed Material. 102 | 103 | f. Licensed Material means the artistic or literary work, database, 104 | or other material to which the Licensor applied this Public 105 | License. 106 | 107 | g. Licensed Rights means the rights granted to You subject to the 108 | terms and conditions of this Public License, which are limited to 109 | all Copyright and Similar Rights that apply to Your use of the 110 | Licensed Material and that the Licensor has authority to license. 111 | 112 | h. Licensor means the individual(s) or entity(ies) granting rights 113 | under this Public License. 114 | 115 | i. 
Share means to provide material to the public by any means or 116 | process that requires permission under the Licensed Rights, such 117 | as reproduction, public display, public performance, distribution, 118 | dissemination, communication, or importation, and to make material 119 | available to the public including in ways that members of the 120 | public may access the material from a place and at a time 121 | individually chosen by them. 122 | 123 | j. Sui Generis Database Rights means rights other than copyright 124 | resulting from Directive 96/9/EC of the European Parliament and of 125 | the Council of 11 March 1996 on the legal protection of databases, 126 | as amended and/or succeeded, as well as other essentially 127 | equivalent rights anywhere in the world. 128 | 129 | k. You means the individual or entity exercising the Licensed Rights 130 | under this Public License. Your has a corresponding meaning. 131 | 132 | 133 | Section 2 -- Scope. 134 | 135 | a. License grant. 136 | 137 | 1. Subject to the terms and conditions of this Public License, 138 | the Licensor hereby grants You a worldwide, royalty-free, 139 | non-sublicensable, non-exclusive, irrevocable license to 140 | exercise the Licensed Rights in the Licensed Material to: 141 | 142 | a. reproduce and Share the Licensed Material, in whole or 143 | in part; and 144 | 145 | b. produce, reproduce, and Share Adapted Material. 146 | 147 | 2. Exceptions and Limitations. For the avoidance of doubt, where 148 | Exceptions and Limitations apply to Your use, this Public 149 | License does not apply, and You do not need to comply with 150 | its terms and conditions. 151 | 152 | 3. Term. The term of this Public License is specified in Section 153 | 6(a). 154 | 155 | 4. Media and formats; technical modifications allowed. The 156 | Licensor authorizes You to exercise the Licensed Rights in 157 | all media and formats whether now known or hereafter created, 158 | and to make technical modifications necessary to do so. The 159 | Licensor waives and/or agrees not to assert any right or 160 | authority to forbid You from making technical modifications 161 | necessary to exercise the Licensed Rights, including 162 | technical modifications necessary to circumvent Effective 163 | Technological Measures. For purposes of this Public License, 164 | simply making modifications authorized by this Section 2(a) 165 | (4) never produces Adapted Material. 166 | 167 | 5. Downstream recipients. 168 | 169 | a. Offer from the Licensor -- Licensed Material. Every 170 | recipient of the Licensed Material automatically 171 | receives an offer from the Licensor to exercise the 172 | Licensed Rights under the terms and conditions of this 173 | Public License. 174 | 175 | b. No downstream restrictions. You may not offer or impose 176 | any additional or different terms or conditions on, or 177 | apply any Effective Technological Measures to, the 178 | Licensed Material if doing so restricts exercise of the 179 | Licensed Rights by any recipient of the Licensed 180 | Material. 181 | 182 | 6. No endorsement. Nothing in this Public License constitutes or 183 | may be construed as permission to assert or imply that You 184 | are, or that Your use of the Licensed Material is, connected 185 | with, or sponsored, endorsed, or granted official status by, 186 | the Licensor or others designated to receive attribution as 187 | provided in Section 3(a)(1)(A)(i). 188 | 189 | b. Other rights. 190 | 191 | 1. 
Moral rights, such as the right of integrity, are not 192 | licensed under this Public License, nor are publicity, 193 | privacy, and/or other similar personality rights; however, to 194 | the extent possible, the Licensor waives and/or agrees not to 195 | assert any such rights held by the Licensor to the limited 196 | extent necessary to allow You to exercise the Licensed 197 | Rights, but not otherwise. 198 | 199 | 2. Patent and trademark rights are not licensed under this 200 | Public License. 201 | 202 | 3. To the extent possible, the Licensor waives any right to 203 | collect royalties from You for the exercise of the Licensed 204 | Rights, whether directly or through a collecting society 205 | under any voluntary or waivable statutory or compulsory 206 | licensing scheme. In all other cases the Licensor expressly 207 | reserves any right to collect such royalties. 208 | 209 | 210 | Section 3 -- License Conditions. 211 | 212 | Your exercise of the Licensed Rights is expressly made subject to the 213 | following conditions. 214 | 215 | a. Attribution. 216 | 217 | 1. If You Share the Licensed Material (including in modified 218 | form), You must: 219 | 220 | a. retain the following if it is supplied by the Licensor 221 | with the Licensed Material: 222 | 223 | i. identification of the creator(s) of the Licensed 224 | Material and any others designated to receive 225 | attribution, in any reasonable manner requested by 226 | the Licensor (including by pseudonym if 227 | designated); 228 | 229 | ii. a copyright notice; 230 | 231 | iii. a notice that refers to this Public License; 232 | 233 | iv. a notice that refers to the disclaimer of 234 | warranties; 235 | 236 | v. a URI or hyperlink to the Licensed Material to the 237 | extent reasonably practicable; 238 | 239 | b. indicate if You modified the Licensed Material and 240 | retain an indication of any previous modifications; and 241 | 242 | c. indicate the Licensed Material is licensed under this 243 | Public License, and include the text of, or the URI or 244 | hyperlink to, this Public License. 245 | 246 | 2. You may satisfy the conditions in Section 3(a)(1) in any 247 | reasonable manner based on the medium, means, and context in 248 | which You Share the Licensed Material. For example, it may be 249 | reasonable to satisfy the conditions by providing a URI or 250 | hyperlink to a resource that includes the required 251 | information. 252 | 253 | 3. If requested by the Licensor, You must remove any of the 254 | information required by Section 3(a)(1)(A) to the extent 255 | reasonably practicable. 256 | 257 | 4. If You Share Adapted Material You produce, the Adapter's 258 | License You apply must not prevent recipients of the Adapted 259 | Material from complying with this Public License. 260 | 261 | 262 | Section 4 -- Sui Generis Database Rights. 263 | 264 | Where the Licensed Rights include Sui Generis Database Rights that 265 | apply to Your use of the Licensed Material: 266 | 267 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right 268 | to extract, reuse, reproduce, and Share all or a substantial 269 | portion of the contents of the database; 270 | 271 | b. if You include all or a substantial portion of the database 272 | contents in a database in which You have Sui Generis Database 273 | Rights, then the database in which You have Sui Generis Database 274 | Rights (but not its individual contents) is Adapted Material; and 275 | 276 | c. 
You must comply with the conditions in Section 3(a) if You Share 277 | all or a substantial portion of the contents of the database. 278 | 279 | For the avoidance of doubt, this Section 4 supplements and does not 280 | replace Your obligations under this Public License where the Licensed 281 | Rights include other Copyright and Similar Rights. 282 | 283 | 284 | Section 5 -- Disclaimer of Warranties and Limitation of Liability. 285 | 286 | a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE 287 | EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS 288 | AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF 289 | ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, 290 | IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, 291 | WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR 292 | PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, 293 | ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT 294 | KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT 295 | ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. 296 | 297 | b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE 298 | TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, 299 | NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, 300 | INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, 301 | COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR 302 | USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN 303 | ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR 304 | DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR 305 | IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. 306 | 307 | c. The disclaimer of warranties and limitation of liability provided 308 | above shall be interpreted in a manner that, to the extent 309 | possible, most closely approximates an absolute disclaimer and 310 | waiver of all liability. 311 | 312 | 313 | Section 6 -- Term and Termination. 314 | 315 | a. This Public License applies for the term of the Copyright and 316 | Similar Rights licensed here. However, if You fail to comply with 317 | this Public License, then Your rights under this Public License 318 | terminate automatically. 319 | 320 | b. Where Your right to use the Licensed Material has terminated under 321 | Section 6(a), it reinstates: 322 | 323 | 1. automatically as of the date the violation is cured, provided 324 | it is cured within 30 days of Your discovery of the 325 | violation; or 326 | 327 | 2. upon express reinstatement by the Licensor. 328 | 329 | For the avoidance of doubt, this Section 6(b) does not affect any 330 | right the Licensor may have to seek remedies for Your violations 331 | of this Public License. 332 | 333 | c. For the avoidance of doubt, the Licensor may also offer the 334 | Licensed Material under separate terms or conditions or stop 335 | distributing the Licensed Material at any time; however, doing so 336 | will not terminate this Public License. 337 | 338 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public 339 | License. 340 | 341 | 342 | Section 7 -- Other Terms and Conditions. 343 | 344 | a. The Licensor shall not be bound by any additional or different 345 | terms or conditions communicated by You unless expressly agreed. 346 | 347 | b. 
Any arrangements, understandings, or agreements regarding the 348 | Licensed Material not stated herein are separate from and 349 | independent of the terms and conditions of this Public License. 350 | 351 | 352 | Section 8 -- Interpretation. 353 | 354 | a. For the avoidance of doubt, this Public License does not, and 355 | shall not be interpreted to, reduce, limit, restrict, or impose 356 | conditions on any use of the Licensed Material that could lawfully 357 | be made without permission under this Public License. 358 | 359 | b. To the extent possible, if any provision of this Public License is 360 | deemed unenforceable, it shall be automatically reformed to the 361 | minimum extent necessary to make it enforceable. If the provision 362 | cannot be reformed, it shall be severed from this Public License 363 | without affecting the enforceability of the remaining terms and 364 | conditions. 365 | 366 | c. No term or condition of this Public License will be waived and no 367 | failure to comply consented to unless expressly agreed to by the 368 | Licensor. 369 | 370 | d. Nothing in this Public License constitutes or may be interpreted 371 | as a limitation upon, or waiver of, any privileges and immunities 372 | that apply to the Licensor or You, including from the legal 373 | processes of any jurisdiction or authority. 374 | 375 | 376 | ======================================================================= 377 | 378 | Creative Commons is not a party to its public 379 | licenses. Notwithstanding, Creative Commons may elect to apply one of 380 | its public licenses to material it publishes and in those instances 381 | will be considered the “Licensor.” The text of the Creative Commons 382 | public licenses is dedicated to the public domain under the CC0 Public 383 | Domain Dedication. Except for the limited purpose of indicating that 384 | material is shared under a Creative Commons public license or as 385 | otherwise permitted by the Creative Commons policies published at 386 | creativecommons.org/policies, Creative Commons does not authorize the 387 | use of the trademark "Creative Commons" or any other trademark or logo 388 | of Creative Commons without its prior written consent including, 389 | without limitation, in connection with any unauthorized modifications 390 | to any of its public licenses or any other arrangements, 391 | understandings, or agreements concerning use of licensed material. For 392 | the avoidance of doubt, this paragraph does not form part of the 393 | public licenses. 394 | 395 | Creative Commons may be contacted at creativecommons.org. -------------------------------------------------------------------------------- /LICENSE-CODE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | Copyright (c) Microsoft Corporation 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and 5 | associated documentation files (the "Software"), to deal in the Software without restriction, 6 | including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, 8 | subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all copies or substantial 11 | portions of the Software. 
12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT 14 | NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 15 | IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 16 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 17 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Azure Log Analytics Examples 2 | The documentation in this repository is licensed under the Creative Commons Attribution License as found in [here](LICENSE). 3 | Any source code in this repository is licensed under the MIT license as found [here](LICENSE-CODE). 4 | 5 | ## How to contribute 6 | We follow the [GitHub fork and pull model](https://help.github.com/articles/about-collaborative-development-models). 7 | To contribute your own examples, first [fork this repo](https://help.github.com/articles/fork-a-repo/), submit any changes or additions to your forked repo, and then [submit a pull request](https://help.github.com/articles/about-pull-requests/). 8 | 9 | If you submit a pull request with new or significant changes, and you are not an employee of Microsoft, we'll add a comment to the pull request asking you to submit an online [CLA](https://cla.microsoft.com) (Contribution License Agreement). We'll need you to complete the online form before we can accept your pull request. 10 | 11 | ## Redistribution 12 | Upon redistribution of this repo, please be respectful of the readers and authors of this documentation, and include a link to the [original repo master branch](https://github.com/MicrosoftDocs/LogAnalyticsExamples). 13 | -------------------------------------------------------------------------------- /ResourceTypes/KeyVault/Are-there-any-slow-requests.md: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: Are there any slow requests? 3 | // Description: List of KeyVault requests that took longer than 1sec. 4 | // Category: Azure Resources 5 | // Topic: General 6 | // ResourceType: KeyVault // may no be needed, support for smart saving, helps if saved in solutions folder 7 | // Keywords: 8 | 9 | ``` 10 | let threshold=1000; // let operator defines a constant that can be further used in the query 11 | AzureDiagnostics 12 | | where ResourceProvider =="MICROSOFT.KEYVAULT" 13 | | where DurationMs > threshold 14 | | summarize count() by OperationName 15 | ``` -------------------------------------------------------------------------------- /ResourceTypes/KeyVault/How-active-has-this-keyVault-been.md: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: How active has this KeyVault been? 3 | // Description: Line chart showing trend of KeyVault requests volume, per operation over time. 4 | // Category: Azure Resources 5 | // Topic: General 6 | // ResourceType: KeyVault 7 | // Keywords: #render 8 | 9 | ``` 10 | // KeyVault diagnostic currently stores logs in AzureDiagnostics table which stores logs for multiple services. 11 | // Filter on ResourceProvider for logs specific to a service. 
12 | AzureDiagnostics 13 | | where ResourceProvider =="MICROSOFT.KEYVAULT" 14 | | summarize count() by bin(TimeGenerated, 1h), OperationName // Aggregate by hour 15 | | render timechart 16 | ``` 17 | -------------------------------------------------------------------------------- /ResourceTypes/KeyVault/How-fast-is-this-KeyVault-serving-requests.md: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: How fast is this KeyVault serving requests? 3 | // Description: Line chart showing trend of request duration over time using different aggregations. 4 | // Category: Azure Resources 5 | // Topic: General 6 | // ResourceType: KeyVault // may not be needed, support for smart saving, helps if saved in solutions folder 7 | // Keywords: 8 | 9 | ``` 10 | // DurationMs contains the time, in milliseconds, that the request took to be served. 11 | AzureDiagnostics 12 | | where ResourceProvider =="MICROSOFT.KEYVAULT" 13 | | summarize avg(DurationMs), percentiles(DurationMs, 50, 95) by bin(TimeGenerated, 1h) // Different aggregations of request duration, per hour 14 | | render timechart 15 | ``` -------------------------------------------------------------------------------- /ResourceTypes/KeyVault/What-changes-occurred-last-month.md: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: What changes occurred last month? 3 | // Description: Lists all update and patch requests from the last 30 days 4 | // Category: Azure Resources, Audit 5 | // Topic: General 6 | // ResourceType: KeyVault // may not be needed, support for smart saving, helps if saved in solutions folder 7 | // Keywords: 8 | 9 | ``` 10 | // KeyVault diagnostic currently stores logs in AzureDiagnostics table which stores logs for multiple services. 11 | // Filter on ResourceProvider for logs specific to a service. 12 | AzureDiagnostics 13 | | where TimeGenerated > ago(30d) // Time range specified in the query. Overrides time picker in portal. 14 | | where ResourceProvider =="MICROSOFT.KEYVAULT" 15 | | where OperationName == "VaultPut" or OperationName == "VaultPatch" 16 | | sort by TimeGenerated desc 17 | ``` -------------------------------------------------------------------------------- /ResourceTypes/KeyVault/Who-is-calling-this-KeyVault.md: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: Who is calling this KeyVault? 3 | // Description: List of callers identified by their IP address with their request count. 4 | // Category: Azure Resources, Security 5 | // Topic: General 6 | // ResourceType: KeyVault // may not be needed, support for smart saving, helps if saved in solutions folder 7 | 8 | ``` 9 | // KeyVault diagnostic currently stores logs in AzureDiagnostics table which stores logs for multiple services. 10 | // Filter on ResourceProvider for logs specific to a service. 
11 | AzureDiagnostics 12 | | where ResourceProvider =="MICROSOFT.KEYVAULT" 13 | | summarize count() by CallerIPAddress 14 | ``` 15 | -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/All-logs-with-level-error.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: All logs with level "Error" 3 | // Description: Shows all logs that is likely to have negatively impacted your job. 4 | // Category: Analytics 5 | // Topic: General 6 | // ResourceType: Stream Analytics jobs 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and Level == "Error"  11 | | project TimeGenerated, Resource, Region_s, OperationName, properties_s, Level -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/All-output-data-errors.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: All output data errors 3 | // Description: Shows all errors that occurred while writing the results of the query to the outputs in your job. 4 | // Category: Analytics 5 | // Topic: Output data Errors 6 | // ResourceType: Stream Analytics jobs 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and parse_json(properties_s).DataErrorType in ("OutputDataConversionError.RequiredColumnMissing", "OutputDataConversionError.ColumnNameInvalid", "OutputDataConversionError.TypeConversionError", "OutputDataConversionError.RecordExceededSizeLimit", "OutputDataConversionError.DuplicateKey") 11 | | project TimeGenerated, Resource, Region_s, OperationName, properties_s, Level -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/Events-that-arrived-early.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: Events that arrived early 3 | // Description: Shows errors due to events where difference between Application time and Arrival time is greater than 5 minutes. 4 | // Category: Analytics 5 | // Topic: Input data Errors 6 | // ResourceType: Stream Analytics jobs 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and parse_json(properties_s).DataErrorType == "EarlyInputEvent" 11 | | project TimeGenerated, Resource, Region_s, OperationName, properties_s, Level 12 | -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/Events-that-arrived-late.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: Events that arrived late 3 | // Description: Shows errors due to events where difference between application time and arrival time is greater than the late arrival policy. 
4 | // Category: Analytics 5 | // Topic: Input data Errors 6 | // ResourceType: Stream Analytics jobs 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and parse_json(properties_s).DataErrorType == "LateInputEvent" 11 | | project TimeGenerated, Resource, Region_s, OperationName, properties_s, Level 12 | -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/Events-that-arrived-out-of-order.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: Events that arrived out of order 3 | // Description: Shows errors due to events that arrive out of order according to the out-of-order policy. 4 | // Category: Analytics 5 | // Topic: Input data Errors 6 | // ResourceType: Stream Analytics jobs 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and parse_json(properties_s).DataErrorType == "OutOfOrderEvent" 11 | | project TimeGenerated, Resource, Region_s, OperationName, properties_s, Level -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/List-all-ColumnNameInvalid-errors.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: List all ColumnNameInvalid errors 3 | // Description: Shows errors where the output record produced by your job has a column name that doesn't map to a column in your output. 4 | // Category: Analytics 5 | // Topic: Output data Errors 6 | // ResourceType: Stream Analytics jobs 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and parse_json(properties_s).DataErrorType == "OutputDataConversionError.ColumnNameInvalid" 11 | | project TimeGenerated, Resource, Region_s, OperationName, properties_s, Level -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/List-all-DuplicateKey-errors.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: List all DuplicateKey errors 3 | // Description: Shows errors where the output record produced by job contains a column with the same name as a System column 4 | // Category: Analytics 5 | // Topic: Output data Errors 6 | // ResourceType: Stream Analytics jobs 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and parse_json(properties_s).DataErrorType == "OutputDataConversionError.DuplicateKey" 11 | | project TimeGenerated, Resource, Region_s, OperationName, properties_s, Level -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/List-all-InvalidInputTimeStamp-errors.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: List all InvalidInputTimeStamp errors 3 | // Description: Shows errors caused due to events where value of the TIMESTAMP BY expression can't be converted to datetime 4 | // Category: Analytics 5 | // Topic: Input data Errors 6 | // ResourceType: Stream Analytics jobs 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and parse_json(properties_s).DataErrorType == "InvalidInputTimeStamp" 11 | | 
project TimeGenerated, Resource, Region_s, OperationName, properties_s, Level 12 | -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/List-all-InvalidInputTimeStampKey-errors.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: List all InvalidInputTimeStampKey errors 3 | // Description: Shows errors caused due to events where value of the TIMESTAMP BY OVER timestampColumn is NULL 4 | // Category: Analytics 5 | // Topic: Input data Errors 6 | // ResourceType: Stream Analytics jobs 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and parse_json(properties_s).DataErrorType == "InvalidInputTimeStampKey" 11 | | project TimeGenerated, Resource, Region_s, OperationName, properties_s, Level 12 | -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/List-all-RecordExceededSizeLimit-errors.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: List all RecordExceededSizeLimit errors 3 | // Description: Shows errors where the size of the output record produced by your job is greater than the supported output size. 4 | // Category: Analytics 5 | // Topic: Output data Errors 6 | // ResourceType: Stream Analytics jobs 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and parse_json(properties_s).DataErrorType == "OutputDataConversionError.RecordExceededSizeLimit" 11 | | project TimeGenerated, Resource, Region_s, OperationName, properties_s, Level -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/List-all-RequiredColumnMissing-errors.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: List all RequiredColumnMissing errors 3 | // Description: Shows all errors where the output record produced by your job has a missing column. 
4 | // Category: Analytics 5 | // Topic: Output data Errors 6 | // ResourceType: Stream Analytics jobs 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and parse_json(properties_s).DataErrorType == "OutputDataConversionError.RequiredColumnMissing" 11 | | project TimeGenerated, Resource, Region_s, OperationName, properties_s, Level -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/List-all-TypeConversionError-errors.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: List all TypeConversionError errors 3 | // Description: Shows errors where the output record produced by your job has a column can't be converted to a valid type in the output 4 | // Category: Analytics 5 | // Topic: Output data Errors 6 | // ResourceType: Stream Analytics jobs 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and parse_json(properties_s).DataErrorType == "OutputDataConversionError.TypeConversionError" 11 | | project TimeGenerated, Resource, Region_s, OperationName, properties_s, Level -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/List-all-administrative-operations.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: List all administrative operations 3 | // Description: Operations performed on the job such as start, stop, add input and output etc. 4 | // Category: Analytics 5 | // Topic: General 6 | // ResourceType: Stream Analytics jobs 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and Category == "Authoring"  11 | | project TimeGenerated, Resource, Region_s, OperationName, properties_s, Level -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/List-all-input-data-errors.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: List all input data errors 3 | // Description: Shows all errors that occurred while processing the data from inputs. 4 | // Category: Analytics 5 | // Topic: Input data Errors 6 | // ResourceType: Stream Analytics jobs // may no be needed, support for smart saving, helps if saved in solutions folder 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and parse_json(properties_s).Type == "DataError" 11 | | project TimeGenerated, Resource, Region_s, OperationName, properties_s, Level 12 | -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/List-all-input-deserialization-errors.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: List all input deserialization errors 3 | // Description: Shows errors caused due to malformed events that could not be deserialized by the job. 
4 | // Category: Analytics 5 | // Topic: Input data Errors 6 | // ResourceType: Stream Analytics jobs 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and parse_json(properties_s).DataErrorType in ("InputDeserializerError.InvalidData", "InputDeserializerError.TypeConversionError", "InputDeserializerError.MissingColumns", "InputDeserializerError.InvalidHeader", "InputDeserializerError.InvalidCompressionType") 11 | | project TimeGenerated, Resource, Region_s, OperationName, properties_s, Level 12 | -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/Operations-that-have-Failed.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: Operations that have "Failed" 3 | // Description: Shows all operations on your job that have resulted in a failure 4 | // Category: Analytics 5 | // Topic: General 6 | // ResourceType: Stream Analytics jobs 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and status_s == "Failed"  11 | | project TimeGenerated, Resource, Region_s, OperationName, properties_s, Level -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/Output-Throttling-logs.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: Output Throttling logs (cosmos db, pbi, event hubs) 3 | // Description: Shows all instances where writing to one of your outputs was throttled by the destination service. 4 | // Category: Analytics 5 | // Topic: General 6 | // ResourceType: Stream Analytics jobs 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and parse_json(properties_s).Type in ("DocumentDbOutputAdapterWriteThrottlingError", "EventHubOutputAdapterEventHubThrottlingError", "PowerBIServiceThrottlingError", "PowerBIServiceThrottlingError") 11 | | project TimeGenerated, Resource, Region_s, OperationName, properties_s, Level -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/Reference-data-logs.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: Reference data logs 3 | // Description: Shows all diagnostic logs related to reference data operations. 
4 | // Category: Analytics 5 | // Topic: General 6 | // ResourceType: Stream Analytics jobs 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and parse_json(properties_s).Type in ("ReferenceDataLoadSnapshot" , "ReferenceDataInputAdapterError", "ReferenceDataInputAdapterProcessBlobFailed", "ReferenceDataInputAdapterTransientError", "ReferenceDataScanSnapshots") 11 | | project TimeGenerated, Resource, Region_s, OperationName, properties_s, Level -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/Summary-of-Failed-operations-in-the-last-7-days.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: Summary of 'Failed' operations in the last 7 days 3 | // Description: Summary of 'Failed' operations in the last 7 days 4 | // Category: Analytics 5 | // Topic: General 6 | // ResourceType: // optional, name should match Azure service, format? Stream Analytics jobs 7 | // Solution: // optional, name should match Log Analytics solution, format? 8 | // labels: // open list of values, separate by commas 9 | 10 | 11 | AzureDiagnostics 12 | | where TimeGenerated > ago(7d) //last 7 days 13 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and status_s == "Failed"  14 | | summarize Count=count(), sampleEvent=any(properties_s) by JobName=Resource -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/Summary-of-all-data-errors-in-the-last-7-days.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: Summary of all data errors in the last 7 days 3 | // Description: Summary of all data errors in the last 7 days 4 | // Category: Analytics 5 | // Topic: General 6 | // ResourceType: Stream Analytics jobs 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where TimeGenerated > ago(7d) //last 7 days 11 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and parse_json(properties_s).Type == "DataError" 12 | | extend DataErrorType = tostring(parse_json(properties_s).DataErrorType) 13 | | summarize Count=count(), sampleEvent=any(properties_s) by DataErrorType, JobName=Resource -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/Summary-of-all-errors-in-the-last-7-days.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: Summary of all errors in the last 7 days 3 | // Description: Summary of all errors in the last 7 days 4 | // Category: Analytics 5 | // Topic: General 6 | // ResourceType: Stream Analytics jobs 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where TimeGenerated > ago(7d) //last 7 days 11 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" 12 | | extend ErrorType = tostring(parse_json(properties_s).Type) 13 | | summarize Count=count(), sampleEvent=any(properties_s) by ErrorType, JobName=Resource -------------------------------------------------------------------------------- /ResourceTypes/StreamAnalytics/Transient-input-and-output-errors.txt: -------------------------------------------------------------------------------- 1 | // Author: someone@somewhere.com 2 | // Display name: Transient input and output errors 3 | // Description: Shows all errors related to input and output that are 
intermittent in nature. 4 | // Category: Analytics 5 | // Topic: General 6 | // ResourceType: Stream Analytics jobs 7 | // Keywords: 8 | 9 | AzureDiagnostics 10 | | where ResourceProvider == "MICROSOFT.STREAMANALYTICS" and parse_json(properties_s).Type in ("AzureFunctionOutputAdapterTransientError", "BlobInputAdapterTransientError", "DataLakeOutputAdapterTransientError", "DocumentDbOutputAdapterTransientError", "EdgeHubOutputAdapterEdgeHubTransientError", "EventHubBasedInputInvalidOperationTransientError", "EventHubBasedInputOperationCanceledTransientError", "EventHubBasedInputTimeoutTransientError", "EventHubBasedInputTransientError", "EventHubOutputAdapterEventHubTransientError", "InputProcessorTransientFailure", "OutputProcessorTransientError", "ReferenceDataInputAdapterTransientError", "ServiceBusOutputAdapterTransientError", "TableOutputAdapterTransientError") 11 | | project TimeGenerated, Resource, Region_s, OperationName, properties_s, Level -------------------------------------------------------------------------------- /application-insights.md: -------------------------------------------------------------------------------- 1 | ## Application Insights examples 2 |
3 | ##### [Find exceptions that failed requests](application-insights/find-exceptions-that-failed-requests.md) 4 | ##### [Percentiles of request duration](application-insights/percentiles-of-request-duration-in-the-last-24-hours.md) 5 | ##### [Daily request duration compared with monthly average](requests-duration-compared-with-monthly-average.md) 6 | ##### [Top 10 custom events](application-insights/top-10-custom-events.md) 7 | ##### [Top 10 countries by traffic](application-insights/pie-chart-the-top-10-countries-by-traffic.md) 8 | -------------------------------------------------------------------------------- /application-insights/custom-SLA-and-breaches.md: -------------------------------------------------------------------------------- 1 | ## Defining a custom SLA, and adherence to it 2 | #### #count #render #timechart #summarize 3 | 4 | 5 | The following query defines a custom Service Level Agreement (SLA): a request is considered as meeting SLA if it completes in under 3 seconds. The query then adds a static SLA target of 99.9% of requests needing to meet the SLA. The two are plotted on a time chart. 6 | 7 | ```AIQL 8 | requests 9 | | where timestamp > ago(7d) 10 | | summarize slaMet = count(duration < 3000), slaBreached = count(duration >= 3000), totalCount = count() by bin(timestamp, 1h) 11 | | extend SLAIndex = slaMet * 100.0 / totalCount 12 | | extend SLA = 99.9 13 | | project SLAIndex, timestamp, SLA 14 | | render timechart 15 | ``` 16 | 17 | The output will look like this: 18 |

[Image: a custom SLA metric definition and its trend over time compared to SLA target]

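If a single attainment number for the whole period is more useful than the hourly trend, the same calculation can be collapsed into one row. This is a minimal sketch, not part of the original example, assuming the same `requests` schema (`duration` in milliseconds); the column names `SLAIndex` and `SLATarget` are illustrative:

```AIQL
requests
| where timestamp > ago(7d)
| summarize slaMet = countif(duration < 3000), totalCount = count() // single row for the whole period
| extend SLAIndex = slaMet * 100.0 / totalCount                     // percentage of requests meeting the SLA
| project SLAIndex, SLATarget = 99.9
```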
-------------------------------------------------------------------------------- /application-insights/find-exceptions-that-failed-requests.md: -------------------------------------------------------------------------------- 1 | ## Inner join: find exception related to failed requests 2 | #### #join #project #bin #round 3 | 4 | 5 | This example finds which exceptions are related to failed requests in the past 24 hours. 6 | 7 | ```AIQL 8 | requests 9 | | where timestamp > ago(24h) and success=="False" 10 | | join kind=inner (exceptions 11 | | where timestamp > ago(24h) ) on operation_Id 12 | | project type, method, requestName = name, requestDuration = duration 13 | ``` -------------------------------------------------------------------------------- /application-insights/percentiles-of-request-duration-in-the-last-24-hours.md: -------------------------------------------------------------------------------- 1 | ## Calculate multiple percentiles timechart 2 | #### #percentiles #render #timechart #bin 3 | 4 | 5 | The following example calculates the 50th, 90th, and 95th percentiles of request duration, in the past 24 hours: 6 | 7 | ```AIQL 8 | requests 9 | | where timestamp > ago(24h) 10 | | summarize percentiles(duration, 50, 90, 95) by bin(timestamp, 1h) 11 | | render timechart 12 | ``` 13 | 14 | The output will look like this: 15 |

[Image: percentiles]

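To break latency down by operation instead of by time, the same aggregation can group by request name. This is a sketch under the same assumptions (the standard `requests` schema, where `name` holds the request name), and is not part of the original example:

```AIQL
requests
| where timestamp > ago(24h)
| summarize percentiles(duration, 50, 90, 95) by name // one row per request name
| top 10 by percentile_duration_95                    // slowest operations at the 95th percentile
```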
-------------------------------------------------------------------------------- /application-insights/pie-chart-the-top-10-countries-by-traffic.md: -------------------------------------------------------------------------------- 1 | ## Top 10 countries by traffic 2 | #### #count #render #piechart 3 | 4 | 5 | The following example counts the number of requests received from each country (aka "traffic") in the past 24 hours. Traffic distribution from the top 10 countries is displayed in a pie chart. 6 | 7 | ```AIQL 8 | requests 9 | | where timestamp > ago(24h) 10 | | summarize count() by client_CountryOrRegion 11 | | top 10 by count_ 12 | | render piechart 13 | ``` 14 | 15 | The output will look like this: 16 | 

[Image: top 10 countries by traffic]

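To report each country's share of traffic as a percentage rather than a raw count, one possible variant is shown below. It assumes the same schema and is not part of the original example; the helper names `total`, `requestCount`, and `percentage` are illustrative:

```AIQL
let total = toscalar(requests | where timestamp > ago(24h) | count);
requests
| where timestamp > ago(24h)
| summarize requestCount = count() by client_CountryOrRegion
| extend percentage = round(requestCount * 100.0 / total, 2) // share of all requests in the period
| top 10 by requestCount
```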
-------------------------------------------------------------------------------- /application-insights/requests-duration-compared-with-monthly-average.md: -------------------------------------------------------------------------------- 1 | ## Daily request duration compared with monthly average 2 | #### #count #make-series #timechart #summarize 3 | 4 | 5 | The following query calculates and charts the current (last day) and monthly average request duration. 6 | 7 | ``` 8 | let response_last_month = toscalar( 9 | requests 10 | | where timestamp between(ago(7d) .. ago(1d)) 11 | | summarize value=avg(duration) 12 | ); 13 | requests 14 | | where timestamp >= ago(1d) 15 | | make-series avg_duration = avg(duration) default=0 on timestamp in range(ago(1d), now(), 15m) 16 | | extend monthly_avg = repeat(response_last_month, array_length(avg_duration)) 17 | | render timechart 18 | ``` 19 | 20 | The output will look like this: 21 |

[Image: Request duration over the last day compared with the monthly average]

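Note that the baseline named `response_last_month` above is actually computed over the preceding week (`between(ago(7d) .. ago(1d))`). If a true 30-day baseline is preferred, a minimal change — shown here as an assumption, not part of the original example — is to widen the window in the first `let` statement:

```
let response_last_month = toscalar(
    requests
    | where timestamp between(ago(30d) .. ago(1d)) // 30-day baseline instead of the past week
    | summarize value=avg(duration)
);
```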
-------------------------------------------------------------------------------- /application-insights/top-10-custom-events.md: -------------------------------------------------------------------------------- 1 | ## Top 10 custom events 2 | #### #dcount #render #barchart 3 | 4 | 5 | The following query retrieves all customEvents recorded in the past 24 hours and calculates, for each event name, the total count and the number of distinct users. 6 | The 10 names with the highest count are selected, and their calculated values (count and distinct users) are displayed in a bar chart: 7 | 8 | ```AIQL 9 | customEvents 10 | | where timestamp >= ago(24h) 11 | | summarize dcount(user_Id), count() by name 12 | | top 10 by count_ 13 | | render barchart 14 | ``` 15 | 16 | The output will look like this: 17 | 

[Image: top 10 custom events]

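To see how event volume trends over time rather than as a single 24-hour snapshot, a possible follow-up query (assuming the same `customEvents` schema; not part of the original example) charts daily counts per event name over the last week:

```AIQL
customEvents
| where timestamp >= ago(7d)
| summarize count() by bin(timestamp, 1d), name // daily count per event name
| render timechart
```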
-------------------------------------------------------------------------------- /command-usage.md: -------------------------------------------------------------------------------- 1 | ## Command usage examples 2 |
3 | ##### [Bar chart unstacked](command-usage/bar-chart-unstacked.md) 4 | ##### [Pie chart top 10 items](command-usage/pie-chart-top-10-items.md) 5 | ##### [Calculate multiple percentiles timechart](command-usage/calculate-multiple-percentiles-timechart.md) 6 | ##### [Join kind - inner](command-usage/join-kind-inner.md) 7 | ##### [Join - match on two fields](command-usage/join-on-two-field-matches.md) 8 | -------------------------------------------------------------------------------- /command-usage/bar-chart-unstacked.md: -------------------------------------------------------------------------------- 1 | ## Bar chart unstacked 2 | #### #count #render #barchart #unstacked 3 | 4 | 5 | The following example count alerts severity, per day, and creates a bar chart 6 | ```OQL 7 | Alert 8 | | where TimeGenerated > ago(7d) 9 | | summarize count() by AlertSeverity, bin(TimeGenerated, 1d) 10 | | render barchart kind=unstacked 11 | ``` -------------------------------------------------------------------------------- /command-usage/calculate-multiple-percentiles-timechart.md: -------------------------------------------------------------------------------- 1 | ## Calculate multiple percentiles timechart 2 | #### #percentiles #render #timechart #bin 3 | 4 | 5 | The following example calculates the 50th, 90th, and 95th percentiles of request duration, in the past 24 hours: 6 | 7 | ```AIQL 8 | requests 9 | | where timestamp > ago(24h) 10 | | summarize percentiles(duration, 50, 90, 95) by bin(timestamp, 1h) 11 | | render timechart 12 | ``` 13 | 14 | The output will look like this: 15 |

![percentiles](images/percentiles.png)
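To see which operations drive the tail, rather than how it moves over time, the same aggregation can be grouped by request name instead of by time bin (a sketch, not part of the original example; it assumes the default percentile_duration_95 output column name that percentiles() produces):

```AIQL
requests
| where timestamp > ago(24h)
// Percentiles per operation rather than per hour
| summarize percentiles(duration, 50, 90, 95) by name
// Surface the operations with the slowest 95th percentile
| top 10 by percentile_duration_95
```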

-------------------------------------------------------------------------------- /command-usage/join-kind-inner.md: -------------------------------------------------------------------------------- 1 | ## Inner join: find exceptions related to failed requests 2 | #### #join #project #bin #round 3 | 4 | 5 | This example finds which exceptions are related to failed requests in the past 24 hours. 6 | 7 | ```AIQL 8 | requests 9 | | where timestamp > ago(24h) and success=="False" 10 | | join kind=inner (exceptions 11 | | where timestamp > ago(24h) ) on operation_Id 12 | | project type, method, requestName = name, requestDuration = duration 13 | ``` -------------------------------------------------------------------------------- /command-usage/join-on-two-field-matches.md: -------------------------------------------------------------------------------- 1 | ## Join: match on two fields 2 | #### #join #let #project #bin #round 3 | 4 | 5 | This example finds related protection status records and heartbeat records, matched on both Computer and time. 6 | Note that the time field is rounded down to the minute, using a runtime bin calculation: `round_time=bin(TimeGenerated, 1m)`. 7 | 8 | ```OQL 9 | let protection_data = ProtectionStatus 10 | | project Computer, DetectionId, round_time=bin(TimeGenerated, 1m); 11 | let heartbeat_data = Heartbeat 12 | | project Computer, Category, round_time=bin(TimeGenerated, 1m); 13 | protection_data | join (heartbeat_data) on Computer, round_time 14 | ``` 15 | 16 | -------------------------------------------------------------------------------- /command-usage/pie-chart-top-10-items.md: -------------------------------------------------------------------------------- 1 | ## Top 10 countries by traffic 2 | #### #count #render #piechart 3 | 4 | 5 | The following example counts the number of requests received from each country (aka "traffic") in the past 24 hours. The traffic distribution of the top 10 countries is displayed in a pie chart. 6 | 7 | ```AIQL 8 | requests 9 | | where timestamp > ago(24h) 10 | | summarize count() by client_CountryOrRegion 11 | | top 10 by count_ 12 | | render piechart 13 | ``` 14 | 15 | The output will look like this: 16 |

![top 10 countries by traffic](images/top-10-countries-by-traffic.png)
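If a table of traffic shares is preferred over a chart, the same counts can be turned into percentages (a hedged sketch, not part of the original example):

```AIQL
// Total request count over the same window, used as the denominator
let total = toscalar(requests | where timestamp > ago(24h) | count);
requests
| where timestamp > ago(24h)
| summarize requestCount = count() by client_CountryOrRegion
// Share of total traffic, as a percentage
| extend pct_of_traffic = round(100.0 * requestCount / total, 2)
| top 10 by requestCount
```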

-------------------------------------------------------------------------------- /examples.md: -------------------------------------------------------------------------------- 1 | # Examples 2 | 3 |

Find the example you're looking for through the categories (on the left) or try the search (on the right).

4 | 5 |

Missing an example? Let us know and we'll add it!

-------------------------------------------------------------------------------- /github-repo.md: -------------------------------------------------------------------------------- 1 | ## Examples GitHub repo 2 | 3 | All examples are also available in our open GitHub repo: [MicrosoftDocs/LogAnalyticsExamples](https://github.com/MicrosoftDocs/LogAnalyticsExamples). 4 |
5 |
6 |

Looking for an example but can't find it? Let us know and we'll add it.

-------------------------------------------------------------------------------- /images/SLA.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MicrosoftDocs/LogAnalyticsExamples/0a132e1e6b8c3c162b73fc92aad979f568cdd7f8/images/SLA.png -------------------------------------------------------------------------------- /images/availability-hours.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MicrosoftDocs/LogAnalyticsExamples/0a132e1e6b8c3c162b73fc92aad979f568cdd7f8/images/availability-hours.png -------------------------------------------------------------------------------- /images/availability-rate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MicrosoftDocs/LogAnalyticsExamples/0a132e1e6b8c3c162b73fc92aad979f568cdd7f8/images/availability-rate.png -------------------------------------------------------------------------------- /images/cohorts.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MicrosoftDocs/LogAnalyticsExamples/0a132e1e6b8c3c162b73fc92aad979f568cdd7f8/images/cohorts.png -------------------------------------------------------------------------------- /images/current-duration-vs-monthly-average.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MicrosoftDocs/LogAnalyticsExamples/0a132e1e6b8c3c162b73fc92aad979f568cdd7f8/images/current-duration-vs-monthly-average.png -------------------------------------------------------------------------------- /images/percentiles.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MicrosoftDocs/LogAnalyticsExamples/0a132e1e6b8c3c162b73fc92aad979f568cdd7f8/images/percentiles.png -------------------------------------------------------------------------------- /images/rolling-mau.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MicrosoftDocs/LogAnalyticsExamples/0a132e1e6b8c3c162b73fc92aad979f568cdd7f8/images/rolling-mau.png -------------------------------------------------------------------------------- /images/top-10-countries-by-traffic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MicrosoftDocs/LogAnalyticsExamples/0a132e1e6b8c3c162b73fc92aad979f568cdd7f8/images/top-10-countries-by-traffic.png -------------------------------------------------------------------------------- /images/top-10-custom-events.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MicrosoftDocs/LogAnalyticsExamples/0a132e1e6b8c3c162b73fc92aad979f568cdd7f8/images/top-10-custom-events.png -------------------------------------------------------------------------------- /images/user-stickiness.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MicrosoftDocs/LogAnalyticsExamples/0a132e1e6b8c3c162b73fc92aad979f568cdd7f8/images/user-stickiness.png -------------------------------------------------------------------------------- /log-analytics.md: -------------------------------------------------------------------------------- 1 | ## Log Analytics examples 2 |
3 | 4 | ### Across all data types 5 | ##### [Chart the record-count per table in the last 5 hours](log-analytics/chart-the-record-count-per-table-in-the-last-5-hours.md) 6 | ##### [Count all logs collected over the last hour, per type](log-analytics/count-all-logs-collected-over-the-last-hour-per-type.md) 7 | 8 | ### No specific data type 9 | ##### [Calculate the duration of a reported state, logged continuously](log-analytics/calculate-state-duration.md) 10 | ##### [Exclude a range of IPs from results](log-analytics/exclude-IP-range.md) 11 | 12 | ### AzureDiagnostics 13 | ##### [Count Azure diagnostics records per category](log-analytics/count-azure-diagnostics-records-per-category.md) 14 | ##### [Get a random record for each unique category](log-analytics/get-a-random-record-for-each-unique-category.md) 15 | ##### [Get the latest record per category](log-analytics/get-the-latest-record-per-category.md) 16 | 17 | ### Usage 18 | ##### [Calculate the average size of perf usage reports per computer](log-analytics/calculate-the-average-size-of-perf-usage-reports-per-computer.md) 19 | ##### [Timechart latency percentiles 50 and 95](log-analytics/timechart-latency-percentiles-50-and-95.md) 20 | ##### [Usage of computers today](log-analytics/usage-of-computers-today.md) 21 | 22 | ### Heartbeat 23 | ##### [Chart a week-over-week view of the number of computers sending data](log-analytics/chart-a-week-over-week-view-of-the-number-of-computers-sending-data.md) 24 | ##### [Find stale computers](log-analytics/find-stale-computers.md) 25 | ##### [Get the latest heartbeat record per computer IP](log-analytics/get-latest-heartbeat-record-per-computer-ip.md) 26 | ##### [Match protection and heartbeat records](log-analytics/match-protection-status-records-with-heartbeat-records.md) 27 | ##### [Calculate availability rate for connected servers](log-analytics/server-availability-rate.md) 28 | 29 | ### Updates 30 | ##### [Computers still missing updates](log-analytics/computers-still-missing-updates.md) 31 | 32 | ### ProtectionStatus 33 | ##### [Computers with non-reporting protection status duration](log-analytics/computers-with-non-reporting-protection-status-duration.md) 34 | ##### [Match protection and heartbeat records](log-analytics/match-protection-status-records-with-heartbeat-records.md) 35 | 36 | ### NetworkMonitoring 37 | ##### [Computers with unhealthy latency](log-analytics/computers-with-unhealthy-latency.md) 38 | 39 | ### Perf 40 | ##### [Correlate computer memory and CPU](log-analytics/correlate-computer-memory-and-cpu.md) 41 | ##### [Perf CPU Utilization graph per computer](log-analytics/perf-cpu-utilization-graph-per-computer.md) 42 | 43 | ### Alert 44 | ##### [Count alerts by severity per day](log-analytics/count-and-chart-alerts-severity-per-day.md) 45 | ##### [Get a numbered list of the latest alerts](log-analytics/get-a-numbered-list-of-the-latest-alerts.md) 46 | 47 | ### SecurityEvent 48 | ##### [Count security events by activity ID](log-analytics/count-security-events-by-activity-id.md) 49 | ##### [Count security events related to permissions](log-analytics/count-security-events-related-to-permissions.md) 50 | ##### [Find accounts that failed to logon from computers on which we identify a security detection](log-analytics/find-accounts-that-failed-to-logon-from-computers-on-which-we-identify-a-security-detection.md) 51 | ##### [Is my security data available](log-analytics/is-my-security-data-available.md) 52 | ##### [Parse activity name and
ID](log-analytics/parse-activity-name-and-id.md) 53 | ##### [Processes that used explicit credentials](log-analytics/pie-chart-explicit-credentials-processes.md) 54 | ##### [Top 5 running processes in the last 3 days](log-analytics/top-5-running-processes-in-the-last-3-days.md) 55 | ##### [Find repeating failed login attempts by the same account from different IPs](log-analytics/find-repeating-failed-login-attempts-by-the-same-account-from-different-ips.md) 56 | ##### [Find user accounts that failed to login over 5 times in 1 day](log-analytics/find-user-accounts-that-failed-to-login-over-5-times-in-1-day.md) 57 | 58 | ### Event 59 | ##### [Search application-level events described as Cryptographic](log-analytics/search-application-level-events-described-as-cryptographic.md) 60 | ##### [Search events related to unmarshaling](log-analytics/search-events-related-to-unmarshaling.md) 61 | -------------------------------------------------------------------------------- /log-analytics/calculate-state-duration.md: -------------------------------------------------------------------------------- 1 | ## Calculate the duration of a VM state, logged continuously 2 | #### #datatable #sort #prev 3 | 4 | This example reviews status reports of VMs, which are either Running or Waiting, and can change from one to the other several times, and calculates the waiting time of each VM. 5 | ```OQL 6 | datatable (VMName:string, Status:string, TimeGenerated:datetime) 7 | [ 8 | "VM1", "Running", datetime('2018-11-13T19:27:16.000'), 9 | "VM1", "Running", datetime('2018-11-13T19:32:17.000'), 10 | "VM2", "Running", datetime('2018-11-13T19:31:10.000'), 11 | "VM2", "Waiting", datetime('2018-11-13T19:29:17.000'), 12 | "VM1", "Waiting", datetime('2018-11-13T19:28:14.000'), 13 | "VM1", "Waiting", datetime('2018-11-13T19:29:15.000'), 14 | "VM1", "Waiting", datetime('2018-11-13T19:30:19.000'), 15 | "VM2", "Waiting", datetime('2018-11-13T19:25:12.000'), 16 | "VM2", "Running", datetime('2018-11-13T19:26:10.000'), 17 | ] 18 | | sort by VMName asc, TimeGenerated asc 19 | | extend Status_changed = (VMName != prev(VMName) or Status != prev(Status)) 20 | | where Status_changed == true 21 | | extend Waiting_time = iff(Status=="Running" and prev(Status)=="Waiting", tostring(TimeGenerated-prev(TimeGenerated)), "null") 22 | ``` 23 | 24 | VMName |Status |TimeGenerated |Status_changed |Waiting_time 25 | -------------|-------------|------------------------|---------------|-------------- 26 | VM1 |Running |"2018-11-13T19:27:16Z" |true |null 27 | VM1 |Waiting |"2018-11-13T19:28:14Z" |true |null 28 | VM1 |Running |"2018-11-13T19:32:17Z" |true |"00:04:03" 29 | VM2 |Waiting |"2018-11-13T19:25:12Z" |true |null 30 | VM2 |Running |"2018-11-13T19:26:10Z" |true |"00:00:58" 31 | VM2 |Waiting |"2018-11-13T19:29:17Z" |true |null 32 | VM2 |Running |"2018-11-13T19:31:10Z" |true |"00:01:53" 33 | -------------------------------------------------------------------------------- /log-analytics/calculate-the-average-size-of-perf-usage-reports-per-computer.md: -------------------------------------------------------------------------------- 1 | ## Calculate the average size of perf usage reports per computer 2 | #### #avg #sort #barchart 3 | 4 | 5 | This example calculates the average size of perf usage reports per computer, over the last 3 hours. 6 | The results are shown in a bar chart. 
7 | ```OQL 8 | Usage 9 | | where TimeGenerated > ago(3h) 10 | | where DataType == "Perf" 11 | | where QuantityUnit == "MBytes" 12 | | summarize avg(Quantity) by Computer 13 | | sort by avg_Quantity desc nulls last 14 | | render barchart 15 | ``` -------------------------------------------------------------------------------- /log-analytics/chart-a-week-over-week-view-of-the-number-of-computers-sending-data.md: -------------------------------------------------------------------------------- 1 | ## Chart a week-over-week view of the number of computers sending data 2 | #### #startofweek #dcount #barchart 3 | 4 | 5 | The following example charts the number of distinct computers that sent heartbeats, each week. 6 | 7 | ```OQL 8 | Heartbeat 9 | | where TimeGenerated >= startofweek(ago(21d)) 10 | | summarize dcount(Computer) by endofweek(TimeGenerated) | render barchart kind=default 11 | ``` -------------------------------------------------------------------------------- /log-analytics/chart-the-record-count-per-table-in-the-last-5-hours.md: -------------------------------------------------------------------------------- 1 | ## Chart the record-count per table in the last 5 hours 2 | #### #union #withsource #count #timechart 3 | 4 | 5 | The following example collects all records of all tables from the last 5 hours, and counts how many records there were in each table, at each point in time. 6 | The results are shown in a timechart. 7 | 8 | ```OQL 9 | union withsource=sourceTable * 10 | | where TimeGenerated > ago(5h) 11 | | summarize count() by bin(TimeGenerated,10m), sourceTable 12 | | render timechart 13 | ``` -------------------------------------------------------------------------------- /log-analytics/computers-still-missing-updates.md: -------------------------------------------------------------------------------- 1 | ## Computers Still Missing Updates 2 | #### #makeset #let #dcount #in 3 | 4 | 5 | The following example shows a list of computers that were missing one or more critical updates a few days ago and are still missing updates. 6 | ```OQL 7 | let ComputersMissingUpdates3DaysAgo = Update 8 | | where TimeGenerated between (ago(3d)..ago(2d)) 9 | | where Classification == "Critical Updates" and UpdateState != "Not needed" and UpdateState != "NotNeeded" 10 | | summarize makeset(Computer); 11 | 12 | Update 13 | | where TimeGenerated > ago(1d) 14 | | where Classification == "Critical Updates" and UpdateState != "Not needed" and UpdateState != "NotNeeded" 15 | | where Computer in (ComputersMissingUpdates3DaysAgo) 16 | | summarize UniqueUpdatesCount = dcount(Product) by Computer, OSType 17 | ``` -------------------------------------------------------------------------------- /log-analytics/computers-with-non-reporting-protection-status-duration.md: -------------------------------------------------------------------------------- 1 | ## Computers with non-reporting protection status duration 2 | #### #count #min #max #join #extend 3 | 4 | 5 | The following example lists computers that had at least one "Not Reporting" protection status. 6 | It also measures the duration they were in this status (assuming it's a single event, not several "fragmentations" in reporting).
7 | ```OQL 8 | ProtectionStatus 9 | | where ProtectionStatus == "Not Reporting" 10 | | summarize count(), startNotReporting = min(TimeGenerated), endNotReporting = max(TimeGenerated) by Computer, ProtectionStatusDetails 11 | | join ProtectionStatus on Computer 12 | | summarize lastReporting = max(TimeGenerated), startNotReporting = any(startNotReporting), endNotReporting = any(endNotReporting) by Computer 13 | | extend durationNotReporting = endNotReporting - startNotReporting 14 | ``` -------------------------------------------------------------------------------- /log-analytics/computers-with-unhealthy-latency.md: -------------------------------------------------------------------------------- 1 | ## Computers with unhealthy latency 2 | #### #distinct 3 | 4 | 5 | The following example creates a list of distinct computers with unhealthy latency. 6 | ```OQL 7 | NetworkMonitoring 8 | | where LatencyHealthState <> "Healthy" 9 | | where Computer != "" 10 | | distinct Computer 11 | ``` -------------------------------------------------------------------------------- /log-analytics/correlate-computer-memory-and-cpu.md: -------------------------------------------------------------------------------- 1 | ## Join computer perf records to correlate memory and CPU 2 | #### #let #datetime #project #join #inner #avg #max 3 | 4 | 5 | This example correlates a given computer's perf records and charts two series (average CPU and maximum memory) in 30-minute bins. 6 | 7 | ```OQL 8 | let StartTime = now()-5d; 9 | let EndTime = now()-4d; 10 | Perf 11 | | where CounterName == "% Processor Time" 12 | | where TimeGenerated > StartTime and TimeGenerated < EndTime 13 | | project TimeGenerated, Computer, cpu=CounterValue 14 | | join kind= inner ( 15 | Perf 16 | | where CounterName == "% Used Memory" 17 | | where TimeGenerated > StartTime and TimeGenerated < EndTime 18 | | project TimeGenerated , Computer, mem=CounterValue 19 | ) on TimeGenerated, Computer 20 | | summarize avgCpu=avg(cpu), maxMem=max(mem) by TimeGenerated bin=30m 21 | | render timechart 22 | ``` -------------------------------------------------------------------------------- /log-analytics/count-all-logs-collected-over-the-last-hour-per-type.md: -------------------------------------------------------------------------------- 1 | ## Count all logs collected over the last hour, per type 2 | #### #search #count #barchart 3 | 4 | 5 | The following example searches everything reported in the last hour and counts the records of each table, using the system column $table. 6 | The results are displayed in a bar chart.
7 | 8 | ```OQL 9 | search * 10 | | where TimeGenerated > ago(1h) 11 | | summarize CountOfRecords = count() by $table 12 | | render barchart 13 | ``` -------------------------------------------------------------------------------- /log-analytics/count-and-chart-alerts-severity-per-day.md: -------------------------------------------------------------------------------- 1 | ## Count and chart alerts severity, per day 2 | #### #count #render #barchart #bin 3 | 4 | 5 | The following example creates an unstacked bar chart of alert count by severity, per day: 6 | ```OQL 7 | Alert 8 | | where TimeGenerated > ago(7d) 9 | | summarize count() by AlertSeverity, bin(TimeGenerated, 1d) 10 | | render barchart kind=unstacked 11 | ``` -------------------------------------------------------------------------------- /log-analytics/count-azure-diagnostics-records-per-category.md: -------------------------------------------------------------------------------- 1 | ## Count Azure diagnostics records per category 2 | #### #count 3 | 4 | 5 | Count Azure diagnostics records for each unique category. 6 | 7 | ```OQL 8 | AzureDiagnostics 9 | | where TimeGenerated > ago(1d) 10 | | summarize count() by Category 11 | ``` -------------------------------------------------------------------------------- /log-analytics/count-security-events-by-activity-id.md: -------------------------------------------------------------------------------- 1 | ## Count security events by activity ID 2 | #### #project #parse #count 3 | 4 | 5 | This example relies on the fixed structure of the Activity column: `<activity ID> - <activity description>`. 6 | It parses the Activity value into 2 new columns, and counts the occurrences of each activity ID. 7 | ```OQL 8 | SecurityEvent 9 | | where TimeGenerated > ago(30m) 10 | | project Activity 11 | | parse Activity with activityID " - " activityDesc 12 | | summarize count() by activityID 13 | ``` -------------------------------------------------------------------------------- /log-analytics/count-security-events-related-to-permissions.md: -------------------------------------------------------------------------------- 1 | ## Count security events related to permissions 2 | #### #countif #has 3 | 4 | 5 | This example shows the number of SecurityEvent records in which the Activity column contains the whole term "Permissions". 6 | The query applies to records created over the last 30 minutes. 7 | ```OQL 8 | SecurityEvent 9 | | where TimeGenerated > ago(30m) 10 | | summarize EventCount = countif(Activity has "Permissions") 11 | ``` -------------------------------------------------------------------------------- /log-analytics/exclude-IP-range.md: -------------------------------------------------------------------------------- 1 | ## Exclude IP range and specific IPs from results 2 | #### #range #parse_ipv4 #binary_and #binary_shift_right #datatable 3 | 4 | This example shows how to exclude an IP range and a set of additional specific IPs from query results. 5 | ```OQL 6 | let generateIpRange = (fromIp: string, toIp: string){ 7 | //convert input parameters to integers, and generate all integers between them to produce the full required IP range 8 | range IP from parse_ipv4(fromIp) to parse_ipv4(toIp) step 1 9 | //...
then convert this range back into IPv4 strings 10 | | extend LSB = binary_and(IP, 255), IP = binary_shift_right(IP, 8) 11 | | extend thirdBit = binary_and(IP, 255), IP = binary_shift_right(IP, 8) 12 | | extend secondBit = binary_and(IP, 255), IP = binary_shift_right(IP, 8) 13 | | extend MSB = binary_and(IP, 255) 14 | | project IpStr=strcat(MSB, ".", secondBit, ".", thirdBit, ".", LSB) 15 | }; 16 | let additionalIps = 17 | //define any additional IPs outside of the initial range that we might want to exclude 18 | datatable(IpStr:string) 19 | [ 20 | "77.222.135.70", 21 | "68.233.194.2", 22 | "68.233.194.3", 23 | "68.233.194.6", 24 | "68.233.194.7", 25 | "68.233.194.61", 26 | ]; 27 | let excludedIps = //put together in this table a superset of all the things we want to exclude 28 | union 29 | generateIpRange("104.210.0.0", "104.210.255.255"), 30 | additionalIps; 31 | Heartbeat 32 | | where TimeGenerated >= ago(1h) 33 | | where ComputerIP != "" and ComputerIP !in (excludedIps) 34 | | project TimeGenerated, ComputerIP 35 | ``` -------------------------------------------------------------------------------- /log-analytics/find-accounts-that-failed-to-logon-from-computers-on-which-we-identify-a-security-detection.md: -------------------------------------------------------------------------------- 1 | ## Find accounts that failed to logon from computers on which we identify a security detection 2 | #### #makeset #let #in #count 3 | 4 | 5 | This example finds and counts accounts that failed to logon from computers on which we identify a security detection. 6 | ```OQL 7 | let detections = toscalar(SecurityDetection 8 | | summarize makeset(Computer)); 9 | SecurityEvent 10 | | where Computer in (detections) and EventID == 4625 // 4625 - failed login 11 | | summarize count() by Account 12 | ``` -------------------------------------------------------------------------------- /log-analytics/find-repeating-failed-login-attempts-by-the-same-account-from-different-ips.md: -------------------------------------------------------------------------------- 1 | ## Find repeating failed login attempts by the same account from different IPs 2 | #### #dcount #makeset #sort 3 | 4 | 5 | The following example finds accounts with failed login attempts from more than 5 different IPs in the last 6 hours, and lists those IPs. 6 | 7 | ```OQL 8 | SecurityEvent 9 | | where AccountType == "User" and EventID == 4625 and TimeGenerated > ago(6h) 10 | | summarize IPCount = dcount(IpAddress), makeset(IpAddress) by Account 11 | | where IPCount > 5 12 | | sort by IPCount desc 13 | ``` -------------------------------------------------------------------------------- /log-analytics/find-stale-computers.md: -------------------------------------------------------------------------------- 1 | ## Find stale computers 2 | #### #max #isnotempty 3 | 4 | 5 | The following example finds computers that were active in the last day but did not send heartbeats in the last hour.
6 | 7 | ```OQL 8 | Heartbeat 9 | | where TimeGenerated > ago(1d) 10 | | summarize LastHeartbeat = max(TimeGenerated) by Computer 11 | | where isnotempty(Computer) 12 | | where LastHeartbeat < ago(1h) 13 | ``` -------------------------------------------------------------------------------- /log-analytics/find-user-accounts-that-failed-to-login-over-5-times-in-1-day.md: -------------------------------------------------------------------------------- 1 | ## Find user accounts that failed to login over 5 times in 1 day 2 | #### #let #join #project-away 3 | 4 | 5 | The following example identifies user accounts that failed to login more than 5 times in the last day, and when they last attempted it. 6 | 7 | 8 | ```OQL 9 | let timeframe = 1d; 10 | SecurityEvent 11 | | where TimeGenerated > ago(timeframe) 12 | | where AccountType == 'User' and EventID == 4625 // 4625 - failed login 13 | | summarize failed_login_attempts=count(), latest_failed_login=arg_max(TimeGenerated, Account) by Account 14 | | where failed_login_attempts > 5 15 | | project-away Account1 16 | ``` 17 | 18 | Using *join* and *let* statements, we can check whether the same suspicious accounts were later able to login successfully: 19 | ```OQL 20 | let timeframe = 1d; 21 | let suspicious_users = 22 | SecurityEvent 23 | | where TimeGenerated > ago(timeframe) 24 | | where AccountType == 'User' and EventID == 4625 // 4625 - failed login 25 | | summarize failed_login_attempts=count(), latest_failed_login=arg_max(TimeGenerated, Account) by Account 26 | | where failed_login_attempts > 5 27 | | project-away Account1; 28 | let suspicious_users_that_later_logged_in = 29 | suspicious_users 30 | | join kind=innerunique ( 31 | SecurityEvent 32 | | where TimeGenerated > ago(timeframe) 33 | | where AccountType == 'User' and EventID == 4624 // 4624 - successful login 34 | | summarize latest_successful_login=arg_max(TimeGenerated, Account) by Account 35 | ) on Account 36 | | extend was_login_after_failures = iif(latest_successful_login>latest_failed_login, 1, 0) 37 | | where was_login_after_failures == 1 38 | ; 39 | suspicious_users_that_later_logged_in 40 | ``` -------------------------------------------------------------------------------- /log-analytics/get-a-numbered-list-of-the-latest-alerts.md: -------------------------------------------------------------------------------- 1 | ## Get a numbered list of the latest alerts 2 | #### #let #top #serialize #extend #project 3 | 4 | 5 | This example gets the 100 latest alerts, adds a row number column to each row, and projects 5 interesting fields: 6 | ```OQL 7 | let maxRows = 100; 8 | Alert 9 | | top maxRows by TimeGenerated 10 | | serialize 11 | | extend Id = row_number() 12 | | project Id, TimeGenerated , AlertName , AlertDescription , AlertSeverity 13 | ``` -------------------------------------------------------------------------------- /log-analytics/get-a-random-record-for-each-unique-category.md: -------------------------------------------------------------------------------- 1 | ## Get a random record for each unique category 2 | #### #any 3 | 4 | 5 | Get a single random AzureDiagnostics record for each unique category. 6 | 7 | ```OQL 8 | AzureDiagnostics 9 | | where TimeGenerated > ago(1d) 10 | | summarize any(*) by Category 11 | ``` -------------------------------------------------------------------------------- /log-analytics/get-latest-heartbeat-record-per-computer-ip.md: -------------------------------------------------------------------------------- 1 | ## Get the latest heartbeat record per
computer IP 2 | #### #arg_max 3 | 4 | 5 | This example returns the last heartbeat record for each computer IP. 6 | ```OQL 7 | Heartbeat 8 | | summarize arg_max(TimeGenerated, *) by ComputerIP 9 | ``` -------------------------------------------------------------------------------- /log-analytics/get-the-latest-record-per-category.md: -------------------------------------------------------------------------------- 1 | ## Get the latest record per category 2 | #### #arg_max 3 | 4 | 5 | Get the latest AzureDiagnostics record in each unique category. 6 | 7 | ```OQL 8 | AzureDiagnostics 9 | | where TimeGenerated > ago(1d) 10 | | summarize arg_max(TimeGenerated, *) by Category 11 | ``` -------------------------------------------------------------------------------- /log-analytics/is-my-security-data-available.md: -------------------------------------------------------------------------------- 1 | ## Is my security data available 2 | #### #count 3 | 4 | 5 | Data exploration often starts with a data availability check. 6 | This example shows the number of SecurityEvent records in the last 30 minutes: 7 | ```OQL 8 | SecurityEvent 9 | | where TimeGenerated > ago(30m) 10 | | count 11 | ``` -------------------------------------------------------------------------------- /log-analytics/match-protection-status-records-with-heartbeat-records.md: -------------------------------------------------------------------------------- 1 | ## Match protection status records with heartbeat records 2 | #### #join #let #project #bin #round 3 | 4 | 5 | This example finds related protection status records and heartbeat records, matched on both Computer and time. 6 | Note that the time field is rounded down to the minute, using a runtime bin calculation: `round_time=bin(TimeGenerated, 1m)`. 7 | 8 | ```OQL 9 | let protection_data = ProtectionStatus 10 | | project Computer, DetectionId, round_time=bin(TimeGenerated, 1m); 11 | let heartbeat_data = Heartbeat 12 | | project Computer, Category, round_time=bin(TimeGenerated, 1m); 13 | protection_data | join (heartbeat_data) on Computer, round_time 14 | ``` 15 | 16 | -------------------------------------------------------------------------------- /log-analytics/parse-activity-name-and-id.md: -------------------------------------------------------------------------------- 1 | ## Parse activity name and ID 2 | #### #project #parse #split 3 | 4 | 5 | The 2 examples below rely on the fixed structure of the Activity column: `<activity ID> - <activity description>`. 6 | 7 | The first example uses the *parse* operator to assign values to 2 new columns: activityID and activityDesc. 8 | ```OQL 9 | SecurityEvent 10 | | take 100 11 | | project Activity 12 | | parse Activity with activityID " - " activityDesc 13 | ``` 14 | 15 | This example uses the *split* operator to create an array of separate values: 16 | ```OQL 17 | SecurityEvent 18 | | take 100 19 | | project Activity 20 | | extend activityArr=split(Activity, " - ") 21 | | project Activity , activityArr, activityId=activityArr[0] 22 | ``` -------------------------------------------------------------------------------- /log-analytics/perf-cpu-utilization-graph-per-computer.md: -------------------------------------------------------------------------------- 1 | ## Perf CPU Utilization graph per computer 2 | #### #startswith #avg #timechart 3 | 4 | 5 | This example calculates and charts the CPU utilization of computers whose names start with "Contoso".
6 | 7 | ```OQL 8 | Perf 9 | | where TimeGenerated > ago(4h) 10 | | where Computer startswith "Contoso" 11 | | where CounterName == @"% Processor Time" 12 | | summarize avg(CounterValue) by Computer, bin(TimeGenerated, 15m) 13 | | render timechart 14 | ``` -------------------------------------------------------------------------------- /log-analytics/pie-chart-explicit-credentials-processes.md: -------------------------------------------------------------------------------- 1 | ## Pie chart explicit credentials processes 2 | #### #count #render #piechart 3 | 4 | 5 | The following example shows a pie chart of processes that used explicit credentials in the last week: 6 | ```OQL 7 | SecurityEvent 8 | | where TimeGenerated > ago(7d) 9 | // filter by id 4648 ("A logon was attempted using explicit credentials") 10 | | where EventID == 4648 11 | | summarize count() by Process 12 | | render piechart 13 | ``` -------------------------------------------------------------------------------- /log-analytics/search-application-level-events-described-as-cryptographic.md: -------------------------------------------------------------------------------- 1 | ## Search application-level events described as "Cryptographic" 2 | #### #search 3 | 4 | 5 | Search the Event table for records in which EventLog is "Application", and RenderedDescription contains "cryptographic" (case-insensitive). 6 | The query reviews records from the last 24 hours. 7 | 8 | ```OQL 9 | search in (Event) EventLog == "Application" and TimeGenerated > ago(24h) and RenderedDescription:"cryptographic" 10 | ``` -------------------------------------------------------------------------------- /log-analytics/search-events-related-to-unmarshaling.md: -------------------------------------------------------------------------------- 1 | ## Search events related to unmarshaling 2 | #### #search 3 | 4 | 5 | Search the Event and SecurityEvent tables for records that mention "unmarshaling". 6 | 7 | ```OQL 8 | search in (Event, SecurityEvent) "unmarshaling" 9 | ``` -------------------------------------------------------------------------------- /log-analytics/server-availability-rate.md: -------------------------------------------------------------------------------- 1 | ## Server availability rate 2 | #### #let #bin_at #countif #round 3 | 4 | 5 | Calculate server availability rate based on heartbeat records. Availability is defined as "at least 1 heartbeat per hour". 6 | So, if a server was available 98 of 100 hours, the availability rate is 98%. 7 | 8 | ```OQL 9 | let start_time=startofday(datetime("2018-03-01")); 10 | let end_time=now(); 11 | Heartbeat 12 | | where TimeGenerated > start_time and TimeGenerated < end_time 13 | | summarize heartbeat_per_hour=count() by bin_at(TimeGenerated, 1h, start_time), Computer 14 | | extend available_per_hour=iff(heartbeat_per_hour>0, true, false) 15 | | summarize total_available_hours=countif(available_per_hour==true) by Computer 16 | | extend total_number_of_buckets=round((end_time-start_time)/1h)+1 17 | | extend availability_rate=total_available_hours*100/total_number_of_buckets 18 | ``` 19 | 20 | 21 | Let's review each part in this example: 22 | The first 2 lines define variables, set to the desired start and end times. 23 | We then use these variables to limit the query to that time range: 24 | ``` 25 | ... | where TimeGenerated > start_time and TimeGenerated < end_time 26 | ``` 27 | 28 | Then we count the heartbeats reported by each computer, in buckets (bins) of 1 hour, starting at the start time: 29 | ``` 30 | ...
| summarize heartbeat_per_hour=count() by bin_at(TimeGenerated, 1h, start_time), Computer 31 | ``` 32 | 33 | Now we can see how many heartbeats were reported by each computer each hour. If the number is 0 we understand the computer was probably offline at that time. 34 | We use a new column to mark if a computer was available or not each hour: 35 | ``` 36 | ... | extend available_per_hour=iff(heartbeat_per_hour>0, true, false) 37 | ``` 38 | and then count the number of hours each computer was indeed "alive": 39 | ``` 40 | ... | summarize total_available_hours=countif(available_per_hour==true) by Computer 41 | ``` 42 | 43 | Note that this way we give a little leeway for missing heartbeat reports each hour. 44 | Instead of expecting a report every 5 or 10 minutes, we only mark a computer as "unavailable" if we didn't get any report from it during a full hour. 45 | 46 | At this point we get a number for each computer, something like this: 47 |

![server availability hours](images/availability-hours.png)

48 | 49 | So we know the number of hours each computer was available during the set time range. But what does that mean? How many hours were there altogether? 50 | 51 | Here's how we can calculate the total number of hours in the selected time range: 52 | ``` 53 | ... | extend total_number_of_buckets=round((end_time-start_time)/1h)+1 54 | ``` 55 | 56 | (There could be a better way to calculate the number of buckets, but this will do for the sake of the example.) 57 | 58 | Finally, we calculate the ratio between available hours and total hours: 59 | ``` 60 | ... | extend availability_rate=total_available_hours*100/total_number_of_buckets 61 | ``` 62 | and get this: 63 | 64 |

![server availability rate](images/availability-rate.png)
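As noted above, the bucket count is only an approximation. One hedged alternative (a sketch, not part of the original example) derives the total number of hourly buckets with datetime_diff, which makes the rounding explicit:

```OQL
let start_time = startofday(datetime("2018-03-01"));
let end_time = now();
// Number of whole-hour buckets in the range, counted explicitly
let total_hours = datetime_diff('hour', end_time, start_time) + 1;
Heartbeat
| where TimeGenerated > start_time and TimeGenerated < end_time
| summarize heartbeat_per_hour = count() by bin_at(TimeGenerated, 1h, start_time), Computer
| summarize total_available_hours = countif(heartbeat_per_hour > 0) by Computer
| extend availability_rate = round(total_available_hours * 100.0 / total_hours, 2)
```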

65 | 66 | -------------------------------------------------------------------------------- /log-analytics/timechart-latency-percentiles-50-and-95.md: -------------------------------------------------------------------------------- 1 | ## Timechart latency percentiles 50 and 95 2 | #### #percentiles #timechart 3 | 4 | 5 | This example calculates and charts the 50th and 95th percentiles of the reported AvgLatencyInSeconds, hour by hour, during the last 24 hours. 6 | 7 | ```OQL 8 | Usage 9 | | where TimeGenerated > ago(24h) 10 | | summarize percentiles(AvgLatencyInSeconds, 50, 95) by bin(TimeGenerated, 1h) 11 | | render timechart 12 | ``` -------------------------------------------------------------------------------- /log-analytics/top-5-running-processes-in-the-last-3-days.md: -------------------------------------------------------------------------------- 1 | ## Top 5 running processes in the last 3 days 2 | #### #let #count #render #timechart #in 3 | 4 | 5 | The following example shows a timeline of activity for the 5 most common processes, over the last 3 days. 6 | ```OQL 7 | // Find all processes that started in the last 3 days. ID 4688: A new process has been created. 8 | let RunProcesses = 9 | SecurityEvent 10 | | where TimeGenerated > ago(3d) 11 | | where EventID == "4688"; 12 | // Find the 5 processes that were run the most 13 | let Top5Processes = 14 | RunProcesses 15 | | summarize count() by Process 16 | | top 5 by count_; 17 | // Create a time chart of these 5 processes - hour by hour 18 | RunProcesses 19 | | where Process in (Top5Processes) 20 | | summarize count() by bin (TimeGenerated, 1h), Process 21 | | render timechart 22 | ``` -------------------------------------------------------------------------------- /log-analytics/usage-of-computers-today.md: -------------------------------------------------------------------------------- 1 | ## Usage of specific computers today 2 | #### #contains #sort 3 | 4 | 5 | This example retrieves Usage data from the last day for computers whose names contain the string "ContosoFile". 6 | The results are sorted by "TimeGenerated". 7 | 8 | ```OQL 9 | Usage 10 | | where TimeGenerated > ago(1d) 11 | | where Computer contains "ContosoFile" 12 | | sort by TimeGenerated desc nulls last 13 | ``` -------------------------------------------------------------------------------- /smart-analytics.md: -------------------------------------------------------------------------------- 1 | ## Smart Analytics - analyze application usage patterns 2 | 3 | ##### [Detect disruptions with regression analysis](smart-analytics/automated-detector-for-service-disruptions-based-on-app-trace-logs.md) 4 | ##### [Rolling monthly active users (MAU)](smart-analytics/sliding-window-calculations-rolling-mau.md) 5 | ##### [Measure "user stickiness"](smart-analytics/sliding-window-calculations-user-stickiness.md) 6 | ##### [Cohort analysis](smart-analytics/sliding-window-calculations-cohort-analysis.md) 7 | -------------------------------------------------------------------------------- /smart-analytics/automated-detector-for-service-disruptions-based-on-app-trace-logs.md: -------------------------------------------------------------------------------- 1 | ## Regression analysis: detect service disruptions using trace logs 2 | #### #make-series #series_fit_line #series_fit_2lines 3 | 4 | This example demonstrates how to create an automated detector for service disruptions, based exclusively on an application's trace logs.
5 | The detector seeks abnormal and sudden increases in the relative amount of Error/Warning traces in our application, compared to Info/Verbose traces. 6 | 7 | Two techniques are used to evaluate the service status based on trace logs data: 8 | 1. Conversion of semi-structured textual trace logs into a metric, representing the ratio between semantically positive/negative trace lines. 9 | 2. Advanced step-jump detection using time-series analysis, with a 2-line linear regression. 10 | 11 | ```AIQL 12 | let startDate = startofday(datetime("2017-02-01")); 13 | let endDate = startofday(datetime("2017-02-07")); 14 | let minRsquare = 0.8; // Tune the sensitivity of the detection sensor. Values close to 1 indicate very low sensitivity. 15 | 16 | // Count all Good (Verbose + Info) and Bad (Error + Fatal + Warning) traces, per day 17 | traces 18 | | where timestamp > startDate and timestamp < endDate 19 | | summarize 20 | Verbose = countif(severityLevel == 0), 21 | Info = countif(severityLevel == 1), 22 | Warning = countif(severityLevel == 2), 23 | Error = countif(severityLevel == 3), 24 | Fatal = countif(severityLevel == 4) by bin(timestamp, 1d) 25 | | extend Bad = (Error + Fatal + Warning), Good = (Verbose + Info) 26 | 27 | // Determine the ratio of bad traces, from the total 28 | | extend Ratio = (todouble(Bad) / todouble(Good + Bad))*10000 29 | | project timestamp , Ratio 30 | 31 | // Create a time series 32 | | make-series RatioSeries=any(Ratio) default=0 on timestamp in range(startDate , endDate -1d, 1d) by 'TraceSeverity' 33 | 34 | // Apply a 2-line regression to the time series 35 | | extend (RSquare2, SplitIdx, Variance2,RVariance2,LineFit2)=series_fit_2lines(RatioSeries) 36 | 37 | // Find out if our 2-line is trending up or down 38 | | extend (Slope,Interception,RSquare,Variance,RVariance,LineFit)=series_fit_line(LineFit2) 39 | 40 | // Check whether the line fit reaches the threshold, and if the spike represents an increase (rather than a decrease) 41 | | project PatternMatch = iff(RSquare2 > minRsquare and Slope>0, "Spike detected", "No Match") 42 | ``` -------------------------------------------------------------------------------- /smart-analytics/sliding-window-calculations-cohort-analysis.md: -------------------------------------------------------------------------------- 1 | ## Cohort analysis 2 | #### #let #union #project 3 | 4 | ### What is "cohort analysis"? 5 | Cohort analysis tracks the activity of specific groups of users (AKA "cohorts"). The users are grouped by the time they first used the service (e.g. "October 2017"). 6 | Cohort analysis attempts to measure how appealing a service is, by measuring the rate of returning users. 7 | 8 | ### Good to know 9 | When analyzing cohorts, expect to find a decrease in activity over the first tracked periods. 10 | Each cohort is titled by the week in which its members were observed for the first time. 11 | 12 | ### Cohort analysis example - 5 weeks 13 | In the following example we analyze the number of activities users perform over the course of 5 weeks, following their first use of the service. 14 | 15 | Labels: 16 | * r0 - the distinct count of members of the cohort in the first week when they were observed 17 | * r1 - the distinct count of members of the cohort that were also active in the week after they were first observed (or later) 18 | * r2 - ...
in the following week 19 | 20 | ```AIQL 21 | let startDate = startofweek(bin(datetime(2017-01-20T00:00:00Z), 1d)); 22 | let week = range Cohort from startDate to datetime(2017-03-01T00:00:00Z) step 7d; 23 | 24 | // For each user we find the first and last timestamp of activity 25 | let FirstAndLastUserActivity = (end:datetime) 26 | { 27 | customEvents 28 | | where customDimensions["sourceapp"]=="ai-loganalyticsui-prod" 29 | // Check 30 days back to see first time activity 30 | | where timestamp > startDate - 30d 31 | | where timestamp < end 32 | | summarize min=min(timestamp), max=max(timestamp) by user_AuthenticatedId 33 | }; 34 | 35 | let DistinctUsers = (cohortPeriod:datetime, evaluatePeriod:datetime) { 36 | toscalar ( 37 | FirstAndLastUserActivity(evaluatePeriod) 38 | // Find members of the cohort: only users that were observed in this period for the first time 39 | | where min >= cohortPeriod and min < cohortPeriod + 7d 40 | // Pick only the members that were active during the evaluated period or after 41 | | where max > evaluatePeriod - 7d 42 | | summarize dcount(user_AuthenticatedId)) 43 | }; 44 | 45 | week 46 | | where Cohort == startDate 47 | 48 | // Finally, calculate the desired metric for each cohort. In this sample we calculate distinct users but you can change 49 | // this to any other metric that would measure the engagement of the cohort members. 50 | | extend 51 | r0 = DistinctUsers(startDate, startDate+7d), 52 | r1 = DistinctUsers(startDate, startDate+14d), 53 | r2 = DistinctUsers(startDate, startDate+21d), 54 | r3 = DistinctUsers(startDate, startDate+28d), 55 | r4 = DistinctUsers(startDate, startDate+35d) 56 | | union (week | where Cohort == startDate + 7d 57 | | extend 58 | r0 = DistinctUsers(startDate+7d, startDate+14d), 59 | r1 = DistinctUsers(startDate+7d, startDate+21d), 60 | r2 = DistinctUsers(startDate+7d, startDate+28d), 61 | r3 = DistinctUsers(startDate+7d, startDate+35d) ) 62 | | union (week | where Cohort == startDate + 14d 63 | | extend 64 | r0 = DistinctUsers(startDate+14d, startDate+21d), 65 | r1 = DistinctUsers(startDate+14d, startDate+28d), 66 | r2 = DistinctUsers(startDate+14d, startDate+35d) ) 67 | | union (week | where Cohort == startDate + 21d 68 | | extend 69 | r0 = DistinctUsers(startDate+21d, startDate+28d), 70 | r1 = DistinctUsers(startDate+21d, startDate+35d) ) 71 | | union (week | where Cohort == startDate + 28d 72 | | extend 73 | r0 = DistinctUsers (startDate+28d, startDate+35d) ) 74 | 75 | // Calculate the retention percentage for each cohort by weeks 76 | | project Cohort, r0, r1, r2, r3, r4, 77 | p0 = r0/r0*100, 78 | p1 = todouble(r1)/todouble (r0)*100, 79 | p2 = todouble(r2)/todouble(r0)*100, 80 | p3 = todouble(r3)/todouble(r0)*100, 81 | p4 = todouble(r4)/todouble(r0)*100 82 | | sort by Cohort asc 83 | ``` 84 | 85 | The output will look like this: 86 |

![cohorts](images/cohorts.png)
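Before looking at retention percentages, it can help to check the raw size of each weekly cohort, i.e., how many users were seen for the first time in each week. The following is a self-contained sketch, not part of the original example; it reuses the same source app filter and the same 30-day lookback to decide whether a user is new:

```AIQL
let startDate = startofweek(datetime(2017-01-20T00:00:00Z));
customEvents
| where customDimensions["sourceapp"] == "ai-loganalyticsui-prod"
// Look back 30 days so users active before the analysis window are not counted as new
| where timestamp > startDate - 30d
| summarize firstSeen = min(timestamp) by user_AuthenticatedId
// Keep only users whose first activity falls inside the analysis window
| where firstSeen >= startDate
// Cohort size = number of new users per week of first activity
| summarize newUsers = dcount(user_AuthenticatedId) by Cohort = startofweek(firstSeen)
| sort by Cohort asc
```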

-------------------------------------------------------------------------------- /smart-analytics/sliding-window-calculations-rolling-mau.md: -------------------------------------------------------------------------------- 1 | ## Rolling MAU (Monthly Active Users) 2 | #### #make-series #fir #zip #mvexpand 3 | 4 | This example uses time-series analysis with the *fir* function (Finite Impulse Response), which is the basis for sliding-window computations. 5 | 6 | Let's assume our app is an online store that tracks users' activity through custom events. Specifically, we track two types of user activities: AddToCart and Checkout. 7 | 8 | We define "active users" as only those that performed a check-out at least once on a given day. 9 | The technique below allows you to set the criteria to the desired level of engagement using the "min_activity" value. 10 | 11 | ```AIQL 12 | let endtime = endofday(datetime(2017-03-01T00:00:00Z)); 13 | let window = 60d; 14 | let starttime = endtime-window; 15 | let interval = 1d; 16 | let user_bins_to_analyze = 28; 17 | 18 | // Create an array of filter coefficients for fir(). A list of '1' in our case will produce a simple sum. 19 | let moving_sum_filter = toscalar(range x from 1 to user_bins_to_analyze step 1 | extend v=1 | summarize makelist(v)); 20 | 21 | // Level of engagement. Users will be counted as engaged if they performed at least this number of activities. 22 | let min_activity = 1; 23 | 24 | customEvents 25 | | where timestamp > starttime 26 | | where customDimensions["sourceapp"] == "ai-loganalyticsui-prod" 27 | 28 | // We want to analyze users who actually checked-out in our web site 29 | | where (name == "Checkout") and user_AuthenticatedId <> "" 30 | 31 | // Create a series of activities per user 32 | | make-series UserClicks=count() default=0 on timestamp 33 | in range(starttime, endtime-1s, interval) by user_AuthenticatedId 34 | 35 | // Create a new column containing a sliding sum. 36 | // Passing 'false' as the last parameter to fir() prevents normalization of the calculation by the size of the window. 37 | // For each time bin in the *RollingUserClicks* column, the value is the aggregation of the user activities in the 38 | // 28 days that preceded the bin. For example, if a user was active once on 2016-12-31 and then inactive throughout 39 | // January, then the value will be 1 between 2016-12-31 -> 2017-01-28 and then 0s. 40 | | extend RollingUserClicks=fir(UserClicks, moving_sum_filter, false) 41 | 42 | // Use the zip() operator to pack the timestamp with the user activities per time bin 43 | | project User_AuthenticatedId=user_AuthenticatedId , RollingUserClicksByDay=zip(timestamp, RollingUserClicks) 44 | 45 | // Transpose the table and create a separate row for each combination of user and time bin (1 day) 46 | | mvexpand RollingUserClicksByDay 47 | | extend Timestamp=todatetime(RollingUserClicksByDay[0]) 48 | 49 | // Mark the users that qualify according to min_activity 50 | | extend RollingActiveUsersByDay=iff(toint(RollingUserClicksByDay[1]) >= min_activity, 1, 0) 51 | 52 | // And finally, count the number of users per time bin. 53 | | summarize sum(RollingActiveUsersByDay) by Timestamp 54 | 55 | // First 28 days contain partial data, so we filter them out. 56 | | where Timestamp > starttime + 28d 57 | 58 | // render as timechart 59 | | render timechart 60 | ``` 61 | 62 | The output will look like this: 63 |

![rolling mau](images/rolling-mau.png)
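For comparison, plain daily active users (with no sliding window) is a much shorter query; the fir()-based series above is what allows the same per-user data to be re-aggregated over any trailing window (a hedged sketch, not part of the original example):

```AIQL
customEvents
| where timestamp > ago(60d)
| where customDimensions["sourceapp"] == "ai-loganalyticsui-prod"
// Same definition of an active user: at least one Checkout per day
| where name == "Checkout" and user_AuthenticatedId != ""
| summarize dau = dcount(user_AuthenticatedId) by bin(timestamp, 1d)
| render timechart
```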

-------------------------------------------------------------------------------- /smart-analytics/sliding-window-calculations-user-stickiness.md: -------------------------------------------------------------------------------- 1 | ## User stickiness (daily/monthly active users) 2 | #### #make-series #fir #range #zip #mvexpand 3 | 4 | This example uses time-series analysis with the fir() function (Finite Impulse Response), which is the basis for sliding-window computations. 5 | 6 | Let's assume our app is an online store that tracks users' activity through custom events. Specifically, we track two types of user activities: AddToCart and Checkout. 7 | 8 | Let's turn the rolling MAU query from the previous example into a reusable function, and use it to calculate rolling user stickiness, defined as DAU/MAU. (Comments were dropped for brevity.) 9 | We define "active users" as only those that performed a check-out at least once on a given day. 10 | 11 | Parameters: 12 | * sliding_window_size - number of days in the sliding window 13 | * event_name - restrict to user activities by events with a specific name 14 | 15 | ```AIQL 16 | let rollingDcount = (sliding_window_size: int, event_name:string) 17 | { 18 | let endtime = endofday(datetime(2017-03-01T00:00:00Z)); 19 | let window = 90d; 20 | let starttime = endtime-window; 21 | let interval = 1d; 22 | let moving_sum_filter = toscalar(range x from 1 to sliding_window_size step 1 | extend v=1| summarize makelist(v)); 23 | let min_activity = 1; 24 | customEvents 25 | | where timestamp > starttime 26 | | where customDimensions["sourceapp"]=="ai-loganalyticsui-prod" 27 | | where (name == event_name) 28 | | where user_AuthenticatedId <> "" 29 | | make-series UserClicks=count() default=0 on timestamp 30 | in range(starttime, endtime-1s, interval) by user_AuthenticatedId 31 | | extend RollingUserClicks=fir(UserClicks, moving_sum_filter, false) 32 | | project User_AuthenticatedId=user_AuthenticatedId , RollingUserClicksByDay=zip(timestamp, RollingUserClicks) 33 | | mvexpand RollingUserClicksByDay 34 | | extend Timestamp=todatetime(RollingUserClicksByDay[0]) 35 | | extend RollingActiveUsersByDay=iff(toint(RollingUserClicksByDay[1]) >= min_activity, 1, 0) 36 | | summarize sum(RollingActiveUsersByDay) by Timestamp 37 | | where Timestamp > starttime + 28d 38 | }; 39 | 40 | // Use the moving_sum_filter with bin size of 28 to count MAU. 41 | rollingDcount(28, "Checkout") 42 | | join 43 | ( 44 | // Use the moving_sum_filter with bin size of 1 to count DAU. 45 | rollingDcount(1, "Checkout") 46 | ) 47 | on Timestamp 48 | | project sum_RollingActiveUsersByDay1 *1.0 / sum_RollingActiveUsersByDay, Timestamp 49 | | render timechart 50 | ``` 51 | 52 | The output will look like this: 53 |

![user stickiness](images/user-stickiness.png)
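Because the calculation is wrapped in a function, the same stickiness ratio can be pointed at the other tracked event. The following is a sketch of such a call, reusing the rollingDcount function defined in the example above (it is not part of the original):

```AIQL
// Assumes rollingDcount() from the example above is already defined in the same query.
rollingDcount(28, "AddToCart")
| join
(
    rollingDcount(1, "AddToCart")
)
on Timestamp
// DAU (right side, suffixed 1) divided by MAU (left side)
| project Timestamp, stickiness = sum_RollingActiveUsersByDay1 * 1.0 / sum_RollingActiveUsersByDay
| render timechart
```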

-------------------------------------------------------------------------------- /toc.md: -------------------------------------------------------------------------------- 1 | # [Log Analytics Examples](~/examples/log-analytics.md) 2 | # [Application Insights Examples](~/examples/application-insights.md) 3 | # [Command Usage](~/examples/command-usage.md) 4 | # [Smart Analytics](~/examples/smart-analytics.md) 5 | # [Examples GitHub repo](~/examples/github-repo.md) --------------------------------------------------------------------------------