├── .gitignore ├── LICENSE ├── README.md ├── groovy ├── confluence │ ├── attachmentCleaner.groovy │ ├── findAttachmentsOfFormat.groovy │ ├── pageVersionsCleaner.groovy │ └── trashAttachmentsByLabel.groovy └── jira │ ├── agile │ ├── cleanFutureSprintForClosedIssues.groovy │ ├── cleanUnUsedSprints.groovy │ └── findAgileBoardsWithoutFilterAndAct.groovy │ ├── apps │ ├── jira_adaptavist_jobs_groovyrunner.groovy │ └── jira_insight_objects_cleanup.groovy │ ├── destroy │ ├── deleteAllDashboards.groovy │ ├── removeAllInsightObjects.groovy │ ├── removeProjectInCategory.groovy │ └── removeTicketsFromSystem.groovy │ ├── fields │ ├── copyCustomFieldValues.groovy │ ├── duplucatedNameOfFields.groovy │ └── migrateCustomFieldDataOnToComment.groovy │ ├── filters │ ├── makeDashboardsAccessibleToAdminsJira7.groovy │ ├── makeDashboardsGlobalJira7.groovy │ ├── makeFiltersAccessibleToAdminsJira7.groovy │ └── makeFiltersGlobalJira7.groovy │ ├── group │ └── emptyGroupDetector.groovy │ ├── history │ ├── cleanChangeItems.groovy │ ├── deleteAssetsImportResults.groovy │ └── removeExactInformations.groovy │ ├── issues │ ├── README.md │ ├── elements │ │ ├── issueTypeCleaner.groovy │ │ └── statusCleaner.groovy │ ├── migrates │ │ ├── issueTypeMerge.groovy │ │ ├── mergeCustomFields.groovy │ │ └── resolutionMigration.groovy │ ├── permission │ │ └── 01_permission_schemes_cleaner.groovy │ ├── schemes │ │ ├── fieldSreenSchemeCleaner.groovy │ │ ├── issueTypeSchemesCleaner.groovy │ │ ├── issueTypeScreenSchemeCleaner.groovy │ │ └── notificationSchemesCleaner.groovy │ ├── screen │ │ ├── 01_screen_schemes_cleaner.groovy │ │ └── 02_screen_cleaner.groovy │ └── workflow │ │ ├── 01_workflow_schemes_cleaner.groovy │ │ └── 02_workflow_cleaner.groovy │ ├── links │ └── delete_remote_issue_links.groovy │ ├── mail │ └── reviewMailQueue.groovy │ ├── troubleshooting │ ├── comment │ │ └── deleteCommentForIssues.groovy │ ├── integrity_checker.groovy │ ├── jira_deindexer.groovy │ ├── jira_deindexer_default.groovy │ ├── 
jira_enable_upload_button.groovy │ ├── jira_index_db_tickets_count_checker.groovy │ └── jira_reindex_unresolved_tickets.groovy │ ├── users │ ├── cleanUserHistoryLastView.groovy │ ├── cleanupVotesForInactiveUsers.groovy │ ├── deactivateUserDuringInActivity.groovy │ ├── decreaseOfFavouritesForInActiveUsers.groovy │ ├── deleteInActiveAvatars.groovy │ ├── deleteInActiveUsersFromGroup.groovy │ ├── deleteInActiveUsersFromGroupAndRoles.groovy │ ├── deleteInActiveUsersFromRoles.groovy │ ├── deleteOrphanedAvatars.groovy │ ├── disableAutoWatchForUsers.groovy │ ├── userCleanRecentHistoryForExactUser.groovy │ ├── userCleanupAllSubscriptionsForInActiveUsers.groovy │ ├── userCleanupPrivateDashboardsInActiveUsers.groovy │ ├── userCleanupPrivateFiltersForInActiveUsers.groovy │ └── userCleanupStopWatchingInactiveUsers.groovy │ └── various │ ├── removeAllAttachmentsForQuery.groovy │ └── removeAllAttachmentsForTicket.groovy ├── kb └── AO_mapping_table.md ├── pom.xml ├── sh ├── adjust_language_pack_of_jira.sh ├── attachment_migration.sh ├── backup_confluence.sh ├── changeOrder.sh ├── conf │ └── nginx │ │ └── jira.example.io.conf ├── db_active_checker.sh ├── dd_dsync_checker.sh ├── macos │ ├── zsh_macos_cleanup.sh │ └── zsh_macos_cleanupup_account_policy_data.sh ├── os_cleanup.sh ├── upgrade_bamboo.sh ├── upgrade_bitbucket.sh ├── upgrade_confluence.sh ├── upgrade_jira.sh └── upgrade_jsm.sh └── sql ├── clickhouse ├── clickhouse_size_databases.sql └── clickhouse_size_rows_of_table.sql ├── confluence ├── confluence_audit_domain_emails.sql ├── confluence_clean_staled_drafts.sql ├── confluence_consistency_checker.sql ├── confluence_crowd_directory_fails.sql ├── confluence_crowd_group_user_directory_sync_fails.sql ├── confluence_detect_ancestors_table_problem.sql ├── confluence_duplicate_entry_checker.sql ├── confluence_fix_after_fixing_collation_on_postgresql.sql ├── confluence_how_fetch_page_information_containing_pagename.sql ├── confluence_links_checker.sql ├── 
confluence_recreate_constraints.sql ├── confluence_remove_all_shared_drafts_collaborative_editing_mssql.sql ├── confluence_remove_all_shared_drafts_collaborative_editing_mysql.sql ├── confluence_remove_all_shared_drafts_collaborative_editing_postgresql.sql ├── confluence_remove_space.sql ├── confluence_remove_synchrony_tables.sql ├── confluence_url_notification_containing_plugins_servlet.sql ├── confluence_user_directory_sync_fails_null_pointer_exception.sql ├── stash_crowd_ldap_user_directory_sync.sql ├── stash_crowd_ldap_user_directory_sync_fails.sql └── stats │ ├── confluence_calculate_number_of_content_in_trash.sql │ ├── confluence_content_with_longest_history.sql │ ├── confluence_find_the_size_of_all_page_drafts_per_space.sql │ ├── confluence_get_huge_attachments.sql │ ├── confluence_get_spaces_updated_date.sql │ ├── confluence_get_stats_of_user_uploaded_attachments.sql │ ├── confluence_get_who_viewed_attachments.sql │ └── confluence_total_size_of_attachemens.sql ├── jira ├── assets │ ├── jira_01_assets_detect_and_remove_duplicated_attribute_values.sql │ ├── jira_02_assets_detect_and_remove_attributes.sql │ ├── jira_assets_cleanup_duplicated_history_records.sql │ ├── jira_assets_detect_duplicated_history_records.sql │ ├── jira_assets_find_objects_object_type_attributes.sql │ ├── jira_assets_inconsitency_attribute_checker.sql │ ├── jira_dc_insight_data_cache_info.sql │ ├── jira_insight_clean_history_for_retention_policy.sql │ ├── jira_insight_consistency_checker.sql │ ├── jira_insight_freetext_it_text_value.sql │ ├── jira_insight_history_object_stats.sql │ ├── jira_insight_producing_invalid_cache.sql │ ├── jira_insight_producing_load_all_failed.sql │ ├── jira_insight_stats_for_understanding_requirements.sql │ ├── jira_insight_unable_delete_empty_schema.sql │ ├── jira_insight_unable_import_configuration.sql │ └── jira_insight_when_deleting_an_insight_schema_or_objects.sql ├── cleanup │ ├── jira_clean_events_batching_mail_queue.sql │ ├── 
jira_clean_jeti_audit_log_entry.sql │ ├── jira_clean_old_hipchat_data.sql │ ├── jira_clean_queue_of_automation.sql │ ├── jira_clean_stucked_cache_values.sql │ ├── jira_cleanup_alert_logs_off_app.sql │ ├── jira_cleanup_development_caches.sql │ ├── jira_cleanup_jmwe_execution_log.sql │ ├── jira_cluster_lock_status_checker.sql │ ├── jira_clusterlockstatus_table_cleaner_and_checker.sql │ ├── jira_detect_unused_issue_types.sql │ ├── jira_fails_to_start_with_too_many_rows_found_in_clustered_job.sql │ ├── jira_infosysta_jira_nfj.sql │ ├── jira_metadata_cleaner.sql │ └── jira_remove_offline_nodes.sql ├── jIra_integrity_checker_jira_issues_with_null_status.sql ├── jira_Jira_server_throws_NullPointerException_when_creating_new_issues.sql ├── jira_apps_plugins_issues_startup_fails.sql ├── jira_audit_log_list_users_showing_when_accounts_were_created_and_by_whom.sql ├── jira_authentication_sso_login_page_methods.sql ├── jira_automation_rule_deletion.sql ├── jira_automation_troubleshooting.sql ├── jira_boards_not_visible_once_filter_removed.sql ├── jira_cache_delegation_checker.sql ├── jira_cannot_render_webpanel_with_key.sql ├── jira_check_data_center_index_checks.sql ├── jira_check_non_existing_project_ids.sql ├── jira_check_user_mapping.sql ├── jira_consistency_checker_attachments.sql ├── jira_core_can_not_upload_sql_files.sql ├── jira_core_detect_workflow_without_a_specific_screen_and_transitions.sql ├── jira_dashboard_passed_more_than_value.sql ├── jira_dc_clean_unused_node_replication_index.sql ├── jira_dc_nodeassociation_problem.sql ├── jira_detect_crashes_workflow_errors.sql ├── jira_detect_default_value_of_custom_field.sql ├── jira_detect_duplicate_colour_of_agile_boards.sql ├── jira_duplicate_field_detector_on_screen.sql ├── jira_duplicate_key_value_errors_in_logs.sql ├── jira_duplicate_naming_of_fields.sql ├── jira_duplicated_epic_story_links_detector.sql ├── jira_duplicated_issue_key_fixer.sql ├── jira_fieldconfigschemeissuetype_checker.sql ├── 
jira_filter_recursive_filter_detector.sql ├── jira_find_by_whom_and_when_was_an_issue_transitioned_from_one_to_another_status.sql ├── jira_get_custom_field_changes_for_ticket.sql ├── jira_get_status_changes.sql ├── jira_identify_group_usage_change_it.sql ├── jira_integrity_checker.sql ├── jira_integrity_checker_full_reindex_makes_errors.sql ├── jira_integrity_checker_workflow_current_step_entries.sql ├── jira_integrity_checker_workflow_entry_states.sql ├── jira_jira_with_large_number_of_history_records.sql ├── jira_labels_broken.sql ├── jira_notification_instance_cleaner.sql ├── jira_notification_instance_cleaner_by_resolution.sql ├── jira_notification_instance_cleaner_by_status.sql ├── jira_notification_instance_cleaner_by_updates.sql ├── jira_notification_instance_cleaner_empty_messageid.sql ├── jira_notification_instance_cleaner_no_assiciated_tickets.sql ├── jira_null_pointer_exception_while_linking_issues.sql ├── jira_plugins_mapping_with_AO_tables.sql ├── jira_priority_color_issue.sql ├── jira_project_latest_activity_date_detector.sql ├── jira_project_shortcut_is_not_showing_in_some_projects.sql ├── jira_remove_custom_field_value_with_null.sql ├── jira_remove_orphan_records.sql ├── jira_retrieve_list_of_issue_assignees_from_project.sql ├── jira_service_desk_automation_rule_checker.sql ├── jira_service_desk_automation_rules_history_rules.sql ├── jira_service_desk_psmq_queue.sql ├── jira_service_desk_sla_audit_checker.sql ├── jira_service_desk_sla_cleanup.sql ├── jira_service_management_locating_webhook_url.sql ├── jira_service_management_sla_field.sql ├── jira_service_management_sql_error_for_psmq.sql ├── jira_software_agile_check_correct_collation.sql ├── jira_software_agile_field_detect_reindexing_fails.sql ├── jira_software_agile_lexorank.sql ├── jira_software_agile_lock_hash_issue.sql ├── jira_software_agile_sprint_showing_in_different_sprints.sql ├── jira_software_cluster_node_tables.sql ├── 
jira_software_identify_which_node_is_performing_balance_operation.sql ├── jira_user_management_rename_group_membership.sql ├── jira_xml_backup_fails_throw_datatypeconverter.sql ├── performance │ └── jira_automation_for_jira_explain_perfromance.sql ├── queries │ ├── jira_audit_domain_emails.sql │ ├── jira_find_all_attachments_for_issue_key.sql │ ├── jira_find_all_users_of_gravatar.sql │ ├── jira_find_attachments_per_type.sql │ ├── jira_get_attachments_location.sql │ ├── jira_get_charting_app_gadgets.sql │ ├── jira_get_date_time_watcher_being_added_into_task.sql │ ├── jira_get_fresh_tickets_uploaded_attachments.sql │ ├── jira_get_groups_in_projects.sql │ ├── jira_get_moved_tickets.sql │ ├── jira_get_projects_with_anonymous_access.sql │ └── jira_service_desk_get_sla_information.sql └── stats │ ├── jira_approvals_history_stats.sql │ ├── jira_attachments_count_and_disk_usage.sql │ ├── jira_attachments_created_per_month.sql │ ├── jira_custom_field_null_checker.sql │ ├── jira_custom_fields_values_status.sql │ ├── jira_find_all_issues_in_db.sql │ ├── jira_find_user_login_date_for_user_in_jira.sql │ ├── jira_general_total_stats.sql │ ├── jira_identify_users_in_jira_who_have_not_logged.sql │ ├── jira_issue_field_configuration_scheme_overview.sql │ ├── jira_issue_link_type_counts_per_issues.sql │ ├── jira_issue_link_type_counts_per_projects.sql │ ├── jira_issue_type_per_issues.sql │ ├── jira_issue_type_per_project.sql │ ├── jira_issues_create_cgrouped_by_month.sql │ ├── jira_jeti_notification_stats.sql │ ├── jira_project_activity_level_stats.sql │ ├── jira_project_activity_stats.sql │ ├── jira_project_attachment_statistics.sql │ ├── jira_project_tickets_count.sql │ ├── jira_resolutions_stats.sql │ ├── jira_retrive_list_of_users_assigned_to_project_roles.sql │ ├── jira_scriptrunner_jobs_in_json_pretty.sql │ ├── jira_service_job_config_as_clustered_job.sql │ ├── jira_service_job_config_data.sql │ ├── jira_service_management_export_fields_and_forms.sql │ ├── 
jira_service_management_extra_data_in_db.sql │ ├── jira_service_management_long_request_automation_rules.sql │ ├── jira_service_management_project_issue_type_request_type_name.sql │ ├── jira_service_management_request_types_stats.sql │ ├── jira_stats_of_push_notifications.sql │ ├── jira_stats_status_issue_counts.sql │ ├── jira_stats_user_creation_per_month.sql │ ├── jira_tickets_with_large_number_of_worklogs.sql │ ├── jira_time_in_status_querry.sql │ ├── jira_top_tickets_with_large_number_of_attachments.sql │ ├── jira_top_tickets_with_large_number_of_comments.sql │ ├── jira_user_creation_per_month_with_application_access.sql │ ├── jira_who_has_not_been_logged_in_last_quarter.sql │ └── jira_who_never_logged_in_jira.sql ├── mysql ├── mysql_detect_blob_columns.sql └── mysql_show_unused_space_table.sql └── postgresql ├── postgresql_cache_hit_ratio_information.sql ├── postgresql_check_vacuum_running_status.sql ├── postgresql_compare_rollbacks_commits.sql ├── postgresql_connection_find_open_connections.sql ├── postgresql_connection_time_stats.sql ├── postgresql_database_bloat.sql ├── postgresql_get_blocked_ps_queries.sql ├── postgresql_get_connection_active_and_remaining.sql ├── postgresql_get_high_number_of_rows_tables.sql ├── postgresql_get_index_usages.sql ├── postgresql_get_largest_tables.sql ├── postgresql_get_running_longer_than_5_min_queries.sql ├── postgresql_get_size_of_databases.sql ├── postgresql_get_size_of_index_stats.sql ├── postgresql_get_size_of_schema.sql ├── postgresql_get_size_of_tables_in_current_schema.sql ├── postgresql_index_cache_hit_rate.sql ├── postgresql_maint_reindex_vacuum_table_by_table.sql ├── postgresql_monitoring_blocking_transaction.sql ├── postgresql_non_used_indexes.sql └── postgresql_number_of_tables_by_the_number_of_rows.sql /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | *.iml 3 | 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | *.py[cod] 7 | *$py.class 8 | 
9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | MANIFEST 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | 53 | # Translations 54 | *.mo 55 | *.pot 56 | 57 | # Django stuff: 58 | *.log 59 | local_settings.py 60 | db.sqlite3 61 | 62 | # Flask stuff: 63 | instance/ 64 | .webassets-cache 65 | 66 | # Scrapy stuff: 67 | .scrapy 68 | 69 | # Sphinx documentation 70 | docs/_build/ 71 | 72 | # PyBuilder 73 | target/ 74 | 75 | # Jupyter Notebook 76 | .ipynb_checkpoints 77 | 78 | # pyenv 79 | .python-version 80 | 81 | # celery beat schedule file 82 | celerybeat-schedule 83 | 84 | # SageMath parsed files 85 | *.sage.py 86 | 87 | # Environments 88 | .env 89 | .venv 90 | env/ 91 | venv/ 92 | ENV/ 93 | env.bak/ 94 | venv.bak/ 95 | 96 | # Spyder project settings 97 | .spyderproject 98 | .spyproject 99 | 100 | # Rope project settings 101 | .ropeproject 102 | 103 | # mkdocs documentation 104 | /site 105 | 106 | # mypy 107 | .mypy_cache/ 108 | 109 | #personal 110 | secret 111 | preps -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2017 Gonchik Tsymzhitov 2 | 3 | Licensed under the Apache License, Version 2.0 (the «License»); 4 | you may not use this file except in compliance with 
the License. 5 | You may obtain a copy of the License at 6 | http://www.apache.org/licenses/LICENSE-2.0 7 | Unless required by applicable law or agreed to in writing, 8 | software distributed under the License is distributed on an "AS IS" BASIS, 9 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | See the License for the specific language governing permissions and limitations under the License. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | cleanup-scripts 2 | --------------- 3 | 4 | Scripts for cleaning up Atlassian Jira & Confluence (Groovy, Python and SQL). 5 | I hope these scripts will help you do a continuous cleanup. 6 | 7 | Reference: 8 | https://confluence.atlassian.com/adminjiraserver/project-screens-schemes-and-fields-938847220.html 9 | 10 | Usage 11 | ----- 12 | Most of the scripts have a variable `isPreview` on top. 13 | Don't forget to change the boolean value to execute. 14 | 15 | branch: 16 | `master` - is for Jira 8.x releases 17 | `jira-9` - is for Jira 9.x releases 18 | `jira-7` related to Jira 7.x releases. 19 | 20 | Development 21 | ----------- 22 | 23 | Feel free to provide a PR for any situation - improvements, new features, docs, typos etc. 24 | As it's licensed under Apache 2.0, feel free to modify and reuse it however you want. 25 | 26 | Additional 27 | ----------- 28 | Tool to check whether your Jira instance is affected by the bug JRA-47568; it should be used in conjunction with the SSLPoke tool.
29 | https://bitbucket.org/atlassianlabs/httpclienttest/src 30 | 31 | You can use for groovy runner - Scriptrunner, Mercury for Jira, MyGroovy, Insight, JMWE 32 | -------------------------------------------------------------------------------- /groovy/confluence/findAttachmentsOfFormat.groovy: -------------------------------------------------------------------------------- 1 | import groovy.transform.TimedInterrupt 2 | import java.util.concurrent.TimeUnit 3 | import com.atlassian.confluence.security.ContentPermission 4 | import com.atlassian.sal.api.transaction.TransactionCallback 5 | import com.atlassian.confluence.core.Modification 6 | 7 | def log = Logger.getLogger("com.gonchik.scripts.groovy.confluence.findAttachmentsOfFormat") 8 | log.setLevel(Level.DEBUG) 9 | 10 | class Data { 11 | static String textToFind = "TO_FIND" 12 | static String textToReplace = "TO_REPLACE" 13 | static long parentPageId = 11111111111 14 | } 15 | 16 | transactionTemplate.execute(new TransactionCallback() { 17 | 18 | @Override 19 | Object doInTransaction() { 20 | 21 | SaveContext saveContext = new DefaultSaveContext(true, true, false); 22 | 23 | def parentPage = pageManager.getPage(Data.parentPageId); 24 | 25 | def pages = parentPage.getDescendants(); 26 | 27 | pages.each { page -> 28 | 29 | if (page.getTitle().contains(Data.textToFind)) { 30 | 31 | log.debug("Space Key :: " + page.getSpaceKey() + " | Page Title :: " + page.getTitle() + " | Link to Page :: " + settingsManager.getGlobalSettings().getBaseUrl() + page.getUrlPath() + "\n") 32 | pageManager.saveNewVersion(page, new TextModification(), saveContext); 33 | } 34 | } 35 | 36 | return "Success!"; 37 | } 38 | } 39 | ); 40 | 41 | 42 | class TextModification implements Modification { 43 | 44 | @Override 45 | void modify(Page page) { 46 | 47 | page.setTitle(page.getTitle().replace(Data.textToFind, Data.textToReplace)) 48 | 49 | } 50 | } 51 | -------------------------------------------------------------------------------- 
/groovy/confluence/pageVersionsCleaner.groovy: -------------------------------------------------------------------------------- 1 | import com.atlassian.confluence.spaces.SpaceManager 2 | import com.atlassian.sal.api.component.ComponentLocator 3 | import com.atlassian.confluence.pages.AttachmentManager 4 | import com.atlassian.confluence.pages.PageManager 5 | import org.apache.log4j.Logger 6 | import org.apache.log4j.Level 7 | import java.util.ConcurrentModificationException 8 | import com.atlassian.confluence.pages.Page 9 | import com.atlassian.confluence.core.ContentEntityObject 10 | 11 | 12 | def log = Logger.getLogger("com.gonchik.scripts.groovy.confluence.pageVersionsCleaner") 13 | log.setLevel(Level.DEBUG) 14 | final PageManager pageManager = ComponentLocator.getComponent(PageManager) 15 | final SpaceManager spaceManager = ComponentLocator.getComponent(SpaceManager) 16 | 17 | boolean allSpaces = false 18 | def spaceKey = "CONDUCTOR50" 19 | def spaces = [] 20 | 21 | if (allSpaces) { 22 | spaces = spaceManager.getAllSpaces() 23 | } else { 24 | spaces = spaceManager.getSpace(spaceKey) 25 | } 26 | 27 | 28 | for (def space : spaces) { 29 | log.debug "Review space: " + space.name 30 | def pages = pageManager.getPages(space, true) 31 | for (Page page : pages) { 32 | log.debug "Review page: " + page.getNameForComparison() 33 | def pageVersions = pageManager.getVersionHistorySummaries(page); 34 | for (def hPage : pageVersions) { 35 | def coe = pageManager.getPage(hPage.id) 36 | if (coe == null) { 37 | continue 38 | } 39 | pageManager.removeHistoricalVersion(coe) 40 | } 41 | log.debug "End review pages." 
42 | } 43 | } 44 | log.debug "---- End review spaces ---" 45 | 46 | -------------------------------------------------------------------------------- /groovy/confluence/trashAttachmentsByLabel.groovy: -------------------------------------------------------------------------------- 1 | import com.atlassian.sal.api.component.ComponentLocator 2 | import com.atlassian.confluence.pages.AttachmentManager 3 | import com.atlassian.confluence.pages.PageManager 4 | import org.apache.log4j.Logger 5 | import org.apache.log4j.Level 6 | import java.util.ConcurrentModificationException 7 | import com.atlassian.confluence.pages.Page 8 | 9 | long ParentPageID = 0000000000000; // id of parent page 10 | String attachmentLabel = "LABEL_NAME"; // label name 11 | 12 | final PageManager pageManager = ComponentLocator.getComponent(PageManager) 13 | def parentPage = pageManager.getPage(Long.valueOf(ParentPageID)); 14 | 15 | List pages = parentPage.getDescendants(); 16 | for (Page page : pages) { 17 | List attachments = page.getAttachments(); 18 | for (Attachment attachment : attachments) { 19 | for (Label label : attachment.getLabels()) { 20 | if (label.getName().equalsIgnoreCase(attachmentLabel)) { 21 | attachment.trash(); 22 | } 23 | 24 | } 25 | } 26 | } -------------------------------------------------------------------------------- /groovy/jira/apps/jira_insight_objects_cleanup.groovy: -------------------------------------------------------------------------------- 1 | import com.atlassian.jira.component.ComponentAccessor 2 | import com.riadalabs.jira.plugins.insight.services.model.ObjectAttributeBean 3 | import com.riadalabs.jira.plugins.insight.services.model.ObjectBean 4 | import com.riadalabs.jira.plugins.insight.channel.external.api.facade.IQLFacade 5 | import com.riadalabs.jira.plugins.insight.channel.external.api.facade.ObjectFacade 6 | import com.onresolve.scriptrunner.runner.customisers.PluginModule 7 | import com.onresolve.scriptrunner.runner.customisers.WithPlugin 8 | 
@WithPlugin('com.riadalabs.jira.plugins.insight') 9 | 10 | @PluginModule 11 | IQLFacade iqlFacade 12 | 13 | @PluginModule 14 | ObjectFacade objFacade 15 | 16 | 17 | // Please, validate query using UI of Insight 18 | // def query = 'objectSchemaId = 4 ... etc ' 19 | 20 | 21 | try { 22 | iqlFacade.validateIQL(query) 23 | } catch (Exception e) { 24 | if (log) log.error("FAILED TO QUERY: ") 25 | return null 26 | } 27 | 28 | def objects = iqlFacade.findObjects(query) 29 | for (def object : objects) { 30 | try { 31 | objFacade.deleteObjectBean(object.getId()) 32 | } catch (Exception e) { 33 | // just skip 34 | } 35 | } 36 | // 37 | return "DONE" -------------------------------------------------------------------------------- /groovy/jira/destroy/deleteAllDashboards.groovy: -------------------------------------------------------------------------------- 1 | boolean isPreview = true 2 | /* 3 | * 4 | * Remove all dashboards 5 | * 6 | */ 7 | 8 | import com.atlassian.jira.component.ComponentAccessor 9 | import com.atlassian.jira.portal.PortalPage 10 | import com.atlassian.jira.portal.PortalPageManager 11 | import com.atlassian.jira.util.collect.EnclosedIterable.Functions 12 | import org.apache.log4j.Logger 13 | import org.apache.log4j.Level 14 | 15 | def log = Logger.getLogger("com.gonchik.scripts.groovy.deleteAllDashboards") 16 | log.setLevel(Level.DEBUG) 17 | def sb = new StringBuilder() 18 | 19 | if (isPreview) { 20 | sb.append("Please, note it works as preview. For execute change variable isPreview = false

\n") 21 | } else { 22 | sb.append("Please, note it works in execute mode

\n") 23 | } 24 | 25 | def BR = "
\n" 26 | log.debug("Start review dashboards") 27 | PortalPageManager ppm = ComponentAccessor.getComponent(PortalPageManager.class) 28 | for (PortalPage portalPage in Functions.toList(ppm.getAll())) { 29 | if (!isPreview) { 30 | sb.append("Removing dashboard ${portalPage.getName()} " + BR) 31 | log.warn("Removing dashboard ${portalPage.getName()}") 32 | ppm.delete(portalPage.getId()) 33 | } else { 34 | sb.append("Marking for remove dashboard ${portalPage.getName()} " + BR) 35 | log.warn("Marking for remove dashboard ${portalPage.getName()}") 36 | } 37 | 38 | } 39 | return sb -------------------------------------------------------------------------------- /groovy/jira/destroy/removeAllInsightObjects.groovy: -------------------------------------------------------------------------------- 1 | import com.atlassian.jira.component.ComponentAccessor 2 | import com.onresolve.scriptrunner.runner.customisers.WithPlugin 3 | @WithPlugin('com.riadalabs.jira.plugins.insight') 4 | import com.riadalabs.jira.plugins.insight.channel.external.api.facade.IQLFacade 5 | import com.riadalabs.jira.plugins.insight.channel.external.api.facade.ObjectFacade 6 | import org.apache.log4j.Logger 7 | import org.apache.log4j.Level 8 | 9 | def log = Logger.getLogger("com.gonchik.scripts.groovy.removeInsightObjects") 10 | log.setLevel(Level.DEBUG) 11 | 12 | ObjectFacade objectFacade = ComponentAccessor.getOSGiComponentInstanceOfType(ObjectFacade) 13 | IQLFacade iqlFacade = ComponentAccessor.getOSGiComponentInstanceOfType(IQLFacade) 14 | def objects = iqlFacade.findObjectsByIQL(/objectSchema = "PepsiCo Issues Snapshots" /) 15 | objects.each { 16 | log.debug("Dropping ${it.name}") 17 | objectFacade.deleteObjectBean(it.id) 18 | } 19 | 20 | -------------------------------------------------------------------------------- /groovy/jira/destroy/removeProjectInCategory.groovy: -------------------------------------------------------------------------------- 1 | import 
com.atlassian.jira.component.ComponentAccessor 2 | import com.atlassian.jira.bc.project.ProjectService 3 | import com.atlassian.jira.component.ComponentAccessor 4 | import org.apache.log4j.Level 5 | import org.apache.log4j.Logger 6 | 7 | 8 | def log = Logger.getLogger("com.gonchik.scriptrunner") 9 | log.setLevel(Level.DEBUG) 10 | 11 | def projectManager = ComponentAccessor.getProjectManager() 12 | projectCategory = projectManager.getProjectCategoryObjectByName("Archive") 13 | 14 | log.debug("Project Category: " + projectCategory.name) 15 | 16 | def projects = projectManager.getProjectsFromProjectCategory(projectCategory) 17 | def projectService = ComponentAccessor.getComponent(ProjectService) 18 | def user = ComponentAccessor.jiraAuthenticationContext.loggedInUser 19 | 20 | 21 | projects.each { project -> 22 | ProjectService.DeleteProjectValidationResult validationResult = projectService.validateDeleteProject(user, project.getKey()); 23 | if (validationResult.isValid()) { 24 | ProjectService.DeleteProjectResult result = projectService.deleteProjectAsynchronous(user, validationResult); 25 | // Interpolate the method call result, not the method reference (was ${project.getKey}) 26 | log.debug("Removed ${project.getKey()}") 27 | } 28 | } -------------------------------------------------------------------------------- /groovy/jira/fields/duplucatedNameOfFields.groovy: -------------------------------------------------------------------------------- 1 | /* 2 | This script investigate the duplicated naming of fields in the Jira 3 | as waiting that request https://jira.atlassian.com/browse/JRASERVER-61376 4 | Motivation based on the UX and continuous of mistakes in scripts, add-ons etc.
5 | 6 | */ 7 | 8 | import com.atlassian.jira.issue.fields.FieldManager 9 | import com.atlassian.jira.component.ComponentAccessor 10 | 11 | final FieldManager fieldManager = ComponentAccessor.getFieldManager() 12 | final def fields = fieldManager.getAllAvailableNavigableFields() 13 | def sb = new StringBuilder() 14 | 15 | def uniqueFieldNames = [] 16 | def output = "Start review fields" 17 | def br = "
\n" 18 | log.debug(output) 19 | sb.append(output + br) 20 | int duplicatedFields = 0 21 | for (field in fields) { 22 | def name = field.getName().toLowerCase() 23 | if (!(name in (uniqueFieldNames))) { 24 | uniqueFieldNames += name 25 | } else { 26 | output = "${field.name} field has duplicate name" 27 | duplicatedFields += 1 28 | sb.append(output + br) 29 | log.debug(output) 30 | } 31 | } 32 | 33 | output = br + "Summary: Investigated ${fields.size()} number of fields, where duplicated is ${duplicatedFields}" 34 | sb.append(output + br) 35 | log.debug(output) 36 | 37 | return sb.toString() -------------------------------------------------------------------------------- /groovy/jira/filters/makeDashboardsAccessibleToAdminsJira7.groovy: -------------------------------------------------------------------------------- 1 | // https://kb.botronsoft.com/x/moBk 2 | import com.atlassian.jira.component.ComponentAccessor 3 | import com.atlassian.jira.portal.PortalPage 4 | import com.atlassian.jira.portal.PortalPageManager 5 | import com.atlassian.jira.sharing.SharePermission 6 | import com.atlassian.jira.sharing.SharePermissionImpl 7 | import com.atlassian.jira.sharing.SharedEntity.SharePermissions 8 | import com.atlassian.jira.sharing.type.ShareType.Name 9 | import com.atlassian.jira.util.collect.EnclosedIterable.Functions 10 | 11 | 12 | def makeAllDashboardsGlobal(long dashboardId) { 13 | PortalPageManager ppm = ComponentAccessor.getComponent(PortalPageManager.class) 14 | 15 | PortalPage portalPage = ppm.getPortalPageById(dashboardId) 16 | Set permissionsSet = new HashSet( 17 | portalPage.getPermissions().getPermissionSet() 18 | ) 19 | permissionsSet.add(new SharePermissionImpl(null, Name.GROUP, "jira-administrators", null)) 20 | ppm.update(PortalPage.portalPage(portalPage).permissions(new SharePermissions(permissionsSet)).build()) 21 | } 22 | 23 | makeAllDashboardsGlobal(DASHBOARDIDHERE) -------------------------------------------------------------------------------- 
/groovy/jira/filters/makeDashboardsGlobalJira7.groovy: -------------------------------------------------------------------------------- 1 | // KB from Botron 2 | import com.atlassian.jira.component.ComponentAccessor 3 | import com.atlassian.jira.portal.PortalPage 4 | import com.atlassian.jira.portal.PortalPageManager 5 | import com.atlassian.jira.sharing.SharePermission 6 | import com.atlassian.jira.sharing.SharePermissionImpl 7 | import com.atlassian.jira.sharing.SharedEntity.SharePermissions 8 | import com.atlassian.jira.sharing.type.ShareType.Name 9 | import com.atlassian.jira.util.collect.EnclosedIterable.Functions 10 | 11 | 12 | def makeAllDashboardsGlobal() { 13 | PortalPageManager ppm = ComponentAccessor.getComponent(PortalPageManager.class) 14 | 15 | for (PortalPage portalPage in Functions.toList(ppm.getAll())) { 16 | Set permissionsSet = new HashSet( 17 | portalPage.getPermissions().getPermissionSet() 18 | ) 19 | permissionsSet.add(new SharePermissionImpl(Name.GLOBAL, null, null)) 20 | ppm.update(PortalPage.portalPage(portalPage).permissions(new SharePermissions(permissionsSet)).build()) 21 | } 22 | } 23 | 24 | makeAllDashboardsGlobal() -------------------------------------------------------------------------------- /groovy/jira/filters/makeFiltersAccessibleToAdminsJira7.groovy: -------------------------------------------------------------------------------- 1 | // https://kb.botronsoft.com/x/gIJk 2 | import com.atlassian.crowd.embedded.api.User 3 | import com.atlassian.jira.user.ApplicationUser 4 | import com.atlassian.jira.component.ComponentAccessor 5 | import com.atlassian.jira.issue.search.SearchRequest 6 | import com.atlassian.jira.issue.search.SearchRequestEntity 7 | import com.atlassian.jira.issue.search.SearchRequestManager 8 | import com.atlassian.jira.sharing.SharePermission 9 | import com.atlassian.jira.sharing.SharePermissionImpl 10 | import com.atlassian.jira.sharing.SharedEntity.SharePermissions 11 | import 
com.atlassian.jira.sharing.type.ShareType.Name 12 | import com.atlassian.jira.util.Visitor 13 | 14 | // This function will share with the group "jira-administrators" th filter with ID "filterId" 15 | def makeFilterAccessibleToAdmins(long filterId) { 16 | ApplicationUser user = ComponentAccessor.getJiraAuthenticationContext().getLoggedInUser() 17 | SearchRequestManager srm = ComponentAccessor.getComponent(SearchRequestManager.class) 18 | 19 | SearchRequest sr = srm.getSearchRequestById(filterId) 20 | Set permissionsSet = new HashSet( 21 | sr.getPermissions().getPermissionSet() 22 | ) 23 | permissionsSet.add(new SharePermissionImpl(null, Name.GROUP, "jira-administrators", null)) 24 | sr.setPermissions(new SharePermissions(permissionsSet)) 25 | 26 | srm.update(sr) 27 | } 28 | 29 | // Pass the ID of the filter you want to update 30 | makeFilterAccessibleToAdmins(FILTERIDHERE) -------------------------------------------------------------------------------- /groovy/jira/filters/makeFiltersGlobalJira7.groovy: -------------------------------------------------------------------------------- 1 | import com.atlassian.crowd.embedded.api.User 2 | import com.atlassian.jira.user.ApplicationUser 3 | import com.atlassian.jira.component.ComponentAccessor 4 | import com.atlassian.jira.issue.search.SearchRequest 5 | import com.atlassian.jira.issue.search.SearchRequestEntity 6 | import com.atlassian.jira.issue.search.SearchRequestManager 7 | import com.atlassian.jira.sharing.SharePermission 8 | import com.atlassian.jira.sharing.SharePermissionImpl 9 | import com.atlassian.jira.sharing.SharedEntity.SharePermissions 10 | import com.atlassian.jira.sharing.type.ShareType.Name 11 | import com.atlassian.jira.util.Visitor 12 | 13 | def makeAllFiltersGlobal() { 14 | ApplicationUser user = ComponentAccessor.getJiraAuthenticationContext().getLoggedInUser() 15 | SearchRequestManager srm = ComponentAccessor.getComponent(SearchRequestManager.class) 16 | 17 | srm.visitAll(new Visitor() { 18 | 19 | 
void visit(SearchRequestEntity e) { 20 | 21 | SearchRequest sr = srm.getSearchRequestById(e.id) 22 | Set permissionsSet = new HashSet( 23 | sr.getPermissions().getPermissionSet() 24 | ) 25 | permissionsSet.add(new SharePermissionImpl(Name.GLOBAL, null, null)) 26 | sr.setPermissions(new SharePermissions(permissionsSet)) 27 | 28 | srm.update(sr) 29 | } 30 | }) 31 | } 32 | 33 | makeAllFiltersGlobal() -------------------------------------------------------------------------------- /groovy/jira/group/emptyGroupDetector.groovy: -------------------------------------------------------------------------------- 1 | /* 2 | Find empty groups in Jira 3 | Additional: This script can be run from Jira -> Administration -> Add-ons -> Script Console 4 | Tested Environment: Jira 8.20.5, 8.13.3, 9.12.5 5 | Contribution: Gonchik Tsymzhitov 6 | */ 7 | import com.atlassian.jira.component.ComponentAccessor 8 | import com.atlassian.jira.bc.user.search.UserSearchService 9 | import com.atlassian.jira.bc.user.search.UserSearchParams 10 | import com.atlassian.jira.user.ApplicationUser 11 | import org.apache.log4j.Logger 12 | import org.apache.log4j.Level 13 | 14 | 15 | def log = Logger.getLogger("com.gonchik.scripts.groovy.deleteInActiveUsersFromGroupAndRoles") 16 | log.setLevel(Level.DEBUG) 17 | 18 | def groupManager = ComponentAccessor.getGroupManager() 19 | 20 | def allGroups = groupManager.getAllGroups() 21 | def sb = new StringBuilder() 22 | def BR = "
\n" 23 | log.debug("Start review users") 24 | sb.append("Start review users" + BR) 25 | 26 | for (def group : allGroups) { 27 | def userCount = groupManager.getUsersInGroupCount(group) 28 | if (userCount && userCount > 0) { 29 | continue; 30 | } 31 | sb.append(group.name + BR) 32 | log.debug(group.name) 33 | } 34 | 35 | return sb -------------------------------------------------------------------------------- /groovy/jira/issues/README.md: -------------------------------------------------------------------------------- 1 | Please, start to make clean from *schemes* and after elements -------------------------------------------------------------------------------- /groovy/jira/issues/elements/issueTypeCleaner.groovy: -------------------------------------------------------------------------------- 1 | boolean isPreview = true 2 | /* 3 | Find unassociated issue types to projects 4 | Additional: This script can be run from Jira -> Administration -> Add-ons -> Script Console 5 | Tested Environment: Jira 8.20.16, 9.12.5 6 | Contribution: Gonchik Tsymzhitov 7 | */ 8 | import com.atlassian.jira.component.ComponentAccessor 9 | import com.atlassian.jira.config.IssueTypeManager 10 | import org.apache.log4j.Logger 11 | import org.apache.log4j.Level 12 | 13 | def log = Logger.getLogger("com.gonchik.scripts.groovy.cleanupUnAssociatedIssueType") 14 | log.setLevel(Level.DEBUG) 15 | 16 | // Cleanup of the cleanupUnAssociatedIssueType 17 | def issueTypeManager = ComponentAccessor.getComponent(IssueTypeManager) 18 | def sb = new StringBuilder() 19 | 20 | if (isPreview) { 21 | sb.append("Please, note it works as preview. For execute change variable isPreview = true

\n") 22 | } else { 23 | sb.append("Please, note it works in execute mode

\n") 24 | } 25 | sb.append("Deleted issue type with no associated projects:

\n") 26 | issueTypeManager.issueTypes.each { 27 | try { 28 | if (!issueTypeManager.hasAssociatedIssues(it)) { 29 | sb.append("${it.name}
\n") 30 | if (!isPreview) { 31 | // Set the Default of Task Id 32 | String replaceIssueTypeId = "1" 33 | if (it.isSubTask()){ 34 | // Id of Sub-Task 35 | replaceIssueTypeId = "5" 36 | } 37 | issueTypeManager.removeIssueType(it.id, replaceIssueTypeId) 38 | } 39 | } 40 | } 41 | catch (Exception e) { 42 | sb.append("Error: " + e + "
\n") 43 | } 44 | } 45 | 46 | return sb.toString() -------------------------------------------------------------------------------- /groovy/jira/issues/elements/statusCleaner.groovy: -------------------------------------------------------------------------------- 1 | /* 2 | Delete statuses with no associated workflow 3 | please, be informed that script works based on the exception 4 | for com.atlassian.jira.config.StatusManager.removeStatus() 5 | 6 | Disclaimer: please, check on test env before run prod 7 | */ 8 | 9 | import com.atlassian.jira.component.ComponentAccessor 10 | import com.atlassian.jira.config.StatusManager 11 | import org.apache.log4j.Logger 12 | import org.apache.log4j.Level 13 | 14 | def log = Logger.getLogger("com.gonchik.scripts.groovy.cleanupUnAssociatedStatuses") 15 | log.setLevel(Level.DEBUG) 16 | 17 | def statusManager = ComponentAccessor.getComponent(StatusManager) 18 | def sb = new StringBuilder() 19 | 20 | sb.append("Delete statuses with no associated workflow:

\n") 21 | statusManager.statuses.each { 22 | try { 23 | statusManager.removeStatus(it.id) 24 | sb.append("Removed status ${it.name}
\n") 25 | } catch (Exception e) { 26 | log.error(e) 27 | } 28 | } 29 | 30 | return sb.toString() -------------------------------------------------------------------------------- /groovy/jira/issues/permission/01_permission_schemes_cleaner.groovy: -------------------------------------------------------------------------------- 1 | boolean isPreview = true 2 | /* 3 | This script for cleanup of unused permission schemes 4 | Additional: This script can be run from Jira -> Administration -> Add-ons -> Script Console 5 | Tested Environment: Jira 8.20.16 6 | Contribution: Gonchik Tsymzhitov 7 | */ 8 | import com.atlassian.jira.component.ComponentAccessor 9 | import com.atlassian.jira.permission.PermissionSchemeManager 10 | 11 | PermissionSchemeManager permissionSchemeManager = ComponentAccessor.getPermissionSchemeManager() 12 | 13 | def sb = new StringBuilder() 14 | if (isPreview) { 15 | sb.append("Please, note it works as preview. For execute change variable isPreview = true

\n") 16 | } else { 17 | sb.append("Please, note it works in execute mode

\n") 18 | } 19 | permissionSchemeManager.getUnassociatedSchemes().each { 20 | try{ 21 | if (!isPreview) { 22 | sb.append ("Deleting unused permission scheme: ${it.name}
") 23 | def sId = Long.valueOf("${it.id}") 24 | permissionSchemeManager.deleteScheme(sId) 25 | } else { 26 | sb.append ("Unused permission scheme: ${it.name}
") 27 | } 28 | } 29 | catch (Exception e) { 30 | sb.append("Error: " + e + "\n") 31 | } 32 | } 33 | return sb.toString() -------------------------------------------------------------------------------- /groovy/jira/issues/schemes/fieldSreenSchemeCleaner.groovy: -------------------------------------------------------------------------------- 1 | boolean isPreview = true 2 | 3 | import com.atlassian.jira.component.ComponentAccessor 4 | import com.atlassian.jira.issue.fields.screen.FieldScreenSchemeManager 5 | 6 | import org.apache.log4j.Logger 7 | import org.apache.log4j.Level 8 | 9 | def log = Logger.getLogger("com.gonchik.scripts.groovy.cleanupUnUsedFieldScreens") 10 | log.setLevel(Level.DEBUG) 11 | 12 | def schemeManager = ComponentAccessor.fieldScreenSchemeManager 13 | def sb = new StringBuilder() 14 | if (isPreview) { 15 | sb.append("Please, note it works as preview. For execute change variable isPreview = true

\n") 16 | } else { 17 | sb.append("Please, note it works in execute mode

\n") 18 | } 19 | sb.append("Deleted issue type screen schemes with no associated projects:

\n") 20 | schemeManager.fieldScreenSchemes().each { 21 | if (it.isDefault()) { 22 | return 23 | } 24 | try { 25 | if (schemeManager.getProjects(it).size() == 0) { 26 | sb.append("${it.name}
\n") 27 | if (!isPreview) { 28 | schemeManager.removeFieldScreenScheme(it) 29 | } 30 | } 31 | } 32 | catch (Exception e) { 33 | sb.append("Error: " + e + "
\n") 34 | } 35 | } 36 | 37 | return sb.toString() -------------------------------------------------------------------------------- /groovy/jira/issues/schemes/issueTypeSchemesCleaner.groovy: -------------------------------------------------------------------------------- 1 | boolean isPreview = true 2 | /* 3 | Detect unused of Issue Type Schemes 4 | Additional: This script can be run from Jira -> Administration -> Add-ons -> Script Console 5 | Tested Environment: Jira 8.20.16, 9.12.5 6 | Contribution: Gonchik Tsymzhitov 7 | */ 8 | import com.atlassian.jira.component.ComponentAccessor 9 | import org.apache.log4j.Logger 10 | import org.apache.log4j.Level 11 | 12 | def log = Logger.getLogger("com.gonchik.scripts.groovy.issueTypeSchemesCleaner") 13 | log.setLevel(Level.DEBUG) 14 | 15 | def schemeManager = ComponentAccessor.issueTypeSchemeManager 16 | def sb = new StringBuilder() 17 | 18 | if (isPreview) { 19 | sb.append("Please, note it works as preview. For execute change variable isPreview = true

\n") 20 | sb.append("Will be deleted issue type schemes with no associated projects:

\n") 21 | } else { 22 | sb.append("Please, note it works in execute mode

\n") 23 | sb.append("Deleted issue type schemes with no associated projects:

\n") 24 | } 25 | 26 | schemeManager.allSchemes.each { 27 | if (schemeManager.isDefaultIssueTypeScheme(it)) { 28 | return 29 | } 30 | try { 31 | if (it.associatedProjectIds.size() == 0) { 32 | sb.append("${it.name}
\n") 33 | if (!isPreview) { 34 | schemeManager.deleteScheme(it) 35 | } 36 | } 37 | } 38 | catch (Exception e) { 39 | sb.append("Error: " + e + "
\n") 40 | } 41 | } 42 | 43 | return sb.toString() -------------------------------------------------------------------------------- /groovy/jira/issues/schemes/issueTypeScreenSchemeCleaner.groovy: -------------------------------------------------------------------------------- 1 | boolean isPreview = true 2 | /* 3 | Detect unused of issue type screen schemes 4 | Additional: This script can be run from Jira -> Administration -> Add-ons -> Script Console 5 | Tested Environment: Jira 8.20.16, 9.12.5 6 | Contribution: Gonchik Tsymzhitov 7 | */ 8 | import com.atlassian.jira.component.ComponentAccessor 9 | import com.atlassian.jira.issue.fields.screen.issuetype.IssueTypeScreenSchemeManager 10 | import org.apache.log4j.Logger 11 | import org.apache.log4j.Level 12 | 13 | def log = Logger.getLogger("com.gonchik.scripts.groovy.cleanupUnAssociatedIssueTypeScreen") 14 | log.setLevel(Level.DEBUG) 15 | 16 | def schemeManager = ComponentAccessor.issueTypeScreenSchemeManager 17 | def sb = new StringBuilder() 18 | if (isPreview) { 19 | sb.append("Please, note it works as preview. For execute change variable isPreview = true

\n") 20 | sb.append("Will be deleted issue type screen schemes with no associated projects:

\n") 21 | } else { 22 | sb.append("Please, note it works in execute mode

\n") 23 | sb.append("Deleted issue type screen schemes with no associated projects:

\n") 24 | } 25 | 26 | schemeManager.issueTypeScreenSchemes.each { 27 | if (it.isDefault()) { 28 | return 29 | } 30 | try { 31 | if (schemeManager.getProjects(it).size() < 1) { 32 | sb.append("${it.name}
\n") 33 | if (!isPreview) { 34 | schemeManager.removeIssueTypeScreenScheme(it) 35 | } 36 | } 37 | } 38 | catch (Exception e) { 39 | sb.append("Error: " + e + "
\n") 40 | } 41 | } 42 | 43 | return sb.toString() -------------------------------------------------------------------------------- /groovy/jira/issues/schemes/notificationSchemesCleaner.groovy: -------------------------------------------------------------------------------- 1 | boolean isPreview = true 2 | /* 3 | This script for cleanup of unused notification schemes 4 | Additional: This script can be run from Jira -> Administration -> Add-ons -> Script Console 5 | Tested Environment: Jira 8.20.16, 9.12.5 6 | Contribution: Gonchik Tsymzhitov 7 | */ 8 | import com.atlassian.jira.component.ComponentAccessor 9 | import com.atlassian.jira.notification.NotificationSchemeManager 10 | 11 | NotificationSchemeManager notificationSchemeManager = ComponentAccessor.getNotificationSchemeManager() 12 | 13 | def sb = new StringBuilder() 14 | 15 | if (isPreview) { 16 | sb.append("Please, note it works as preview. For execute change variable isPreview = true

\n") 17 | } else { 18 | sb.append("Please, note it works in execute mode

\n") 19 | } 20 | notificationSchemeManager.getUnassociatedSchemes().each { 21 | try { 22 | if (!isPreview) { 23 | sb.append("Deleting unused notification scheme: ${it.name}
") 24 | def sId = Long.valueOf("${it.id}") 25 | notificationSchemeManager.deleteScheme(sId) 26 | } else { 27 | sb.append("Unused notification scheme: ${it.name}
") 28 | } 29 | } 30 | catch (Exception e) { 31 | sb.append("Error: " + e + "\n") 32 | } 33 | } 34 | 35 | return sb.toString() -------------------------------------------------------------------------------- /groovy/jira/issues/screen/01_screen_schemes_cleaner.groovy: -------------------------------------------------------------------------------- 1 | boolean isPreview = true 2 | /* 3 | This script for cleanup of unused screen schemes 4 | Additional: This script can be run from Jira -> Administration -> Add-ons -> Script Console 5 | Tested Environment: Jira 8.20.16 6 | Contribution: Gonchik Tsymzhitov 7 | */ 8 | import com.atlassian.jira.component.ComponentAccessor 9 | import com.atlassian.jira.issue.fields.screen.FieldScreenSchemeManager 10 | import org.apache.log4j.Logger 11 | import org.apache.log4j.Level 12 | 13 | def log = Logger.getLogger("com.gonchik.scripts.groovy.screenSchemesCleaner") 14 | log.setLevel(Level.DEBUG) 15 | 16 | def schemeManager = ComponentAccessor.getComponent(FieldScreenSchemeManager.class) 17 | def sb = new StringBuilder() 18 | 19 | if (isPreview) { 20 | sb.append("Please, note it works as preview. For execute change variable isPreview = true

\n") 21 | } else { 22 | sb.append("Please, note it works in execute mode

\n") 23 | } 24 | sb.append("Deleted screen schemes with no screens projects:

\n") 25 | schemeManager.getFieldScreenSchemes().each { 26 | try { 27 | if (schemeManager.getFieldScreenSchemeItems(it).size() < 1) { 28 | if (!isPreview) { 29 | schemeManager.removeFieldScreenScheme(it) 30 | } 31 | } 32 | } 33 | catch (Exception e) { 34 | sb.append("Error: " + e + "
\n") 35 | } 36 | } 37 | if (!isPreview) { 38 | schemeManager.refresh() 39 | } 40 | return sb.toString() -------------------------------------------------------------------------------- /groovy/jira/issues/screen/02_screen_cleaner.groovy: -------------------------------------------------------------------------------- 1 | boolean isPreview = true 2 | // This script for cleanup of the screens 3 | import com.atlassian.jira.component.ComponentAccessor 4 | import com.atlassian.jira.issue.fields.screen.FieldScreenFactory 5 | import com.atlassian.jira.issue.fields.screen.FieldScreenManager 6 | import com.atlassian.jira.issue.fields.screen.FieldScreenSchemeManager 7 | import com.atlassian.jira.web.action.admin.issuefields.screens.ViewFieldScreens 8 | import com.atlassian.jira.workflow.WorkflowManager 9 | 10 | FieldScreenManager fieldScreenManager = ComponentAccessor.getFieldScreenManager() 11 | FieldScreenFactory fieldScreenFactory = ComponentAccessor.getComponent(FieldScreenFactory.class) 12 | FieldScreenSchemeManager fieldScreenSchemeManager = ComponentAccessor.getComponent(FieldScreenSchemeManager.class) 13 | WorkflowManager workflowManager = ComponentAccessor.getWorkflowManager() 14 | ViewFieldScreens viewFieldScreens = new ViewFieldScreens(fieldScreenManager, fieldScreenFactory, fieldScreenSchemeManager, workflowManager) 15 | 16 | def sb = new StringBuilder() 17 | 18 | for (def fieldScreen : fieldScreenManager.getFieldScreens()) { 19 | if (viewFieldScreens.isDeletable(fieldScreen)) { 20 | if (!isPreview) { 21 | sb.append("Deleleting unused screen: ${fieldScreen.getName()}
") 22 | fieldScreenManager.removeFieldScreen(fieldScreen.getId()) 23 | } else { 24 | sb.append("Unused screen: ${fieldScreen.getName()}
") 25 | } 26 | } 27 | } 28 | 29 | return sb.toString() -------------------------------------------------------------------------------- /groovy/jira/issues/workflow/01_workflow_schemes_cleaner.groovy: -------------------------------------------------------------------------------- 1 | boolean isPreview = true 2 | /* 3 | This script investigate workflows schemes 4 | Additional: 5 | This script can be run from Jira: 6 | -> Administration -> Scriptrunner -> Console 7 | or 8 | -> Administration -> Apps -> Script Console 9 | Tested Environment: Jira 8.20.x, 9.4.x 10 | Contribution: Gonchik Tsymzhitov 11 | */ 12 | 13 | import com.atlassian.jira.component.ComponentAccessor 14 | import org.apache.log4j.Logger 15 | import org.apache.log4j.Level 16 | 17 | def log = Logger.getLogger("com.gonchik.scripts.groovy.workflowSchemesCleaner") 18 | log.setLevel(Level.DEBUG) 19 | 20 | def workflowManager = ComponentAccessor.workflowManager 21 | def schemeManager = ComponentAccessor.workflowSchemeManager 22 | def sb = new StringBuilder() 23 | sb.append("Start to detect unused workflow schemes
") 24 | if (isPreview) { 25 | sb.append("Please, note it works as preview. For execute change variable isPreview = true

\n") 26 | } else { 27 | sb.append("Please, note it works in execute mode

\n") 28 | } 29 | // Review workflow schemes 30 | schemeManager.schemeObjects.each { 31 | try { 32 | if (schemeManager.getProjectsUsing(schemeManager.getWorkflowSchemeObj(it.id)).size() == 0) { 33 | sb.append("Workflow scheme remove candidate: ${it.name}
\n") 34 | if (!isPreview) { 35 | schemeManager.deleteScheme(it.id) 36 | } 37 | } 38 | } 39 | catch (Exception e) { 40 | log.error('Something wrong, ' + e) 41 | sb.append("Error: " + e + "
\n") 42 | } 43 | } 44 | return sb.toString() -------------------------------------------------------------------------------- /groovy/jira/issues/workflow/02_workflow_cleaner.groovy: -------------------------------------------------------------------------------- 1 | boolean isPreview = true 2 | /* 3 | This script investigate the workflows 4 | Purpose: Remove all Inactive or Drafts Workflows in Jira 5 | Additional: 6 | This script can be run from Jira: 7 | -> Administration -> Scriptrunner -> Console 8 | or 9 | -> Administration -> Apps -> Script Console 10 | Tested Environment: Jira 8.20.x, 9.4.x 11 | Contribution: Gonchik Tsymzhitov 12 | */ 13 | 14 | import com.atlassian.jira.component.ComponentAccessor 15 | import org.apache.log4j.Logger 16 | import org.apache.log4j.Level 17 | 18 | def log = Logger.getLogger("com.gonchik.scripts.groovy.deleteUnUsedWorkflowsAndWorkflowSchemes") 19 | log.setLevel(Level.DEBUG) 20 | 21 | def workflowManager = ComponentAccessor.workflowManager 22 | def schemeManager = ComponentAccessor.workflowSchemeManager 23 | def sb = new StringBuilder() 24 | 25 | sb.append("Start to look unused workflows
") 26 | if (isPreview) { 27 | sb.append("Please, note it works as preview.
\n For execute change variable isPreview = true


\n") 28 | } else { 29 | sb.append("Please, note it works in execute mode

\n") 30 | } 31 | // review Workflows 32 | workflowManager.workflows.each { 33 | if (!it.systemWorkflow) { 34 | def schemes = schemeManager.getSchemesForWorkflow(it) 35 | if (schemes.size() == 0) { 36 | sb.append("Workflow remove candidate: ${it.name}
\n") 37 | if (!isPreview) { 38 | try { 39 | workflowManager.deleteWorkflow(it) 40 | } catch (Exception e) { 41 | } 42 | } 43 | } 44 | } 45 | } 46 | return sb.toString() -------------------------------------------------------------------------------- /groovy/jira/links/delete_remote_issue_links.groovy: -------------------------------------------------------------------------------- 1 | import com.atlassian.jira.component.ComponentAccessor 2 | import com.atlassian.jira.issue.IssueManager 3 | import com.atlassian.jira.issue.Issue 4 | import com.atlassian.jira.user.ApplicationUser 5 | import com.atlassian.jira.user.ApplicationUsers 6 | import com.atlassian.crowd.embedded.api.User 7 | import com.atlassian.jira.issue.link.RemoteIssueLinkManager 8 | 9 | def deleteRemoteIssueLinks(Issue issue) { 10 | def applicationUser = ComponentAccessor.jiraAuthenticationContext.user 11 | RemoteIssueLinkManager remoteIssueLinkManager = ComponentAccessor.getComponent(RemoteIssueLinkManager) 12 | def remoteIssueLinks = remoteIssueLinkManager.getRemoteIssueLinksForIssue(issue) 13 | remoteIssueLinks.each { remoteIssueLink -> 14 | remoteIssueLinkManager.removeRemoteIssueLink(remoteIssueLink.id, applicationUser) 15 | } 16 | remoteIssueLinks.size() 17 | } 18 | 19 | String issueKey = 'GRAILS-166' 20 | IssueManager issueManager = ComponentAccessor.issueManager 21 | def issue = issueManager.getIssueObject(issueKey) 22 | if(issue) { 23 | def deleteCount = deleteRemoteIssueLinks(issue) 24 | return "deleted ${deleteCount} links." 25 | } else { 26 | return "issue not found." 
27 | } -------------------------------------------------------------------------------- /groovy/jira/mail/reviewMailQueue.groovy: -------------------------------------------------------------------------------- 1 | import com.atlassian.jira.component.ComponentAccessor 2 | import com.atlassian.mail.queue.MailQueue 3 | import com.atlassian.jira.mail.SubscriptionSingleRecepientMailQueueItem 4 | import com.atlassian.jira.web.action.admin.MailQueueAdmin 5 | import com.atlassian.jira.notification.NotificationSchemeManager 6 | 7 | import org.apache.log4j.Logger 8 | import org.apache.log4j.Level 9 | 10 | 11 | final def log = Logger.getLogger("com.gonchik.scripts.groovy.mail.queue") 12 | log.setLevel(Level.DEBUG) 13 | 14 | def mailQueue = ComponentAccessor.getMailQueue() 15 | boolean checkIsSending = mailQueue.isSending() 16 | 17 | def items = mailQueue.getQueue() 18 | def errorQueue = mailQueue.getErrorQueue() 19 | def errorItems = [] 20 | errorItems.addAll(errorQueue) 21 | errorItems.each { item -> 22 | log.warn "$item.dateQueued - $item.subject" 23 | } 24 | // remove item from queue 25 | mailQueue.emptyErrorQueue() 26 | mailQueue.unstickQueue() 27 | 28 | /* 29 | items.each { 30 | item -> log.warn "$item.dateQueued - $item.subject" 31 | } 32 | */ 33 | 34 | /* 35 | for (def item in items) { 36 | if (!item.subject) { 37 | log.warn item 38 | } 39 | if (item instanceof SubscriptionSingleRecepientMailQueueItem){ 40 | log.warn item 41 | } 42 | log.warn item 43 | } 44 | 45 | */ -------------------------------------------------------------------------------- /groovy/jira/troubleshooting/comment/deleteCommentForIssues.groovy: -------------------------------------------------------------------------------- 1 | /* This script works without notification and as a service */ 2 | 3 | import com.atlassian.jira.component.ComponentAccessor 4 | import com.atlassian.jira.issue.CustomFieldManager 5 | import com.atlassian.jira.issue.fields.CustomField 6 | import 
com.atlassian.jira.issue.IssueManager 7 | import com.atlassian.jira.issue.Issue 8 | import com.atlassian.jira.issue.MutableIssue 9 | import org.apache.log4j.Logger 10 | import org.apache.log4j.Level 11 | import com.atlassian.jira.event.type.* 12 | import com.atlassian.jira.bc.issue.search.SearchService 13 | import com.atlassian.jira.bc.issue.search.SearchService.ParseResult 14 | import com.atlassian.jira.web.bean.PagerFilter 15 | import com.atlassian.jira.issue.customfields.manager.OptionsManager 16 | import com.atlassian.jira.issue.comments.CommentManager 17 | import com.atlassian.jira.issue.comments.Comment 18 | 19 | 20 | def log = Logger.getLogger("com.gonchik.scripts.groovy.setAccountValueFromCustomerAccount") 21 | log.setLevel(Level.DEBUG) 22 | 23 | String jqlSearch = 'key=SDK-154141' 24 | def user = ComponentAccessor.getJiraAuthenticationContext().getLoggedInUser() 25 | IssueManager issueManager = ComponentAccessor.getIssueManager() 26 | SearchService searchService = ComponentAccessor.getComponent(SearchService.class) 27 | SearchService.ParseResult parseResult = searchService.parseQuery(user, jqlSearch) 28 | CommentManager commentManager = ComponentAccessor.commentManager 29 | 30 | if (parseResult.isValid()) { 31 | def searchResult = searchService.search(user, parseResult.getQuery(), PagerFilter.getUnlimitedFilter()) 32 | def issues = searchResult.results.collect { issueManager.getIssueObject(it.id) } 33 | for (issue in issues) { 34 | commentManager.deleteCommentsForIssue(issue); 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /groovy/jira/troubleshooting/jira_deindexer_default.groovy: -------------------------------------------------------------------------------- 1 | import com.atlassian.jira.component.ComponentAccessor 2 | import com.atlassian.jira.issue.Issue 3 | import com.atlassian.jira.issue.index.DefaultIndexManager 4 | import com.atlassian.jira.issue.IssueManager 5 | import org.slf4j.LoggerFactory 6 
| import org.slf4j.Logger 7 | 8 | final Logger log = LoggerFactory.getLogger(this.getClass()) 9 | 10 | 11 | def issueId = 988672 12 | 13 | 14 | final DefaultIndexManager defaultIndexManager = ComponentAccessor.getComponentOfType(DefaultIndexManager.class); 15 | final IssueManager issueManager = ComponentAccessor.getIssueManager() 16 | 17 | 18 | Issue targetIssue = issueManager.getIssueObject(issueId); 19 | if (!targetIssue) {return;} 20 | log.info(">>> Issue deindex started."); 21 | defaultIndexManager.deIndex(targetIssue); 22 | log.info(">>> Issue deindex completed successfully!"); -------------------------------------------------------------------------------- /groovy/jira/troubleshooting/jira_enable_upload_button.groovy: -------------------------------------------------------------------------------- 1 | /* 2 | Enable plugin upload by default in Jira 8.12.4 and later 3 | https://jira.atlassian.com/browse/JRASERVER-77129 4 | */ 5 | import com.onresolve.osgi.AllBundlesApplicationContext 6 | import com.onresolve.scriptrunner.runner.ScriptRunnerImpl 7 | 8 | def allBundlesApplicationContext = ScriptRunnerImpl.scriptRunner.getBean(AllBundlesApplicationContext) 9 | def policyEnforcer = allBundlesApplicationContext.getBean( 10 | 'com.atlassian.upm.atlassian-universal-plugin-manager-plugin', 'policyEnforcer' 11 | ) 12 | 13 | policyEnforcer.getClass().declaredFields.find { 14 | it.name == 'pluginUploadEnabled' 15 | }.with { 16 | setAccessible(true) 17 | setBoolean(policyEnforcer, true) 18 | } 19 | -------------------------------------------------------------------------------- /groovy/jira/troubleshooting/jira_index_db_tickets_count_checker.groovy: -------------------------------------------------------------------------------- 1 | /* This script do reindexing the exact tickets unresolved */ 2 | 3 | import com.atlassian.jira.component.ComponentAccessor 4 | import com.atlassian.jira.issue.IssueManager 5 | import com.atlassian.jira.issue.Issue 6 | import 
com.atlassian.jira.issue.MutableIssue 7 | import org.ofbiz.core.entity.DelegatorInterface 8 | import org.apache.log4j.Logger 9 | import org.apache.log4j.Level 10 | import org.ofbiz.core.entity.ConnectionFactory 11 | import groovy.sql.Sql 12 | import java.sql.Connection 13 | import com.atlassian.jira.util.ImportUtils 14 | import com.atlassian.jira.issue.index.IssueIndexingService 15 | 16 | 17 | def log = Logger.getLogger("com.gonchik.scripts.groovy.updateExactIndexOfTickets") 18 | log.setLevel(Level.DEBUG) 19 | 20 | def user = ComponentAccessor.getJiraAuthenticationContext().getLoggedInUser() 21 | IssueManager issueManager = ComponentAccessor.getIssueManager() 22 | def issueIndexingService = ComponentAccessor.getComponent(IssueIndexingService.class) 23 | def projectManager = ComponentAccessor.getProjectManager() 24 | def project = projectManager.getProjectObjByKey("SALE") 25 | long countInProject = issueManager.getIssueCountForProject(project.id) 26 | log.debug countInProject 27 | 28 | 29 | 30 | 31 | 32 | def getUnresolvedTickets(from, to) { 33 | def delegator = (DelegatorInterface) ComponentAccessor.getComponent(DelegatorInterface) 34 | String helperName = delegator.getGroupHelperName("default") 35 | Connection conn = ConnectionFactory.getConnection(helperName) 36 | Sql sql = new Sql(conn) 37 | def values = [] 38 | try { 39 | String s = "SELECT j.id " + 40 | "FROM jiraissue j " + 41 | "WHERE resolution is null and created > '${from}' and created < '${to}' " 42 | values = sql.rows(s) 43 | } finally { 44 | sql.close() 45 | } 46 | return values; 47 | } -------------------------------------------------------------------------------- /groovy/jira/users/cleanUserHistoryLastView.groovy: -------------------------------------------------------------------------------- 1 | def isPreview = true 2 | /* 3 | That script clean last view activity, 4 | just in case it was related to the performance degradation in Jira Service Desk and Jira Software for the 10k+ 5 | Additional: This 
script can be run from Jira -> Administration -> Add-ons -> Script Console 6 | Tested Environment: Jira 8.20.5, 8.13.3 7 | Contribution: Gonchik Tsymzhitov 8 | */ 9 | import com.atlassian.jira.user.ApplicationUser; 10 | import com.atlassian.jira.user.UserHistoryItem; 11 | import com.atlassian.jira.user.UserHistoryManager; 12 | import com.atlassian.jira.component.ComponentAccessor; 13 | import com.atlassian.jira.user.UserHistoryItem.*; 14 | import com.atlassian.jira.bc.user.search.UserSearchService 15 | import com.atlassian.jira.bc.user.search.UserSearchParams 16 | import com.atlassian.jira.user.ApplicationUser 17 | import org.apache.log4j.Logger 18 | import org.apache.log4j.Level 19 | 20 | def log = Logger.getLogger("com.gonchik.scripts.groovy.cleanupLastViewHistoryForInactiveUsers") 21 | log.setLevel(Level.DEBUG) 22 | 23 | boolean cleanForActiveUsers = false 24 | // This script shows how to clean up the history items from inactive users 25 | UserSearchService userSearchService = ComponentAccessor.getComponent(UserSearchService.class) 26 | UserSearchParams userSearchParams = (new UserSearchParams.Builder()).allowEmptyQuery(true).includeActive(cleanForActiveUsers).includeInactive(true).maxResults(100000).build() 27 | def userHistoryManager = ComponentAccessor.getComponent(UserHistoryManager.class) 28 | def sb = new StringBuilder() 29 | for (ApplicationUser appUser : userSearchService.findUsers("", userSearchParams)) { 30 | List recentUserHistory = userHistoryManager.getHistory(UserHistoryItem.ASSIGNEE, appUser); 31 | if (!isPreview){ 32 | userHistoryManager.removeHistoryForUser(appUser) 33 | } 34 | sb.append("${appUser.name}
\n") 35 | } 36 | return sb.toString() 37 | 38 | -------------------------------------------------------------------------------- /groovy/jira/users/cleanupVotesForInactiveUsers.groovy: -------------------------------------------------------------------------------- 1 | boolean isPreview = true 2 | /* 3 | Clean up the votes from inactive users 4 | Additional: This script can be run from Jira -> Administration -> Add-ons -> Script Console 5 | Tested Environment: Jira 8.20.5, 8.13.3, 9.12.5 6 | Contribution: Gonchik Tsymzhitov 7 | */ 8 | import com.atlassian.jira.component.ComponentAccessor 9 | import com.atlassian.jira.bc.user.search.UserSearchService 10 | import com.atlassian.jira.bc.user.search.UserSearchParams 11 | import com.atlassian.jira.user.ApplicationUser 12 | import com.atlassian.jira.issue.vote.VoteManager 13 | import org.apache.log4j.Logger 14 | import org.apache.log4j.Level 15 | 16 | def log = Logger.getLogger("com.gonchik.scripts.groovy.cleanupVotesForInactiveUsers") 17 | log.setLevel(Level.DEBUG) 18 | 19 | // This script shows how to clean up the votes from inactive users 20 | UserSearchService userSearchService = ComponentAccessor.getComponent(UserSearchService.class) 21 | UserSearchParams userSearchParams = (new UserSearchParams.Builder()).allowEmptyQuery(true).includeActive(false).includeInactive(true).maxResults(100000).build() 22 | VoteManager voteManager = ComponentAccessor.getComponent(VoteManager.class) 23 | def sb = new StringBuilder() 24 | for (ApplicationUser appUser : userSearchService.findUsers("", userSearchParams)) { 25 | ApplicationUser userToRemove = appUser 26 | if (!isPreview) { 27 | voteManager.removeVotesForUser(userToRemove) 28 | } 29 | sb.append("${userToRemove.name}
\n") 30 | } 31 | return sb.toString() -------------------------------------------------------------------------------- /groovy/jira/users/disableAutoWatchForUsers.groovy: -------------------------------------------------------------------------------- 1 | boolean isPreview = false 2 | // remove inactive users from role 3 | import com.atlassian.jira.component.ComponentAccessor 4 | import com.atlassian.jira.bc.user.search.UserSearchService 5 | import com.atlassian.jira.bc.user.search.UserSearchParams 6 | import com.atlassian.jira.user.ApplicationUser 7 | import com.atlassian.jira.util.SimpleErrorCollection 8 | import com.atlassian.jira.bc.projectroles.ProjectRoleService 9 | import com.atlassian.jira.project.Project 10 | import org.apache.log4j.Logger 11 | import org.apache.log4j.Level 12 | import com.atlassian.jira.user.preferences.UserPreferencesManager 13 | import com.atlassian.core.user.preferences.Preferences 14 | import com.atlassian.jira.user.preferences.PreferenceKeys 15 | import com.atlassian.jira.security.groups.GroupManager 16 | 17 | def log = Logger.getLogger("com.gonchik.scripts.groovy.deleteAutoWatchPreferencesForExternalUsers") 18 | log.setLevel(Level.DEBUG) 19 | 20 | 21 | ComponentAccessor.getGroupManager().getUsersInGroup('external-users').each{ 22 | ApplicationUser user = it 23 | UserPreferencesManager userPreferencesManager = ComponentAccessor.getUserPreferencesManager(); 24 | Preferences preferences = userPreferencesManager.getExtendedPreferences(user); 25 | try { 26 | if(!isPreview){ 27 | preferences.setBoolean(PreferenceKeys.USER_AUTOWATCH_DISABLED, true); 28 | } 29 | } catch (Exception e) { 30 | log.error(e); 31 | } 32 | } -------------------------------------------------------------------------------- /groovy/jira/users/userCleanRecentHistoryForExactUser.groovy: -------------------------------------------------------------------------------- 1 | def isPreview = true 2 | /* 3 | That script clean last view activity, 4 | just in case it was 
related to the performance degradation in Jira Service Desk and Jira Software for the 10k+ 5 | */ 6 | import com.atlassian.jira.user.ApplicationUser; 7 | import com.atlassian.jira.user.UserHistoryItem; 8 | import com.atlassian.jira.user.UserHistoryManager; 9 | import com.atlassian.jira.component.ComponentAccessor; 10 | import com.atlassian.jira.user.UserHistoryItem.*; 11 | import com.atlassian.jira.bc.user.search.UserSearchService 12 | import com.atlassian.jira.bc.user.search.UserSearchParams 13 | import com.atlassian.jira.user.ApplicationUser 14 | import org.apache.log4j.Logger 15 | import org.apache.log4j.Level 16 | 17 | def USERNAME = "test.user" 18 | 19 | def log = Logger.getLogger("com.gonchik.scripts.groovy.cleanupLastViewHistoryForUser") 20 | log.setLevel(Level.DEBUG) 21 | 22 | 23 | // This script shows how to clean up the history items from inactive users 24 | def userHistoryManager = ComponentAccessor.getComponent(UserHistoryManager.class) 25 | 26 | def appUser = ComponentAccessor.getUserManager().getUserByName(USERNAME) 27 | def sb = new StringBuilder() 28 | if (appUser) { 29 | List recentUserHistory = userHistoryManager.getHistory(UserHistoryItem.ASSIGNEE, appUser); 30 | if (!isPreview) { 31 | userHistoryManager.removeHistoryForUser(appUser) 32 | } 33 | sb.append("${appUser.name}
\n") 34 | } 35 | return sb.toString() 36 | 37 | -------------------------------------------------------------------------------- /groovy/jira/users/userCleanupAllSubscriptionsForInActiveUsers.groovy: -------------------------------------------------------------------------------- 1 | boolean isPreview = true 2 | /* 3 | This script do cleanup of subscription for inactive users 4 | Purpose: reduce extra checking during send subscriptions 5 | Link: https://confluence.atlassian.com/jirakb/keep-receiving-subscription-emails-from-deleted-filters-314450232.html 6 | Additional: This script can be run from Jira -> Administration -> Add-ons -> Script Console 7 | Contribution: Gonchik Tsymzhitov 8 | */ 9 | 10 | import com.atlassian.jira.bc.JiraServiceContextImpl 11 | import com.atlassian.jira.bc.JiraServiceContext 12 | import com.atlassian.jira.bc.filter.SearchRequestService 13 | import com.atlassian.jira.component.ComponentAccessor 14 | import com.atlassian.jira.user.ApplicationUser 15 | import com.atlassian.jira.bc.user.search.UserSearchService 16 | import com.atlassian.jira.bc.user.search.UserSearchParams 17 | import com.atlassian.jira.issue.subscription.SubscriptionManager 18 | import org.apache.log4j.Logger 19 | import org.apache.log4j.Level 20 | 21 | 22 | def log = Logger.getLogger("com.gonchik.scripts.groovy.userCleanupAllSubscriptionsForInActiveUsers") 23 | log.setLevel(Level.DEBUG) 24 | 25 | UserSearchService userSearchService = ComponentAccessor.getOSGiComponentInstanceOfType(UserSearchService.class) 26 | UserSearchParams userSearchParams = (new UserSearchParams.Builder()).allowEmptyQuery(true).includeActive(false).includeInactive(true).maxResults(100000).build() 27 | def subscriptionManager = ComponentAccessor.getOSGiComponentInstanceOfType(SubscriptionManager.class) 28 | 29 | for (ApplicationUser appUser : userSearchService.findUsers("", userSearchParams)) { 30 | log.debug("Cleaning subscriptions for ${appUser.name}") 31 | 
if (!isPreview) { // honour the preview flag declared at the top of the script -- previously the delete ran unconditionally, making "preview" mode destructive 32 | subscriptionManager.deleteSubscriptionsForUser(appUser) 33 | } 34 | } 35 | log.debug("Cleaned up not needed subscriptions") -------------------------------------------------------------------------------- /groovy/jira/users/userCleanupStopWatchingInactiveUsers.groovy: -------------------------------------------------------------------------------- 1 | boolean isPreview = true 2 | /* 3 | Clean up the inactive watchers 4 | Additional: This script can be run from Jira -> Administration -> Add-ons -> Script Console 5 | Tested Environment: Jira 8.20.5, 8.13.3 6 | Contribution: Gonchik Tsymzhitov 7 | */ 8 | import com.atlassian.jira.component.ComponentAccessor 9 | import com.atlassian.jira.bc.user.search.UserSearchService 10 | import com.atlassian.jira.bc.user.search.UserSearchParams 11 | import com.atlassian.jira.user.ApplicationUser 12 | import com.atlassian.jira.issue.watchers.WatcherManager 13 | import org.apache.log4j.Logger 14 | import org.apache.log4j.Level 15 | 16 | def log = Logger.getLogger("com.gonchik.scripts.groovy.userCleanupStopWatchingInactiveUsers") 17 | log.setLevel(Level.DEBUG) 18 | 19 | // this script shows how to clean up the inactive watchers 20 | UserSearchService userSearchService = ComponentAccessor.getComponent(UserSearchService.class) 21 | UserSearchParams userSearchParams = (new UserSearchParams.Builder()).allowEmptyQuery(true).includeActive(false).includeInactive(true).maxResults(100000).build() 22 | WatcherManager watcherManager = ComponentAccessor.getComponent(WatcherManager.class) 23 | 24 | def sb = new StringBuilder() 25 | for (ApplicationUser appUser : userSearchService.findUsers("", userSearchParams)) { 26 | ApplicationUser userToRemove = appUser 27 | if (!isPreview) { 28 | watcherManager.removeAllWatchesForUser(userToRemove) 29 | } 30 | sb.append("${userToRemove.name}
\n") 31 | } 32 | 33 | return sb.toString() -------------------------------------------------------------------------------- /groovy/jira/various/removeAllAttachmentsForTicket.groovy: -------------------------------------------------------------------------------- 1 | /* 2 | Remove all attachments for exact ticket 3 | */ 4 | 5 | import com.atlassian.jira.component.ComponentAccessor 6 | import com.atlassian.jira.issue.Issue 7 | import org.apache.log4j.Logger 8 | import com.atlassian.jira.issue.IssueManager 9 | 10 | import com.atlassian.jira.issue.AttachmentManager 11 | 12 | 13 | IssueManager im = ComponentAccessor.getIssueManager() 14 | def issue = im.getIssueObject('FM-773') 15 | 16 | AttachmentManager atm = ComponentAccessor.getAttachmentManager() 17 | def atms = atm.getAttachments(issue) 18 | atms.each { 19 | it -> atm.deleteAttachment(it) 20 | } -------------------------------------------------------------------------------- /sh/adjust_language_pack_of_jira.sh: -------------------------------------------------------------------------------- 1 | su jira 2 | echo "Adjust existing bundle of translation" 3 | cd /opt/atlassian/jira/atlassian-jira/WEB-INF/atlassian-bundled-plugins/ 4 | mkdir unpack 5 | cd unpack 6 | /usr/java/latest/bin/jar xvf ../jira-core-language-pack-ru_RU-9.8.0.v20230228123141.jar 7 | vim com/atlassian/jira/web/action/JiraWebActionSupport_ru_RU.properties 8 | /usr/java/latest/bin/jar cf ../jira-core-language-pack-ru_RU-9.8.0.v20230228123141.jar * 9 | cd .. 
10 | rm -rf unpack/ 11 | -------------------------------------------------------------------------------- /sh/attachment_migration.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gonchik/cleanup-scripts/cf74d762d6cfde73635048253e2f63b1d7111ac2/sh/attachment_migration.sh -------------------------------------------------------------------------------- /sh/backup_confluence.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | echo "That's used only for schematic backup for creating test env" 3 | echo "Just query to make a copy faster as possible" 4 | # just one time script to share 5 | CONFLUENCE_HOME="/var/atlassian/application-data/confluence" 6 | CONFLUENCE_INSTALL="/opt/atlassian/confluence/" 7 | cd ${CONFLUENCE_INSTALL} 8 | tar --exclude="logs" --exclude="temp" --exclude="work" -czvf confluence_install.tgz ${CONFLUENCE_INSTALL} 9 | CONFLUENCE_HOME="/var/atlassian/application-data/confluence" 10 | cd ${CONFLUENCE_HOME} 11 | tar -czvf confluence_home.tgz --exclude "shared-home" --exclude "logs" --exclude="analytics-logs" --exclude="temp" --exclude="webresource-temp" --exclude="backups" --exclude="journal/" --exclude="index" --exclude="viewfile" --exclude="sandbox" --exclude="plugins-cache" --exclude="plugins-osgi-cache" --exclude="thumbnails" --exclude="imgEffects" --exclude="log" --exclude="restore" --exclude="recovery" --exclude="bundled-plugins" --exclude="plugins-temp" ${CONFLUENCE_HOME} 12 | 13 | echo "DB activity" 14 | su postgres # run the dump as the postgres OS user (fixed typo: was 'su postges', a nonexistent account, so the pg_dump step never ran under the intended user) 15 | pg_dump -d confluencedb -f backup_file.sql 16 | -------------------------------------------------------------------------------- /sh/changeOrder.sh: -------------------------------------------------------------------------------- 1 | curl 'https://jira.example.io/secure/admin/StatusUp.jspa?up=§' \ 2 | -H 'authority: jira.example.io' \ 3 | -H 'accept: 
text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9' \ 4 | -H 'accept-language: en,ru-RU;q=0.9,ru;q=0.8' \ 5 | -H 'cookie: '\ 6 | -H 'dnt: 1' \ 7 | -H 'referer: https://jira.example.io/secure/admin/ViewStatuses.jspa' \ 8 | -H 'sec-ch-ua-platform: "macOS"' \ 9 | -H 'sec-fetch-dest: document' \ 10 | -H 'sec-fetch-mode: navigate' \ 11 | -H 'sec-fetch-site: same-origin' \ 12 | -H 'sec-fetch-user: ?1' \ 13 | -H 'upgrade-insecure-requests: 1' \ 14 | -H 'user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36' \ 15 | --compressed 16 | -------------------------------------------------------------------------------- /sh/db_active_checker.sh: -------------------------------------------------------------------------------- 1 | # check number active sessions 2 | cat atlassian-jira-perf.log* | egrep 'dbcp.numActive","value":"[0-9]*"' | sed -E 's/,[0-9]{3}.*dbcp.numActive",/ /g' | sed -E 's/\},\{.*$//g' | sed 's/"value"://g' | sed 's/"//g' | cut -d" " -f-3 |sort |awk '{if ($3 >20) {print $1" "$2" "$3 } }' 3 | 4 | # check number of active HTTP sessions 5 | cat atlassian-jira-perf.log* | egrep 'http.sessions","value":"[0-9]*"' | sed -E 's/,[0-9]{3}.*http.sessions",/ /g' | sed -E 's/\},\{.*$//g' | sed 's/"value"://g' | sed 's/"//g' | cut -d" " -f-3 |sort |awk '{if ($3 >1300) {print $1" "$2" "$3 } }' 6 | -------------------------------------------------------------------------------- /sh/dd_dsync_checker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Disk performance check with dd dsync (synchronized I/O for data)" 3 | # Small files (64KB) 4 | dd if=/dev/zero of=./dsync64KB.img bs=64 count=1000 oflag=dsync 2>&1|tee dsync-test-1-64KB.log 5 | 6 | # Small files (512KB) 7 | dd if=/dev/zero of=./dsync64KB.img bs=512 count=1000 oflag=dsync 2>&1|tee 
dsync-test-1-512KB.log 8 | 9 | # Small files (1024KB) 10 | dd if=/dev/zero of=./dsync64KB.img bs=1024 count=1000 oflag=dsync 2>&1|tee dsync-test-1-1024KB.log 11 | 12 | # Medium files (8MB) 13 | dd if=/dev/zero of=./dsync8MB.img bs=8k count=1000 oflag=dsync 2>&1|tee dsync-test-2-8MB.log 14 | 15 | # Big files (128MB) 16 | dd if=/dev/zero of=./dsync128MB.img bs=128k count=1000 oflag=dsync 2>&1|tee dsync-test-3-128MB.log 17 | 18 | for log in dsync*.log; do echo "## $log ##"; cat $log; done 19 | 20 | echo "Removing dsync*.img files..." 21 | find . -name "dsync*.img" -delete 22 | 23 | echo "More info you can bring from https://cwiki.apache.org/confluence/display/lucene/ImproveIndexingSpeed" -------------------------------------------------------------------------------- /sh/macos/zsh_macos_cleanupup_account_policy_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Get the list of all user accounts 4 | users=$(dscl . list /Users) 5 | 6 | # Loop through each user account and delete the accountPolicyData 7 | for user in $users; do 8 | # Get the user ID for the current user 9 | userID=$(id -u $user 2>/dev/null) 10 | 11 | # Skip root, service accounts, and users with UID less than 500 12 | if [ "$user" != "root" ] && [ "$userID" -ge 500 ]; then 13 | echo "Deleting accountPolicyData for user: $user" 14 | sudo dscl . deletepl /Users/$user accountPolicyData history 15 | else 16 | echo "Skipping system/service account: $user" 17 | fi 18 | done 19 | 20 | echo "AccountPolicyData cleanup completed for regular users." 
-------------------------------------------------------------------------------- /sh/upgrade_bamboo.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Main variables 4 | NEW_RELEASE=atlassian-bamboo-10.2.3 5 | OLD_RELEASE=atlassian-bamboo-9.6.0 6 | APP_USER=bamboo 7 | APP_INSTALL_DIR=/opt/atlassian/bamboo 8 | APP_HOME=/var/atlassian/application-data/bamboo 9 | 10 | 11 | cd ${APP_INSTALL_DIR} || exit 12 | echo "Downloading installation from https://www.atlassian.com/software/bamboo/download-archives" 13 | wget -c https://www.atlassian.com/software/bamboo/downloads/binary/atlassian-bamboo-10.2.3.tar.gz 14 | tar -xzvf atlassian-bamboo-10.2.3.tar.gz 15 | echo 'Download jdk 17 if you need https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.15%2B6/OpenJDK17U-jdk_x64_linux_hotspot_17.0.15_6.tar.gz' 16 | 17 | 18 | echo "Copying JRE" 19 | cp -rf {${OLD_RELEASE},${NEW_RELEASE}}/jre 20 | echo 'Rewrite setenv.sh file' 21 | yes | cp {${OLD_RELEASE},${NEW_RELEASE}}/bin/setenv.sh 22 | echo 'Rewrite server.xml' 23 | yes | cp {${OLD_RELEASE},${NEW_RELEASE}}/conf/server.xml 24 | 25 | echo "Rewrite home location properties" 26 | yes | cp {${OLD_RELEASE},${NEW_RELEASE}}/atlassian-bamboo/WEB-INF/classes/bamboo-init.properties 27 | 28 | 29 | chown -R ${APP_USER}: ${NEW_RELEASE}/ 30 | 31 | systemctl stop bamboo 32 | unlink current 33 | ln -s ${NEW_RELEASE} current 34 | 35 | echo "Cleaning lock files" 36 | rm -rf ${APP_HOME}/bamboo.lock 37 | echo "Cleaning old logs from old installation" 38 | rm -f ${OLD_RELEASE}/logs/* 39 | rm -rf ${OLD_RELEASE}/temp/* 40 | 41 | echo "Cleaning old caches" 42 | rm -rf ${APP_HOME}/caches/* 43 | 44 | systemctl restart bamboo 45 | 46 | # Checking logs 47 | # tail -f ${APP_INSTALL_DIR}/${NEW_RELEASE}/logs/catalina.out 48 | # tail -f ${APP_HOME}/logs/atlassian-bamboo.log 49 | -------------------------------------------------------------------------------- 
/sh/upgrade_bitbucket.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Main variables 4 | NEW_RELEASE=atlassian-bitbucket-9.4.5 5 | OLD_RELEASE=atlassian-bitbucket-7.17.9 6 | APP_USER=atlbitbucket 7 | APP_INSTALL_DIR=/opt/atlassian/bitbucket 8 | APP_HOME=/var/atlassian/application-data/bitbucket 9 | 10 | 11 | cd ${APP_INSTALL_DIR} || exit 12 | echo "Downloading installation from https://www.atlassian.com/software/bitbucket/download-archives" 13 | wget -c https://www.atlassian.com/software/stash/downloads/binary/atlassian-bitbucket-9.4.5.tar.gz 14 | tar -xzvf atlassian-bitbucket-9.4.5.tar.gz 15 | echo 'Download jdk 17 if you need https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.15%2B6/OpenJDK17U-jdk_x64_linux_hotspot_17.0.15_6.tar.gz' 16 | 17 | 18 | echo "Copying JRE location" 19 | cp -rf {${OLD_RELEASE},${NEW_RELEASE}}/bin/set-jre-home.sh 20 | echo "Copy customized Bitbucket Home location" 21 | cp -rf {${OLD_RELEASE},${NEW_RELEASE}}/bin/set-bitbucket-home.sh 22 | echo 'Rewrite setenv.sh file' 23 | yes | cp {${OLD_RELEASE},${NEW_RELEASE}}/bin/_start-webapp.sh 24 | 25 | 26 | chown -R ${APP_USER}: ${NEW_RELEASE}/ 27 | 28 | systemctl stop bitbucket 29 | unlink current 30 | ln -s ${NEW_RELEASE} current 31 | 32 | echo "Cleaning lock files" 33 | rm -f ${APP_HOME}/bitbucket.lock 34 | rm -f ${APP_HOME}/shared/.lock 35 | rm -f ${APP_HOME}/plugins/.osgi-plugins 36 | 37 | systemctl restart bitbucket 38 | 39 | # Checking logs 40 | # tail -f ${APP_HOME}/logs/atlassian-bitbucket.log 41 | -------------------------------------------------------------------------------- /sh/upgrade_confluence.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Main variables 4 | NEW_RELEASE=atlassian-confluence-9.2.3 5 | OLD_RELEASE=atlassian-confluence-8.5.11 6 | APP_USER=confluence 7 | APP_HOME=/var/atlassian/application-data/confluence/ 
8 | APP_INSTALL_DIR=/opt/atlassian/confluence 9 | 10 | cd ${APP_INSTALL_DIR} || exit 11 | 12 | wget -c https://www.atlassian.com/software/confluence/downloads/binary/atlassian-confluence-9.2.3.tar.gz 13 | tar -xzvf atlassian-confluence-9.2.3.tar.gz 14 | 15 | 16 | 17 | 18 | echo "Copying JRE" 19 | if [ -d "${OLD_RELEASE}/jre" ]; then 20 | cp -rf {${OLD_RELEASE},${NEW_RELEASE}}/jre 21 | else 22 | echo "The directory ${OLD_RELEASE}/jre does not exist." 23 | fi 24 | 25 | echo 'Rewrite setenv.sh file' 26 | yes | cp {${OLD_RELEASE},${NEW_RELEASE}}/bin/setenv.sh 27 | echo 'Rewrite server.xml' 28 | yes | cp {${OLD_RELEASE},${NEW_RELEASE}}/conf/server.xml 29 | echo "Rewrite home location properties" 30 | yes | cp {${OLD_RELEASE},${NEW_RELEASE}}/confluence/WEB-INF/classes/confluence-init.properties 31 | 32 | chown -R ${APP_USER}: ${NEW_RELEASE}/ 33 | 34 | systemctl stop confluence 35 | unlink current 36 | ln -s ${NEW_RELEASE} current 37 | 38 | echo "Cleaning plugin cache" 39 | rm -rf ${APP_HOME}/plugins-osgi-cache/* 40 | rm -rf ${APP_HOME}/plugins-cache/* 41 | rm -f ${APP_HOME}/lock 42 | 43 | echo "Clean old logs from old installation" 44 | rm -f ${OLD_RELEASE}/logs/* 45 | rm -rf ${OLD_RELEASE}/temp/* 46 | 47 | systemctl restart confluence 48 | 49 | # Checking logs 50 | # tail -f ${NEW_RELEASE}/logs/catalina.out 51 | # tail -f ${APP_HOME}/logs/atlassian-confluence.log 52 | -------------------------------------------------------------------------------- /sql/clickhouse/clickhouse_size_databases.sql: -------------------------------------------------------------------------------- 1 | SELECT concat(database, '.', table) AS table, 2 | formatReadableSize(sum(bytes)) AS compressed_size, 3 | formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size, 4 | sum(rows) AS rows, 5 | max(modification_time) AS latest_modification, 6 | sum(bytes) AS bytes_size, 7 | sum(data_uncompressed_bytes) AS uncompressed_bytes_size, 8 | any(engine) AS engine, 9 | 
formatReadableSize(sum(primary_key_bytes_in_memory)) AS primary_keys_size 10 | FROM system.parts 11 | WHERE active 12 | GROUP BY database, table 13 | ORDER BY bytes_size DESC; 14 | 15 | -------------------------------------------------------------------------------- /sql/clickhouse/clickhouse_size_rows_of_table.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Get compressed, uncompressed, rows of tables 3 | i.e. sandbox.gonchik 4 | */ 5 | SELECT database, 6 | table, 7 | formatReadableSize(size) as size, 8 | formatReadableSize(dub) as size_uncompressed, 9 | formatReadableQuantity(rows) as rows 10 | FROM ( 11 | SELECT table, 12 | sum(bytes) AS size, 13 | sum(bytes_on_disk) AS size_ondisk, 14 | sum(data_compressed_bytes) AS dcb, 15 | sum(data_uncompressed_bytes) AS dub, 16 | sum(rows) AS rows, 17 | database 18 | FROM system.parts 19 | WHERE 1 20 | and active 21 | and database in ['sandbox' ] 22 | and position(lower(table), 'gonchik') 23 | GROUP BY database, table 24 | ORDER BY size_ondisk DESC 25 | ); -------------------------------------------------------------------------------- /sql/confluence/confluence_audit_domain_emails.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Auditing user email domains by querying the application database 3 | Purpose 4 | If you are migrating to the Cloud or just auditing your users for security purposes, a good way to ensure that 5 | only authorized users will have access to your instance is to audit their email domains. 6 | Emails can be used to reset a user password and therefore are a key component to be assessed to keep your user base safe. 7 | This article provides steps to help you obtain an aggregate list of email domains and how many users use each domain. 8 | With this information, you can then work with your security team to audit it. 
9 | 10 | Solution 11 | Step 1: In your instance database, run the following SQL query to retrieve a report containing 12 | all the domains used in user emails and the user count for each domain: 13 | 14 | 15 | link: https://confluence.atlassian.com/migrationkb/auditing-user-email-domains-by-querying-the-application-database-1180146477.html 16 | */ 17 | select right(cwd_user.email_address, strpos(reverse(cwd_user.email_address), '@') - 1), count(*) 18 | from cwd_user 19 | inner join cwd_directory cd on cd.id = cwd_user.directory_id 20 | where cd.active = 'T' 21 | group by 1 22 | order by 2 desc; -------------------------------------------------------------------------------- /sql/confluence/confluence_crowd_directory_fails.sql: -------------------------------------------------------------------------------- 1 | /* 2 | User directory fails to sync with Confluence due to 'Unable to find user mapping' error 3 | Link: https://confluence.atlassian.com/confkb/user-directory-fails-to-sync-with-confluence-due-to-unable-to-find-user-mapping-error-894116978.html 4 | */ 5 | 6 | SELECT * FROM cwd_user WHERE lower_user_name NOT IN (SELECT lower_username FROM user_mapping); 7 | -------------------------------------------------------------------------------- /sql/confluence/confluence_crowd_group_user_directory_sync_fails.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Synchronization with external directory fails with error: query did not return unique result due to duplicate groups 3 | Link: 4 | https://confluence.atlassian.com/confkb/synchronization-with-external-directory-fails-with-error-query-did-not-return-unique-result-due-to-duplicate-groups-838543854.html 5 | */ 6 | 7 | SELECT lower_group_name 8 | FROM cwd_group 9 | GROUP BY lower_group_name 10 | HAVING (COUNT(lower_group_name) > 1); 11 | -------------------------------------------------------------------------------- 
/sql/confluence/confluence_detect_ancestors_table_problem.sql: -------------------------------------------------------------------------------- 1 | -- Helps to understand the problem with the ancestors table: duplicate ancestor/descendant pairs in CONFANCESTORS 2 | -- https://jira.atlassian.com/browse/CONFSERVER-54945 3 | -- https://jira.atlassian.com/browse/CONFSERVER-25188 4 | 5 | SELECT A.ANCESTORID, A.DESCENDENTID, COUNT(A.DESCENDENTID) AS DUPLICATE_COUNT 6 | FROM CONFANCESTORS A 7 | INNER JOIN CONTENT B ON B.CONTENTID = A.DESCENDENTID 8 | GROUP BY A.ANCESTORID, A.DESCENDENTID 9 | HAVING COUNT(A.DESCENDENTID) > 1 10 | ORDER BY DUPLICATE_COUNT DESC -------------------------------------------------------------------------------- /sql/confluence/confluence_fix_after_fixing_collation_on_postgresql.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Changing Database Collation/Ctype to UTF-8 breaks pretty links 3 | Purpose: after fixing collation on PostgreSQL breaks links 4 | 5 | Confluence database collation/ctype should be set to UTF-8. 6 | If Confluence resides on a database with an ASCII (C) collation/ctype 7 | we advise that this is changed to UTF-8, 8 | as described in: How to fix the collation of a Postgres Confluence database 9 | There is however a potential problem for instances using a non-Latin characters 10 | that have capitalisations, such as Cyrillic or Greek, 11 | that may manifest in ways such as pretty links being broken. 12 | The cause is that an ASCII (C) ctype, will ignore non-Latin characters, 13 | as it won't know how to treat them. That means that character manipulation functions, 14 | such as those used to change capitalisation, won't be applied to them. 15 | Confluence relies on those functions to populate the lower* columns in its database. 16 | Such lowertitle in CONTENT, or lowerspacekey in SPACES. 17 | As a result of the failure to apply those functions, when using non-ASCII characters, 18 | the columns will be populated by the values as provided, including capitalisations.
19 | 20 | link: https://confluence.atlassian.com/confkb/changing-database-collation-ctype-to-utf-8-breaks-pretty-links-1086419517.html 21 | */ 22 | update content set lowertitle=lower(lowertitle); 23 | -------------------------------------------------------------------------------- /sql/confluence/confluence_how_fetch_page_information_containing_pagename.sql: -------------------------------------------------------------------------------- 1 | /* 2 | How to Fetch Page Information Containing Pagename, Url, Creator, Lastmodifiedby From the Confluence Database 3 | This article will help you to retrieve detailed Page information from your Confluence database. 4 | The below SQL queries will fetch the following Page information data: 5 | - PageName 6 | - Creator 7 | - CreationDate 8 | - LastModified Date 9 | - SpaceName 10 | - LastModifier Username 11 | - PageURL 12 | Note: Please replace the http:localhost:6720/c6720 value with your own base url value. 13 | Link: https://confluence.atlassian.com/confkb/how-to-fetch-page-information-containing-pagename-url-creator-lastmodifiedby-from-the-confluence-database-1047535924.html 14 | */ 15 | 16 | SELECT c.title as PageName, 17 | u.username AS Creator, 18 | c.creationdate, 19 | c.lastmoddate, 20 | s.Spacename, 21 | um.username AS LastModifier, 22 | CONCAT('http:localhost:6720/c6720', '/pages/viewpage.action?pageId=', c.contentid) AS "Page URL" 23 | FROM content c 24 | JOIN user_mapping u 25 | ON c.creator = u.user_key 26 | JOIN user_mapping um 27 | ON c.lastmodifier = um.user_key 28 | JOIN Spaces s 29 | on c.SpaceID = s.SpaceID 30 | WHERE c.prevver IS NULL 31 | AND c.contenttype = 'PAGE' 32 | AND c.content_status = 'current' 33 | ORDER BY s.spacename; -------------------------------------------------------------------------------- /sql/confluence/confluence_links_checker.sql: -------------------------------------------------------------------------------- 1 | /* 2 | How to report on links in a Confluence instance 3 | Purpose: 
For auditing purposes, you may wish to see what links are being used 4 | on each page within a Confluence instance. 5 | Link: https://confluence.atlassian.com/confkb/how-to-report-on-links-in-a-confluence-instance-795936565.html 6 | */ 7 | 8 | 9 | SELECT s.spacename as Space, c.title as Page, l.destspacekey as SpaceOrProtocol, l.destpagetitle as Destination 10 | FROM LINKS l 11 | JOIN CONTENT c ON c.contentid = l.contentid 12 | JOIN SPACES s ON s.spaceid = c.spaceid 13 | WHERE c.prevver IS NULL 14 | ORDER BY l.destspacekey -------------------------------------------------------------------------------- /sql/confluence/confluence_recreate_constraints.sql: -------------------------------------------------------------------------------- 1 | /* 2 | How to drop and recreate the database constraints on PostgreSQL. 3 | link: https://confluence.atlassian.com/kb/how-to-drop-and-recreate-the-database-constraints-on-postgresql-776812450.html 4 | */ 5 | 6 | -- CREATING A FILE TO DROP THE CONSTRAINTS. 7 | copy (SELECT 'ALTER TABLE '||nspname||'.\"'||relname||'\" DROP CONSTRAINT \"'||conname||'\";' 8 | FROM pg_constraint 9 | INNER JOIN pg_class ON conrelid=pg_class.oid 10 | INNER JOIN pg_namespace ON pg_namespace.oid=pg_class.relnamespace 11 | ORDER BY CASE WHEN contype='f' THEN 0 ELSE 1 END,contype,nspname,relname,conname) to '/droppingConstraints.sql'; 12 | 13 | 14 | -- CREATING A FILE TO ADD THE CONSTRAINTS LATER ON. 
15 | copy (SELECT 'ALTER TABLE '||nspname||'.\"'||relname||'\" ADD CONSTRAINT \"'||conname||'\" '|| pg_get_constraintdef(pg_constraint.oid)||';' 16 | FROM pg_constraint 17 | INNER JOIN pg_class ON conrelid=pg_class.oid 18 | INNER JOIN pg_namespace ON pg_namespace.oid=pg_class.relnamespace 19 | ORDER BY CASE WHEN contype='f' THEN 0 ELSE 1 END DESC,contype DESC,nspname DESC,relname DESC,conname DESC) to '/addingConstraint.sql'; -------------------------------------------------------------------------------- /sql/confluence/confluence_remove_synchrony_tables.sql: -------------------------------------------------------------------------------- 1 | /* 2 | How to reduce the size of Synchrony tables 3 | Purpose: problems with synchrony works for large systems 4 | link: https://confluence.atlassian.com/confkb/how-to-reduce-the-size-of-synchrony-tables-858770831.html 5 | 6 | */ 7 | 8 | TRUNCATE TABLE "EVENTS"; 9 | TRUNCATE TABLE "SECRETS"; 10 | TRUNCATE TABLE "SNAPSHOTS"; -------------------------------------------------------------------------------- /sql/confluence/confluence_url_notification_containing_plugins_servlet.sql: -------------------------------------------------------------------------------- 1 | /* 2 | MySQL query 3 | Broken URL in Notification Containing Plugins/Servlet/Undefined 4 | url: https://confluence.atlassian.com/confkb/broken-url-in-notification-containing-plugins-servlet-undefined-678561912.html 5 | */ 6 | 7 | SELECT TABLE_SCHEMA, 8 | TABLE_NAME, 9 | CCSA.CHARACTER_SET_NAME AS DEFAULT_CHAR_SET, 10 | COLUMN_NAME, 11 | COLUMN_TYPE, 12 | C.CHARACTER_SET_NAME, 13 | ENGINE 14 | FROM information_schema.TABLES AS T 15 | JOIN information_schema.COLUMNS AS C USING (TABLE_SCHEMA, TABLE_NAME) 16 | JOIN information_schema.COLLATION_CHARACTER_SET_APPLICABILITY AS CCSA 17 | ON (T.TABLE_COLLATION = CCSA.COLLATION_NAME) 18 | WHERE TABLE_SCHEMA = SCHEMA () 19 | AND C.DATA_TYPE IN ('enum' 20 | , 'varchar' 21 | , 'char' 22 | , 'text' 23 | , 'mediumtext' 24 | , 
'longtext') 25 | ORDER BY TABLE_SCHEMA, 26 | TABLE_NAME, 27 | COLUMN_NAME; -------------------------------------------------------------------------------- /sql/confluence/confluence_user_directory_sync_fails_null_pointer_exception.sql: -------------------------------------------------------------------------------- 1 | /* 2 | User directory synchronization fails with NullPointerException at HibernateConfluenceUserDao.rename 3 | link: 4 | https://confluence.atlassian.com/confkb/user-directory-synchronization-fails-with-nullpointerexception-at-hibernateconfluenceuserdao-rename-794500074.html 5 | */ 6 | 7 | SELECT * 8 | FROM user_mapping 9 | WHERE 10 | LOWER(username) != lower_username 11 | OR lower_username IS NULL; 12 | -------------------------------------------------------------------------------- /sql/confluence/stash_crowd_ldap_user_directory_sync.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Unable to sync crowd user directory - query did not return a unique result 3 | link: https://confluence.atlassian.com/stashkb/unable-to-sync-crowd-user-directory-query-did-not-return-a-unique-result-664993976.html 4 | */ 5 | 6 | SELECT * 7 | FROM cwd_membership 8 | WHERE directory_id in 9 | (SELECT directory_id 10 | FROM cwd_membership 11 | GROUP BY directory_id, lower_parent_name, lower_child_name, membership_type 12 | HAVING COUNT(*) > 1) 13 | AND lower_parent_name in 14 | (SELECT lower_parent_name 15 | FROM cwd_membership 16 | GROUP BY directory_id, lower_parent_name, lower_child_name, membership_type 17 | HAVING COUNT(*) > 1) 18 | AND lower_child_name in 19 | (SELECT lower_child_name 20 | FROM cwd_membership 21 | GROUP BY directory_id, lower_parent_name, lower_child_name, membership_type 22 | HAVING COUNT(*) > 1) 23 | AND membership_type in 24 | (SELECT membership_type 25 | FROM cwd_membership 26 | GROUP BY directory_id, lower_parent_name, lower_child_name, membership_type 27 | HAVING COUNT(*) > 1); 
-------------------------------------------------------------------------------- /sql/confluence/stash_crowd_ldap_user_directory_sync_fails.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Unable to sync crowd user directory - query did not return a unique result 3 | Scenario 2: duplicate row in cwd_user 4 | link: https://confluence.atlassian.com/stashkb/unable-to-sync-crowd-user-directory-query-did-not-return-a-unique-result-664993976.html 5 | */ 6 | 7 | SELECT * 8 | FROM cwd_user 9 | WHERE external_id in 10 | (SELECT external_id 11 | FROM cwd_user 12 | GROUP BY external_id 13 | HAVING COUNT(*) > 1); -------------------------------------------------------------------------------- /sql/confluence/stats/confluence_calculate_number_of_content_in_trash.sql: -------------------------------------------------------------------------------- 1 | -- Get number of content in each space 2 | SELECT COUNT(c.contentid) AS number_of_trashed_pages, 3 | (SUM(LENGTH(b.BODY))) AS trash_total_size, 4 | s.SPACENAME AS space_name 5 | FROM BODYCONTENT b 6 | INNER JOIN CONTENT c ON (c.CONTENTID = b.CONTENTID) 7 | INNER JOIN SPACES s ON (c.SPACEID = s.SPACEID) 8 | WHERE b.CONTENTID IN 9 | (SELECT CONTENTID 10 | FROM CONTENT 11 | WHERE content_status = 'deleted' 12 | AND contenttype = 'PAGE') 13 | GROUP BY space_name 14 | ORDER BY trash_total_size 15 | LIMIT 5; 16 | -------------------------------------------------------------------------------- /sql/confluence/stats/confluence_content_with_longest_history.sql: -------------------------------------------------------------------------------- 1 | -- Get content with longest history 2 | 3 | SELECT title, 4 | -- CONTENTID, 5 | MAX(VERSION) 6 | FROM CONTENT 7 | WHERE contenttype = 'PAGE' 8 | GROUP BY title 9 | ORDER BY 2 DESC 10 | LIMIT 5; -------------------------------------------------------------------------------- /sql/confluence/stats/confluence_find_the_size_of_all_page_drafts_per_space.sql: 
-------------------------------------------------------------------------------- 1 | /* 2 | How to query the database to find the size of all page drafts per space 3 | Confluence administrators may want to audit draft page usage on their instance. 4 | link: https://confluence.atlassian.com/confkb/how-to-query-the-database-to-find-the-size-of-all-page-drafts-per-space-998664642.html 5 | */ 6 | select 7 | count(content.contentid) as number_of_drafts, 8 | pg_size_pretty(sum(pg_column_size(bodycontent.body))) as total_size_of_drafts, 9 | spaces.spacename as space_name 10 | from bodycontent 11 | inner join content on (content.contentid = bodycontent.contentid) 12 | inner join spaces on (content.spaceid = spaces.spaceid) 13 | where bodycontent.contentid in 14 | (select contentid from CONTENT where CONTENT_STATUS = 'draft' and CONTENTTYPE = 'PAGE') 15 | GROUP BY space_name 16 | ORDER BY number_of_drafts DESC, space_name; -------------------------------------------------------------------------------- /sql/confluence/stats/confluence_get_huge_attachments.sql: -------------------------------------------------------------------------------- 1 | -- Get list of huge attachments 2 | 3 | SELECT DISTINCT c.CONTENTID, 4 | c.TITLE AS attachmentTitle, 5 | u.USERNAME AS uploadedBy, 6 | co.TITLE AS pageTitle, 7 | round(cn.LONGVAL/1024/1024) AS MBytes 8 | FROM CONTENT AS c 9 | JOIN user_mapping AS u ON u.user_key = c.creator 10 | JOIN CONTENT AS co ON c.pageid = co.contentid 11 | JOIN CONTENTPROPERTIES AS cn ON cn.contentid = c.contentid 12 | WHERE c.contenttype = 'ATTACHMENT' 13 | AND cn.longval IS NOT NULL 14 | ORDER BY cn.longval DESC 15 | LIMIT 5; 16 | 17 | -------------------------------------------------------------------------------- /sql/confluence/stats/confluence_get_spaces_updated_date.sql: -------------------------------------------------------------------------------- 1 | -- Get Status of Spaces with created and updated date for non Archived spaces 2 | 3 | SELECT SPACEID, 
4 | SPACEKEY as "KEY", 5 | SPACETYPE as "Type", 6 | CREATIONDATE as "Created", 7 | LASTMODDATE as "Updated" 8 | FROM SPACES 9 | WHERE SPACESTATUS != 'ARCHIVED' 10 | ORDER BY LASTMODDATE ASC; -------------------------------------------------------------------------------- /sql/confluence/stats/confluence_get_stats_of_user_uploaded_attachments.sql: -------------------------------------------------------------------------------- 1 | -- Get stats of uploaded attachments by user 2 | SELECT u.lower_username, 3 | SUM(cp.longval)/1024 AS "Size in KB" 4 | FROM CONTENT c1 5 | JOIN CONTENT c2 ON c1.CONTENTID = c2.PAGEID 6 | JOIN user_mapping u ON c1.creator=u.user_key 7 | JOIN CONTENTPROPERTIES cp ON c2.CONTENTID = cp.CONTENTID 8 | WHERE c2.contenttype='ATTACHMENT' 9 | GROUP BY u.lower_username 10 | ORDER BY 2 DESC 11 | LIMIT 5; -------------------------------------------------------------------------------- /sql/confluence/stats/confluence_get_who_viewed_attachments.sql: -------------------------------------------------------------------------------- 1 | -- Ability to see which users viewed the attachments in Confluence Analytics 2 | -- https://jira.atlassian.com/browse/CONFSERVER-69474 3 | 4 | select c.title,to_timestamp(cast ("EVENT_AT" as bigint)/ 1000) , 5 | "NAME", "SPACE_KEY", um.username 6 | from "AO_7B47A5_EVENT" e, content c, user_mapping um 7 | where 8 | e."USER_KEY" = um.user_key and 9 | e."CONTENT_ID" = c.contentid and 10 | e."NAME" = 'attachment_viewed'; 11 | -------------------------------------------------------------------------------- /sql/confluence/stats/confluence_total_size_of_attachemens.sql: -------------------------------------------------------------------------------- 1 | -- Get total size of attachments in each space 2 | 3 | SELECT s.spaceid, 4 | s.spacename, 5 | round(sum(LONGVAL)/1024) 6 | FROM CONTENTPROPERTIES c 7 | JOIN CONTENT co ON c.contentid = co.contentid 8 | JOIN SPACES s ON co.spaceid = s.spaceid 9 | WHERE c.contentid IN 10 | (SELECT 
contentid 11 | FROM CONTENT 12 | WHERE contenttype = 'ATTACHMENT') 13 | AND c.propertyname = 'FILESIZE' 14 | GROUP BY s.spaceid 15 | ORDER BY 3 DESC 16 | LIMIT 5; -------------------------------------------------------------------------------- /sql/jira/assets/jira_assets_inconsitency_attribute_checker.sql: -------------------------------------------------------------------------------- 1 | /* 2 | This script checks for inconsistencies assets tables. 3 | Verify whether there is an inconsistency with the Objects attributes values in the database. 4 | Replace ? for one of the Object Ids affected. 5 | link:https://confluence.atlassian.com/jirakb/error-something-went-wrong-contact-administrator-exception-when-viewing-insight-objects-1178866909.html 6 | */ 7 | 8 | SELECT 9 | O.NAME OBJECT_NAME, 10 | O.OBJECT_TYPE_ID, 11 | OTA.NAME OTA_NAME, 12 | OA.ID OA_ID, 13 | OA.OBJECT_TYPE_ATTRIBUTE_ID, 14 | OA.OBJECT_ID, 15 | OAV.ID OAV_ID, 16 | OAV.BOOLEAN_VALUE, 17 | OAV.* 18 | FROM AO_8542F1_IFJ_OBJ O 19 | LEFT OUTER JOIN AO_8542F1_IFJ_OBJ_ATTR OA ON O.ID = OA.OBJECT_ID 20 | LEFT OUTER JOIN AO_8542F1_IFJ_OBJ_TYPE_ATTR OTA ON OTA.ID = OA.OBJECT_TYPE_ATTRIBUTE_ID 21 | LEFT OUTER JOIN AO_8542F1_IFJ_OBJ_ATTR_VAL OAV ON OA.ID = OAV.OBJECT_ATTRIBUTE_ID 22 | WHERE O.ID = ? 
-------------------------------------------------------------------------------- /sql/jira/assets/jira_dc_insight_data_cache_info.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Known issue with the clustermessage table in Insight for Data Center 3 | Link: https://documentation.mindville.com/display/ICV85/Known+issue+with+the+clustermessage+table+in+Insight+for+Data+Center 4 | https://documentation.mindville.com/display/ICV60/How+do+I+configure+the+clustermessage+retention+period+to+automatically+clear 5 | https://confluence.atlassian.com/assetapps/known-issue-with-the-clustermessage-table-in-assets-for-data-center-1168847922.html 6 | */ 7 | 8 | SELECT count(id) as "Count of messages" 9 | FROM clustermessage 10 | WHERE message like '%INSIGHT%' 11 | and message_time < NOW() - INTERVAL '3 hours'; 12 | 13 | /* 14 | Request to delete Insight Data cluster messages older than 3 hours 15 | */ 16 | 17 | DELETE 18 | FROM clustermessage 19 | WHERE message like '%INSIGHT%' 20 | and message_time < NOW() - INTERVAL '3 hours'; -------------------------------------------------------------------------------- /sql/jira/assets/jira_insight_clean_history_for_retention_policy.sql: -------------------------------------------------------------------------------- 1 | /* 2 | * 3 | * Retention policy for Insight for Jira SM and DC 4 | * 5 | */ 6 | 7 | SELECT count("ID") 8 | FROM "AO_8542F1_IFJ_OBJ_HIST" hist 9 | WHERE 10 | "CREATED" < NOW() - INTERVAL '3 months' ; 11 | -- and hist."OBJECT_TYPE_ATTRIBUTE_ID" = 400 ; 12 | 13 | -- action for retention policy 14 | /* 15 | DELETE 16 | FROM "AO_8542F1_IFJ_OBJ_HIST" hist 17 | WHERE 18 | "CREATED" < NOW() - INTERVAL '3 months' 19 | -- and hist."OBJECT_TYPE_ATTRIBUTE_ID" = 400 ; 20 | */ 21 | 22 | 23 | /* 24 | TRUNCATE TABLE "AO_8542F1_IFJ_OBJ_HIST"; 25 | */ -------------------------------------------------------------------------------- /sql/jira/assets/jira_insight_consistency_checker.sql: 
-------------------------------------------------------------------------------- 1 | /* 2 | Insight Consistency checker 3 | Check orphaned objects 4 | */ 5 | 6 | SELECT count("ID") 7 | FROM "AO_8542F1_IFJ_OBJ_HIST" 8 | WHERE "OBJECT_ID" is null; 9 | 10 | SELECT count("ID") 11 | FROM "AO_8542F1_IFJ_OBJ_JIRAISSUE" 12 | WHERE "OBJECT_ID" is null; 13 | 14 | SELECT count("ID") 15 | FROM "AO_8542F1_IFJ_COMMENT" 16 | WHERE "OBJECT_ID" is null; 17 | 18 | SELECT count("ID") 19 | FROM "AO_8542F1_IFJ_OBJ_ATTACH" 20 | WHERE "OBJECT_ID" is null; 21 | 22 | SELECT count("ID") 23 | FROM "AO_8542F1_IFJ_OBJ_WATCH" 24 | WHERE "OBJECT_ID" is null; 25 | 26 | -- detect attributes without linkage to object 27 | SELECT count("ID") 28 | FROM "AO_8542F1_IFJ_OBJ_ATTR_VAL" 29 | WHERE "OBJECT_ATTRIBUTE_ID" is null; -------------------------------------------------------------------------------- /sql/jira/assets/jira_insight_freetext_it_text_value.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Exception - freetext-reindex java.lang.IllegalStateException: it.textValue must not be null 3 | https://confluence.atlassian.com/jirakb/exception-freetext-reindex-java-lang-illegalstateexception-it-textvalue-must-not-be-null-1096100429.html 4 | */ 5 | 6 | SELECT * 7 | FROM "AO_8542F1_IFJ_OBJ_ATTR" OA, 8 | "AO_8542F1_IFJ_OBJ_ATTR_VAL" OAV 9 | WHERE OAV."OBJECT_ATTRIBUTE_ID" = OA."ID" 10 | AND OAV."TEXT_VALUE" IS NULL 11 | AND OA."OBJECT_TYPE_ATTRIBUTE_ID" IN 12 | (SELECT "ID" FROM "AO_8542F1_IFJ_OBJ_TYPE_ATTR" OTA WHERE OTA."DEFAULT_TYPE_ID" IN (0, 9)); 13 | 14 | 15 | /* 16 | 17 | update "AO_8542F1_IFJ_OBJ_ATTR_VAL" OAV 18 | set "TEXT_VALUE" = 'XOXOXOXOX' 19 | where OAV."ID" in (SELECT OAV."ID" FROM "AO_8542F1_IFJ_OBJ_ATTR" OA, "AO_8542F1_IFJ_OBJ_ATTR_VAL" OAV 20 | WHERE OAV."OBJECT_ATTRIBUTE_ID" = OA."ID" AND OAV."TEXT_VALUE" IS NULL 21 | AND OA."OBJECT_TYPE_ATTRIBUTE_ID" IN (SELECT "ID" FROM "AO_8542F1_IFJ_OBJ_TYPE_ATTR" OTA WHERE OTA."DEFAULT_TYPE_ID" IN 
(0,9))) 22 | 23 | */ -------------------------------------------------------------------------------- /sql/jira/assets/jira_insight_history_object_stats.sql: -------------------------------------------------------------------------------- 1 | -- Get Insight history attributes for the retention policy 2 | 3 | SELECT hist."AFFECTED_ATTRIBUTE", 4 | count(hist."ID") updates 5 | FROM "AO_8542F1_IFJ_OBJ_HIST" hist 6 | GROUP BY hist."AFFECTED_ATTRIBUTE" 7 | ORDER BY updates DESC; 8 | 9 | 10 | -- the next query is extra 11 | -- only ID aggregated 12 | SELECT hist."OBJECT_TYPE_ATTRIBUTE_ID", 13 | count(hist."ID") updates 14 | FROM "AO_8542F1_IFJ_OBJ_HIST" hist 15 | GROUP BY hist."OBJECT_TYPE_ATTRIBUTE_ID" 16 | ORDER BY updates DESC; 17 | 18 | -- aggregated for both for small asset management 19 | SELECT hist."OBJECT_TYPE_ATTRIBUTE_ID", 20 | hist."AFFECTED_ATTRIBUTE", 21 | count(hist."ID") updates 22 | FROM "AO_8542F1_IFJ_OBJ_HIST" hist 23 | GROUP BY hist."OBJECT_TYPE_ATTRIBUTE_ID", hist."AFFECTED_ATTRIBUTE" 24 | ORDER BY updates DESC; -------------------------------------------------------------------------------- /sql/jira/assets/jira_insight_producing_invalid_cache.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Insight producing "InvalidCacheLoadException: 3 | loadAll failed to return a value for xxx" or "This attribute needs to be indexed" errors 4 | link: 5 | https://confluence.atlassian.com/jirakb/insight-producing-invalidcacheloadexception-loadall-failed-to-return-a-value-for-xxx-or-this-attribute-needs-to-be-indexed-errors-1063163284.html 6 | */ 7 | 8 | SELECT "ID", "NAME", "REMOVABLE", "TYPE", "LABEL", "INDEXED" 9 | FROM "AO_8542F1_IFJ_OBJ_TYPE_ATTR" 10 | WHERE ("REMOVABLE" = 'false' OR "TYPE" = 1) 11 | AND ("INDEXED" != 'true' OR "INDEXED" is NULL); 12 | 13 | 14 | -- Set only the necessary attributes to be indexed 15 | UPDATE "AO_8542F1_IFJ_OBJ_TYPE_ATTR" 16 | SET "INDEXED" = 'true' 17 | WHERE ("REMOVABLE" = 'false' 
OR "TYPE" = 1) 18 | AND ("INDEXED" != 'true' OR "INDEXED" is NULL); 19 | 20 | 21 | 22 | -- Set all attributes but 'textarea' types to be indexed: 23 | UPDATE "AO_8542F1_IFJ_OBJ_TYPE_ATTR" 24 | SET "INDEXED" = 'true' 25 | WHERE "DEFAULT_TYPE_ID" != 9 AND ("INDEXED" != 'true' OR "INDEXED" is NULL); 26 | -------------------------------------------------------------------------------- /sql/jira/assets/jira_insight_producing_load_all_failed.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Insight producing "InvalidCacheLoadException: 3 | loadAll failed to return a value for xxx" or "This attribute needs to be indexed" errors 4 | https://confluence.atlassian.com/jirakb/insight-producing-invalidcacheloadexception-loadall-failed-to-return-a-value-for-xxx-or-this-attribute-needs-to-be-indexed-errors-1063163284.html 5 | */ 6 | 7 | SELECT "ID", "NAME", "REMOVABLE", "TYPE", "LABEL", "INDEXED" 8 | FROM "AO_8542F1_IFJ_OBJ_TYPE_ATTR" 9 | WHERE ("REMOVABLE" = 'false' OR "TYPE" = 1) 10 | AND ("INDEXED" != 'true' OR "INDEXED" is NULL); 11 | 12 | -------------------------------------------------------------------------------- /sql/jira/assets/jira_insight_stats_for_understanding_requirements.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Stats query to easily meet the requirements of the Insight app 3 | */ 4 | 5 | -- count of objects 6 | select count("ID") 7 | FROM "AO_8542F1_IFJ_OBJ"; 8 | 9 | -- count of attributes 10 | select count("ID") 11 | FROM "AO_8542F1_IFJ_OBJ_ATTR"; 12 | 13 | -- count of attribute values usually the same as previous one 14 | select count("ID") 15 | FROM "AO_8542F1_IFJ_OBJ_ATTR_VAL"; 16 | 17 | -- count of history records 18 | select count("ID") 19 | FROM "AO_8542F1_IFJ_OBJ_HIST"; 20 | 21 | -- count of relations between Jira tickets and objects 22 | select count("ID") 23 | FROM "AO_8542F1_IFJ_OBJ_JIRAISSUE"; 
-------------------------------------------------------------------------------- /sql/jira/assets/jira_insight_unable_delete_empty_schema.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Unable to delete empty Insight object scheme, Something went wrong, contact administrator 3 | https://confluence.atlassian.com/jirakb/unable-to-delete-empty-insight-object-scheme-something-went-wrong-contact-adminstrator-1124181236.html 4 | 5 | Detect orphaned elements 6 | */ 7 | 8 | SELECT * 9 | FROM "AO_8542F1_IFJ_OBJ" 10 | WHERE "ID" IS NULL; 11 | 12 | SELECT * 13 | FROM "AO_8542F1_IFJ_OBJ_ATTR" 14 | WHERE "OBJECT_ID" IS NULL; 15 | 16 | SELECT * 17 | FROM "AO_8542F1_IFJ_OBJ_ATTR_VAL" 18 | WHERE "OBJECT_ATTRIBUTE_ID" IS NULL; 19 | 20 | SELECT * 21 | FROM "AO_8542F1_IFJ_OBJ_HIST" 22 | WHERE "OBJECT_ID" is NULL; 23 | 24 | -- remove data from schema 25 | SELECT * 26 | FROM "AO_8542F1_IFJ_OBJ" 27 | WHERE "OBJECT_TYPE_ID" IN (SELECT "ID" FROM "AO_8542F1_IFJ_OBJ_TYPE" WHERE "OBJECT_SCHEMA_ID" = '9'); 28 | 29 | -------------------------------------------------------------------------------- /sql/jira/assets/jira_insight_unable_import_configuration.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Unable to open Import configuration in Insight 3 | link: https://confluence.atlassian.com/jirakb/unable-to-open-import-configuration-in-insight-1072214509.html 4 | */ 5 | 6 | 7 | SELECT * 8 | FROM "AO_8542F1_IFJ_IMPORT_SRC_OT" 9 | WHERE "OBJECT_TYPE_ID" NOT IN (SELECT "ID" FROM "AO_8542F1_IFJ_OBJ_TYPE"); 10 | 11 | SELECT * 12 | FROM "AO_8542F1_IFJ_IMPORT_SRC_OTA" 13 | WHERE "OBJECT_TYPE_ATTRIBUTE_ID" NOT IN (SELECT "ID" FROM "AO_8542F1_IFJ_OBJ_TYPE_ATTR"); 14 | 15 | 16 | -- DELETE FROM "AO_8542F1_IFJ_IMPORT_SRC_OT" WHERE "OBJECT_TYPE_ID" NOT IN (SELECT "ID" FROM "AO_8542F1_IFJ_OBJ_TYPE"); 17 | -- DELETE FROM "AO_8542F1_IFJ_IMPORT_SRC_OTA" WHERE "OBJECT_TYPE_ATTRIBUTE_ID" NOT IN (SELECT "ID" FROM 
"AO_8542F1_IFJ_OBJ_TYPE_ATTR"); -------------------------------------------------------------------------------- /sql/jira/assets/jira_insight_when_deleting_an_insight_schema_or_objects.sql: -------------------------------------------------------------------------------- 1 | /* 2 | When deleting an Insight Schema or searching for objects, Insight leads to an error: Something went wrong. Contact administrator 3 | https://confluence.atlassian.com/jirakb/when-deleting-an-insight-schema-or-searching-for-objects-insight-leads-to-an-error-something-went-wrong-contact-administrator-1096089797.html 4 | */ 5 | 6 | SELECT OAV."TEXT_VALUE", 7 | O."NAME", 8 | O."OBJECT_TYPE_ID", 9 | OTA."NAME", 10 | OA."ID", 11 | OA."OBJECT_TYPE_ATTRIBUTE_ID", 12 | OA."OBJECT_ID", 13 | OAV."ID", 14 | OAV."OBJECT_ATTRIBUTE_ID" 15 | FROM "AO_8542F1_IFJ_OBJ" O 16 | LEFT OUTER JOIN "AO_8542F1_IFJ_OBJ_ATTR" OA ON O."ID" = OA."OBJECT_ID" 17 | LEFT OUTER JOIN "AO_8542F1_IFJ_OBJ_TYPE_ATTR" OTA ON OTA."ID" = OA."OBJECT_TYPE_ATTRIBUTE_ID" 18 | LEFT OUTER JOIN "AO_8542F1_IFJ_OBJ_ATTR_VAL" OAV ON OA."ID" = OAV."OBJECT_ATTRIBUTE_ID" 19 | WHERE O."ID" = ?; 20 | 21 | -------------------------------------------------------------------------------- /sql/jira/cleanup/jira_clean_events_batching_mail_queue.sql: -------------------------------------------------------------------------------- 1 | /* 2 | * Batched notifications execute periodical tasks (every 14 days) to clean up old events 3 | * from the AO_733371_EVENT and AO_733371_EVENT_RECIPIENT tables. 4 | * However, on unknown circumstances, 5 | * this leads to extremely slow queries on the database if using Microsoft SQL Server. 6 | * This issue seems to happen more often on older MSSQL versions such as 2012 and 2014. 
7 | * 8 | * Steps to Reproduce 9 | * - Set up Batched Notifications (enabled by default on Jira 8.0) 10 | * - Have a large number of email notifications occurring daily 11 | * - Monitor the database CPU usage with graphs 12 | * 13 | * Expected Results 14 | * - The cleanup tasks occur without affecting the database performance, 15 | * as it does on other DBMSs such as Postgres. 16 | * 17 | * Actual Results 18 | * - The cleanup tasks take several hours to complete, 19 | * with the CPU spiking to 100% usage during that period. 20 | * General slowness is experienced on Jira due to this. 21 | * 22 | * Links: https://jira.atlassian.com/browse/JRASERVER-71350 23 | * https://jira.atlassian.com/browse/JSWSERVER-20794 24 | */ 25 | 26 | SELECT count(*) FROM "AO_733371_EVENT"; 27 | 28 | TRUNCATE "AO_733371_EVENT" CASCADE; 29 | 30 | /* 31 | * 32 | * 33 | * MySQL 34 | * 35 | * 36 | */ 37 | /* 38 | SELECT count(*) FROM AO_733371_EVENT; 39 | 40 | SET FOREIGN_KEY_CHECKS = 0; 41 | TRUNCATE AO_733371_EVENT ; 42 | SET FOREIGN_KEY_CHECKS = 1; 43 | */ -------------------------------------------------------------------------------- /sql/jira/cleanup/jira_clean_jeti_audit_log_entry.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Clean Jeti (Email this Issue) audit log entry 3 | i.e. 
longer than 2 months 4 | */ 5 | 6 | SELECT count("ID") 7 | FROM "AO_544E33_AUDIT_LOG_ENTRY" 8 | WHERE "SEND_TIME_STAMP" < current_date - interval '60' day; 9 | 10 | DELETE 11 | FROM "AO_544E33_AUDIT_LOG_ENTRY" 12 | WHERE "SEND_TIME_STAMP" < current_date - interval '60' day; -------------------------------------------------------------------------------- /sql/jira/cleanup/jira_clean_old_hipchat_data.sql: -------------------------------------------------------------------------------- 1 | /* 2 | * Detect old HipChat history 3 | */ 4 | 5 | SELECT count(id) 6 | FROM entity_property 7 | WHERE ENTITY_NAME = 'hipchat.integration.caches.issue-mentions'; 8 | 9 | 10 | /* 11 | -- cleaning that old hipchat data 12 | 13 | DELETE 14 | FROM entity_property 15 | WHERE ENTITY_NAME = 'hipchat.integration.caches.issue-mentions'; 16 | 17 | */ -------------------------------------------------------------------------------- /sql/jira/cleanup/jira_clean_queue_of_automation.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Jira Service Management queue cleanup 3 | */ 4 | 5 | DELETE 6 | FROM "AO_319474_QUEUE_PROPERTY" 7 | WHERE "QUEUE_ID" in (SELECT "ID" 8 | FROM "AO_319474_QUEUE" 9 | WHERE "CREATED_TIME" < (EXTRACT(EPOCH FROM (NOW() - INTERVAL '3 days')) * 1000)); 10 | 11 | DELETE 12 | FROM "AO_319474_QUEUE" 13 | WHERE "CREATED_TIME" < (EXTRACT(EPOCH FROM (NOW() - INTERVAL '3 days')) * 1000); 14 | 15 | 16 | -------------------------------------------------------------------------------- /sql/jira/cleanup/jira_clean_stucked_cache_values.sql: -------------------------------------------------------------------------------- 1 | -- Find mapped linked issues 2 | 3 | SELECT count(id) 4 | FROM entity_property 5 | WHERE ENTITY_NAME = 'fusion.caches.issue'; 6 | 7 | 8 | -- Investigate stuck values of caches 9 | 10 | SELECT count(*) 11 | FROM entity_property 12 | WHERE json_value LIKE '%9223371721494775807%'; 13 | 14 | -- DELETE FROM entity_property 
WHERE json_value LIKE '%9223371721494775807%'; -------------------------------------------------------------------------------- /sql/jira/cleanup/jira_cleanup_alert_logs_off_app.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Cleanup alerts history app 3 | */ 4 | 5 | TRUNCATE "AO_A406ED_FIRE_HISTORY_V3" CASCADE; -------------------------------------------------------------------------------- /sql/jira/cleanup/jira_cleanup_development_caches.sql: -------------------------------------------------------------------------------- 1 | -- DETECT HOW MANY VALUES DO YOU HAVE IN Development panel 2 | -- FYI: that info is like SQL cache 3 | 4 | -- MySQL dialect 5 | /* 6 | SELECT count(ID) FROM AO_575BF5_PROVIDER_ISSUE; 7 | TRUNCATE AO_575BF5_PROVIDER_ISSUE; 8 | */ 9 | 10 | -- PostgreSQL dialect 11 | SELECT count("ID") FROM "AO_575BF5_PROVIDER_ISSUE"; 12 | 13 | TRUNCATE TABLE "AO_575BF5_PROVIDER_ISSUE"; 14 | 15 | 16 | 17 | -- DETECT AND CLEAN DEV SUMMARY INFO 18 | -- mysql dialect 19 | /* 20 | SELECT count(ID) FROM AO_575BF5_DEV_SUMMARY; 21 | TRUNCATE AO_575BF5_DEV_SUMMARY; 22 | */ 23 | 24 | 25 | 26 | -- PostgreSQL dialect 27 | -- calculate count 28 | SELECT count("ID") FROM "AO_575BF5_DEV_SUMMARY"; 29 | 30 | -- Execution 31 | TRUNCATE TABLE "AO_575BF5_DEV_SUMMARY"; -------------------------------------------------------------------------------- /sql/jira/cleanup/jira_cleanup_jmwe_execution_log.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Cleanup JMWE (Jira Misc Workflow Extensions) execution log 3 | */ 4 | 5 | 6 | SELECT count(*) 7 | FROM "AO_7AEB56_EXECUTION_LOG"; 8 | 9 | 10 | -- cleanup table 11 | TRUNCATE TABLE "AO_7AEB56_EXECUTION_LOG"; -------------------------------------------------------------------------------- /sql/jira/cleanup/jira_cluster_lock_status_checker.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Jira Data 
Center Functionalities Loss Due to Cluster Wide Lock 3 | This only affects Jira Data Center - the application is not responding 4 | on any system administration page. Node restart causes 5 | the node to be stuck during application startup due to another node holding a cluster wide lock. 6 | link: https://jira.atlassian.com/browse/JRASERVER-69114 7 | https://confluence.atlassian.com/jirakb/jira-data-center-functionalities-loss-due-to-cluster-wide-lock-942860754.html 8 | 9 | */ 10 | 11 | 12 | SELECT count(*) 13 | FROM clusterlockstatus 14 | WHERE locked_by_node is not null; 15 | 16 | -- cleanup of unlocked_nodes 17 | DELETE 18 | FROM clusterlockstatus 19 | WHERE locked_by_node is NULL; -------------------------------------------------------------------------------- /sql/jira/cleanup/jira_clusterlockstatus_table_cleaner_and_checker.sql: -------------------------------------------------------------------------------- 1 | -- JiraDashboardStateStoreManager leave left-over rows in 'clusterlockstatus' table. 2 | -- https://jira.atlassian.com/browse/JRASERVER-69113 3 | -- Workflow scheme actions leave left-over rows in 'clusterlockstatus' table. 4 | -- https://jira.atlassian.com/browse/JRASERVER-68477 5 | select count(id) 6 | from clusterlockstatus; 7 | 8 | SELECT count(id) 9 | FROM clusterlockstatus 10 | WHERE update_time < (EXTRACT(EPOCH FROM (NOW() - INTERVAL '3 days')) * 1000); 11 | 12 | /* 13 | DELETE that clusterlockstatus rows 14 | */ 15 | DELETE 16 | FROM clusterlockstatus 17 | WHERE update_time < (EXTRACT(EPOCH FROM (NOW() - INTERVAL '3 days')) * 1000); 18 | -------------------------------------------------------------------------------- /sql/jira/cleanup/jira_detect_unused_issue_types.sql: -------------------------------------------------------------------------------- 1 | /* 2 | How to find unused issue types with SQL 3 | Problem: A Jira administrator may want to identify any issue types 4 | which are not being used across the instance. 
In other words, 5 | no issues of these issue types exist and therefore may be considered unnecessary clutter. 6 | link: https://confluence.atlassian.com/jirakb/how-to-find-unused-issue-types-with-sql-1072216995.html 7 | */ 8 | 9 | -- This query will return the name and id of any issue type which is currently not in-use. 10 | SELECT pname, id 11 | FROM issuetype 12 | WHERE id 13 | IN (SELECT optionid FROM optionconfiguration) 14 | AND id 15 | NOT IN (SELECT DISTINCT(issuetype) FROM jiraissue); 16 | 17 | 18 | -- It is also possible to restrict this to specific issue type schemes. 19 | -- For example, the query below only looks for issue types from the default issue type scheme. 20 | 21 | SELECT pname, id 22 | FROM issuetype 23 | WHERE id 24 | IN (SELECT optionid FROM optionconfiguration WHERE fieldconfig = 10000) 25 | AND id 26 | NOT IN (SELECT DISTINCT(issuetype) FROM jiraissue); -------------------------------------------------------------------------------- /sql/jira/cleanup/jira_fails_to_start_with_too_many_rows_found_in_clustered_job.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Jira server fails to start with Too many rows found for query on ClusteredJob error 3 | 4 | */ 5 | 6 | 7 | -- diagnosis 8 | SELECT * 9 | FROM clusteredjob 10 | WHERE job_id in (SELECT job_id FROM clusteredjob GROUP BY job_id HAVING COUNT(*) > 1); 11 | 12 | -- resolution 13 | -- delete duplicated clustered job 14 | 15 | -------------------------------------------------------------------------------- /sql/jira/cleanup/jira_infosysta_jira_nfj.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Cleanup notification count and unread notes 3 | com.infosysta.jira.nfj (notification for Jira) 4 | */ 5 | 6 | truncate "AO_248DF5_INOTIFICATION" cascade ; 7 | truncate "AO_248DF5_INOTIFI_COUNTER"; -------------------------------------------------------------------------------- 
/sql/jira/cleanup/jira_metadata_cleaner.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Detect old HipChat metadata and development metadata 3 | */ 4 | 5 | SELECT count(id) from entity_property where ENTITY_NAME = 'fusion.caches.issue'; 6 | SELECT count(id) from entity_property where ENTITY_NAME = 'hipchat.integration.caches.issue-mentions'; 7 | SELECT * from entity_property where ENTITY_NAME = 'fusion.caches.issue'; 8 | 9 | 10 | 11 | /* 12 | Clean old HipChat metadata and development metadata 13 | */ 14 | 15 | SET SQL_SAFE_UPDATES=0; 16 | DELETE from entity_property where json_value like '%9223371721494775807%'; 17 | DELETE from entity_property where PROPERTY_KEY like 'jpo-exclude-from-plan'; 18 | DELETE from entity_property where PROPERTY_KEY like 'jpo-issue-properties'; 19 | DELETE from entity_property where PROPERTY_KEY like 'hipchat.issue.dedicated.room'; 20 | DELETE from entity_property where ENTITY_NAME = 'hipchat.integration.caches.issue-mentions'; -------------------------------------------------------------------------------- /sql/jira/cleanup/jira_remove_offline_nodes.sql: -------------------------------------------------------------------------------- 1 | -- https://confluence.atlassian.com/jirakb/remove-abandoned-or-offline-nodes-in-jira-data-center-946616137.html 2 | -- CHECK Offline node and remove it 3 | -- https://jira.atlassian.com/browse/JRASERVER-42916 4 | 5 | 6 | select NODE_ID from clusternode where NODE_STATE ='OFFLINE'; 7 | 8 | 9 | -- delete offline node 10 | delete from clusternode where node_id IN (select NODE_ID from clusternode where NODE_STATE ='OFFLINE'); 11 | -------------------------------------------------------------------------------- /sql/jira/jIra_integrity_checker_jira_issues_with_null_status.sql: -------------------------------------------------------------------------------- 1 | /* 2 | SQL equivalents for Jira server's workflow integrity checks 3 | Jira Issues with Null Status
4 | link: https://confluence.atlassian.com/jirakb/sql-equivalents-for-jira-server-s-workflow-integrity-checks-658179102.html 5 | */ 6 | 7 | SELECT ji.id, 8 | ji.issuenum, 9 | ji.issuestatus, 10 | ji.project, 11 | ji.issuetype, 12 | currentStep.step_id 13 | FROM jiraissue ji 14 | JOIN OS_CURRENTSTEP currentStep 15 | ON ji.workflow_id = currentStep.entry_id 16 | WHERE ji.issuestatus is null; 17 | 18 | 19 | -- And can be fixed by: 20 | UPDATE jiraissue 21 | SET issuestatus = (SELECT state 22 | FROM OS_WFENTRY 23 | WHERE id = workflow_id) 24 | WHERE issuestatus is null; -------------------------------------------------------------------------------- /sql/jira/jira_Jira_server_throws_NullPointerException_when_creating_new_issues.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Jira server throws NullPointerException when creating new issues or changing project settings 3 | link: https://confluence.atlassian.com/jirakb/jira-server-throws-nullpointerexception-when-creating-new-issues-or-changing-project-settings-292651342.html 4 | */ 5 | 6 | SELECT * 7 | FROM fieldconfiguration 8 | WHERE id IN (SELECT id FROM fieldconfigscheme WHERE configname = 'Default Issue Type Scheme') 9 | AND fieldid = 'issuetype'; 10 | 11 | SELECT * 12 | FROM configurationcontext 13 | WHERE customfield = 'issuetype' 14 | AND project IS NULL 15 | AND fieldconfigscheme = (SELECT id FROM fieldconfigscheme WHERE configname = 'Default Issue Type Scheme'); 16 | 17 | SELECT * 18 | FROM fieldconfigschemeissuetype 19 | WHERE id = 10100 20 | OR fieldconfigscheme IN (SELECT id 21 | FROM fieldconfiguration 22 | WHERE id IN 23 | (SELECT id FROM fieldconfigscheme where configname = 'Default Issue Type Scheme')); -------------------------------------------------------------------------------- /sql/jira/jira_apps_plugins_issues_startup_fails.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Jira startup fails with 
message that required plugins are not started 3 | url: https://confluence.atlassian.com/jirakb/jira-startup-fails-with-message-that-required-plugins-are-not-started-254738702.html 4 | */ 5 | 6 | SELECT * FROM pluginstate where pluginenabled = 'false'; 7 | 8 | DELETE FROM pluginstate WHERE pluginkey=''; 9 | 10 | 11 | DELETE FROM pluginstate WHERE pluginkey LIKE 'com.atlassian.%'; 12 | -------------------------------------------------------------------------------- /sql/jira/jira_audit_log_list_users_showing_when_accounts_were_created_and_by_whom.sql: -------------------------------------------------------------------------------- 1 | /* 2 | List of users showing when the accounts were created and by whom 3 | link: https://confluence.atlassian.com/jirakb/list-of-users-showing-when-the-accounts-were-created-and-by-whom-695241575.html 4 | */ 5 | select author_key, object_id, created 6 | from audit_log 7 | where summary = 'User created' 8 | AND object_parent_name = 'JIRA Internal Directory' 9 | ORDER BY created; 10 | -------------------------------------------------------------------------------- /sql/jira/jira_authentication_sso_login_page_methods.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Jira's authentication methods are easily accessible via the UI via Jira Admin → System → Authentication methods 3 | This article describes how to obtain this information from the database or REST API 4 | Please note that both the REST API and database schema are subject to change in future versions. 
5 | link: https://confluence.atlassian.com/jirakb/how-to-obtain-authentication-methods-via-a-database-sql-query-or-rest-api-in-jira-datacenter-1319567431.html 6 | */ 7 | 8 | -- Username and password (Product login form) 9 | select p.property_key, ps.propertyvalue 10 | from propertystring ps 11 | join propertyentry p ON ps.id = p.id 12 | WHERE p.property_key = 'com.atlassian.plugins.authentication.sso.config.show-login-form'; 13 | 14 | -- SSO login page methods 15 | select * 16 | from "AO_ED669C_IDP_CONFIG"; 17 | -------------------------------------------------------------------------------- /sql/jira/jira_automation_rule_deletion.sql: -------------------------------------------------------------------------------- 1 | /* 2 | * 3 | * How to manually delete an Automation Rule on the database in Jira 4 | * https://confluence.atlassian.com/jirakb/how-to-manually-delete-an-automation-rule-on-the-database-in-jira-1050544443.html 5 | */ 6 | 7 | select rule."ID" as "Rule ID", 8 | rule."NAME" as "Rule name", 9 | rule."DESCRIPTION" as "Rule Description", 10 | rule."STATE" as "Rule status", 11 | rule."CREATED" as "Rule creation date", 12 | rule."UPDATED" as "Rule last updated date", 13 | state."CURRENT_CREATED" as "Rule last execution status date", 14 | state."CURRENT_CATEGORY" as "Rule last execution status", 15 | state."EXEC_COUNT" as "Rule execution count" 16 | from "AO_589059_RULE_CONFIG" rule 17 | left join "AO_589059_RULE_STATE_LATEST" state on state."RULE_ID" = rule."ID"; 18 | -------------------------------------------------------------------------------- /sql/jira/jira_automation_troubleshooting.sql: -------------------------------------------------------------------------------- 1 | /* 2 | link: https://jira.atlassian.com/browse/JIRAAUTOSERVER-266 3 | */ 4 | 5 | -- Count how many items we have waiting to be processed 6 | SELECT count(*) 7 | FROM "AO_589059_AUTOMATION_QUEUE"; 8 | 9 | -- Shows which rules have events waiting to processed 10 | SELECT "RULE_ID", 
count(*) 11 | FROM "AO_589059_AUTOMATION_QUEUE" 12 | GROUP BY "RULE_ID" 13 | order by count desc; 14 | 15 | -- Shows how many rule ran per hour in last 2 weeks. 16 | -- Including total & avg duration during that hour 17 | SELECT date_trunc('hour', "CREATED") AS rule_ran_hour, count(*), sum("DURATION"), avg("DURATION") 18 | FROM "AO_589059_AUDIT_ITEM" 19 | WHERE "CREATED" > (now() - interval '2 week') 20 | GROUP BY rule_ran_hour 21 | ORDER BY rule_ran_hour; 22 | 23 | -- shows top 50 rules ordered by how much time they were in the queue in total in the last 2 weeks 24 | SELECT ai."OBJECT_ITEM_ID" as rule_id, 25 | rc."NAME" as rule_name, 26 | count(*), 27 | avg("DURATION") AS time_avg_ms, 28 | avg("END_TIME" - "START_TIME") AS queued_time_avg 29 | FROM "AO_589059_AUDIT_ITEM" ai 30 | JOIN "AO_589059_RULE_CONFIG" rc ON "OBJECT_ITEM_ID" = rc."ID" 31 | WHERE ai."CREATED" > (now() - INTERVAL '2 week') 32 | GROUP BY "OBJECT_ITEM_ID", rc."NAME" 33 | ORDER BY queued_time_avg DESC LIMIT 50; -------------------------------------------------------------------------------- /sql/jira/jira_boards_not_visible_once_filter_removed.sql: -------------------------------------------------------------------------------- 1 | -- Boards Are Not Visible After the Filter is Deleted 2 | -- https://confluence.atlassian.com/jirakb/boards-are-not-visible-after-the-filter-is-deleted-779158656.html 3 | -- you can do it on instance and then clean cache 4 | 5 | SELECT * 6 | FROM "AO_60DB71_RAPIDVIEW" 7 | WHERE "SAVED_FILTER_ID" NOT IN (SELECT id FROM searchrequest); 8 | 9 | -- MySQL 10 | /* 11 | SELECT * 12 | FROM AO_60DB71_RAPIDVIEW 13 | WHERE SAVED_FILTER_ID NOT IN (SELECT id FROM searchrequest); 14 | */ -------------------------------------------------------------------------------- /sql/jira/jira_cache_delegation_checker.sql: -------------------------------------------------------------------------------- 1 | -- Cache Delegation: null keys are not permitted 2 | -- 
https://confluence.atlassian.com/jirakb/cache-delegation-null-keys-are-not-permitted-952050953.html 3 | 4 | select * 5 | from issuetypescreenschemeentity 6 | where fieldscreenscheme is null; 7 | 8 | 9 | select * 10 | from issuetypescreenschemeentity 11 | where fieldscreenscheme not in (select id from fieldscreenscheme); 12 | 13 | -- delete rows and clean Jira cache -------------------------------------------------------------------------------- /sql/jira/jira_cannot_render_webpanel_with_key.sql: -------------------------------------------------------------------------------- 1 | /* 2 | JIRA Cannot render WebPanel with key 'com.atlassian.jira.jira-projects-plugin:summary-page-project-key' when Browsing Project 3 | link: https://confluence.atlassian.com/jirakb/jira-cannot-render-webpanel-with-key-com-atlassian-jira-jira-projects-plugin-summary-page-project-key-when-browsing-project-759858703.html 4 | 5 | */ 6 | 7 | 8 | SELECT * FROM projectcategory where description is null; 9 | 10 | -- fix 11 | UPDATE projectcategory set description = '' where description is null; 12 | -------------------------------------------------------------------------------- /sql/jira/jira_check_data_center_index_checks.sql: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Checks index mismatch 4 | link: https://confluence.atlassian.com/jirakb/upgrading-from-8-x-to-9-x-index-changes-1168845946.html 5 | 6 | */ 7 | 8 | 9 | --- number of unversioned issues 10 | select count(*) 11 | FROM jiraissue 12 | LEFT JOIN issue_version ON issue_version.issue_id = jiraissue.id 13 | WHERE issue_version.issue_id is null; 14 | 15 | 16 | --- number of unversioned worklogs 17 | SELECT count(*) 18 | FROM worklog 19 | LEFT JOIN worklog_version on worklog_version.worklog_id = worklog.id 20 | WHERE worklog_version.worklog_id is null; 21 | 22 | 23 | --- number of unversioned comments 24 | SELECT count(*) 25 | FROM jiraaction 26 | LEFT JOIN comment_version ON 
comment_version.comment_id = jiraaction.id 27 | WHERE actiontype = 'comment' 28 | AND comment_version.comment_id is null; 29 | -------------------------------------------------------------------------------- /sql/jira/jira_check_non_existing_project_ids.sql: -------------------------------------------------------------------------------- 1 | -- https://confluence.atlassian.com/jirakb/500-cacheexception-error-thrown-during-upgrade-restore-of-jira-server-800858895.html 2 | -- https://jira.atlassian.com/browse/JRASERVER-70651 3 | -- Double check cache exception 4 | 5 | 6 | SELECT * 7 | FROM nodeassociation 8 | WHERE source_node_entity = 'Project' 9 | and source_node_id not in (select id from project); 10 | 11 | -- fix 12 | -- delete from nodeassociation where source_node_entity = 'Project' and source_node_id not in (select id from project); 13 | -------------------------------------------------------------------------------- /sql/jira/jira_check_user_mapping.sql: -------------------------------------------------------------------------------- 1 | -- check consistency of mapping membership and group 2 | SELECT lower_parent_name 3 | FROM cwd_membership 4 | WHERE parent_id not in (SELECT ID from cwd_group); -------------------------------------------------------------------------------- /sql/jira/jira_consistency_checker_attachments.sql: -------------------------------------------------------------------------------- 1 | /* 2 | XXX-#### can not be opened. The issue may have been deleted or you might not have permission to see the issue. 
3 | https://confluence.atlassian.com/jirakb/issue-can-not-be-opened-error-when-trying-to-select-it-in-the-issue-navigator-376832648.html 4 | */ 5 | 6 | select count(*) 7 | from fileattachment 8 | where created is null; 9 | 10 | -- update fileattachment set created = now() where created is null; -------------------------------------------------------------------------------- /sql/jira/jira_core_can_not_upload_sql_files.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Cannot upload SQL files to JIRA on Windows 3 | error: java.lang.IllegalArgumentException: Error parsing media type 'text\plain' 4 | link: https://confluence.atlassian.com/jirakb/cannot-upload-sql-files-to-jira-on-windows-282173924.html 5 | */ 6 | 7 | select * 8 | from fileattachment 9 | where filename like '%.sql' 10 | and mimetype = 'text\plain'; 11 | 12 | 13 | -- fix 14 | update fileattachment 15 | set mimetype = 'text/plain' 16 | where filename like '%.sql' 17 | and mimetype = 'text\plain'; 18 | -------------------------------------------------------------------------------- /sql/jira/jira_core_detect_workflow_without_a_specific_screen_and_transitions.sql: -------------------------------------------------------------------------------- 1 | /* 2 | How to use SQL query to look for workflows without a specific screen in any transition 3 | link:https://confluence.atlassian.com/jirakb/how-to-use-sql-query-to-look-for-workflows-without-a-specific-screen-in-any-transition-1141475246.html 4 | */ 5 | 6 | select j.id, 7 | j.workflowname, 8 | workflow_action.name as transition_name, 9 | workflow_action.view, 10 | workflow_action.fieldscreen, 11 | workflow_step.name as statusname, 12 | workflow_step.statusid 13 | from jiraworkflows j, 14 | XMLTABLE('//workflow/common-actions/action' 15 | passing xmlparse(document descriptor) 16 | columns 17 | id int path '@id', 18 | name text path '@name', 19 | view text path '@view' default null, 20 | fieldscreen int path 
'meta[@name = "jira.fieldscreen.id" and text() != ""]' default null, 21 | next_step int path 'results/unconditional-result/@step' 22 | ) as workflow_action 23 | join 24 | XMLTABLE('//workflow/steps/step' 25 | passing xmlparse(document descriptor) 26 | columns 27 | id int path '@id', 28 | name text path '@name', 29 | statusid text path 'meta[@name = "jira.status.id"]' default null 30 | ) as workflow_step on workflow_action.next_step = workflow_step.id 31 | join 32 | issuestatus i on workflow_step.statusid = i.id 33 | where i.statuscategory = 3 34 | and workflow_action.fieldscreen is null; 35 | -------------------------------------------------------------------------------- /sql/jira/jira_dashboard_passed_more_than_value.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Passed List Had More than One Value when Calling Dashboard 3 | java.lang.IllegalArgumentException: Passed List had more than one value. 4 | 5 | */ 6 | -- https://confluence.atlassian.com/jirakb/passed-list-had-more-than-one-value-when-calling-dashboard-215486151.html 7 | 8 | -- Diagnosis 9 | select username, pagename, count(pagename) 10 | from portalpage 11 | group by username, pagename 12 | having count(pagename) > 1; 13 | 14 | 15 | 16 | -- Resolution 17 | -- method 1 18 | select username, id 19 | from portalpage 20 | where username in (select username 21 | from portalpage 22 | group by username, pagename 23 | having count(pagename) > 1); 24 | -- Delete all the rows except one row for each of those users using the ID of the row. 25 | 26 | 27 | -- method 2 28 | -- Run the below SQL for a direct deletion of all the duplicated rows. 
This has been tested only on PostgreSQL: 29 | DELETE 30 | FROM portalpage p1 USING 31 | (SELECT username, 32 | id 33 | FROM portalpage 34 | WHERE username IN 35 | (SELECT username 36 | FROM portalpage 37 | GROUP BY username, 38 | pagename HAVING count(pagename) > 1)) p2, 39 | (SELECT username, 40 | max(id) maxid 41 | FROM portalpage 42 | WHERE username IN 43 | (SELECT username 44 | FROM portalpage 45 | GROUP BY username, 46 | pagename HAVING count(pagename) > 1) 47 | GROUP BY username) p3 48 | WHERE p1.username = p2.username 49 | AND p3.maxid != p1.id 50 | AND p3.username = p2.username; -------------------------------------------------------------------------------- /sql/jira/jira_dc_clean_unused_node_replication_index.sql: -------------------------------------------------------------------------------- 1 | -- let's remove replicated info from unused node 2 | 3 | SELECT count(*) 4 | FROM replicatedindexoperation rep 5 | WHERE rep.node_id NOT IN (SELECT node_id from clusternode); 6 | 7 | -- delete FROM replicatedindexoperation rep WHERE rep.node_id NOT IN (SELECT node_id from clusternode); 8 | -------------------------------------------------------------------------------- /sql/jira/jira_dc_nodeassociation_problem.sql: -------------------------------------------------------------------------------- 1 | /* 2 | 500 CacheException error thrown during upgrade/restore of Jira server 3 | https://confluence.atlassian.com/jirakb/500-cacheexception-error-thrown-during-upgrade-restore-of-jira-server-800858895.html 4 | https://jira.atlassian.com/browse/JRASERVER-70651 5 | */ 6 | 7 | SELECT * 8 | FROM nodeassociation 9 | WHERE source_node_entity = 'Project' 10 | AND source_node_id NOT IN (SELECT id FROM project); 11 | 12 | -- delete from nodeassociation where source_node_entity = 'Project' and source_node_id not in (select id from project); 13 | -------------------------------------------------------------------------------- /sql/jira/jira_detect_crashes_workflow_errors.sql: 
-------------------------------------------------------------------------------- 1 | -- com.opensymphony.workflow.FactoryException 2 | -- https://developer.atlassian.com/server/jira/platform/database-schema/ 3 | -- https://confluence.atlassian.com/jirakb/jira-crashes-and-throws-unknown-workflow-name-error-230819861.html 4 | -- https://confluence.atlassian.com/jirakb/unable-to-transition-workflow-due-to-duplicate-entry-error-373260771.html 5 | -------------------------------------------------------------------------------- /sql/jira/jira_detect_default_value_of_custom_field.sql: -------------------------------------------------------------------------------- 1 | -- That query helps to understand the default value of custom field 2 | -- do cleanup after that 3 | SELECT customfield.cfname, concat('customfield_', customfield.ID) 4 | FROM fieldconfigscheme 5 | INNER JOIN genericconfiguration on genericconfiguration.DATAKEY = fieldconfigscheme.ID 6 | JOIN customfield on concat('customfield_', customfield.ID) = fieldconfigscheme.FIELDID; -------------------------------------------------------------------------------- /sql/jira/jira_detect_duplicate_colour_of_agile_boards.sql: -------------------------------------------------------------------------------- 1 | -- @todo rewrite for T-SQL (MSSQL) 2 | -- works well on MySQL, PostgreSQL 3 | -- detect duplicate card colours 4 | 5 | 6 | -- MySQL 7 | /* 8 | SELECT * 9 | FROM AO_60DB71_CARDCOLOR 10 | WHERE ID in 11 | (SELECT ID FROM 12 | ( SELECT t.ID,t.RAPID_VIEW_ID, count(val) 13 | FROM AO_60DB71_CARDCOLOR t 14 | GROUP BY t.RAPID_VIEW_ID, val 15 | HAVING count(val) > 1 ) dup); 16 | */ 17 | 18 | -- PostgreSQL: AO table columns are upper-case, so they must be double-quoted 19 | SELECT * 20 | FROM "AO_60DB71_CARDCOLOR" 21 | WHERE "ID" in 22 | (SELECT "ID" FROM 23 | ( SELECT t."ID",t."RAPID_VIEW_ID", count("VAL") 24 | FROM "AO_60DB71_CARDCOLOR" t 25 | GROUP BY t."RAPID_VIEW_ID", "VAL" 26 | HAVING count("VAL") > 1 ) dup); 27 | 28 | 29 | -- delete duplicates 30 | -- PostgreSQL 31 | /* 32 | DELETE 33 | FROM 
"AO_60DB71_CARDCOLOR" 34 | WHERE ID in 35 | (SELECT ID FROM 36 | ( SELECT t.ID,t."RAPID_VIEW_ID", count(val) 37 | FROM "AO_60DB71_CARDCOLOR" t 38 | GROUP BY t."RAPID_VIEW_ID", val 39 | HAVING count(val) > 1 ) dup); 40 | */ 41 | 42 | 43 | -- MySQL 44 | /* 45 | 46 | SET SQL_SAFE_UPDATES=0; 47 | DELETE FROM AO_60DB71_CARDCOLOR 48 | WHERE ID in 49 | (SELECT ID FROM 50 | (SELECT t.ID, count(val) 51 | FROM AO_60DB71_CARDCOLOR t 52 | GROUP BY RAPID_VIEW_ID, val HAVING count(val)>1) dup); 53 | 54 | */ 55 | -------------------------------------------------------------------------------- /sql/jira/jira_duplicate_field_detector_on_screen.sql: -------------------------------------------------------------------------------- 1 | -- Just detect problem with duplicate field on the screens 2 | -- Duplicate key error in Jira server on creating issue via REST API or accessing Custom Fields page in JIRA 3 | -- https://confluence.atlassian.com/jirakb/duplicate-key-error-in-jira-server-on-creating-issue-via-rest-api-or-accessing-custom-fields-page-in-jira-872016885.html 4 | SELECT f.name, i.fieldidentifier, count(*) 5 | FROM fieldscreen f, 6 | fieldscreenlayoutitem i, 7 | fieldscreentab t 8 | WHERE f.id = t.fieldscreen 9 | AND i.fieldscreentab = t.id 10 | GROUP BY f.name, i.fieldidentifier 11 | HAVING count(*) > 1; 12 | -------------------------------------------------------------------------------- /sql/jira/jira_duplicate_key_value_errors_in_logs.sql: -------------------------------------------------------------------------------- 1 | -- Duplicate key value errors in logs in Jira Server using PostgreSQL 2 | -- https://confluence.atlassian.com/jirakb/duplicate-key-value-errors-in-logs-in-jira-server-using-postgresql-958771364.html 3 | 4 | SELECT max("ID") 5 | FROM "AO_60DB71_SWIMLANE"; 6 | 7 | 8 | select * 9 | from "AO_60DB71_SWIMLANE_ID_seq"; 10 | -------------------------------------------------------------------------------- /sql/jira/jira_duplicate_naming_of_fields.sql: 
-------------------------------------------------------------------------------- 1 | -- That query just to make review of duplicated fields 2 | -- as waiting that request https://jira.atlassian.com/browse/JRASERVER-61376 3 | -- Motivation based on the UX and continuous of mistakes in scripts, add-ons etc. 4 | 5 | SELECT ID, cfname, CUSTOMFIELDTYPEKEY 6 | FROM customfield 7 | WHERE UPPER(cfname) IN 8 | (SELECT UPPER(cfname) 9 | FROM customfield 10 | GROUP BY UPPER(cfname) 11 | HAVING COUNT(*) > 1) 12 | ORDER BY cfname; -------------------------------------------------------------------------------- /sql/jira/jira_duplicated_epic_story_links_detector.sql: -------------------------------------------------------------------------------- 1 | /* 2 | duplicate epic-story link 3 | link: https://confluence.atlassian.com/jirakb/duplicate-epic-story-link-types-in-issuelinktype-table-causing-errors-in-search-using-has-epic-779158623.html 4 | if you see more that 1 rows - something wrong 5 | 6 | https://jira.atlassian.com/browse/JSWSERVER-10243 7 | 8 | */ 9 | 10 | SELECT * 11 | FROM issuelinktype 12 | WHERE linkname = 'Epic-Story Link'; 13 | 14 | 15 | /* 16 | Additional steps for locked duplicated fields 17 | */ 18 | CREATE TABLE BACKUP_managedconfigurationitem AS 19 | SELECT * 20 | FROM managedconfigurationitem 21 | where access_level like 'LOCKED'; 22 | 23 | 24 | 25 | UPDATE managedconfigurationitem 26 | set managed='false' 27 | where item_id in (select item_id FROM managedconfigurationitem where access_level like 'LOCKED'); 28 | 29 | 30 | UPDATE managedconfigurationitem 31 | set managed='true' 32 | where item_id in (select item_id FROM managedconfigurationitem where access_level like 'LOCKED'); 33 | -------------------------------------------------------------------------------- /sql/jira/jira_duplicated_issue_key_fixer.sql: -------------------------------------------------------------------------------- 1 | -- Detect duplicate issue keys in Jira 2 | -- 
https://confluence.atlassian.com/jirakb/how-to-fix-duplicate-issue-keys-in-jira-1062243102.html 3 | 4 | select p.id as "Project ID", 5 | p.pkey as "Project Key", 6 | a.issuenum as "Issue Num", 7 | a.id as "Issue A ID", 8 | b.id as "Issue B ID" 9 | from jiraissue a 10 | join jiraissue b on a.issuenum = b.issuenum and a.project = b.project and a.id < b.id 11 | join project p on a.project = p.id 12 | where a.id is not null; 13 | 14 | 15 | 16 | -- fix 17 | update jiraissue 18 | set issuenum = (select (max(issuenum) + 1) 19 | from jiraissue where project = ) 20 | where id = ; 21 | -------------------------------------------------------------------------------- /sql/jira/jira_fieldconfigschemeissuetype_checker.sql: -------------------------------------------------------------------------------- 1 | -- Jira server throws communication error when filtering or searching issues 2 | -- Check via next query 3 | -- https://confluence.atlassian.com/jirakb/jira-server-throws-communication-error-when-filtering-or-searching-issues-591593887.html 4 | select fc.id 5 | from fieldconfiguration fc 6 | left join fieldconfigschemeissuetype fcsit on (fc.id = fcsit.fieldconfiguration) 7 | where fcsit.id is null; -------------------------------------------------------------------------------- /sql/jira/jira_filter_recursive_filter_detector.sql: -------------------------------------------------------------------------------- 1 | -- Detector of recursive requests if customer use the Scriptrunner 2 | SELECT * 3 | FROM searchrequest 4 | WHERE reqcontent like '%linkedIssuesOfRecursive%'; -------------------------------------------------------------------------------- /sql/jira/jira_find_by_whom_and_when_was_an_issue_transitioned_from_one_to_another_status.sql: -------------------------------------------------------------------------------- 1 | /* 2 | How to find by whom and when was an issue transitioned from one status to another in Jira using SQL queries 3 | Purpose: 4 | You may need to pull 
some information about who/when transitioned an issue from one status to another for reporting purposes. 5 | To retrieve this information from Jira UI, you can use the JQL "status changed FROM 'First' TO 'Second'" to filter tickets which were moved from 'First' to 'Second' status. Then go through the history of the tickets manually to see the user who performed the action. 6 | 7 | Since the operation above is time consuming, this article offers an alternative to get the data directly from the database using SQL queries. 8 | link: https://confluence.atlassian.com/jirakb/how-to-find-by-whom-and-when-was-an-issue-transitioned-from-one-status-to-another-in-jira-using-sql-queries-1283687816.html 9 | */ 10 | 11 | select i.field, i.oldstring, i.newstring, a.lower_user_name, g.created 12 | from changeitem i 13 | join changegroup g on g.id = i.groupid 14 | join jiraissue j on j.id = g.issueid 15 | join project p on p.id = j.project 16 | join app_user a on a.user_key = g.author 17 | where i.field = 'status' 18 | and i.oldstring = 'First' -- old status 19 | and i.newstring = 'Second' -- new status 20 | and j.issuenum = 123 -- issue number 21 | and p.pkey = 'ABC'; 22 | -------------------------------------------------------------------------------- /sql/jira/jira_get_custom_field_changes_for_ticket.sql: -------------------------------------------------------------------------------- 1 | -- CUSTOM FIELD VALUE CHANGES 2 | -- issuenum 3 | -- pkey 4 | -- https://confluence.atlassian.com/jirakb/retrieve-issue-change-history-from-database-in-jira-server-933695139.html 5 | SELECT p.pname, 6 | p.pkey, 7 | i.issuenum, 8 | cg.ID, 9 | cg.issueid, 10 | au.lower_user_name, 11 | cg.AUTHOR, 12 | cg.CREATED, 13 | ci.FIELDTYPE, 14 | ci.FIELD, 15 | ci.OLDVALUE, 16 | ci.OLDSTRING, 17 | ci.NEWVALUE, 18 | ci.NEWSTRING 19 | FROM changegroup cg 20 | inner join jiraissue i on cg.issueid = i.id 21 | inner join project p on i.project = p.id 22 | inner join changeitem ci 23 | on ci.groupid = cg.id AND 
ci.FIELDTYPE = 'custom' AND ci.FIELD = 'Name of Custom Field' 24 | inner join app_user au on cg.author = au.user_key 25 | WHERE cg.issueid = (select id 26 | from jiraissue 27 | where issuenum = 115 and project in (select id from project where pname = 'Project name')) 28 | order by 1, 3, 4; -------------------------------------------------------------------------------- /sql/jira/jira_get_status_changes.sql: -------------------------------------------------------------------------------- 1 | -- get only STATUS changes for ticket 2 | -- issuenum 3 | -- pkey 4 | -- https://confluence.atlassian.com/jirakb/retrieve-issue-change-history-from-database-in-jira-server-933695139.html 5 | 6 | SELECT p.pname, 7 | p.pkey, 8 | i.issuenum, 9 | cg.ID, 10 | cg.issueid, 11 | au.lower_user_name, 12 | cg.AUTHOR, 13 | cg.CREATED, 14 | ci.FIELDTYPE, 15 | ci.FIELD, 16 | ci.OLDVALUE, 17 | ci.OLDSTRING, 18 | ci.NEWVALUE, 19 | ci.NEWSTRING 20 | FROM changegroup cg 21 | inner join jiraissue i on cg.issueid = i.id 22 | inner join project p on i.project = p.id 23 | inner join changeitem ci on ci.groupid = cg.id AND ci.FIELDTYPE = 'jira' AND ci.FIELD = 'status' 24 | inner join app_user au on cg.author = au.user_key 25 | WHERE cg.issueid = 26 | (select id from jiraissue where issuenum = 115 and project in (select id from project where pkey = 'PROJECT_KEY')) 27 | order by 1, 3, 4; -------------------------------------------------------------------------------- /sql/jira/jira_integrity_checker.sql: -------------------------------------------------------------------------------- 1 | /* 2 | -- SQL equivalents for Jira server's workflow integrity checks 3 | -- Execute the Administration -> System -> Integrity Checker 4 | -- Execute the Administration -> System -> Indexing 5 | */ 6 | 7 | 8 | -- https://confluence.atlassian.com/jirakb/how-to-run-the-workflow-integrity-checks-in-sql-658179102.html 9 | -- https://jira.atlassian.com/browse/JRASERVER-4241 10 | -- check Workflow Entry States are Correct 11 
| SELECT jiraissue.id issue_id, 12 | jiraissue.workflow_id, 13 | OS_WFENTRY.* 14 | FROM jiraissue 15 | JOIN OS_WFENTRY 16 | ON jiraissue.workflow_id = OS_WFENTRY.id 17 | WHERE OS_WFENTRY.state is null 18 | OR OS_WFENTRY.state = 0; 19 | 20 | -- fix by 21 | -- UPDATE OS_WFENTRY SET state = 1 WHERE id in (OS_WFENTRY_ID_VALUES) 22 | 23 | -- alternative query with prepared UP request 24 | SELECT concat( 'UPDATE OS_WFENTRY SET state = 1 WHERE id in (', 25 | jiraissue.workflow_id, ');') 26 | FROM jiraissue 27 | JOIN OS_WFENTRY 28 | ON jiraissue.workflow_id = OS_WFENTRY.id 29 | WHERE OS_WFENTRY.state IS NULL 30 | OR OS_WFENTRY.state = 0; 31 | 32 | 33 | -- check issue number null tickets 34 | SELECT id, issuenum, project 35 | FROM jiraissue 36 | WHERE project is null; 37 | 38 | -- fix that null ticket 39 | -- DELETE FROM jiraissue WHERE project IS NULL; -------------------------------------------------------------------------------- /sql/jira/jira_integrity_checker_full_reindex_makes_errors.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Jira is trying to do index recovery on invalid issue data coming from Lucene indexes 3 | Purpose: Check issue number null tickets 4 | Ticket: https://jira.atlassian.com/browse/JRASERVER-70248 5 | Link: https://confluence.atlassian.com/jirakb/full-reindex-failing-at-100-with-1-error-in-jira-1047548462.html 6 | */ 7 | 8 | SELECT id, issuenum, project 9 | FROM jiraissue 10 | WHERE issuenum IS null; 11 | 12 | -- fix possible problem 13 | DELETE 14 | FROM jiraissue 15 | WHERE issuenum IS NULL; 16 | -------------------------------------------------------------------------------- /sql/jira/jira_integrity_checker_workflow_current_step_entries.sql: -------------------------------------------------------------------------------- 1 | /* 2 | SQL equivalents for Jira server's workflow integrity checks 3 | Workflow current step entries 4 | link: 
https://confluence.atlassian.com/jirakb/sql-equivalents-for-jira-server-s-workflow-integrity-checks-658179102.html 5 | */ 6 | 7 | SELECT concat(concat(P.pkey, '-'), I.issuenum) as "ticket" 8 | FROM jiraissue I 9 | join project P on P.id = I.project 10 | left join OS_CURRENTSTEP C on C.entry_id = I.workflow_id 11 | WHERE C.id is null; 12 | 13 | 14 | /* 15 | Running the query below will generate the inserts for all issues missing the valid workflow step entry: 16 | */ 17 | 18 | SELECT concat(concat('insert into os_currentstep values ((select max(id)+1 from os_currentstep),', workflow_id), ',1,0,'''',now(),null,null,''open'',null)') 19 | FROM jiraissue 20 | WHERE workflow_id NOT IN ( 21 | SELECT entry_id 22 | FROM OS_CURRENTSTEP); 23 | -------------------------------------------------------------------------------- /sql/jira/jira_integrity_checker_workflow_entry_states.sql: -------------------------------------------------------------------------------- 1 | /* 2 | SQL equivalents for Jira server's workflow integrity checks 3 | Execute the Administration -> System -> Integrity Checker 4 | Execute the Administration -> System -> Indexing 5 | */ 6 | 7 | /* 8 | Check Workflow Entry States are Correct 9 | link: https://confluence.atlassian.com/jirakb/how-to-run-the-workflow-integrity-checks-in-sql-658179102.html 10 | link: https://jira.atlassian.com/browse/JRASERVER-4241 11 | */ 12 | SELECT jiraissue.id issue_id, 13 | jiraissue.workflow_id, 14 | OS_WFENTRY.* 15 | FROM jiraissue 16 | JOIN OS_WFENTRY 17 | ON jiraissue.workflow_id = OS_WFENTRY.id 18 | WHERE OS_WFENTRY.state is null OR OS_WFENTRY.state = 0; 19 | 20 | -- fix by 21 | -- UPDATE OS_WFENTRY SET state = 1 WHERE id in (OS_WFENTRY_ID_VALUES); 22 | 23 | /* 24 | That query will generate update queries 25 | */ 26 | SELECT concat( 'UPDATE OS_WFENTRY SET state = 1 WHERE id in (', 27 | jiraissue.workflow_id, ');') 28 | FROM jiraissue 29 | JOIN OS_WFENTRY 30 | ON jiraissue.workflow_id = OS_WFENTRY.id 31 | WHERE 
OS_WFENTRY.state is null 32 | OR OS_WFENTRY.state = 0; 33 | -------------------------------------------------------------------------------- /sql/jira/jira_jira_with_large_number_of_history_records.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Find top 100 with large history issues 3 | Purpose: Reindexing large number of issues with a lot of change history can cause an OOME 4 | link: https://jira.atlassian.com/browse/JRASERVER-66251 5 | */ 6 | 7 | 8 | SELECT concat(p.pkey, '-', ji.issuenum) as issue, count(ji.id) 9 | FROM changeitem ci 10 | JOIN changegroup cg on cg.id = ci.groupid 11 | JOIN jiraissue ji on cg.issueid = ji.id 12 | JOIN project p on p.id = ji.project 13 | GROUP BY ji.issuenum, p.pkey 14 | ORDER BY count(ji.id) desc limit 100; 15 | 16 | -- MS SQL request 17 | /* 18 | SELECT TOP 100 concat(p.pkey,'-',ji.issuenum) AS issue, count(ji.id) 19 | FROM changeitem ci 20 | JOIN changegroup cg ON cg.id = ci.groupid 21 | JOIN jiraissue ji ON cg.issueid = ji.id 22 | JOIN project p ON p.id = ji.project 23 | GROUP BY ji.issuenum,p.pkey 24 | ORDER BY count(ji.id) DESC; 25 | */ -------------------------------------------------------------------------------- /sql/jira/jira_labels_broken.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Updating Labels in Jira using SQL 3 | link: https://confluence.atlassian.com/jirakb/updating-labels-in-jira-using-sql-1345820764.html 4 | */ 5 | 6 | SELECT * 7 | FROM "label" 8 | where "label" like '% %'; 9 | 10 | -- fix 11 | UPDATE "label" 12 | SET "label" = replace("label", ' ', '_') 13 | where "label" like '% %'; 14 | -------------------------------------------------------------------------------- /sql/jira/jira_notification_instance_cleaner.sql: -------------------------------------------------------------------------------- 1 | -- Check duplicates of MessageID 2 | -- MIN(SOURCE) keeps one sample source per group, so the query stays valid under ANSI GROUP BY / MySQL ONLY_FULL_GROUP_BY 3 | 4 | SELECT MIN(SOURCE) AS sample_source, MESSAGEID, COUNT(MESSAGEID) 5 | FROM 
notificationinstance 6 | GROUP BY MESSAGEID 7 | HAVING COUNT(MESSAGEID) > 1 8 | ORDER BY 3 DESC LIMIT 10; 9 | 10 | 11 | -- Remove duplicated MESSAGEID rows, keep lowest id 12 | DELETE 13 | n1 14 | FROM notificationinstance n1, 15 | notificationinstance n2 16 | WHERE n1.id > n2.id 17 | AND n1.MESSAGEID = n2.MESSAGEID; -------------------------------------------------------------------------------- /sql/jira/jira_notification_instance_cleaner_by_resolution.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Count number of email references for tickets with resolution 3 | */ 4 | SELECT count(ID) 5 | FROM notificationinstance 6 | WHERE SOURCE in (select id from jiraissue where RESOLUTION is not null); 7 | 8 | 9 | /* 10 | Delete messageid for tickets with resolution 11 | */ 12 | 13 | -- for MySQL 14 | -- SET SQL_SAFE_UPDATES=0; 15 | 16 | DELETE 17 | FROM notificationinstance 18 | WHERE SOURCE in (select id from jiraissue where RESOLUTION is not null); 19 | 20 | -------------------------------------------------------------------------------- /sql/jira/jira_notification_instance_cleaner_by_status.sql: -------------------------------------------------------------------------------- 1 | -- Delete references to closed issues and NOT updated last 4 week 2 | SELECT count(*) 3 | FROM notificationinstance 4 | WHERE SOURCE IN (SELECT ID 5 | FROM jiraissue 6 | where issuestatus in (SELECT id 7 | FROM issuestatus 8 | WHERE pname in ('Closed', 'Done', 'Rejected', 'Declined')) 9 | AND UPDATED < (NOW() - '4 WEEK')); 10 | 11 | /* 12 | DELETE 13 | FROM notificationinstance 14 | WHERE SOURCE IN ( SELECT ID 15 | FROM jiraissue 16 | where issuestatus in (SELECT id 17 | FROM issuestatus 18 | WHERE pname in ('Closed', 'Done', 'Rejected', 'Declined') ) 19 | AND UPDATED < (NOW() - '4 WEEK')); 20 | */ 21 | 22 | -------------------------------------------------------------------------------- /sql/jira/jira_notification_instance_cleaner_by_updates.sql: 
-------------------------------------------------------------------------------- 1 | /* 2 | Delete references to issues WITHOUT resolution or NOT updated in the last 12 weeks 3 | */ 4 | 5 | 6 | 7 | SELECT count(ID) 8 | FROM notificationinstance 9 | WHERE SOURCE IN (SELECT ID 10 | FROM jiraissue 11 | WHERE RESOLUTION is not null 12 | and UPDATED < (NOW() - '12 WEEK')); 13 | 14 | 15 | /* 16 | DELETE 17 | FROM notificationinstance 18 | WHERE SOURCE IN ( SELECT ID 19 | FROM jiraissue 20 | WHERE RESOLUTION is not null 21 | and 22 | UPDATED < (NOW() - '12 WEEK') 23 | ); 24 | */ 25 | -------------------------------------------------------------------------------- /sql/jira/jira_notification_instance_cleaner_empty_messageid.sql: -------------------------------------------------------------------------------- 1 | -- Remove empty MESSAGEID rows 2 | DELETE FROM notificationinstance 3 | where messageid like ''; -------------------------------------------------------------------------------- /sql/jira/jira_notification_instance_cleaner_no_assiciated_tickets.sql: -------------------------------------------------------------------------------- 1 | -- Delete unassociated messageID without references to tickets 2 | 3 | SELECT count(*) 4 | FROM notificationinstance 5 | WHERE SOURCE not in (SELECT ID from jiraissue); 6 | 7 | 8 | 9 | /* 10 | DELETE 11 | FROM notificationinstance 12 | where SOURCE not in (SELECT ID from jiraissue); 13 | */ -------------------------------------------------------------------------------- /sql/jira/jira_null_pointer_exception_while_linking_issues.sql: -------------------------------------------------------------------------------- 1 | -- NullPointerException while linking issues 2 | -- https://confluence.atlassian.com/jirakb/nullpointerexception-while-linking-issues-313463281.html 3 | 4 | SELECT * 5 | FROM issuelinktype 6 | WHERE inward is null 7 | or outward is null; 8 | 9 | --------------------------------------------------------------------------------
/sql/jira/jira_priority_color_issue.sql: -------------------------------------------------------------------------------- 1 | /* 2 | It's possible to confirm if you're being affected by this bug by running the query below: 3 | 4 | SELECT id, pname, status_color FROM priority ; 5 | If the value of status_color returned for any of the rows returned is different 6 | from a '#' character followed by six hexadecimal characters, 7 | please edit the corresponding priority through the Administration » Issues » Priorities page. 8 | link: https://jira.atlassian.com/browse/JRASERVER-32328 9 | */ 10 | 11 | SELECT id, pname, status_color 12 | FROM priority 13 | where length(status_color) <> 7; -------------------------------------------------------------------------------- /sql/jira/jira_project_latest_activity_date_detector.sql: -------------------------------------------------------------------------------- 1 | /* Prepare list of not active projects */ 2 | SET 3 | @latestActivityDate='2023-01-01 00:00:00'; 4 | 5 | SELECT pc.cname AS 'Project Category', p.id AS 'Project ID', p.pkey AS 'Project Key', p.pname AS 'Project Name', MAX(i.updated) AS 'Latest activity', COUNT(*) AS 'Issue count' 6 | FROM jiraissue AS i 7 | join project AS p on i.project = p.id 8 | join nodeassociation AS na on p.id = na.SOURCE_NODE_ID 9 | join projectcategory AS pc on pc.ID = na.SINK_NODE_ID and na.ASSOCIATION_TYPE = 'ProjectCategory' 10 | where pc.cname not in ('Archive', 'DEPRECATED') 11 | GROUP BY p.id 12 | HAVING MAX(i.updated) < @latestActivityDate 13 | ORDER BY MAX(i.updated); 14 | -------------------------------------------------------------------------------- /sql/jira/jira_project_shortcut_is_not_showing_in_some_projects.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Project Shortcut is not showing in some projects 3 | link: https://confluence.atlassian.com/jirakb/project-shortcut-is-not-showing-in-some-projects-975017597.html 4 | */ 5 | 6 | 7 
| select * from "AO_550953_SHORTCUT" s, project p where s."PROJECT_ID" = p.id and s."SHORTCUT_TYPE" = 'project.shortcut.default.link'; 8 | 9 | -- resolution 10 | 11 | delete from "AO_550953_SHORTCUT" where "SHORTCUT_TYPE" = 'project.shortcut.default.link'; 12 | -------------------------------------------------------------------------------- /sql/jira/jira_remove_custom_field_value_with_null.sql: -------------------------------------------------------------------------------- 1 | -- https://confluence.atlassian.com/jirakb/nullpointerexception-when-deleting-a-custom-field-218272042.html 2 | 3 | -- Let's detect custom field value without issue value 4 | 5 | SELECT * 6 | FROM customfieldvalue 7 | where issue is null; 8 | -- DELETE from customfieldvalue where issue is null; 9 | 10 | -- detect empty rows 11 | SELECT * 12 | FROM customfieldvalue 13 | WHERE stringvalue is null 14 | AND numbervalue is null 15 | AND textvalue is null 16 | AND datevalue is null limit 10; -------------------------------------------------------------------------------- /sql/jira/jira_retrieve_list_of_issue_assignees_from_project.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Retrieve a list of issue assignees from a specific project in Jira 3 | Purpose: Admins may want to retrieve a list of issue assignees from a specific project in Jira. 
4 | link: https://confluence.atlassian.com/jirakb/retrieve-a-list-of-issue-assignees-from-a-specific-project-in-jira-1102614385.html 5 | */ 6 | SELECT DISTINCT u.display_name, au.lower_user_name, j.assignee 7 | FROM jiraissue j 8 | JOIN app_user au ON j.assignee = au.user_key 9 | JOIN cwd_user u ON u.lower_user_name = au.lower_user_name 10 | WHERE j.project = 12345; -------------------------------------------------------------------------------- /sql/jira/jira_service_desk_automation_rule_checker.sql: -------------------------------------------------------------------------------- 1 | /* 2 | * Loading Automation and/or Customer notifications in Jira Service Management throws Internal Server Error 3 | * https://confluence.atlassian.com/jirakb/loading-automation-and-or-customer-notifications-in-jira-service-management-throws-internal-server-error-777026946.html 4 | */ 5 | 6 | SELECT * 7 | FROM "AO_9B2E3B_RULE" 8 | WHERE "ID" is null 9 | or "ORDINAL" is null 10 | or "RULESET_REVISION_ID" is null 11 | or "ENABLED" is null; 12 | 13 | SELECT * 14 | FROM "AO_9B2E3B_RULESET" 15 | WHERE "ACTIVE_REVISION_ID" is null 16 | or "ID" is null; 17 | 18 | SELECT * 19 | FROM "AO_9B2E3B_RULESET_REVISION" 20 | WHERE "CREATED_BY" is null 21 | or "CREATED_TIMESTAMP_MILLIS" is null 22 | or "DESCRIPTION" is null 23 | or "ID" is null 24 | or "NAME" is null 25 | or "RULE_SET_ID" is null 26 | or "TRIGGER_FROM_OTHER_RULES" is null 27 | or "IS_SYSTEM_RULE_SET" is null; 28 | 29 | SELECT * 30 | FROM "AO_9B2E3B_RULE_EXECUTION" 31 | WHERE "EXECUTOR_USER_KEY" is null 32 | or "FINISH_TIME_MILLIS" is null 33 | or "ID" is null 34 | or "OUTCOME" is null 35 | or "RULE_ID" is null 36 | or "START_TIME_MILLIS" is null; -------------------------------------------------------------------------------- /sql/jira/jira_service_desk_automation_rules_history_rules.sql: -------------------------------------------------------------------------------- 1 | /* 2 | * 3 | * Clean automation rules items logs for Jira 
Service Management 4 | * it helps for the large installations 5 | * Purpose: during migration or on large installation those logs eat quite a lot of resources. 6 | * 7 | */ 8 | select count(*) from "AO_9B2E3B_EXEC_RULE_MSG_ITEM"; 9 | -- clean exact table 10 | truncate "AO_9B2E3B_EXEC_RULE_MSG_ITEM"; 11 | 12 | select count(*) from "AO_9B2E3B_IF_COND_EXECUTION"; 13 | -- clean exact table 14 | truncate "AO_9B2E3B_IF_COND_EXECUTION" CASCADE ; 15 | 16 | -- remove all history of running 17 | truncate "AO_9B2E3B_RULE_EXECUTION" CASCADE ; 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /sql/jira/jira_service_desk_psmq_queue.sql: -------------------------------------------------------------------------------- 1 | -- fixed in 3.9.0 of JSD 2 | -- https://jira.atlassian.com/browse/JSDSERVER-5493 3 | -- PSMQ automation thread loops over queue due to message count discrepancy 4 | 5 | SELECT Q."NAME", Q."MESSAGE_COUNT", count(M."ID") as real_message_count 6 | FROM "AO_319474_QUEUE" Q 7 | LEFT JOIN "AO_319474_MESSAGE" M on M."QUEUE_ID" = q."ID" 8 | GROUP BY Q."NAME", Q."MESSAGE_COUNT" 9 | HAVING count(M."ID") = 0 10 | AND Q."MESSAGE_COUNT" != 0; 11 | 12 | 13 | -- if you found rows you can run the next query 14 | /** 15 | update "AO_319474_QUEUE" set "MESSAGE_COUNT" = 0 16 | where "NAME" in (select Q."NAME" 17 | from "AO_319474_QUEUE" Q 18 | left join "AO_319474_MESSAGE" M on M."QUEUE_ID" = q."ID" 19 | group by Q."NAME", Q."MESSAGE_COUNT" 20 | having count(M."ID") = 0 AND Q."MESSAGE_COUNT" != 0); 21 | */ -------------------------------------------------------------------------------- /sql/jira/jira_service_desk_sla_cleanup.sql: -------------------------------------------------------------------------------- 1 | /* 2 | link: https://jira.atlassian.com/browse/JSDSERVER-5871 3 | */ 4 | 5 | SELECT * 6 | FROM propertyentry 7 | JOIN propertynumber ON propertyentry.ID = propertynumber.ID 8 | WHERE PROPERTY_KEY = 
'sd.sla.audit.log.cleanup.days'; 9 | 10 | -- possible cleanup all data of SLA AUDIT LOG 11 | truncate "AO_54307E_SLAAUDITLOG" cascade; 12 | truncate "AO_54307E_SLAAUDITLOGDATA"; -------------------------------------------------------------------------------- /sql/jira/jira_service_management_locating_webhook_url.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Locating WebHook URL in Service Management automation rules 3 | link: https://confluence.atlassian.com/jirakb/locating-webhook-url-in-service-management-automation-rules-1056677121.html 4 | */ 5 | 6 | 7 | select thenactioncfgdata."ID", 8 | rsetrevision."CREATED_BY" as "Rule created by", 9 | rsetrevision."DESCRIPTION" as "Rule description", 10 | thenactioncfgdata."CONFIG_DATA_VALUE" as "Webhook URL" 11 | from "AO_9B2E3B_RULE" r 12 | join "AO_9B2E3B_RULESET" rset on r."ID" = rset."ID" 13 | join "AO_9B2E3B_RULESET_REVISION" rsetrevision on rsetrevision."ID" = rset."ACTIVE_REVISION_ID" 14 | join "AO_9B2E3B_IF_THEN" ifthen on rset."ACTIVE_REVISION_ID" = ifthen."RULE_ID" 15 | join "AO_9B2E3B_THEN_ACTION_CONFIG" thenactioncfg on thenactioncfg."IF_THEN_ID" = ifthen."ID" 16 | join "AO_9B2E3B_THEN_ACT_CONF_DATA" thenactioncfgdata 17 | on thenactioncfg."ID" = thenactioncfgdata."THEN_ACTION_CONFIG_ID" 18 | and thenactioncfgdata."CONFIG_DATA_KEY" ilike '%url%'; -------------------------------------------------------------------------------- /sql/jira/jira_service_management_sla_field.sql: -------------------------------------------------------------------------------- 1 | /* 2 | SQL to find unused SLA 3 | https://confluence.atlassian.com/jirakb/how-to-find-the-usage-of-sla-fields-in-jira-service-manager-1041828257.html 4 | */ 5 | SELECT DISTINCT cf.cfname 6 | FROM customfield cf 7 | WHERE cf.customfieldtypekey = 'com.atlassian.servicedesk:sd-sla-field' 8 | AND cf.id not in (SELECT cf2.id 9 | FROM customfield cf2, 10 | customfieldvalue cfv 11 | WHERE cf2.customfieldtypekey = 
'com.atlassian.servicedesk:sd-sla-field' 12 | AND cfv.customfield = cf2.id 13 | AND cfv.textvalue like '%"events":[{%'); -------------------------------------------------------------------------------- /sql/jira/jira_service_management_sql_error_for_psmq.sql: -------------------------------------------------------------------------------- 1 | /* 2 | SQLServerException: The ntext data type cannot be selected as DISTINCT because it is not comparable 3 | link: https://confluence.atlassian.com/jirakb/sqlserverexception-the-ntext-data-type-cannot-be-selected-as-distinct-because-it-is-not-comparable-1141987390.html 4 | 2022-02-22 18:03:57,449-0500 localhost-startStop-1 ERROR anonymous [c.a.s.core.lifecycle.DefaultLifecycleManager] 5 | LifecycleAware.onStart() failed for component with class 'com.atlassian.servicedesk.plugins.base.internal.bootstrap.lifecycle.InternalBasePluginLauncher' 6 | from plugin 'com.atlassian.servicedesk.internal-base-plugin' 7 | */ 8 | 9 | 10 | SELECT s.name, t.name, i.name, c.name 11 | FROM sys.tables t 12 | INNER JOIN sys.schemas s on t.schema_id = s.schema_id 13 | INNER JOIN sys.indexes i on i.object_id = t.object_id 14 | INNER JOIN sys.index_columns ic on ic.object_id = t.object_id 15 | INNER JOIN sys.columns c on c.object_id = t.object_id and 16 | ic.column_id = c.column_id 17 | WHERE t.name = 'AO_319474_MESSAGE'; 18 | 19 | exec sp_columns AO_319474_MESSAGE; 20 | 21 | 22 | -- fix and double check it 23 | ALTER TABLE dbo.AO_319474_MESSAGE ALTER COLUMN MSG_DATA NVARCHAR(MAX) NULL; 24 | -------------------------------------------------------------------------------- /sql/jira/jira_software_agile_check_correct_collation.sql: -------------------------------------------------------------------------------- 1 | /* 2 | How to change the RANK column in the AO_60DB71_LEXORANK table to the correct collation 3 | link: 
https://confluence.atlassian.com/jirakb/how-to-change-the-rank-column-in-the-ao_60db71_lexorank-table-to-the-correct-collation-779158613.html 4 | */ 5 | 6 | -- get Collation expected C or POSIX 7 | show 8 | LC_COLLATE; 9 | show 10 | LC_CTYPE; 11 | 12 | 13 | 14 | select collation_name 15 | from INFORMATION_SCHEMA.COLUMNS 16 | where table_name = 'AO_60DB71_LEXORANK' 17 | and column_name = 'RANK'; 18 | -------------------------------------------------------------------------------- /sql/jira/jira_software_agile_lexorank.sql: -------------------------------------------------------------------------------- 1 | /* 2 | * JIRA Software Agile Lexorank get field ID 3 | LexoRankIntegrityException: Expected exactly one rank row for issue x for rank field y, but found 2 rows 4 | link: https://confluence.atlassian.com/jirakb/lexorankintegrityexception-expected-exactly-one-rank-row-for-issue-x-for-rank-field-y-but-found-2-rows-1101933359.html 5 | */ 6 | 7 | -- get custom field ID 8 | SELECT id 9 | from CUSTOMFIELD 10 | WHERE customfieldtypekey = 'com.pyxis.greenhopper.jira:gh-lexo-rank'; 11 | 12 | -- detect duplicate rank 13 | SELECT "ISSUE_ID", count("ISSUE_ID") 14 | FROM "AO_60DB71_LEXORANK" 15 | WHERE "FIELD_ID" = 10009 16 | GROUP BY "ISSUE_ID" 17 | HAVING count("ISSUE_ID") > 1; 18 | 19 | 20 | -- fix the duplicates 21 | DELETE 22 | from "AO_60DB71_LEXORANK" 23 | WHERE "ID" in (WITH temp AS (SELECT "ID", 24 | "ISSUE_ID", 25 | ROW_NUMBER() 26 | OVER ( PARTITION BY "ISSUE_ID" ORDER BY "ISSUE_ID" ) 27 | from "AO_60DB71_LEXORANK" 28 | WHERE "ISSUE_ID" in 29 | (SELECT "ISSUE_ID" 30 | from "AO_60DB71_LEXORANK" 31 | WHERE "FIELD_ID" = < rank_custom_field_ID > 32 | GROUP BY "ISSUE_ID" 33 | HAVING count("ISSUE_ID") > 1)) 34 | select "ID" 35 | from temp 36 | where row_number = 1); -------------------------------------------------------------------------------- /sql/jira/jira_software_agile_lock_hash_issue.sql: 
-------------------------------------------------------------------------------- 1 | /* 2 | * JIRA Agile Lock Hash Issue 3 | [c.a.g.service.lexorank.LexoRankOperation] Failed to acquire a lock on the max marker row and previous row for rank field[id=xxx], retrying rank initially 4 | */ 5 | SELECT COUNT("ID") 6 | FROM "AO_60DB71_LEXORANK" 7 | WHERE "LOCK_HASH" is NOT NULL 8 | AND "TYPE" in ('0', '2'); 9 | 10 | 11 | -- fix of solution 12 | UPDATE "AO_60DB71_LEXORANK" 13 | SET "LOCK_HASH" = NULL, 14 | "LOCK_TIME" = NULL; 15 | 16 | -- then rebalance Lexorank -------------------------------------------------------------------------------- /sql/jira/jira_software_agile_sprint_showing_in_different_sprints.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Backlog doesn't show issues in the correct sprints 3 | link: https://jira.atlassian.com/browse/JSWSERVER-13530 4 | */ 5 | -- Backup all the "sprint-removal history items" into a separate table, and remove them from the initial table 6 | create table changeitem_quarantine_jira721 as 7 | select ci.* 8 | from changeitem ci 9 | where ci.field = 'Sprint' 10 | and ci.newvalue is null; 11 | 12 | -- Remove them from the initial changeitem table 13 | DELETE 14 | from changeitem ci 15 | where ci.id in (select id from changeitem_quarantine_jira721); 16 | 17 | 18 | -- If any misbehaviour happens, the records can be restored again with the statement: 19 | insert into changeitem 20 | select * 21 | from changeitem_quarantine_jira721 22 | where id not in (select id from changeitem); -------------------------------------------------------------------------------- /sql/jira/jira_software_identify_which_node_is_performing_balance_operation.sql: -------------------------------------------------------------------------------- 1 | /* 2 | How to Identify Which Node is Performing Balance Operation 3 | This query will return the node_id and the time when the balance operation is performed. 
4 | link: https://confluence.atlassian.com/jirakb/how-to-identify-which-node-is-performing-balance-operation-779158614.html 5 | */ 6 | select locked_by_node 7 | from clusterlockstatus 8 | where lock_name = 'com.atlassian.greenhopper.service.lexorank.balance.LexoRankBalancer'; 9 | -------------------------------------------------------------------------------- /sql/jira/jira_xml_backup_fails_throw_datatypeconverter.sql: -------------------------------------------------------------------------------- 1 | /* 2 | JIRA XML Backup fails throws "java.lang.NoClassDefFoundError: javax/xml/bind/DatatypeConverter" 3 | 4 | Cause: An AO table has column with BLOB datatype. According to this documentation Developing your plugin with Active Objects, JIRA does not fully support BLOB datatype in AO. 5 | 6 | */ 7 | 8 | -- MySQL 9 | SELECT TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME, DATA_TYPE 10 | FROM INFORMATION_SCHEMA.COLUMNS 11 | where table_name like 'AO_%' 12 | and data_type = 'blob'; 13 | 14 | 15 | -- PostgreSQL 16 | SELECT TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME, DATA_TYPE 17 | FROM INFORMATION_SCHEMA.COLUMNS 18 | WHERE TABLE_NAME LIKE 'AO_%' 19 | AND DATA_TYPE = 'bytea'; 20 | 21 | -- MS SQL SERVER 22 | SELECT DISTINCT ta.name AS table_name, 23 | SCHEMA_NAME(ta.schema_id) AS schema_name, 24 | c.name AS column_name, 25 | t.name AS data_type 26 | FROM sys.tables AS ta 27 | INNER JOIN sys.columns c ON ta.OBJECT_ID = c.OBJECT_ID 28 | INNER JOIN sys.types AS t ON c.user_type_id = t.user_type_id 29 | WHERE ta.name like 'AO_%' 30 | and (t.name = 'image' OR t.name = 'varbinary'); 31 | 32 | 33 | -- Cleanup tables 34 | 35 | DELETE 36 | FROM < table_name >; 37 | -------------------------------------------------------------------------------- /sql/jira/performance/jira_automation_for_jira_explain_perfromance.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Changing the join strategy to improve performance of the PostgreSQL database 3 | 
https://confluence.atlassian.com/automation/changing-the-join-strategy-to-improve-performance-of-the-postgresql-database-1072481210.html 4 | */ 5 | explain 6 | analyse 7 | select "AO_589059_AUDIT_ITEM_COMP_CGE"."AUDIT_ITEM_ID", 8 | "AO_589059_AUDIT_ITEM_COMP_CGE"."COMPONENT_NAME_KEY", 9 | "AO_589059_AUDIT_ITEM_COMP_CGE"."COMPONENT", 10 | "AO_589059_AUDIT_ITEM_COMP_CGE"."COMPONENT_ID", 11 | "AO_589059_AUDIT_ITEM_COMP_CGE"."DURATION", 12 | "AO_589059_AUDIT_ITEM_COMP_CGE"."ID", 13 | "AO_589059_AUDIT_ITEM_COMP_CGE"."OPTIMISED_IDS", 14 | "AO_589059_AUDIT_ITEM_COMP_CGE"."START_TIME", 15 | "AO_589059_AUDIT_ITEM_CGE_ITEM"."AUDIT_ITEM_COMPONENT_CHANGE_ID", 16 | "AO_589059_AUDIT_ITEM_CGE_ITEM"."AUDIT_ITEM_ID", 17 | "AO_589059_AUDIT_ITEM_CGE_ITEM"."CHANGE_FROM", 18 | "AO_589059_AUDIT_ITEM_CGE_ITEM"."CHANGE_TO", 19 | "AO_589059_AUDIT_ITEM_CGE_ITEM"."FIELD_NAME", 20 | "AO_589059_AUDIT_ITEM_CGE_ITEM"."ID", 21 | "AO_589059_AUDIT_ITEM_CGE_ITEM"."MESSAGE" 22 | from "public"."AO_589059_AUDIT_ITEM_COMP_CGE" "AO_589059_AUDIT_ITEM_COMP_CGE" 23 | left join "public"."AO_589059_AUDIT_ITEM_CGE_ITEM" "AO_589059_AUDIT_ITEM_CGE_ITEM" 24 | on "AO_589059_AUDIT_ITEM_COMP_CGE"."ID" = 25 | "AO_589059_AUDIT_ITEM_CGE_ITEM"."AUDIT_ITEM_COMPONENT_CHANGE_ID" 26 | where "AO_589059_AUDIT_ITEM_COMP_CGE"."AUDIT_ITEM_ID" = 10; -------------------------------------------------------------------------------- /sql/jira/queries/jira_audit_domain_emails.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Auditing user email domains by querying the application database 3 | Purpose 4 | If you are migrating to the Cloud or just auditing your users for security purposes, a good way to ensure that 5 | only authorized users will have access to your instance is to audit their email domains. 6 | Emails can be used to reset a user password and therefore are a key component to be assessed to keep your user base safe. 
7 | This article provides steps to help you obtain an aggregate list of email domains and how many users use each domain. 8 | With this information, you can then work with your security team to audit it. 9 | 10 | Solution 11 | Step 1: In your instance database, run the following SQL query to retrieve a report containing 12 | all the domains used in user emails and the user count for each domain: 13 | 14 | 15 | link: https://confluence.atlassian.com/migrationkb/auditing-user-email-domains-by-querying-the-application-database-1180146477.html 16 | */ 17 | select right (cwd_user.email_address, strpos(reverse(cwd_user.email_address), '@') - 1), count (*) 18 | from cwd_user 19 | inner join cwd_directory cd 20 | on cd.id = cwd_user.directory_id 21 | where cd.active = 1 22 | group by 1 23 | order by 2 desc; -------------------------------------------------------------------------------- /sql/jira/queries/jira_find_all_attachments_for_issue_key.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Querying attachments per issue key 3 | To retrieve the list of attachments and its corresponding issue keys. 4 | link: https://confluence.atlassian.com/jirakb/querying-attachments-per-issue-key-828794979.html 5 | 6 | */ 7 | 8 | SELECT DISTINCT p.pkey || '-' || j.issuenum, fa.filename 9 | FROM project p, 10 | jiraissue j, 11 | fileattachment fa 12 | WHERE p.id = j.project 13 | AND fa.issueid = j.id; 14 | -------------------------------------------------------------------------------- /sql/jira/queries/jira_find_all_users_of_gravatar.sql: -------------------------------------------------------------------------------- 1 | /* 2 | How to find all users, that make use of Gravatar for their profile in Jira 3 | 4 | As such Jira doesn't save the URL to the gravatar or saves this anywhere in the database. 5 | Instead Jira tries to collect the gravatar (if enabled) 6 | for every user that has the default profile picture. 
7 | Also this will not work for users, that never changed their avatar, as this would be set as default. 8 | */ 9 | 10 | 11 | SELECT u.lower_user_name, a.* 12 | FROM app_user AS u 13 | LEFT JOIN propertyentry AS pe ON pe.entity_id = u.id 14 | LEFT JOIN propertynumber AS pn ON pe.id = pn.id 15 | LEFT JOIN avatar AS a ON a.id = pn.propertyvalue 16 | WHERE entity_name = 'ApplicationUser' 17 | AND property_key = 'user.avatar.id' 18 | AND a.filename = 'Avatar-default.svg'; -------------------------------------------------------------------------------- /sql/jira/queries/jira_find_attachments_per_type.sql: -------------------------------------------------------------------------------- 1 | /* 2 | link: https://confluence.atlassian.com/jirakb/querying-attachments-per-file-type-1319575950.html 3 | */ 4 | 5 | SELECT fa.id, fa.filename, p.pkey, ji.issuenum 6 | FROM fileattachment fa 7 | join jiraissue ji on ji.id = fa.issueid 8 | join project p on p.id = ji.project 9 | WHERE LOWER(fa.filename) LIKE '%.png'; 10 | 11 | 12 | -- Find by mime-type 13 | -- https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types 14 | SELECT fa.id, fa.filename, p.pkey, ji.issuenum 15 | FROM fileattachment fa 16 | join jiraissue ji on ji.id = fa.issueid 17 | join project p on p.id = ji.project 18 | WHERE fa.mimetype LIKE 'image/png'; -------------------------------------------------------------------------------- /sql/jira/queries/jira_get_attachments_location.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Locate Jira server / Data center file attachments in the filesystem 3 | Jira stores attachments, such as files and images, in a file system. 4 | This page explains where attachments are located within this file system. 
5 | link: https://confluence.atlassian.com/jirakb/locate-jira-server-file-attachments-in-the-filesystem-859487788.html 6 | */ 7 | SELECT fa.id, 8 | fa.filename, 9 | pkk.pkey as project, 10 | ji.issuenum, 11 | concat('/var/atlassian/application-data/jira', '/data/attachments/', pkk.pkey, '/', 12 | CEILING((ji.issuenum / 10000)) * 10000, '/', pkk.pkey, '-', ji.issuenum, '/', fa.id) as Path, 13 | case 14 | when fa.mimetype = 'image/png' OR fa.mimetype = 'image/gif' OR fa.mimetype = 'image/jpeg' then concat( 15 | '/var/atlassian/application-data/jira', '/data/attachments/', pkk.pkey, '/', 16 | CEILING((ji.issuenum / 10000)) * 10000, '/', pkk.pkey, '-', ji.issuenum, '/thumbs/_thumb_', fa.id, 17 | '.png') end as thumbnail 18 | FROM fileattachment fa 19 | join jiraissue ji on ji.id = fa.issueid 20 | join (SELECT DISTINCT 21 | ON (project_id) project_key as pkey, project_id 22 | FROM project_key 23 | ORDER BY project_id, project_key desc) pkk 24 | on ji.project = pkk.project_id 25 | WHERE fa.created > '2021-04-20 13:45'; -------------------------------------------------------------------------------- /sql/jira/queries/jira_get_charting_app_gadgets.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Detect charting plugin gadget to understand the usage 3 | https://jira.atlassian.com/browse/JRASERVER-67011 4 | */ 5 | 6 | SELECT count(*) 7 | FROM portletconfiguration 8 | WHERE gadget_xml LIKE '%com.atlassian.jira.ext.charting%'; -------------------------------------------------------------------------------- /sql/jira/queries/jira_get_date_time_watcher_being_added_into_task.sql: -------------------------------------------------------------------------------- 1 | -- To identify when a watcher was added to an issue via database query 2 | -- https://confluence.atlassian.com/jirakb/identify-date-time-of-a-watcher-being-added-to-an-issue-836077733.html 3 | SELECT 4 | concat(project.pkey,'-',jiraissue.issuenum) AS pkey_issuenum, 
cwd_user.user_name, userassociation.created 5 | FROM 6 | userassociation 7 | JOIN 8 | cwd_user 9 | ON 10 | userassociation.source_name = cwd_user.user_name 11 | JOIN 12 | jiraissue 13 | ON 14 | jiraissue.ID = userassociation.SINK_NODE_ID 15 | JOIN 16 | project 17 | ON 18 | project.id = jiraissue.PROJECT 19 | WHERE 20 | jiraissue.id = userassociation.sink_node_id AND userassociation.association_type='WatchIssue' AND userassociation.sink_node_entity ='Issue' 21 | ORDER BY cwd_user.user_name ASC; 22 | -------------------------------------------------------------------------------- /sql/jira/queries/jira_get_fresh_tickets_uploaded_attachments.sql: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Outline a scenario in which users seek to retrieve attachment details 4 | from Jira tickets for a customizable duration of time, be it days or months. 5 | title: SQL query to get the details of attachments uploaded in Jira issues for configurable number of days or months. 
6 | link: https://confluence.atlassian.com/jirakb/sql-query-to-get-the-details-of-attachments-uploaded-in-jira-issues-for-configurable-number-of-days-or-months-1305249650.html 7 | */ 8 | select fa.id, 9 | fa.filename, 10 | p.pkey as project, 11 | ji.issuenum 12 | from fileattachment fa 13 | join jiraissue ji on 14 | ji.id = fa.issueid 15 | join project p on 16 | p.id = ji.project 17 | where fa.created > '2023-10-17 07:55'; -------------------------------------------------------------------------------- /sql/jira/queries/jira_get_groups_in_projects.sql: -------------------------------------------------------------------------------- 1 | /* 2 | How to identify which Groups have access to a Project in JIRA 3 | https://confluence.atlassian.com/jirakb/how-to-identify-which-groups-have-access-to-a-project-in-jira-884354938.html 4 | 5 | */ 6 | 7 | SELECT project.pkey as pkey, 8 | project.pname as project_name, 9 | projectrole.name as project_role_name, 10 | cwd_membership.parent_name as groupuser 11 | FROM cwd_membership, 12 | projectrole, 13 | projectroleactor, 14 | project 15 | WHERE project.id = projectroleactor.pid 16 | and projectroleactor.projectroleid = projectrole.id 17 | and roletype = 'atlassian-group-role-actor' 18 | and membership_type = 'GROUP_USER' 19 | and parent_name = roletypeparameter 20 | GROUP BY pkey, project_name, project_role_name, groupuser; -------------------------------------------------------------------------------- /sql/jira/queries/jira_get_moved_tickets.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Get moved tickets based on the change log 3 | How to list issues moved from one project to another in Jira 4 | 5 | */ 6 | 7 | select k.oldstring as "Old Key", 8 | coalesce(t.oldstring, it.pname) as "Old Type", 9 | k.newstring as "New Key", 10 | coalesce(t.newstring, it.pname) as "New Type", 11 | u.lower_user_name as "Username", 12 | kg.created as "Moved date" 13 | from changeitem k 14 | join 
changegroup kg on kg.id = k.groupid 15 | join app_user a on a.user_key = kg.author 16 | join cwd_user u on u.lower_user_name = a.lower_user_name 17 | join changegroup gt on gt.id = k.groupid 18 | left join changeitem t on t.groupid = gt.id and t.field = 'issuetype' 19 | join jiraissue i on i.id = kg.issueid 20 | join issuetype it on it.id = i.issuetype 21 | where k.field = 'Key'; 22 | 23 | -- also, you can check in moved_issue_key table 24 | -------------------------------------------------------------------------------- /sql/jira/queries/jira_get_projects_with_anonymous_access.sql: -------------------------------------------------------------------------------- 1 | -- How to Get a List of Projects that has Anonymous Access in Jira from Database 2 | -- https://confluence.atlassian.com/jirakb/how-to-get-a-list-of-projects-that-has-anonymous-access-in-jira-from-database-794368099.html 3 | 4 | SELECT p.id, p.pname, ps.name FROM project p 5 | INNER JOIN nodeassociation na ON 6 | p.id = na.source_node_id 7 | INNER JOIN schemepermissions sp ON 8 | na.sink_node_id = sp.scheme 9 | INNER JOIN permissionscheme ps ON 10 | na.sink_node_id = ps.id 11 | WHERE na.source_node_entity = 'Project' 12 | AND na.sink_node_entity = 'PermissionScheme' 13 | AND sp.permission_key='BROWSE_PROJECTS' 14 | AND sp.perm_type='group' 15 | AND sp.perm_parameter is null -------------------------------------------------------------------------------- /sql/jira/queries/jira_service_desk_get_sla_information.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Get Goals and SLA in one table across all products 3 | 4 | */ 5 | 6 | -- SELECT * FROM "AO_54307E_TIMEMETRIC"; 7 | 8 | SELECT 9 | vp."NAME" as "Portal Name", 10 | tm."NAME" as "SLA Name", 11 | g."JQL_QUERY", 12 | cal."NAME" as "Calendar", 13 | g."TARGET_DURATION"/1000/3600 as "Duration in h" 14 | FROM 15 | "AO_54307E_GOAL" g, 16 | "AO_54307E_TIMEMETRIC" tm, 17 | "AO_54307E_VIEWPORT" vp, 18 | 
"AO_7A2604_CALENDAR" cal 19 | 20 | WHERE 21 | g."CALENDAR_ID" = cal."ID" 22 | and g."TIME_METRIC_ID" = tm."ID" 23 | and tm."SERVICE_DESK_ID" = vp."ID" 24 | ; 25 | 1000/3600 as "Duration in h" 26 | FROM 27 | "AO_54307E_GOAL" g, 28 | "AO_54307E_TIMEMETRIC" tm, 29 | "AO_54307E_VIEWPORT" vp, 30 | "AO_7A2604_CALENDAR" cal 31 | 32 | WHERE 33 | g."CALENDAR_ID" = cal."ID" 34 | and g."TIME_METRIC_ID" = tm."ID" 35 | and tm."SERVICE_DESK_ID" = vp."ID" 36 | ; 37 | -------------------------------------------------------------------------------- /sql/jira/stats/jira_attachments_created_per_month.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Monthly created attachments, and yearly. 3 | 4 | */ 5 | 6 | -- check per year created files 7 | SELECT to_char(created, 'YYYY') AS "Year", 8 | COUNT(*) AS "Number of Attachments" 9 | FROM fileattachment fa 10 | GROUP BY to_char(created, 'YYYY') 11 | ORDER BY 1 LIMIT 100; 12 | 13 | -- check per month 14 | SELECT to_char(created, 'YYYY-MM') AS "Year", 15 | COUNT(*) AS "Number of Attachments" 16 | FROM fileattachment fa 17 | GROUP BY to_char(created, 'YYYY-MM') 18 | ORDER BY 1 LIMIT 100; 19 | 20 | 21 | /* 22 | Bottom query for MySQL 23 | */ 24 | 25 | -- mysql query for monthly 26 | SELECT DATE_FORMAT(created, '%Y-%m') as "Months", 27 | COUNT(*) as "Number of Attachments" 28 | FROM fileattachment fa 29 | GROUP BY YEAR (created), MONTH (created) 30 | ORDER BY 1 31 | LIMIT 100; 32 | 33 | -- mysql query for yearly 34 | SELECT DATE_FORMAT(created, '%Y') as "Year", 35 | COUNT(*) as "Number of Attachments" 36 | FROM fileattachment fa 37 | GROUP BY YEAR (created), MONTH (created) 38 | ORDER BY 1 39 | LIMIT 100; 40 | -------------------------------------------------------------------------------- /sql/jira/stats/jira_custom_field_null_checker.sql: -------------------------------------------------------------------------------- 1 | -- 
https://confluence.atlassian.com/jirakb/searching-for-jira-issue-by-custom-field-value-results-in-nullpointerexception-223224206.html

SELECT c.cfname      AS "Custom Field",
       o.customvalue AS "Value"
FROM customfield c
JOIN customfieldoption o ON c.id = o.customfield
WHERE customvalue IS NULL;


-- DELETE FROM customfieldoption WHERE customvalue is null;


-- options pointing at a custom field that no longer exists
SELECT customfield
FROM customfieldoption
WHERE customfield NOT IN (SELECT id
                          FROM customfield);


/*
DELETE
FROM
    customfieldoption
WHERE
    customfield NOT IN (
        SELECT
            id
        FROM
            customfield
    );
*/
--------------------------------------------------------------------------------
/sql/jira/stats/jira_custom_fields_values_status.sql:
--------------------------------------------------------------------------------
-- these are heavy requests

-- helps to understand the size of the EAV value table
SELECT count(id)
FROM customfieldvalue;

-- value count per custom field, including fields with no values
SELECT customfield.id,
       customfield.cfname,
       count(*)
FROM customfield
         LEFT JOIN customfieldvalue on customfield.id = customfieldvalue.customfield
GROUP BY customfield.id
ORDER BY count(*) DESC LIMIT 10
;


-- simple version
-- (fixed: original read "ORDER 2 DESC", which is a syntax error)
SELECT customfield, count(customfield)
FROM customfieldvalue
GROUP BY customfield
ORDER BY count(customfield) DESC
limit 10;
--------------------------------------------------------------------------------
/sql/jira/stats/jira_find_all_issues_in_db.sql:
--------------------------------------------------------------------------------
/*
 Find all Jira issue in Database
 Export Jira tickets from Jira database to CSV
*/

SELECT CONCAT(p.pkey, '-', ji.issuenum) AS "issuekey",
       ji.summary,
       ji.description,
       ji.assignee,
       ji.creator,
       ji.created,
| ji.updated 13 | FROM jiraissue ji 14 | JOIN project p ON ji.project = p.id 15 | limit 1000; -------------------------------------------------------------------------------- /sql/jira/stats/jira_general_total_stats.sql: -------------------------------------------------------------------------------- /* Inventory counts of the core Jira entities; each SELECT below is independent. */ 1 | --total number of users 2 | SELECT COUNT(ID) FROM cwd_user; 3 | 4 | --total number of groups 5 | SELECT COUNT(ID) FROM cwd_group; 6 | 7 | --total number of attachments 8 | SELECT COUNT(ID) FROM fileattachment; 9 | 10 | --total number of issues 11 | SELECT COUNT(ID) FROM jiraissue; 12 | 13 | --total number of projects 14 | SELECT COUNT(ID) FROM project; 15 | 16 | --total number of comments 17 | SELECT COUNT(ID) FROM jiraaction; 18 | 19 | --total number of custom fields 20 | SELECT COUNT(ID) FROM customfield; 21 | 22 | --total number of issue security schemes 23 | SELECT COUNT(ID) FROM issuesecurityscheme; 24 | 25 | --total number of screen schemes 26 | SELECT COUNT(ID) FROM fieldscreenscheme; 27 | 28 | --total number of components 29 | SELECT COUNT(ID) FROM component; 30 | 31 | --total number of issue types 32 | SELECT COUNT(ID) FROM issuetype; 33 | 34 | --total number of priorities 35 | SELECT COUNT(ID) FROM priority; 36 | 37 | --total number of resolution 38 | SELECT COUNT(ID) FROM resolution; 39 | 40 | --total number of screens 41 | SELECT COUNT(ID) FROM fieldscreen; 42 | 43 | --total number of statuses 44 | SELECT COUNT(ID) FROM issuestatus; 45 | 46 | --total number of versions 47 | SELECT COUNT(ID) FROM projectversion; -------------------------------------------------------------------------------- /sql/jira/stats/jira_identify_users_in_jira_who_have_not_logged.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gonchik/cleanup-scripts/cf74d762d6cfde73635048253e2f63b1d7111ac2/sql/jira/stats/jira_identify_users_in_jira_who_have_not_logged.sql 
-------------------------------------------------------------------------------- /sql/jira/stats/jira_issue_field_configuration_scheme_overview.sql: -------------------------------------------------------------------------------- 1 | -- Just representation of config Issue type scheme by project 2 | -- (table fieldconfigscheme is aliased FCS and used consistently below) 3 | SELECT FCS.id as "Issue Type Scheme", 4 | FCS.configname as "Issue Type Scheme name", 5 | IT.id as "Issue Type", 6 | IT.pname as "Issue Type name", 7 | CASE WHEN FCS.configname = 'Default Issue Type Scheme' THEN 0 ELSE CC.project END as "Project", 8 | CASE WHEN FCS.configname = 'Default Issue Type Scheme' THEN 'Global' ELSE P.pkey END as "Project Key", 9 | CASE WHEN FCS.configname = 'Default Issue Type Scheme' THEN 'Global' ELSE P.pname END as "Project Name" 10 | FROM 11 | fieldconfigscheme FCS 12 | LEFT JOIN optionconfiguration OC ON FCS.id = OC.fieldconfig 13 | LEFT JOIN issuetype IT ON OC.optionid = IT.id 14 | LEFT JOIN configurationcontext CC ON CC.fieldconfigscheme = FCS.id 15 | LEFT JOIN project P ON P.id = CC.project 16 | WHERE FCS.fieldid='issuetype'; 17 | -------------------------------------------------------------------------------- /sql/jira/stats/jira_issue_link_type_counts_per_issues.sql: -------------------------------------------------------------------------------- 1 | /* 2 | How many tickets has exact issue link types in Jira 3 | */ 4 | 5 | select ist.linkname, count(ist.linkname) 6 | from issuelink il 7 | join issuelinktype ist on ist.id = il.linktype 8 | group by ist.linkname 9 | order by 2; 10 | -------------------------------------------------------------------------------- /sql/jira/stats/jira_issue_link_type_counts_per_projects.sql: -------------------------------------------------------------------------------- 1 | /* 2 | How many tickets has exact issue link types in Jira per projects 3 | */ 4 | 5 | SELECT p.pkey, ist.linkname, count(ist.linkname) 6 | FROM jiraissue ji 7 | INNER JOIN issuelink il ON il.source = ji.id 8 | INNER JOIN project p ON 
ji.project = p.id 9 | INNER JOIN issuelinktype ist on ist.id = il.linktype 10 | GROUP BY p.id, ist.linkname 11 | ORDER BY 1, 2, 3; 12 | 13 | -------------------------------------------------------------------------------- /sql/jira/stats/jira_issue_type_per_issues.sql: -------------------------------------------------------------------------------- 1 | /* 2 | How many tickets linked to exact issue types in Jira 3 | Purpose: Make analysis of link type overall 4 | */ 5 | 6 | SELECT it.pname AS "Issue Type", 7 | COUNT(ji.id) AS "Number Of Tickets" 8 | FROM jiraissue ji 9 | INNER JOIN issuetype it ON ji.issuetype = it.id 10 | -- WHERE ji.created BETWEEN '2023-01-01' AND '2023-06-01' 11 | GROUP BY it.id /* NOTE(review): selecting non-aggregated it.pname relies on grouping by the key it.id; MySQL with ONLY_FULL_GROUP_BY may reject this */ 12 | ORDER BY 2 DESC; -------------------------------------------------------------------------------- /sql/jira/stats/jira_issue_type_per_project.sql: -------------------------------------------------------------------------------- 1 | /* 2 | How many issue types there are per project in Jira 3 | Purpose: Admins may want to pull statistics of how many issue types there are per project in Jira. 
4 | link: https://confluence.atlassian.com/jirakb/how-to-query-the-jira-database-for-issue-type-statistics-per-project-993922099.html 5 | */ 6 | 7 | 8 | SELECT p.pkey, it.pname, COUNT(i.id) 9 | FROM jiraissue i 10 | INNER JOIN project p ON i.project = p.id 11 | INNER JOIN issuetype it ON it.id = i.issuetype 12 | GROUP BY p.id, it.id 13 | ORDER BY p.pkey, count(i.id) DESC; 14 | -------------------------------------------------------------------------------- /sql/jira/stats/jira_issues_create_cgrouped_by_month.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Number of tickets grouped by month of creation 3 | Purpose: make an overview 4 | */ 5 | 6 | SELECT date_format(created, '%Y-%m') as "Year - Month", 7 | count(*) as "Total cases" 8 | FROM jiraissue 9 | GROUP BY year (created), month (created); 10 | 11 | 12 | -- postgresql query /* NOTE(review): grouping by DATE_TRUNC('year', ...) below is redundant — the month truncation already determines the year */ 13 | SELECT to_char(DATE_TRUNC('month', created),'YYYY-MM') as "Year - Month", 14 | count(*) as "Total cases" 15 | FROM jiraissue 16 | GROUP BY DATE_TRUNC('month', created), DATE_TRUNC('year', created); -------------------------------------------------------------------------------- /sql/jira/stats/jira_jeti_notification_stats.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Provide stats of notification from Jira project to end users via JETI app (Email This Issue plugin). 3 | Recommended adjust the dates. 
4 | */ 5 | 6 | SELECT to_char(DATE_TRUNC('month', notif."SEND_TIME_STAMP"), 'YYYY-MM') as "Date", 7 | p.pkey as "Project", 8 | it.pname as "Issue Type", 9 | count(notif."ISSUE_KEY") as "Notifications Number" 10 | FROM "AO_544E33_AUDIT_LOG_ENTRY" notif 11 | JOIN jiraissue ji on notif."ISSUE_KEY" = 12 | CONCAT((select pro.pkey from project pro where ji.project = pro.id), '-', ji.issuenum) 13 | INNER JOIN project p ON ji.project = p.id 14 | INNER JOIN issuetype it ON it.id = ji.issuetype 15 | WHERE notif."SEND_TIME_STAMP" > '2023-01-01' 16 | AND notif."SEND_TIME_STAMP" < '2023-06-01' 17 | GROUP BY DATE_TRUNC('month', notif."SEND_TIME_STAMP"), p.pkey, it.pname; -------------------------------------------------------------------------------- /sql/jira/stats/jira_project_activity_level_stats.sql: -------------------------------------------------------------------------------- 1 | /* 2 | JIRA Project Activity Level Stats per Project Updated last 30 days 3 | */ 4 | 5 | -- MySQL: LEFT JOIN keeps projects with zero recent issues; the date filter must live in the ON clause so it does not turn the join into an inner join 6 | SELECT p.pkey as "KEY", 7 | p.pname as "Project Name", 8 | COUNT(i.id) AS "Issue Updated Last 30 Days", 9 | pc.cname as "CATEGORY" 10 | FROM project p 11 | LEFT JOIN jiraissue i 12 | ON i.project = p.ID AND i.updated > NOW() - INTERVAL 30 DAY 13 | INNER JOIN nodeassociation na ON na.source_node_id = p.id 14 | INNER JOIN projectcategory pc ON na.sink_node_id = pc.id 15 | WHERE na.sink_node_entity = 'ProjectCategory' 16 | and na.association_type = 'ProjectCategory' 17 | GROUP BY p.pname, p.pkey, pc.cname 18 | ORDER BY "Issue Updated Last 30 Days"; 19 | 20 | 21 | -- PostgreSQL 22 | SELECT project.pname AS "Project Name", 23 | COUNT(jiraissue.id) AS "Issue Updated Last 30 Days" 24 | FROM project 25 | LEFT JOIN jiraissue 26 | ON jiraissue.project = project.ID 27 | AND jiraissue.updated > NOW() - INTERVAL '30 DAY' 28 | GROUP BY project.pname 29 | ORDER BY "Issue Updated Last 30 Days"; 
-------------------------------------------------------------------------------- /sql/jira/stats/jira_project_activity_stats.sql: -------------------------------------------------------------------------------- 1 | /* 2 | That request shows Jira project of last issue activity to understand the frequency 3 | */ 4 | 5 | /* NOTE(review): DISTINCT is redundant here — the GROUP BY already returns one row per group */ 6 | SELECT DISTINCT p.pkey as "KEY", 7 | p.pname as "Project Name", 8 | MAX(i.UPDATED) as "Last Ticket Updated", 9 | pc.cname as "CATEGORY" 10 | FROM jiraissue i 11 | INNER JOIN project p ON p.ID = i.PROJECT 12 | INNER JOIN nodeassociation na ON na.source_node_id = p.id 13 | INNER JOIN projectcategory pc ON na.sink_node_id = pc.id 14 | WHERE na.sink_node_entity = 'ProjectCategory' 15 | and na.association_type = 'ProjectCategory' 16 | GROUP BY i.PROJECT, p.pname, p.pkey, pc.cname 17 | ORDER BY MAX(i.UPDATED) ASC, p.pname; -------------------------------------------------------------------------------- /sql/jira/stats/jira_project_attachment_statistics.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Finding Attachment Statistic in Jira 3 | 4 | */ 5 | 6 | 7 | SELECT p.pkey, 8 | p.pname, 9 | EXTRACT(YEAR FROM f.created) yr, 10 | COUNT(f.id) AS "# of files", 11 | MIN(f.filesize) / 1024 AS "min size (kB)", 12 | AVG(f.filesize) / 1024 AS "average size(kB)", 13 | MAX(f.filesize) / 1024 AS "max size (kB)", 14 | SUM(f.filesize) / (1024 * 1024) AS "total size(mB)" /* NOTE(review): legacy comma-style joins below; explicit INNER JOIN ... ON would be clearer */ 15 | FROM fileattachment f, 16 | project p, 17 | jiraissue i 18 | WHERE f.issueid = i.id 19 | AND i.project = p.id 20 | GROUP BY p.pkey, p.pname, yr; -------------------------------------------------------------------------------- /sql/jira/stats/jira_project_tickets_count.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Stats number of tickets per every project in Jira 3 | */ 4 | 5 | SELECT p.pname AS "Project Name", 6 | p.pkey AS "Project Key", 7 | COUNT(i.id) AS "No. 
Issue" 8 | FROM jiraissue i 9 | INNER JOIN project p ON i.project = p.id 10 | GROUP BY (p.id) 11 | ORDER BY COUNT(i.id) DESC; -------------------------------------------------------------------------------- /sql/jira/stats/jira_resolutions_stats.sql: -------------------------------------------------------------------------------- 1 | -- How many tickets linked to exact Resolutions in Jira 2 | 3 | SELECT re.pname, 4 | (SELECT count(ji.id) 5 | FROM jiraissue ji 6 | WHERE re.id = ji.RESOLUTION) as "Number of linked tickets" 7 | FROM resolution re 8 | ORDER BY 2 ASC; -------------------------------------------------------------------------------- /sql/jira/stats/jira_retrive_list_of_users_assigned_to_project_roles.sql: -------------------------------------------------------------------------------- 1 | -- Retrieve a list of users assigned to project roles in Jira server 2 | -- https://confluence.atlassian.com/jirakb/retrieve-a-list-of-users-assigned-to-project-roles-in-jira-server-705954232.html 3 | 4 | 5 | SELECT p.pname, pr.NAME, u.display_name 6 | FROM projectroleactor pra 7 | INNER JOIN projectrole pr ON pr.ID = pra.PROJECTROLEID 8 | INNER JOIN project p ON p.ID = pra.PID 9 | INNER JOIN app_user au ON au.user_key = pra.ROLETYPEPARAMETER 10 | INNER JOIN cwd_user u ON u.lower_user_name = au.lower_user_name; -------------------------------------------------------------------------------- /sql/jira/stats/jira_scriptrunner_jobs_in_json_pretty.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Get job configs of Scriptrunner 3 | */ 4 | SELECT jsonb_pretty(t."SETTING"::jsonb) 5 | FROM "AO_4B00E6_STASH_SETTINGS" t 6 | WHERE t."KEY" = 'scheduled_jobs'; -------------------------------------------------------------------------------- /sql/jira/stats/jira_service_job_config_as_clustered_job.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Get info from clustered job table 3 | */ 4 | select 
id, 5 | job_id, 6 | sched_type, 7 | interval_millis, 8 | cron_expression, 9 | time_zone, 10 | next_run 11 | from clusteredjob; -------------------------------------------------------------------------------- /sql/jira/stats/jira_service_job_config_data.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Provide service configs which running on that node. 3 | */ 4 | select id, servicename, clazz, cron_expression, delaytime 5 | from serviceconfig; -------------------------------------------------------------------------------- /sql/jira/stats/jira_service_management_export_fields_and_forms.sql: -------------------------------------------------------------------------------- 1 | select request."NAME", 2 | fields."FIELD_ID" as "custom_field", 3 | fields."LABEL" as "field_portal_name", 4 | fields."REQUIRED" 5 | from "AO_54307E_VIEWPORT" request 6 | join "AO_54307E_VIEWPORTFIELD" fields on fields."FORM_ID" = request."ID" 7 | order by 1, 3 8 | ; -------------------------------------------------------------------------------- /sql/jira/stats/jira_service_management_long_request_automation_rules.sql: -------------------------------------------------------------------------------- 1 | /* 2 | * 3 | * Get top 10 automation rules longer than 1 sec 4 | * 5 | */ 6 | 7 | SELECT "FINISH_TIME_MILLIS"-"START_TIME_MILLIS" as duration, * 8 | FROM "AO_9B2E3B_IF_COND_EXECUTION" 9 | WHERE "FINISH_TIME_MILLIS"-"START_TIME_MILLIS" > 1000 10 | ORDER BY 1 DESC 11 | LIMIT 10; -------------------------------------------------------------------------------- /sql/jira/stats/jira_service_management_project_issue_type_request_type_name.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Export Project - Issue Type - Customer Request Type 3 | 4 | */ 5 | 6 | SELECT p.pname as project_name, 7 | p.pkey as project_key, 8 | it.pname as issue_type_name, 9 | r."NAME" as request_type_name 10 | 
FROM "AO_54307E_VIEWPORT" po, 11 | "AO_54307E_VIEWPORTFORM" r, 12 | project p, 13 | issuetype it 14 | WHERE po."ID" = r."VIEWPORT_ID" 15 | AND po."PROJECT_ID" = p.id 16 | AND r."ISSUE_TYPE_ID" = CAST(it.id AS BIGINT) 17 | ORDER BY p.pname; 18 | -------------------------------------------------------------------------------- /sql/jira/stats/jira_service_management_request_types_stats.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Get monthly created request types 3 | */ 4 | 5 | SELECT to_char(DATE_TRUNC('month', created),'YYYY-MM') as "Monthly", 6 | p.pkey, 7 | (select r."NAME" /* NOTE(review): the subquery re-declares alias p, shadowing the outer project p — works, but rename for clarity */ 8 | FROM "AO_54307E_VIEWPORT" po, "AO_54307E_VIEWPORTFORM" r, project p 9 | WHERE po."ID"=r."VIEWPORT_ID" AND po."PROJECT_ID"=p.id and (po."KEY" || '/' || r."KEY") = c.stringvalue) as "Request Type", 10 | count(*) as "Total cases" 11 | FROM jiraissue ji 12 | INNER JOIN project p ON ji.project = p.id 13 | INNER JOIN customfieldvalue c on ji.id = c.issue and c.customfield=(SELECT id FROM customfield WHERE cfname='Customer Request Type') 14 | GROUP BY DATE_TRUNC('month', created), p.pkey, "Request Type" 15 | order by 2; -------------------------------------------------------------------------------- /sql/jira/stats/jira_stats_of_push_notifications.sql: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Provide stats of push notifications 4 | Author: Gonchik Tsymzhitov 5 | 6 | */ 7 | SELECT to_char(DATE_TRUNC('month', notif."NOTIFICATION_DATE"), 'YYYY-MM'), 8 | p.pkey as "Project", 9 | it.pname as "Issue Type", 10 | count(notif."ISSUE_KEY") 11 | FROM "AO_248DF5_INOTIFICATION" notif 12 | JOIN jiraissue ji on notif."ISSUE_KEY" = 13 | CONCAT((select pro.pkey from project pro where ji.project = pro.id), '-', ji.issuenum) 14 | INNER JOIN project p ON ji.project = p.id 15 | inner join issuetype it on it.id = ji.issuetype 16 | group by DATE_TRUNC('month', notif."NOTIFICATION_DATE"), p.pkey, it.pname; 
-------------------------------------------------------------------------------- /sql/jira/stats/jira_stats_status_issue_counts.sql: -------------------------------------------------------------------------------- 1 | /* 2 | How many tickets linked to exact status in Jira 3 | */ 4 | /* correlated subquery: one issue count is computed per issuestatus row */ 5 | SELECT it.pname as "Status Name", 6 | (SELECT count(ji.id) 7 | FROM jiraissue ji 8 | WHERE it.id = ji.issuestatus) as "Count of tickets" 9 | FROM issuestatus it 10 | ORDER BY 2 ASC; -------------------------------------------------------------------------------- /sql/jira/stats/jira_stats_user_creation_per_month.sql: -------------------------------------------------------------------------------- 1 | /* 2 | User creation per month. 3 | Note: it can be users without access to Jira app 4 | */ 5 | 6 | SELECT to_char(created_date, 'YYYY-MM') AS "Year", 7 | COUNT(*) AS "Number of Users" 8 | FROM cwd_user 9 | GROUP BY to_char(created_date, 'YYYY-MM') 10 | ORDER BY 1 LIMIT 100; 11 | 12 | SELECT to_char(created_date, 'YYYY') AS "Year", 13 | COUNT(*) AS "Number of Users" 14 | FROM cwd_user 15 | GROUP BY to_char(created_date, 'YYYY') 16 | ORDER BY 1 LIMIT 100; 17 | 18 | 19 | -- mysql query per month 20 | SELECT date_format(created_date, '%Y-%m'), count(*) 21 | FROM cwd_user 22 | GROUP BY year (created_date), month (created_date) 23 | ORDER BY 1; 24 | 25 | SELECT date_format(created_date, '%Y'), count(*) 26 | FROM cwd_user 27 | GROUP BY year (created_date) 28 | ORDER BY 1; -------------------------------------------------------------------------------- /sql/jira/stats/jira_tickets_with_large_number_of_worklogs.sql: -------------------------------------------------------------------------------- 1 | -- SQL to identify issues with large worklog history 2 | -- https://jira.atlassian.com/browse/JRASERVER-45903 3 | 4 | -- node reindex replication fails for single issues with 1000 or more worklog entries 5 | -- https://jira.atlassian.com/browse/JRASERVER-71980 6 | 7 | 8 | SELECT 
concat(p.pkey,'-',i.issuenum) as issue, count(a.id) as CountWorklogs 9 | FROM worklog a, jiraissue i, project p 10 | WHERE i.project = p.id 11 | AND i.id = a.issueid 12 | GROUP BY p.pkey,i.issuenum 13 | HAVING count(a.id) > 900 14 | ORDER BY CountWorklogs DESC 15 | limit 100; -------------------------------------------------------------------------------- /sql/jira/stats/jira_time_in_status_querry.sql: -------------------------------------------------------------------------------- 1 | with statusHistory as 2 | (select jiraissue.ID, 3 | p.pname, 4 | p.pkey, 5 | jiraissue.issuenum, 6 | jiraissue.priority, 7 | pri.pname as priorityname, 8 | c.cname as componentname, 9 | pv.id as fixversionId, 10 | pv.vname as fixversion, 11 | jiraissue.created as issueCreated, 12 | jiraissue.resolutiondate, 13 | changeitem.OLDSTRING OldStatus, 14 | changeitem.NEWSTRING NewStatus, 15 | changegroup.CREATED Executed, 16 | changegroup.CREATED - lag(changegroup.CREATED) over (partition by jiraissue.ID order by changegroup.CREATED) as MinutesInStatus, row_number() over (partition by jiraissue.ID order by changegroup.CREATED) StatusOrder /* NOTE(review): despite the name, MinutesInStatus is a raw timestamp difference (an interval), not a minute count */ 17 | from changeitem 18 | inner join changegroup on changeitem.groupid = changegroup.id 19 | inner join jiraissue on jiraissue.id = changegroup.issueid 20 | inner join project p on jiraissue.project = p.id 21 | inner join nodeassociation na on na.SOURCE_NODE_ID = jiraissue.ID 22 | inner join projectversion pv on pv.id = na.SINK_NODE_ID 23 | join priority pri on pri.id = jiraissue.priority 24 | inner join component c on na.sink_node_id = c.id /* NOTE(review): the single nodeassociation join above feeds BOTH projectversion and component with no sink_node_entity filter — one na row cannot reference both entities, so rows only survive on id collisions; verify against two separate nodeassociation joins each filtered by sink_node_entity */ 25 | where changeitem.field = 'status' 26 | and changeitem.FIELDTYPE = 'jira' 27 | and pkey = 'HM') 28 | select * 29 | from statusHistory; -------------------------------------------------------------------------------- /sql/jira/stats/jira_top_tickets_with_large_number_of_attachments.sql: -------------------------------------------------------------------------------- 1 | /* 2 | find tickets with large number of 
attachments 3 | purpose: detect broken automations 4 | link: https://jira.atlassian.com/browse/JRASERVER-66251 5 | */ 6 | 7 | SELECT concat(p.pkey, '-', i.issuenum) as issue, count(f.id) 8 | FROM fileattachment f, 9 | jiraissue i, 10 | project p 11 | WHERE i.project = p.id 12 | and i.id = f.issueid 13 | GROUP BY p.pkey, i.issuenum 14 | HAVING count(i.id) > 100 /* counts joined rows, i.e. attachments per issue */ 15 | ORDER BY count(i.id) desc LIMIT 100; -------------------------------------------------------------------------------- /sql/jira/stats/jira_top_tickets_with_large_number_of_comments.sql: -------------------------------------------------------------------------------- 1 | /* 2 | find tickets with large number of comments 3 | https://jira.atlassian.com/browse/JRASERVER-66251 4 | */ 5 | 6 | 7 | SELECT concat(p.pkey, '-', i.issuenum) as issue, count(i.id) 8 | FROM jiraaction a, 9 | jiraissue i, 10 | project p 11 | WHERE i.project = p.id 12 | and i.id = a.issueid 13 | GROUP BY p.pkey, i.issuenum 14 | HAVING count(i.id) > 100 15 | ORDER BY count(i.id) DESC limit 100; -------------------------------------------------------------------------------- /sql/jira/stats/jira_user_creation_per_month_with_application_access.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Monthly created active users as Jira Core or Jira Software application access user 3 | */ 4 | SELECT date_format(u.created_date, '%Y-%m') as "Period", count(*) as "Created tickets" 5 | FROM cwd_user u 6 | JOIN cwd_membership m ON u.id = m.child_id AND u.directory_id = m.directory_id 7 | JOIN licenserolesgroup lrg ON lower(m.parent_name) = lower(lrg.group_id) 8 | JOIN cwd_directory d ON m.directory_id = d.id 9 | WHERE d.active = '1' 10 | AND u.active = '1' 11 | AND license_role_name in ('jira-software', 'jira-core') 12 | GROUP BY year (u.created_date), month (u.created_date); 13 | -------------------------------------------------------------------------------- 
/sql/jira/stats/jira_who_has_not_been_logged_in_last_quarter.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Identify users in Jira who haven't logged in for the past 90 days 3 | link: https://confluence.atlassian.com/jirakb/identify-users-in-jira-who-haven-t-logged-in-for-the-past-90-days-695241569.html 4 | */ 5 | 6 | SELECT d.directory_name AS "Directory", 7 | u.user_name AS "Username", 8 | to_timestamp(CAST(ca.attribute_value AS BIGINT) / 1000) AS "Last Login" 9 | FROM cwd_user u 10 | JOIN cwd_directory d ON u.directory_id = d.id 11 | LEFT JOIN cwd_user_attributes ca ON u.id = ca.user_id AND ca.attribute_name = 'login.lastLoginMillis' 12 | WHERE u.active = 1 13 | AND d.active = 1 14 | AND u.lower_user_name IN (SELECT DISTINCT lower_child_name 15 | FROM cwd_membership m 16 | JOIN licenserolesgroup gp ON m.lower_parent_name = lower(gp.GROUP_ID)) 17 | AND (u.id IN (SELECT ca.user_id 18 | FROM cwd_user_attributes ca 19 | WHERE attribute_name = 'login.lastLoginMillis' 20 | AND to_timestamp(CAST(ca.attribute_value as bigint) / 1000) <= current_date - 90) 21 | OR u.id NOT IN (SELECT ca.user_id 22 | FROM cwd_user_attributes ca 23 | WHERE attribute_name = 'login.lastLoginMillis') 24 | ) 25 | ORDER BY "Last Login" DESC; -------------------------------------------------------------------------------- /sql/jira/stats/jira_who_never_logged_in_jira.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Who never logged into Jira 3 | link: https://confluence.atlassian.com/jirakb/identify-users-in-jira-who-haven-t-logged-in-for-the-past-90-days-695241569.html 4 | */ 5 | /* NOTE(review): NOT IN returns zero rows if the subquery ever yields a NULL user_name — presumed non-null here; NOT EXISTS would be safer. Verify. */ 6 | select user_name 7 | from cwd_user 8 | where user_name not in 9 | (SELECT cwd_user.user_name 10 | FROM cwd_user, 11 | cwd_user_attributes 12 | WHERE cwd_user_attributes.user_id = cwd_user.id 13 | AND cwd_user_attributes.attribute_name = 'lastAuthenticated'); 
-------------------------------------------------------------------------------- /sql/mysql/mysql_detect_blob_columns.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Large objects in MySQL are columns with following data types: blob, mediumblob, longblob, text, mediumtext, and longtext. 3 | */ 4 | 5 | select tab.table_name, 6 | count(*) as columns 7 | from information_schema.tables as tab 8 | inner join information_schema.columns as col 9 | on col.table_schema = tab.table_schema 10 | and col.table_name = tab.table_name 11 | and col.data_type in ('blob', 'mediumblob', 'longblob', 12 | 'text', 'mediumtext', 'longtext') 13 | where tab.table_schema = 'your database name' /* placeholder: replace with the actual schema name */ 14 | and tab.table_type = 'BASE TABLE' 15 | group by tab.table_name 16 | order by tab.table_name; -------------------------------------------------------------------------------- /sql/mysql/mysql_show_unused_space_table.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Get possible optimization 3 | */ 4 | select table_name, round(data_length/1024/1024) as DATA_LENGTH_MB, round(data_free/1024/1024) as DATA_FREE_MB 5 | from information_schema.tables 6 | where table_schema='' 7 | order by data_free desc; 8 | 9 | -------------------------------------------------------------------------------- /sql/postgresql/postgresql_cache_hit_ratio_information.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Cache Hit Ratio 3 | 4 | */ /* NOTE(review): the division yields NULL or errors when no blocks were read yet (sum + sum = 0) — consider NULLIF on the denominator */ 5 | SELECT sum(heap_blks_read) as heap_read, 6 | sum(heap_blks_hit) as heap_hit, 7 | sum(heap_blks_hit) / (sum(heap_blks_hit) + sum(heap_blks_read)) as ratio 8 | FROM pg_statio_user_tables; -------------------------------------------------------------------------------- /sql/postgresql/postgresql_check_vacuum_running_status.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Check vacuum running status 3 
| */ 4 | 5 | SELECT relname, 6 | last_vacuum, 7 | last_autovacuum 8 | FROM pg_stat_user_tables; -------------------------------------------------------------------------------- /sql/postgresql/postgresql_compare_rollbacks_commits.sql: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Compare rollbacks and commits 4 | Purpose: Compare commits and rollbacks for the understanding client activity 5 | 6 | */ 7 | SELECT sum(xact_commit) as commits, 8 | sum(xact_rollback) as rollbacks 9 | FROM pg_stat_database; -------------------------------------------------------------------------------- /sql/postgresql/postgresql_connection_find_open_connections.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Find open connections, oldest first 3 | */ 4 | 5 | SELECT EXTRACT(EPOCH FROM (now() - backend_start)) AS backend_seconds, 6 | EXTRACT(EPOCH FROM (now() - query_start)) AS query_seconds, 7 | usename, 8 | client_addr, 9 | left(query, 300) AS query_preview, 10 | backend_type 11 | FROM pg_stat_activity 12 | where state = 'active' OR state = 'idle' 13 | -- AND datname = '...' -- optionally restrict to one database 14 | ORDER BY 1 ASC; 15 | 16 | 17 | /* 18 | 19 | Kill idle open connections - that have been open > threshold (minutes) 20 | 21 | */ 22 | 23 | 24 | \set idleThresholdMinutes 1 25 | 26 | SELECT pg_terminate_backend(pid) 27 | FROM pg_stat_activity 28 | WHERE state = 'idle' 29 | AND EXTRACT(EPOCH FROM (now() - query_start)) / 60 > :idleThresholdMinutes 30 | ; -------------------------------------------------------------------------------- /sql/postgresql/postgresql_connection_time_stats.sql: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Provide a connection stats for PostgreSQL clients 4 | Purpose: activity of clients 5 | */ 6 | 7 | 8 | with states as 9 | (select datname, client_addr, case 10 | when now() - state_change < interval '10 seconds' then 
'10sec' 11 | when now() - state_change < interval '30 seconds' then '30sec' 12 | when now() - state_change < interval '60 seconds' then '60sec' 13 | else 'idle' end 14 | as stat from pg_stat_activity) 15 | select datname, client_addr, stat, count(*) 16 | from states group by datname, client_addr, stat; -------------------------------------------------------------------------------- /sql/postgresql/postgresql_get_blocked_ps_queries.sql: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Finding blocked processes and blocking queries 4 | 5 | */ /* pg_blocking_pids(pid) returns the set of backend PIDs blocking the given backend */ 6 | SELECT 7 | activity.pid, 8 | activity.usename, 9 | activity.query, 10 | blocking.pid AS blocking_id, 11 | blocking.query AS blocking_query 12 | FROM pg_stat_activity AS activity 13 | JOIN pg_stat_activity AS blocking ON blocking.pid = ANY(pg_blocking_pids(activity.pid)); 14 | 15 | /* 16 | Viewing locks with table names and queries 17 | */ 18 | 19 | select 20 | relname as relation_name, 21 | query, 22 | pg_locks.* 23 | from pg_locks 24 | join pg_class on pg_locks.relation = pg_class.oid 25 | join pg_stat_activity on pg_locks.pid = pg_stat_activity.pid; -------------------------------------------------------------------------------- /sql/postgresql/postgresql_get_connection_active_and_remaining.sql: -------------------------------------------------------------------------------- 1 | /* 2 | * 3 | * Number of active connections and remaining connections 4 | * used for easy analyze the situation of requests 5 | */ 6 | 7 | select max_conn,used,res_for_super,max_conn-used-res_for_super res_for_normal 8 | from 9 | (select count(*) used from pg_stat_activity) t1, 10 | (select setting::int res_for_super from pg_settings where name=$$superuser_reserved_connections$$) t2, 11 | (select setting::int max_conn from pg_settings where name=$$max_connections$$) t3; -------------------------------------------------------------------------------- 
/sql/postgresql/postgresql_get_high_number_of_rows_tables.sql: -------------------------------------------------------------------------------- 1 | /* 2 | * 3 | * Get list of high number of rows tables, helps to the detecting where large tables 4 | * (reltuples is the planner's row-count estimate, not an exact count) 5 | */ 6 | 7 | SELECT n.nspname as table_schema, 8 | c.relname as table_name, 9 | c.reltuples as rows 10 | FROM pg_class c 11 | join pg_namespace n 12 | on n.oid = c.relnamespace 13 | WHERE c.relkind = 'r' 14 | and n.nspname not in ('information_schema' 15 | , 'pg_catalog') 16 | order by c.reltuples desc 17 | limit 10; -------------------------------------------------------------------------------- /sql/postgresql/postgresql_get_index_usages.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Usage of indexes 3 | */ 4 | /* NOTE(review): integer division — the percentage below is truncated toward zero */ 5 | SELECT relname, 6 | 100 * idx_scan / (seq_scan + idx_scan) percent_of_times_index_used, 7 | n_live_tup rows_in_table 8 | FROM pg_stat_user_tables 9 | WHERE seq_scan + idx_scan > 0 10 | ORDER BY n_live_tup DESC; -------------------------------------------------------------------------------- /sql/postgresql/postgresql_get_largest_tables.sql: -------------------------------------------------------------------------------- 1 | /* 2 | * This query returns list of largest (by data size) tables. 
3 | */ 4 | SELECT schemaname as table_schema, 5 | relname as table_name, 6 | pg_size_pretty(pg_total_relation_size(relid)) as total_size, 7 | pg_size_pretty(pg_relation_size(relid)) as data_size, 8 | pg_size_pretty(pg_total_relation_size(relid) - pg_relation_size(relid)) 9 | as external_size 10 | FROM pg_catalog.pg_statio_user_tables 11 | ORDER BY pg_total_relation_size(relid) DESC , 12 | pg_relation_size(relid) DESC 13 | LIMIT 10; -------------------------------------------------------------------------------- /sql/postgresql/postgresql_get_running_longer_than_5_min_queries.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Find queries running longer than 5 minutes 3 | 4 | 5 | Kill long-running PostgreSQL query processes 6 | Where some queries look like they’re not going to finish, you can use the pid (process ID) 7 | from the pg_stat_activity or pg_locks views to terminate the running process. 8 | 9 | pg_cancel_backend(pid) will attempt to gracefully kill a running query process. 10 | pg_terminate_backend(pid) will immediately kill the running query process, 11 | but potentially have side affects across additional queries running on your database server. 12 | The full connection may be reset when running pg_terminate_backend, so other running queries can be affected. 13 | Use as a last resort. 
14 | */ 15 | SELECT pid, 16 | user, 17 | pg_stat_activity.query_start, 18 | now() - pg_stat_activity.query_start AS query_time, 19 | query, 20 | state, 21 | wait_event_type, 22 | wait_event 23 | FROM pg_stat_activity 24 | WHERE (now() - pg_stat_activity.query_start) > interval '5 minutes'; 25 | 26 | -------------------------------------------------------------------------------- /sql/postgresql/postgresql_get_size_of_databases.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Get database size 3 | */ 4 | 5 | SELECT pg_database.datname, 6 | pg_size_pretty(pg_database_size(pg_database.datname)) AS size 7 | FROM pg_database 8 | ORDER BY pg_database_size(pg_database.datname) DESC; -------------------------------------------------------------------------------- /sql/postgresql/postgresql_get_size_of_index_stats.sql: -------------------------------------------------------------------------------- 1 | /* 2 | * 3 | * Get stats of your DB indexes. 4 | * Interesting to understand which indexes is not used on your instance. 
5 | * After you can easier review tables via \dt command 6 | * Also, better to make an overview via 7 | * https://wiki.postgresql.org/wiki/Index_Maintenance 8 | */ 9 | 10 | SELECT s.schemaname, 11 | s.relname AS tablename, 12 | s.indexrelname AS indexname, 13 | pg_size_pretty(pg_relation_size(s.indexrelid::regclass)) AS index_size, 14 | psut.idx_scan, 15 | psut.seq_scan, 16 | CASE 17 | WHEN (psut.seq_scan + psut.idx_scan) = 0 THEN 18 | 0 19 | ELSE 20 | (100 * psut.idx_scan / (psut.seq_scan + psut.idx_scan)) 21 | END AS percent_of_times_index_used 22 | FROM pg_catalog.pg_stat_user_indexes s 23 | LEFT JOIN pg_stat_user_tables AS psut ON psut.relid = s.relid 24 | JOIN pg_catalog.pg_index i ON s.indexrelid = i.indexrelid 25 | WHERE s.idx_scan = 0 -- has never been scanned 26 | AND 0 <> ALL (i.indkey) -- no index column is an expression 27 | AND NOT i.indisunique -- is not a UNIQUE index 28 | AND NOT EXISTS -- does not enforce a constraint 29 | (SELECT 1 30 | FROM pg_catalog.pg_constraint c 31 | WHERE c.conindid = s.indexrelid) 32 | ORDER BY pg_relation_size(s.indexrelid) DESC 33 | ; -------------------------------------------------------------------------------- /sql/postgresql/postgresql_get_size_of_schema.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Get size of schemas 3 | */ 4 | 5 | SELECT A.schemaname, 6 | pg_size_pretty(SUM(pg_relation_size(C.oid))) as table, 7 | pg_size_pretty (SUM(pg_total_relation_size(C.oid)-pg_relation_size(C.oid))) as index, 8 | pg_size_pretty (SUM(pg_total_relation_size(C.oid))) as table_index, 9 | SUM(n_live_tup) 10 | FROM pg_class C 11 | LEFT JOIN pg_namespace N 12 | ON (N.oid = C.relnamespace) 13 | INNER JOIN pg_stat_user_tables A ON C.relname = A.relname 14 | WHERE nspname NOT IN ('pg_catalog' 15 | , 'information_schema') 16 | AND C.relkind <> 'i' 17 | AND nspname !~ '^pg_toast' 18 | GROUP BY A.schemaname; -------------------------------------------------------------------------------- 
/sql/postgresql/postgresql_get_size_of_tables_in_current_schema.sql: --------------------------------------------------------------------------------
/*
 Get table sizes provided with detail info and simple info.
 */

-- extended query
SELECT schemaname,
       C.relname AS "relation",
       pg_size_pretty(pg_relation_size(C.oid)) AS table,
       pg_size_pretty(pg_total_relation_size(C.oid) - pg_relation_size(C.oid)) AS index,
       pg_size_pretty(pg_total_relation_size(C.oid)) AS table_index,
       n_live_tup
FROM pg_class C
LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
LEFT JOIN pg_stat_user_tables A ON C.relname = A.relname
WHERE nspname NOT IN ('pg_catalog', 'information_schema')
  AND C.relkind <> 'i'
  AND nspname !~ '^pg_toast'
ORDER BY pg_total_relation_size(C.oid) DESC;

-- simple query
SELECT table_name,
       pg_size_pretty(pg_relation_size(quote_ident(table_name))),
       pg_relation_size(quote_ident(table_name))
FROM information_schema.tables
WHERE table_schema = 'public'
ORDER BY 3 DESC;
-------------------------------------------------------------------------------- /sql/postgresql/postgresql_index_cache_hit_rate.sql: --------------------------------------------------------------------------------
/*
 Index cache hit rate: fraction of index block fetches served from shared buffers.
 Fixed: the correct ratio is hit / (hit + read). The previous
 (hit - read) / hit form could go negative, used bigint integer division
 (truncating the result to 0 or 1), and raised division-by-zero on a
 freshly-reset stats collector. Computed in float and guarded with NULLIF
 (returns NULL instead of failing when there is no index activity yet).
 */
SELECT sum(idx_blks_read) AS idx_read,
       sum(idx_blks_hit) AS idx_hit,
       sum(idx_blks_hit)::float / NULLIF(sum(idx_blks_hit) + sum(idx_blks_read), 0) AS ratio
FROM pg_statio_user_indexes;
-------------------------------------------------------------------------------- /sql/postgresql/postgresql_maint_reindex_vacuum_table_by_table.sql: --------------------------------------------------------------------------------
-- https://support.atlassian.com/atlassian-knowledge-base/kb/optimize-and-improve-postgresql-performance-with-vacuum-analyze-and-reindex/
--
-- https://support.atlassian.com/jira/kb/jira-and-postgresql-server-consuming-high-cpu-after-users-login/

-- Per-table maintenance loop: VACUUM, VACUUM FULL, REINDEX, ANALYZE each user
-- table, logging failures per table instead of aborting the whole run.
-- NOTE(review): VACUUM normally cannot run inside a function/DO block because
-- it refuses to execute within a transaction — verify this script actually
-- succeeds on your server version; if every table logs a failure, emit the
-- commands as top-level statements (e.g. via psql \gexec) instead.
DO $$
DECLARE
    tbl RECORD;
BEGIN
    FOR tbl IN (
        SELECT schemaname, tablename
        FROM pg_tables
        WHERE schemaname NOT IN ('pg_catalog', 'information_schema')
    )
    LOOP
        BEGIN
            -- Fixed: was tbl.tabelname (typo). The bad field reference raised
            -- "record has no field" on every iteration, so the exception
            -- handler fired and no table was ever vacuumed.
            RAISE NOTICE 'Processing: %.%', tbl.schemaname, tbl.tablename;

            -- Step 1: Regular VACUUM first to minimize FULL needs
            EXECUTE format('VACUUM (VERBOSE) %I.%I', tbl.schemaname, tbl.tablename);

            -- Step 2: VACUUM FULL without parallel (exclusive lock)
            EXECUTE format('VACUUM (FULL, VERBOSE) %I.%I', tbl.schemaname, tbl.tablename);

            -- Step 3: REINDEX if needed (only for suspected corruption)
            EXECUTE format('REINDEX TABLE %I.%I', tbl.schemaname, tbl.tablename);

            -- Step 4: ANALYZE with statistics
            EXECUTE format('ANALYZE VERBOSE %I.%I', tbl.schemaname, tbl.tablename);

        EXCEPTION WHEN others THEN
            RAISE WARNING 'Failed on %.%: %', tbl.schemaname, tbl.tablename, SQLERRM;
        END;
    END LOOP;
END $$;
-------------------------------------------------------------------------------- /sql/postgresql/postgresql_monitoring_blocking_transaction.sql: --------------------------------------------------------------------------------
/*
 Table/index sizes, largest first.
 NOTE(review): the file name says "monitoring blocking transaction", but this
 query reports table sizes (it duplicates the size-of-tables query) — confirm
 which query was intended here; only the pg_cancel/pg_terminate notes below
 relate to blocked sessions.
 */
SELECT schemaname,
       C.relname AS "relation",
       pg_size_pretty(pg_relation_size(C.oid)) AS table,
       pg_size_pretty(pg_total_relation_size(C.oid) - pg_relation_size(C.oid)) AS index,
       pg_size_pretty(pg_total_relation_size(C.oid)) AS table_index,
       n_live_tup
FROM pg_class C
LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
LEFT JOIN pg_stat_user_tables A ON C.relname = A.relname
WHERE nspname NOT IN ('pg_catalog', 'information_schema')
  AND C.relkind <> 'i'
  AND nspname !~ '^pg_toast'
ORDER BY
pg_total_relation_size (C.oid) DESC 18 | 19 | /* 20 | PID_ID - ID of process to be canceled 21 | */ 22 | /* 23 | 24 | SELECT pg_cancel_backend(PID_ID); 25 | OR 26 | SELECT pg_terminate_backend(PID_ID); 27 | 28 | */ -------------------------------------------------------------------------------- /sql/postgresql/postgresql_non_used_indexes.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Detect non-used indexes 3 | */ 4 | 5 | SELECT schemaname, relname, indexrelname 6 | FROM pg_stat_all_indexes 7 | WHERE idx_scan = 0 and schemaname <> 'pg_toast' and schemaname <> 'pg_catalog'; -------------------------------------------------------------------------------- /sql/postgresql/postgresql_number_of_tables_by_the_number_of_rows.sql: -------------------------------------------------------------------------------- 1 | /* 2 | * 3 | * Get overview of high number of rows tables 4 | * 5 | */ 6 | 7 | select row_count, 8 | count(*) as tables 9 | from ( 10 | select c.relname as table_name, 11 | n.nspname as table_schema, 12 | case 13 | when c.reltuples > 3000000000 then '3b rows and more' 14 | when c.reltuples > 1000000000 then '1b - 3b rows' 15 | when c.reltuples > 1000000 then '1m - 1b rows' 16 | when c.reltuples > 1000 then '1k - 1m rows' 17 | when c.reltuples > 100 then '100 - 1k rows' 18 | when c.reltuples > 10 then '10 - 100 rows' 19 | else '0 - 10 rows' end as row_count, 20 | c.reltuples as rows 21 | from pg_class c 22 | join pg_namespace n on n.oid = c.relnamespace 23 | where c.relkind = 'r' 24 | and n.nspname not in ('pg_catalog', 'information_schema') 25 | ) itv 26 | group by row_count 27 | order by max(rows); --------------------------------------------------------------------------------