├── .gitreview ├── COPYING ├── README.md ├── nsx_vds_id.py ├── os_projects.py ├── os_user.py ├── vcenter_add_lag_vds.py ├── vcenter_cluster.py ├── vcenter_config_host_vds.py ├── vcenter_config_host_vds_only.py ├── vcenter_host_ntp.py ├── vcenter_host_profile.py ├── vcenter_host_vmnic.py ├── vcenter_nfs_ds.py ├── vcenter_pg_activeuplinks.py ├── vcenter_portgroup.py ├── vcenter_rename_vsan_ds.py ├── vcenter_stand_alone_host.py ├── vcenter_vli_deploy.py ├── vcenter_vmk.py ├── vcenter_vmkmigration.py ├── vcenter_vmmigration.py ├── vcenter_vro_deploy.py ├── vcenter_vrops_config.py ├── vcenter_vrops_deploy.py ├── vcenter_vsan_diskcheck.py ├── vcenter_vsan_diskgroup.py ├── vcenter_vsan_healthperf.py ├── vcenter_vsan_stretch_cluster.py ├── vcenter_vsan_witness_deploy.py ├── vds.py ├── vio_cluster_deploy.py ├── vio_ldap.py ├── vio_oms_deploy.py ├── vio_provider_network.py └── vio_unregister_extension.py /.gitreview: -------------------------------------------------------------------------------- 1 | [gerrit] 2 | host=gerrit.eng.vmware.com 3 | port=29418 4 | project=ansible-modules-extra-gpl3 5 | defaultbranch=master 6 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ansible-modules-extra-gpl3 2 | 3 | [Ansible](https://github.com/ansible/ansible) modules for manipulating 4 | pieces of vSphere, vCenter, NSX, and other components of VIO-related 5 | technologies. 6 | 7 | # Minimal Requirements 8 | 9 | * Python ``urllib2`` 10 | * Python ``ast`` 11 | * Python ``datetime`` 12 | * VMware ``pyVmomi`` 13 | 14 | # Notes 15 | 16 | None yet. 17 | 18 | # Examples 19 | ### Create a new virtual distributed switch 20 | - name: Create VDS 21 | local_action: 22 | module: vds 23 | hostname: myvCenter.corp.local 24 | vs_port: 443 25 | username: "admin@vsphere.local" 26 | password: "some_sneaky_password" 27 | vds_name: "myVIOVDS" 28 | numUplinks: 8 29 | numPorts: 128 30 | networkIOControl: true 31 | productVersion: "6.0.0" 32 | state: present 33 | 34 | -------------------------------------------------------------------------------- /nsx_vds_id.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # coding=utf-8 3 | # 4 | # (c) 2015, Joseph Callen 5 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved. 6 | # 7 | # This file is part of Ansible 8 | # 9 | # Ansible is free software: you can redistribute it and/or modify 10 | # it under the terms of the GNU General Public License as published by 11 | # the Free Software Foundation, either version 3 of the License, or 12 | # (at your option) any later version. 13 | # 14 | # Ansible is distributed in the hope that it will be useful, 15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 | # GNU General Public License for more details. 18 | # 19 | # You should have received a copy of the GNU General Public License 20 | # along with Ansible. If not, see . 21 | 22 | ANSIBLE_METADATA = {'metadata_version': '1.0', 23 | 'status': ['preview'], 24 | 'supported_by': 'community'} 25 | 26 | DOCUMENTATION = ''' 27 | module: nsx_vds_id 28 | short_description: Get the specified NSX vdnscope ID 29 | description: 30 | This module is for getting the vdnscope (transport zone) id. Intended to be used as part of 31 | the deploy and configure chaperone VIO role. 
This module will get the vdnscope id and 32 | output it as an Ansible variable to be used in later tasks. 33 | author: VMware 34 | version_added: 2.1 35 | requirements: 36 | - requests 37 | - ElementTree 38 | - json 39 | options: 40 | nsx_manager: 41 | description: 42 | - NSX manager IP 43 | required: True 44 | type: str 45 | nsx_manager_username: 46 | description: 47 | - NSX manager username 48 | required: True 49 | type: str 50 | nsx_manager_password: 51 | description: 52 | - password for the NSX manager user 53 | required: True 54 | type: str 55 | nsx_api_version: 56 | description: 57 | - NSX API version; defaults to "2.0" 58 | choices: [2.0, 1.0] 59 | required: False 60 | vdnscope_name: 61 | description: 62 | - The name of the vdnscope you need the ID of 63 | required: True 64 | type: str 65 | ansible_variable_name: 66 | description: 67 | - valid ansible variable name for the vdnscope id 68 | required: True 69 | type: str 70 | ''' 71 | 72 | EXAMPLES = ''' 73 | - name: Get Transport Zone Id 74 | nsx_vds_id: 75 | nsx_manager: "{{ vio_nsx_manager_ip }}" 76 | nsx_manager_username: "{{ vio_nsx_manager_username }}" 77 | nsx_manager_password: "{{ vio_nsx_manager_password }}" 78 | nsx_api_version: "2.0" 79 | vdnscope_name: "{{ vio_nsx_transport_zone }}" 80 | ansible_variable_name: "vdnscope_id" 81 | 82 | - name: Debug vdnscope id variable 83 | debug: var=vdnscope_id 84 | ''' 85 | 86 | RETURN = ''' 87 | description: Returns the vdnscope id for the named vdnscope 88 | returned: object_id 89 | type: str 90 | sample: vdnscope-123 91 | ''' 92 | 93 | 94 | try: 95 | import requests 96 | import xml.etree.ElementTree as ET 97 | import json 98 | IMPORTS = True 99 | except ImportError: 100 | IMPORTS = False 101 | 102 | 103 | class NsxRestClient(object): 104 | 105 | _url_template_prefix = "https://{}/{}" 106 | 107 | def __init__(self, module, server, username, password, api_version, verify, stream=True): 108 | self.module = module 109 | self._server = server 110 | self._username = username 111 | self._password = password 112 | self._api_version = api_version 113 | self._verify = verify 114 | self._stream = stream 115 | self._session = requests.Session() 116 | self._session.verify = self._verify 117 | self._session.auth = (self._username, self._password) 118 | 119 | def _api_url(self, path): 120 | api_url_template = "api/{}/{}" 121 | api_url_path = api_url_template.format(self._api_version, path) 122 | return self._url_template_prefix.format(self._server, api_url_path) 123 | 124 | def do_session_request(self, method, path, data=None, headers=None, params=None, stream=True): 125 | 126 | url = self._api_url(path) 127 | 128 | response = self._session.request( 129 | method, 130 | url, 131 | headers=headers, 132 | stream=stream, 133 | params=params, 134 | data=data 135 | ) 136 | 137 | return response 138 | 139 | def vds_scope_id(self, response_content, scope_name): 140 | 141 | # Check every vdnScope element; the named scope may not be listed first 142 | root = ET.fromstring(response_content) 143 | 144 | for scope in root.findall('vdnScope'): 145 | if scope.findtext('name') == scope_name: 146 | return scope.findtext('objectId') 147 | 148 | return None 149 | 150 | def main(): 151 | argument_spec = dict( 152 | nsx_manager=dict(type='str', required=True), 153 | nsx_manager_username=dict(type='str', required=True), 154 | nsx_manager_password=dict(type='str', required=True, no_log=True), 155 | nsx_api_version=dict(type='str', default="2.0"), 156 | vdnscope_name=dict(type='str', required=True), 157 | ansible_variable_name=dict(type='str'), 158 | ) 159 | 160 | module = AnsibleModule(argument_spec=argument_spec, 
supports_check_mode=False) 161 | 162 | if not IMPORTS: 163 | module.fail_json(msg="failed to import required modules") 164 | 165 | rheaders = {'Content-Type': 'application/xml'} 166 | 167 | n = NsxRestClient( 168 | module, 169 | module.params['nsx_manager'], 170 | module.params['nsx_manager_username'], 171 | module.params['nsx_manager_password'], 172 | module.params['nsx_api_version'], 173 | False 174 | ) 175 | 176 | resp = n.do_session_request('GET', 'vdn/scopes', headers=rheaders) 177 | 178 | if resp.status_code != 200: 179 | module.fail_json(msg="Failed with response code: {}".format(resp.status_code)) 180 | 181 | vds_id = n.vds_scope_id(resp.content, module.params['vdnscope_name']) 182 | 183 | if vds_id: 184 | module.exit_json(changed=False, object_id=vds_id) 185 | else: 186 | module.fail_json(msg="Failed to get vdnscope id") 187 | 188 | 189 | from ansible.module_utils.basic import * 190 | from ansible.module_utils.facts import * 191 | 192 | if __name__ == '__main__': 193 | main() 194 | -------------------------------------------------------------------------------- /os_projects.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # (c) 2015, Joseph Callen 4 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved. 5 | # 6 | # This file is part of Ansible 7 | # 8 | # Ansible is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # Ansible is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with Ansible. If not, see .
20 | 21 | ANSIBLE_METADATA = {'metadata_version': '1.0', 22 | 'status': ['preview'], 23 | 'supported_by': 'community'} 24 | 25 | DOCUMENTATION = ''' 26 | module: os_projects 27 | short_description: Creates or deletes an OpenStack project 28 | description: 29 | Creates or deletes an OpenStack project 30 | requirements: 31 | - keystoneauth1 32 | - keystoneclient 33 | - inspect 34 | - logging 35 | - ansible 2.x 36 | Tested on: 37 | - VIO 3.0 / Openstack Mitaka 38 | author: VMware 39 | options: 40 | auth_url: 41 | description: 42 | - keystone authentication for the openstack api endpoint 43 | example: 44 | - https://:5000/v3 45 | required: True 46 | auth_user: 47 | description: 48 | - User with OpenStack admin rights, usually admin or an LDAP/AD admin user 49 | required: True 50 | auth_password: 51 | description: 52 | - The user's password 53 | required: True 54 | auth_project: 55 | description: 56 | - The user's project 57 | auth_project_domain: 58 | description: 59 | - The user's project domain 60 | required: True 61 | auth_user_domain: 62 | description: 63 | - The user's user domain 64 | required: True 65 | project_name: 66 | description: 67 | - Project to create or delete 68 | required: True 69 | enabled: 70 | description: 71 | - Whether the project is enabled; defaults to True 72 | required: False 73 | type: bool 74 | project_domain_id: 75 | description: 76 | - Domain for the project being created or deleted; defaults to 'default' 77 | required: False 78 | project_description: 79 | description: 80 | - Description for the project 81 | required: False 82 | state: 83 | description: 84 | - Whether the project should be present or absent 85 | choices: ['present', 'absent'] 86 | required: True 87 | ''' 88 | 89 | EXAMPLES = ''' 90 | - name: Demo Project 91 | os_projects: 92 | auth_url: 'https://{{ vio_loadbalancer_vip }}:5000/v3' 93 | auth_user: "{{ authuser }}" 94 | auth_password: "{{ authpass }}" 95 | auth_project: 'admin' 96 | auth_project_domain: 'default' 97 | auth_user_domain: 'default' 98 | project_name: "{{ demo_project_name }}" 99 | enabled: True 100 | state: "{{ desired_state }}" 101 | ''' 102 | 103 | RETURN = ''' 104 | description: Returns the project id 105 | returned: project_id 106 | type: str 107 | sample: uuid 108 | ''' 109 | 110 | try: 111 | from keystoneauth1.identity import v3 112 | from keystoneauth1 import session 113 | from keystoneclient.v3 import client 114 | HAS_CLIENTS = True 115 | except ImportError: 116 | HAS_CLIENTS = False 117 | 118 | 119 | class OpenstackProject(object): 120 | 121 | def __init__(self, module): 122 | super(OpenstackProject, self).__init__() 123 | self.module = module 124 | self.auth_url = module.params['auth_url'] 125 | self.auth_user = module.params['auth_user'] 126 | self.auth_pass = module.params['auth_password'] 127 | self.auth_project = module.params['auth_project'] 128 | self.auth_project_domain = module.params['auth_project_domain'] 129 | self.auth_user_domain = module.params['auth_user_domain'] 130 | self.project_name = module.params['project_name'] 131 | self.project_enabled = module.params['enabled'] 132 | self.project_desc = module.params['project_description'] 133 | self.project_domain_id = \ 134 | module.params['project_domain_id'] if module.params['project_domain_id'] else 'default' 135 | self.project_description = \ 136 | module.params['project_description'] if module.params['project_description'] else 'New Project: %s' % self.project_name 137 | self.ks = self.keystone_auth() 138 | self.project_id = None 139 | self.project = None 140 | 141 | def keystone_auth(self): 142 | ks = None 143 | try: 144 | auth = 
v3.Password(auth_url=self.auth_url, 145 | username=self.auth_user, 146 | password=self.auth_pass, 147 | project_name=self.auth_project, 148 | project_domain_id=self.auth_project_domain, 149 | user_domain_id=self.auth_user_domain) 150 | sess = session.Session(auth=auth, verify=False) 151 | ks = client.Client(session=sess) 152 | except Exception as e: 153 | msg = "Failed to get client: %s " % str(e) 154 | self.module.fail_json(msg=msg) 155 | return ks 156 | 157 | def run_state(self): 158 | changed = False 159 | result = None 160 | msg = None 161 | 162 | current_state = self.check_project_state() 163 | desired_state = self.module.params['state'] 164 | module_state = (current_state == desired_state) 165 | 166 | if module_state: 167 | changed, result = self.state_exit_unchanged() 168 | 169 | if current_state == 'absent' and desired_state == 'present': 170 | changed, project = self.state_create_project(self.project_name, 171 | self.project_domain_id, 172 | self.project_description) 173 | self.project_id = project.id 174 | result = self.project_id 175 | 176 | if current_state == 'present' and desired_state == 'absent': 177 | changed, delete_result = self.state_delete_project(self.project) 178 | result = str(delete_result[0]) 179 | 180 | self.module.exit_json(changed=changed, result=result, project_id=self.project_id) 181 | 182 | def state_exit_unchanged(self): 183 | return False, self.project_id 184 | 185 | def state_delete_project(self, project): 186 | changed = False 187 | delete_status = None 188 | 189 | try: 190 | delete_status = self.ks.projects.delete(project) 191 | changed = True 192 | except Exception as e: 193 | msg = "Failed to delete Project: %s " % str(e) 194 | self.module.fail_json(msg=msg) 195 | return changed, delete_status 196 | 197 | def state_create_project(self, _name, _domain_id, _description): 198 | changed = False 199 | project = None 200 | 201 | try: 202 | project = self.ks.projects.create(_name, _domain_id, _description) 203 | changed = True 204 | except Exception as e: 205 | msg = "Failed to create project: %s " % str(e) 206 | self.module.fail_json(msg=msg) 207 | 208 | return changed, project 209 | 210 | def check_project_state(self): 211 | project = None 212 | try: 213 | project = [p for p in self.ks.projects.list() if p.name == self.project_name][0] 214 | except IndexError: 215 | return 'absent' 216 | self.project_id = project.id 217 | self.project = project 218 | 219 | return 'present' 220 | 221 | 222 | 223 | def main(): 224 | argument_spec = dict( 225 | auth_url=dict(required=True, type='str'), 226 | auth_user=dict(required=True, type='str'), 227 | auth_password=dict(required=True, type='str', no_log=True), 228 | auth_project=dict(required=True, type='str'), 229 | auth_project_domain=dict(required=True, type='str'), 230 | auth_user_domain=dict(required=True, type='str'), 231 | project_name=dict(required=True, type='str'), 232 | enabled=dict(required=True, type='bool'), 233 | project_domain_id=dict(required=False, type='str'), 234 | project_description=dict(required=False, type='str'), 235 | state=dict(default='present', choices=['present', 'absent'], type='str'), 236 | ) 237 | 238 | module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) 239 | 240 | if not HAS_CLIENTS: 241 | module.fail_json(msg='python-keystone is required for this module') 242 | 243 | os = OpenstackProject(module) 244 | os.run_state() 245 | 246 | 247 | from ansible.module_utils.basic import * 248 | 249 | if __name__ == '__main__': 250 | main() 251 | 
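Both os_projects.py above and os_user.py below share the same authentication preamble: a Keystone v3 Password auth plugin wrapped in a keystoneauth1 session (with TLS verification disabled, as in the modules), handed to the keystoneclient v3 client. A minimal standalone sketch of that pattern follows; the endpoint and credentials here are placeholders, not values from this repo.

    from keystoneauth1.identity import v3
    from keystoneauth1 import session
    from keystoneclient.v3 import client

    # Placeholder endpoint and credentials -- substitute your deployment's values.
    auth = v3.Password(auth_url='https://vio.example.com:5000/v3',
                       username='admin',
                       password='secret',
                       project_name='admin',
                       project_domain_id='default',
                       user_domain_id='default')
    sess = session.Session(auth=auth, verify=False)  # modules above skip TLS verification
    ks = client.Client(session=sess)

    # Both modules then resolve objects by name the same way, e.g.:
    project = next((p for p in ks.projects.list() if p.name == 'demo'), None)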
-------------------------------------------------------------------------------- /os_user.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # coding=utf-8 3 | # 4 | # (c) 2015, Joseph Callen 5 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved. 6 | # 7 | # This file is part of Ansible 8 | # 9 | # Ansible is free software: you can redistribute it and/or modify 10 | # it under the terms of the GNU General Public License as published by 11 | # the Free Software Foundation, either version 3 of the License, or 12 | # (at your option) any later version. 13 | # 14 | # Ansible is distributed in the hope that it will be useful, 15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 | # GNU General Public License for more details. 18 | # 19 | # You should have received a copy of the GNU General Public License 20 | # along with Ansible. If not, see . 21 | 22 | 23 | DOCUMENTATION = ''' 24 | module: os_user 25 | short_description: Creates or deletes a user, adding the user to a project with the specified roles 26 | description: 27 | Creates or deletes a user, adding the user to a project with the specified roles. 28 | requirements: 29 | - ansible 2.x 30 | - keystoneauth1 31 | - keystoneclient 32 | author: VMware 33 | version_added: 2.1 34 | options: 35 | auth_url: 36 | description: 37 | - keystone authentication for the openstack api endpoint 38 | example: 39 | - https://:5000/v3 40 | required: True 41 | auth_user: 42 | description: 43 | - User with OpenStack admin rights, usually admin or an LDAP/AD admin user 44 | required: True 45 | auth_password: 46 | description: 47 | - The user's password 48 | required: True 49 | auth_project: 50 | description: 51 | - The user's project 52 | auth_project_domain: 53 | description: 54 | - The user's project domain 55 | required: True 56 | auth_user_domain: 57 | description: 58 | - The user's user domain 59 | required: True 60 | user_name: 61 | description: 62 | - Username to create or delete 63 | required: True 64 | type: str 65 | user_password: 66 | description: 67 | - The user's password 68 | required: True 69 | domain: 70 | description: 71 | - The user's domain 72 | required: False 73 | state: 74 | description: 75 | - Whether the user should be present or absent 76 | choices: ['present', 'absent'] 77 | required: True 78 | default_project: 79 | description: 80 | - default project for the user 81 | required: False 82 | type: str 83 | roles: 84 | description: 85 | - list of roles for the user 86 | required: False 87 | type: list 88 | email: 89 | description: 90 | - email address for the user 91 | required: False 92 | notes: 93 | - domain, default_project, and roles must be specified together 94 | ''' 95 | 96 | EXAMPLES = ''' 97 | - name: Demo User 98 | os_user: 99 | auth_url: 'https://{{ vio_loadbalancer_vip }}:5000/v3' 100 | auth_user: "{{ authuser }}" 101 | auth_password: "{{ authpass }}" 102 | auth_project: 'admin' 103 | auth_project_domain: 'default' 104 | auth_user_domain: 'default' 105 | user_name: "{{ demo_username }}" 106 | user_password: "{{ demo_user_password }}" 107 | default_project: "{{ demo_project_name }}" 108 | domain: 'default' 109 | roles: 110 | - '_member_' 111 | - 'heat_stack_user' 112 | state: "{{ desired_state }}" 113 | register: demo_project_user 114 | tags: 115 | - quick_val 116 | ''' 117 | 118 | RETURN = ''' 119 | description: Returns the user id 120 | returned: result 121 | type: str 122 | sample: uuid 123 | ''' 124 | 125 | try: 126 | from keystoneauth1.identity import v3 127 | from keystoneauth1 import session 128 | 
from keystoneauth1 import exceptions as key_auth1_exceptions 129 | from keystoneclient.v3 import client 130 | HAS_CLIENTS = True 131 | except ImportError: 132 | HAS_CLIENTS = False 133 | 134 | 135 | member_roles = ['_member_', 136 | 'heat_stack_owner', 137 | 'heat_stack_user', 138 | 'admin'] 139 | 140 | 141 | class OpenstackUser(object): 142 | 143 | def __init__(self, module): 144 | super(OpenstackUser, self).__init__() 145 | self.module = module 146 | self.auth_url = module.params['auth_url'] 147 | self.auth_user = module.params['auth_user'] 148 | self.auth_pass = module.params['auth_password'] 149 | self.auth_project = module.params['auth_project'] 150 | self.auth_project_domain = module.params['auth_project_domain'] 151 | self.auth_user_domain = module.params['auth_user_domain'] 152 | self.user_name = module.params['user_name'] 153 | self.user_password = module.params['user_password'] 154 | self.ks = self.keystone_auth() 155 | self.user = None 156 | self.user_id = None 157 | self.project_member = None 158 | self.roles = None 159 | self.project = None 160 | 161 | def keystone_auth(self): 162 | ks = None 163 | try: 164 | auth = v3.Password(auth_url=self.auth_url, 165 | username=self.auth_user, 166 | password=self.auth_pass, 167 | project_name=self.auth_project, 168 | project_domain_id=self.auth_project_domain, 169 | user_domain_id=self.auth_user_domain) 170 | sess = session.Session(auth=auth, verify=False) 171 | ks = client.Client(session=sess) 172 | except Exception as e: 173 | msg = "Failed to get client: %s " % str(e) 174 | self.module.fail_json(msg=msg) 175 | return ks 176 | 177 | def run_state(self): 178 | changed = False 179 | result = None 180 | 181 | current_state = self.check_user_state() 182 | desired_state = self.module.params['state'] 183 | exit_unchanged = (current_state == desired_state) 184 | 185 | if exit_unchanged: 186 | changed, result = self.state_exit_unchanged() 187 | 188 | if current_state == 'absent' and desired_state == 'present': 189 | params = self._setup_params() 190 | 191 | if not self.user: 192 | changed, user = self.state_create_user(**params) 193 | self.user_id = user.id 194 | result = self.user_id 195 | 196 | if self.roles: 197 | for role in self.roles: 198 | role_assign = self.user_role(params['name'], 199 | params['default_project'].name, 200 | role) 201 | changed = True 202 | result = self.user_id 203 | 204 | if current_state == 'present' and desired_state == 'absent': 205 | changed, delete_result = self.state_delete_user() 206 | result = self.user_id 207 | 208 | self.module.exit_json(changed=changed, result=result) 209 | 210 | def state_exit_unchanged(self): 211 | return False, self.user_id 212 | 213 | def state_delete_user(self): 214 | changed = False 215 | delete_status = None 216 | 217 | try: 218 | delete_status = self.ks.users.delete(self.user) 219 | changed = True 220 | except Exception as e: 221 | msg = "Failed to delete User: %s " % str(e) 222 | self.module.fail_json(msg=msg) 223 | return changed, delete_status 224 | 225 | def _setup_params(self): 226 | user_data = {'name': self.user_name, 227 | 'password': self.user_password} 228 | 229 | _optional_params = ['domain', 'default_project', 230 | 'email', 'description'] 231 | 232 | params = [p for p in self.module.params.keys() if p in _optional_params and \ 233 | self.module.params[p]] 234 | 235 | if not params: 236 | return user_data 237 | 238 | for param in params: 239 | if param == 'default_project': 240 | project = self.get_project(self.module.params[param]) 241 | user_data.update({param: 
project}) 242 | else: 243 | user_data.update({param: self.module.params[param]}) 244 | 245 | return user_data 246 | 247 | def state_create_user(self, **kwargs): 248 | changed = False 249 | user = None 250 | 251 | try: 252 | user = self.ks.users.create(**kwargs) 253 | changed = True 254 | except Exception as e: 255 | msg = "Failed to create user: %s " % str(e) 256 | self.module.fail_json(msg=msg) 257 | 258 | return changed, user 259 | 260 | def get_project(self, project_name): 261 | project = None 262 | try: 263 | project = [p for p in self.ks.projects.list() if p.name == project_name][0] 264 | except IndexError: 265 | return project 266 | return project 267 | 268 | def get_role(self, role_name): 269 | role = None 270 | try: 271 | role = [r for r in self.ks.roles.list() if r.name == role_name][0] 272 | except IndexError: 273 | return role 274 | 275 | return role 276 | 277 | def user_role(self, user_name, project_name, role_name): 278 | grant_role = None 279 | _role = self.get_role(role_name) 280 | _project = self.get_project(project_name) 281 | _user = self.get_user(user_name) 282 | 283 | try: 284 | grant_role = self.ks.roles.grant(_role, user=_user, 285 | project=_project) 286 | except Exception as e: 287 | msg = "Failed to grant role: %s " % str(e) 288 | self.module.fail_json(msg=msg) 289 | 290 | return grant_role 291 | 292 | def get_user(self, user_name): 293 | user = None 294 | try: 295 | user = [u for u in self.ks.users.list() if u.name == user_name][0] 296 | except IndexError: 297 | return user 298 | return user 299 | 300 | def check_user_project(self, user, project): 301 | state = False 302 | 303 | user_projects = self.ks.projects.list(user=user) 304 | 305 | if not user_projects: 306 | return state 307 | if project in self.ks.projects.list(user=user): 308 | state = True 309 | return state 310 | 311 | def check_user_roles(self, project, user, roles): 312 | state = [] 313 | user_roles = None 314 | try: 315 | user_roles = self.ks.roles.list(user=user, project=project) 316 | except Exception as e: 317 | return False 318 | 319 | if not user_roles: 320 | return roles 321 | 322 | user_roles_names = [r.name for r in user_roles] 323 | 324 | if set(roles) == set(user_roles_names): 325 | return state 326 | 327 | state = list(set(roles) - set(user_roles_names)) 328 | return state 329 | 330 | def check_user_state(self): 331 | state = 'absent' 332 | 333 | user = self.get_user(self.user_name) 334 | 335 | if not user: 336 | self.roles = \ 337 | self.module.params['roles'] if self.module.params['roles'] else [] 338 | return state 339 | 340 | self.user = user 341 | self.user_id = user.id 342 | 343 | if self.module.params['default_project']: 344 | project = self.get_project(self.module.params['default_project']) 345 | 346 | if not project: 347 | msg = "Failed finding project: %s " % self.module.params['default_project'] 348 | self.module.fail_json(msg=msg) 349 | 350 | self.project = project 351 | user_project_state = self.check_user_project(self.user, project) 352 | 353 | if user_project_state: 354 | self.project_member = True 355 | 356 | if self.module.params['roles']: 357 | desired_user_roles = self.module.params['roles'] 358 | roles = self.check_user_roles(self.project, self.user, desired_user_roles) 359 | 360 | if roles: 361 | self.roles = roles 362 | 363 | if self.user and self.project_member and not self.roles: 364 | state = 'present' 365 | 366 | return state 367 | 368 | 369 | def main(): 370 | argument_spec = dict( 371 | auth_url=dict(required=True, type='str'), 372 | 
auth_user=dict(required=True, type='str'), 373 | auth_password=dict(required=True, type='str', no_log=True), 374 | auth_project=dict(required=True, type='str'), 375 | auth_project_domain=dict(required=True, type='str'), 376 | auth_user_domain=dict(required=True, type='str'), 377 | user_name=dict(required=True, type='str'), 378 | user_password=dict(required=True, type='str', no_log=True), 379 | domain=dict(required=False, type='str'), 380 | default_project=dict(required=False, type='str'), 381 | roles=dict(required=False, type='list'), 382 | email=dict(required=False, type='str'), 383 | description=dict(required=False, type='str'), 384 | state=dict(default='present', choices=['present', 'absent'], type='str'), 385 | ) 386 | 387 | module = AnsibleModule(argument_spec=argument_spec, 388 | supports_check_mode=False, 389 | required_together=[ 390 | ['domain', 'default_project', 'roles'], 391 | ]) 392 | 393 | if not HAS_CLIENTS: 394 | module.fail_json(msg='python-keystone is required for this module') 395 | 396 | os = OpenstackUser(module) 397 | os.run_state() 398 | 399 | 400 | from ansible.module_utils.basic import * 401 | 402 | if __name__ == '__main__': 403 | main() 404 | -------------------------------------------------------------------------------- /vcenter_add_lag_vds.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # (c) 2015, Joseph Callen 4 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved. 5 | # 6 | # This file is part of Ansible 7 | # 8 | # Ansible is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # Ansible is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with Ansible. If not, see . 20 | 21 | DOCUMENTATION = ''' 22 | module: vcenter_add_lag_vds 23 | short_description: add a new link aggregation group to a virtual distributed switch 24 | description: 25 | - add a new link aggregation group to a virtual distributed switch 26 | options: 27 | vds_name: 28 | description: 29 | - name of the vds to add the lag to 30 | type: str 31 | lag_name: 32 | description: 33 | - name of the lag group 34 | type: str 35 | num_ports: 36 | description: 37 | - number of uplink ports in the lag 38 | type: int 39 | lag_mode: 40 | description: 41 | - mode of the lag 42 | choices: ['active', 'passive'] 43 | type: str 44 | lb_mode: 45 | description: 46 | - load balancing mode for the lag 47 | choices: see the lb_mode choices in argument_spec below 48 | type: str 49 | state: 50 | description: 51 | - If the lag should be present or absent 52 | choices: ['present', 'absent'] 53 | required: True 54 | ''' 55 | 56 | EXAMPLES = ''' 57 | - name: create lags 58 | vcenter_add_lag_vds: 59 | hostname: '172.16.0.100' 60 | username: 'administrator@corp.local' 61 | password: 'VMware1!' 
62 | validate_certs: False 63 | vds_name: 'vds001' 64 | lag_name: 'lag1' 65 | num_ports: 2 66 | lag_mode: 'active' 67 | lb_mode: 'srcTcpUdpPort' 68 | state: 'present' 69 | ''' 70 | 71 | 72 | try: 73 | from pyVmomi import vim, vmodl 74 | HAS_PYVMOMI = True 75 | except ImportError: 76 | HAS_PYVMOMI = False 77 | 78 | 79 | vc = {} 80 | 81 | 82 | def state_destroy_lag(module): 83 | module.exit_json(msg="DESTROY") 84 | 85 | 86 | def state_exit_unchanged(module): 87 | module.exit_json(msg="EXIT UNCHANGED") 88 | 89 | 90 | def state_update_lag(module): 91 | 92 | vds = vc['vds'] 93 | spec = lag_spec(module, True) 94 | 95 | changed, result = create_lag(module, vds, spec) 96 | 97 | if not changed: 98 | module.fail_json(msg="Failed to update lag: {}".format(module.params['lag_name'])) 99 | 100 | module.exit_json(changed=changed, result=result) 101 | 102 | 103 | def lag_spec(module, update): 104 | 105 | lacp_group_config = vim.dvs.VmwareDistributedVirtualSwitch.LacpGroupConfig( 106 | name = module.params['lag_name'], 107 | mode = module.params['lag_mode'], 108 | uplinkNum = module.params['num_ports'], 109 | loadbalanceAlgorithm = module.params['lb_mode'], 110 | ) 111 | 112 | if update: 113 | operation_mode = "edit" 114 | lacp_group_config.key = vc['vds_lag'].key 115 | else: 116 | operation_mode = "add" 117 | 118 | lacp_group_spec = vim.dvs.VmwareDistributedVirtualSwitch.LacpGroupSpec( 119 | lacpGroupConfig = lacp_group_config, 120 | operation = operation_mode, 121 | ) 122 | 123 | return lacp_group_spec 124 | 125 | 126 | def create_lag(module, vds, spec): 127 | 128 | changed = False 129 | result = None 130 | 131 | try: 132 | create_task = vds.UpdateDVSLacpGroupConfig_Task([spec]) 133 | changed, result = wait_for_task(create_task) 134 | except vim.fault.DvsFault as dvs_fault: 135 | module.fail_json(msg="Failed to create lag with fault: {}".format(str(dvs_fault))) 136 | except vmodl.fault.NotSupported as not_supported: 137 | module.fail_json(msg="Failed to create lag. 
Check whether multiple LACP groups are supported on this switch: {}".format(str(not_supported))) 138 | except Exception as e: 139 | module.fail_json(msg="Failed to create lag: {}".format(str(e))) 140 | 141 | return changed, result 142 | 143 | 144 | def state_create_lag(module): 145 | 146 | vds = vc['vds'] 147 | spec = lag_spec(module, False) 148 | 149 | changed, result = create_lag(module, vds, spec) 150 | 151 | if not changed: 152 | module.fail_json(msg="Failed to create lag: {}".format(module.params['lag_name'])) 153 | 154 | module.exit_json(changed=changed, result=result) 155 | 156 | 157 | def check_lag_present(module): 158 | 159 | vds_lag = None 160 | vds = vc['vds'] 161 | 162 | vds_lags = vds.config.lacpGroupConfig 163 | 164 | if not vds_lags: 165 | return vds_lag 166 | 167 | for lag in vds_lags: 168 | if lag.name == module.params['lag_name']: 169 | vds_lag = lag 170 | 171 | return vds_lag 172 | 173 | 174 | def check_lag_config(module): 175 | 176 | lag = vc['vds_lag'] 177 | 178 | check_vals = [ 179 | (module.params['num_ports'] == lag.uplinkNum), 180 | (module.params['lag_mode'] == lag.mode), 181 | (module.params['lb_mode'] == lag.loadbalanceAlgorithm) 182 | ] 183 | 184 | if False in check_vals: 185 | return False 186 | else: 187 | return True 188 | 189 | 190 | def check_lag_state(module): 191 | state = 'absent' 192 | 193 | si = connect_to_api(module) 194 | vc['si'] = si 195 | 196 | vds_name = module.params['vds_name'] 197 | 198 | vds = find_dvs_by_name(si, vds_name) 199 | 200 | if not vds: 201 | module.fail_json(msg="Failed to get vds: {}".format(vds_name)) 202 | 203 | vc['vds'] = vds 204 | 205 | lag = check_lag_present(module) 206 | 207 | if not lag: 208 | return state 209 | 210 | vc['vds_lag'] = lag 211 | 212 | lag_config = check_lag_config(module) 213 | 214 | if not lag_config: 215 | state = 'update' 216 | else: 217 | state = 'present' 218 | 219 | return state 220 | 221 | 222 | 223 | def main(): 224 | argument_spec = vmware_argument_spec() 225 | 226 | argument_spec.update( 227 | dict( 228 | vds_name=dict(type='str', required=True), 229 | lag_name=dict(type='str', required=True), 230 | num_ports=dict(type='int', required=True), 231 | lag_mode=dict(required=True, choices=['active', 'passive'], type='str'), 232 | state=dict(required=True, choices=['present', 'absent'], type='str'), 233 | lb_mode=dict( 234 | required=True, choices=[ 235 | 'srcTcpUdpPort', 236 | 'srcDestIpTcpUdpPortVlan', 237 | 'srcIpVlan', 238 | 'srcDestTcpUdpPort', 239 | 'srcMac', 240 | 'destIp', 241 | 'destMac', 242 | 'vlan', 243 | 'srcDestIp', 244 | 'srcIpTcpUdpPortVlan', 245 | 'srcDestIpTcpUdpPort', 246 | 'srcDestMac', 247 | 'destIpTcpUdpPort', 248 | 'srcPortId', 249 | 'srcIp', 250 | 'srcIpTcpUdpPort', 251 | 'destIpTcpUdpPortVlan', 252 | 'destTcpUdpPort', 253 | 'destIpVlan', 254 | 'srcDestIpVlan', 255 | ] 256 | ) 257 | ) 258 | ) 259 | 260 | module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) 261 | 262 | if not HAS_PYVMOMI: 263 | module.fail_json(msg='pyvmomi is required for this module') 264 | 265 | lag_states = { 266 | 'absent': { 267 | 'present': state_destroy_lag, 268 | 'update': state_destroy_lag, 269 | 'absent': state_exit_unchanged, 270 | }, 271 | 'present': { 272 | 'present': state_exit_unchanged, 273 | 'update': state_update_lag, 274 | 'absent': state_create_lag, 275 | } 276 | } 277 | 278 | desired_state = module.params['state'] 279 | current_state = check_lag_state(module) 280 | lag_states[desired_state][current_state](module) 281 | 282 | from ansible.module_utils.basic import * 283 | from 
ansible.module_utils.vmware import * 284 | 285 | if __name__ == '__main__': 286 | main() 287 | -------------------------------------------------------------------------------- /vcenter_host_ntp.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # (c) 2015, Joseph Callen 4 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved. 5 | # 6 | # This file is part of Ansible 7 | # 8 | # Ansible is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # Ansible is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with Ansible. If not, see . 20 | 21 | ANSIBLE_METADATA = {'metadata_version': '1.0', 22 | 'status': ['preview'], 23 | 'supported_by': 'community'} 24 | 25 | DOCUMENTATION = ''' 26 | module: vcenter_host_ntp 27 | short_description: Sets the NTP configuration for ESXi hosts in a cluster 28 | description: 29 | Sets the NTP configuration for ESXi hosts in a cluster 30 | requirements: 31 | - pyvmomi 6 32 | - ansible 2.x 33 | Tested on: 34 | - vcenter 6.0 35 | - pyvmomi 6.5 36 | - esx 6 37 | - ansible 2.1.2 38 | options: 39 | hostname: 40 | description: 41 | - The hostname or IP address of the vSphere vCenter API server 42 | required: True 43 | username: 44 | description: 45 | - The username of the vSphere vCenter with Admin rights 46 | required: True 47 | password: 48 | description: 49 | - The password of the vSphere vCenter user 50 | required: True 51 | aliases: ['pass', 'pwd'] 52 | cluster_name: 53 | description: 54 | - The name of the vSphere cluster 55 | required: True 56 | ntp_server: 57 | description: 58 | - The IP or FQDN of the NTP server 59 | required: True 60 | state: 61 | description: 62 | - Desired state of the host NTP configuration 63 | choices: ['present', 'absent'] 64 | required: True 65 | 66 | ''' 67 | 68 | EXAMPLES = ''' 69 | - name: Host NTP 70 | vcenter_host_ntp: 71 | hostname: "{{ vcenter }}" 72 | username: "{{ vcenter_user }}" 73 | password: "{{ vcenter_password }}" 74 | validate_certs: "{{ vcenter_validate_certs }}" 75 | cluster_name: "{{ cluster_name }}" 76 | ntp_server: ntp.your_org.com 77 | state: "{{ global_state }}" 78 | tags: workflow_tag 79 | ''' 80 | 81 | RETURN = ''' 82 | host_results: 83 | description: List of dicts for hosts changed 84 | returned: host_results 85 | type: list 86 | sample: "{'name': str, 'host_ntp_server_changed': bool, 'restart_ntp': bool}" 87 | ''' 88 | 89 | try: 90 | from pyVmomi import vim, vmodl 91 | IMPORTS = True 92 | except ImportError: 93 | IMPORTS = False 94 | 95 | 96 | class VcenterHostNtp(object): 97 | """Manages the NTP configuration for every ESXi host in a cluster""" 98 | def __init__(self, module): 99 | super(VcenterHostNtp, self).__init__() 100 | self.module = module 101 | self.cluster_name = module.params['cluster_name'] 102 | self.ntp_server = module.params['ntp_server'] 103 | self.desired_state = module.params['state'] 104 | self.hosts = None 105 | self.host_update_list = [] 106 | self.content = connect_to_api(module) 107 | 108 | def run_state(self): 109 | 110 | desired_state = self.module.params['state'] 111 | current_state = 
self.current_state() 112 | module_state = (desired_state == current_state) 113 | 114 | if module_state: 115 | self.state_exit_unchanged() 116 | 117 | if desired_state == 'absent' and current_state == 'present': 118 | self.state_delete() 119 | 120 | if desired_state == 'present' and current_state == 'absent': 121 | self.state_create() 122 | 123 | if desired_state == 'present' and current_state == 'update': 124 | self.state_update() 125 | 126 | self.module.exit_json(changed=False, result=None) 127 | 128 | 129 | def ntp_spec(self): 130 | ntp_config_spec = vim.host.NtpConfig() 131 | 132 | if self.module.params['state'] == 'present': 133 | ntp_config_spec.server = [self.ntp_server] 134 | 135 | if self.module.params['state'] == 'absent': 136 | ntp_config_spec.server = [] 137 | 138 | update_spec = vim.host.DateTimeConfig() 139 | update_spec.ntpConfig = ntp_config_spec 140 | 141 | return update_spec 142 | 143 | def update_host_date_time(self, host): 144 | state = False 145 | host_date_time_mgr = host.configManager.dateTimeSystem 146 | 147 | update_spec = self.ntp_spec() 148 | 149 | try: 150 | host_date_time_mgr.UpdateDateTimeConfig(update_spec) 151 | state = True 152 | except vim.fault.HostConfigFault as host_config_fault: 153 | msg = "Failed Host Config Fault: {}".format(host_config_fault) 154 | return state 155 | except Exception as e: 156 | msg = "Failed to config NTP on host: {}".format(e) 157 | return state 158 | 159 | return state 160 | 161 | def set_ntp_service(self, host, service_state): 162 | service_system = host.configManager.serviceSystem 163 | changed = False 164 | 165 | try: 166 | 167 | if service_state == 'start': 168 | service_system.StartService(id='ntpd') 169 | if service_state == 'stop': 170 | service_system.StopService(id='ntpd') 171 | if service_state == 'restart': 172 | service_system.RestartService(id='ntpd') 173 | changed = True 174 | 175 | except vim.fault.InvalidState as invalid_state: 176 | return changed 177 | except vim.fault.NotFound as not_found: 178 | return changed 179 | except vim.fault.HostConfigFault as config_fault: 180 | return changed 181 | 182 | return changed 183 | 184 | def state_create(self): 185 | changed = False 186 | results = [] 187 | 188 | for host in self.host_update_list: 189 | host_results = {'name': host.name} 190 | 191 | if not self.check_host_ntp_server(host): 192 | host_ntp_server_changed = self.update_host_date_time(host) 193 | restart_ntp = self.set_ntp_service(host, 'restart') 194 | 195 | host_results.update({'host_ntp_server_changed': host_ntp_server_changed}) 196 | host_results.update({'restart_ntp': restart_ntp}) 197 | 198 | if not self.check_host_ntp_service(host): 199 | host_ntp_service_changed = self.set_ntp_service(host, 'start') 200 | host_results.update({'host_ntp_service_changed': host_ntp_service_changed}) 201 | 202 | results.append(host_results) 203 | 204 | if results: 205 | changed = True 206 | 207 | self.module.exit_json(changed=changed, results=results, msg="STATE CREATE") 208 | 209 | def state_update(self): 210 | self.state_create() 211 | 212 | def state_exit_unchanged(self): 213 | self.module.exit_json(changed=False, msg="EXIT UNCHANGED") 214 | 215 | def state_delete(self): 216 | changed = False 217 | results = [] 218 | 219 | for host in self.host_update_list: 220 | stop_ntp_service = self.set_ntp_service(host, 'stop') 221 | remove_ntp_server = self.update_host_date_time(host) 222 | host_results = {'name': host.name, 223 | 'stop_ntp_service': stop_ntp_service, 224 | 'remove_ntp_server': remove_ntp_server} 225 | 
results.append(host_results) 226 | 227 | if results: 228 | changed = True 229 | 230 | self.module.exit_json(changed=changed, results=results, msg="STATE DELETE") 231 | 232 | def check_host_ntp_service(self, host): 233 | ntp_status = False 234 | host_services = host.configManager.serviceSystem.serviceInfo.service 235 | 236 | try: 237 | ntp_status = [s.running for s in host_services if s.key == 'ntpd'][0] 238 | except IndexError: 239 | return ntp_status 240 | 241 | return ntp_status 242 | 243 | def check_host_ntp_server(self, host): 244 | state = False 245 | date_time_system = host.configManager.dateTimeSystem 246 | host_ntp_servers = date_time_system.dateTimeInfo.ntpConfig.server 247 | 248 | if self.ntp_server in host_ntp_servers: 249 | state = True 250 | 251 | return state 252 | 253 | def current_state(self): 254 | state = 'absent' 255 | 256 | cluster = find_cluster_by_name(self.content, self.cluster_name) 257 | 258 | if not cluster: 259 | msg = "Cannot find cluster: {}".format(self.cluster_name) 260 | self.module.fail_json(msg=msg) 261 | 262 | hosts = cluster.host 263 | 264 | if not hosts: 265 | msg = "No hosts present in cluster" 266 | self.module.exit_json(changed=False, msg=msg) 267 | 268 | self.hosts = hosts 269 | 270 | for host in self.hosts: 271 | 272 | ntp_server = self.check_host_ntp_server(host) 273 | ntp_service = self.check_host_ntp_service(host) 274 | 275 | if ntp_server and ntp_service: 276 | host_state = 'present' 277 | 278 | if (not ntp_server) or (not ntp_service): 279 | host_state = 'absent' 280 | 281 | if host_state == 'present' and self.desired_state == 'absent': 282 | self.host_update_list.append(host) 283 | if host_state == 'absent' and self.desired_state == 'present': 284 | self.host_update_list.append(host) 285 | 286 | if self.desired_state == 'present' and self.host_update_list: 287 | return state 288 | if self.desired_state == 'absent' and self.host_update_list: 289 | state = 'present' 290 | 291 | return state 292 | 293 | 294 | def main(): 295 | argument_spec = vmware_argument_spec() 296 | 297 | argument_spec.update( 298 | dict( 299 | cluster_name=dict(required=True, type='str'), 300 | ntp_server=dict(required=True, type='str'), 301 | state=dict(default='present', choices=['present', 'absent'], type='str'), 302 | ) 303 | ) 304 | 305 | module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) 306 | 307 | if not IMPORTS: 308 | module.fail_json(msg='pyvmomi is required for this module') 309 | 310 | hostntp = VcenterHostNtp(module) 311 | hostntp.run_state() 312 | 313 | from ansible.module_utils.basic import * 314 | from ansible.module_utils.vmware import * 315 | 316 | if __name__ == '__main__': 317 | main() -------------------------------------------------------------------------------- /vcenter_host_profile.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # (c) 2015, Joseph Callen 4 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved. 5 | # 6 | # This file is part of Ansible 7 | # 8 | # Ansible is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # Ansible is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with Ansible. If not, see . 20 | 21 | 22 | DOCUMENTATION = ''' 23 | module: vcenter_host_profile 24 | short_description: Creates, updates, and deletes host profiles 25 | description: 26 | Creates, updates, and deletes host profiles extracted from a reference host. 27 | notes: 28 | - Tested on vSphere 6.0 29 | requirements: ansible 2.x 30 | options: 31 | hostname: 32 | description: 33 | - The hostname or IP address of the vSphere vCenter API server 34 | required: True 35 | username: 36 | description: 37 | - The username of the vSphere vCenter 38 | required: True 39 | aliases: ['user', 'admin'] 40 | password: 41 | description: 42 | - The password of the vSphere vCenter 43 | required: True 44 | aliases: ['pass', 'pwd'] 45 | esxi_hostname: 46 | description: 47 | - The ESXi host to extract the host profile from 48 | state: 49 | description: 50 | - Desired state of the host profile 51 | choices: ['present', 'absent'] 52 | required: True 53 | ''' 54 | 55 | EXAMPLES = ''' 56 | - name: Host Profile 57 | vcenter_host_profile: 58 | hostname: "{{ vcenter }}" 59 | username: "{{ vcenter_user }}" 60 | password: "{{ vcenter_password }}" 61 | validate_certs: "{{ vcenter_validate_certs }}" 62 | esxi_hostname: "{{ item }}" 63 | state: 'absent' 64 | with_items: 65 | - "{{ host_profile }}" 66 | tags: 67 | - taghere 68 | ''' 69 | 70 | try: 71 | from pyVmomi import vim, vmodl 72 | import datetime 73 | HAS_PYVMOMI = True 74 | except ImportError: 75 | HAS_PYVMOMI = False 76 | 77 | 78 | vc = {} 79 | 80 | 81 | def get_host_profile(si, profilename): 82 | 83 | profile = None 84 | 85 | profiles = si.hostProfileManager.profile 86 | 87 | for p in profiles: 88 | if p.name == profilename: 89 | profile = p 90 | 91 | return profile 92 | 93 | 94 | def profile_spec(name, host): 95 | 96 | spec = vim.profile.host.HostProfile.HostBasedConfigSpec( 97 | name = name, 98 | enabled = True, 99 | host = host, 100 | useHostProfileEngine = True, 101 | ) 102 | 103 | return spec 104 | 105 | 106 | def create_profile(module, si, spec): 107 | 108 | host_profile = None 109 | 110 | profile_manager = si.hostProfileManager 111 | 112 | try: 113 | host_profile = profile_manager.CreateProfile(spec) 114 | except Exception as e: 115 | module.fail_json(msg="Failed to create host profile: {}".format(str(e))) 116 | 117 | return host_profile 118 | 119 | 120 | def update_reference_host(module, profile, host): 121 | 122 | state = False 123 | 124 | try: 125 | profile.UpdateReferenceHost(host) 126 | state = True 127 | except Exception as e: 128 | module.fail_json(msg="Failed to update reference host: {}".format(str(e))) 129 | 130 | return state 131 | 132 | 133 | def check_host_profile(si, host, profile_name, present): 134 | 135 | state = False 136 | 137 | profile_manager = si.hostProfileManager 138 | 139 | profiles = [p for p in profile_manager.profile] 140 | 141 | if not profiles: 142 | return False 143 | 144 | if present: 145 | for profile in profiles: 146 | if profile.name == profile_name: 147 | state = True 148 | 149 | if not present: 150 | for p in profiles: 151 | if p.name == profile_name: 152 | if p.referenceHost == host: 153 | state = True 154 | 155 | return state 156 | 157 | 158 | def profile_name(cluster_name): 159 | 160 | fmt = '%Y_%m_%d' 161 | time_stamp = datetime.datetime.now().strftime(fmt) 162 | 163 | sep = "_" 164 | seq = (cluster_name, time_stamp) 165 | 166 | profilename = sep.join(seq) 167 | 168 | return profilename 169 | 170 | 171 | def 
state_create_profile(module): 172 | 173 | changed = False 174 | result = None 175 | 176 | si = vc['si'] 177 | host = vc['host'] 178 | profile_name = vc['profile_name'] 179 | 180 | spec = profile_spec(profile_name, host) 181 | 182 | profile = create_profile(module, si, spec) 183 | 184 | if not profile: 185 | module.fail_json(msg="Failed creating profile") 186 | 187 | update_ref_host = update_reference_host(module, profile, host) 188 | 189 | if update_ref_host: 190 | changed = True 191 | result = profile.name 192 | 193 | module.exit_json(changed=changed, result=result) 194 | 195 | 196 | def state_update_profile(module): 197 | 198 | profilename = vc['profile_name'] 199 | si = vc['si'] 200 | host = vc['host'] 201 | 202 | profile = get_host_profile(si, profilename) 203 | 204 | if not profile: 205 | module.fail_json(msg="Failed to get profile to update ref host") 206 | 207 | changed = update_reference_host(module, profile, host) 208 | 209 | if not changed: 210 | module.fail_json(msg="Failed to update ref host for host profile") 211 | 212 | module.exit_json(changed=changed) 213 | 214 | 215 | def state_destroy_profile(module): 216 | 217 | profilename = vc['profile_name'] 218 | si = vc['si'] 219 | 220 | profile = get_host_profile(si, profilename) 221 | 222 | if not profile: 223 | module.fail_json(msg="Failed to get profile to destroy") 224 | 225 | try: 226 | profile.DestroyProfile() 227 | except Exception as e: 228 | module.fail_json(msg="Failed to destroy profile: {}".format(str(e))) 229 | 230 | module.exit_json(changed=True) 231 | 232 | 233 | def state_exit_unchanged(module): 234 | module.exit_json(changed=False, msg="EXIT UNCHANGED") 235 | 236 | 237 | def check_profile_state(module): 238 | 239 | esxi_hostname = module.params['esxi_hostname'] 240 | 241 | si = connect_to_api(module) 242 | 243 | vc['si'] = si 244 | 245 | host = find_hostsystem_by_name(si, esxi_hostname) 246 | 247 | if not host: 248 | module.fail_json(msg="Failed getting host: {}".format(esxi_hostname)) 249 | 250 | vc['host'] = host 251 | vc['profile_name'] = profile_name(host.parent.name) 252 | 253 | profile_present = check_host_profile(si, host, vc['profile_name'], True) 254 | 255 | if not profile_present: 256 | return 'absent' 257 | 258 | profile_config = check_host_profile(si, host, vc['profile_name'], False) 259 | 260 | if not profile_config: 261 | return 'update' 262 | 263 | return 'present' 264 | 265 | 266 | 267 | def main(): 268 | argument_spec = vmware_argument_spec() 269 | 270 | argument_spec.update( 271 | dict( 272 | esxi_hostname=dict(required=True, type='str'), 273 | state=dict(required=True, choices=['present', 'absent'], type='str'), 274 | ) 275 | ) 276 | 277 | module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) 278 | 279 | if not HAS_PYVMOMI: 280 | module.fail_json(msg='pyvmomi is required for this module') 281 | 282 | try: 283 | profile_states = { 284 | 'absent': { 285 | 'absent': state_exit_unchanged, 286 | 'present': state_destroy_profile, 287 | 'update': state_destroy_profile, 288 | }, 289 | 'present': { 290 | 'present': state_exit_unchanged, 291 | 'update': state_update_profile, 292 | 'absent': state_create_profile, 293 | } 294 | } 295 | 296 | profile_states[module.params['state']][check_profile_state(module)](module) 297 | 298 | except vmodl.RuntimeFault as runtime_fault: 299 | module.fail_json(msg=runtime_fault.msg) 300 | except vmodl.MethodFault as method_fault: 301 | module.fail_json(msg=method_fault.msg) 302 | except Exception as e: 303 | module.fail_json(msg=str(e)) 304 | 305 | 306 | from 
ansible.module_utils.basic import * 307 | from ansible.module_utils.vmware import * 308 | 309 | if __name__ == '__main__': 310 | main() -------------------------------------------------------------------------------- /vcenter_host_vmnic.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | # 4 | # (c) 2015, Joseph Callen 5 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved. 6 | # 7 | # This file is part of Ansible 8 | # 9 | # Ansible is free software: you can redistribute it and/or modify 10 | # it under the terms of the GNU General Public License as published by 11 | # the Free Software Foundation, either version 3 of the License, or 12 | # (at your option) any later version. 13 | # 14 | # Ansible is distributed in the hope that it will be useful, 15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 | # GNU General Public License for more details. 18 | # 19 | # You should have received a copy of the GNU General Public License 20 | # along with Ansible. If not, see . 21 | 22 | ANSIBLE_METADATA = {'status': ['preview'], 23 | 'supported_by': 'community', 24 | 'version': '1.0'} 25 | 26 | DOCUMENTATION = ''' 27 | --- 28 | module: vcenter_host_vmnic 29 | short_description: Obtains a list of available vmnics for a specified host 30 | description: 31 | Obtains a list of available vmnics for a specified host 32 | version_added: 2.3 33 | author: VMware 34 | notes: 35 | - Tested on vsphere 6.0 36 | requirements: 37 | - PyVmomi 38 | options: 39 | esxi_hostname: 40 | description: 41 | - Host name or ip for the esxi host 42 | required: True 43 | type: str 44 | 45 | ''' 46 | 47 | EXAMPLES = ''' 48 | - name: Get esxi hosts available vmnics 49 | vcenter_host_vmnic: 50 | hostname: "{{ vcenter }}" 51 | username: "{{ vcenter_user }}" 52 | password: "{{ vcenter_password }}" 53 | validate_certs: "{{ vcenter_validate_certs }}" 54 | esxi_hostname: "172.16.78.101" 55 | register: host_vmnics 56 | ''' 57 | 58 | RETURN = ''' 59 | host_vmnics: 60 | description: 61 | - dict with host and list of available vmnics 62 | returned: host_vmnics 63 | type: dict 64 | sample: { host: "esxi.corp.local", vmnics: ['vmnic0', 'vmnic1'] } 65 | 66 | ''' 67 | 68 | try: 69 | from pyVmomi import vim, vmodl 70 | HAS_PYVMOMI = True 71 | except ImportError: 72 | HAS_PYVMOMI = False 73 | 74 | 75 | class VcenterHostVmnics(object): 76 | """ 77 | Obtains the available/used vmnics for the specified esx host 78 | :param module AnsibleModule 79 | :param esxi_hostname 80 | :param vcapi 81 | """ 82 | def __init__(self, module): 83 | super(VcenterHostVmnics, self).__init__() 84 | self.module = module 85 | self.esxi_hostname = module.params['esxi_hostname'] 86 | self.get_type = module.params['obtain'] 87 | self.vcapi = connect_to_api(self.module) 88 | self.host = None 89 | self.available_vmnics = [] 90 | 91 | 92 | def run_state(self): 93 | if not self.check_state(): 94 | self.module.fail_json(msg="Failed to find host: %s " % self.esxi_hostname) 95 | 96 | host_vmnics = self.get_host_vmnics(self.host) 97 | 98 | if self.get_type == 'available': 99 | vmnics = self.get_host_available_vmnics(host_vmnics) 100 | 101 | if self.get_type == 'used': 102 | vmnics = self.get_used_vmnic(self.host) 103 | 104 | host_data = {'host': self.esxi_hostname, 'vmnics': vmnics} 105 | 106 | self.module.exit_json(changed=False, host_vmnics=host_data) 107 | 108 | def 
get_host_available_vmnics(self, vmnics): 109 | used_vmnics = self.get_used_vmnic(self.host) 110 | 111 | for vmnic in vmnics: 112 | if vmnic not in used_vmnics: 113 | self.available_vmnics.append(vmnic) 114 | 115 | return self.available_vmnics 116 | 117 | def get_host_vmnics(self, host): 118 | net_config = host.configManager.networkSystem.networkConfig 119 | vmnics = [pnic.device for pnic in net_config.pnic] 120 | return vmnics 121 | 122 | def get_vswitch_vmnics(self, host): 123 | vswitch_vmnics = [] 124 | 125 | net_config = self.host.configManager.networkSystem.networkConfig 126 | 127 | if not net_config.vswitch: 128 | return vswitch_vmnics 129 | 130 | for vswitch in net_config.vswitch: 131 | for v in vswitch.spec.bridge.nicDevice: 132 | vswitch_vmnics.append(v) 133 | 134 | return vswitch_vmnics 135 | 136 | def get_proxyswitch_vmnics(self, host): 137 | proxy_switch_vmnics = [] 138 | 139 | net_config = self.host.configManager.networkSystem.networkConfig 140 | 141 | if not net_config.proxySwitch: 142 | return proxy_switch_vmnics 143 | 144 | for proxy_config in net_config.proxySwitch: 145 | for p in proxy_config.spec.backing.pnicSpec: 146 | proxy_switch_vmnics.append(p.pnicDevice) 147 | 148 | return proxy_switch_vmnics 149 | 150 | def get_used_vmnic(self, host): 151 | vswitch_vmnics = self.get_vswitch_vmnics(host) 152 | proxy_switch_vmnics = self.get_proxyswitch_vmnics(host) 153 | return vswitch_vmnics + proxy_switch_vmnics 154 | 155 | def check_state(self): 156 | host = find_hostsystem_by_name(self.vcapi, self.esxi_hostname) 157 | 158 | if not host: 159 | return False 160 | 161 | self.host = host 162 | 163 | return True 164 | 165 | 166 | def main(): 167 | argument_spec = vmware_argument_spec() 168 | 169 | argument_spec.update(dict(esxi_hostname=dict(type='str', required=True), 170 | obtain=dict(type='str', 171 | required=False, 172 | default='available', 173 | choices=['available', 'used']), 174 | state=dict(default='present', choices=['present', 'absent'], type='str'))) 175 | 176 | module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) 177 | 178 | if not HAS_PYVMOMI: 179 | module.fail_json(msg='pyvmomi is required for this module') 180 | 181 | vcenter_host_vmnics = VcenterHostVmnics(module) 182 | vcenter_host_vmnics.run_state() 183 | 184 | 185 | from ansible.module_utils.basic import * 186 | from ansible.module_utils.vmware import * 187 | 188 | if __name__ == '__main__': 189 | main() -------------------------------------------------------------------------------- /vcenter_nfs_ds.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # (c) 2015, Joseph Callen 4 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved. 5 | # 6 | # This file is part of Ansible 7 | # 8 | # Ansible is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # Ansible is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with Ansible. If not, see .
20 | 21 | DOCUMENTATION = ''' 22 | module: vcenter_nfs_ds 23 | short_description: Add host to nfs datastore 24 | description: 25 | - Add host to specified nfs datastore 26 | options: 27 | 28 | esxi_hostname: 29 | description: 30 | - The esxi hostname or ip to add to nfs ds 31 | required: True 32 | nfs_host: 33 | description: 34 | - The nfs host providing the nfs service 35 | required: True 36 | nfs_path: 37 | description: 38 | - The remote file path ex: /nfs1 39 | required: True 40 | nfs_name: 41 | description: 42 | - The name of the datastore as seen by vcenter 43 | required: True 44 | nfs_access: 45 | description: 46 | - The access type 47 | choices: [readWrite, readOnly] 48 | required: True 49 | nfs_type: 50 | description: 51 | - The type of volume. Defaults to nfs if not specified 52 | choices: [nfs, cifs] 53 | required: False 54 | nfs_username: 55 | description: 56 | - The username to access the nfs ds if required 57 | required: False 58 | nfs_password: 59 | description: 60 | - The password to access the nfs ds if required 61 | required: False 62 | state: 63 | description: 64 | - If the datastore should be present or absent 65 | choices: ['present', 'absent'] 66 | required: True 67 | ''' 68 | 69 | EXAMPLES = ''' 70 | - name: Add NFS DS to Host 71 | ignore_errors: no 72 | vcenter_nfs_ds: 73 | esxi_hostname: '192.168.1.102' 74 | nfs_host: '192.168.1.145' 75 | nfs_path: '/nfs1' 76 | nfs_name: 'nfs_ds_1' 77 | nfs_access: 'readWrite' 78 | nfs_type: 'nfs' 79 | state: 'present' 80 | tags: 81 | - addnfs 82 | ''' 83 | 84 | try: 85 | from pyVmomi import vim, vmodl 86 | HAS_PYVMOMI = True 87 | except ImportError: 88 | HAS_PYVMOMI = False 89 | 90 | 91 | vc = {} 92 | 93 | 94 | def find_vcenter_object_by_name(content, vimtype, object_name): 95 | vcenter_object = get_all_objs(content, [vimtype]) 96 | 97 | for k, v in vcenter_object.items(): 98 | if v == object_name: 99 | return k 100 | else: 101 | return None 102 | 103 | 104 | def nfs_spec(module): 105 | 106 | nfs_remote_host = module.params['nfs_host'] 107 | nfs_remote_path = module.params['nfs_path'] 108 | nfs_local_name = module.params['nfs_name'] 109 | nfs_access_mode = module.params['nfs_access'] 110 | nfs_type = module.params['nfs_type'] 111 | nfs_username = module.params['nfs_username'] 112 | nfs_password = module.params['nfs_password'] 113 | 114 | nfs_config_spec = vim.host.NasVolume.Specification( 115 | remoteHost=nfs_remote_host, 116 | remotePath=nfs_remote_path, 117 | localPath=nfs_local_name, 118 | accessMode=nfs_access_mode, 119 | type=nfs_type, 120 | ) 121 | 122 | if nfs_username and nfs_password: 123 | nfs_config_spec.userName = nfs_username 124 | nfs_config_spec.password = nfs_password 125 | 126 | return nfs_config_spec 127 | 128 | 129 | def check_host_added_to_nfs_ds(module): 130 | 131 | state = None 132 | 133 | nfs_ds = vc['nfs'] 134 | host = vc['host'] 135 | 136 | for esxhost in nfs_ds.host: 137 | if esxhost.key == host: 138 | state = True 139 | 140 | return state 141 | 142 | 143 | def state_exit_unchanged(module): 144 | module.exit_json(changed=False, msg="EXIT UNCHANGED") 145 | 146 | 147 | def state_delete_nfs(module): 148 | 149 | changed = False 150 | result = None 151 | 152 | host = vc['host'] 153 | ds = vc['nfs'] 154 | 155 | try: 156 | host.configManager.datastoreSystem.RemoveDatastore(ds) 157 | changed = True 158 | result = "Removed Datastore: {}".format(ds.name) 159 | except Exception as e: 160 | module.fail_json(msg="Failed to remove datastore: %s" % str(e)) 161 | 162 | module.exit_json(changed=changed, result=result) 163 | 
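# Like the other modules in this repo, the delete/create/exit handlers here are
# driven by a small state machine: the desired state from the playbook and the
# discovered current state index into a nested dict of handlers, as main() does
# below. A minimal sketch of the pattern (using this module's own handlers):
#
#   states = {
#       'present': {'absent': state_create_nfs, 'present': state_exit_unchanged},
#       'absent':  {'present': state_delete_nfs, 'absent': state_exit_unchanged},
#   }
#   states[module.params['state']][check_nfs_host_state(module)](module)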
164 | def state_create_nfs(module): 165 | 166 | changed = False 167 | result = None 168 | 169 | host = vc['host'] 170 | ds_spec = nfs_spec(module) 171 | 172 | try: 173 | ds = host.configManager.datastoreSystem.CreateNasDatastore(ds_spec) 174 | changed = True 175 | result = ds.name 176 | except vim.fault.DuplicateName as duplicate_name: 177 | module.fail_json(msg="Failed duplicate name: %s" % duplicate_name) 178 | except vim.fault.AlreadyExists as already_exists: 179 | module.exit_json(changed=False, result=str(already_exists)) 180 | except vim.fault.HostConfigFault as config_fault: 181 | module.fail_json(msg="Failed to configure nfs on host: %s" % config_fault.msg) 182 | except vmodl.fault.InvalidArgument as invalid_arg: 183 | module.fail_json(msg="Failed with invalid arg: %s" % invalid_arg) 184 | except vim.fault.NoVirtualNic as no_virt_nic: 185 | module.fail_json(msg="Failed no virtual nic: %s" % no_virt_nic) 186 | except vim.fault.NoGateway as no_gwy: 187 | module.fail_json(msg="Failed no gateway: %s" % no_gwy) 188 | except vmodl.MethodFault as method_fault: 189 | module.fail_json(msg="Failed to configure nfs on host method fault: %s" % method_fault.msg) 190 | 191 | module.exit_json(changed=changed, result=result) 192 | 193 | def check_nfs_host_state(module): 194 | 195 | esxi_hostname = module.params['esxi_hostname'] 196 | nfs_ds_name = module.params['nfs_name'] 197 | 198 | si = connect_to_api(module) 199 | vc['si'] = si 200 | 201 | host = find_hostsystem_by_name(si, esxi_hostname) 202 | 203 | if host is None: 204 | module.fail_json(msg="Esxi host: {} not in vcenter".format(esxi_hostname)) 205 | 206 | vc['host'] = host 207 | 208 | nfs_ds = find_vcenter_object_by_name(si, vim.Datastore, nfs_ds_name) 209 | 210 | if nfs_ds is None: 211 | return 'absent' 212 | else: 213 | vc['nfs'] = nfs_ds 214 | 215 | if check_host_added_to_nfs_ds(module): 216 | return 'present' 217 | else: 218 | return 'update' 219 | 220 | 221 | 222 | def main(): 223 | argument_spec = vmware_argument_spec() 224 | 225 | argument_spec.update( 226 | dict( 227 | esxi_hostname=dict(required=True, type='str'), 228 | nfs_host=dict(required=True, type='str'), 229 | nfs_path=dict(required=True, type='str'), 230 | nfs_name=dict(required=True, type='str'), 231 | nfs_access=dict(required=True, type='str'), 232 | nfs_type=dict(required=False, type='str'), 233 | nfs_username=dict(required=False, type='str'), 234 | nfs_password=dict(required=False, type='str', no_log=True), 235 | state=dict(default='present', choices=['present', 'absent'], type='str'), 236 | ) 237 | ) 238 | 239 | module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) 240 | 241 | if not HAS_PYVMOMI: 242 | module.fail_json(msg='pyvmomi is required for this module') 243 | 244 | try: 245 | nfs_host_states = { 246 | 'absent': { 247 | 'update': state_exit_unchanged, 248 | 'present': state_delete_nfs, 249 | 'absent': state_exit_unchanged, 250 | }, 251 | 'present': { 252 | 'update': state_create_nfs, 253 | 'present': state_exit_unchanged, 254 | 'absent': state_create_nfs, 255 | } 256 | } 257 | 258 | nfs_host_states[module.params['state']][check_nfs_host_state(module)](module) 259 | 260 | except vmodl.RuntimeFault as runtime_fault: 261 | module.fail_json(msg=runtime_fault.msg) 262 | except vmodl.MethodFault as method_fault: 263 | module.fail_json(msg=method_fault.msg) 264 | except Exception as e: 265 | module.fail_json(msg=str(e)) 266 | 267 | 268 | from ansible.module_utils.basic import * 269 | from ansible.module_utils.vmware import * 270 | 271 | if __name__ == 
'__main__': 272 | main() -------------------------------------------------------------------------------- /vcenter_pg_activeuplinks.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # (c) 2015, Joseph Callen 4 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved. 5 | # 6 | # This file is part of Ansible 7 | # 8 | # Ansible is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # Ansible is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with Ansible. If not, see . 20 | 21 | DOCUMENTATION = ''' 22 | module: vcenter_pg_activeuplinks 23 | short_description: set uplink to active or standby for portgroup 24 | description: 25 | set uplink to active or standby for portgroup 26 | options: 27 | vds_name: 28 | description: 29 | - name of the vds 30 | type: str 31 | pg_name: 32 | description: 33 | - Name of the portgroup to modify 34 | type: str 35 | uplink_state: 36 | description: 37 | - Set to active or standby choices: ['active', 'standby'] required: True 38 | uplinks: 39 | description: 40 | - list of desired active uplinks. 41 | - when specifying a lag group only 1 lag may be specified 42 | type: list 43 | state: 44 | description: 45 | - be present or absent 46 | choices: ['present', 'absent'] 47 | required: True 48 | ''' 49 | 50 | EXAMPLES = ''' 51 | - name: modify pg 52 | vcenter_pg_activeuplinks: 53 | hostname: '172.16.0.100' 54 | username: 'administrator@corp.local' 55 | password: 'VMware1!' 56 | validate_certs: False 57 | vds_name: 'vds-001' 58 | pg_name: 'mgmt-pg-01' uplink_state: 'active' 59 | uplinks: 60 | - 'lag-grp-001' 61 | state: 'present' 62 | ''' 63 | 64 | 65 | try: 66 | from pyVmomi import vim, vmodl 67 | HAS_PYVMOMI = True 68 | except ImportError: 69 | HAS_PYVMOMI = False 70 | 71 | 72 | vc = {} 73 | 74 | invalid_uplinks_fail_msg = ("Specified uplinks: {} " 75 | "include a LAG group and Uplink or multiple LAG groups. 
" 76 | "Only Single LAG or multiple Uplinks allowed.") 77 | 78 | 79 | def state_destroy_pguplink(module): 80 | module.exit_json(msg="DESTROY") 81 | 82 | 83 | def state_exit_unchanged(module): 84 | module.exit_json(msg="EXIT UNCHANGED") 85 | 86 | 87 | def state_update_pguplinks(module): 88 | module.exit_json(msg="UPDATE") 89 | 90 | 91 | def get_current_active_uplinks(): 92 | 93 | pg = vc['pg'] 94 | 95 | active_uplinks = \ 96 | pg.config.defaultPortConfig.uplinkTeamingPolicy.uplinkPortOrder.activeUplinkPort 97 | 98 | return active_uplinks 99 | 100 | def uplink_spec(module, uplinks, pg_config_version): 101 | 102 | spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec() 103 | spec.configVersion = pg_config_version 104 | spec.defaultPortConfig =vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy() 105 | spec.defaultPortConfig.uplinkTeamingPolicy = vim.dvs.VmwareDistributedVirtualSwitch.UplinkPortTeamingPolicy() 106 | 107 | if module.params['uplink_state'] == 'active': 108 | spec.defaultPortConfig.uplinkTeamingPolicy.uplinkPortOrder = \ 109 | vim.dvs.VmwareDistributedVirtualSwitch.UplinkPortOrderPolicy() 110 | spec.defaultPortConfig.uplinkTeamingPolicy.uplinkPortOrder.activeUplinkPort = uplinks 111 | 112 | if module.params['uplink_state'] == 'standby': 113 | 114 | active_uplinks = get_current_active_uplinks() 115 | 116 | spec.defaultPortConfig.uplinkTeamingPolicy.uplinkPortOrder = \ 117 | vim.dvs.VmwareDistributedVirtualSwitch.UplinkPortOrderPolicy() 118 | spec.defaultPortConfig.uplinkTeamingPolicy.uplinkPortOrder.activeUplinkPort = active_uplinks 119 | spec.defaultPortConfig.uplinkTeamingPolicy.uplinkPortOrder.standbyUplinkPort = uplinks 120 | 121 | return spec 122 | 123 | 124 | def create_active_uplink(module, pg): 125 | 126 | changed = False 127 | result = None 128 | 129 | pg_spec = uplink_spec(module, 130 | module.params['uplinks'], 131 | vc['pg_config_version']) 132 | 133 | try: 134 | reconfig_task = pg.ReconfigureDVPortgroup_Task(pg_spec) 135 | changed, result = wait_for_task(reconfig_task) 136 | except vim.fault.DvsFault, dvs_fault: 137 | module.fail_json(msg="Invalid spec: {}".format(str(dvs_fault))) 138 | except vim.fault.ConcurrentAccess, access: 139 | module.fail_json(msg="Concurrent Access Fault: {}".format(str(access))) 140 | except vmodl.fault.NotSupported, support: 141 | module.fail_json(msg="Feature in spec not supported: {}".format(str(support))) 142 | except Exception, e: 143 | module.fail_json(msg="Failed to reconfigure: {}".format(str(e))) 144 | 145 | return changed, result 146 | 147 | 148 | def state_create_pguplinks(module): 149 | 150 | pg = vc['pg'] 151 | 152 | changed, result = create_active_uplink(module, pg) 153 | 154 | if not changed: 155 | module.fail_json(msg="Failed to reconfigure active or standby uplinks") 156 | 157 | module.exit_json(changed=changed, result=result, msg="STATE CREATE") 158 | 159 | 160 | def check_vds_for_lags(vds): 161 | 162 | lags = None 163 | 164 | if vds.config.lacpApiVersion != "multipleLag": 165 | return lags 166 | 167 | if not vds.config.lacpGroupConfig: 168 | return lags 169 | 170 | lags = [lag.name for lag in vds.config.lacpGroupConfig] 171 | 172 | return lags 173 | 174 | 175 | def check_uplinks_lag_uplink(module, vds_lags): 176 | 177 | state = False 178 | 179 | uplinks = module.params['uplinks'] 180 | check_lags = [v for v in uplinks if v in vds_lags] 181 | 182 | if len(uplinks) > 1 and not check_lags: 183 | state = (len(uplinks) == len(set(uplinks))) 184 | elif len(uplinks) == 1: 185 | state = True 186 | 187 | return state 188 
| 189 | 190 | def check_uplinks_valid(module): 191 | 192 | uplinks = module.params['uplinks'] 193 | 194 | vds = vc['vds'] 195 | 196 | vds_uplinks = vds.config.uplinkPortPolicy.uplinkPortName 197 | vds_lags = check_vds_for_lags(vds) 198 | 199 | invalid_uplinks = [x for x in uplinks if x not in vds_uplinks + vds_lags] 200 | 201 | if invalid_uplinks: 202 | module.fail_json(msg="Uplinks specified invalid: {}".format(invalid_uplinks)) 203 | 204 | state = check_uplinks_lag_uplink(module, vds_lags) 205 | 206 | return state 207 | 208 | 209 | def check_uplinks_present(module): 210 | 211 | state = False 212 | 213 | pg = vc['pg'] 214 | 215 | if module.params['uplink_state'] == 'active': 216 | pg_uplinks = pg.config.defaultPortConfig.uplinkTeamingPolicy.uplinkPortOrder.activeUplinkPort 217 | 218 | if module.params['uplink_state'] == 'standby': 219 | pg_uplinks = pg.config.defaultPortConfig.uplinkTeamingPolicy.uplinkPortOrder.standbyUplinkPort 220 | 221 | if (pg_uplinks == module.params['uplinks']): 222 | state = True 223 | 224 | return state 225 | 226 | 227 | def check_pguplink_state(module): 228 | 229 | state = 'absent' 230 | 231 | si = connect_to_api(module) 232 | vc['si'] = si 233 | 234 | vds = find_dvs_by_name(si, module.params['vds_name']) 235 | 236 | if not vds: 237 | module.fail_json(msg="Failed to get vds: {}".format(module.params['vds_name'])) 238 | 239 | vc['vds'] = vds 240 | 241 | pg = find_dvspg_by_name(vds, module.params['pg_name']) 242 | 243 | if not pg: 244 | module.fail_json(msg="Failed to get portgroup: {}".format(module.params['pg_name'])) 245 | 246 | vc['pg'] = pg 247 | vc['pg_config_version'] = pg.config.configVersion 248 | 249 | valid_uplinks = check_uplinks_valid(module) 250 | 251 | if not valid_uplinks: 252 | fail_msg = invalid_uplinks_fail_msg.format(module.params['uplinks']) 253 | module.fail_json(msg=fail_msg) 254 | 255 | uplinks_present = check_uplinks_present(module) 256 | 257 | if uplinks_present: 258 | state = 'present' 259 | 260 | return state 261 | 262 | 263 | 264 | def main(): 265 | argument_spec = vmware_argument_spec() 266 | 267 | argument_spec.update( 268 | dict( 269 | vds_name=dict(required=True, type='str'), 270 | pg_name=dict(required=True, type='str'), 271 | uplink_state=dict(required=True, choices=['active', 'standby'], type='str'), 272 | uplinks=dict(required=True, type='list'), 273 | state=dict(required=True, choices=['present', 'absent'], type='str'), 274 | ) 275 | ) 276 | 277 | module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) 278 | 279 | if not HAS_PYVMOMI: 280 | module.fail_json(msg='pyvmomi is required for this module') 281 | 282 | pguplink_states = { 283 | 'absent': { 284 | 'present': state_destroy_pguplink, 285 | 'absent': state_exit_unchanged, 286 | }, 287 | 'present': { 288 | 'present': state_exit_unchanged, 289 | 'update': state_update_pguplinks, 290 | 'absent': state_create_pguplinks, 291 | } 292 | } 293 | 294 | desired_state = module.params['state'] 295 | current_state = check_pguplink_state(module) 296 | 297 | pguplink_states[desired_state][current_state](module) 298 | 299 | from ansible.module_utils.basic import * 300 | from ansible.module_utils.vmware import * 301 | 302 | if __name__ == '__main__': 303 | main() 304 | -------------------------------------------------------------------------------- /vcenter_portgroup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # (c) 2015, Joseph Callen 4 | # Portions Copyright (c) 2015 VMware, Inc. 
All rights reserved. 5 | # 6 | # This file is part of Ansible 7 | # 8 | # Ansible is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # Ansible is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with Ansible. If not, see . 20 | 21 | 22 | DOCUMENTATION = ''' 23 | --- 24 | module: vcenter_portgroup 25 | short_description: Manage VMware vSphere VDS Portgroup 26 | description: 27 | - Manage VMware vCenter portgroups in a given virtual distributed switch 28 | version_added: 1.0 29 | notes: 30 | - Tested on vSphere 6.0 31 | requirements: 32 | - "python >= 2.6" 33 | - PyVmomi 34 | options: 35 | hostname: 36 | description: 37 | - The hostname or IP address of the vSphere vCenter API server 38 | required: True 39 | username: 40 | description: 41 | - The username of the vSphere vCenter 42 | required: True 43 | aliases: ['user', 'admin'] 44 | password: 45 | description: 46 | - The password of the vSphere vCenter 47 | required: True 48 | aliases: ['pass', 'pwd'] 49 | vds_name: 50 | description: 51 | - The name of the distributed virtual switch where the port group is added to. 52 | The vds must exist prior to adding a new port group, otherwise, this 53 | process will fail. 54 | required: True 55 | port_group_name: 56 | description: 57 | - The name of the port group to create or manage. 58 | required: True 59 | port_binding: 60 | description: 61 | - Available port binding types - static, dynamic, ephemeral 62 | required: True 63 | port_allocation: 64 | description: 65 | - Allocation model of the ports - fixed, elastic 66 | - Fixed allocation always reserves the number of ports requested 67 | - Elastic allocation increases/decreases the number of ports as needed 68 | required: True 69 | numPorts: 70 | description: 71 | - The number of the ports for the port group 72 | - Default value will be 0 - no ports vlan: description: - vlan id to assign to the port group, leave empty for none required: False 73 | state: 74 | description: 75 | - If the port group should be present or absent 76 | choices: ['present', 'absent'] 77 | required: True 78 | ''' 79 | EXAMPLES = ''' 80 | - name: create portgroups 81 | vcenter_portgroup: 82 | hostname: '172.16.78.15' 83 | username: 'administrator@vsphere.local' 84 | password: 'VMware1!' 
85 | validate_certs: False 86 | vds_name: 'vds001' 87 | port_group_name: "{{ item['name'] }}" 88 | port_binding: "{{ item['binding'] }}" 89 | port_allocation: "{{ item['allocation'] }}" 90 | numPorts: "{{ item['numports'] }}" 91 | vlan: 92 | state: 'present' 93 | with_items: 94 | - { name: 'pg001', binding: 'static', allocation: 'elastic', numports: 8 } 95 | ''' 96 | 97 | 98 | try: 99 | from pyVmomi import vim, vmodl 100 | HAS_PYVMOMI = True 101 | except ImportError: 102 | HAS_PYVMOMI = False 103 | 104 | 105 | pgTypeMap = {'static': 'earlyBinding', 106 | 'dynamic': 'lateBinding', 107 | 'ephemeral': 'ephemeral',} 108 | 109 | pg_allocation = {'elastic': True, 110 | 'fixed': False,} 111 | 112 | 113 | def find_vds_by_name(content, vds_name): 114 | vdSwitches = get_all_objs(content, [vim.dvs.VmwareDistributedVirtualSwitch]) 115 | for vds in vdSwitches: 116 | if vds_name == vds.name: 117 | return vds 118 | return None 119 | 120 | 121 | def find_vdspg_by_name(vdSwitch, portgroup_name): 122 | portgroups = vdSwitch.portgroup 123 | 124 | for pg in portgroups: 125 | if pg.name == portgroup_name: 126 | return pg 127 | return None 128 | 129 | 130 | def state_exit_unchanged(si, module): 131 | module.exit_json(changed=False, msg="EXIT UNCHANGED") 132 | 133 | 134 | def state_destroy_port_group(module): 135 | # TODO 136 | module.exit_json(changed=False) 137 | 138 | 139 | def check_pg_spec(si, module): 140 | 141 | state = True 142 | 143 | vds_name = module.params['vds_name'] 144 | vds = find_vds_by_name(si, vds_name) 145 | 146 | pg_name = module.params['port_group_name'] 147 | pg = find_vdspg_by_name(vds, pg_name) 148 | 149 | check_vals = [(pgTypeMap[module.params['port_binding']] == pg.config.type), 150 | (pg_allocation[module.params['port_allocation']] == pg.config.autoExpand),] 151 | 152 | if False in check_vals: 153 | state = False 154 | 155 | return state 156 | 157 | 158 | def create_pg_spec(si, update, module): 159 | 160 | port_group_name = module.params['port_group_name'] 161 | 162 | port_group_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec() 163 | port_group_spec.name = port_group_name 164 | port_group_spec.numPorts = module.params['numPorts'] 165 | port_group_spec.type = pgTypeMap[module.params['port_binding']] 166 | port_group_spec.autoExpand = pg_allocation[module.params['port_allocation']]  # kept in sync with check_pg_spec so state converges 167 | 168 | pg_policy = vim.dvs.DistributedVirtualPortgroup.PortgroupPolicy() 169 | port_group_spec.policy = pg_policy 170 | 171 | if module.params['vlan']: 172 | port_group_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy() 173 | port_group_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec() 174 | port_group_spec.defaultPortConfig.vlan.vlanId = module.params['vlan'] 175 | port_group_spec.defaultPortConfig.vlan.inherited = False 176 | 177 | if update: 178 | vds_name = module.params['vds_name'] 179 | vds = find_vds_by_name(si, vds_name) 180 | 181 | pg_name = module.params['port_group_name'] 182 | pg = find_vdspg_by_name(vds, pg_name) 183 | 184 | port_group_spec.configVersion = pg.config.configVersion 185 | 186 | return port_group_spec 187 | 188 | 189 | def state_create_port_group(si, module): 190 | 191 | port_group_spec = create_pg_spec(si, False, module) 192 | changed, result = False, None 193 | 194 | vds_name = module.params['vds_name'] 195 | vds = find_vds_by_name(si, vds_name) 196 | 197 | try: 198 | if not module.check_mode: 199 | 200 | task = vds.AddDVPortgroup_Task(spec=[port_group_spec]) 201 | 202 | changed, result = wait_for_task(task) 
module.exit_json(changed=changed, result=result) 203 | 204 | except Exception as e: 205 | module.fail_json(msg=str(e)) 206 | 207 | 208 | def state_update_port_group(si, module): 209 | 210 | vds_name = module.params['vds_name'] 211 | vds = find_vds_by_name(si, vds_name) 212 | 213 | pg_name = module.params['port_group_name'] 214 | pg = find_vdspg_by_name(vds, pg_name) 215 | 216 | pg_spec = create_pg_spec(si, True, module) 217 | 218 | try: 219 | reconfig_task = pg.ReconfigureDVPortgroup_Task(pg_spec) 220 | changed, result = wait_for_task(reconfig_task) 221 | except Exception as e: 222 | module.fail_json(msg="Failed to reconfigure pg: {}".format(e)) 223 | 224 | module.exit_json(changed=changed, result=result) 225 | 226 | 227 | def check_port_group_state(si, module): 228 | 229 | vds_name = module.params['vds_name'] 230 | port_group_name = module.params['port_group_name'] 231 | vlan = module.params['vlan'] 232 | 233 | if vlan: 234 | module.params['vlan'] = int(vlan) 235 | else: 236 | module.params['vlan'] = None 237 | 238 | vds = find_vds_by_name(si, vds_name) 239 | 240 | port_group = find_vdspg_by_name(vds, port_group_name) 241 | 242 | if port_group is None: 243 | return 'absent' 244 | elif not check_pg_spec(si, module): 245 | return 'update' 246 | else: 247 | return 'present' 248 | 249 | 250 | def main(): 251 | argument_spec = vmware_argument_spec() 252 | 253 | argument_spec.update( 254 | dict( 255 | vds_name=dict(type='str', required=True), 256 | port_group_name=dict(required=True, type='str'), 257 | port_binding=dict(required=True, choices=['static', 'dynamic', 'ephemeral'], type='str'), 258 | port_allocation=dict(choices=['fixed', 'elastic'], type='str'), 259 | numPorts=dict(required=True, type='int'), 260 | vlan=dict(type='str', required=False, default=None), 261 | state=dict(required=True, choices=['present', 'absent'], type='str'), 262 | ) 263 | ) 264 | 265 | module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) 266 | 267 | if not HAS_PYVMOMI: 268 | module.fail_json(msg='pyvmomi is required for this module') 269 | 270 | port_group_states = { 271 | 'absent': { 272 | 'present': state_destroy_port_group, 273 | 'absent': state_exit_unchanged, 274 | }, 275 | 'present': { 276 | 'present': state_exit_unchanged, 277 | 'update': state_update_port_group, 278 | 'absent': state_create_port_group, 279 | } 280 | } 281 | 282 | si = connect_to_api(module) 283 | 284 | vds_name = module.params['vds_name'] 285 | vds = find_vds_by_name(si, vds_name) 286 | 287 | if not vds: 288 | module.fail_json(msg="Could not find vds: {}".format(vds_name)) 289 | 290 | desired_state = module.params['state'] 291 | current_state = check_port_group_state(si, module) 292 | 293 | port_group_states[desired_state][current_state](si, module) 294 | 295 | 296 | from ansible.module_utils.basic import * 297 | from ansible.module_utils.vmware import * 298 | 299 | if __name__ == '__main__': 300 | main() 301 | -------------------------------------------------------------------------------- /vcenter_rename_vsan_ds.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # (c) 2015, Joseph Callen 4 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved. 
5 | # 6 | # This file is part of Ansible 7 | # 8 | # Ansible is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # Ansible is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with Ansible. If not, see . 20 | 21 | DOCUMENTATION = ''' 22 | module: vcenter_rename_vsan_ds 23 | short_description: Renames vcenter datastore. 24 | description: 25 | Renames vcenter datastore to cluster name + VSAN_DS. Module specifically developed for the 26 | purpose of renaming newly created vsan datastores. 27 | requirements: 28 | - pyvmomi 6 29 | - ansible 2.x 30 | Tested on: 31 | - vcenter 6.0 32 | - pyvmomi 6 33 | - esx 6 34 | - ansible 2.1.2 35 | options: 36 | hostname: 37 | description: 38 | - The hostname or IP address of the vSphere vCenter API server 39 | required: True 40 | username: 41 | description: 42 | - The username of the vSphere vCenter with Admin rights 43 | required: True 44 | aliases: ['user', 'admin'] 45 | password: 46 | description: 47 | - The password of the vSphere vCenter user 48 | required: True 49 | aliases: ['pass', 'pwd'] 50 | datacenter_name: 51 | description: 52 | - The name of the datacenter. 53 | required: True 54 | cluster_name: 55 | description: 56 | - The name of the vCenter cluster whose vsan datastore will be renamed 57 | required: True 58 | state: 59 | description: 60 | - Desired state of the renamed datastore 61 | choices: ['present', 'absent'] 62 | required: True 63 | ''' 64 | 65 | EXAMPLES = ''' 66 | - name: Rename VSAN Datastores 67 | vcenter_rename_vsan_ds: 68 | hostname: "{{ vcenter }}" 69 | username: "{{ vcenter_user }}" 70 | password: "{{ vcenter_password }}" 71 | validate_certs: False 72 | datacenter_name: "{{ datacenter.name }}" 73 | cluster_name: "{{ item.name }}" 74 | state: 'present' 75 | with_items: 76 | - "{{ datacenter.clusters }}" 77 | tags: 78 | - vio_rename_vsan_ds 79 | ''' 80 | 81 | 82 | try: 83 | from pyVmomi import vim, vmodl 84 | import collections 85 | HAS_PYVMOMI = True 86 | except ImportError: 87 | HAS_PYVMOMI = False 88 | 89 | 90 | vc = {} 91 | 92 | 93 | def find_vcenter_object_by_name(content, vimtype, object_name): 94 | vcenter_object = get_all_objs(content, [vimtype]) 95 | 96 | for k, v in vcenter_object.items(): 97 | if v == object_name: 98 | return k 99 | else: 100 | return None 101 | 102 | 103 | def state_delete(module): 104 | module.exit_json(changed=False, msg="CURRENTLY NOT SUPPORTED") 105 | 106 | 107 | def state_exit_unchanged(module): 108 | module.exit_json(changed=False, msg="EXIT UNCHANGED") 109 | 110 | 111 | def state_create(module): 112 | 113 | changed = False 114 | result = None 115 | 116 | dc = vc['dc'] 117 | cl = vc['cluster'] 118 | 119 | new_ds_name = "{}_VSAN_DS".format(module.params['cluster_name']) 120 | 121 | hosts_in_cluster = [host for host in cl.host] 122 | datastores = dc.datastoreFolder.childEntity 123 | 124 | for ds in datastores: 125 | ds_hosts = [h.key for h in ds.host] 126 | compare = lambda x, y: collections.Counter(x) == collections.Counter(y) 127 | 128 | if compare(hosts_in_cluster, ds_hosts): 129 | ds.Rename_Task(new_ds_name) 130 | changed = True 
131 | result = new_ds_name 132 | 133 | module.exit_json(changed=changed, result=result) 134 | 135 | 136 | 137 | def check_ds_state(module): 138 | 139 | content = connect_to_api(module) 140 | 141 | dc = find_vcenter_object_by_name(content, vim.Datacenter, module.params['datacenter_name']) 142 | 143 | if not dc: 144 | module.fail_json(msg="Failed to find datacenter") 145 | 146 | vc['dc'] = dc 147 | 148 | cluster = find_vcenter_object_by_name(content, vim.ClusterComputeResource, module.params['cluster_name']) 149 | 150 | if not cluster: 151 | module.fail_json(msg="Failed to find cluster") 152 | 153 | vc['cluster'] = cluster 154 | 155 | datastores = dc.datastoreFolder.childEntity 156 | 157 | ds_name = "{}_VSAN_DS".format(module.params['cluster_name']) 158 | 159 | ds = [d for d in datastores if d.name == ds_name] 160 | 161 | if ds: 162 | return 'present' 163 | else: 164 | return 'absent' 165 | 166 | 167 | 168 | def main(): 169 | argument_spec = vmware_argument_spec() 170 | 171 | argument_spec.update( 172 | dict( 173 | datacenter_name=dict(required=True, type='str'), 174 | cluster_name=dict(required=True, type='str'), 175 | state=dict(default='present', choices=['present', 'absent'], type='str'), 176 | ) 177 | ) 178 | 179 | module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) 180 | 181 | if not HAS_PYVMOMI: 182 | module.fail_json(msg='pyvmomi is required for this module') 183 | 184 | ds_states = { 185 | 'absent': { 186 | 'absent': state_exit_unchanged, 187 | 'present': state_delete, 188 | }, 189 | 'present': { 190 | 'absent': state_create, 191 | 'present': state_exit_unchanged, 192 | } 193 | } 194 | 195 | desired_state = module.params['state'] 196 | current_state = check_ds_state(module) 197 | 198 | ds_states[desired_state][current_state](module) 199 | 200 | 201 | from ansible.module_utils.basic import * 202 | from ansible.module_utils.vmware import * 203 | 204 | if __name__ == '__main__': 205 | main() -------------------------------------------------------------------------------- /vcenter_stand_alone_host.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # (c) 2015, Joseph Callen 4 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved. 5 | # 6 | # This file is part of Ansible 7 | # 8 | # Ansible is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # Ansible is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with Ansible. If not, see . 20 | 21 | DOCUMENTATION = ''' 22 | module: vcenter_stand_alone_host 23 | short_description: Adds a standalone host to a vcenter datacenter 24 | description: 25 | Adds a standalone host to a vcenter datacenter. Module specifically developed for the purpose 26 | of adding a standalone host outside of a datacenter's vsan clusters. Since a witness appliance's 27 | witness vmk defaults to dhcp when the host is added standalone, this module 28 | will update the witness vmk ip information to static with the specified ip address. 
29 | requirements: 30 | - pyvmomi 6 31 | - ansible 2.x 32 | Tested on: 33 | - vcenter 6.0 34 | - pyvmomi 6 35 | - esx 6 36 | - ansible 2.1.2 37 | options: 38 | hostname: 39 | description: 40 | - The hostname or IP address of the vSphere vCenter API server 41 | required: True 42 | username: 43 | description: 44 | - The username of the vSphere vCenter with Admin rights 45 | required: True 46 | aliases: ['user', 'admin'] 47 | password: 48 | description: 49 | - The password of the vSphere vCenter user 50 | required: True 51 | aliases: ['pass', 'pwd'] 52 | datacenter_name: 53 | description: 54 | - The name of the datacenter. 55 | required: True 56 | esx_hostname: 57 | description: 58 | - hostname of the esx standalone host to add 59 | required: True 60 | esx_username: 61 | description: 62 | - username for adding the host, root 63 | required: True 64 | esx_password: 65 | description: 66 | - password for the specified user 67 | required: True 68 | witness_vmk_ip: 69 | description: 70 | - IP address of the host's vmk 71 | required: True 72 | witness_vmk_subnet: 73 | description: 74 | - subnet mask of the host's vmk required: True 75 | state: 76 | description: 77 | - Desired state of the host 78 | choices: ['present', 'absent'] 79 | required: True 80 | 81 | ''' 82 | 83 | EXAMPLES = ''' 84 | - name: Add Standalone Witness Host 85 | vcenter_stand_alone_host: 86 | hostname: "{{ vcenter }}" 87 | username: "{{ vcenter_user }}" 88 | password: "{{ vcenter_password }}" 89 | validate_certs: "{{ vcenter_validate_certs }}" 90 | datacenter_name: "{{ datacenter.name }}" 91 | esx_hostname: "{{ wa_esx_hostname }}" 92 | esx_username: "{{ wa_esx_username }}" 93 | esx_password: "{{ wa_rootpass }}" 94 | witness_vmk_ip: "{{ wa_vsan_vmk_ip }}" 95 | witness_vmk_subnet: "{{ wa_vsan_vmk_subnet }}" 96 | state: "{{ global_state }}" 97 | tags: 98 | - vsan_stretch_addhost 99 | ''' 100 | 101 | 102 | try: 103 | from pyVmomi import vim, vmodl 104 | HAS_PYVMOMI = True 105 | except ImportError: 106 | HAS_PYVMOMI = False 107 | 108 | class AddStandAloneHost(object): 109 | ''' 110 | Adds a standalone esx host to a vcenter datacenter and sets the witness vmk to a static ip. 111 | ''' 112 | 113 | def __init__(self, module): 114 | self.module = module 115 | self.datacenter_name = module.params['datacenter_name'] 116 | self.host_name = module.params['esx_hostname'] 117 | self.host_user = module.params['esx_username'] 118 | self.host_password = module.params['esx_password'] 119 | self.content = connect_to_api(module) 120 | self.datacenter = None 121 | self.host = None 122 | self.host_folder = None 123 | 124 | 125 | def process_state(self): 126 | 127 | states = { 128 | 'absent': { 129 | 'absent': self.state_exit_unchanged, 130 | 'present': self.state_delete, 131 | }, 132 | 'present': { 133 | 'absent': self.state_create, 134 | 'present': self.state_exit_unchanged, 135 | 'update': self.state_update, 136 | } 137 | } 138 | 139 | desired_state = self.module.params['state'] 140 | current_state = self.current_state() 141 | 142 | states[desired_state][current_state]() 143 | 144 | 145 | def state_create(self): 146 | 147 | changed, result = self.add_host() 148 | 149 | if changed: 150 | host = find_hostsystem_by_name(self.content, self.host_name) 151 | 152 | vmk = self.get_vsan_vmk(host) 153 | vmk = vmk.device 154 | 155 | changed = self.update_witnesspg_vmk(host, vmk) 156 | 157 | self.module.exit_json(changed=changed, result=str(result)) 158 | 159 | 160 | def state_update(self): 161 | vmk = self.get_vsan_vmk(self.host) 162 | vmk = vmk.device 163 | 164 | changed = self.update_witnesspg_vmk(self.host, vmk) 165 | 166 | self.module.exit_json(changed=changed, 
result='update vmk') 167 | 168 | 169 | def update_witnesspg_vmk(self, host, vsan_vmk): 170 | changed = False 171 | 172 | vsan_vmk_spec = vim.host.VirtualNic.Specification() 173 | vsan_vmk_spec.ip = vim.host.IpConfig() 174 | vsan_vmk_spec.ip.dhcp = False 175 | vsan_vmk_spec.ip.ipAddress = self.module.params['witness_vmk_ip'] 176 | vsan_vmk_spec.ip.subnetMask = self.module.params['witness_vmk_subnet'] 177 | 178 | net_sys = host.configManager.networkSystem 179 | 180 | try: 181 | net_sys.UpdateVirtualNic(device=vsan_vmk, 182 | nic=vsan_vmk_spec) 183 | changed = True 184 | except Exception: 185 | return changed 186 | 187 | return changed 188 | 189 | 190 | def add_host(self): 191 | 192 | host_spec = vim.host.ConnectSpec() 193 | host_spec.hostName = self.host_name 194 | host_spec.userName = self.host_user 195 | host_spec.password = self.host_password 196 | host_spec.force = True 197 | host_spec.sslThumbprint = "" 198 | add_connected = True 199 | 200 | try: 201 | add_host_task = self.host_folder.AddStandaloneHost_Task(spec=host_spec, 202 | addConnected=add_connected) 203 | changed, result = wait_for_task(add_host_task) 204 | return changed, result 205 | except TaskError as add_task_error: 206 | ssl_verify_fault = add_task_error.args[0] 207 | host_spec.sslThumbprint = ssl_verify_fault.thumbprint 208 | 209 | add_host_task = self.host_folder.AddStandaloneHost_Task(spec=host_spec, 210 | addConnected=add_connected) 211 | changed, result = wait_for_task(add_host_task) 212 | return changed, result 213 | 214 | 215 | def state_exit_unchanged(self): 216 | self.module.exit_json(changed=False, msg="EXIT UNCHANGED") 217 | 218 | 219 | def state_delete(self): 220 | self.module.exit_json(changed=False, msg="Delete") 221 | 222 | 223 | def get_vsan_vmk(self, host): 224 | 225 | try: 226 | query_result = host.configManager.virtualNicManager.QueryNetConfig('vsan') 227 | except Exception: 228 | query_result = None 229 | 230 | if not query_result: 231 | self.module.fail_json(msg="no vmks with vsan service") 232 | 233 | selected_vmks = [i for i in query_result.selectedVnic] 234 | vsan_vmk = [v for v in query_result.candidateVnic if v.key in selected_vmks][0] 235 | 236 | return vsan_vmk 237 | 238 | 239 | def check_witness_vmk(self): 240 | 241 | vsan_vmk = self.get_vsan_vmk(self.host) 242 | 243 | if vsan_vmk.spec.ip.dhcp: 244 | return False 245 | if vsan_vmk.spec.ip.ipAddress == self.module.params['witness_vmk_ip']: 246 | return True 247 | 248 | 249 | def current_state(self): 250 | state = 'absent' 251 | 252 | try: 253 | self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name) 254 | 255 | if not self.datacenter: 256 | self.module.fail_json(msg="Cannot find DC") 257 | 258 | self.host_folder = self.datacenter.hostFolder 259 | 260 | self.host = find_hostsystem_by_name(self.content, self.host_name) 261 | 262 | if self.host: 263 | check_vmk = self.check_witness_vmk() 264 | 265 | if check_vmk: 266 | state = 'present' 267 | else: 268 | state = 'update' 269 | 270 | except vmodl.RuntimeFault as runtime_fault: 271 | self.module.fail_json(msg=runtime_fault.msg) 272 | except vmodl.MethodFault as method_fault: 273 | self.module.fail_json(msg=method_fault.msg) 274 | 275 | return state 276 | 277 | 278 | 279 | def main(): 280 | argument_spec = vmware_argument_spec() 281 | 282 | argument_spec.update( 283 | dict( 284 | datacenter_name=dict(required=True, type='str'), 285 | esx_hostname=dict(required=True, type='str'), 286 | esx_username=dict(required=False, default='root', type='str'), 287 | 
esx_password=dict(required=True, type='str', no_log=True), 288 | witness_vmk_ip=dict(required=True, type='str'), 289 | witness_vmk_subnet=dict(required=True, type='str'), 290 | state=dict(default='present', choices=['present', 'absent'], type='str'), 291 | ) 292 | ) 293 | 294 | module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) 295 | 296 | if not HAS_PYVMOMI: 297 | module.fail_json(msg='pyvmomi is required for this module') 298 | 299 | stand_alone = AddStandAloneHost(module) 300 | stand_alone.process_state() 301 | 302 | 303 | from ansible.module_utils.basic import * 304 | from ansible.module_utils.vmware import * 305 | 306 | if __name__ == '__main__': 307 | main() -------------------------------------------------------------------------------- /vcenter_vli_deploy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # (c) 2015, Joseph Callen 4 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved. 5 | # 6 | # This file is part of Ansible 7 | # 8 | # Ansible is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # Ansible is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with Ansible. If not, see . 20 | 21 | DOCUMENTATION = ''' 22 | module: vcenter_vli_deploy 23 | short_description: Deploys (creates) or deletes a log insight ova on a vcenter cluster 24 | description: 25 | Deploys (creates) or deletes a log insight ova on a vcenter cluster. On deployment the module waits for the vm to 26 | power on and "pings" the log insight api before exiting. 27 | requirements: 28 | - pyvmomi 6 29 | - ansible 2.x 30 | Tested on: 31 | - vcenter 6.0 32 | - pyvmomi 6 33 | - esx 6 34 | - ansible 2.1.2 35 | - VMware-vRealize-Log-Insight-3.0.1-3568951.ova 36 | options: 37 | hostname: 38 | description: 39 | - The hostname or IP address of the vSphere vCenter API server 40 | required: True 41 | username: 42 | description: 43 | - The username of the vSphere vCenter with Admin rights 44 | required: True 45 | aliases: ['user', 'admin'] 46 | password: 47 | description: 48 | - The password of the vSphere vCenter user 49 | required: True 50 | aliases: ['pass', 'pwd'] 51 | datacenter: 52 | description: 53 | - The name of the datacenter. 
54 | required: True 55 | cluster: 56 | description: 57 | - The name of the vCenter cluster 58 | required: True 59 | vmname: 60 | description: 61 | - The name of the vm in vcenter 62 | required: True 63 | ovftool_path: 64 | description: 65 | - The path where the ovftool is installed 66 | ex: /usr/local/bin/ovftool 67 | path_to_ova: 68 | description: 69 | - The path where the ova is located 70 | required: True 71 | ova_file: 72 | description: 73 | - The name of the ova file 74 | required: True 75 | disk_mode: 76 | description: 77 | - The disk mode for the deployment of the ova 78 | default: thin 79 | required: False 80 | datastore: 81 | description: 82 | - Valid vcenter datastore 83 | required: True 84 | network: 85 | description: 86 | - Name of the network/portgroup for the appliance 87 | required: True 88 | gateway: 89 | description: 90 | - gateway information for the appliance 91 | required: True 92 | dns_ip: 93 | description: 94 | - dns server ip address 95 | type: list 96 | ip_addr: 97 | description: 98 | - ip address for the appliance 99 | required: True 100 | netmask: 101 | description: 102 | - netmask information for the appliance 103 | required: True 104 | root_password: 105 | description: 106 | - root password for the appliance 107 | required: True 108 | deployment_size: 109 | description: 110 | - size of the deployment for the appliance 111 | required: True 112 | vli_hostname: 113 | description: 114 | - hostname for the appliance 115 | required: True 116 | state: 117 | description: 118 | - Desired state of the appliance 119 | choices: ['present', 'absent'] 120 | required: True 121 | ''' 122 | 123 | EXAMPLES = ''' 124 | - name: deploy vR Log Insight Appliance 125 | vcenter_vli_deploy: 126 | hostname: "{{ vcenter }}" 127 | username: "{{ vcenter_user }}" 128 | password: "{{ vcenter_password }}" 129 | validate_certs: "{{ vcenter_validate_certs }}" 130 | vmname: "{{ vli_vmname }}" 131 | ovftool_path: "{{ ovf_tool_path }}" 132 | path_to_ova: "{{ ova_path }}" 133 | ova_file: "{{ vrli_ova }}" 134 | datacenter: "{{ datacenter.name }}" 135 | cluster: "{{ ib_vcenter_mgmt_esx_cluster_name }}" 136 | disk_mode: "{{ disk_mode }}" 137 | datastore: "{{ ib_vcenter_mgmt_esx_cluster_name }}_VSAN_DS" 138 | vli_hostname: "{{ vrli_hostname }}" 139 | network: "{{ mgmt_vds_viomgmt }}" 140 | gateway: "{{ vrli_gateway }}" 141 | dns_ip: "{{ ova_dns_list }}" 142 | ip_addr: "{{ vrli_ip_addr }}" 143 | netmask: "{{ vrli_netmask }}" 144 | root_password: "{{ vrli_rootpw }}" 145 | deployment_size: "{{ vli_deployment_size }}" 146 | state: "{{ global_state }}" 147 | tags: 148 | - vio_deploy_vrli_ova 149 | ''' 150 | 151 | 152 | try: 153 | import time 154 | import requests 155 | from pyVmomi import vim, vmodl 156 | IMPORTS = True 157 | except ImportError: 158 | IMPORTS = False 159 | 160 | vc = {} 161 | 162 | def check_vli_api(module): 163 | 164 | url = "https://{}/api/v1".format(module.params['ip_addr']) 165 | auth = requests.auth.HTTPBasicAuth('root', module.params['root_password']) 166 | header = {'Content-Type': 'application/json'} 167 | 168 | try: 169 | resp = requests.get(url=url, verify=False, 170 | auth=auth, headers=header) 171 | 172 | except requests.exceptions.ConnectionError: 173 | return False 174 | 175 | return resp.status_code, resp.content 176 | 177 | 178 | def wait_for_api(module, sleep_time=15): 179 | status_poll_count = 0 180 | while status_poll_count < 30: 181 | api_status = check_vli_api(module) 182 | if api_status: 183 | if api_status[0] == 200: 184 | return True 185 | else: 186 | status_poll_count 
+= 1 187 | time.sleep(sleep_time) 188 | else: 189 | status_poll_count += 1 190 | time.sleep(sleep_time) 191 | 192 | if status_poll_count == 30: 193 | return False 194 | 195 | 196 | def wait_for_vm(vm, sleep_time=15): 197 | 198 | vm_pool_count = 0 199 | while vm_pool_count < 30: 200 | connected = (vm.runtime.connectionState == 'connected') 201 | 202 | if connected: 203 | powered_on = (vm.runtime.powerState == 'poweredOn') 204 | 205 | if powered_on: 206 | return True 207 | else: 208 | vm_pool_count += 1 209 | time.sleep(sleep_time) 210 | else: 211 | vm_pool_count += 1 212 | time.sleep(sleep_time) 213 | 214 | if vm_pool_count == 30: 215 | return False 216 | 217 | 218 | def find_virtual_machine(content, searched_vm_name): 219 | virtual_machines = get_all_objs(content, [vim.VirtualMachine]) 220 | for vm in virtual_machines: 221 | if vm.name == searched_vm_name: 222 | return vm 223 | return None 224 | 225 | 226 | def state_delete_vm(module): 227 | changed = False 228 | 229 | vm = vc['vli_vm'] 230 | 231 | if vm.runtime.powerState == 'poweredOn': 232 | power_off_task = vm.PowerOffVM_Task() 233 | wait_for_task(power_off_task) 234 | 235 | try: 236 | delete_vm_task = vm.Destroy_Task() 237 | changed, result = wait_for_task(delete_vm_task) 238 | except Exception as e: 239 | module.fail_json(msg="Failed deleting vm: {}".format(str(e))) 240 | 241 | module.exit_json(changed=changed) 242 | 243 | 244 | def state_exit_unchanged(module): 245 | module.exit_json(changed=False, msg="EXIT UNCHANGED") 246 | 247 | 248 | def state_create_vm(module): 249 | 250 | ovftool_exec = '{}/ovftool'.format(module.params['ovftool_path']) 251 | ova_file = '{}/{}'.format(module.params['path_to_ova'], module.params['ova_file']) 252 | vi_string = 'vi://{}:{}@{}/{}/host/{}/'.format(module.params['username'], 253 | module.params['password'], module.params['hostname'], 254 | module.params['datacenter'], module.params['cluster']) 255 | 256 | ova_tool_result = module.run_command([ovftool_exec, 257 | '--acceptAllEulas', 258 | '--skipManifestCheck', 259 | '--overwrite', 260 | '--powerOn', 261 | '--noSSLVerify', 262 | '--allowExtraConfig', 263 | '--name={}'.format(module.params['vmname']), 264 | '--prop:vm.rootpw={}'.format(module.params['root_password']), 265 | '--diskMode={}'.format(module.params['disk_mode']), 266 | '--datastore={}'.format(module.params['datastore']), 267 | '--net:Network 1={}'.format(module.params['network']), 268 | '--prop:vami.ip0.VMware_vCenter_Log_Insight={}'.format(module.params['ip_addr']), 269 | '--prop:vami.gateway.VMware_vCenter_Log_Insight={}'.format(module.params['gateway']), 270 | '--prop:vami.DNS.VMware_vCenter_Log_Insight={}'.format(','.join(module.params['dns_ip'])), 271 | 272 | '--prop:vami.netmask0.VMware_vCenter_Log_Insight={}'.format(module.params['netmask']), 273 | '--prop:vami.hostname.VMware_vCenter_Log_Insight={}'.format(module.params['vli_hostname']), 274 | '--deploymentOption={}'.format(module.params['deployment_size']), 275 | ova_file, 276 | vi_string]) 277 | 278 | if ova_tool_result[0] != 0: 279 | module.fail_json(msg='Failed to deploy OVA, error message from ovftool is: {}'.format(ova_tool_result[1])) 280 | 281 | return ova_tool_result[0] 282 | 283 | 284 | 285 | def main(): 286 | argument_spec = vmware_argument_spec() 287 | 288 | argument_spec.update( 289 | dict( 290 | vmname=dict(required=True, type='str'), 291 | ovftool_path=dict(required=True, type='str'), 292 | path_to_ova=dict(required=True, type='str'), 293 | ova_file=dict(required=True, type='str'), 294 | 
datacenter=dict(required=True, type='str'), 295 | cluster=dict(required=True, type='str'), 296 | disk_mode=dict(default='thin', type='str'), 297 | datastore=dict(required=True, type='str'), 298 | network=dict(required=True, type='str'), 299 | vli_hostname=dict(required=True, type='str'), 300 | gateway=dict(required=True, type='str'), 301 | dns_ip=dict(required=True, type='list'), 302 | ip_addr=dict(required=True, type='str'), 303 | netmask=dict(required=True, type='str'), 304 | ip_protocol=dict(type='str', default="IPv4"), 305 | deployment_size=dict(required=True, type='str'), 306 | root_password=dict(required=True, type='str', no_log=True), 307 | state=dict(default='present', choices=['present', 'absent']), 308 | ) 309 | ) 310 | 311 | module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) 312 | 313 | if not IMPORTS: 314 | module.fail_json(msg="Failed to import modules") 315 | 316 | content = connect_to_api(module) 317 | 318 | vli_vm = find_virtual_machine(content, module.params['vmname']) 319 | 320 | vc['vli_vm'] = vli_vm 321 | 322 | vli_vm_states = { 323 | 'absent': { 324 | 'present': state_delete_vm, 325 | 'absent': state_exit_unchanged, 326 | }, 327 | 'present': { 328 | 'present': state_exit_unchanged, 329 | 'absent': state_create_vm 330 | } 331 | } 332 | 333 | desired_state = module.params['state'] 334 | 335 | if vli_vm: 336 | current_state = 'present' 337 | else: 338 | current_state = 'absent' 339 | 340 | vli_vm_states[desired_state][current_state](module) 341 | 342 | 343 | vli_vm = find_virtual_machine(content, module.params['vmname']) 344 | 345 | if not vli_vm: 346 | module.fail_json(changed=False, msg="Failed to find vm") 347 | 348 | if not wait_for_vm(vli_vm): 349 | module.fail_json(msg="VM failed to power on") 350 | 351 | if not wait_for_api(module): 352 | module.fail_json(msg="Failed to hit api") 353 | 354 | module.exit_json(changed=True, result="Success") 355 | 356 | 357 | from ansible.module_utils.basic import * 358 | from ansible.module_utils.vmware import * 359 | 360 | if __name__ == '__main__': 361 | main() 362 | -------------------------------------------------------------------------------- /vcenter_vmkmigration.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | # (c) 2015, Joseph Callen 5 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved. 6 | # 7 | # This file is part of Ansible 8 | # 9 | # Ansible is free software: you can redistribute it and/or modify 10 | # it under the terms of the GNU General Public License as published by 11 | # the Free Software Foundation, either version 3 of the License, or 12 | # (at your option) any later version. 13 | # 14 | # Ansible is distributed in the hope that it will be useful, 15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 | # GNU General Public License for more details. 18 | # 19 | # You should have received a copy of the GNU General Public License 20 | # along with Ansible. If not, see . 
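# For orientation, a condensed sketch of the host network reconfiguration this
# module performs further below: the VSS portgroup is removed and the vmk is
# re-pointed at a distributed portgroup in a single UpdateNetworkConfig("modify")
# call. The variable values here are illustrative assumptions:
#
#   config = vim.host.NetworkConfig()
#   config.portgroup = [create_port_group_config('temp_vswitch', 'esx-mgmt')]
#   config.vnic = [create_host_vnic_config(dv_switch.uuid, pg.key, 'vmk1')]
#   host_network_system.UpdateNetworkConfig(config, "modify")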
21 | 22 | 23 | DOCUMENTATION = ''' 24 | --- 25 | module: vcenter_vmkmigration 26 | short_description: Migrate a VMK interface from VSS to VDS 27 | description: 28 | - Migrate a VMK interface from VSS to VDS 29 | options: 30 | vcenter_hostname: 31 | description: 32 | - The hostname or IP address of the vSphere vCenter API server 33 | required: True 34 | vcenter_username: 35 | description: 36 | - The username of the vSphere vCenter 37 | required: True 38 | vcenter_password: 39 | description: 40 | - The password of the vSphere vCenter 41 | required: True 42 | vcenter_port: 43 | description: 44 | - The port number of the vSphere vCenter 45 | esxi_hostname: 46 | description: 47 | - ESXi hostname to be managed 48 | required: True 49 | device: 50 | description: 51 | - VMK interface name 52 | required: True 53 | current_switch_name: 54 | description: 55 | - Switch VMK interface is currently on 56 | required: True 57 | current_portgroup_name: 58 | description: 59 | - Portgroup name VMK interface is currently on 60 | required: True 61 | migrate_switch_name: 62 | description: 63 | - Switch name to migrate VMK interface to 64 | required: True 65 | migrate_portgroup_name: 66 | description: 67 | - Portgroup name to migrate VMK interface to 68 | required: True 69 | ''' 70 | 71 | EXAMPLES = ''' 72 | Example from Ansible playbook 73 | 74 | - name: Migrate a VMK interface from VSS to VDS 75 | vcenter_vmkmigration: 76 | vcenter_hostname: vcsa_host 77 | vcenter_username: vcsa_user 78 | vcenter_password: vcsa_pass 79 | vcenter_port: vcsa_port 80 | esxi_hostname: esxi_hostname 81 | device: vmk1 82 | current_switch_name: temp_vswitch 83 | current_portgroup_name: esx-mgmt 84 | migrate_switch_name: dvSwitch 85 | migrate_portgroup_name: Management 86 | ''' 87 | 88 | 89 | try: 90 | import atexit 91 | import time 92 | import requests 93 | import sys 94 | import collections 95 | from pyVim import connect 96 | from pyVmomi import vim, vmodl 97 | HAS_PYVMOMI = True 98 | except ImportError: 99 | HAS_PYVMOMI = False 100 | 101 | def state_exit_unchanged(module): 102 | module.exit_json(changed=False) 103 | 104 | 105 | def state_migrate_vds_vss(module): 106 | module.exit_json(changed=False, msg="Currently Not Implemented") 107 | 108 | 109 | def connect_to_vcenter(module, disconnect_atexit=True): 110 | hostname = module.params['vcenter_hostname'] 111 | username = module.params['vcenter_username'] 112 | password = module.params['vcenter_password'] 113 | port = module.params['vcenter_port'] 114 | 115 | try: 116 | service_instance = connect.SmartConnect( 117 | host=hostname, 118 | user=username, 119 | pwd=password, 120 | port=port 121 | ) 122 | 123 | if disconnect_atexit: 124 | atexit.register(connect.Disconnect, service_instance) 125 | 126 | return service_instance.RetrieveContent() 127 | except vim.fault.InvalidLogin as invalid_login: 128 | module.fail_json(msg=invalid_login.msg, apierror=str(invalid_login)) 129 | except requests.ConnectionError as connection_error: 130 | module.fail_json(msg="Unable to connect to vCenter or ESXi API on TCP/443.", apierror=str(connection_error)) 131 | 132 | def create_host_vnic_config(dv_switch_uuid, portgroup_key, device): 133 | 134 | host_vnic_config = vim.host.VirtualNic.Config() 135 | host_vnic_config.spec = vim.host.VirtualNic.Specification() 136 | host_vnic_config.changeOperation = "edit" 137 | host_vnic_config.device = device 138 | host_vnic_config.portgroup = "" 139 | host_vnic_config.spec.distributedVirtualPort = vim.dvs.PortConnection() 140 | 
host_vnic_config.spec.distributedVirtualPort.switchUuid = dv_switch_uuid 141 | host_vnic_config.spec.distributedVirtualPort.portgroupKey = portgroup_key 142 | 143 | return host_vnic_config 144 | 145 | def create_port_group_config(switch_name, portgroup_name): 146 | port_group_config = vim.host.PortGroup.Config() 147 | port_group_config.spec = vim.host.PortGroup.Specification() 148 | 149 | port_group_config.changeOperation = "remove" 150 | port_group_config.spec.name = portgroup_name 151 | port_group_config.spec.vlanId = -1 152 | port_group_config.spec.vswitchName = switch_name 153 | port_group_config.spec.policy = vim.host.NetworkPolicy() 154 | 155 | return port_group_config 156 | 157 | def state_migrate_vss_vds(module): 158 | content = connect_to_vcenter(module) 159 | esxi_hostname = module.params['esxi_hostname'] 160 | host_system = find_hostsystem_by_name(content, esxi_hostname) 161 | migrate_switch_name = module.params['migrate_switch_name'] 162 | migrate_portgroup_name = module.params['migrate_portgroup_name'] 163 | current_portgroup_name = module.params['current_portgroup_name'] 164 | current_switch_name = module.params['current_switch_name'] 165 | device = module.params['device'] 166 | 167 | host_network_system = host_system.configManager.networkSystem 168 | 169 | dv_switch = find_dvs_by_name(content, migrate_switch_name) 170 | pg = find_vdspg_by_name(dv_switch, migrate_portgroup_name) 171 | 172 | config = vim.host.NetworkConfig() 173 | config.portgroup = [create_port_group_config(current_switch_name, current_portgroup_name)] 174 | config.vnic = [create_host_vnic_config(dv_switch.uuid, pg.key, device)] 175 | host_network_system.UpdateNetworkConfig(config, "modify") 176 | module.exit_json(changed=True) 177 | 178 | def check_vmk_current_state(module): 179 | 180 | device = module.params['device'] 181 | esxi_hostname = module.params['esxi_hostname'] 182 | current_portgroup_name = module.params['current_portgroup_name'] 183 | current_switch_name = module.params['current_switch_name'] 184 | 185 | content = connect_to_vcenter(module) 186 | 187 | host_system = find_hostsystem_by_name(content, esxi_hostname) 188 | 189 | for vnic in host_system.configManager.networkSystem.networkInfo.vnic: 190 | if vnic.device == device: 191 | if vnic.spec.distributedVirtualPort is None: 192 | if vnic.portgroup == current_portgroup_name: 193 | return "migrate_vss_vds" 194 | else: 195 | dvs = find_dvs_by_name(content, current_switch_name) 196 | if dvs is None: 197 | return "migrated" 198 | if vnic.spec.distributedVirtualPort.switchUuid == dvs.uuid: 199 | return "migrate_vds_vss" 200 | 201 | def get_all_objects(content, vimtype): 202 | obj = {} 203 | container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True) 204 | for managed_object_ref in container.view: 205 | obj.update({managed_object_ref: managed_object_ref.name}) 206 | return obj 207 | 208 | def find_dvs_by_name(content, vds_name): 209 | vdSwitches = get_all_objects(content, [vim.dvs.VmwareDistributedVirtualSwitch]) 210 | for vds in vdSwitches: 211 | if vds_name == vds.name: 212 | return vds 213 | return None 214 | 215 | def find_vdspg_by_name(vdSwitch, portgroup_name): 216 | portgroups = vdSwitch.portgroup 217 | for pg in portgroups: 218 | if pg.name == portgroup_name: 219 | return pg 220 | return None 221 | 222 | def find_hostsystem_by_name(content, host_name): 223 | host = find_vcenter_object_by_name(content, vim.HostSystem, host_name) 224 | if host is not None: 225 | return host 226 | else: 227 | # host was not found in the inventory 228 | 
return None 229 | 230 | def find_vcenter_object_by_name(content, vimtype, object_name): 231 | 232 | vcenter_object = get_all_objects(content, [vimtype]) 233 | 234 | for k, v in vcenter_object.items(): 235 | if v == object_name: 236 | return k 237 | else: 238 | return None 239 | 240 | def main(): 241 | 242 | argument_spec = dict( 243 | vcenter_hostname=dict(type='str', required=True), 244 | vcenter_port=dict(type='str'), 245 | vcenter_username=dict(type='str', aliases=['user', 'admin'], required=True), 246 | vcenter_password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True), 247 | esxi_hostname=dict(required=True, type='str'), 248 | device=dict(required=True, type='str'), 249 | current_switch_name=dict(required=True, type='str'), 250 | current_portgroup_name=dict(required=True, type='str'), 251 | migrate_switch_name=dict(required=True, type='str'), 252 | migrate_portgroup_name=dict(required=True, type='str')) 253 | 254 | module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) 255 | 256 | if not HAS_PYVMOMI: 257 | module.fail_json(msg='pyvmomi required for this module') 258 | 259 | try: 260 | vmk_migration_states = { 261 | 'migrate_vss_vds': state_migrate_vss_vds, 262 | 'migrate_vds_vss': state_migrate_vds_vss, 263 | 'migrated': state_exit_unchanged 264 | } 265 | 266 | vmk_migration_states[check_vmk_current_state(module)](module) 267 | 268 | except vmodl.RuntimeFault as runtime_fault: 269 | module.fail_json(msg=runtime_fault.msg) 270 | except vmodl.MethodFault as method_fault: 271 | module.fail_json(msg=method_fault.msg) 272 | except Exception as e: 273 | module.fail_json(msg=str(e)) 274 | 275 | 276 | from ansible.module_utils.basic import * 277 | 278 | 279 | if __name__ == '__main__': 280 | main() 281 | -------------------------------------------------------------------------------- /vcenter_vmmigration.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | # (c) 2015, Joseph Callen 5 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved. 6 | # 7 | # This file is part of Ansible 8 | # 9 | # Ansible is free software: you can redistribute it and/or modify 10 | # it under the terms of the GNU General Public License as published by 11 | # the Free Software Foundation, either version 3 of the License, or 12 | # (at your option) any later version. 13 | # 14 | # Ansible is distributed in the hope that it will be useful, 15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 | # GNU General Public License for more details. 18 | # 19 | # You should have received a copy of the GNU General Public License 20 | # along with Ansible. If not, see . 
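#
# Overview: the module below locates the named VM and the target distributed
# portgroup, rewrites the backing of every VirtualEthernetCard on the VM to a
# DistributedVirtualPortBackingInfo (dvSwitch UUID plus portgroup key), and
# applies the change with ReconfigVM_Task, waiting on the task to finish.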
21 | 22 | DOCUMENTATION = ''' 23 | --- 24 | module: vcenter_vmmigration 25 | short_description: Migrates a virtual machine from a standard vswitch to distributed 26 | description: 27 | - Migrates a virtual machine from a standard vswitch to distributed 28 | requirements: 29 | - "python >= 2.6" 30 | - PyVmomi 31 | options: 32 | vcenter_hostname: 33 | description: 34 | - The hostname or IP address of the vSphere vCenter API server 35 | required: True 36 | vcenter_username: 37 | description: 38 | - The username of the vSphere vCenter 39 | required: True 40 | vcenter_password: 41 | description: 42 | - The password of the vSphere vCenter 43 | required: True 44 | vcenter_port: 45 | description: 46 | - The port number of the vSphere vCenter 47 | vm_name: 48 | description: 49 | - Name of the virtual machine to migrate to a dvSwitch 50 | required: True 51 | dvportgroup_name: 52 | description: 53 | - Name of the portgroup to migrate to the virtual machine to 54 | required: True 55 | ''' 56 | 57 | EXAMPLES = ''' 58 | - name: Migrate VM from standard vswitch to vDS 59 | vcenter_vmmigration: 60 | vcenter_hostname: vcenter_ip_or_hostname 61 | vcenter_username: vcenter_username 62 | vcenter_password: vcenter_password 63 | vcenter_port: vcenter_port 64 | vm_name: virtual_machine_name 65 | dvportgroup_name: distributed_portgroup_name 66 | ''' 67 | 68 | try: 69 | import atexit 70 | import time 71 | import requests 72 | import sys 73 | import collections 74 | from pyVim import connect 75 | from pyVmomi import vim, vmodl 76 | 77 | HAS_PYVMOMI = True 78 | except ImportError: 79 | HAS_PYVMOMI = False 80 | 81 | def connect_to_vcenter(module, disconnect_atexit=True): 82 | hostname = module.params['vcenter_hostname'] 83 | username = module.params['vcenter_username'] 84 | password = module.params['vcenter_password'] 85 | port = module.params['vcenter_port'] 86 | 87 | try: 88 | service_instance = connect.SmartConnect( 89 | host=hostname, 90 | user=username, 91 | pwd=password, 92 | port=port 93 | ) 94 | if disconnect_atexit: 95 | atexit.register(connect.Disconnect, service_instance) 96 | return service_instance.RetrieveContent() 97 | except vim.fault.InvalidLogin, invalid_login: 98 | module.fail_json(msg=invalid_login.msg, apierror=str(invalid_login)) 99 | except requests.ConnectionError, connection_error: 100 | module.fail_json(msg="Unable to connect to vCenter or ESXi API on TCP/443.", apierror=str(connection_error)) 101 | 102 | def get_all_objs(content, vimtype): 103 | obj = {} 104 | container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True) 105 | for managed_object_ref in container.view: 106 | obj.update({managed_object_ref: managed_object_ref.name}) 107 | return obj 108 | 109 | def wait_for_task(task): 110 | while True: 111 | if task.info.state == vim.TaskInfo.State.success: 112 | return True, task.info.result 113 | if task.info.state == vim.TaskInfo.State.error: 114 | try: 115 | raise Exception(task.info.error) 116 | except AttributeError: 117 | raise Exception("An unknown error has occurred") 118 | if task.info.state == vim.TaskInfo.State.running: 119 | time.sleep(15) 120 | if task.info.state == vim.TaskInfo.State.queued: 121 | time.sleep(15) 122 | 123 | 124 | def _find_dvspg_by_name(content, pg_name): 125 | 126 | vmware_distributed_port_group = get_all_objs(content, [vim.dvs.DistributedVirtualPortgroup]) 127 | for dvspg in vmware_distributed_port_group: 128 | if dvspg.name == pg_name: 129 | return dvspg 130 | return None 131 | 132 | 133 | def find_vm_by_name(content, vm_name): 134 
| 135 | virtual_machines = get_all_objs(content, [vim.VirtualMachine]) 136 | for vm in virtual_machines: 137 | if vm.name == vm_name: 138 | return vm 139 | return None 140 | 141 | 142 | def migrate_network_adapter_vds(module): 143 | vm_name = module.params['vm_name'] 144 | dvportgroup_name = module.params['dvportgroup_name'] 145 | content = module.params['content'] 146 | 147 | vm_configspec = vim.vm.ConfigSpec() 148 | nic = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo() 149 | port = vim.dvs.PortConnection() 150 | devicespec = vim.vm.device.VirtualDeviceSpec() 151 | 152 | pg = _find_dvspg_by_name(content, dvportgroup_name) 153 | 154 | if pg is None: 155 | module.fail_json(msg="The standard portgroup was not found") 156 | 157 | vm = find_vm_by_name(content, vm_name) 158 | if vm is None: 159 | module.fail_json(msg="The virtual machine was not found") 160 | 161 | dvswitch = pg.config.distributedVirtualSwitch 162 | port.switchUuid = dvswitch.uuid 163 | port.portgroupKey = pg.key 164 | nic.port = port 165 | 166 | for device in vm.config.hardware.device: 167 | if isinstance(device, vim.vm.device.VirtualEthernetCard): 168 | devicespec.device = device 169 | devicespec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit 170 | devicespec.device.backing = nic 171 | vm_configspec.deviceChange.append(devicespec) 172 | 173 | task = vm.ReconfigVM_Task(vm_configspec) 174 | changed, result = wait_for_task(task) 175 | module.exit_json(changed=changed) 176 | 177 | def state_exit_unchanged(module): 178 | module.exit_json(changed=False) 179 | 180 | 181 | def check_vm_network_state(module): 182 | vm_name = module.params['vm_name'] 183 | try: 184 | content = connect_to_vcenter(module) 185 | module.params['content'] = content 186 | vm = find_vm_by_name(content, vm_name) 187 | module.params['vm'] = vm 188 | if vm is None: 189 | module.fail_json(msg="A virtual machine with name %s does not exist" % vm_name) 190 | for device in vm.config.hardware.device: 191 | if isinstance(device, vim.vm.device.VirtualEthernetCard): 192 | if isinstance(device.backing, vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo): 193 | return 'present' 194 | return 'absent' 195 | except vmodl.RuntimeFault as runtime_fault: 196 | module.fail_json(msg=runtime_fault.msg) 197 | except vmodl.MethodFault as method_fault: 198 | module.fail_json(msg=method_fault.msg) 199 | 200 | 201 | def main(): 202 | argument_spec = dict( 203 | vcenter_hostname=dict(required=True, type='str'), 204 | vcenter_username=dict(required=True, type='str'), 205 | vcenter_password=dict(required=True, type='str'), 206 | vcenter_port=dict(required=True, type='int'), 207 | vm_name=dict(required=True, type='str'), 208 | dvportgroup_name=dict(required=True, type='str')) 209 | 210 | module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) 211 | if not HAS_PYVMOMI: 212 | module.fail_json(msg='pyvmomi is required for this module') 213 | 214 | vm_nic_states = { 215 | 'absent': migrate_network_adapter_vds, 216 | 'present': state_exit_unchanged, 217 | } 218 | 219 | vm_nic_states[check_vm_network_state(module)](module) 220 | 221 | from ansible.module_utils.basic import * 222 | 223 | if __name__ == '__main__': 224 | main() -------------------------------------------------------------------------------- /vcenter_vro_deploy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # (c) 2015, Joseph Callen 4 | # Portions Copyright (c) 2015 VMware, Inc. 
All rights reserved. 5 | # 6 | # This file is part of Ansible 7 | # 8 | # Ansible is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # Ansible is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with Ansible. If not, see . 20 | 21 | 22 | DOCUMENTATION = ''' 23 | module: vcenter_vro_deploy 24 | short_description: Deploys (creates) or deletes a vRO ova on a vcenter cluster 25 | description: 26 | Deploys (creates) or deletes a vRO ova on a vcenter cluster. The module waits for the vm to 27 | power on and pings the vRO api before exiting, unless the deployment failed. 28 | requirements: 29 | - pyvmomi 6 30 | - ansible 2.x 31 | - ovftool 32 | Tested on: 33 | - vcenter 6.0 34 | - pyvmomi 6 35 | - esx 6 36 | - ansible 2.1.2 37 | - VMware-vCO-Appliance-6.0.3.0-3000579_OVF10.ova 38 | options: 39 | hostname: 40 | description: 41 | - The hostname or IP address of the vSphere vCenter API server 42 | required: True 43 | username: 44 | description: 45 | - The username of the vSphere vCenter with Admin rights 46 | required: True 47 | aliases: ['user', 'admin'] 48 | password: 49 | description: 50 | - The password of the vSphere vCenter user 51 | required: True 52 | aliases: ['pass', 'pwd'] 53 | datacenter: 54 | description: 55 | - The name of the datacenter. 56 | required: True 57 | cluster: 58 | description: 59 | - The name of the vCenter cluster 60 | required: True 61 | vmname: 62 | description: 63 | - The name of the vm in vcenter 64 | required: True 65 | ovftool_path: 66 | description: 67 | - The path where the ovftool is installed 68 | ex: /usr/local/bin/ovftool 69 | path_to_ova: 70 | description: 71 | - The path where the ova is located 72 | required: True 73 | ova_file: 74 | description: 75 | - The name of the ova file 76 | required: True 77 | disk_mode: 78 | description: 79 | - The disk mode for the deployment of the ova 80 | default: thin 81 | required: False 82 | datastore: 83 | description: 84 | - Valid vcenter datastore 85 | required: True 86 | network: 87 | description: 88 | - Name of the network/portgroup for the appliance 89 | required: True 90 | vro_gateway: 91 | description: 92 | - gateway information for the appliance 93 | required: True 94 | vro_dns_ip: 95 | description: 96 | - list of dns server ip addresses, the first two entries are used 97 | type: list 98 | vro_ip_address: 99 | description: 100 | - ip address for the appliance 101 | required: True 102 | vro_netmask: 103 | description: 104 | - netmask information for the appliance 105 | required: True 106 | vro_root_pass: 107 | description: 108 | - root password for the appliance 109 | required: True 110 | validate_certs: 111 | description: 112 | - Validate SSL certificates for the vCenter connection 113 | type: bool 114 | vro_hostname: 115 | description: 116 | - hostname for the appliance 117 | required: True 118 | vro_domain: 119 | description: 120 | - The domain name for the appliance 121 | required: True 122 | enable_ssh: 123 | description: 124 | - Enable or disable ssh on appliance 125 | type: bool 126 | required: True 127 | state: 128 | description: 129 | - Desired state of the appliance 130 | choices: ['present', 'absent']
131 | required: True 132 | ''' 133 | 134 | EXAMPLES = ''' 135 | - name: deploy vRO Appliance 136 | vcenter_vro_deploy: 137 | hostname: "{{ vcenter }}" 138 | username: "{{ vcenter_user }}" 139 | password: "{{ vcenter_password }}" 140 | validate_certs: "{{ vcenter_validate_certs }}" 141 | vmname: "{{ vro_vm_name }}" 142 | ovftool_path: "{{ ovf_tool_path }}" 143 | path_to_ova: "{{ ova_path }}" 144 | ova_file: "{{ vro_ova }}" 145 | datacenter: "{{ ib_vcenter_datacenter_name }}" 146 | cluster: "{{ ib_vcenter_mgmt_esx_cluster_name }}" 147 | disk_mode: "{{ disk_mode }}" 148 | datastore: "{{ ib_vcenter_mgmt_esx_cluster_name }}_VSAN_DS" 149 | network: "{{ mgmt_vds_viomgmt }}" 150 | vro_root_pass: "{{ vro_rootpass }}" 151 | enable_ssh: True 152 | vro_hostname: "{{ vro_hostname }}" 153 | vro_gateway: "{{ vro_gateway }}" 154 | vro_domain: "{{ vro_domain_name }}" 155 | vro_dns_ip: "{{ ova_dns_list }}" 156 | vro_ip_address: "{{ vro_ip }}" 157 | vro_netmask: "{{ vro_netmask }}" 158 | state: "{{ global_state }}" 159 | tags: 160 | - deploy_vro_ova 161 | ''' 162 | 163 | RETURN = ''' 164 | description: TBD 165 | returned: 166 | type: 167 | sample: 168 | ''' 169 | 170 | try: 171 | import time 172 | import requests 173 | from pyVmomi import vim, vmodl 174 | IMPORTS = True 175 | except ImportError: 176 | IMPORTS = False 177 | 178 | 179 | vc = {} 180 | 181 | def check_vro_api(module): 182 | 183 | url = "https://{}:8281/vco/api/".format(module.params['vro_ip_address']) 184 | auth = requests.auth.HTTPBasicAuth('vcoadmin','vcoadmin') 185 | header = {'Content-Type': 'application/json', 'Accept': 'application/json'} 186 | 187 | try: 188 | resp = requests.get(url=url, verify=False, 189 | auth=auth, headers=header) 190 | 191 | except requests.exceptions.ConnectionError: 192 | return False 193 | 194 | return resp.status_code, resp.content 195 | 196 | 197 | def wait_for_api(module, sleep_time=15): 198 | status_poll_count = 0 199 | while status_poll_count < 30: 200 | api_status = check_vro_api(module) 201 | if api_status: 202 | if api_status[0] == 200: 203 | return True 204 | else: 205 | status_poll_count += 1 206 | time.sleep(sleep_time) 207 | else: 208 | status_poll_count += 1 209 | time.sleep(sleep_time) 210 | 211 | if status_poll_count == 30: 212 | return False 213 | 214 | 215 | 216 | def wait_for_vm(vm, sleep_time=15): 217 | 218 | vm_pool_count = 0 219 | while vm_pool_count < 30: 220 | connected = (vm.runtime.connectionState == 'connected') 221 | 222 | if connected: 223 | powered_on = (vm.runtime.powerState == 'poweredOn') 224 | 225 | if powered_on: 226 | return True 227 | else: 228 | vm_pool_count += 1 229 | time.sleep(sleep_time) 230 | else: 231 | vm_pool_count += 1 232 | time.sleep(sleep_time) 233 | 234 | if vm_pool_count == 30: 235 | return False 236 | 237 | 238 | 239 | def find_virtual_machine(content, searched_vm_name): 240 | virtual_machines = get_all_objs(content, [vim.VirtualMachine]) 241 | for vm in virtual_machines: 242 | if vm.name == searched_vm_name: 243 | return vm 244 | return None 245 | 246 | 247 | def state_delete_vm(module): 248 | changed = False 249 | 250 | vm = vc['vro_vm'] 251 | 252 | if vm.runtime.powerState == 'poweredOn': 253 | power_off_task = vm.PowerOffVM_Task() 254 | wait_for_task(power_off_task) 255 | 256 | try: 257 | delete_vm_task = vm.Destroy_Task() 258 | changed, result = wait_for_task(delete_vm_task) 259 | except Exception as e: 260 | module.fail_json(msg="Failed deleting vm: {}".format(str(e))) 261 | 262 | module.exit_json(changed=changed) 263 | 264 | 265 | def 
state_exit_unchanged(module): 266 | module.exit_json(changed=False, msg="EXIT UNCHANED") 267 | 268 | 269 | def state_create_vm(module): 270 | 271 | ovftool_exec = '{}/ovftool'.format(module.params['ovftool_path']) 272 | ova_file = '{}/{}'.format(module.params['path_to_ova'], module.params['ova_file']) 273 | vi_string = 'vi://{}:{}@{}/{}/host/{}/'.format(module.params['username'], 274 | module.params['password'], module.params['hostname'], 275 | module.params['datacenter'], module.params['cluster']) 276 | 277 | ova_tool_result = module.run_command([ovftool_exec, 278 | '--acceptAllEulas', 279 | '--skipManifestCheck', 280 | '--powerOn', 281 | '--noSSLVerify', 282 | '--allowExtraConfig', 283 | '--diskMode={}'.format(module.params['disk_mode']), 284 | '--datastore={}'.format(module.params['datastore']), 285 | '--network={}'.format(module.params['network']), 286 | '--name={}'.format(module.params['vmname']), 287 | '--prop:varoot-password={}'.format(module.params['vro_root_pass']), 288 | '--prop:vcoconf-password={}'.format(module.params['vro_root_pass']), 289 | '--prop:va-ssh-enabled={}'.format(module.params['enable_ssh']), 290 | '--prop:vami.hostname={}'.format(module.params['vro_hostname']), 291 | '--prop:vami.gateway.VMware_vRealize_Orchestrator_Appliance={}'.format(module.params['vro_gateway']), 292 | '--prop:vami.domain.VMware_vRealize_Orchestrator_Appliance={}'.format(module.params['vro_domain']), 293 | '--prop:vami.DSN.VMware_vRealize_Orchestrator_Appliance={},{}'.format(module.params['vro_dns_ip'][0], 294 | module.params['vro_dns_ip'][1]), 295 | '--prop:vami.ip0.VMware_vRealize_Orchestrator_Appliance={}'.format(module.params['vro_ip_address']), 296 | '--prop:vami.netmask0.VMware_vRealize_Orchestrator_Appliance={}'.format(module.params['vro_netmask']), 297 | ova_file, 298 | vi_string]) 299 | 300 | if ova_tool_result[0] != 0: 301 | module.fail_json(msg='Failed to deploy OVA, error message from ovftool is: {}'.format(ova_tool_result[1])) 302 | 303 | return ova_tool_result[0] 304 | 305 | 306 | def main(): 307 | argument_spec = vmware_argument_spec() 308 | 309 | argument_spec.update( 310 | dict( 311 | vmname=dict(required=True, type='str'), 312 | ovftool_path=dict(required=True, type='str'), 313 | path_to_ova=dict(required=True, type='str'), 314 | ova_file=dict(required=True, type='str'), 315 | datacenter=dict(required=True, type='str'), 316 | cluster=dict(required=True, type='str'), 317 | disk_mode=dict(default='thin', type='str'), 318 | datastore=dict(required=True, type='str'), 319 | network=dict(required=True, type='str'), 320 | vro_root_pass=dict(required=True, type='str', no_log=True), 321 | enable_ssh=dict(required=True, type='bool'), 322 | vro_hostname=dict(required=True, type='str'), 323 | vro_gateway=dict(required=True, type='str'), 324 | vro_domain=dict(required=True, type='str'), 325 | vro_dns_ip=dict(required=True, type='list'), 326 | vro_ip_address=dict(required=True, type='str'), 327 | vro_netmask=dict(required=True, type='str'), 328 | state=dict(default='present', choices=['present', 'absent']), 329 | ) 330 | ) 331 | 332 | module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) 333 | 334 | if not IMPORTS: 335 | module.fail_json(msg="Failed to import modules") 336 | 337 | content = connect_to_api(module) 338 | 339 | vro_vm = find_virtual_machine(content, module.params['vmname']) 340 | 341 | vc['vro_vm'] = vro_vm 342 | 343 | vro_vm_states = { 344 | 'absent': { 345 | 'present': state_delete_vm, 346 | 'absent': state_exit_unchanged, 347 | }, 348 | 'present': { 
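# outer key: desired state requested by the play; inner key: whether the
# vRO VM currently exists in the vCenter inventory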
349 | 'present': state_exit_unchanged, 350 | 'absent': state_create_vm 351 | } 352 | } 353 | 354 | desired_state = module.params['state'] 355 | 356 | if vro_vm: 357 | current_state = 'present' 358 | else: 359 | current_state = 'absent' 360 | 361 | vro_vm_states[desired_state][current_state](module) 362 | 363 | vro_vm = find_virtual_machine(content, module.params['vmname']) 364 | 365 | if not vro_vm: 366 | module.fail_json(changed=False, msg="Failed to find vm") 367 | 368 | if not wait_for_vm(vro_vm): 369 | module.fail_json(msg="VM failed to power on") 370 | 371 | if not wait_for_api(module): 372 | module.fail_json(msg="Failed to hit api") 373 | 374 | module.exit_json(changed=True, result="Success") 375 | 376 | 377 | from ansible.module_utils.basic import * 378 | from ansible.module_utils.vmware import * 379 | 380 | if __name__ == '__main__': 381 | main() 382 | -------------------------------------------------------------------------------- /vcenter_vsan_diskcheck.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # (c) 2015, Joseph Callen 4 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved. 5 | # 6 | # This file is part of Ansible 7 | # 8 | # Ansible is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # Ansible is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with Ansible. If not, see . 20 | 21 | 22 | DOCUMENTATION = ''' 23 | module: vcener_vsan_disk_check 24 | Short_description: Reads, Checks if all hosts in specified cluster has specified number of disks (ssd,hdd) 25 | eligible for creating disk groups. 26 | description: 27 | Reads, Checks if all hosts in specified cluster has specified number of disks (ssd,hdd) 28 | eligible for creating disk groups. 29 | requirements: 30 | - pyvmomi 6 31 | - ansible 2.x 32 | Tested on: 33 | - vcenter 6.0 34 | - pyvmomi 6 35 | - esx 6 36 | - ansible 2.1.2 37 | options: 38 | hostname: 39 | description: 40 | - The hostname or IP address of the vSphere vCenter API server 41 | required: True 42 | username: 43 | description: 44 | - The username of the vSphere vCenter with Admin rights 45 | required: True 46 | aliases: ['user', 'admin'] 47 | password: 48 | description: 49 | - The password of the vSphere vCenter user 50 | required: True 51 | aliases: ['pass', 'pwd'] 52 | datacenter_name: 53 | description: 54 | - The name of the datacenter. 
55 | required: True 56 | cluster_name: 57 | description: 58 | - The name of the vCenter cluster 59 | required: True 60 | num_ssd: 61 | description: 62 | - The number of ssd that should be eligible for disk group creation 63 | required: True 64 | num_hdd: 65 | description: 66 | - The number of hdd disks that should be available for disk group creation 67 | state: 68 | choices: ['present', 'absent'] 69 | required: True 70 | ''' 71 | 72 | EXAMPLE = ''' 73 | - name: VSAN Disk Check 74 | vcenter_vsan_disk_check: 75 | hostname: "{{ vcenter }}" 76 | username: "{{ vcenter_user }}" 77 | password: "{{ vcenter_password }}" 78 | validate_certs: "{{ vcenter_validate_certs }}" 79 | datacenter_name: "{{ datacenter.name }}" 80 | cluster_name: "{{ item.name }}" 81 | num_ssd: "{{ item.vsan.num_disk_groups }}" 82 | num_hdd: "{{ item.vsan.num_disks_per_group }}" 83 | state: 'present' 84 | with_items: 85 | - "{{ datacenter.clusters }}" 86 | register: disk_check 87 | tags: 88 | - vsan_disk_check 89 | 90 | - name: disk check debug 91 | debug: msg="HOST--> {{ item.item.name }} PASS Disk Check --> {{ item.result }}" 92 | failed_when: not item.result 93 | with_items: 94 | - "{{ disk_check.results }}" 95 | tags: 96 | - vsan_disk_check 97 | ''' 98 | 99 | try: 100 | from pyVmomi import vim, vmodl 101 | import collections 102 | HAS_PYVMOMI = True 103 | except ImportError: 104 | HAS_PYVMOMI = False 105 | 106 | 107 | vc = {} 108 | 109 | 110 | def check_hosts_disks(host): 111 | 112 | vsan_mgr = host.configManager.vsanSystem 113 | disks = host.config.storageDevice.scsiLun 114 | 115 | ssd = [] 116 | hdd =[] 117 | 118 | for disk in disks: 119 | check_eligible = vsan_mgr.QueryDisksForVsan(disk.canonicalName) 120 | 121 | for d in check_eligible: 122 | if d.state == 'eligible' and d.disk.ssd: 123 | ssd.append(d.disk.canonicalName) 124 | if d.state == 'eligible' and (not d.disk.ssd): 125 | hdd.append(d.disk.canonicalName) 126 | 127 | return ssd, hdd 128 | 129 | 130 | def state_exit_unchanged(module): 131 | module.exit_json(changed=False, msg="EXIT UNCHANGED") 132 | 133 | 134 | def state_delete(module): 135 | module.exit_json(changed=False, msg="CURRENTLY NOT SUPPORTED") 136 | 137 | 138 | def state_create(module): 139 | state = False 140 | results = [] 141 | 142 | num_ssd = module.params['num_ssd'] 143 | num_hdd = module.params['num_hdd'] 144 | 145 | for h in vc['hosts']: 146 | ssd, hdd = check_hosts_disks(h) 147 | 148 | if (len(ssd) >= num_ssd) and (len(hdd) >= num_hdd * num_ssd): 149 | results.append(True) 150 | else: 151 | results.append(False) 152 | 153 | if False not in results: 154 | state = True 155 | 156 | module.exit_json(changed=False, result=state) 157 | 158 | 159 | def check_vsan_state(module): 160 | 161 | content = connect_to_api(module) 162 | 163 | dc = find_datacenter_by_name(content, module.params['datacenter_name']) 164 | 165 | if not dc: 166 | module.fail_json(msg="Failed to get datacenter") 167 | 168 | vc['dc'] = dc 169 | 170 | cluster = find_cluster_by_name_datacenter(dc, module.params['cluster_name']) 171 | 172 | if not cluster: 173 | module.fail_json(msg="Failed to get cluster") 174 | 175 | vc['cluster'] = cluster 176 | 177 | if not cluster.host: 178 | module.fail_json(msg="No hosts in cluster") 179 | 180 | vc['hosts'] = cluster.host 181 | 182 | return 'absent' 183 | 184 | 185 | def main(): 186 | argument_spec = vmware_argument_spec() 187 | 188 | argument_spec.update( 189 | dict( 190 | datacenter_name=dict(required=True, type='str'), 191 | cluster_name=dict(required=True, type='str'), 192 | 
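# num_ssd is treated as the number of disk groups per host and num_hdd as the
# capacity disks per group, so state_create requires at least num_ssd eligible
# SSDs and num_hdd * num_ssd eligible HDDs on every host in the cluster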
num_ssd=dict(required=True, type='int'), 193 | num_hdd=dict(required=True, type='int'), 194 | state=dict(default='present', choices=['present', 'absent'], type='str'), 195 | ) 196 | ) 197 | 198 | module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) 199 | 200 | if not HAS_PYVMOMI: 201 | module.fail_json(msg='pyvmomi is required for this module') 202 | 203 | vsan_states = { 204 | 'absent': { 205 | 'absent': state_exit_unchanged, 206 | 'present': state_delete, 207 | }, 208 | 'present': { 209 | 'absent': state_create, 210 | 'present': state_exit_unchanged, 211 | } 212 | } 213 | 214 | desired_state = module.params['state'] 215 | current_state = check_vsan_state(module) 216 | 217 | vsan_states[desired_state][current_state](module) 218 | 219 | 220 | from ansible.module_utils.basic import * 221 | from ansible.module_utils.vmware import * 222 | 223 | if __name__ == '__main__': 224 | main() 225 | -------------------------------------------------------------------------------- /vcenter_vsan_healthperf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # (c) 2015, Joseph Callen 4 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved. 5 | # 6 | # This file is part of Ansible 7 | # 8 | # Ansible is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # Ansible is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with Ansible. If not, see . 20 | 21 | DOCUMENTATION = ''' 22 | module: vcener_vsan_healthperf 23 | Short_description: Creates (enables) Deletes (disables), Performance Health system for a vsan 24 | cluster 25 | description: 26 | Creates (enables) Deletes (disables), Performance Health system for a vsan cluster 27 | requirements: 28 | - pyvmomi 6 29 | - vsan SDK 30 | - ansible 2.x 31 | Tested on: 32 | - vcenter 6.0 33 | - pyvmomi 6 34 | - esx 6 35 | - ansible 2.1.2 36 | options: 37 | hostname: 38 | description: 39 | - The hostname or IP address of the vSphere vCenter API server 40 | required: True 41 | username: 42 | description: 43 | - The username of the vSphere vCenter with Admin rights 44 | required: True 45 | aliases: ['user', 'admin'] 46 | password: 47 | description: 48 | - The password of the vSphere vCenter user 49 | required: True 50 | aliases: ['pass', 'pwd'] 51 | datacenter_name: 52 | description: 53 | - The name of the datacenter. 
54 | required: True 55 | cluster_name: 56 | description: 57 | - The name of the vCenter cluster 58 | required: True 59 | state: 60 | choices: ['present', 'absent'] 61 | required: True 62 | ''' 63 | 64 | EXAMPLES = ''' 65 | - name: VSAN Config Perf Health 66 | vcenter_vsan_healthperf: 67 | hostname: "{{ vcenter }}" 68 | username: "{{ vcenter_user }}" 69 | password: "{{ vcenter_password }}" 70 | validate_certs: "{{ vcenter_validate_certs }}" 71 | datacenter_name: "{{ datacenter.name }}" 72 | cluster_name: "{{ item.name }}" 73 | state: 'present' 74 | with_items: 75 | - "{{ datacenter.clusters }}" 76 | tags: 77 | - vsan_perf_health 78 | ''' 79 | 80 | 81 | try: 82 | from pyVmomi import vim, vmodl 83 | from pyVim import connect 84 | import requests 85 | import ssl 86 | import atexit, time 87 | HAS_PYVMOMI = True 88 | except ImportError: 89 | HAS_PYVMOMI = False 90 | 91 | 92 | class VsanHealthPerf(object): 93 | ''' 94 | Enables or disables the vSAN performance service (stats object) on a cluster. 95 | ''' 96 | def __init__(self, module): 97 | self.module = module 98 | self.cluster_name = module.params['cluster_name'] 99 | self.datacenter_name = module.params['datacenter_name'] 100 | self.cluster = None 101 | self.datacenter = None 102 | self.si = None 103 | self.content = None 104 | self.vc_mos = None 105 | 106 | 107 | def state_exit_unchanged(self): 108 | self.module.exit_json(changed=False, msg="EXIT UNCHANGED") 109 | 110 | 111 | def state_destroy_health_perf(self, vsan_perf_system): 112 | try: 113 | deleted_stats = vsan_perf_system.VsanPerfDeleteStatsObject(self.cluster) 114 | except vim.fault.NotFound: 115 | self.state_exit_unchanged() 116 | except (vmodl.RuntimeFault, vim.fault.VsanFault, vmodl.MethodFault): 117 | self.module.fail_json(msg="Failed to delete stats object") 118 | 119 | self.module.exit_json(changed=deleted_stats, result=deleted_stats) 120 | 121 | 122 | def vsan_health_perf_create(self, vsan_perf_system): 123 | try: 124 | result = vsan_perf_system.CreateStatsObject(self.cluster) 125 | except vim.fault.FileAlreadyExists: 126 | self.state_exit_unchanged() 127 | except vim.fault.FileNotFound as fnf: 128 | self.module.fail_json(msg="File not found: {}".format(str(fnf))) 129 | except vim.fault.CannotCreateFile as cnc: 130 | self.module.fail_json(msg="Cannot Create: {}".format(str(cnc))) 131 | except vim.fault.VsanFault as vf: 132 | self.module.fail_json(msg="vsan fault: {}".format(str(vf))) 133 | except (vmodl.RuntimeFault, vmodl.MethodFault): 134 | self.module.fail_json(msg="Runtime Method fault") 135 | 136 | self.module.exit_json(changed=True, result=result) 137 | 138 | 139 | def vsan_health_perf(self): 140 | 141 | self.si = self.connect_to_vc_api() 142 | self.content = self.si.RetrieveContent() 143 | self.vc_mos = GetVsanVcMos(self.si._stub, context=None) 144 | 145 | try: 146 | self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name) 147 | 148 | if not self.datacenter: 149 | self.module.fail_json(msg="Cannot find DC") 150 | 151 | self.cluster = find_cluster_by_name_datacenter(self.datacenter, self.cluster_name) 152 | 153 | if not self.cluster: 154 | self.module.fail_json(msg="Cannot find cluster") 155 | 156 | except vmodl.RuntimeFault as runtime_fault: 157 | self.module.fail_json(msg=runtime_fault.msg) 158 | except vmodl.MethodFault as method_fault: 159 | self.module.fail_json(msg=method_fault.msg) 160 | 161 | vsan_perf_system = self.vc_mos['vsan-performance-manager'] 162 | 163 | if self.module.params['state'] == 'present': 164 | self.vsan_health_perf_create(vsan_perf_system) 165 | 166 | if self.module.params['state'] == 'absent': 167 |
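# 'absent' removes the cluster's vSAN performance stats object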
self.state_destroy_health_perf(vsan_perf_system) 168 | 169 | 170 | 171 | def connect_to_vc_api(self, disconnect_atexit=True): 172 | 173 | hostname = self.module.params['hostname'] 174 | username = self.module.params['username'] 175 | password = self.module.params['password'] 176 | validate_certs = self.module.params['validate_certs'] 177 | 178 | if validate_certs and not hasattr(ssl, 'SSLContext'): 179 | self.module.fail_json( 180 | msg='pyVim does not support changing verification mode with python < 2.7.9. Either update python or or use validate_certs=false') 181 | 182 | try: 183 | service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password) 184 | except vim.fault.InvalidLogin, invalid_login: 185 | self.module.fail_json(msg=invalid_login.msg, apierror=str(invalid_login)) 186 | except requests.ConnectionError, connection_error: 187 | if '[SSL: CERTIFICATE_VERIFY_FAILED]' in str(connection_error) and not validate_certs: 188 | context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) 189 | context.verify_mode = ssl.CERT_NONE 190 | service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password, sslContext=context) 191 | else: 192 | self.module.fail_json(msg="Unable to connect to vCenter or ESXi API on TCP/443.", 193 | apierror=str(connection_error)) 194 | 195 | if disconnect_atexit: 196 | atexit.register(connect.Disconnect, service_instance) 197 | return service_instance 198 | 199 | 200 | def main(): 201 | argument_spec = vmware_argument_spec() 202 | 203 | argument_spec.update( 204 | dict( 205 | datacenter_name=dict(required=True, type='str'), 206 | cluster_name=dict(required=True, type='str'), 207 | state=dict(default='present', choices=['present', 'absent'], type='str'), 208 | ) 209 | ) 210 | 211 | module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) 212 | 213 | if not HAS_PYVMOMI: 214 | module.fail_json(msg='pyvmomi is required for this module') 215 | 216 | vsan_health_perf = VsanHealthPerf(module) 217 | vsan_health_perf.vsan_health_perf() 218 | 219 | 220 | from ansible.module_utils.basic import * 221 | from ansible.module_utils.vmware import * 222 | from ansible.module_utils.vsanapiutils import * 223 | from ansible.module_utils.vsanmgmtObjects import * 224 | 225 | if __name__ == '__main__': 226 | main() -------------------------------------------------------------------------------- /vcenter_vsan_stretch_cluster.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # (c) 2015, Joseph Callen 4 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved. 5 | # 6 | # This file is part of Ansible 7 | # 8 | # Ansible is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # Ansible is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with Ansible. If not, see . 
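#
# A vSAN stretched cluster splits the cluster's hosts between a preferred and
# a secondary fault domain and relies on a witness host outside the cluster to
# hold quorum components. The class below builds the fault domain config and a
# disk mapping for the witness host, then converts the cluster with
# VSANVcConvertToStretchedCluster on the vsan-stretched-cluster-system object.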
20 | 21 | 22 | DOCUMENTATION = ''' 23 | module: vcener_vsan_stretch_cluster 24 | Short_description: Creates fault domains and adds a esx host as a witness host, creates diskgroups on 25 | witness host 26 | description: 27 | Creates fault domains and adds a esx host as a witness host, creates diskgroups on 28 | witness host. Module will fail if the cluster has less than 2 esx hosts. You need at least 2 29 | esx hosts in the cluster for the primary and secondary fault domains. 30 | requirements: 31 | - pyvmomi 6 32 | - vsan SDK 33 | - ansible 2.x 34 | - esx witness host, physical or witness host appliance in a non vsan cluster 35 | Tested on: 36 | - vcenter 6 37 | - pyvmomi 6 38 | - esx 6 39 | - ansible 2.1.2 40 | options: 41 | hostname: 42 | description: 43 | - The hostname or IP address of the vSphere vCenter API server 44 | required: True 45 | username: 46 | description: 47 | - The username of the vSphere vCenter with Admin rights 48 | required: True 49 | aliases: ['user', 'admin'] 50 | password: 51 | description: 52 | - The password of the vSphere vCenter user 53 | required: True 54 | aliases: ['pass', 'pwd'] 55 | datacenter_name: 56 | description: 57 | - The name of the datacenter. 58 | required: True 59 | cluster_name: 60 | description: 61 | - The name of the vCenter cluster 62 | required: True 63 | witness_host: 64 | description: 65 | - The host to be used as the witness host 66 | required: True 67 | state: 68 | choices: ['present', 'absent'] 69 | required: True 70 | ''' 71 | 72 | EXAMPLE = ''' 73 | - name: VSAN Config Stretch Cluster 74 | vcenter_vsan_stretch_cluster: 75 | hostname: "{{ vcenter }}" 76 | username: "{{ vcenter_user }}" 77 | password: "{{ vcenter_password }}" 78 | validate_certs: "{{ vcenter_validate_certs }}" 79 | datacenter_name: "{{ datacenter.name }}" 80 | cluster_name: "{{ item.name }}" 81 | witness_host: 82 | name: "{{ wa_esx_hostname }}" 83 | num_disk_groups: "{{ item.vsan.num_disk_groups }}" 84 | num_disks_per_group: "{{ item.vsan.num_disks_per_group }}" 85 | state: 'present' 86 | when: item.name == ib_vcenter_nsxedge_esx_cluster_name 87 | with_items: 88 | - "{{ datacenter.clusters }}" 89 | tags: 90 | - vsan_stretch_cluster 91 | ''' 92 | 93 | 94 | try: 95 | from pyVim import vim, vmodl, connect 96 | import requests 97 | import ssl 98 | import atexit 99 | HAS_PYVMOMI = True 100 | except ImportError: 101 | HAS_PYVMOMI = False 102 | 103 | class VsanStretchCluster(object): 104 | ''' 105 | 106 | ''' 107 | def __init__(self, module): 108 | self.module = module 109 | self.cluster_name = module.params['cluster_name'] 110 | self.datacenter_name = module.params['datacenter_name'] 111 | self.witness_host_name = module.params['witness_host']['name'] 112 | self.witness_host_ssd = module.params['witness_host']['num_disk_groups'] 113 | self.witness_host_hdd = module.params['witness_host']['num_disks_per_group'] 114 | self.content = None 115 | self.si = None 116 | self.vMos = None 117 | self.vsan_sc_system = None 118 | self.cluster = None 119 | self.witness_host = None 120 | self.datacenter = None 121 | self.prefered_fault_domain_name = self.cluster_name + "_FD_01" 122 | self.second_fault_domain_name = self.cluster_name + "_FD_02" 123 | 124 | 125 | def connect_to_vc_api(self, disconnect_atexit=True): 126 | 127 | hostname = self.module.params['hostname'] 128 | username = self.module.params['username'] 129 | password = self.module.params['password'] 130 | validate_certs = self.module.params['validate_certs'] 131 | 132 | if validate_certs and not hasattr(ssl, 'SSLContext'): 133 | 
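# ssl.SSLContext is only available from Python 2.7.9 on, so older
# interpreters cannot change certificate verification behaviour here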
self.module.fail_json( 134 | msg='pyVim does not support changing verification mode with python < 2.7.9. Either update python or or use validate_certs=false') 135 | 136 | try: 137 | service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password) 138 | except vim.fault.InvalidLogin, invalid_login: 139 | self.module.fail_json(msg=invalid_login.msg, apierror=str(invalid_login)) 140 | except requests.ConnectionError, connection_error: 141 | if '[SSL: CERTIFICATE_VERIFY_FAILED]' in str(connection_error) and not validate_certs: 142 | context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) 143 | context.verify_mode = ssl.CERT_NONE 144 | service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password, sslContext=context) 145 | else: 146 | self.module.fail_json(msg="Unable to connect to vCenter or ESXi API on TCP/443.", 147 | apierror=str(connection_error)) 148 | 149 | if disconnect_atexit: 150 | atexit.register(connect.Disconnect, service_instance) 151 | return service_instance 152 | 153 | 154 | def vsan_process_state(self): 155 | ''' 156 | Given desired state and current state process accordingly 157 | :return: changed, result 158 | ''' 159 | 160 | vsan_states = { 161 | 'absent': { 162 | 'absent': self.state_exit_unchanged, 163 | 'present': self.state_destroy_stretchclsuter, 164 | }, 165 | 'present': { 166 | 'absent': self.state_create_strectcluster, 167 | 'present': self.state_exit_unchanged, 168 | } 169 | } 170 | 171 | desired_state = self.module.params['state'] 172 | current_state = self.current_state_stretchcluster() 173 | 174 | vsan_states[desired_state][current_state]() 175 | 176 | 177 | def state_exit_unchanged(self): 178 | ''' 179 | 180 | ''' 181 | self.module.exit_json(changed=False, msg='EXIT UNCHANGED') 182 | 183 | 184 | def state_destroy_stretchclsuter(self): 185 | self.module.exit_json(changed=False, msg="DESTROY") 186 | 187 | 188 | def state_create_strectcluster(self): 189 | 190 | fault_domain_config = self.vsan_stretch_cluster_fd_config() 191 | disk_mapping = self.vsan_host_disk_mapping_spec(self.witness_host) 192 | 193 | result = self.vsan_convert_to_stretch(fault_domain_config, disk_mapping) 194 | 195 | if not result: 196 | self.module.fail_json(msg="failed to create") 197 | 198 | self.module.exit_json(changed=True, result=str(result)) 199 | 200 | 201 | def current_state_stretchcluster(self): 202 | 203 | state = 'absent' 204 | 205 | try: 206 | self.si = self.connect_to_vc_api(disconnect_atexit=False) 207 | self.content = self.si.RetrieveContent() 208 | self.vMos = GetVsanVcMos(self.si._stub, None) 209 | self.vsan_sc_system = self.vMos['vsan-stretched-cluster-system'] 210 | 211 | self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name) 212 | 213 | if not self.datacenter: 214 | self.module.fail_json(msg="Cannot find DC") 215 | 216 | self.cluster = find_cluster_by_name_datacenter(self.datacenter, self.cluster_name) 217 | 218 | if not self.cluster: 219 | self.module.fail_json(msg="Cannot find cluster") 220 | 221 | witness_host = find_hostsystem_by_name(self.content, self.witness_host_name) 222 | 223 | if not witness_host: 224 | self.module.fail_json(msg="Cannot find witness host") 225 | else: 226 | self.witness_host = witness_host 227 | 228 | num_hosts = len(self.cluster.host) 229 | 230 | if num_hosts < 2: 231 | self.module.fail_json(msg="You do not have the qualified number of hosts for stretch cluster") 232 | 233 | witness_host_present = self.vsan_check_if_witness_host(self.vsan_sc_system, witness_host) 234 | 235 | if 
witness_host_present: 236 | state = 'present' 237 | 238 | except vmodl.RuntimeFault as runtime_fault: 239 | self.module.fail_json(msg=runtime_fault.msg) 240 | except vmodl.MethodFault as method_fault: 241 | self.module.fail_json(msg=method_fault.msg) 242 | 243 | return state 244 | 245 | 246 | def vsan_check_if_witness_host(self, vsan_sc_system, host): 247 | is_witness = False 248 | 249 | try: 250 | is_witness = vsan_sc_system.VSANVcIsWitnessHost(host) 251 | except vim.fault.VsanFault: 252 | return is_witness 253 | 254 | return is_witness 255 | 256 | 257 | def vsan_stretch_cluster_fd_config(self): 258 | 259 | first_fd_host = [] 260 | second_fd_host = [] 261 | 262 | num_hosts = len(self.cluster.host) 263 | hosts = [host for host in self.cluster.host] 264 | 265 | if (not num_hosts%2): 266 | for index, item in enumerate(hosts): 267 | if index < num_hosts/2: 268 | first_fd_host.append(item) 269 | for host in set(hosts) - set(first_fd_host): 270 | second_fd_host.append(host) 271 | 272 | spec = vim.VimClusterVSANStretchedClusterFaultDomainConfig( 273 | firstFdHosts=first_fd_host, 274 | firstFdName=self.prefered_fault_domain_name, 275 | secondFdHosts=second_fd_host, 276 | secondFdName=self.second_fault_domain_name 277 | ) 278 | 279 | return spec 280 | 281 | 282 | def vsan_host_disk_state(self, host, state): 283 | ''' 284 | 285 | ''' 286 | 287 | disks = host.config.storageDevice.scsiLun 288 | vsan_mgr = host.configManager.vsanSystem 289 | 290 | host_vsan_info = {'name': host.name, 'ssd': [], 'hdd': []} 291 | 292 | for disk in disks: 293 | eligible_disks = vsan_mgr.QueryDisksForVsan(disk.canonicalName) 294 | 295 | for d in eligible_disks: 296 | if d.state == state and d.disk.ssd: 297 | host_vsan_info['ssd'].append(d.disk) 298 | if d.state == state and (not d.disk.ssd): 299 | host_vsan_info['hdd'].append(d.disk) 300 | 301 | return host_vsan_info 302 | 303 | 304 | def vsan_host_disk_mapping_spec(self, host): 305 | 306 | disk_info = self.vsan_host_disk_state(host, 'eligible') 307 | ssd = disk_info['ssd'] 308 | hdd = disk_info['hdd'] 309 | 310 | if not ssd or not hdd: 311 | self.module.fail_json(msg="No eligible disks on host") 312 | 313 | disk_map_spec = vim.VsanHostDiskMapping(ssd=ssd[0], nonSsd=hdd) 314 | 315 | return disk_map_spec 316 | 317 | 318 | def vsan_convert_to_stretch(self, fd_config, disk_mapping): 319 | 320 | result = False 321 | 322 | try: 323 | result = self.vsan_sc_system.VSANVcConvertToStretchedCluster( 324 | cluster=self.cluster, 325 | faultDomainConfig=fd_config, 326 | witnessHost=self.witness_host, 327 | preferredFd=self.prefered_fault_domain_name, 328 | diskMapping=disk_mapping 329 | ) 330 | WaitForTasks([result], self.si) 331 | except Exception: 332 | pass 333 | 334 | return result 335 | 336 | 337 | def main(): 338 | argument_spec = vmware_argument_spec() 339 | 340 | argument_spec.update( 341 | dict( 342 | datacenter_name=dict(required=True, type='str'), 343 | cluster_name=dict(required=True, type='str'), 344 | witness_host=dict(type='dict'), 345 | state=dict(default='present', choices=['present', 'absent'], type='str'), 346 | ) 347 | ) 348 | 349 | module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) 350 | 351 | if not HAS_PYVMOMI: 352 | module.fail_json(msg='pyvmomi is required for this module') 353 | 354 | vsan = VsanStretchCluster(module) 355 | vsan.vsan_process_state() 356 | 357 | 358 | from ansible.module_utils.basic import * 359 | from ansible.module_utils.vmware import * 360 | from ansible.module_utils.vsanapiutils import * 361 | from 
ansible.module_utils.vsanmgmtObjects import * 362 | 363 | if __name__ == '__main__': 364 | main() -------------------------------------------------------------------------------- /vcenter_vsan_witness_deploy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # (c) 2015, Joseph Callen 4 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved. 5 | # 6 | # This file is part of Ansible 7 | # 8 | # Ansible is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # Ansible is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with Ansible. If not, see . 20 | 21 | DOCUMENTATION = ''' 22 | module: vcener_vsan_witness_deploy 23 | Short_description: Deploys (Creates), Removes (Deletes) a witness host appliance for a stretched cluster 24 | description: 25 | Deploys (Creates), Removes (Deletes) a witness host appliance for a stretched cluster 26 | requirements: 27 | - pyvmomi 6 28 | - ansible 2.x 29 | Tested on: 30 | - vcenter 6 31 | - pyvmomi 6 32 | - esx 6 33 | options: 34 | hostname: 35 | description: 36 | - The hostname or IP address of the vSphere vCenter API server 37 | required: True 38 | username: 39 | description: 40 | - The username of the vSphere vCenter with Admin rights 41 | required: True 42 | aliases: ['user', 'admin'] 43 | password: 44 | description: 45 | - The password of the vSphere vCenter user 46 | required: True 47 | aliases: ['pass', 'pwd'] 48 | datacenter_name: 49 | description: 50 | - The name of the datacenter. 51 | required: True 52 | cluster_name: 53 | description: 54 | - The name of the vCenter cluster 55 | required: True 56 | vmname: 57 | description: 58 | - The name of the vm in vcenter 59 | required: True 60 | ovftool_path: 61 | description: 62 | - The path where the ovftool is installed 63 | ex: /usr/local/bin/ovftool 64 | path_to_ova: 65 | description: 66 | - The path where the witness appliance ova is located 67 | required: True 68 | ova_file: 69 | description: 70 | - The name of the ova file 71 | ex: VMware-VirtualSAN-Witness-6.x.x.ova 72 | required: True 73 | disk_mode: 74 | description: 75 | - The disk mode for the deployment of the ova 76 | default: thin 77 | required: True 78 | datastore: 79 | description: 80 | - Valid vcenter datastore 81 | required: True 82 | management_network: 83 | description: 84 | - Management Network will be used to drive witness vm traffic 85 | required: True 86 | vsan_network: 87 | description: 88 | - Witness Network will be used to drive witness vm traffic 89 | required: True 90 | root_password: 91 | description: 92 | - Set password for root account. A valid password must be at least 7 characters long and must 93 | contain a mix of upper and lower case letters, digits, and other 94 | characters. You can use a 7 character long password with characters from at least 3 95 | of these 4 classes. An upper case letter that begins the password and a 96 | digit that ends it do not count towards the number of character classes 97 | used. 
98 | - module does not validate password 99 | required: True 100 | deployment_size: 101 | description: 102 | - Size of the witness appliance deployment 103 | options: 104 | - tiny: 105 | Configuration for Tiny Virtual SAN Deployments with 10 VMs or fewer 106 | - normal: 107 | Configuration for Medium Virtual SAN Deployments of up to 500 VMs 108 | - large: 109 | Configuration for Large Virtual SAN Deployments of more than 500 VMs 110 | required: True 111 | state: 112 | choices: ['present', 'absent'] 113 | required: True 114 | ''' 115 | 116 | EXAMPLES = ''' 117 | - name: deploy vsan witness appliance for edge cluster 118 | vcenter_vsan_witness_deploy: 119 | hostname: "{{ vcenter }}" 120 | username: "{{ vcenter_user }}" 121 | password: "{{ vcenter_password }}" 122 | validate_certs: "{{ vcenter_validate_certs }}" 123 | vmname: "{{ wa_vm_name }}" 124 | ovftool_path: "{{ ovf_tool_path }}" 125 | path_to_ova: "{{ ova_path }}" 126 | ova_file: "{{ wa_ova }}" 127 | datacenter: "{{ datacenter.name }}" 128 | cluster: "{{ ib_vcenter_mgmt_esx_cluster_name }}" 129 | disk_mode: "{{ disk_mode }}" 130 | datastore: "{{ ib_vcenter_mgmt_esx_cluster_name }}_VSAN_DS" 131 | management_network: "{{ mgmt_vds_viomgmt }}" 132 | vsan_network: "{{ edge_vsan_pg }}" 133 | root_password: "{{ wa_root_pass }}" 134 | deployment_size: "{{ wa_deployment_size }}" 135 | state: "{{ global_state }}" 136 | tags: 137 | - vio_deploy_wa_ova 138 | ''' 139 | 140 | 141 | try: 142 | import json 143 | import os 144 | import requests 145 | import time 146 | from pyVmomi import vim, vmodl 147 | IMPORTS = True 148 | except ImportError: 149 | IMPORTS = False 150 | 151 | 152 | class TaskError(Exception): 153 | pass 154 | 155 | vc = {} 156 | 157 | def wait_for_vm(vm): 158 | while True: 159 | 160 | if vm.runtime.powerState == 'poweredOn' and vm.runtime.connectionState == 'connected': 161 | return True 162 | if vm.runtime.connectionState in ('inaccessible', 'invalid', 'orphaned') or \ 163 | vm.runtime.powerState == 'suspended': 164 | # the VM is in an error state (inaccessible, invalid, orphaned or suspended), 165 | # so stop polling and report failure rather than returning an exception object 166 | return False 167 | 168 | 169 | time.sleep(15) 170 | 171 | 172 | def find_virtual_machine(content, searched_vm_name): 173 | virtual_machines = get_all_objs(content, [vim.VirtualMachine]) 174 | for vm in virtual_machines: 175 | if vm.name == searched_vm_name: 176 | return vm 177 | return None 178 | 179 | 180 | def get_all_objs(content, vimtype): 181 | obj = {} 182 | container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True) 183 | for managed_object_ref in container.view: 184 | obj.update({managed_object_ref: managed_object_ref.name}) 185 | return obj 186 | 187 | 188 | def find_vcenter_object_by_name(content, vimtype, name): 189 | objts = get_all_objs(content, [vimtype]) 190 | 191 | for objt in objts: 192 | if objt.name == name: 193 | return objt 194 | 195 | return None 196 | 197 | 198 | def state_delete_vm(module): 199 | 200 | changed = False 201 | 202 | vm = vc['witness_appliance'] 203 | 204 | if vm.runtime.powerState == 'poweredOn': 205 | power_off_task = vm.PowerOffVM_Task() 206 | wait_for_task(power_off_task) 207 | 208 | try: 209 | delete_vm_task = vm.Destroy_Task() 210 | changed, result = wait_for_task(delete_vm_task) 211 | except Exception as e: 212 | module.fail_json(msg="Failed deleting vm: {}".format(str(e))) 213 | 214 | module.exit_json(changed=changed) 215 | 216 | 217 | 218 | def state_exit_unchanged(module): 219 | module.exit_json(changed=False, msg="EXIT UNCHANGED") 220 | 221 | 222 | def ova_tool_command_list(module, ovftool_exec, ova_file, vi_string, proxy=None): 223 | ova_command_list = [ovftool_exec, 224 | '--acceptAllEulas', 225 | '--skipManifestCheck', 226 | '--powerOn', 227 | '--noSSLVerify', 228 | '--allowExtraConfig', 229 | '--name={}'.format(module.params['vmname']), 230 | '--diskMode={}'.format(module.params['disk_mode']), 231 | '--datastore={}'.format(module.params['datastore']), 232 | '--net:Management Network={}'.format(module.params['management_network']), 233 | '--net:Witness Network={}'.format(module.params['vsan_network']), 234 | '--deploymentOption={}'.format(module.params['deployment_size']), 235 | '--prop:vsan.witness.root.passwd={}'.format(module.params['root_password'])] 236 | 237 | if proxy: 238 | ova_command_list.append('--proxy={}'.format(proxy)) 239 | 240 | ova_command_list.append(ova_file) 241 | ova_command_list.append(vi_string) 242 | 243 | return ova_command_list 244 | 245 | 246 | def state_create_vm(module): 247 | 248 | ovftool_exec = '{}/ovftool'.format(module.params['ovftool_path']) 249 | ova_file = '{}/{}'.format(module.params['path_to_ova'], module.params['ova_file']) 250 | vi_string = 'vi://{}:{}@{}/{}/host/{}/'.format(module.params['username'], 251 | module.params['password'], module.params['hostname'], 252 | module.params['datacenter'], module.params['cluster']) 253 | 254 | ova_commands = ova_tool_command_list(module, ovftool_exec, ova_file, vi_string) 255 | 256 | ova_tool_result = module.run_command(ova_commands) 257 | 258 | if ova_tool_result[0] != 0: 259 | module.fail_json(msg='Failed to deploy OVA, error message from ovftool is: {}'.format(ova_tool_result[1])) 260 | 261 | module.exit_json(changed=True, result=ova_tool_result[0]) 262 | 263 | 264 | 265 | def main(): 266 | argument_spec = vmware_argument_spec() 267 | 268 | argument_spec.update( 269 | dict( 270 | vmname=dict(required=True, type='str'), 271 | ovftool_path=dict(required=True, type='str'), 272 | path_to_ova=dict(required=True, type='str'), 273 | ova_file=dict(required=True, type='str'), 274 | datacenter=dict(required=True, type='str'), 275 | cluster=dict(required=True, type='str'), 276 | disk_mode=dict(default='thin', type='str'), 277 | datastore=dict(required=True, type='str'), 278 | management_network=dict(required=True, type='str'), 279 | vsan_network=dict(required=True, type='str'), 280 | root_password=dict(required=True, type='str', no_log=True), 281 | deployment_size=dict(required=True, choices=['tiny', 'normal', 'large']), 282 | proxy=dict(required=False, type='str'), 283 | state=dict(default='present', choices=['present', 'absent']), 284 | ) 285 | ) 286 | 287 | module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) 288 | 289 | if not IMPORTS: 290 | module.fail_json(msg="Failed to import modules") 291 | 292 | content = connect_to_api(module) 293 | 294 | witness_appliance = find_virtual_machine(content, module.params['vmname']) 295 | 296 | vc['witness_appliance'] = witness_appliance 297 | 298 | vm_states = { 299 | 'absent': { 300 | 'present': state_delete_vm, 301 | 'absent': state_exit_unchanged, 302 | }, 303 | 'present': { 304 | 'present': state_exit_unchanged, 305 | 'absent': state_create_vm 306 | } 307 | } 308 | 309 | desired_state = module.params['state'] 310 | 311 | if witness_appliance: 312 | current_state = 'present' 313 | else: 314 | current_state = 'absent' 315 | 316 | vm_states[desired_state][current_state](module) 317 | 318 | 319 | from ansible.module_utils.basic import * 320 | from ansible.module_utils.vmware import * 321 | 322 | if __name__ == '__main__': 323 | main() 324 |
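# For reference, ova_tool_command_list() assembles an ovftool invocation along
# these lines (every value below is an illustrative placeholder, not a default):
#
#   /usr/local/bin/ovftool --acceptAllEulas --skipManifestCheck --powerOn \
#       --noSSLVerify --allowExtraConfig --name=vsan-witness-01 \
#       --diskMode=thin --datastore=mgmt_VSAN_DS \
#       '--net:Management Network=mgmt-pg' '--net:Witness Network=vsan-pg' \
#       --deploymentOption=tiny --prop:vsan.witness.root.passwd=<password> \
#       /ova/VMware-VirtualSAN-Witness-6.x.x.ova \
#       'vi://user:password@vcenter.example.com/DC01/host/MgmtCluster/'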
--------------------------------------------------------------------------------
/vds.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | #
3 | # (c) 2015, Joseph Callen
4 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved.
5 | #
6 | # This file is part of Ansible
7 | #
8 | # Ansible is free software: you can redistribute it and/or modify
9 | # it under the terms of the GNU General Public License as published by
10 | # the Free Software Foundation, either version 3 of the License, or
11 | # (at your option) any later version.
12 | #
13 | # Ansible is distributed in the hope that it will be useful,
14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 | # GNU General Public License for more details.
17 | #
18 | # You should have received a copy of the GNU General Public License
19 | # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
20 | 
21 | DOCUMENTATION = '''
22 | module: vds
23 | short_description: Manage a virtual distributed switch
24 | description:
25 |     - Create, update, or delete a virtual distributed switch (VDS).
26 | options:
27 |     datacenter_name:
28 |         description:
29 |             - The name of the datacenter the VDS will be created in.
30 |         required: True
31 |     vds_name:
32 |         description:
33 |             - The name of the new VDS
34 |     numUplinks:
35 |         description:
36 |             - The number of uplinks for the VDS
37 |     numPorts:
38 |         description:
39 |             - The number of standalone ports on the VDS
40 |     mtu:
41 |         description:
42 |             - The MTU for the VDS. The upstream physical switch MTU must be equal or greater.
43 |     discovery_protocol:
44 |         description:
45 |             - The link discovery protocol, either cdp or lldp
46 |     discovery_operation:
47 |         description:
48 |             - The link discovery operation, one of both, none, advertise or listen
49 |     productVersion:
50 |         description:
51 |             - The VDS product version, for example 6.0.0
52 |     state:
53 |         description:
54 |             - If the VDS should be present or absent
55 |         choices: ['present', 'absent']
56 |         required: True
57 | '''
58 | 
59 | EXAMPLES = '''
60 | - name: Create VDS
61 |   vds:
62 |     hostname: '172.16.78.15'
63 |     username: 'administrator@vsphere.local'
64 |     password: 'VMware1!'
65 |     validate_certs: False
66 |     datacenter_name: "test-dc-01"
67 |     vds_name: "vds001"
68 |     numUplinks: 4
69 |     numPorts: 16
70 |     mtu: 9000
71 |     discovery_protocol: 'lldp'
72 |     discovery_operation: 'both'
73 |     productVersion: '6.0.0'
74 |     state: 'present'
75 | '''
76 | 
77 | try:
78 |     from pyVmomi import vim, vmodl
79 |     from pyVim import connect
80 |     HAS_PYVMOMI = True
81 | except ImportError:
82 |     HAS_PYVMOMI = False
83 | 
84 | 
85 | def find_vcenter_object_by_name(content, vimtype, object_name):
86 |     vcenter_object = get_all_objs(content, [vimtype])
87 | 
88 |     for k, v in vcenter_object.items():
89 |         if v == object_name:
90 |             return k
91 |     # only fall through to None after checking every object
92 |     return None
93 | 
94 | 
95 | def find_vds_by_name(content, vds_name):
96 |     vdSwitches = get_all_objs(content, [vim.dvs.VmwareDistributedVirtualSwitch])
97 |     for vds in vdSwitches:
98 |         if vds_name == vds.name:
99 |             return vds
100 |     return None
101 | 
102 | 
103 | def _create_vds_spec(si, update, module):
104 | 
105 |     vds_name = module.params['vds_name']
106 |     mtu = module.params['mtu']
107 |     discovery_protocol = module.params['discovery_protocol']
108 |     discovery_operation = module.params['discovery_operation']
109 |     productVersion = module.params['productVersion']
110 |     numUplinks = module.params['numUplinks']
111 |     numPorts = module.params['numPorts']
112 | 
113 |     uplink_port_names = []
114 | 
115 |     for x in range(int(numUplinks)):
116 |         uplink_port_names.append("%s_Uplink_%d" % (vds_name, x + 1))
117 | 
118 |     prod_info = vim.dvs.ProductSpec(
119 |         version=productVersion,
120 |         name="DVS",
121 |         vendor="VMware, Inc."
122 |     )
123 | 
124 |     uplink = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy(
125 |         uplinkPortName=uplink_port_names
126 |     )
127 | 
128 |     linkConfig = vim.host.LinkDiscoveryProtocolConfig(
129 |         protocol=discovery_protocol,
130 |         operation=discovery_operation
131 |     )
132 | 
133 |     configSpec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec(
134 |         name=vds_name,
135 |         numStandalonePorts=numPorts,
136 |         maxMtu=mtu,
137 |         uplinkPortPolicy=uplink,
138 |         linkDiscoveryProtocolConfig=linkConfig,
139 |         lacpApiVersion="multipleLag",
140 |     )
141 | 
142 |     if update:
143 |         vds = find_vds_by_name(si, vds_name)
144 |         configSpec.configVersion = vds.config.configVersion
145 |         return configSpec
146 | 
147 |     spec = vim.DistributedVirtualSwitch.CreateSpec()
148 |     spec.configSpec = configSpec
149 |     spec.productInfo = prod_info
150 | 
151 |     return spec
152 | 
153 | 
154 | def _check_vds_config_spec(vds, module):
155 | 
156 |     vds_name = module.params['vds_name']
157 |     mtu = module.params['mtu']
158 |     discovery_protocol = module.params['discovery_protocol']
159 |     discovery_operation = module.params['discovery_operation']
160 |     productVersion = module.params['productVersion']
161 |     numUplinks = module.params['numUplinks']
162 |     numPorts = module.params['numPorts']
163 | 
164 |     current_spec = vds.config
165 | 
166 |     check_vals = [
167 |         (vds_name == current_spec.name),
168 |         (mtu == current_spec.maxMtu),
169 |         (discovery_protocol == current_spec.linkDiscoveryProtocolConfig.protocol),
170 |         (discovery_operation == current_spec.linkDiscoveryProtocolConfig.operation),
171 |         (productVersion == current_spec.productInfo.version),
172 |         (numUplinks == len(current_spec.uplinkPortPolicy.uplinkPortName))
173 |     ]
174 | 
175 |     if False in check_vals:
176 |         return False
177 |     else:
178 |         return True
179 | 
180 | 
181 | def state_update_vds(si, module):
182 | 
183 |     vds_name = module.params['vds_name']
184 |     vds = find_vds_by_name(si, vds_name)
185 | 
186 |     config_spec = _create_vds_spec(si, True, module)
187 | 
188 |     try:
189 |         reconfig_task = vds.ReconfigureDvs_Task(config_spec)
190 |         changed, result = wait_for_task(reconfig_task)
191 |     except Exception as e:
192 |         module.fail_json(msg="Failed reconfiguring vds: {}".format(e))
193 | 
194 |     module.exit_json(changed=changed, result=str(result))
195 | 
196 | 
197 | def state_exit_unchanged(si, module):
198 |     module.exit_json(changed=False, msg="EXIT UNCHANGED")
199 | 
200 | 
201 | def state_destroy_vds(si, module):
202 | 
203 |     vds_name = module.params['vds_name']
204 |     vds = find_vds_by_name(si, vds_name)
205 | 
206 |     if vds is None:
207 |         module.exit_json(changed=False, msg="Could not find vds: {}".format(vds_name))
208 | 
209 |     try:
210 |         task = vds.Destroy_Task()
211 |         changed, result = wait_for_task(task)
212 |     except Exception as e:
213 |         module.fail_json(msg="Failed to destroy vds: {}".format(str(e)))
214 | 
215 |     module.exit_json(changed=changed, result=result)
216 | 
217 | 
218 | def state_create_vds(si, module):
219 | 
220 |     datacenter = find_datacenter_by_name(si, module.params['datacenter_name'])
221 |     network_folder = datacenter.networkFolder
222 | 
223 |     vds_create_spec = _create_vds_spec(si, False, module)
224 | 
225 |     try:
226 |         if module.check_mode:
227 |             # report the pending change instead of silently returning
228 |             module.exit_json(changed=True, msg="VDS would be created")
229 |         task = network_folder.CreateDVS_Task(vds_create_spec)
230 |         changed, vds_created = wait_for_task(task)
231 |         module.exit_json(changed=changed, result=vds_created.name)
232 | 
233 |     except Exception as e:
234 |         module.fail_json(msg=str(e))
235 | 
236 | 
237 | def check_vds_state(si, module):
238 | 
239 |     vds_name = module.params['vds_name']
240 | 
241 |     try:
242 |         vds = find_vds_by_name(si, vds_name)
243 | 
244 |         if vds is None:
245 |             return 'absent'
246 |         elif not _check_vds_config_spec(vds, module):
247 |             return 'update'
248 |         else:
249 |             return 'present'
250 | 
251 |     except vmodl.RuntimeFault as runtime_fault:
252 |         module.fail_json(msg=runtime_fault.msg)
253 |     except vmodl.MethodFault as method_fault:
254 |         module.fail_json(msg=method_fault.msg)
255 | 
256 | 
257 | def main():
258 |     argument_spec = vmware_argument_spec()
259 | 
260 |     argument_spec.update(
261 |         dict(
262 |             datacenter_name=dict(type='str', required=True),
263 |             vds_name=dict(type='str', required=True),
264 |             numUplinks=dict(type='int', required=True),
265 |             numPorts=dict(type='int', required=True),
266 |             mtu=dict(type='int', required=True),
267 |             discovery_protocol=dict(required=True, choices=['cdp', 'lldp'], type='str'),
268 |             discovery_operation=dict(required=True, choices=['both', 'none', 'advertise', 'listen'], type='str'),
269 |             productVersion=dict(type='str', required=True, choices=['6.0.0', '5.5.0', '5.1.0', '5.0.0']),
270 |             state=dict(required=True, choices=['present', 'absent'], type='str')
271 |         )
272 |     )
273 | 
274 |     module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
275 | 
276 |     if not HAS_PYVMOMI:
277 |         module.fail_json(msg='pyvmomi is required for this module')
278 | 
279 |     vds_states = {
280 |         'absent': {
281 |             'present': state_destroy_vds,
282 |             'update': state_destroy_vds,
283 |             'absent': state_exit_unchanged,
284 |         },
285 |         'present': {
286 |             'present': state_exit_unchanged,
287 |             'update': state_update_vds,
288 |             'absent': state_create_vds,
289 |         }
290 |     }
291 | 
292 |     si = connect_to_api(module)
293 | 
294 |     datacenter = find_datacenter_by_name(si, module.params['datacenter_name'])
295 | 
296 |     if datacenter is None:
297 |         module.fail_json(msg="Could not find datacenter: {}".format(module.params['datacenter_name']))
298 | 
299 |     desired_state = module.params['state']
300 |     current_state = check_vds_state(si, module)
301 | 
302 |     vds_states[desired_state][current_state](si, module)
303 | 
304 | 
305 | from ansible.module_utils.basic import *
306 | from ansible.module_utils.vmware import *
307 | 
308 | if __name__ == '__main__':
309 |     main()
--------------------------------------------------------------------------------
/vio_ldap.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | #
3 | # (c) 2015, Joseph Callen
4 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved.
5 | #
6 | # This file is part of Ansible
7 | #
8 | # Ansible is free software: you can redistribute it and/or modify
9 | # it under the terms of the GNU General Public License as published by
10 | # the Free Software Foundation, either version 3 of the License, or
11 | # (at your option) any later version.
12 | #
13 | # Ansible is distributed in the hope that it will be useful,
14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 | # GNU General Public License for more details.
17 | #
18 | # You should have received a copy of the GNU General Public License
19 | # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
20 | 
21 | 
22 | DOCUMENTATION = '''
23 | module: vio_ldap
24 | short_description: Validate Active Directory bind, admin, and test project users for chaperone VIO.
25 | description:
26 |     - This module is intended to be used to validate the specified admin, bind and project users for
27 |       the configuration of VIO when Active Directory is the authentication source. The module will
28 |       attempt to bind with the bind and admin users, search for the admin and bind users within the
29 |       specified user DN tree (with an optional filter), and search for a group in the group DN tree
30 |       (with an optional filter).
31 | credits: VIO Team for all the continued support
32 | options:
33 |     domain_controller:
34 |         description:
35 |             - hostname or IP address of the Active Directory domain controller
36 |         required: True
37 |         type: str
38 |     encryption:
39 |         description:
40 |             - The type of encryption that will be used. Currently only NONE and SSL are supported.
41 |         required: True
42 |         choices: ['NONE', 'SSL']
43 |         type: str
44 |     admin_user:
45 |         description:
46 |             - specify the admin user to be validated
47 |         required: True
48 |         type: str
49 |     admin_user_password:
50 |         description:
51 |             - password for the admin user
52 |         required: True
53 |         type: str
54 |     bind_user:
55 |         description:
56 |             - specify the bind user to be validated
57 |         required: True
58 |         type: str
59 |     bind_user_password:
60 |         description:
61 |             - password for the bind user
62 |         required: True
63 |         type: str
64 |     project_user:
65 |         description:
66 |             - specify the project user
67 |         required: True
68 |         type: str
69 |     project_user_password:
70 |         description:
71 |             - password for the project user
72 |         required: True
73 |         type: str
74 |     user_dn_tree:
75 |         description:
76 |             - user tree DN, for example ou=vio,dc=corp,dc=local
77 |         required: True
78 |         type: str
79 |     user_filter:
80 |         description:
81 |             - valid ldap query used to search for the specified users, for example (&(objectCategory=person)(objectClass=user))
82 |         required: False
83 |         type: str
84 |     group_dn_tree:
85 |         description:
86 |             - group tree DN, for example ou=vio,dc=corp,dc=local
87 |         required: True
88 |         type: str
89 |     group_filter:
90 |         description:
91 |             - valid ldap query used to search for a group in the specified DN, for example (&(objectClass=group)(objectCategory=group))
92 |         required: False
93 |         type: str
94 | 
95 | requirements: python-ldap, python-ldapurl
96 | '''
97 | 
98 | EXAMPLES = '''
99 | - name: Validate AD users for Openstack Deployment when AD is the authentication source
100 |   vio_ldap:
101 |     domain_controller: "{{ vio_authentication_ad_dc_hostname }}"
102 |     encryption: "{{ vio_authentication_ad_encryption }}"
103 |     admin_user: "{{ vio_authentication_ad_admin_user }}"
104 |     admin_user_password: "{{ vio_authentication_ad_admin_user_password }}"
105 |     bind_user: "{{ vio_authentication_ad_bind_user }}"
106 |     bind_user_password: "{{ vio_authentication_ad_bind_user_password }}"
107 |     project_user: "{{ vio_val_user_name_ad }}"
108 |     project_user_password: "{{ vio_val_user_pass_ad }}"
109 |     user_dn_tree: "{{ vio_authentication_ad_ldap_user_tree_dn }}"
110 |     user_filter: "{{ vio_authentication_ad_ldap_user_filter }}"
111 |     group_dn_tree: "{{ vio_authentication_ad_ldap_group_tree_dn }}"
112 |     group_filter: "{{ vio_authentication_ad_ldap_group_filter }}"
113 | 
114 | '''
115 | 
116 | 
117 | try:
118 |     import sys
119 |     import ldap
120 |     import ldapurl
121 |     IMPORTS = True
122 | except ImportError:
123 |     IMPORTS = False
124 | 
125 | def _setup_url(prefix, ldap_port, dc_hostname):
126 |     ldap_url = "{}://{}:{}".format(prefix, dc_hostname, ldap_port)
127 |     return ldap_url
128 | 
129 | 
130 | def ldap_setup_url(module, hostname, encryption=None):
131 | 
132 |     prefix = 'ldap'
133 |     port = 389
134 | 
135 |     if encryption == 'SSL':
136 |         prefix = 'ldaps'
137 |         port = 636
138 | 
139 |     server = _setup_url(prefix, port, hostname)
140 | 
141 |     if ldapurl.isLDAPUrl(server):
142 |         return server
143 |     else:
144 |         fail_msg = "Invalid ldap uri for: {}".format(server)
145 |         module.fail_json(msg=fail_msg)
146 | 
147 | 
148 | def ldap_initialize(module, server):
149 | 
150 |     ldapmodule_trace_level = 1
151 |     ldapmodule_trace_file = sys.stderr
152 |     ldap._trace_level = ldapmodule_trace_level
153 | 
154 |     try:
155 |         conn = ldap.initialize(
156 |             server,
157 |             trace_level=ldapmodule_trace_level,
158 |             trace_file=ldapmodule_trace_file
159 |         )
160 | 
161 |     except ldap.LDAPError as e:
162 |         fail_msg = "LDAP Error initializing: {}".format(ldap_errors(e))
163 |         module.fail_json(msg=fail_msg)
164 | 
165 |     return conn
166 | 
167 | 
168 | def ldap_bind_with_user(module, conn, username, password):
169 | 
170 |     result = False
171 | 
172 |     try:
173 | 
174 |         conn.simple_bind_s(username, password)
175 |         result = True
176 | 
177 |     except ldap.INVALID_CREDENTIALS:
178 |         fail_msg = "Invalid Credentials for user {}".format(username)
179 |         module.fail_json(msg=fail_msg)
180 |     except ldap.LDAPError as e:
181 |         fail_msg = "LDAP Error Binding user: {}: ERROR: {}".format(username, ldap_errors(e))
182 |         module.fail_json(msg=fail_msg)
183 | 
184 |     return result
185 | 
186 | 
187 | def ldap_search(module, conn, dn, search_filter, ldap_attrs):
188 | 
189 |     try:
190 |         search = conn.search_s(dn, ldap.SCOPE_SUBTREE, search_filter, ldap_attrs)
191 |     except ldap.LDAPError as e:
192 |         fail_msg = "LDAP Error Searching: {}".format(ldap_errors(e))
193 |         module.fail_json(msg=fail_msg)
194 | 
195 |     return search
196 | 
197 | 
198 | def ldap_errors(error):
199 |     if isinstance(error.message, dict) and 'info' in error.message:
200 |         return error.message['info']
201 |     else:
202 |         return error.message
203 | 
204 | 
205 | def ldap_search_results(results, ldap_attr, target):
206 |     results_list = [j for i in results for j in i if isinstance(j, dict)]
207 | 
208 |     attr_results = [v for x in results_list for k, v in x.items() if k == ldap_attr]
209 | 
210 |     target_list = [x for r in attr_results for x in r]
211 | 
212 |     if target in target_list:
213 |         return target
214 |     else:
215 |         return False
216 | 
217 | 
218 | def ldap_unbind(module, conn):
219 |     result = False
220 | 
221 |     try:
222 |         conn.unbind_s()
223 |         result = True
224 |     except ldap.LDAPError as e:
225 |         fail_msg = "LDAP Error unbinding: {}".format(e)
226 |         module.fail_json(msg=fail_msg)
227 | 
228 |     return result
229 | 
230 | 
231 | def set_filter_for_search(search_type, search_filter=None):
232 | 
233 |     if search_type == 'user' and search_filter:
234 |         return search_filter
235 |     elif search_type == 'user' and search_filter is None:
236 |         return '(&(objectCategory=person)(objectClass=user))'
237 |     elif search_type == 'group' and search_filter:
238 |         return search_filter
239 |     elif search_type == 'group' and search_filter is None:
240 |         return '(&(objectClass=group)(objectCategory=group))'
241 | 
242 | 
243 | 
244 | def main():
245 |     argument_spec = dict(
246 |         domain_controller=dict(type='str', required=True),
247 |         encryption=dict(type='str', required=True),
248 |         admin_user=dict(type='str', required=True),
249 |         admin_user_password=dict(type='str', required=True, no_log=True),
250 |         bind_user=dict(type='str', required=True),
251 |         bind_user_password=dict(type='str', required=True, no_log=True),
252 |         project_user=dict(type='str', required=True),
253 |         project_user_password=dict(type='str', required=True, no_log=True),
254 |         user_dn_tree=dict(type='str', required=True),
255 |         user_filter=dict(type='str', required=False),
256 |         group_dn_tree=dict(type='str', required=True),
257 |         group_filter=dict(type='str', required=False),
258 |     )
259 | 
260 |     module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
261 | 
262 |     if not IMPORTS:
263 |         module.fail_json(msg="failed to import required modules")
264 | 
265 |     failed = True
266 |     msg = "Failed to validate AD users"
267 | 
268 |     domain_controller = module.params['domain_controller']
269 |     encryption = module.params['encryption']
270 |     admin_user = module.params['admin_user']
271 |     admin_password = module.params['admin_user_password']
272 |     bind_user = module.params['bind_user']
273 |     bind_password = module.params['bind_user_password']
274 |     project_user = module.params['project_user']
275 |     project_password = module.params['project_user_password']
276 |     user_dn_tree = module.params['user_dn_tree']
277 |     user_filter = set_filter_for_search('user', module.params['user_filter'])
278 |     group_dn_tree = module.params['group_dn_tree']
279 |     group_filter = set_filter_for_search('group', module.params['group_filter'])
280 | 
281 |     server = ldap_setup_url(module, domain_controller, encryption)
282 |     conn = ldap_initialize(module, server)
283 | 
284 |     conn.protocol_version = ldap.VERSION3
285 |     conn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
286 |     conn.set_option(ldap.OPT_REFERRALS, ldap.OPT_OFF)
287 | 
288 |     test_bind_user = ldap_bind_with_user(module, conn, bind_user, bind_password)
289 | 
290 |     if not test_bind_user:
291 |         module.fail_json(msg="Failed to bind with bind user")
292 | 
293 |     test_admin_user = ldap_bind_with_user(module, conn, admin_user, admin_password)
294 | 
295 |     if not test_admin_user:
296 |         module.fail_json(msg="Failed to bind with admin user")
297 | 
298 |     test_project_user = ldap_bind_with_user(module, conn, project_user, project_password)
299 | 
300 |     if not test_project_user:
301 |         module.fail_json(msg="Failed to bind with Project user")
302 | 
303 |     admin_search = ldap_search(module, conn, user_dn_tree, user_filter, ['userPrincipalName'])
304 | 
305 |     if not admin_search:
306 |         module.fail_json(msg="User search returned no results in tree dn: {}".format(user_dn_tree))
307 | 
308 |     admin_search_results = ldap_search_results(admin_search, 'userPrincipalName', admin_user)
309 | 
310 |     if not admin_search_results:
311 |         fail_msg = "Failed to find admin user: {}".format(admin_user)
312 |         module.fail_json(msg=fail_msg)
313 | 
314 |     bind_search = ldap_search(module, conn, user_dn_tree, user_filter, ['userPrincipalName'])
315 | 
316 |     if not bind_search:
317 |         fail_msg = "Failed to find bind user: {}".format(bind_user)
318 |         module.fail_json(msg=fail_msg)
319 | 
320 |     bind_search_results = ldap_search_results(bind_search, 'userPrincipalName', bind_user)
321 | 
322 |     if not bind_search_results:
323 |         fail_msg = "Failed to find bind user: {} in tree dn: {}".format(bind_user, user_dn_tree)
324 |         module.fail_json(msg=fail_msg)
325 | 
326 |     group_search = ldap_search(module, conn, group_dn_tree, group_filter, ['cn'])
327 | 
328 |     if not group_search:
329 |         fail_msg = "Failed to find a group in group dn tree: {} and filter: {}".format(group_dn_tree, group_filter)
330 |         module.fail_json(msg=fail_msg)
331 | 
332 |     failed = False
333 |     msg = "Validated AD Users"
334 | 
335 |     ldap_unbind(module, conn)
336 | 
337 |     module.exit_json(changed=False, failed=failed, msg=msg)
338 | 
339 | from ansible.module_utils.basic import *
340 | 
341 | if __name__ == '__main__':
342 |     main()
343 | 
--------------------------------------------------------------------------------
/vio_unregister_extension.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | #
3 | # (c) 2015, Joseph Callen
4 | # Portions Copyright (c) 2015 VMware, Inc. All rights reserved.
5 | #
6 | # This file is part of Ansible
7 | #
8 | # Ansible is free software: you can redistribute it and/or modify
9 | # it under the terms of the GNU General Public License as published by
10 | # the Free Software Foundation, either version 3 of the License, or
11 | # (at your option) any later version.
12 | #
13 | # Ansible is distributed in the hope that it will be useful,
14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 | # GNU General Public License for more details.
17 | #
18 | # You should have received a copy of the GNU General Public License
19 | # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
20 | 
21 | 
22 | DOCUMENTATION = '''
23 | module: vio_unregister_extension
24 | short_description: Unregisters VIO or NSX plugins from vCenter
25 | description:
26 |     Unregisters VIO or NSX plugins from vCenter.
27 | options:
28 |     hostname:
29 |         description:
30 |             - The hostname or IP address of the vSphere vCenter API server
31 |         required: True
32 |     username:
33 |         description:
34 |             - The username of the vSphere vCenter with Admin rights
35 |         required: True
36 |         aliases: ['user', 'admin']
37 |     password:
38 |         description:
39 |             - The password of the vSphere vCenter user
40 |         required: True
41 |         aliases: ['pass', 'pwd']
42 |     extention_type:
43 |         description:
44 |             - type of extension to unregister
45 |         choices: ['nsx', 'vio']
46 |         required: True
47 | '''
48 | 
49 | EXAMPLES = '''
50 | - name: Unregister Extension
51 |   vio_unregister_extension:
52 |     hostname: "{{ vcenter }}"
53 |     username: "{{ vcenter_user }}"
54 |     password: "{{ vcenter_password }}"
55 |     validate_certs: "{{ vcenter_validate_certs }}"
56 |     extention_type: 'nsx'
57 | 
58 | '''
59 | 
60 | try:
61 |     from pyVmomi import vim, vmodl
62 |     HAS_PYVMOMI = True
63 | except ImportError:
64 |     HAS_PYVMOMI = False
65 | 
66 | 
67 | vio_ext = ['com.vmware.openstack.ui',
68 |            'org.os.vmw.plugin']
69 | 
70 | nsx_ext = ['com.vmware.vShieldManager']
71 | 
72 | vc = {}
73 | 
74 | 
75 | def state_exit_unchanged(module):
76 |     module.exit_json(changed=False, result=vc['current_ext'], msg="EXIT Unchanged")
77 | 
78 | 
79 | def state_unregister_ext(module):
80 | 
81 |     extensions_to_unregister = vc['current_ext']
82 |     content = vc['content']
83 | 
84 |     failed_to_unregister = []
85 | 
86 |     for ext in extensions_to_unregister:
87 | 
88 |         try:
89 |             content.extensionManager.UnregisterExtension(ext)
90 |         except vim.fault.NotFound:
91 |             failed_to_unregister.append(ext)
92 |         except Exception as e:
93 |             module.fail_json(msg="Failed to unregister extension: {} with error: {}".format(ext, str(e)))
94 | 
95 |     changed = len(failed_to_unregister) < len(extensions_to_unregister)
96 |     module.exit_json(changed=changed, result=failed_to_unregister, msg="Unregistered extensions")
97 | 
98 | 
99 | def state_register_ext(module):
100 |     module.exit_json(changed=False, msg="NOT SUPPORTED use appliance specific extension registration")
101 | 
102 | 
103 | def get_instance_ext_id(extention_keys):
104 | 
105 |     instance_ext_ids = []
106 | 
107 |     for ext in extention_keys:
108 |         exts = ext.split('.')
109 |         if 'vcext' in exts:
110 |             inst_id = exts[-1]
111 |             inst_ext_id = "com.vmware.openstack.vcext.{}".format(inst_id)
112 |             instance_ext_ids.append(inst_ext_id)
113 | 
114 |     return instance_ext_ids
115 | 
116 | def check_extention_state(module):
117 |     state = 'absent'
118 | 
119 |     content = connect_to_api(module)
120 | 
121 |     vc['content'] = content
122 | 
123 |     extentions = content.extensionManager.extensionList
124 | 
125 |     ext_keys = [k.key for k in extentions]
126 | 
127 |     if module.params['extention_type'] == 'nsx':
128 |         # record which nsx extensions are actually registered so the
129 |         # state handlers always find vc['current_ext'] populated
130 |         vc['current_ext'] = [e for e in nsx_ext if e in ext_keys]
131 |         if nsx_ext[0] in ext_keys:
132 |             state = 'present'
133 | 
134 |     if module.params['extention_type'] == 'vio':
135 | 
136 |         instance_ext_ids = get_instance_ext_id(ext_keys)
137 |         vio_extensions = vio_ext + instance_ext_ids
138 | 
139 |         if all(x in ext_keys for x in vio_extensions):
140 |             state = 'present'
141 | 
142 |         vc['current_ext'] = [e for e in vio_extensions if e in ext_keys]
143 | 
144 |     return state
145 | 
146 | 
147 | def main():
148 |     argument_spec = vmware_argument_spec()
149 | 
150 |     argument_spec.update(
151 |         dict(
152 |             extention_type=dict(required=True, choices=['vio', 'nsx'], type='str'),
153 |             state=dict(default='present', choices=['present', 'absent'], type='str'),
154 |         )
155 |     )
156 | 
157 |     module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
158 | 
159 |     if not HAS_PYVMOMI:
160 |         module.fail_json(msg='pyvmomi is required for this module')
161 | 
162 |     states = {
163 |         'absent': {
164 |             'absent': state_exit_unchanged,
165 |             'present': state_unregister_ext,
166 |         },
167 |         'present': {
168 |             'present': state_exit_unchanged,
169 |             'absent': state_register_ext,
170 |         }
171 |     }
172 | 
173 |     desired_state = module.params['state']
174 |     current_state = check_extention_state(module)
175 | 
176 |     states[desired_state][current_state](module)
177 | 
178 | from ansible.module_utils.basic import *
179 | from ansible.module_utils.vmware import *
180 | 
181 | if __name__ == '__main__':
182 |     main()
183 | 
--------------------------------------------------------------------------------
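Several of these modules (vds.py, vio_unregister_extension.py, vcenter_vsan_witness_deploy.py) share the same control flow: a nested dispatch table keyed first by the desired state from the playbook and then by the state observed in vCenter, so every desired/current combination maps to exactly one handler. A minimal standalone sketch of the pattern, with illustrative names that are not taken from the modules themselves:

    def state_create(name):
        print("creating {}".format(name))

    def state_destroy(name):
        print("destroying {}".format(name))

    def state_exit_unchanged(name):
        print("nothing to do for {}".format(name))

    # outer key: desired state from the playbook; inner key: observed state
    states = {
        'present': {'absent': state_create, 'present': state_exit_unchanged},
        'absent': {'present': state_destroy, 'absent': state_exit_unchanged},
    }

    states['present']['absent']('vds001')  # -> creating vds001

Because the table enumerates every combination up front, a missing transition (such as desired 'absent' while the observed state is 'update') fails loudly with a KeyError instead of silently doing nothing, which is why each module's table should cover all states its check function can return.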