688 | tags = client.list_tags_for_resource(
689 | ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier'])
690 | c['Tags'] = tags['TagList']
691 |
692 | if self.ec2_instance_filters:
693 | for filter_key, filter_values in self.ec2_instance_filters.items():
694 | # get AWS tag key e.g. tag:env will be 'env'
695 | tag_name = filter_key.split(":", 1)[1]
696 | # Filter values is a list (if you put multiple values for the same tag name)
697 | matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags'])
698 |
699 | if matches_filter:
700 | # it matches a filter, so stop looking for further matches
701 | break
702 |
703 | except Exception as e:
704 | if e.message.find('DBInstanceNotFound') >= 0:
705 | # AWS RDS bug (2016-01-06) means deletion does not fully complete and leave an 'empty' cluster.
706 | # Ignore errors when trying to find tags for these
707 | pass
708 |
709 | # ignore empty clusters caused by AWS bug
710 | if len(c['DBClusterMembers']) == 0:
711 | continue
712 | elif matches_filter:
713 | c_dict[c['DBClusterIdentifier']] = c
714 |
715 | self.inventory['db_clusters'] = c_dict
716 |
def get_elasticache_clusters_by_region(self, region):
    ''' Makes an AWS API call to the list of ElastiCache clusters (with
    nodes' info) in a particular region and adds each one to the
    inventory.  Exits via fail_with_error on API/auth failures. '''

    # ElastiCache boto module doesn't provide a get_all_instances method,
    # that's why we need to call describe directly (it would be called by
    # the shorthand method anyway...)
    conn = None
    try:
        conn = self.connect_to_aws(elasticache, region)
        if conn:
            # show_cache_node_info = True
            # because we also want nodes' information
            response = conn.describe_cache_clusters(None, None, None, True)

    except boto.exception.BotoServerError as e:
        error = e.reason

        if e.error_code == 'AuthFailure':
            error = self.get_auth_error_message()
        if not e.reason == "Forbidden":
            error = "Looks like AWS ElastiCache is down:\n%s" % e.message
        self.fail_with_error(error, 'getting ElastiCache clusters')

    # Bug fix: when no connection could be established, 'response' (and
    # therefore 'clusters') was never bound, so the code below raised a
    # NameError.  There is simply nothing to inventory in that case.
    if not conn:
        return

    try:
        # Boto also doesn't provide wrapper classes to CacheClusters or
        # CacheNodes. Because of that we can't make use of the get_list
        # method in the AWSQueryConnection. Let's do the work manually
        clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']

    except KeyError as e:
        error = "ElastiCache query to AWS failed (unexpected format)."
        self.fail_with_error(error, 'getting ElastiCache clusters')

    for cluster in clusters:
        self.add_elasticache_cluster(cluster, region)
752 |
def get_elasticache_replication_groups_by_region(self, region):
    ''' Makes an AWS API call to the list of ElastiCache replication groups
    in a particular region and adds each one to the inventory.
    Exits via fail_with_error on API/auth failures. '''

    # ElastiCache boto module doesn't provide a get_all_instances method,
    # that's why we need to call describe directly (it would be called by
    # the shorthand method anyway...)
    conn = None
    try:
        conn = self.connect_to_aws(elasticache, region)
        if conn:
            response = conn.describe_replication_groups()

    except boto.exception.BotoServerError as e:
        error = e.reason

        if e.error_code == 'AuthFailure':
            error = self.get_auth_error_message()
        if not e.reason == "Forbidden":
            error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
        self.fail_with_error(error, 'getting ElastiCache clusters')

    # Bug fix: when no connection could be established, 'response' was never
    # bound and the lookup below raised a NameError instead of a clean no-op.
    if not conn:
        return

    try:
        # Boto also doesn't provide wrapper classes to ReplicationGroups
        # Because of that we can't make use of the get_list method in the
        # AWSQueryConnection. Let's do the work manually
        replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']

    except KeyError as e:
        error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
        self.fail_with_error(error, 'getting ElastiCache clusters')

    for replication_group in replication_groups:
        self.add_elasticache_replication_group(replication_group, region)
786 |
def get_auth_error_message(self):
    '''Build a multi-line message describing the likely causes of an AWS
    authentication failure: missing credential environment variables and
    absent (or possibly stale) boto configuration files.'''
    lines = ["Authentication error retrieving ec2 inventory."]

    have_env_creds = (os.environ.get('AWS_ACCESS_KEY_ID') is not None
                      and os.environ.get('AWS_SECRET_ACCESS_KEY') is not None)
    if have_env_creds:
        lines.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
    else:
        lines.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')

    candidate_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
    existing_configs = [p for p in candidate_paths if os.path.isfile(os.path.expanduser(p))]
    if existing_configs:
        lines.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(existing_configs))
    else:
        lines.append(" - No Boto config found at any expected location '%s'" % ', '.join(candidate_paths))

    return '\n'.join(lines)
803 |
804 | def fail_with_error(self, err_msg, err_operation=None):
805 | '''log an error to std err for ansible-playbook to consume and exit'''
806 | if err_operation:
807 | err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
808 | err_msg=err_msg, err_operation=err_operation)
809 | sys.stderr.write(err_msg)
810 | sys.exit(1)
811 |
def get_instance(self, region, instance_id):
    '''Look up a single EC2 instance by ID in the given region and return
    the boto instance object (None when nothing matches).'''
    conn = self.connect(region)

    for reservation in conn.get_all_instances([instance_id]):
        if reservation.instances:
            # Filtering on a single ID yields at most one instance;
            # return the first hit.
            return reservation.instances[0]
819 |
def add_instance(self, instance, region):
    ''' Adds an instance to the inventory and index, as long as it is
    addressable

    :param instance: boto EC2 instance object
    :param region: name of the AWS region the instance lives in

    Skips instances whose state is not in ec2_instance_states, that have
    no usable destination address, or whose hostname fails the
    include/exclude patterns.  Otherwise the host is registered under
    every enabled group_by_* grouping and its hostvars are recorded.
    '''

    # Only return instances with desired instance states
    if instance.state not in self.ec2_instance_states:
        return

    # Select the best destination address.  Precedence: an explicit
    # destination_format built from tag values, then the VPC/classic
    # destination variable (falling back to a tag of the same name).
    if self.destination_format and self.destination_format_tags:
        dest = self.destination_format.format(*[getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags])
    elif instance.subnet_id:
        dest = getattr(instance, self.vpc_destination_variable, None)
        if dest is None:
            dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None)
    else:
        dest = getattr(instance, self.destination_variable, None)
        if dest is None:
            dest = getattr(instance, 'tags').get(self.destination_variable, None)

    if not dest:
        # Skip instances we cannot address (e.g. private VPC subnet)
        return

    # Set the inventory name, either from a tag ('tag_<name>') or from an
    # instance attribute named by hostname_variable
    hostname = None
    if self.hostname_variable:
        if self.hostname_variable.startswith('tag_'):
            hostname = instance.tags.get(self.hostname_variable[4:], None)
        else:
            hostname = getattr(instance, self.hostname_variable)

    # set the hostname from route53
    # NOTE(review): endswith accepts a str or a tuple of suffixes, so
    # route53_hostnames is presumably one of those — confirm at config load
    if self.route53_enabled and self.route53_hostnames:
        route53_names = self.get_instance_route53_names(instance)
        for name in route53_names:
            if name.endswith(self.route53_hostnames):
                hostname = name

    # If we can't get a nice hostname, use the destination address
    if not hostname:
        hostname = dest
    # to_safe strips hostname characters like dots, so don't strip route53 hostnames
    elif self.route53_enabled and self.route53_hostnames and hostname.endswith(self.route53_hostnames):
        hostname = hostname.lower()
    else:
        hostname = self.to_safe(hostname).lower()

    # if we only want to include hosts that match a pattern, skip those that don't
    if self.pattern_include and not self.pattern_include.match(hostname):
        return

    # if we need to exclude hosts that match a pattern, skip those
    if self.pattern_exclude and self.pattern_exclude.match(hostname):
        return

    # Add to index
    self.index[hostname] = [region, instance.id]

    # Inventory: Group by instance ID (always a group of 1)
    if self.group_by_instance_id:
        self.inventory[instance.id] = [hostname]
        if self.nested_groups:
            self.push_group(self.inventory, 'instances', instance.id)

    # Inventory: Group by region
    if self.group_by_region:
        self.push(self.inventory, region, hostname)
        if self.nested_groups:
            self.push_group(self.inventory, 'regions', region)

    # Inventory: Group by availability zone
    if self.group_by_availability_zone:
        self.push(self.inventory, instance.placement, hostname)
        if self.nested_groups:
            if self.group_by_region:
                self.push_group(self.inventory, region, instance.placement)
            self.push_group(self.inventory, 'zones', instance.placement)

    # Inventory: Group by Amazon Machine Image (AMI) ID
    if self.group_by_ami_id:
        ami_id = self.to_safe(instance.image_id)
        self.push(self.inventory, ami_id, hostname)
        if self.nested_groups:
            self.push_group(self.inventory, 'images', ami_id)

    # Inventory: Group by instance type
    if self.group_by_instance_type:
        type_name = self.to_safe('type_' + instance.instance_type)
        self.push(self.inventory, type_name, hostname)
        if self.nested_groups:
            self.push_group(self.inventory, 'types', type_name)

    # Inventory: Group by instance state
    if self.group_by_instance_state:
        state_name = self.to_safe('instance_state_' + instance.state)
        self.push(self.inventory, state_name, hostname)
        if self.nested_groups:
            self.push_group(self.inventory, 'instance_states', state_name)

    # Inventory: Group by key pair
    if self.group_by_key_pair and instance.key_name:
        key_name = self.to_safe('key_' + instance.key_name)
        self.push(self.inventory, key_name, hostname)
        if self.nested_groups:
            self.push_group(self.inventory, 'keys', key_name)

    # Inventory: Group by VPC
    if self.group_by_vpc_id and instance.vpc_id:
        vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
        self.push(self.inventory, vpc_id_name, hostname)
        if self.nested_groups:
            self.push_group(self.inventory, 'vpcs', vpc_id_name)

    # Inventory: Group by security group
    if self.group_by_security_group:
        try:
            for group in instance.groups:
                key = self.to_safe("security_group_" + group.name)
                self.push(self.inventory, key, hostname)
                if self.nested_groups:
                    self.push_group(self.inventory, 'security_groups', key)
        except AttributeError:
            # instance.groups only exists on boto >= 2.3.0
            self.fail_with_error('\n'.join(['Package boto seems a bit older.',
                                            'Please upgrade boto >= 2.3.0.']))

    # Inventory: Group by AWS account ID
    # NOTE(review): this pushes 'dest' where every other grouping in this
    # method pushes 'hostname' — confirm whether that is intentional
    if self.group_by_aws_account:
        self.push(self.inventory, self.aws_account_id, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'accounts', self.aws_account_id)

    # Inventory: Group by tag keys
    if self.group_by_tag_keys:
        for k, v in instance.tags.items():
            if self.expand_csv_tags and v and ',' in v:
                # map() is lazy on Python 3, but it is consumed exactly
                # once in the loop below
                values = map(lambda x: x.strip(), v.split(','))
            else:
                values = [v]

            for v in values:
                if v:
                    key = self.to_safe("tag_" + k + "=" + v)
                else:
                    key = self.to_safe("tag_" + k)
                self.push(self.inventory, key, hostname)
                if self.nested_groups:
                    self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
                    if v:
                        self.push_group(self.inventory, self.to_safe("tag_" + k), key)

    # Inventory: Group by Route53 domain names if enabled
    if self.route53_enabled and self.group_by_route53_names:
        route53_names = self.get_instance_route53_names(instance)
        for name in route53_names:
            self.push(self.inventory, name, hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'route53', name)

    # Global Tag: instances without tags
    if self.group_by_tag_none and len(instance.tags) == 0:
        self.push(self.inventory, 'tag_none', hostname)
        if self.nested_groups:
            self.push_group(self.inventory, 'tags', 'tag_none')

    # Global Tag: tag all EC2 instances
    self.push(self.inventory, 'ec2', hostname)

    self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
    self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
990 |
def add_rds_instance(self, instance, region):
    ''' Adds an RDS instance to the inventory and index, as long as it is
    addressable

    :param instance: boto RDS instance object
    :param region: name of the AWS region the instance lives in
    '''

    # Only want available instances unless all_rds_instances is True
    if not self.all_rds_instances and instance.status != 'available':
        return

    # Select the best destination address
    # (endpoint[0] — presumably the host part of boto's (host, port)
    # endpoint pair; confirm against the boto RDS docs)
    dest = instance.endpoint[0]

    if not dest:
        # Skip instances we cannot address (e.g. private VPC subnet)
        return

    # Set the inventory name, either from a tag ('tag_<name>') or from an
    # instance attribute named by hostname_variable
    hostname = None
    if self.hostname_variable:
        if self.hostname_variable.startswith('tag_'):
            hostname = instance.tags.get(self.hostname_variable[4:], None)
        else:
            hostname = getattr(instance, self.hostname_variable)

    # If we can't get a nice hostname, use the destination address
    if not hostname:
        hostname = dest

    hostname = self.to_safe(hostname).lower()

    # Add to index
    self.index[hostname] = [region, instance.id]

    # Inventory: Group by instance ID (always a group of 1)
    if self.group_by_instance_id:
        self.inventory[instance.id] = [hostname]
        if self.nested_groups:
            self.push_group(self.inventory, 'instances', instance.id)

    # Inventory: Group by region
    if self.group_by_region:
        self.push(self.inventory, region, hostname)
        if self.nested_groups:
            self.push_group(self.inventory, 'regions', region)

    # Inventory: Group by availability zone
    if self.group_by_availability_zone:
        self.push(self.inventory, instance.availability_zone, hostname)
        if self.nested_groups:
            if self.group_by_region:
                self.push_group(self.inventory, region, instance.availability_zone)
            self.push_group(self.inventory, 'zones', instance.availability_zone)

    # Inventory: Group by instance type (RDS calls it 'instance class')
    if self.group_by_instance_type:
        type_name = self.to_safe('type_' + instance.instance_class)
        self.push(self.inventory, type_name, hostname)
        if self.nested_groups:
            self.push_group(self.inventory, 'types', type_name)

    # Inventory: Group by VPC (via the subnet group, when present)
    if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
        vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
        self.push(self.inventory, vpc_id_name, hostname)
        if self.nested_groups:
            self.push_group(self.inventory, 'vpcs', vpc_id_name)

    # Inventory: Group by security group
    if self.group_by_security_group:
        try:
            if instance.security_group:
                key = self.to_safe("security_group_" + instance.security_group.name)
                self.push(self.inventory, key, hostname)
                if self.nested_groups:
                    self.push_group(self.inventory, 'security_groups', key)

        except AttributeError:
            # security_group only exists on boto >= 2.3.0
            self.fail_with_error('\n'.join(['Package boto seems a bit older.',
                                            'Please upgrade boto >= 2.3.0.']))

    # Inventory: Group by engine
    if self.group_by_rds_engine:
        self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname)
        if self.nested_groups:
            self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))

    # Inventory: Group by parameter group
    if self.group_by_rds_parameter_group:
        self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname)
        if self.nested_groups:
            self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))

    # Global Tag: all RDS instances
    self.push(self.inventory, 'rds', hostname)

    self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
    self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
1087 |
def add_elasticache_cluster(self, cluster, region):
    ''' Adds an ElastiCache cluster to the inventory and index, as long as
    its nodes are addressable

    :param cluster: dict from describe_cache_clusters (with node info)
    :param region: name of the AWS region the cluster lives in
    '''

    # Only want available clusters unless all_elasticache_clusters is True
    if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
        return

    # Select the best destination address
    if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
        # Memcached cluster
        dest = cluster['ConfigurationEndpoint']['Address']
        is_redis = False
    else:
        # Redis single node cluster
        # Because all Redis clusters are single nodes, we'll merge the
        # info from the cluster with info about the node
        dest = cluster['CacheNodes'][0]['Endpoint']['Address']
        is_redis = True

    if not dest:
        # Skip clusters we cannot address (e.g. private VPC subnet)
        return

    # Add to index
    self.index[dest] = [region, cluster['CacheClusterId']]

    # Inventory: Group by instance ID (always a group of 1)
    if self.group_by_instance_id:
        self.inventory[cluster['CacheClusterId']] = [dest]
        if self.nested_groups:
            self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])

    # Inventory: Group by region
    if self.group_by_region and not is_redis:
        self.push(self.inventory, region, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'regions', region)

    # Inventory: Group by availability zone
    if self.group_by_availability_zone and not is_redis:
        self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
        if self.nested_groups:
            if self.group_by_region:
                self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
            self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])

    # Inventory: Group by node type
    if self.group_by_instance_type and not is_redis:
        type_name = self.to_safe('type_' + cluster['CacheNodeType'])
        self.push(self.inventory, type_name, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'types', type_name)

    # Inventory: Group by VPC (information not available in the current
    # AWS API version for ElastiCache)

    # Inventory: Group by security group
    if self.group_by_security_group and not is_redis:

        # Check for the existence of the 'SecurityGroups' key and also if
        # this key has some value. When the cluster is not placed in a SG
        # the query can return None here and cause an error.
        if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
            for security_group in cluster['SecurityGroups']:
                key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
                self.push(self.inventory, key, dest)
                if self.nested_groups:
                    self.push_group(self.inventory, 'security_groups', key)

    # Inventory: Group by engine
    if self.group_by_elasticache_engine and not is_redis:
        engine_group = self.to_safe("elasticache_" + cluster['Engine'])
        self.push(self.inventory, engine_group, dest)
        if self.nested_groups:
            # Bug fix: the nested child must be the same group the host was
            # pushed into; the 'elasticache_' prefix was missing, so the
            # nested hierarchy referenced a group with no members.  This now
            # mirrors add_elasticache_node.
            self.push_group(self.inventory, 'elasticache_engines', engine_group)

    # Inventory: Group by parameter group
    if self.group_by_elasticache_parameter_group:
        param_group = self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName'])
        self.push(self.inventory, param_group, dest)
        if self.nested_groups:
            # Bug fix: same prefix mismatch as the engine grouping above.
            self.push_group(self.inventory, 'elasticache_parameter_groups', param_group)

    # Inventory: Group by replication group
    if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
        replication_group = self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId'])
        self.push(self.inventory, replication_group, dest)
        if self.nested_groups:
            # Bug fix: same prefix mismatch as the engine grouping above.
            self.push_group(self.inventory, 'elasticache_replication_groups', replication_group)

    # Global Tag: all ElastiCache clusters
    self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])

    host_info = self.get_host_info_dict_from_describe_dict(cluster)

    self.inventory["_meta"]["hostvars"][dest] = host_info

    # Add the nodes
    for node in cluster['CacheNodes']:
        self.add_elasticache_node(node, cluster, region)
1186 |
def add_elasticache_node(self, node, cluster, region):
    ''' Adds an ElastiCache node to the inventory and index, as long as
    it is addressable

    :param node: dict describing a single cache node
    :param cluster: dict of the enclosing cluster (source of the node
        type, AZ, engine and security-group information used below)
    :param region: name of the AWS region the node lives in
    '''

    # Only want available nodes unless all_elasticache_nodes is True
    if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
        return

    # Select the best destination address
    dest = node['Endpoint']['Address']

    if not dest:
        # Skip nodes we cannot address (e.g. private VPC subnet)
        return

    # Synthetic ID: '<cluster id>_<node id>' made inventory-safe
    node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])

    # Add to index
    self.index[dest] = [region, node_id]

    # Inventory: Group by node ID (always a group of 1)
    if self.group_by_instance_id:
        self.inventory[node_id] = [dest]
        if self.nested_groups:
            self.push_group(self.inventory, 'instances', node_id)

    # Inventory: Group by region
    if self.group_by_region:
        self.push(self.inventory, region, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'regions', region)

    # Inventory: Group by availability zone
    # NOTE(review): uses the cluster-level PreferredAvailabilityZone, not a
    # per-node AZ — confirm this is the intended granularity
    if self.group_by_availability_zone:
        self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
        if self.nested_groups:
            if self.group_by_region:
                self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
            self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])

    # Inventory: Group by node type
    if self.group_by_instance_type:
        type_name = self.to_safe('type_' + cluster['CacheNodeType'])
        self.push(self.inventory, type_name, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'types', type_name)

    # Inventory: Group by VPC (information not available in the current
    # AWS API version for ElastiCache)

    # Inventory: Group by security group
    if self.group_by_security_group:

        # Check for the existence of the 'SecurityGroups' key and also if
        # this key has some value. When the cluster is not placed in a SG
        # the query can return None here and cause an error.
        if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
            for security_group in cluster['SecurityGroups']:
                key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
                self.push(self.inventory, key, dest)
                if self.nested_groups:
                    self.push_group(self.inventory, 'security_groups', key)

    # Inventory: Group by engine
    if self.group_by_elasticache_engine:
        self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))

    # Inventory: Group by parameter group (done at cluster level)

    # Inventory: Group by replication group (done at cluster level)

    # Inventory: Group by ElastiCache Cluster
    if self.group_by_elasticache_cluster:
        self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)

    # Global Tag: all ElastiCache nodes
    self.push(self.inventory, 'elasticache_nodes', dest)

    host_info = self.get_host_info_dict_from_describe_dict(node)

    # Merge with any hostvars already recorded for this address (e.g. the
    # cluster entry for a single-node Redis cluster)
    if dest in self.inventory["_meta"]["hostvars"]:
        self.inventory["_meta"]["hostvars"][dest].update(host_info)
    else:
        self.inventory["_meta"]["hostvars"][dest] = host_info
1273 |
def add_elasticache_replication_group(self, replication_group, region):
    ''' Adds an ElastiCache replication group to the inventory and index

    :param replication_group: dict from describe_replication_groups
    :param region: name of the AWS region the group lives in

    Only the primary endpoint of the first node group is used as the
    destination address; clustered-mode groups without one are skipped.
    '''

    # Only want available clusters unless all_elasticache_replication_groups is True
    if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
        return

    # Skip clusters we cannot address (e.g. private VPC subnet or clustered redis)
    if replication_group['NodeGroups'][0]['PrimaryEndpoint'] is None or \
       replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] is None:
        return

    # Select the best destination address (PrimaryEndpoint)
    dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']

    # Add to index
    self.index[dest] = [region, replication_group['ReplicationGroupId']]

    # Inventory: Group by ID (always a group of 1)
    if self.group_by_instance_id:
        self.inventory[replication_group['ReplicationGroupId']] = [dest]
        if self.nested_groups:
            self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])

    # Inventory: Group by region
    if self.group_by_region:
        self.push(self.inventory, region, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'regions', region)

    # Inventory: Group by availability zone (doesn't apply to replication groups)

    # Inventory: Group by node type (doesn't apply to replication groups)

    # Inventory: Group by VPC (information not available in the current
    # AWS API version for replication groups

    # Inventory: Group by security group (doesn't apply to replication groups)
    # Check this value in cluster level

    # Inventory: Group by engine (replication groups are always Redis)
    if self.group_by_elasticache_engine:
        self.push(self.inventory, 'elasticache_redis', dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'elasticache_engines', 'redis')

    # Global Tag: all ElastiCache clusters
    self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])

    host_info = self.get_host_info_dict_from_describe_dict(replication_group)

    self.inventory["_meta"]["hostvars"][dest] = host_info
1326 |
def get_route53_records(self):
    ''' Get and store the map of resource records to domain names that
    point to them. '''

    if self.boto_profile:
        r53_conn = route53.Route53Connection(profile_name=self.boto_profile)
    else:
        r53_conn = route53.Route53Connection()

    self.route53_records = {}

    for zone in r53_conn.get_zones():
        # Zone names come back with a trailing dot; the exclusion list is
        # matched against the undotted form.
        if zone.name[:-1] in self.route53_excluded_zones:
            continue

        for record_set in r53_conn.get_all_rrsets(zone.id):
            domain = record_set.name
            if domain.endswith('.'):
                domain = domain[:-1]

            # Map each record value (IP / DNS target) to the set of
            # domain names that point at it.
            for value in record_set.resource_records:
                self.route53_records.setdefault(value, set()).add(domain)
1353 |
def get_instance_route53_names(self, instance):
    ''' Check if an instance is referenced in the records we have from
    Route53. If it is, return the list of domain names pointing to said
    instance. If nothing points to it, return an empty list. '''

    candidate_attrs = ('public_dns_name', 'private_dns_name',
                       'ip_address', 'private_ip_address')

    names = set()
    _missing = object()  # sentinel: distinguishes absent attrs from None values

    for attr in candidate_attrs:
        value = getattr(instance, attr, _missing)
        if value is _missing:
            continue

        if value in self.route53_records:
            names.update(self.route53_records[value])

    return list(names)
1374 |
def get_host_info_dict_from_instance(self, instance):
    '''Flatten a boto EC2 instance object into a dict of 'ec2_*'-prefixed
    hostvars, converting complex attribute values (state, placement, tags,
    security groups, block device mappings) into flat scalar entries.'''
    instance_vars = {}
    for key in vars(instance):
        value = getattr(instance, key)
        key = self.to_safe('ec2_' + key)

        # Handle complex types
        # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
        # (the backing attribute is '_state', hence the double underscore
        # after the 'ec2_' prefix)
        if key == 'ec2__state':
            instance_vars['ec2_state'] = instance.state or ''
            instance_vars['ec2_state_code'] = instance.state_code
        elif key == 'ec2__previous_state':
            instance_vars['ec2_previous_state'] = instance.previous_state or ''
            instance_vars['ec2_previous_state_code'] = instance.previous_state_code
        # NOTE: branch order matters below — strings are stripped before the
        # None check maps missing values to ''
        elif isinstance(value, (int, bool)):
            instance_vars[key] = value
        elif isinstance(value, six.string_types):
            instance_vars[key] = value.strip()
        elif value is None:
            instance_vars[key] = ''
        elif key == 'ec2_region':
            instance_vars[key] = value.name
        elif key == 'ec2__placement':
            instance_vars['ec2_placement'] = value.zone
        elif key == 'ec2_tags':
            for k, v in value.items():
                # optionally expand comma separated tag values into lists
                # (assumes tag values are strings — TODO confirm)
                if self.expand_csv_tags and ',' in v:
                    v = list(map(lambda x: x.strip(), v.split(',')))
                # 'key' is reused here; it is recomputed at the top of each
                # outer iteration, so the clobbering is harmless
                key = self.to_safe('ec2_tag_' + k)
                instance_vars[key] = v
        elif key == 'ec2_groups':
            group_ids = []
            group_names = []
            for group in value:
                group_ids.append(group.id)
                group_names.append(group.name)
            instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
            instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
        elif key == 'ec2_block_device_mapping':
            instance_vars["ec2_block_devices"] = {}
            for k, v in value.items():
                # map the device basename (e.g. 'sda1') to its volume id
                instance_vars["ec2_block_devices"][os.path.basename(k)] = v.volume_id
        else:
            pass
            # TODO Product codes if someone finds them useful
            # print key
            # print type(value)
            # print value

    instance_vars[self.to_safe('ec2_account_id')] = self.aws_account_id

    return instance_vars
1427 |
    def get_host_info_dict_from_describe_dict(self, describe_dict):
        ''' Parses the dictionary returned by the API call into a flat list
        of parameters. This method should be used only when 'describe' is
        used directly because Boto doesn't provide specific classes. '''

        # I really don't agree with prefixing everything with 'ec2'
        # because EC2, RDS and ElastiCache are different services.
        # I'm just following the pattern used until now to not break any
        # compatibility.

        host_info = {}
        for key in describe_dict:
            value = describe_dict[key]
            # Normalize the CamelCase API key to an ec2_-prefixed,
            # snake_case, group-safe name.
            key = self.to_safe('ec2_' + self.uncammelize(key))

            # Handle complex types

            # NOTE(review): the first three tests below are independent
            # `if` statements, while everything from 'ec2_member_clusters'
            # down is a single if/elif chain. A key matched by one of the
            # first three can therefore still reach the generic elif
            # branches (e.g. 'ec2_endpoint' with a None value also gets
            # stored as '').

            # Target: Memcached Cache Clusters
            if key == 'ec2_configuration_endpoint' and value:
                host_info['ec2_configuration_endpoint_address'] = value['Address']
                host_info['ec2_configuration_endpoint_port'] = value['Port']

            # Target: Cache Nodes and Redis Cache Clusters (single node)
            if key == 'ec2_endpoint' and value:
                host_info['ec2_endpoint_address'] = value['Address']
                host_info['ec2_endpoint_port'] = value['Port']

            # Target: Redis Replication Groups
            if key == 'ec2_node_groups' and value:
                # Only the first node group's primary endpoint is exposed.
                host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
                host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
                replica_count = 0
                for node in value[0]['NodeGroupMembers']:
                    if node['CurrentRole'] == 'primary':
                        host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
                        host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
                        host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
                    elif node['CurrentRole'] == 'replica':
                        # Replicas get numbered keys: ec2_replica_cluster_*_0, _1, ...
                        host_info['ec2_replica_cluster_address_' + str(replica_count)] = node['ReadEndpoint']['Address']
                        host_info['ec2_replica_cluster_port_' + str(replica_count)] = node['ReadEndpoint']['Port']
                        host_info['ec2_replica_cluster_id_' + str(replica_count)] = node['CacheClusterId']
                        replica_count += 1

            # Target: Redis Replication Groups
            if key == 'ec2_member_clusters' and value:
                host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])

            # Target: All Cache Clusters
            elif key == 'ec2_cache_parameter_group':
                host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
                host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
                host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']

            # Target: Almost everything
            elif key == 'ec2_security_groups':

                # Skip if SecurityGroups is None
                # (it is possible to have the key defined but no value in it).
                if value is not None:
                    sg_ids = []
                    for sg in value:
                        sg_ids.append(sg['SecurityGroupId'])
                    host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])

            # Target: Everything
            # Preserve booleans and integers
            elif isinstance(value, (int, bool)):
                host_info[key] = value

            # Target: Everything
            # Sanitize string values
            elif isinstance(value, six.string_types):
                host_info[key] = value.strip()

            # Target: Everything
            # Replace None by an empty string
            elif value is None:
                host_info[key] = ''

            else:
                # Remove non-processed complex types
                pass

        return host_info
1512 |
1513 | def get_host_info(self):
1514 | ''' Get variables about a specific host '''
1515 |
1516 | if len(self.index) == 0:
1517 | # Need to load index from cache
1518 | self.load_index_from_cache()
1519 |
1520 | if self.args.host not in self.index:
1521 | # try updating the cache
1522 | self.do_api_calls_update_cache()
1523 | if self.args.host not in self.index:
1524 | # host might not exist anymore
1525 | return self.json_format_dict({}, True)
1526 |
1527 | (region, instance_id) = self.index[self.args.host]
1528 |
1529 | instance = self.get_instance(region, instance_id)
1530 | return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
1531 |
1532 | def push(self, my_dict, key, element):
1533 | ''' Push an element onto an array that may not have been defined in
1534 | the dict '''
1535 | group_info = my_dict.setdefault(key, [])
1536 | if isinstance(group_info, dict):
1537 | host_list = group_info.setdefault('hosts', [])
1538 | host_list.append(element)
1539 | else:
1540 | group_info.append(element)
1541 |
1542 | def push_group(self, my_dict, key, element):
1543 | ''' Push a group as a child of another group. '''
1544 | parent_group = my_dict.setdefault(key, {})
1545 | if not isinstance(parent_group, dict):
1546 | parent_group = my_dict[key] = {'hosts': parent_group}
1547 | child_groups = parent_group.setdefault('children', [])
1548 | if element not in child_groups:
1549 | child_groups.append(element)
1550 |
1551 | def get_inventory_from_cache(self):
1552 | ''' Reads the inventory from the cache file and returns it as a JSON
1553 | object '''
1554 |
1555 | with open(self.cache_path_cache, 'r') as f:
1556 | json_inventory = f.read()
1557 | return json_inventory
1558 |
1559 | def load_index_from_cache(self):
1560 | ''' Reads the index from the cache file sets self.index '''
1561 |
1562 | with open(self.cache_path_index, 'rb') as f:
1563 | self.index = json.load(f)
1564 |
1565 | def write_to_cache(self, data, filename):
1566 | ''' Writes data in JSON format to a file '''
1567 |
1568 | json_data = self.json_format_dict(data, True)
1569 | with open(filename, 'w') as f:
1570 | f.write(json_data)
1571 |
1572 | def uncammelize(self, key):
1573 | temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
1574 | return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
1575 |
1576 | def to_safe(self, word):
1577 | ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
1578 | regex = "[^A-Za-z0-9\_"
1579 | if not self.replace_dash_in_groups:
1580 | regex += "\-"
1581 | return re.sub(regex + "]", "_", word)
1582 |
1583 | def json_format_dict(self, data, pretty=False):
1584 | ''' Converts a dict to a JSON object and dumps it as a formatted
1585 | string '''
1586 |
1587 | if pretty:
1588 | return json.dumps(data, sort_keys=True, indent=2)
1589 | else:
1590 | return json.dumps(data)
1591 |
1592 |
if __name__ == '__main__':
    # Run the script: constructing Ec2Inventory parses the CLI arguments
    # and prints the requested inventory (or host variables) as JSON.
    Ec2Inventory()
1596 |
--------------------------------------------------------------------------------
/environments/aws/hosts:
--------------------------------------------------------------------------------
1 | control_machine ansible_ssh_host=localhost ansible_connection=local
2 |
--------------------------------------------------------------------------------
/files/flask-redis/flask-redis.py:
--------------------------------------------------------------------------------
1 | from flask import Flask
2 | from flask_redis import FlaskRedis
3 | import traceback
4 |
5 | app = Flask(__name__)
6 | redis_store = FlaskRedis(app)
7 |
8 | @app.route('/')
9 | def hello_world():
10 | return 'Welcome to Deploying Applications With Ansible! You are visitor ' \
11 | 'number: {}'.format(redis_store.incr(1))
12 |
13 |
14 | # A little Flask magic to show backtraces in HTTP responses.
@app.errorhandler(Exception)
def show_errors(exception):
    """Return the full traceback in the HTTP response (debugging aid)."""
    # The original string literal was split across two physical lines,
    # which is a SyntaxError; the markup was evidently stripped during
    # extraction. Wrap the traceback in <pre> tags so it renders legibly.
    return "<pre>" + traceback.format_exc() + "</pre>", 500
18 |
--------------------------------------------------------------------------------
/files/hello_world/hello_world.py:
--------------------------------------------------------------------------------
1 | from flask import Flask
2 | app = Flask(__name__)
3 |
@app.route('/')
def hello_world():
    """Serve the landing-page greeting for the demo application."""
    greeting = 'Welcome to Deploying Applications With Ansible!'
    return greeting
7 |
--------------------------------------------------------------------------------
/playbooks/1.10_webapp.yml:
--------------------------------------------------------------------------------
1 | # This is an example of what your 1.6_webapp.yml playbook should look
2 | # like after completing the tasks described in 1.10:
3 |
4 | # - Install nginx, Redis, and supervisord via roles
5 | # - Write a custom role to manage Flask with supervisord
6 | # - Persist number of visits in Redis
7 | # - Increment and display visits to user
8 |
9 | # This is a role-based version of the playbook from 1.6_webapp.yml; if you
10 | # run into issues completing section 1.8, you can use it as inspiration.
11 |
12 | # This will deploy a web app locally rather than to another server.
13 | - name: Deploy a web application
14 | hosts: control_machine
15 |
16 | # Values we're using in more than one place are best
17 | # stored as variables. Playbooks are one of the many
18 | # locations we can define variables.
19 | vars:
20 | deploy_directory: ~/apps
21 | nginx_listen_port: 80
22 | nginx_upstream_port: 5000
23 |
24 | # Roles are executed before tasks.
25 | roles:
26 | # This is a different Flask role that uses supervisor.
27 | - role: flask_supervisor_example
28 | tags: [flask]
29 | # Variables can be passed into a role
30 | flask_app: "flask-redis"
31 | flask_src: ../files/flask-redis/flask-redis.py
32 | flask_directory: "{{ deploy_directory|expanduser }}"
33 | flask_port: "{{nginx_upstream_port }}"
34 |
35 | - { role: nginx_example, tags: [nginx] }
36 |
37 | - { role: redis_example, tags: [redis] }
38 |
39 | tasks:
40 | # This is a great example of a task that doesn't belong in either
41 | # the Flask role or the Redis role, because it only matters to the
42 | # intersection of them.
43 | - name: Install flask-redis
44 | become: yes
45 | pip:
46 | name: flask-redis
47 | notify: restart flask
48 |
49 | # It's common to use a role to install 'basic' configuration (and
50 | # provide handlers), but then override that configuration in the
51 | # `tasks:` section of the role.
52 | - name: Template nginx configuration file
53 | become: yes
54 | template:
55 | src: ../templates/proxy.j2
56 | dest: /etc/nginx/sites-available/proxy
57 | # The nginx handler is available because we included the role.
58 | notify: reload nginx
59 |
60 | # Create a symlink in `/etc/nginx/sites-enabled` pointing to the
61 | # file we previously templated into `/etc/nginx/sites-available`.
62 | - name: Enable this site
63 | become: yes
64 | file:
65 | state: link
66 | src: /etc/nginx/sites-available/proxy
67 | dest: /etc/nginx/sites-enabled/proxy
68 | notify: reload nginx
69 |
70 | # Handlers won't be executed until the end of the `tasks:`
71 | # section, so we need to move these into `post_tasks` instead.
72 | post_tasks:
73 | # Ansible commands process much faster than manual execution, so
74 | # it's possible that nginx won't have bound to port 80 before we
75 | # attempt to query the server.
76 | - name: Wait for nginx to listen on port {{nginx_listen_port}}
77 | wait_for:
78 | port: "{{nginx_listen_port}}"
79 | # wait_for starts immediately and polls every second, but if
80 | # nginx hasn't started in 10 seconds, something is wrong.
81 | timeout: 10
82 |
83 | # The `uri` module is a basic HTTP client that is primarily for
84 | # checking status codes, but can also save response bodies.
85 | - name: Ensure that the proxy is functioning
86 | uri:
87 | url: http://localhost:{{nginx_listen_port}}
88 | return_content: yes
89 | # `register` saves the output to an Ansible variable.
90 | register: proxy_response
91 | # Mark the task as failed, but run the next task.
92 | failed_when: "'Deploying Applications With Ansible' not in proxy_response.content"
93 | ignore_errors: true
94 |
    # The `failed_when` expression marks this final task as failed when
    # the proxy check above did not succeed, stopping the playbook with
    # an error.
98 | - name: Make sure we're in the right class!
99 | debug:
100 | msg: "Application {% if proxy_response.failed %}failed to deploy.{% else %}successfully deployed!{% endif %}"
101 | failed_when: proxy_response.failed
102 |
--------------------------------------------------------------------------------
/playbooks/1.4_cron.yml:
--------------------------------------------------------------------------------
1 | # control_machine is defined as an alias for localhost in
2 | # `environments/aws/hosts`.
3 | - hosts: control_machine
4 |
5 | # vars_prompt will collect a variable and store it.
6 | vars_prompt:
7 | name: message
8 | prompt: "Enter a message to print via `wall` (leave blank to delete crontab)"
9 | private: no
10 |
11 | # Tasks are executed in order.
12 | tasks:
13 | # `service` requires running as root, so this task has the
14 | # `become: yes` parameter.
15 | - name: Ensure that cron is started
16 | become: yes
17 | service:
18 | name: cron
19 | state: started
20 |
21 | # This file is stored in the `templates` folder, adjacent to
22 | # `playbooks`.
23 | - name: Template a cron script
24 | template:
25 | src: ../templates/cron.sh.j2
26 | dest: ~/cron.sh
27 | mode: 0755
28 |
29 | # This task will only run if there's a non-blank message.
30 | - name: Create a cron job
31 | cron:
32 | name: Message logged in users
33 | job: ~/cron.sh
34 | when: message != ""
35 |
36 | # This task will delete any cron job created by the previous task,
37 | # if no message is provided.
38 | - name: Delete a cron job
39 | cron:
40 | name: Message logged in users
41 | state: absent
42 | when: message == ""
43 |
--------------------------------------------------------------------------------
/playbooks/1.5_vars.yml:
--------------------------------------------------------------------------------
1 | - hosts: control_machine
2 | become: yes
3 |
4 | vars:
5 | # Default to a dev environment
6 | env: dev
7 | # Default to yum as a package manager
8 | package_manager: yum
9 |
10 | vars_prompt:
11 | name: distro
12 | prompt: Choose which distro to install
13 | default: ubuntu
14 |
15 | vars_files:
16 | - "../vars/{{distro}}-{{env}}.yml"
17 |
18 | tasks:
19 | # This will run if you enter ubuntu at the prompt.
20 | - apt:
21 | name: git
22 | when: package_manager == 'apt'
23 |
24 | - yum:
25 | name: git
26 | when: package_manager == 'yum'
27 |
28 | # This variable won't be defined in the next play.
29 | - hosts: control_machine
30 | tasks:
31 | - debug: var=distro
32 |
--------------------------------------------------------------------------------
/playbooks/1.6_webapp.yml:
--------------------------------------------------------------------------------
1 | # This is a 'list of tasks' playbook that deploys a 'hello world' web
2 | # application written in Flask. Several tasks have been filled in
3 | # for you, but others are incomplete. Fill in the remainder of the
4 | # tasks and make sure that you can run the playbook (from ~/ansible):
5 | #
6 | # ansible-playbook playbooks/1.6_webapp.yml
7 | #
8 | # Each of the tasks points you to an Ansible module you could use to
9 | # complete it, though there are often multiple correct answers. You
10 | # can read up on specific Ansible modules here:
11 | #
12 | # http://docs.ansible.com/ansible/list_of_all_modules.html
13 | #
14 | # If you get stuck, you can check out 1.6_webapp_example.yml for a completed
15 | # version of this playbook.
16 |
17 | # This will deploy a web app locally rather than to another server.
18 | - name: Deploy a web application
19 | hosts: control_machine
20 |
21 | # Values we're using in more than one place are best
22 | # stored as variables. Playbooks are one of the many
23 | # locations we can define variables.
24 | vars:
25 | deploy_directory: ~/apps
26 | nginx_listen_port: 80
27 | nginx_upstream_port: 5000
28 |
29 | tasks:
30 | # Flask is a Python web server, installed as a
31 | # Python package. Check out the `pip` module - and
32 | # remember, we need to run it as root.
33 | - name: Install flask
34 | fail: msg='Unimplemented task!'
35 | ignore_errors: yes
36 |
37 | # We can create directories with the `file` module. Make
38 | # sure to use the deploy_directory variable!
39 | - name: Create the deploy directory if it doesn't exist.
40 | fail: msg='Unimplemented task!'
41 | ignore_errors: yes
42 |
43 | # This repository ships with 'files/hello_world/hello_world.py',
44 | # a simple Flask application. Copy it to the deploy directory
45 | # with the `copy` module.
46 | - name: Deploy flask app
47 | fail: msg='Unimplemented task!'
48 | ignore_errors: yes
49 |
50 | # Normally, we'd deploy Flask with a process
51 | # manager like supervisor. To keep this example
52 | # basic, we'll just run it as a backgrounded
53 | # process for now.
54 |
55 | # To ensure playbook idempotency (ability to safely re-run),
56 | # we need to kill existing Flask processes before starting
57 | # a new one. Otherwise we'd fail to bind to the right port.
58 | - name: Kill all running flask processes.
59 | shell: pkill -e flask
60 | # pkill returns 1 when it doesn't find a process to kill.
61 | # Setting `failed_when` to false will prevent that from
62 | # being treated as a failure.
63 | failed_when: false
64 | # `shell` tasks aren't necessarily idempotent, so they'll always
65 | # show as 'changed' in Ansible output. Setting `changed_when`
66 | # lets us define what is considered to be a state change.
67 | register: pkill
68 | changed_when: "'killed' in pkill.stdout"
69 |
70 | - name: Start flask as a backgrounded process
71 | # We can set environment variables for a task to be run with.
72 | environment:
73 | FLASK_APP: "{{deploy_directory|expanduser}}/hello_world.py"
74 | # When we run Flask, it attaches to our terminal. When we run
75 | # a shell command with Ansible, it operates much like it would
76 | # if we ran it as our user, so this task wouldn't complete
77 | # until the Flask process died. We can use the `nohup` command
78 | # and the `&` operator to
79 | # run Flask as a backgrounded, unattached process.
80 | shell: "nohup flask run -p {{nginx_upstream_port}} &"
81 | # We do still get stderr before the process returns, and we can
82 | # inspect that for an error message.
83 | register: flask
84 | # This is a form of YAML multiline syntax.
85 | failed_when: >
86 | 'does not appear to exist' in flask.stderr
87 | or 'Traceback' in flask.stderr
88 |
89 | # Install the packaged version of nginx with the `apt` module.
90 | # Like other package managers, we'll want to run this command as
91 | # root by using `become: yes`.
92 | - name: Install nginx
93 | fail: msg='Unimplemented task!'
94 | ignore_errors: yes
95 |
96 | # (We'll need to be root for the following nginx tasks, too!)
97 |
98 | # Use the `template` module to template the configuration file
99 | # from `templates/proxy.j2` to `/etc/nginx/sites-available`.
100 | - name: Template nginx configuration file
101 | fail: msg='Unimplemented task!'
102 | ignore_errors: yes
103 |
104 | # nginx loads configuration files from `/etc/nginx/sites-enabled`,
105 | # which are traditionally symlinks to files in
106 | # `/etc/nginx/sites-available`. Use the `file` module to delete
107 | # all symlinks so that nginx is only serving this Flask app.
108 | # (HINT: Try out `with_fileglob: /etc/nginx/sites-enabled/*`)
109 | - name: Disable all nginx sites
110 | fail: msg='Unimplemented task!'
111 | ignore_errors: yes
112 |
113 | # Create a symlink in `/etc/nginx/sites-enabled` pointing to the
114 | # file we previously templated into `/etc/nginx/sites-available`.
115 | - name: Enable this site
116 | fail: msg='Unimplemented task!'
117 | ignore_errors: yes
118 |
119 | # The nginx package installs it as a service, so we can use the
120 | # `service` or `systemd` commands to manage it.
121 | - name: Reload nginx configuration
122 | fail: msg='Unimplemented task!'
123 | ignore_errors: yes
124 |
125 | # Ansible commands process much faster than manual execution, so
126 | # it's possible that nginx won't have bound to port 80 before we
127 | # attempt to query the server.
128 | - name: Wait for nginx to listen on port {{nginx_listen_port}}
129 | wait_for:
130 | port: "{{nginx_listen_port}}"
131 | # wait_for starts immediately and polls every second, but if
132 | # nginx hasn't started in 10 seconds, something is wrong.
133 | timeout: 10
134 |
135 | # The `uri` module is a basic HTTP client that is primarily for
136 | # checking status codes, but can also save response bodies.
137 | - name: Ensure that the proxy is functioning
138 | uri:
139 | url: http://localhost:{{nginx_listen_port}}
140 | return_content: yes
141 | # `register` saves the output to an Ansible variable.
142 | register: proxy_response
143 | # Mark the task as failed, but run the next task.
144 | failed_when: "'Deploying Applications With Ansible' not in proxy_response.content"
145 | ignore_errors: true
146 |
    # The `failed_when` expression marks this final task as failed when
    # the proxy check above did not succeed, stopping the playbook with
    # an error.
150 | - name: Make sure we're in the right class!
151 | debug:
152 | msg: "Application {% if proxy_response.failed %}failed to deploy.{% else %}successfully deployed!{% endif %}"
153 | failed_when: proxy_response.failed
154 |
--------------------------------------------------------------------------------
/playbooks/1.6_webapp_example.yml:
--------------------------------------------------------------------------------
1 | # This is the complete version of the playbook from 1.6_webapp.yml; if you
2 | # run into issues completing section 1.8, you can use it as inspiration.
3 |
4 | # This will deploy a web app locally rather than to another server.
5 | - name: Deploy a web application
6 | hosts: control_machine
7 |
8 | # Values we're using in more than one place are best
9 | # stored as variables. Playbooks are one of the many
10 | # locations we can define variables.
11 | vars:
12 | deploy_directory: ~/apps
13 | nginx_listen_port: 80
14 | nginx_upstream_port: 5000
15 |
16 | tasks:
17 | # Flask is a Python web server, installed as a
18 | # Python package. Check out the `pip` module - and
19 | # remember, we need to run it as root.
20 | - name: Install flask
21 | become: yes
22 | pip:
23 | name: flask
24 |
25 | # We can create directories with the `file` module. Make
26 | # sure to use the deploy_directory variable!
27 | - name: Create the deploy directory if it doesn't exist.
28 | file:
29 | path: "{{ deploy_directory }}"
30 | state: directory
31 |
32 | # This repository ships with 'files/hello_world/hello_world.py',
33 | # a simple Flask application. Copy it to the deploy directory
34 | # with the `copy` module.
35 | - name: Deploy flask app
36 | copy:
37 | src: "../files/hello_world/hello_world.py"
38 | dest: "{{ deploy_directory }}/hello_world.py"
39 |
40 | # Normally, we'd deploy Flask with a process
41 | # manager like supervisor. To keep this example
42 | # basic, we'll just run it as a backgrounded
43 | # process for now.
44 |
45 | # To ensure playbook idempotency (ability to safely re-run),
46 | # we need to kill existing Flask processes before starting
47 | # a new one. Otherwise we'd fail to bind to the right port.
48 | - name: Kill all running flask processes.
49 | shell: pkill -e flask
50 | # pkill returns 1 when it doesn't find a process to kill.
51 | # Setting `failed_when` to false will prevent that from
52 | # being treated as a failure.
53 | failed_when: false
54 | # `shell` tasks aren't necessarily idempotent, so they'll always
55 | # show as 'changed' in Ansible output. Setting `changed_when`
56 | # lets us define what is considered to be a state change.
57 | register: pkill
58 | changed_when: "'killed' in pkill.stdout"
59 |
60 | - name: Start flask as a backgrounded process
61 | # We can set environment variables for a task to be run with.
62 | environment:
63 | FLASK_APP: "{{deploy_directory|expanduser}}/hello_world.py"
64 | # When we run Flask, it attaches to our terminal. When we run
65 | # a shell command with Ansible, it operates much like it would
66 | # if we ran it as our user, so this task wouldn't complete
67 | # until the Flask process died. We can use the `nohup` command
68 | # and the `&` operator to
69 | # run Flask as a backgrounded, unattached process.
70 | shell: "nohup flask run -p {{nginx_upstream_port}} &"
71 | # We do still get stderr before the process returns, and we can
72 | # inspect that for an error message.
73 | register: flask
74 | failed_when: "'does not appear to exist' in flask.stderr"
75 |
76 | # Install the packaged version of nginx with the `apt` module.
77 | # Like other package managers, we'll want to run this command as
78 | # root by using `become: yes`.
79 | - name: Install nginx
80 | become: yes
81 | apt:
82 | name: nginx
83 |
84 | # (We'll need to be root for the following nginx tasks, too!)
85 |
86 | # Use the `template` module to template the configuration file
87 | # from `templates/proxy.j2` to `/etc/nginx/sites-available`.
88 | - name: Template nginx configuration file
89 | become: yes
90 | template:
91 | src: ../templates/proxy.j2
92 | dest: /etc/nginx/sites-available/proxy
93 |
94 | # nginx loads configuration files from `/etc/nginx/sites-enabled`,
95 | # which are traditionally symlinks to files in
96 | # `/etc/nginx/sites-available`. Use the `file` module to delete
97 | # all symlinks so that nginx is only serving this Flask app.
98 | # (HINT: Try out `with_fileglob: /etc/nginx/sites-enabled/*`)
99 | - name: Disable all nginx sites
100 | become: yes
101 | file:
102 | state: absent
103 | path: "{{ item }}"
104 | with_fileglob: /etc/nginx/sites-enabled/*
105 |
106 | # Create a symlink in `/etc/nginx/sites-enabled` pointing to the
107 | # file we previously templated into `/etc/nginx/sites-available`.
108 | - name: Enable this site
109 | become: yes
110 | file:
111 | state: link
112 | src: /etc/nginx/sites-available/proxy
113 | dest: /etc/nginx/sites-enabled/proxy
114 |
115 | # The nginx package installs it as a service, so we can use the
116 | # `service` or `systemd` commands to manage it.
117 | - name: Reload nginx configuration
118 | become: yes
119 | service:
120 | name: nginx
121 | state: reloaded
122 |
123 | # Ansible commands process much faster than manual execution, so
124 | # it's possible that nginx won't have bound to port 80 before we
125 | # attempt to query the server.
126 | - name: Wait for nginx to listen on port {{nginx_listen_port}}
127 | wait_for:
128 | port: "{{nginx_listen_port}}"
129 | # wait_for starts immediately and polls every second, but if
130 | # nginx hasn't started in 10 seconds, something is wrong.
131 | timeout: 10
132 |
133 | # The `uri` module is a basic HTTP client that is primarily for
134 | # checking status codes, but can also save response bodies.
135 | - name: Ensure that the proxy is functioning
136 | uri:
137 | url: http://localhost:{{nginx_listen_port}}
138 | return_content: yes
139 | # `register` saves the output to an Ansible variable.
140 | register: proxy_response
141 | # Mark the task as failed, but run the next task.
142 | failed_when: "'Deploying Applications With Ansible' not in proxy_response.content"
143 | ignore_errors: true
144 |
    # The `failed_when` expression marks this final task as failed when
    # the proxy check above did not succeed, stopping the playbook with
    # an error.
148 | - name: Make sure we're in the right class!
149 | debug:
150 | msg: "Application {% if proxy_response.failed %}failed to deploy.{% else %}successfully deployed!{% endif %}"
151 | failed_when: proxy_response.failed
152 |
--------------------------------------------------------------------------------
/playbooks/1.7_handlers.yml:
--------------------------------------------------------------------------------
1 | - name: Demonstrate handler execution order
2 | hosts: control_machine
3 |
4 | handlers:
5 | # This handler will never be notified because only the first defined
    # handler with a given name will be notified. Both roles define a
7 | # handler with this name, and role handlers are defined before
8 | # play handlers.
9 | - name: handler_a
10 | debug:
11 | msg: handler_a from playbook
12 |
13 | # This handler will still be notified because neither role defined
14 | # it before the play could. However, it will execute after any
15 | # handlers notified during plays.
16 | - name: c_handler
17 | debug:
18 | msg: c_handler from playbook
19 | changed_when: yes
20 | # This list of handlers will be notified even if they already ran
21 | # in the task that c_handler was notified from.
22 | notify:
23 | # This handler will run second because a was defined before b.
24 | - handler_b
25 | # This handler will run first despite being listed second.
26 | - handler_a
27 | # This one won't run at all.
28 | - handler_a
29 |
30 | pre_tasks:
31 |
32 | - name: Notify handler_a
33 | debug: msg=notified
34 | changed_when: yes
35 | notify: handler_a
36 |
37 | # The play's handler_a will run after pre_tasks but before roles.
38 |
39 | # These roles only define handlers; they don't perform tasks or notify.
40 | roles:
41 | # role_b's handlers will be notified because it appeared first in the list.
42 | - handler_example_b
43 | - handler_example_a
44 |
45 | # No handlers run between roles and tasks.
46 |
47 | tasks:
48 | # This task will notify the role_a's handler_a because tasks run after roles.
49 | - name: Notify handler_a
50 | debug: msg=notified
51 | changed_when: yes
52 | notify: handler_a
53 |
54 | # This will notify handler_a and handler_b from the role, not the play.
55 | - name: Notify all handlers
56 | debug: msg=notified
57 | changed_when: yes
58 | notify:
59 | # c_handler will execute last, even though it was listed first.
60 | - c_handler
61 | - handler_a
62 | - handler_b
63 |
64 |
65 |
66 |
67 |
68 |
69 |
--------------------------------------------------------------------------------
/playbooks/1.8_webapp.yml:
--------------------------------------------------------------------------------
1 | # This is a role-based version of the playbook from 1.6_webapp.yml; if you
2 | # run into issues completing section 1.8, you can use it as inspiration.
3 |
4 | # This will deploy a web app locally rather than to another server.
5 | - name: Deploy a web application
6 | hosts: control_machine
7 |
8 | # Values we're using in more than one place are best
9 | # stored as variables. Playbooks are one of the many
10 | # locations we can define variables.
11 | vars:
12 | deploy_directory: ~/apps
13 | nginx_listen_port: 80
14 | nginx_upstream_port: 5000
15 |
16 | # `roles:` are executed in order, before `tasks:`.
17 | roles:
18 | - role: flask_example
19 | tags: [flask]
20 | # Variables can be passed into a role
21 | flask_app: "hello_world"
22 | flask_src: ../files/hello_world/hello_world.py
23 | flask_directory: "{{ deploy_directory|expanduser }}"
24 | flask_port: "{{nginx_upstream_port }}"
25 |
26 | tasks:
27 | # Install the packaged version of nginx with the `apt` module.
28 | # Like other package managers, we'll want to run this command as
29 | # root by using `become: yes`.
30 | - name: Install nginx
31 | become: yes
32 | apt:
33 | name: nginx
34 |
35 | # (We'll need to be root for the following nginx tasks, too!)
36 |
37 | # Use the `template` module to template the configuration file
38 | # from `templates/proxy.j2` to `/etc/nginx/sites-available`.
39 | - name: Template nginx configuration file
40 | become: yes
41 | template:
42 | src: ../templates/proxy.j2
43 | dest: /etc/nginx/sites-available/proxy
44 |
45 | # nginx loads configuration files from `/etc/nginx/sites-enabled`,
46 | # which are traditionally symlinks to files in
47 | # `/etc/nginx/sites-available`. Use the `file` module to delete
48 | # all symlinks so that nginx is only serving this Flask app.
49 | # (HINT: Try out `with_fileglob: /etc/nginx/sites-enabled/*`)
50 | - name: Disable all nginx sites
51 | become: yes
52 | file:
53 | state: absent
54 | path: "{{ item }}"
55 | with_fileglob: /etc/nginx/sites-enabled/*
56 |
57 | # Create a symlink in `/etc/nginx/sites-enabled` pointing to the
58 | # file we previously templated into `/etc/nginx/sites-available`.
59 | - name: Enable this site
60 | become: yes
61 | file:
62 | state: link
63 | src: /etc/nginx/sites-available/proxy
64 | dest: /etc/nginx/sites-enabled/proxy
65 |
66 | # The nginx package installs it as a service, so we can use the
67 | # `service` or `systemd` commands to manage it.
68 | - name: Reload nginx configuration
69 | become: yes
70 | service:
71 | name: nginx
72 | state: reloaded
73 |
74 | # Handlers won't be executed until the end of the `tasks:`
75 | # section, so we need to move these into `post_tasks` instead.
76 | post_tasks:
77 | # Ansible commands process much faster than manual execution, so
78 | # it's possible that nginx won't have bound to port 80 before we
79 | # attempt to query the server.
80 | - name: Wait for nginx to listen on port {{nginx_listen_port}}
81 | wait_for:
82 | port: "{{nginx_listen_port}}"
83 | # wait_for starts immediately and polls every second, but if
84 | # nginx hasn't started in 10 seconds, something is wrong.
85 | timeout: 10
86 |
87 | # The `uri` module is a basic HTTP client that is primarily for
88 | # checking status codes, but can also save response bodies.
89 | - name: Ensure that the proxy is functioning
90 | uri:
91 | url: http://localhost:{{nginx_listen_port}}
92 | return_content: yes
93 | # `register` saves the output to an Ansible variable.
94 | register: proxy_response
95 | # Mark the task as failed, but run the next task.
96 | failed_when: "'Deploying Applications With Ansible' not in proxy_response.content"
97 | ignore_errors: true
98 |
99 | # The `fail` module will stop a playbook with an error. The `when`
100 | # section will prevent the `fail` module from executing if the
101 | # condition evaluates to true.
102 | - name: Make sure we're in the right class!
103 | debug:
104 | msg: "Application {% if proxy_response.failed %}failed to deploy.{% else %}successfully deployed!{% endif %}"
105 | failed_when: proxy_response.failed
106 |
--------------------------------------------------------------------------------
/playbooks/2.4_delegate.yml:
--------------------------------------------------------------------------------
1 | - hosts: tag_Course_deploying_applications_with_ansible
2 | gather_facts: no
3 |
4 | vars:
5 | password_file: ~/ansible_infrastructure/passwords.csv
6 |
7 | tasks:
8 | # This is a workaround for supporting a combination of EC2 dynamic
9 | # inventory at the same time as password authentication from a flat file.
10 | - set_fact:
11 | ansible_ssh_user: "{{ ec2_tag_User }}"
12 | ansible_ssh_pass: "{{ lookup('csvfile', ec2_tag_User+' file='+password_file|expanduser+' delimiter=,') }}"
13 |
14 | - name: Collect modified files
15 | shell: >
16 | cd ~/ansible
17 | && git diff --shortstat
18 | | cut -d ' ' -f 2
19 | register: playbook_progress
20 |
21 | - name: Report modified files
22 | delegate_to: control_machine
23 | shell:
24 | echo "{{ ansible_ssh_user }},{{ playbook_progress.stdout or '0' }}" >> ~/ansible/student_instances
25 |
--------------------------------------------------------------------------------
/playbooks/2.4_parallel.yml:
--------------------------------------------------------------------------------
1 | - hosts: tag_Course_deploying_applications_with_ansible
2 | gather_facts: no
3 |
4 | vars:
5 | password_file: ~/ansible_infrastructure/passwords.csv
6 |
7 | tasks:
8 | # This is a workaround for supporting a combination of EC2 dynamic
9 | # inventory at the same time as password authentication from a flat file.
10 | - set_fact:
11 | ansible_ssh_user: "{{ ec2_tag_User }}"
12 | ansible_ssh_pass: "{{ lookup('csvfile', ec2_tag_User+' file='+password_file|expanduser+' delimiter=,') }}"
13 |
14 | # Send a message via wall
15 | - name: Say hi to everyone
16 | shell: wall "Hi, {{ ansible_ssh_user }}!"
17 |
--------------------------------------------------------------------------------
/playbooks/2.4_rolling.yml:
--------------------------------------------------------------------------------
1 | - hosts: tag_Course_deploying_applications_with_ansible
2 | gather_facts: no
3 | serial: 10%
4 |
5 | vars:
6 | password_file: ~/ansible_infrastructure/passwords.csv
7 |
8 | tasks:
9 | # This is a workaround for supporting a combination of EC2 dynamic
10 | # inventory at the same time as password authentication from a flat file.
11 | - set_fact:
12 | ansible_ssh_user: "{{ ec2_tag_User }}"
13 | ansible_ssh_pass: "{{ lookup('csvfile', ec2_tag_User+' file='+password_file|expanduser+' delimiter=,') }}"
14 |
15 | # Send a message via wall
16 | - name: Say hi to everyone
17 | shell: >
18 | wall "Hi again, {{ ansible_ssh_user }}! You're in the same
19 | serial batch as {{ ansible_play_batch[:1]|join(', ') }}, and
20 | {{ ansible_play_batch[-1] }}."
21 | ignore_errors: yes
22 |
--------------------------------------------------------------------------------
/playbooks/2.5_hello.yml:
--------------------------------------------------------------------------------
1 | # You'll need to uncomment the line in your ansible.cfg that loads the dynamic
2 | # inventory. Otherwise, this script won't work!
3 |
4 | # Make sure to use tags based on what you added in the last playbook.
5 | - hosts: "tag_User_{{ course_username }}:&tag_Name_Provisioned"
6 | gather_facts: no
7 | vars_files:
8 | - ~/ec2.yml
9 |
10 | tasks:
11 | - ec2_facts:
12 |
13 | - name: Retrieve all tags on an instance
14 | delegate_to: localhost
15 | ec2_tag:
16 | region: '{{ ansible_ec2_placement_region }}'
17 | resource: '{{ ansible_ec2_instance_id }}'
18 | state: list
19 | register: ec2_tags
20 |
21 | - debug: var=ec2_tags
22 |
23 | # Send a message via wall
24 | - name: Say hi to everyone
25 | shell: wall "Hello, World!"
26 |
--------------------------------------------------------------------------------
/playbooks/2.5_provision.yml:
--------------------------------------------------------------------------------
1 | # Provision a second instance based on configuration parameters in ~/ec2.yml
2 | - name: Provision a new instance
3 | hosts: localhost
4 | gather_facts: False
5 | vars_files:
6 | - ~/ec2.yml
7 |
8 | tasks:
9 | - name: Launch instance
10 | ec2:
11 | region: "{{ region }}"
12 | instance_type: "{{ instance_type }}"
13 | image: "{{ image }}"
14 | key_name: "{{ key_name }}"
15 | group: "{{ security_group }}"
16 | instance_tags:
17 | Course: "{{ course_slug }}"
18 | wait: true
19 | register: ec2
20 |
21 | - name: Add new instances to host group
22 | add_host:
23 | hostname: "{{ item.public_dns_name }}"
24 | groupname: launched
25 | with_items: "{{ ec2.instances }}"
26 |
27 | - name: Wait for SSH to come up
28 | wait_for:
29 | host: "{{ item.public_dns_name }}"
30 | port: 22
31 | timeout: 320
32 | state: started
33 | with_items: "{{ ec2.instances }}"
34 |
35 | - name: Apply instance modifications
36 | hosts: launched
37 | gather_facts: False
38 | strategy: free
39 | vars_files:
40 | - ~/ec2.yml
41 |
42 | tasks:
43 | # NOTE: These tasks might fail if apt is flaky!
44 | - name: Install python2 for Ansible
45 | raw: bash -c "test -e /usr/bin/python || (sudo apt -qqy update && sudo apt install -qqy python-minimal)"
46 | register: output
47 | changed_when:
48 | - output.stdout != ""
49 | - output.stdout != "\r\n"
50 | until: output|success
51 | retries: 5
52 |
53 | - name: Gather facts
54 | action: setup
55 |
56 | - name: Get updated EC2 facts
57 | action: ec2_facts
58 |
59 | - name: Tag instances
60 | delegate_to: localhost
61 | ec2_tag:
62 | region: "{{ region }}"
63 | resource: "{{ ansible_ec2_instance_id }}"
64 | state: present
65 | # You can change these tags! But you'll need them in future steps.
66 | tags:
67 | Name: Provisioned
68 | User: "{{ course_username }}"
69 |
--------------------------------------------------------------------------------
/playbooks/2.6_cloud.yml:
--------------------------------------------------------------------------------
1 | # The final deployment project of the course - coordinate two or more
2 | # servers into a multi tier application using Ansible tasks and roles!
3 |
4 | # To get you started, here's a sample playbook detailing what your
5 | # two instances should end up looking like. But feel free to use
6 | # technologies that you're more familiar with, look to Ansible Galaxy
7 | # for inspiration, or just extend the playbooks from Day 1.
8 |
9 | # You may need to terminate existing processes. If you run into memory
10 | # issues on your provisioned node, you can modify ec2.yml and try out
11 | # a t2.small instead of a t2.micro. (You can't relaunch your control
12 | # machine that contains this repository.)
13 |
14 | # Make sure to use tags based on what you added in the last playbook.
15 | - hosts: "tag_User_{{username}}:&tag_Name_Provisioned"
16 | vars_files:
17 | - ~/ec2.yml
18 | # Create this file and store configuration in it.
19 | - ~/app.yml
20 |
21 | roles:
22 | # - Install a database
23 |
24 | tasks:
25 | # - name: Create a database table
26 |
27 | # - name: Populate entries in the database
28 |
29 | - hosts: localhost
30 | vars_files:
31 | - ~/ec2.yml
32 | # Create this file and store configuration in it.
33 | - ~/app.yml
34 |
35 | roles:
36 | # - Install a webserver
37 | # - Install an app server
38 |
39 | tasks:
40 | # - name: Customize webserver configuration
41 |
42 | # - name: Deploy your app server's code
43 |
44 | # - name: Install database drivers
45 |
--------------------------------------------------------------------------------
/playbooks/2.9_custom_module.yml:
--------------------------------------------------------------------------------
1 | # How to build a custom module in Ansible: http://docs.ansible.com/ansible/dev_guide/developing_modules_general.html
2 |
3 | - hosts: localhost
4 | roles:
5 | # Custom modules embedded in roles become available after that
6 | # role has been loaded.
7 | - custom_role
8 | tasks:
9 | - name: Call my custom module
10 | # Or: create a file like custom_ruby_module.rb, custom_node_module.js, etc.
11 | custom_python_module:
12 | arg1: foo
13 | arg2: bar
14 | register: custom_module_output
15 |
16 | - name: Print custom module output
17 | debug: var=custom_module_output
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | ansible==2.3.0.0
2 | appdirs==1.4.3
3 | asn1crypto==0.22.0
4 | awscli==1.11.91
5 | boto==2.46.1
6 | botocore==1.5.54
7 | cffi==1.10.0
8 | click==6.7
9 | colorama==0.3.7
10 | cryptography==1.8.1
11 | docutils==0.13.1
12 | enum34==1.1.6
13 | Fabric==1.13.2
14 | Flask==0.12.1
15 | Flask-Redis==0.3.0
16 | futures==3.1.1
17 | idna==2.5
18 | ipaddress==1.0.18
19 | itsdangerous==0.24
20 | Jinja2==2.9.6
21 | jmespath==0.9.2
22 | MarkupSafe==1.0
23 | packaging==16.8
24 | paramiko==2.1.2
25 | prettytable==0.7.2
26 | pyasn1==0.2.3
27 | pycparser==2.17
28 | pycrypto==2.6.1
29 | pyparsing==2.2.0
30 | python-dateutil==2.6.0
31 | PyYAML==3.12
32 | redis==2.10.5
33 | rsa==3.4.2
34 | s3transfer==0.1.10
35 | six==1.10.0
36 | Werkzeug==0.12.1
37 |
--------------------------------------------------------------------------------
/roles/custom_role/README.md:
--------------------------------------------------------------------------------
1 | Role Name
2 | =========
3 |
4 | A brief description of the role goes here.
5 |
6 | Requirements
7 | ------------
8 |
9 | Any prerequisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
10 |
11 | Role Variables
12 | --------------
13 |
14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
15 |
16 | Dependencies
17 | ------------
18 |
19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20 |
21 | Example Playbook
22 | ----------------
23 |
24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
25 |
26 | - hosts: servers
27 | roles:
28 | - { role: username.rolename, x: 42 }
29 |
30 | License
31 | -------
32 |
33 | BSD
34 |
35 | Author Information
36 | ------------------
37 |
38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed).
39 |
--------------------------------------------------------------------------------
/roles/custom_role/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for custom_role
--------------------------------------------------------------------------------
/roles/custom_role/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for custom_role
--------------------------------------------------------------------------------
/roles/custom_role/library/custom_python_module.py:
--------------------------------------------------------------------------------
1 | # Check out this page for more information!
2 | # http://docs.ansible.com/ansible/dev_guide/developing_modules_general.html#common-module-boilerplate
3 |
4 | from ansible.module_utils.basic import AnsibleModule
5 |
def main():
    """Entry point for the example custom Ansible module.

    Parses the declared arguments and reports a (static) result back to
    Ansible. An Ansible module communicates its outcome by printing JSON
    to stdout via ``module.exit_json`` / ``module.fail_json`` -- a plain
    ``return`` value is discarded by the interpreter and Ansible never
    sees it.
    """
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default='present', choices=['present', 'absent']),
            name = dict(required=True),
            enabled = dict(required=True, type='bool'),
            something = dict(aliases=['whatever'])
        )
    )

    # Result payload. NOTE: the original used lowercase `true`, which is a
    # NameError in Python -- the boolean literal is `True`.
    ansible_return = {
        "changed" : True,
        "rc" : 1,
        "ansible_facts" : {

        }
    }

    # Emit the result as JSON on stdout and exit, as required by the
    # Ansible module contract (see the developing-modules guide linked
    # at the top of this file).
    module.exit_json(**ansible_return)

if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/roles/custom_role/library/custom_ruby_module.rb:
--------------------------------------------------------------------------------
1 | # See here: https://github.com/ansible/ansible-for-rubyists/blob/master/library/my_ruby_facts
--------------------------------------------------------------------------------
/roles/custom_role/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | author: your name
3 | description: your description
4 | company: your company (optional)
5 |
6 | # If the issue tracker for your role is not on github, uncomment the
7 | # next line and provide a value
8 | # issue_tracker_url: http://example.com/issue/tracker
9 |
10 | # Some suggested licenses:
11 | # - BSD (default)
12 | # - MIT
13 | # - GPLv2
14 | # - GPLv3
15 | # - Apache
16 | # - CC-BY
17 | license: license (GPLv2, CC-BY, etc)
18 |
19 | min_ansible_version: 1.2
20 |
21 | # If this a Container Enabled role, provide the minimum Ansible Container version.
22 | # min_ansible_container_version:
23 |
24 | # Optionally specify the branch Galaxy will use when accessing the GitHub
25 | # repo for this role. During role install, if no tags are available,
26 | # Galaxy will use this branch. During import Galaxy will access files on
27 | # this branch. If Travis integration is configured, only notifications for this
28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch
29 | # (usually master) will be used.
30 | #github_branch:
31 |
32 | #
33 | # platforms is a list of platforms, and each platform has a name and a list of versions.
34 | #
35 | # platforms:
36 | # - name: Fedora
37 | # versions:
38 | # - all
39 | # - 25
40 | # - name: SomePlatform
41 | # versions:
42 | # - all
43 | # - 1.0
44 | # - 7
45 | # - 99.99
46 |
47 | galaxy_tags: []
48 | # List tags for your role here, one per line. A tag is a keyword that describes
49 | # and categorizes the role. Users find roles by searching for tags. Be sure to
50 | # remove the '[]' above, if you add tags to this list.
51 | #
52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
53 | # Maximum 20 tags per role.
54 |
55 | dependencies: []
56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above,
57 | # if you add dependencies to this list.
--------------------------------------------------------------------------------
/roles/custom_role/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for custom_role
--------------------------------------------------------------------------------
/roles/custom_role/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 |
3 |
--------------------------------------------------------------------------------
/roles/custom_role/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | remote_user: root
4 | roles:
5 | - custom_role
--------------------------------------------------------------------------------
/roles/custom_role/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for custom_role
--------------------------------------------------------------------------------
/roles/flask_example/handlers/main.yml:
--------------------------------------------------------------------------------
1 |
2 | # This handler always marks itself as changed, then notifies two
3 | # other handlers.
4 | - name: restart flask
5 | debug: msg="Notifying Flask start and stop handlers..."
6 | changed_when: true
7 | notify:
8 | - stop flask
9 | - start flask
10 |
11 | # To ensure playbook idempotency (ability to safely re-run),
12 | # we need to kill existing Flask processes before starting
13 | # a new one. Otherwise we'd fail to bind to the right port.
14 |
15 | - name: stop flask
16 | shell: pkill -e flask
17 | # pkill returns 1 when it doesn't find a process to kill.
18 | # Setting `failed_when` to false will prevent that from
19 | # being treated as a failure.
20 | failed_when: false
21 | # `shell` tasks aren't necessarily idempotent, so they'll always
22 | # show as 'changed' in Ansible output. Setting `changed_when`
23 | # lets us define what is considered to be a state change.
24 | register: pkill
25 | changed_when: "'killed' in pkill.stdout"
26 |
27 | - name: start flask
28 | # We can set environment variables for a task to be run with.
29 | environment:
30 | FLASK_APP: "{{flask_directory|expanduser}}/{{flask_app}}.py"
31 | # When we run Flask, it attaches to our terminal. When we run
32 | # a shell command with Ansible, it operates much like it would
33 | # if we ran it as our user, so this task wouldn't complete
34 | # until the Flask process died. We can use the `nohup` command
35 | # and the `&` operator to
36 | # run Flask as a backgrounded, unattached process.
37 | shell: "nohup flask run -p {{flask_port}} &"
38 | # We do still get stderr before the process returns, and we can
39 | # inspect that for an error message.
40 | register: flask
41 | failed_when: >
42 | 'does not appear to exist' in flask.stderr
43 | or 'Traceback' in flask.stderr
44 |
--------------------------------------------------------------------------------
/roles/flask_example/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Install flask
2 | become: yes
3 | pip:
4 | name: flask
5 | # If the flask version ever updates, this will force a restart.
6 | notify: restart flask
7 |
8 | - name: Create the deploy directory if it doesn't exist.
9 | file:
10 | path: "{{ flask_directory }}"
11 | state: directory
12 |
13 | - name: Deploy flask app
14 | copy:
15 | src: "{{ flask_src }}"
16 | dest: "{{ flask_directory }}/{{flask_app}}.py"
17 | notify: restart flask
18 |
--------------------------------------------------------------------------------
/roles/flask_supervisor_example/handlers/main.yml:
--------------------------------------------------------------------------------
1 | # We need to restart supervisor itself when we add new config.
2 |
3 | - name: restart supervisor
4 | become: yes
5 | service:
6 | name: supervisor
7 | state: restarted
8 |
9 | - name: start supervisor
10 | become: yes
11 | service:
12 | name: supervisor
13 | state: restarted
14 |
15 | - name: stop supervisor
16 | become: yes
17 | service:
18 | name: supervisor
19 | state: restarted
20 |
21 | - name: restart flask
22 | become: yes
23 | supervisorctl:
24 | name: "{{ flask_app }}"
25 | state: restarted
26 |
27 | - name: start flask
28 | become: yes
29 | supervisorctl:
30 | name: "{{ flask_app }}"
31 | state: started
32 |
33 | - name: stop flask
34 | become: yes
35 | supervisorctl:
36 | name: "{{ flask_app }}"
37 | state: stopped
38 |
--------------------------------------------------------------------------------
/roles/flask_supervisor_example/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Install supervisor
2 | become: yes
3 | apt:
4 | name: supervisor
5 | # If the flask version ever updates, this will force a restart.
6 | notify: restart supervisor
7 |
8 | - name: Template supervisor config
9 | become: yes
10 | template:
11 | src: flask.conf.j2
12 | dest: /etc/supervisor/conf.d/flask.conf
13 | # We need to restart the whole process if we changed supervisor config
14 | notify: restart supervisor
15 |
16 | - name: Install flask
17 | become: yes
18 | pip:
19 | name: flask
20 | # If the flask version ever updates, this will force a restart.
21 | notify: restart flask
22 |
23 | - name: Create the deploy directory if it doesn't exist.
24 | file:
25 | path: "{{ flask_directory }}"
26 | state: directory
27 |
28 | - name: Deploy flask app
29 | copy:
30 | src: "{{ flask_src }}"
31 | dest: "{{ flask_directory }}/{{flask_app}}.py"
32 | notify: restart flask
33 |
34 | - name: Template flask service
35 | template:
36 | src: flask.sh.j2
37 | dest: "{{ flask_directory }}/{{flask_app}}.sh"
38 | mode: 0755
39 | notify: restart flask
40 |
--------------------------------------------------------------------------------
/roles/flask_supervisor_example/templates/flask.conf.j2:
--------------------------------------------------------------------------------
1 | [program:{{flask_app}}]
2 | # Run as our user
3 | user={{ ansible_user_id }}
4 |
5 | # Run the templated script
6 | command={{ flask_directory }}/{{flask_app}}.sh
7 |
--------------------------------------------------------------------------------
/roles/flask_supervisor_example/templates/flask.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # This doesn't get set with supervisor.
4 | export HOME=/home/{{ ansible_user_id }}
5 |
6 | # Load a Python version that has Flask
7 | . /home/{{ ansible_user_id }}/.setup_pyenv
8 |
9 | # Load our user's virtualenv
10 | pyenv activate ansible
11 |
12 | # Set the Flask app
13 | export FLASK_APP="{{flask_directory|expanduser}}/{{flask_app}}.py"
14 |
15 | # We now do want this to block, since supervisor expects the process
16 | # to attach so it can be properly managed.
17 | flask run -p {{flask_port}}
18 |
--------------------------------------------------------------------------------
/roles/handler_example_a/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: handler_a
2 | debug: msg="handler_a from handler_example_a"
3 |
4 | - name: handler_b
5 | debug: msg="handler_b from handler_example_a"
6 |
--------------------------------------------------------------------------------
/roles/handler_example_b/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: handler_b
2 | debug: msg="handler_b from handler_example_b"
3 |
4 | - name: handler_a
5 | debug: msg="handler_a from handler_example_b"
6 |
--------------------------------------------------------------------------------
/roles/nginx_example/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: start nginx
2 | become: yes
3 | service:
4 | name: nginx
5 | state: started
6 |
7 | - name: stop nginx
8 | become: yes
9 | service:
10 | name: nginx
11 | state: stopped
12 |
13 | - name: restart nginx
14 | become: yes
15 | service:
16 | name: nginx
17 | state: restarted
18 |
19 | - name: reload nginx
20 | become: yes
21 | service:
22 | name: nginx
23 | state: reloaded
24 |
--------------------------------------------------------------------------------
/roles/nginx_example/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Install nginx
2 | become: yes
3 | apt:
4 | name: nginx
5 | # This will force a restart on package upgrade.
6 | notify: restart nginx
7 |
8 | - name: Disable all nginx sites
9 | become: yes
10 | file:
11 | state: absent
12 | path: "{{ item }}"
13 | with_fileglob: /etc/nginx/sites-enabled/*
14 | notify: reload nginx
15 |
--------------------------------------------------------------------------------
/roles/redis_example/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart redis
2 | become: yes
3 | service:
4 | name: redis
5 | state: restarted
6 |
--------------------------------------------------------------------------------
/roles/redis_example/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Install redis
2 | become: yes
3 | apt:
4 | name: redis-server
5 | notify: restart redis
6 |
--------------------------------------------------------------------------------
/roles/requirements.yml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jmeickle/deploying-applications-with-ansible/86b2529668dcacb4b96bca7d85cf3a43be0df20a/roles/requirements.yml
--------------------------------------------------------------------------------
/slides.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jmeickle/deploying-applications-with-ansible/86b2529668dcacb4b96bca7d85cf3a43be0df20a/slides.pdf
--------------------------------------------------------------------------------
/templates/cron.sh.j2:
--------------------------------------------------------------------------------
1 | wall {{message}}
2 |
--------------------------------------------------------------------------------
/templates/proxy.j2:
--------------------------------------------------------------------------------
1 | upstream app {
2 | server localhost:{{nginx_upstream_port}};
3 | }
4 |
5 | server {
6 | listen {{nginx_listen_port}} default_server;
7 | listen [::]:{{nginx_listen_port}} default_server;
8 |
9 | location / {
10 | proxy_pass http://app;
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/vars/ubuntu-dev.yml:
--------------------------------------------------------------------------------
1 | # Override package manager
2 | package_manager: apt
3 |
--------------------------------------------------------------------------------