├── .deepsource.toml ├── LICENSE ├── README.md ├── aws.py ├── base.py ├── config.py ├── email_report.py ├── fabfile.py ├── haproxy.cfg.template ├── iptables.rules ├── linode.py ├── proxy.conf ├── proxy_monitor_restart.py ├── requirements.txt ├── rotate_proxies.py ├── scripts └── get_linode_image_id.py ├── send_gmail.py ├── ses_email.py ├── squid.conf └── utils.py /.deepsource.toml: -------------------------------------------------------------------------------- 1 | version = 1 2 | 3 | [[analyzers]] 4 | name = "python" 5 | enabled = true 6 | 7 | [analyzers.meta] 8 | runtime_version = "3.x.x" 9 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Anand B Pillai 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # rotating-proxy-daemon 2 | Automated proxy rotator and manager daemon written in Python with backends available in Linode and AWS. Uses pre-prepared squid nodes for the proxy nodes. 3 | -------------------------------------------------------------------------------- /aws.py: -------------------------------------------------------------------------------- 1 | import random 2 | import boto3 3 | from base import ProxyRotator 4 | 5 | class AWSCommand(object): 6 | '''Class encapsulating the aws ec2 API''' 7 | 8 | def __init__(self, config=None): 9 | self.ec2 = boto3.resource('ec2') 10 | self.config = config 11 | 12 | def create_ec2(self, **params): 13 | return self.ec2.create_instances(MaxCount=1, MinCount=1, **params)[0] 14 | 15 | def get_proxies(self): 16 | proxies = [] 17 | filters=[ 18 | {'Name':'image-id', 'Values':[self.config.aws_image_id]}, 19 | {'Name': 'instance-state-name', 'Values': ['running']} 20 | ] 21 | for instance in self.ec2.instances.filter(Filters=filters): 22 | proxies.append(','.join([instance.public_ip_address, '0', instance.id,'0','0'])) 23 | return proxies 24 | 25 | def delete_ec2(self, instance_id): 26 | instance = self.ec2.Instance(instance_id) 27 | instance.terminate() 28 | instance.wait_until_terminated() 29 | 30 | 31 | class AwsProxyRotator(ProxyRotator): 32 | """ AWS implementation of ProxyRotator """ 33 | 34 | def __init__(self, cfg='proxy.conf', test_mode=False, rotate=False, region=None): 35 | super(AwsProxyRotator, self).__init__(cfg, test_mode, rotate, region) 36 | #AWS resource manager 37 | self.aws_command = AWSCommand(config=self.config) 38 | self.vps_command = self.aws_command 39 | 40 | def delete_instance(self, instance_id): 41 | """ Delete instance by id """ 42 | return self.aws_command.delete_ec2(instance_id) 43 | 44 | def make_new_instance(self, region=None, test=False, verbose=False): 45 | # If calling as test, make up an ip 46 | if test: 47 | return '.'.join(map(lambda x: str(random.randrange(20, 100)), range(4))), random.randrange(10000, 48 | 50000) 49 | params = dict(ImageId=self.config.aws_image_id, 50 | InstanceType=self.config.aws_instance_type, 51 | KeyName=self.config.aws_key_name, 52 | SecurityGroupIds=self.config.aws_security_groups, 53 | SubnetId=self.config.aws_subnet_id , 54 | DryRun=True) 55 | 56 | print 'Making new ec2...' 57 | ec2_instance = self.aws_command.create_ec2(**params) 58 | ec2_instance.wait_until_running() 59 | time.sleep(10) 60 | 61 | ip = ec2_instance.public_ip_address 62 | pid = ec2_instance.id 63 | 64 | # Post process the host 65 | print 'Post-processing',ip,'...' 66 | self.post_process(ip) 67 | 68 | return ip, pid 69 | 70 | def drop(self): 71 | """ Drop all instances in current configuration (except the LB) """ 72 | 73 | print 'Dropping all proxies ...' 74 | proxies = self.aws_command.get_proxies() 75 | 76 | for item in proxies: 77 | ip,_,instance_id = item.split(',') 78 | print '\tDropping ec2',instance_id,'with IP',ip,'...' 79 | self.aws_command.delete_ec2(instance_id) 80 | 81 | -------------------------------------------------------------------------------- /base.py: -------------------------------------------------------------------------------- 1 | import os 2 | import threading 3 | import signal 4 | import time 5 | import collections 6 | 7 | import email_report 8 | from config import * 9 | 10 | from utils import daemonize 11 | 12 | class ProxyRotator(object): 13 | """ Proxy rotation, provisioning & re-configuration base class """ 14 | 15 | def __init__(self, cfg='proxy.conf', test_mode=False, rotate=False, region=None): 16 | self.config = ProxyConfig(cfg=cfg) 17 | print 'Frequency set to',self.config.frequency,'seconds.' 18 | # Test mode ? 19 | self.test_mode = test_mode 20 | # Event object 21 | self.alarm = threading.Event() 22 | # Clear the event 23 | self.alarm.clear() 24 | # Heartbeat file 25 | self.hbf = '.heartbeat' 26 | # Actual command used for doing stuff 27 | self.vps_command = None 28 | # If rotate is set, rotate before going to sleep 29 | if rotate: 30 | print 'Rotating a node' 31 | self.rotate(region=region) 32 | 33 | signal.signal(signal.SIGTERM, self.sighandler) 34 | signal.signal(signal.SIGUSR1, self.sighandler) 35 | 36 | def pick_region(self): 37 | """ Pick the region for the new node """ 38 | 39 | # Try and pick a region not present in the 40 | # current list of nodes 41 | regions = self.config.get_active_regions() 42 | # Shuffle current regions 43 | random.shuffle(self.config.region_ids) 44 | 45 | for reg in self.config.region_ids: 46 | if reg not in regions: 47 | return reg 48 | 49 | # All regions already present ? Pick a random one. 50 | return random.choice(self.config.region_ids) 51 | 52 | def rotate(self, region=None): 53 | """ Rotate the configuration to a new node """ 54 | 55 | proxy_out_label = None 56 | # Pick the data-center 57 | if region == None: 58 | print 'Picking a region ...' 59 | region = self.pick_region() 60 | else: 61 | print 'Using supplied region',region,'...' 62 | 63 | # Switch in the new proxy from this region 64 | new_proxy, proxy_id = self.make_new_instance(region) 65 | 66 | # Rotate another node 67 | if self.config.policy == Policy.ROTATION_RANDOM: 68 | proxy_out = self.config.get_proxy_for_rotation(use_random=True, input_region=region) 69 | elif self.config.policy == Policy.ROTATION_NEW_REGION: 70 | proxy_out = self.config.get_proxy_for_rotation(region_switch=True, input_region=region) 71 | elif self.config.policy == Policy.ROTATION_LRU: 72 | proxy_out = self.config.get_proxy_for_rotation(least_used=True, input_region=region) 73 | elif self.config.policy == Policy.ROTATION_LRU_NEW_REGION: 74 | proxy_out = self.config.get_proxy_for_rotation(least_used=True, region_switch=True, 75 | input_region=region) 76 | 77 | # Switch in the new proxy 78 | self.config.switch_in_proxy(new_proxy, proxy_id, region) 79 | print 'Switched in new proxy',new_proxy 80 | # Write configuration 81 | self.config.write() 82 | print 'Wrote new configuration.' 83 | # Write new HAProxy LB template and reload ha proxy 84 | ret1 = self.config.write_lb_config() 85 | ret2 = self.config.reload_lb() 86 | 87 | if ret1 and ret2: 88 | if proxy_out != None: 89 | print 'Switched out proxy',proxy_out 90 | proxy_out_id = int(self.config.get_proxy_id(proxy_out)) 91 | 92 | if proxy_out_id != 0: 93 | proxy_out_label = self.get_instance_label(proxy_out_id) 94 | print 'Removing switched out instance',proxy_out_id 95 | self.delete_instance(proxy_out_id) 96 | else: 97 | 'Proxy id is 0, not removing proxy',proxy_out 98 | else: 99 | print 'Error - Did not switch out proxy as there was a problem in writing/restarting LB' 100 | 101 | if proxy_out_label != None: 102 | # Get its label and assign it to the new proxy 103 | print 'Assigning label',proxy_out_label,'to new instance',proxy_id 104 | time.sleep(5) 105 | self.update_instance(proxy_id, 106 | proxy_out_label, 107 | self.config.group) 108 | 109 | # Post process the host 110 | print 'Post-processing',new_proxy,'...' 111 | self.post_process(new_proxy) 112 | self.send_email(proxy_out, proxy_out_label, new_proxy, region) 113 | 114 | def post_process(self, ip): 115 | """ Post-process a switched-in host """ 116 | 117 | # Sleep a bit before sshing 118 | time.sleep(5) 119 | cmd = post_process_cmd_template % (self.config.user, ip, iptables_restore_cmd) 120 | print 'SSH command 1=>',cmd 121 | os.system(cmd) 122 | cmd = post_process_cmd_template % (self.config.user, ip, squid_restart_cmd) 123 | print 'SSH command 2=>',cmd 124 | os.system(cmd) 125 | 126 | def provision(self, count=8, add=False): 127 | """ Provision an entirely fresh set of proxies after dropping current set """ 128 | 129 | if not add: 130 | self.drop() 131 | 132 | num, idx = 0, 0 133 | 134 | # If we are adding without dropping, start from current count 135 | if add: 136 | start = len(self.config.get_active_proxies()) 137 | else: 138 | start = 0 139 | 140 | for i in range(start, start + count): 141 | 142 | # Do a round-robin on regions 143 | region = self.config.region_ids[idx % len(self.config.region_ids) ] 144 | try: 145 | ip, lid = self.make_new_instance(region) 146 | new_label = self.config.proxy_prefix + str(i+1) 147 | self.update_instance(int(lid), 148 | new_label, 149 | self.config.group) 150 | 151 | num += 1 152 | except Exception, e: 153 | print 'Error creating instance',e 154 | 155 | idx += 1 156 | 157 | print 'Provisioned',num,' proxies.' 158 | # Save latest proxy information 159 | self.write_proxies() 160 | 161 | def write_proxies(self): 162 | """ Write proxies to a file """ 163 | 164 | proxies_list = self.vps_command.get_proxies() 165 | # Randomize it 166 | for i in range(5): 167 | random.shuffle(proxies_list) 168 | 169 | filename = self.config.proxylist 170 | print >> open(filename, 'w'), '\n'.join(proxies_list) 171 | print 'Saved current proxy configuration to {}'.format(filename) 172 | 173 | def test(self): 174 | """ Function to be called in loop for testing """ 175 | 176 | proxy_out_label = '' 177 | region = self.pick_region() 178 | print 'Rotating proxy to new region',region,'...' 179 | # Make a test IP 180 | new_proxy, proxy_id = self.make_new_instance(region, test=True) 181 | proxy_out = self.config.get_proxy_for_rotation(least_used=True, region_switch=True, 182 | input_region=region) 183 | 184 | if proxy_out != None: 185 | print 'Switched out proxy',proxy_out 186 | proxy_out_id = int(self.config.get_proxy_id(proxy_out)) 187 | proxy_out_label = self.get_instance_label(proxy_out_id) 188 | 189 | # Switch in the new proxy 190 | self.config.switch_in_proxy(new_proxy, proxy_id, region) 191 | print 'Switched in new proxy',new_proxy 192 | # Write new HAProxy LB template and reload ha proxy 193 | self.config.write_lb_config(test=True) 194 | self.send_email(proxy_out, proxy_out_label, new_proxy, region) 195 | 196 | def stop(self): 197 | """ Stop the rotator process """ 198 | 199 | try: 200 | os.remove(self.hbf) 201 | # Signal the event 202 | self.alarm.set() 203 | return True 204 | except (IOError, OSError), e: 205 | pass 206 | 207 | return False 208 | 209 | def sighandler(self, signum, stack): 210 | """ Signal handler """ 211 | 212 | # This will be called when you want to stop the daemon 213 | self.stop() 214 | 215 | def run(self): 216 | """ Run as a background process, rotating proxies """ 217 | 218 | # Touch heartbeat file 219 | open(self.hbf,'w').write('') 220 | # Fork 221 | print 'Daemonizing...' 222 | daemonize('rotator.pid',logfile='rotator.log', drop=True) 223 | print 'Proxy rotate daemon started.' 224 | count = 1 225 | 226 | while True: 227 | # Wait on event object till woken up 228 | self.alarm.wait(self.config.frequency) 229 | status = self.alive() 230 | if not status: 231 | print 'Daemon signalled to exit. Quitting ...' 232 | break 233 | 234 | print 'Rotating proxy node, round #%d ...' % count 235 | if self.test_mode: 236 | self.test() 237 | else: 238 | self.rotate() 239 | count += 1 240 | 241 | sys.exit(0) 242 | 243 | def create(self, region=3): 244 | """ Create a new instance for testing """ 245 | 246 | print 'Creating new instance in region',region,'...' 247 | new_proxy = self.make_new_instance(region, verbose=True) 248 | 249 | return new_proxy 250 | 251 | def send_email(self, proxy_out, label, proxy_in, region): 252 | """ Send email upon switching of a proxy """ 253 | 254 | print 'Sending email...' 255 | region = region_dict[region] 256 | content = email_template % locals() 257 | email_config = self.config.get_email_config() 258 | 259 | email_report.email_report(email_config, "%s", content) 260 | 261 | def alive(self): 262 | """ Return whether I should be alive """ 263 | 264 | return os.path.isfile(self.hbf) 265 | 266 | def get_instance_label(self, instance_id): 267 | """ Return instance label given instance id """ 268 | pass 269 | 270 | def update_instance(self, instance_id, label, group=None): 271 | """ Update the meta-data for the instance """ 272 | pass 273 | 274 | def delete_instance(self, instance_id): 275 | """ Delete a given instance given its id """ 276 | pass 277 | 278 | def drop(self): 279 | """ Drop all instances in current configuration (except the LB) """ 280 | pass 281 | 282 | -------------------------------------------------------------------------------- /config.py: -------------------------------------------------------------------------------- 1 | import json 2 | import time 3 | import random 4 | import operator 5 | import sys 6 | import os 7 | from utils import enum 8 | 9 | # Configurations 10 | 11 | # Rotation Policies 12 | Policy = enum('ROTATION_RANDOM', 13 | # Least recently used 14 | 'ROTATION_LRU', 15 | # Switch to another region 16 | 'ROTATION_NEW_REGION', 17 | # LRU + New region 18 | 'ROTATION_LRU_NEW_REGION') 19 | 20 | region_dict = {2: 'Dallas', 21 | 3: 'Fremont', 22 | 4: 'Atlanta', 23 | 6: 'Newark', 24 | 7: 'London', 25 | 8: 'Tokyo', 26 | 9: 'Singapore', 27 | 10: 'Frankfurt'} 28 | 29 | 30 | email_template = """ 31 | 32 | I just switched a proxy node in the proxy infrastructure. Details are below. 33 | 34 | In: %(label)s, %(proxy_in)s 35 | Out: %(label)s, %(proxy_out)s 36 | 37 | Region: %(region)s 38 | 39 | -- Proxy Rotator Daemon 40 | 41 | """ 42 | 43 | # Post process command 44 | post_process_cmd_template = """ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s@%s "%s" """ 45 | iptables_restore_cmd = "sudo iptables-restore < /etc/iptables.rules" 46 | squid_restart_cmd = "sudo squid3 -f /etc/squid3/squid.conf" 47 | 48 | 49 | class ProxyConfig(object): 50 | """ Class representing configuration of crawler proxy infrastructure """ 51 | 52 | def __init__(self, cfg='proxy.conf'): 53 | """ Initialize proxy config from the config file """ 54 | 55 | self.parse_config(cfg) 56 | # This is a file with each line of the form 57 | # IPV4 address, datacenter code, instance-id, switch_in timestamp, switch_out timestamp 58 | # E.g: 45.79.91.191, 3, 1446731065, 144673390 59 | try: 60 | proxies = map(lambda x: x.strip().split(','), open(self.proxylist).readlines()) 61 | # Proxy IP to (switch_in, switch_out) timestamp mappings 62 | self.proxy_dict = {} 63 | # Proxy IP to enabled mapping 64 | self.proxy_state = {} 65 | self.process_proxies(proxies) 66 | except (OSError, IOError), e: 67 | print e 68 | sys.exit("Fatal error, proxy list input file " + self.proxylist + " not found!") 69 | except ValueError, e: 70 | print e 71 | print self.proxylist + " is empty or has junk values" 72 | 73 | try: 74 | self.proxy_template = open(self.lb_template).read() 75 | except (OSError, IOError), e: 76 | print e 77 | sys.exit("Fatal error, template config input file " + template_file + " not found!") 78 | 79 | def parse_config(self, cfg): 80 | """ Parse the configuration file and load config """ 81 | 82 | self.config = json.load(open(cfg)) 83 | for key,value in self.config.items(): 84 | # Set attribute locally 85 | setattr(self, key, value) 86 | 87 | # Do some further processing 88 | self.frequency = float(self.frequency)*3600.0 89 | self.policy = eval('Policy.' + self.policy) 90 | 91 | def get_proxy_ips(self): 92 | """ Return all proxy IP addresses as a list """ 93 | 94 | return self.proxy_state.keys() 95 | 96 | def get_active_proxies(self): 97 | """ Return a list of all active proxies as a list """ 98 | 99 | return map(self.proxy_dict.get, filter(self.proxy_state.get, self.proxy_state.keys())) 100 | 101 | def process_proxies(self, proxies): 102 | """ Process the proxy information to create internal dictionaries """ 103 | 104 | # Prepare the proxy region dict 105 | for proxy_ip, region, proxy_id, switch_in, switch_out in proxies: 106 | # If switch_in ==0: put current time 107 | if int(float(switch_in))==0: 108 | switch_in = int(time.time()) 109 | if int(float(switch_out))==0: 110 | switch_out = int(time.time()) 111 | 112 | self.proxy_dict[proxy_ip] = [proxy_ip, int(region), proxy_id, int(float(switch_in)), int(float(switch_out))] 113 | self.proxy_state[proxy_ip] = True 114 | 115 | print 'Processed',len(self.proxy_state),'proxies.' 116 | 117 | def get_proxy_for_rotation(self, 118 | use_random=False, 119 | least_used=False, 120 | region_switch=False, 121 | input_region=3): 122 | """ Return a proxy IP address for rotation using the given settings. The 123 | returned proxy will be replaced with a new proxy. 124 | 125 | @use_random - Means returns a random proxy from the current active list 126 | @least_used - Returns a proxy IP which is the oldest switched out one 127 | so we keep the switching more or less democratic. 128 | @region_switch - Returns a proxy which belongs to a different region 129 | from the new proxy. 130 | @input_region - The region of the new proxy node - defaults to Fremont, CA. 131 | 132 | Note that if use_random is set to true, the other parameters are ignored. 133 | 134 | """ 135 | 136 | active_proxies = self.get_active_proxies() 137 | print 'Active proxies =>',active_proxies 138 | 139 | if use_random: 140 | # Pick a random proxy IP 141 | proxy = random.choice(active_proxies) 142 | print 'Returning proxy =>',proxy 143 | proxy_ip = proxy[0] 144 | 145 | # Remove it from every data structure 146 | self.switch_out_proxy(proxy_ip) 147 | return proxy 148 | 149 | if least_used: 150 | # Pick the oldest switched out proxy i.e one 151 | # with smallest switched out value 152 | proxies_used = sorted(active_proxies, 153 | key=operator.itemgetter(-1)) 154 | 155 | print 'Proxies used =>',proxies_used 156 | 157 | if region_switch: 158 | # Find the one with a different region from input 159 | for proxy, reg, pi, si, so in proxies_used: 160 | if reg != input_region: 161 | print 'Returning proxy',proxy,'from region',reg 162 | self.switch_out_proxy(proxy) 163 | return proxy 164 | 165 | # If all regions are already in use, pick the last used 166 | # proxy anyway 167 | return proxies_used[0][0] 168 | 169 | if region_switch: 170 | # Pick a random proxy not in the input region 171 | proxies = active_proxies 172 | random.shuffle(proxies) 173 | 174 | for proxy, reg, pi, si, so in proxies: 175 | if reg != input_region: 176 | print 'Returning proxy',proxy,'from region',reg 177 | self.switch_out_proxy(proxy) 178 | return proxy 179 | 180 | def __getattr__(self, name): 181 | """ Return from local, else written from config """ 182 | 183 | try: 184 | return self.__dict__[name] 185 | except KeyError: 186 | return self.config.get(name) 187 | 188 | def switch_out_proxy(self, proxy): 189 | """ Switch out a given proxy IP """ 190 | 191 | # Disable it 192 | self.proxy_state[proxy] = False 193 | # Mark its switched out timestamp 194 | self.proxy_dict[proxy][-1] = int(time.time()) 195 | 196 | def switch_in_proxy(self, proxy, proxy_id, region): 197 | """ Switch in a given proxy IP """ 198 | 199 | # Mark its switched out timestamp 200 | self.proxy_dict[proxy] = [proxy, int(region), proxy_id, int(time.time()), int(time.time())] 201 | # Enable it 202 | self.proxy_state[proxy] = True 203 | 204 | def get_active_regions(self): 205 | """ Return unique regions for which proxies are active """ 206 | 207 | regions = set() 208 | for proxy,region,pi,si,so in self.proxy_dict.values(): 209 | if self.proxy_state[proxy]: 210 | regions.add(region) 211 | 212 | return list(regions) 213 | 214 | def write(self, disabled=False): 215 | """ Write current state to an output file """ 216 | 217 | lines = [] 218 | for proxy, reg, pi, si, so in self.proxy_dict.values(): 219 | if disabled or self.proxy_state[proxy]: 220 | lines.append('%s,%s,%s,%s,%s\n' % (proxy, str(reg), str(pi), str(int(si)), str(int(so)))) 221 | 222 | open(self.proxylist,'w').writelines(lines) 223 | 224 | def write_lb_config(self, disabled=False, test=False): 225 | """ Write current proxy configuration into the load balancer config """ 226 | 227 | lines, idx = [], 1 228 | # Shuffle 229 | items = self.proxy_dict.values() 230 | for i in range(10): 231 | random.shuffle(items) 232 | 233 | for proxy, reg, pi, si, so in items: 234 | if self.proxy_state[proxy]: 235 | lines.append('\tserver squid%d %s:8321 check inter 10000 rise 2 fall 5' % (idx, proxy)) 236 | idx += 1 237 | 238 | squid_config = "\n".join(lines) 239 | content = self.proxy_template % locals() 240 | # Write to temp file 241 | tmpfile = '/tmp/.haproxy.cfg' 242 | open(tmpfile,'w').write(content) 243 | 244 | # If running in test mode, don't do this! 245 | if not test: 246 | # Run as sudo 247 | cmd = 'sudo cp %s %s; rm -f %s' % (tmpfile, self.lb_config, tmpfile) 248 | os.system(cmd) 249 | 250 | self.reload_lb() 251 | return True 252 | 253 | def reload_lb(self): 254 | """ Reload the HAProxy load balancer """ 255 | 256 | return (os.system(self.lb_restart) == 0) 257 | 258 | def get_proxy_id(self, proxy): 259 | """ Given proxy return its id """ 260 | 261 | return self.proxy_dict[proxy][2] 262 | 263 | def get_email_config(self): 264 | """ Return email configuration """ 265 | 266 | return self.config['email'] 267 | 268 | 269 | -------------------------------------------------------------------------------- /email_report.py: -------------------------------------------------------------------------------- 1 | import smtplib 2 | import datetime 3 | import sys 4 | import socket 5 | import os 6 | import send_gmail 7 | 8 | def email_report(config, template, content): 9 | """ Email any kind of report to anyone """ 10 | 11 | print "Sending email report..." 12 | timestamp = datetime.datetime.strftime(datetime.datetime.now(), "%d-%b-%Y %I:%M:%S %p") 13 | 14 | data = template % content 15 | 16 | if config.get('send_email', True): 17 | print 'Sending email ...' 18 | from_e, to_e = config.get('from_email'), config.get('to_email') 19 | from_pass = config.get('from_pass') 20 | subject = config.get('email_subject') % (timestamp, socket.gethostname()) 21 | print send_gmail.send_mail(from_e, from_pass, to_e, subject, data) 22 | # print ses_email.send_ses(from_e, subject, data, to_e) 23 | print 'done.' 24 | else: 25 | print 'Not sending email.' 26 | # Simply print 27 | print data 28 | 29 | if __name__ == "__main__": 30 | pass 31 | -------------------------------------------------------------------------------- /fabfile.py: -------------------------------------------------------------------------------- 1 | from fabric.api import run 2 | from fabric.api import hosts, local, settings, abort 3 | from fabric.state import env 4 | 5 | import os 6 | 7 | def process_proxy_host(): 8 | """ Post-process a proxy host """ 9 | 10 | with settings(warn_only=True): 11 | run("sudo iptables-restore < /etc/iptables.rules") 12 | run("sudo squid3 -f /etc/squid3/squid.conf") 13 | 14 | def iptables_apply(): 15 | """ Apply iptables rules from /etc/iptables.rules """ 16 | 17 | with settings(warn_only=True): 18 | run("sudo iptables-restore < /etc/iptables.rules") 19 | 20 | def proxy_iptables(): 21 | """ Apply iptables rules on all proxy nodes """ 22 | 23 | # get proxy list from proxylb 24 | local('scp alpha@proxylb:proxyrotate/proxies.list .') 25 | if os.path.isfile('proxies.list'): 26 | for line in open('proxies.list'): 27 | ip = line.strip().split(',')[0].strip() 28 | env.host_string = ip 29 | env.user = 'alpha' 30 | print 'Restoring iptables rules on',ip,'...' 31 | run('sudo iptables-restore < /etc/iptables.rules') 32 | 33 | 34 | def install_keys(): 35 | """ Install an ssh key to all proxy nodes """ 36 | 37 | # get proxy list from proxylb 38 | local('scp alpha@proxylb:proxyrotate/proxies.list .') 39 | if os.path.isfile('proxies.list'): 40 | for line in open('proxies.list'): 41 | ip = line.strip().split(',')[0].strip() 42 | env.host_string = ip 43 | env.user = 'alpha' 44 | local('scp id_rsa.pub alpha@%s:' % ip) 45 | run('cat id_rsa.pub >> .ssh/authorized_keys') 46 | 47 | 48 | -------------------------------------------------------------------------------- /haproxy.cfg.template: -------------------------------------------------------------------------------- 1 | global 2 | log /dev/log local0 3 | log /dev/log local1 notice 4 | chroot /var/lib/haproxy 5 | stats socket /run/haproxy/admin.sock mode 660 level admin 6 | stats timeout 30s 7 | user haproxy 8 | group haproxy 9 | daemon 10 | 11 | defaults 12 | log global 13 | mode http 14 | option httplog 15 | option dontlognull 16 | timeout connect 5000 17 | timeout client 50000 18 | timeout server 50000 19 | errorfile 400 /etc/haproxy/errors/400.http 20 | errorfile 403 /etc/haproxy/errors/403.http 21 | errorfile 408 /etc/haproxy/errors/408.http 22 | errorfile 500 /etc/haproxy/errors/500.http 23 | errorfile 502 /etc/haproxy/errors/502.http 24 | errorfile 503 /etc/haproxy/errors/503.http 25 | errorfile 504 /etc/haproxy/errors/504.http 26 | 27 | # reverse proxy-squid 28 | frontend localproxy 29 | bind *:5729 30 | mode http 31 | option http-keep-alive 32 | default_backend rotateproxy 33 | 34 | backend rotateproxy 35 | mode http 36 | balance roundrobin 37 | cookie proxycookie insert indirect nocache 38 | 39 | # Squid-farm configuration 40 | %(squid_config)s 41 | 42 | 43 | listen admin_stats 44 | bind 0.0.0.0:8080 45 | mode http 46 | stats uri /stats 47 | stats realm Global\ statistics 48 | stats auth ubuntu:adG3Hj09l<} 49 | option contstats 50 | -------------------------------------------------------------------------------- /iptables.rules: -------------------------------------------------------------------------------- 1 | *filter 2 | # Template iptables rules for Proxy nodes. 3 | # Allow all loopback (lo0) traffic and drop all traffic to 127/8 that doesn't use lo0 4 | -A INPUT -i lo -j ACCEPT 5 | -A INPUT -d 127.0.0.0/8 -j REJECT 6 | 7 | # Accept all established inbound connections 8 | -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT 9 | 10 | # Allow ping 11 | # -A INPUT -p icmp --icmp-type echo-request -j ACCEPT 12 | 13 | # Allow SSH connections 14 | -A INPUT -p tcp -m tcp --dport 22 -m state --state NEW -j ACCEPT 15 | 16 | # Prevent DOS attack 17 | -A INPUT -s myloadbalancerip -p tcp --dport myproxyport -m limit --limit 500/minute --limit-burst 800 -j ACCEPT 18 | 19 | # Drop all other inbound - default deny unless explicitly allowed policy 20 | -A FORWARD -j DROP 21 | 22 | # Log dropped packets 23 | -N LOGGING 24 | -A INPUT -j LOGGING 25 | -A LOGGING -m limit --limit 2/min -j LOG --log-prefix "iptables packet dropped: " --log-level 7 26 | -A LOGGING -j DROP 27 | 28 | 29 | COMMIT 30 | 31 | -------------------------------------------------------------------------------- /linode.py: -------------------------------------------------------------------------------- 1 | import os 2 | import random 3 | import functools 4 | from base import ProxyRotator 5 | from utils import randpass 6 | 7 | class LinodeCommand(object): 8 | """ Class encapsulating linode CLI commands """ 9 | 10 | def __init__(self, binary='linode', verbose=False, config=None): 11 | self.binary = binary 12 | self.verbose = verbose 13 | self.cmd_template = {'create': 'create -d %d -p %d -o %d -i %d -l %s -r %s', 14 | 'delete': 'delete -l %d', 15 | 'list_proxies': 'find -g %s -s %s' % (config.group, config.proxylb), 16 | 'info': 'info -l %d', 17 | 'update': 'update -l %d -L %s -g %s' 18 | } 19 | # Dynamically create command methods 20 | self.dyn_create() 21 | 22 | def _run(self, command, *args): 23 | """ Run a command and return the output """ 24 | 25 | template = self.cmd_template.get(command) 26 | if template == None: 27 | print 'No such command configured =>',command 28 | return -1 29 | 30 | cmd = ' '.join((self.binary, template % args)) 31 | if self.verbose: print 'Command is',cmd 32 | return os.popen(cmd).read() 33 | 34 | def dyn_create(self): 35 | """ Dynamically create linode methods """ 36 | 37 | for cmd in self.cmd_template: 38 | method_name = cmd 39 | method = functools.partial(self._run, cmd) 40 | if self.verbose: print 'Dyn-creating method',method_name,'...' 41 | setattr(self, method_name, method) 42 | 43 | def get_label(self, linode_id): 44 | """ Return the label, given the linode id """ 45 | 46 | data = self.info(linode_id) 47 | return data.split('\n')[0].split(':')[-1].strip() 48 | 49 | def get_proxies(self): 50 | """ Return all proxies as a list """ 51 | 52 | return self.list_proxies().strip().split('\n') 53 | 54 | class LinodeProxyRotator(ProxyRotator): 55 | """ Linode VPS implementation of ProxyRotator """ 56 | 57 | def __init__(self, cfg='proxy.conf', test_mode=False, rotate=False, region=None): 58 | super(LinodeProxyRotator, self).__init__(cfg, test_mode, rotate, region) 59 | # Linode creation class 60 | self.linode_command = LinodeCommand(verbose=True, config=self.config) 61 | self.vps_command = self.linode_command 62 | 63 | def get_instance_label(self, instance_id): 64 | """ Return instance label given instance id """ 65 | return self.linode_command.get_label(instance_id) 66 | 67 | def delete_instance(self, instance_id): 68 | """ Delete instance by id """ 69 | return self.linode_command.delete(proxy_out_id) 70 | 71 | def make_new_instance(self, region, test=False, verbose=False): 72 | """ Make a new instance in the given region """ 73 | 74 | # If calling as test, make up an ip 75 | if test: 76 | return '.'.join(map(lambda x: str(random.randrange(20, 100)), range(4))), random.randrange(10000, 77 | 50000) 78 | 79 | tup = (region, 80 | self.config.plan_id, 81 | self.config.os_id, 82 | self.config.image_id, 83 | 'proxy_disk', 84 | randpass()) 85 | 86 | print 'Making new linode in region',region,'...' 87 | data = self.linode_command.create(*tup) 88 | 89 | # data = os.popen(cmd).read() 90 | if verbose: 91 | print data 92 | # The IP is the last line of the command 93 | ip = data.strip().split('\n')[-1].strip().split()[-1].strip() 94 | # Proxy ID 95 | pid = data.strip().split('\n')[-3].strip().split()[-1].strip() 96 | print 'I.P address of new linode is',ip 97 | print 'ID of new linode is',pid 98 | # Post process the host 99 | print 'Post-processing',ip,'...' 100 | self.post_process(ip) 101 | 102 | return ip, pid 103 | 104 | def update_instance(self, instance_id, label, group=None): 105 | """ Update meta-data for a new instance """ 106 | 107 | # Updates label (name) and group information 108 | ret = self.linode_command.update(int(instance_id), 109 | label, 110 | group) 111 | return ret 112 | 113 | 114 | def drop(self): 115 | """ Drop all the proxies in current configuration (except the LB) """ 116 | 117 | print 'Dropping all proxies ...' 118 | proxies = self.linode_command.get_proxies() 119 | 120 | for item in proxies: 121 | if item.strip() == "": continue 122 | ip,dc,lid,si,so = item.split(',') 123 | print '\tDropping linode',lid,'with IP',ip,'from dc',dc,'...' 124 | self.linode_command.delete(int(lid)) 125 | 126 | -------------------------------------------------------------------------------- /proxy.conf: -------------------------------------------------------------------------------- 1 | { 2 | "group": "ynodes", 3 | "proxylb": "yproxylb", 4 | "proxy_prefix": "ynode", 5 | "policy": "ROTATION_LRU_NEW_REGION", 6 | "rotate": 1, 7 | "frequency": 72, 8 | "image_id": 0, 9 | "plan_id": 1, 10 | "os_id": 140, 11 | "region_ids": [2,3,4,6,7,9,10], 12 | "lb_template": "haproxy.cfg.template", 13 | "lb_config": "/etc/haproxy/haproxy.cfg", 14 | "lb_restart": "sudo service haproxy restart", 15 | "lb_stop": "sudo service haproxy stop", 16 | "lb_start": "sudo service haproxy start", 17 | "proxylist": "proxies.list", 18 | "daemon": true, 19 | "user": "ubuntu", 20 | "email" : { 21 | "send_email": true, 22 | "from_email": "yegiiproxy@gmail.com", 23 | "from_pass": "", 24 | "to_email": ["anandpillai@letterboxes.org"], 25 | "email_subject": "Linode proxy switch report: %s from %s" 26 | }, 27 | 28 | "vps_provider":"aws", 29 | "aws_image_id":"ami-f104ec8c", 30 | "aws_instance_type":"t2.micro", 31 | "aws_key_name":"hamon", 32 | "aws_security_groups":["sg-5517b522"], 33 | "aws_subnet_id":"subnet-5dbc8316" 34 | } 35 | -------------------------------------------------------------------------------- /proxy_monitor_restart.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Script to parse haproxy.cfg file and restart dead squid instances 4 | 5 | """ 6 | 7 | import re 8 | import os 9 | import time 10 | import utils 11 | 12 | server_re = re.compile(r'server\s+([a-zA-Z0-9]+)\s+(\d+\.\d+\.\d+\.\d+)\:(\d+)*') 13 | network_test_cmd = 'nc %s %d -w 5 -zv 2>/dev/null' 14 | squid_restart_cmd = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ubuntu@%s "sudo squid3 -f /etc/squid3/squid.conf"' 15 | 16 | def parse_config(filename='/etc/haproxy/haproxy.cfg'): 17 | """ Parse HAproxy configuration file """ 18 | 19 | restarted = {} 20 | 21 | for line in open(filename).readlines(): 22 | line = line.strip() 23 | if line == '': continue 24 | 25 | if line.startswith('server'): 26 | # Get the server name out 27 | match = server_re.match(line) 28 | server_name, ip_address, port = match.groups() 29 | # Test for access via nc 30 | print ip_address, port 31 | cmd = network_test_cmd % (ip_address, int(port)) 32 | if os.system(cmd) != 0: 33 | # This squid instance is down 34 | cmd = squid_restart_cmd % ip_address 35 | print 'Restarting squid on',ip_address,'...' 36 | if os.system(cmd) == 0: 37 | restarted[ip_address] = 1 38 | 39 | print 'Restarted',len(restarted),'squid instances.' 40 | 41 | def main(): 42 | 43 | utils.daemonize('monitor.pid', logfile='monitor.log') 44 | 45 | while True: 46 | parse_config() 47 | time.sleep(300) 48 | 49 | if __name__ == "__main__": 50 | import sys 51 | if len(sys.argv)>1: 52 | parse_config(sys.argv[1]) 53 | else: 54 | main() 55 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | https://github.com/anvetsu/pylinode/archive/master.zip 2 | -------------------------------------------------------------------------------- /rotate_proxies.py: -------------------------------------------------------------------------------- 1 | """ 2 | Script to auto-rotate and configure squid proxy farms using multiple 3 | VPS backends using their APIs. 4 | 5 | """ 6 | 7 | import argparse 8 | import os 9 | import sys 10 | import signal 11 | 12 | def process_args(rotator, args): 13 | 14 | if args.test: 15 | print 'Testing the daemon' 16 | rotator.test() 17 | sys.exit(0) 18 | 19 | if args.add != 0: 20 | print 'Adding new set of',args.num,'proxies ...' 21 | rotator.provision(count = int(args.num), add=True) 22 | sys.exit(0) 23 | 24 | if args.provision != 0: 25 | print 'Provisioning fresh set of',args.num,'proxies ...' 26 | rotator.provision(count = int(args.num)) 27 | sys.exit(0) 28 | 29 | if args.create: 30 | print 'Creating new instance...' 31 | rotator.create(int(args.region)) 32 | sys.exit(0) 33 | 34 | if args.drop: 35 | print 'Dropping current proxies ...' 36 | rotator.drop() 37 | sys.exit(0) 38 | 39 | if args.writeconfig: 40 | # Load current proxies config and write proxies.list file 41 | rotator.write_proxies() 42 | print 'Saved current proxy configuration to proxies.list' 43 | sys.exit(0) 44 | 45 | if args.writelbconfig: 46 | # Load current proxies config and write proxies.list file 47 | rotator.config.write_lb_config() 48 | print 'Wrote HAProxy configuration' 49 | sys.exit(0) 50 | 51 | 52 | if args.stop or args.restart: 53 | pidfile = 'rotator.pid' 54 | if os.path.isfile(pidfile): 55 | print 'Stopping proxy rotator daemon ...', 56 | # Signal the running daemon with SIGTERM 57 | try: 58 | os.kill(int(open(pidfile).read().strip()), signal.SIGTERM) 59 | print 'stopped.' 60 | except OSError, e: 61 | print e 62 | print 'Unable to stop, possibly daemon not running.' 63 | 64 | if args.restart: 65 | print 'Starting...' 66 | os.system('python rotate_proxies.py') 67 | 68 | sys.exit(1) 69 | 70 | if __name__ == "__main__": 71 | parser = argparse.ArgumentParser(prog='rotate_proxies') 72 | parser.add_argument('-C','--conf',help='Use the given configuration file', default='proxy.conf') 73 | parser.add_argument('-s','--stop',help='Stop the currently running daemon', action='store_true') 74 | parser.add_argument('-t','--test',help='Run the test function to test the daemon', action='store_true') 75 | parser.add_argument('-c','--create',help='Create a proxy instance', action='store_true',default=False) 76 | parser.add_argument('-r','--region',help='Specify a region when creating an instance', default=3, type=int) 77 | parser.add_argument('-R','--rotate',help='Rotate a node immediately and go to sleep', default=False, 78 | action='store_true') 79 | parser.add_argument('-D','--drop',help='Drop the current configuration of proxies (except LB)', 80 | default=False,action='store_true') 81 | parser.add_argument('-P','--provision',help='Provision a fresh set of proxy instances',default=False, 82 | action='store_true') 83 | parser.add_argument('-A','--add',help='Add a new set of instances to existing farm',default=False, 84 | action='store_true') 85 | parser.add_argument('-N','--num',help='Number of new instances to provision or add (use with -P or -A)',type=int, 86 | default=10) 87 | 88 | parser.add_argument('-w','--writeconfig',help='Load current proxies configuration and write a fresh proxies.list config file', action='store_true') 89 | parser.add_argument('-W','--writelbconfig',help='Load current proxies configuration and write a fresh HAProxy config to /etc/haproxy/haproxy.cfg', action='store_true') 90 | parser.add_argument('--restart',help='Restart the daemon',action='store_true') 91 | parser.add_argument('-T','--target',help='Target VPS platform (linode, aws)',default='linode') 92 | 93 | args = parser.parse_args() 94 | # print args 95 | 96 | if args.target == 'linode': 97 | linode = __import__('linode') 98 | rotator = linode.LinodeProxyRotator(cfg=args.conf, 99 | test_mode = args.test, 100 | rotate=args.rotate) 101 | elif args.target == 'aws': 102 | aws = __import__('aws') 103 | rotator = aws.AwsProxyRotator(cfg=args.conf, 104 | test_mode = args.test, 105 | rotate=args.rotate) 106 | 107 | 108 | process_args(rotator, args) 109 | rotator.run() 110 | 111 | -------------------------------------------------------------------------------- /scripts/get_linode_image_id.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | def query_linode_image_id(): 4 | """ Query for linode image id and write to proxy.conf """ 5 | 6 | image_id = raw_input('Enter linode image id: ') 7 | try: 8 | im_id = int(image_id) 9 | cfg = json.load(open('proxy.conf')) 10 | cfg['image_id'] = im_id 11 | json.dump(cfg, open('proxy.conf','w'), indent=4) 12 | except ValueError, e: 13 | print 'Invalid image id=>',image_id 14 | except Exception, e: 15 | print 'Error updating proxy.conf =>',e 16 | 17 | if __name__ == "__main__": 18 | query_linode_image_id() 19 | -------------------------------------------------------------------------------- /send_gmail.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Send email via gmail SMTP 4 | 5 | """ 6 | import smtplib 7 | 8 | from email.MIMEMultipart import MIMEMultipart 9 | from email.MIMEText import MIMEText 10 | 11 | 12 | def create_message(user, recipients, subject, body): 13 | msg = MIMEMultipart() 14 | msg['From'] = user 15 | msg['To'] = ', '.join(recipients) 16 | msg['Subject'] = subject 17 | msg.attach(MIMEText(body)) 18 | return msg 19 | 20 | 21 | def send_mail(user, password, recipients, subject, body): 22 | msg = create_message(user, recipients, subject, body) 23 | 24 | server = smtplib.SMTP('smtp.gmail.com', 587) 25 | server.ehlo() 26 | server.starttls() 27 | server.ehlo() 28 | server.login(user, password) 29 | server.sendmail(user, recipients, msg.as_string()) 30 | server.close() 31 | print('Sent email to %s' % (', '.join(recipients))) 32 | -------------------------------------------------------------------------------- /ses_email.py: -------------------------------------------------------------------------------- 1 | import boto 2 | from email.mime.application import MIMEApplication 3 | from email.mime.multipart import MIMEMultipart 4 | from email.mime.text import MIMEText 5 | 6 | def send_ses(fromaddr, 7 | subject, 8 | body, 9 | recipient, 10 | attachment=None, 11 | filename=''): 12 | """Send an email via the Amazon SES service. 13 | 14 | Example: 15 | send_ses('me@example.com, 'greetings', "Hi!", 'you@example.com) 16 | 17 | Return: 18 | If 'ErrorResponse' appears in the return message from SES, 19 | return the message, otherwise return an empty '' string. 20 | """ 21 | msg = MIMEMultipart() 22 | msg['Subject'] = subject 23 | msg['From'] = fromaddr 24 | msg['To'] = recipient 25 | msg.attach(MIMEText(body)) 26 | 27 | if attachment: 28 | part = MIMEApplication(attachment) 29 | part.add_header('Content-Disposition', 'attachment', filename=filename) 30 | msg.attach(part) 31 | conn = boto.connect_ses() 32 | result = conn.send_raw_email(msg.as_string()) 33 | return result if 'ErrorResponse' in result else '' 34 | 35 | -------------------------------------------------------------------------------- /squid.conf: -------------------------------------------------------------------------------- 1 | # Standard squid configuration for proxy nodes - can be edited to suit your needs 2 | # though it is advisable to keep most of the configuration (except the paths) unchanged. 3 | 4 | # Header control 5 | via off 6 | forwarded_for off 7 | 8 | request_header_access From deny all 9 | request_header_access Server deny all 10 | request_header_access WWW-Authenticate deny all 11 | request_header_access Link deny all 12 | request_header_access Cache-Control deny all 13 | request_header_access Proxy-Connection deny all 14 | request_header_access X-Cache deny all 15 | request_header_access X-Cache-Lookup deny all 16 | request_header_access Via deny all 17 | request_header_access X-Forwarded-For deny all 18 | request_header_access Pragma deny all 19 | # request_header_access Keep-Alive deny all 20 | 21 | # disk and memory cache settings 22 | cache_dir aufs /usr/local/squid/var/cache 500 16 256 23 | maximum_object_size 4096 KB 24 | 25 | 26 | # store coredumps in the first cache dir 27 | coredump_dir /usr/local/squid/var/cache #change your cache location 28 | 29 | 30 | # the hostname squid displays in error messages 31 | visible_hostname localhost 32 | 33 | 34 | # log & process ID file details 35 | #change it according to your path 36 | cache_access_log /usr/local/squid/var/logs/access.log 37 | cache_log /usr/local/squid/var/logs/cache.log 38 | #cache_store_log /Users/newscred/Library/Logs/squid/squid-store 39 | pid_filename /usr/local/squid/var/run/squid.pid 40 | 41 | 42 | # Squid listening port 43 | http_port 8321 44 | 45 | auth_param basic program /usr/lib/squid3/basic_ncsa_auth /etc/squid3/squid_passwd 46 | 47 | # Access Control lists 48 | acl localhost src 127.0.0.1/32 49 | acl to_localhost dst 127.0.0.0/8 0.0.0.0/32 50 | # acl manager proto cache_object 51 | acl SSL_ports port 443 52 | acl Safe_ports port 80 # http 53 | acl Safe_ports port 21 # ftp 54 | acl Safe_ports port 443 # https 55 | acl Safe_ports port 70 # gopher 56 | acl Safe_ports port 210 # wais 57 | acl Safe_ports port 1025-65535 # unregistered ports 58 | acl Safe_ports port 280 # http-mgmt 59 | acl Safe_ports port 488 # gss-http 60 | acl Safe_ports port 591 # filemaker 61 | acl Safe_ports port 777 # multiling http 62 | acl CONNECT method CONNECT 63 | 64 | acl myfetchers proxy_auth REQUIRED 65 | acl local src 127.0.0.1 66 | 67 | acl godirect dstdomain google.com 68 | http_access allow myfetchers 69 | http_access allow local 70 | http_access deny all 71 | 72 | # logformat squid %ts.%03tu %6tr %>a %Ss/%03Hs %a %Ss/%03Hs %ha %Sh/% 0: 64 | sys.exit(0) 65 | except OSError, e: 66 | print >>sys.stderr, "fork #1 failed: %d (%s)" % (e.errno, e.strerror) 67 | sys.exit(1) 68 | 69 | # Do not prevent unmounting... 70 | os.setsid() 71 | os.umask(0) 72 | 73 | # do second fork 74 | try: 75 | pid = os.fork() 76 | if pid > 0: 77 | # exit from second parent, print eventual PID before 78 | #print "Daemon PID %d" % pid 79 | open(pidfile,'w').write("%d"%pid) 80 | sys.exit(0) 81 | except OSError, e: 82 | print >>sys.stderr, "fork #2 failed: %d (%s)" % (e.errno, e.strerror) 83 | sys.exit(1) 84 | 85 | # Drop privileges to given user by default 86 | if drop: 87 | drop_privileges(user, user) 88 | 89 | # Redirect stdout/stderr to log file 90 | if logfile != None: 91 | log=Log(open(logfile,'a')) 92 | sys.stdout.close() 93 | sys.stderr.close() 94 | sys.stdin.close() 95 | sys.stdout=sys.stderr=log 96 | 97 | if __name__ == "__main__": 98 | pass 99 | 100 | 101 | --------------------------------------------------------------------------------