├── fsf ├── __init__.py ├── fsf.py ├── fsf.py~ └── test_fsf.py ├── optim ├── __init__.py ├── test_optimMinPow2x2.py ├── optimMinPow2x2DTX.py~ ├── optimMinPow2x2DTX.py ├── test_optim.py ├── optimMinPow2x2.py~ ├── optimMinPow2x2.py └── optimMinPow.py ├── utils ├── __init__.py ├── test_utils.py ├── utils.py └── utils.py~ ├── world ├── __init__.py ├── README ├── loadprecomputedworld.py ├── physicalentity.py ├── basestation.py ├── pathloss.py ├── hexagon.py └── hexfuns.py ├── configure ├── __init__.py ├── settings.cfg ├── phy.py ├── createconfigfile.py~ ├── createconfigfile.py └── wconfig.py ├── plotting ├── __init__.py ├── README ├── plot_CDF_from_file.py ├── plotarray.py ├── sinr_analysis_plot_ICC2013.py ├── generate_network_figure.py ├── e_per_bit_analysis_plot_ICC2013.py ├── sum_rate_analysis_plot_ICC2013.py ├── plot_delivered_individual_seqDTX.py ├── plot_miss_rate_analysis_seqDTX.py ├── plot_percentage_satisfied_over_target_rate_seqDTX.py ├── plot_power_cons_per_iter_seqDTX.py ├── plot_variance_of_user_rate_over_target_user_rate.py ├── plot_average_mobile_delivered_rate_over_target_rate.py ├── power_consumption_over_target_sum_rate_seqDTX.py ├── JSACplot.py ├── delivered_rate_over_target_sum_rate_seqDTX.py ├── plot_percentage_satisfieder_over_iters_seqDTX.py ├── convergence_analysis_plot_ICC2013.py └── channelplotter.py ├── quantmap ├── __init__.py ├── test_quantmap.py ├── quantmap.py~ └── quantmap.py ├── results ├── __init__.py ├── README ├── resultshandler.py ├── rename_folders.py ├── collect_delivered_per_mobile_distribution_over_iterations.py ├── collect_average_user_rate_over_target_user_rate.py ├── collect_variance_of_user_rate_over_target_user_rate.py ├── collect_delivered_rate_over_iterations_seqDTX.py ├── collect_percentage_over_satifsfied_users_over_delta_sumrate.py └── collect_percentage_over_satifsfied_users_over_target_sumrate.py ├── scripts ├── README ├── SINRcdfUniform.py └── precomputeworld.py ├── iwf ├── __init__.py ├── test_iwf.py ├── iwf.py~ └── 
iwf.py ├── raps ├── __init__.py └── test_pf.py ├── rcg ├── __init__.py ├── test_rcg.py ├── rcg.py~ └── rcg.py ├── testsuite.py ├── README.md └── documentation └── structure.txt /fsf/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /optim/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /world/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /configure/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /plotting/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /quantmap/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /results/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /plotting/README: -------------------------------------------------------------------------------- 1 | This folder contains individual scripts for very specific plots reading from very specific csv files for publication. I have left them here for salvaging. 
The required data files for plotting are generated by scripts in the /results folder. 2 | -------------------------------------------------------------------------------- /scripts/README: -------------------------------------------------------------------------------- 1 | This folder contains scripts that run simulations with certain configuration files and store output in a certain way. You usually put together your desired simulation in one of these files and then execute that in parallel until you have enough data. 2 | -------------------------------------------------------------------------------- /results/README: -------------------------------------------------------------------------------- 1 | This folder contains a range of data collection scripts. Since parallely executed simulations generate a large number of results files, those need to be collected, processed and arranged for plotting. These scripts take care of that. If you need help figuring it out, contact me at h.holtkamp@gmail.com 2 | -------------------------------------------------------------------------------- /iwf/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' All files related to inverse water-filling 4 | 5 | File: __init__.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | -------------------------------------------------------------------------------- /world/README: -------------------------------------------------------------------------------- 1 | This folder contains the world module which puts together a hexagonally arranged map of base stations and distributes mobiles on it. I also played around with pre-computing the world map and reusing a map exactly to have comparable and repeatable results. 
But I never got around to fully separating those parameters affect the world, wconfig, and those that are modified specifically. 2 | -------------------------------------------------------------------------------- /raps/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' All files related to resource allocation using power control and sleep. 4 | 5 | File: __init__.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | -------------------------------------------------------------------------------- /rcg/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' All files related to Kivanc Rate-Craving-Greedy subcarrier allocation algorithm. 
4 | 5 | File: __init__.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | -------------------------------------------------------------------------------- /configure/settings.cfg: -------------------------------------------------------------------------------- 1 | [General] 2 | tiers = 3 3 | consideredtiers = 1 4 | intersitedistance = 500 5 | userspercell = 10 6 | numcenterusers = 10 7 | arrivalrate = 0.12 8 | timestep = 1 9 | sectorsperbs = 3 10 | lnssd = 8 11 | forbiddendistance = 35 12 | iterations = 10 13 | mobilevelocity = 0 14 | enablefrequencyselectivefading = True 15 | numtimeslots = 10 16 | numfreqchunks = 50 17 | centerfrequency = 2000000000.0 18 | simulationtime = 0.1 19 | systembandwidth = 10000000.0 20 | temperature = 290 21 | boltzmannconstant = 4e-23 22 | p0 = 200 23 | m = 3.75 24 | ps = 90 25 | 26 | -------------------------------------------------------------------------------- /testsuite.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Project test suite. Numpy and Python 2.7 required. 
4 | 5 | File: testsuite.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | import unittest 17 | 18 | # discover() requires Python 2.7 with numpy 19 | suite = unittest.TestLoader().discover('.') # search from here 20 | unittest.TextTestRunner(verbosity=2).run(suite) 21 | -------------------------------------------------------------------------------- /world/loadprecomputedworld.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Load precomputed world from pickle file 4 | 5 | File: loadprecomputedworld.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | import sys, os 17 | import cPickle 18 | import pprint 19 | import world 20 | 21 | def load(filename): 22 | """Load a world from a pickle file""" 23 | 24 | pkl_file = open(filename, 'rb') 25 | while True: 26 | try: 27 | data1 = cPickle.load(pkl_file) 28 | pprint.pprint(data1) 29 | except EOFError: 30 | break 31 | 32 | pkl_file.close() 33 | return data1 34 | 35 | 36 | if __name__ == '__main__': 37 | load(sys.argv[1]) 38 | -------------------------------------------------------------------------------- /utils/test_utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Unit test for the utils module 4 | 5 | File: test_utils.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = 
"Development" 15 | 16 | import unittest 17 | import numpy as np 18 | import utils 19 | import scipy 20 | 21 | class TestSequenceFunctions(unittest.TestCase): 22 | 23 | def setUp(self): 24 | pass 25 | 26 | def test_ergMIMOcapacity(self): 27 | """Test ergodic MIMO capacity""" 28 | CSI = np.array([[1.-1j,-1.],[-1.,1.]]) 29 | CSI = scipy.dot(CSI, CSI.conj().T) 30 | SNRrx = 1. 31 | cap = utils.ergMIMOCapacityCDITCSIR(CSI, SNRrx) 32 | cap_solution = 1.9068905 33 | np.testing.assert_almost_equal(cap, cap_solution, decimal=5) 34 | 35 | if __name__ == '__main__': 36 | unittest.main() 37 | -------------------------------------------------------------------------------- /world/physicalentity.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' 4 | This class provides all features for a physical object that has a position, 5 | shape, etc. 6 | 7 | File: physicalentity.py 8 | ''' 9 | 10 | __author__ = "Hauke Holtkamp" 11 | __credits__ = "Hauke Holtkamp" 12 | __license__ = "unknown" 13 | __version__ = "unknown" 14 | __maintainer__ = "Hauke Holtkamp" 15 | __email__ = "h.holtkamp@gmail.com" 16 | __status__ = "Development" 17 | 18 | 19 | 20 | from numpy import * 21 | 22 | class PhysicalEntity(object): 23 | "This class provides basic physical properties" 24 | 25 | def __init__(self, position, velocity=0): 26 | 'Must have position. 
Stored as numpy array' 27 | self._position = array(position) # 2D 28 | self.velocity = velocity # meters per second 29 | 30 | @property 31 | def position(self): 32 | """The object's physical position.""" 33 | return self._position 34 | 35 | @position.setter 36 | def position(self, value): 37 | self._position = value 38 | 39 | if __name__ == '__main__': 40 | 41 | a = PhysicalEntity(1) 42 | b = PhysicalEntity([0,0]) 43 | c = PhysicalEntity(array([1,1])) 44 | d = PhysicalEntity(1,1) 45 | print a.position, b.position, c.position, d.velocity 46 | -------------------------------------------------------------------------------- /plotting/plot_CDF_from_file.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Plot a cdf from a csv file 4 | 5 | File: plot_CDF_from_file.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | def plot_cdf_from_file(filename): 17 | """Open file, store cdf to .pdf and .png""" 18 | 19 | import numpy as np 20 | import matplotlib.pyplot as plt 21 | import pylab as P 22 | data = np.genfromtxt(filename, delimiter=',') 23 | 24 | # SINR data is best presented in dB 25 | from utils import utils 26 | data = utils.WTodB(data) 27 | 28 | import cdf_plot 29 | label = [ "Iteration %d" %i for i in np.arange(data.shape[0])+1] 30 | cdf_plot.cdf_plot(data, '-', label=label) 31 | # plt.xlabel(xlabel) 32 | # plt.ylabel(ylabel) 33 | # plt.title(title) 34 | P.arrow( 0, 50, 40, 0, fc="k", ec="k", 35 | head_width=3, head_length=5 ) 36 | plt.savefig(filename+'.pdf', format='pdf') 37 | plt.savefig(filename+'.png', format='png') 38 | 39 | 40 | 41 | if __name__ == '__main__': 42 | import sys 43 | filename = sys.argv[1] 44 | plot_cdf_from_file(filename) 45 | 
-------------------------------------------------------------------------------- /plotting/plotarray.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Read from csv, plot one line for each row. The x-axis is always arange(len(data)) Save as pdf and png. 4 | 5 | File: plotArray.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | def plot_arr(filename, title, xlabel, ylabel): 17 | """Read file, create MPL plot with tile, and axis labels.""" 18 | 19 | import numpy as np 20 | import matplotlib.pyplot as plt 21 | import matplotlib.mlab as mlab 22 | data = mlab.csv2rec(filename, delimiter=',') 23 | 24 | x = np.arange(len(data[0])) 25 | color = 'b' 26 | for i in np.arange(data.shape[0]): 27 | y = np.around(data[i].tolist(), decimals=2) # I don't get recarrays. This is a workaround. 
28 | if i > 9: 29 | color = 'g' 30 | plt.plot(x, y, color) 31 | plt.xlabel(xlabel) 32 | plt.ylabel(ylabel) 33 | plt.title(title) 34 | plt.savefig(filename+'.pdf', format='pdf') 35 | plt.savefig(filename+'.png', format='png') 36 | 37 | if __name__ == '__main__': 38 | import sys 39 | filename = sys.argv[1] 40 | plot_arr(filename, 'RAPS power consumption', 'Iterations', 'Power consumption in Watt') 41 | -------------------------------------------------------------------------------- /plotting/sinr_analysis_plot_ICC2013.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Plot a cdf from a csv file 4 | 5 | File: plot_CDF_from_file.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | def plot_cdf_from_file(filename): 17 | """Open file, store cdf to .pdf and .png""" 18 | 19 | import numpy as np 20 | import matplotlib.pyplot as plt 21 | data = np.genfromtxt(filename, delimiter=',') 22 | 23 | # convert zeros to nans and clear empty rows 24 | data[np.where(data==0)] = np.nan 25 | data = data[~np.isnan(data).all(1)] 26 | if not data.size: 27 | print 'No data in ' + str(filename) 28 | 29 | # SINR data is best presented in dB 30 | from utils import utils 31 | data = utils.WTodB(data) 32 | 33 | import cdf_plot 34 | label = [ "Iteration %d" %i for i in np.arange(data.shape[0])+1] 35 | cdf_plot.cdf_plot(data, '-', label=label) 36 | # plt.xlabel(xlabel) 37 | # plt.ylabel(ylabel) 38 | # plt.title(title) 39 | plt.savefig(filename+'.pdf', format='pdf') 40 | plt.savefig(filename+'.png', format='png') 41 | 42 | 43 | 44 | if __name__ == '__main__': 45 | import sys 46 | filename = sys.argv[1] 47 | plot_cdf_from_file(filename) 48 | -------------------------------------------------------------------------------- 
/plotting/generate_network_figure.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Generate the figure of a large network 4 | 5 | File: generate_network_figure.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | from world import world 17 | from configure import phy, wconfig 18 | from plotting import networkplotter 19 | import os 20 | 21 | outpath = 'out/network_plot/' 22 | if not os.path.exists(outpath): 23 | os.makedirs(outpath) 24 | 25 | def main(): 26 | """Generate world and figure""" 27 | 28 | configPath = 'configure/settings_network_plot.cfg' 29 | wconf = wconfig.Wconfig(configPath) 30 | phy_ = phy.PHY(configPath) 31 | wconf.enablefrequencyselectivefading = False 32 | 33 | wrld = world.World(wconf, phy_) 34 | wrld.associatePathlosses() 35 | wrld.calculateSINRs() 36 | 37 | networkplotter.NetworkPlotter().plotAssociatedMobiles(wrld, outpath+'assocation_plot') 38 | networkplotter.NetworkPlotter().plotBasicWorld(wrld, outpath+'basic_plot') 39 | networkplotter.NetworkPlotter().plotAssociatedConsideredMobiles(wrld, outpath+'association_considered') 40 | networkplotter.NetworkPlotter().plotConsideredWorld(wrld, outpath+'considered') 41 | 42 | if __name__ == '__main__': 43 | main() 44 | -------------------------------------------------------------------------------- /configure/phy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Holds PHY layer parameters 4 | 5 | File: phy.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = 
"Development" 15 | 16 | import ConfigParser 17 | import sys 18 | 19 | class PHY(): 20 | """ Holds PHY layer parameters.""" 21 | def __init__(self, pathToSettingsFile): 22 | try: 23 | open(pathToSettingsFile) 24 | except IOError: 25 | print pathToSettingsFile + ' not found. Try running createconfig.py. Aborting...' 26 | sys.exit(0) 27 | config = ConfigParser.RawConfigParser() 28 | config.read(pathToSettingsFile) 29 | 30 | # getfloat() raises an exception if the value is not a float 31 | # getint() and getboolean() also do this for their respective types 32 | 33 | self.numTimeslots = config.getint('General', 'numTimeslots') 34 | self.numFreqChunks = config.getint('General', 'numFreqChunks') 35 | self.centerFrequency = config.getfloat('General', 'centerFrequency') 36 | self.simulationTime = config.getfloat('General', 'simulationTime') 37 | self.systemBandwidth = config.getfloat('General', 'systemBandwidth') 38 | self.iterations = config.getint('General', 'iterations') 39 | self.pMax = config.getfloat('General', 'pmax') 40 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | pyltesim 2 | ======== 3 | 4 | Modules for simulating an LTE cellular network in python. 
5 | 6 | Features 7 | ======== 8 | * Hexagonal base station distribution 9 | * Uniform mobile station distribution 10 | * Configuration through config files 11 | * Separate data generation, collection and plotting for large scale simulation 12 | * WINNER channel model with mobility, fast fading, directional antenna gain 13 | * LTE OFDMA transmission frame 14 | * Numerous data visualization scripts 15 | * ipopt integration for convex optimization 16 | * Large number of unit tests 17 | * Script samples for large scale simulations 18 | 19 | Background 20 | ========== 21 | 22 | For some of my [academic papers](http://scholar.google.de/citations?hl=en&user=tEM1S0EAAAAJ), I was in need of simulating the link layer of hexagonally arranged base stations that communicate with mobiles using OFDMA. For that I built this simulator. 23 | 24 | It provides a world module that is definitely reusable for the hexagonal arrangements (or others with small modifications) and the resource allocation. The general module setup with configuration, scripts, results handling etc. could also be reused. Fading modeling or rate craving greedy could also be of interest to some. Other modules like /optim, /raps, or /quantmap are very specifically concerned with the content of my research papers (resource allocation algorithms I propose). 25 | 26 | Requirements 27 | ============ 28 | * python 2.7.3 with ssl 29 | * numpy 30 | * scipy 31 | * matplotlib 32 | * pyipopt/ipopt 33 | * recommended: virtualenv 34 | 35 | Misc 36 | ======= 37 | Feel free to contact me at h.holtkamp@gmail.com. 
38 | -------------------------------------------------------------------------------- /raps/test_pf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Unit tests for proportional fair module 4 | 5 | File: test_pf.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | 17 | import pf 18 | from configure import phy, wconfig 19 | from world import world 20 | 21 | import numpy as np 22 | import copy 23 | import unittest 24 | 25 | class TestSequenceFunctions(unittest.TestCase): 26 | 27 | def setUp(self): 28 | configPath = 'configure/settings1tier1sector.cfg' 29 | self.phy = phy.PHY(configPath) 30 | self.wconf = wconfig.Wconfig(configPath) 31 | 32 | def test_pf(self): 33 | wconf = copy.copy(self.wconf) 34 | wconf.hexTiers = 0 35 | wconf.usersPerCell = 10 36 | wconf.mobileVelocity = 100 37 | world1 = world.World(wconf, self.phy) 38 | world1.associatePathlosses() 39 | world1.calculateSINRs() 40 | rate = 1 41 | avg_rate = np.ones(len(world1.mobiles)) 42 | 43 | # when avg_rate is near zero for one user, that user 0 should receive all RBs 44 | avg_rate[0] = 1e-20 45 | alloc = pf.pf(world1, world1.cells[0], world1.mobiles, rate, avg_rate) 46 | np.testing.assert_array_equal(alloc, np.zeros([50,10])) 47 | 48 | rate = 1e6 49 | pSupplyPC = pf.pf_ba(world1, world1.cells[0], world1.mobiles, rate) 50 | pSupplyDTX = pf.pf_dtx(world1, world1.cells[0], world1.mobiles, rate) 51 | 52 | 53 | 54 | if __name__ == '__main__': 55 | unittest.main() 56 | -------------------------------------------------------------------------------- /quantmap/test_quantmap.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Unit test for the quantization module 4 | 5 
| File: test_optim.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | from quantmap import quantmap 17 | import unittest 18 | import numpy as np 19 | from utils import utils 20 | import scipy.linalg 21 | 22 | class TestSequenceFunctions(unittest.TestCase): 23 | 24 | def setUp(self): 25 | pass 26 | 27 | def test_quantmap(self): 28 | """Test quantmap. Since it's randomly permuted, we cannot check the exact outcome. Test that the outcome is proportional to the request.""" 29 | alloc = np.array([ 0.1147, 0.0381, 0.1080, 0.0721, 0.0640, 0.1477, 0.1048, 0.1607 , 0.0416, 0.1378, 0.0107 ]) 30 | 31 | K = alloc.size-1 # users 32 | N = 50 # subcarriers on 10 MHz 33 | T = 10 # timeslots per subframe 34 | 35 | 36 | #outMap = quantmap(alloc, N, T) 37 | 38 | # All resources must be used 39 | #self.assertEqual(N*T, np.sum(np.sum(outMap))) 40 | 41 | # Test that a user receives more than requested (conservative assignment) 42 | alloc = np.array([ 0., 0., 0., 0., 0., 0., 0., 0. , 0., 0.5, 0.5 ]) 43 | 44 | K = alloc.size-1 # users 45 | outMap = quantmap(alloc, N, T) 46 | answer = np.nansum(outMap[:,-1]) # last user's resources 47 | 48 | self.assertTrue(N*T*alloc[9] < answer) 49 | 50 | if __name__ == '__main__': 51 | unittest.main() 52 | -------------------------------------------------------------------------------- /scripts/SINRcdfUniform.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Generates SINR cdf for a hexagonal mobile network. 
4 | 5 | File: runscript.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | 17 | # libraries 18 | import random 19 | from numpy import * 20 | import math 21 | import ConfigParser 22 | import sys 23 | from plotting import networkplotter 24 | 25 | # custom 26 | from world import * 27 | from world import world 28 | from configure import phy, wconfig 29 | 30 | ############### Read config file ##################### 31 | configPath = 'configure/settingsBeFemtoCalibration.cfg' 32 | ############# Generate map ######################### 33 | 34 | SINRlist = [] 35 | for itr in range(1,iterations+1): 36 | wrld = world.World(wconfig.Wconfig(configPath), phy.PHY(configPath)) 37 | wrld.associatePathlosses() 38 | wrld.calculateSINRs() 39 | 40 | ### Wideband SINR ### 41 | SINRlist += [mob.SINR for mob in wrld.consideredMobiles] 42 | print '%(a)d out of %(b)d done.' % {"a": itr, "b": iterations} 43 | 44 | ### OFDMA SINR ### 45 | li = [list(mob.OFDMA_SINR.ravel()) for mob in wrld.consideredMobiles] 46 | li = sum(li,[]) # flatten list of lists 47 | mobileSINRs[i-1].extend(li) 48 | 49 | ############### Store results to file ############# 50 | 51 | filename = 'results.txt' 52 | from results import resultshandler 53 | resultshandler.writeResultsToFile(filename, SINRlist) 54 | 55 | ########### Plot #################################### 56 | from plotting import cdf_plot 57 | arrSINRlist = array(SINRlist) 58 | cdf_plot.cdf_plot(arrSINRlist, '-') 59 | -------------------------------------------------------------------------------- /results/resultshandler.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Handles results like writing to file. 
4 | 5 | File: resultshandler.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | import datetime 17 | import numpy as np 18 | import os 19 | 20 | # Global results path 21 | path = 'results/' 22 | 23 | def writeResultsToFile(filename, results): 24 | '''Write strings to a text file.''' 25 | timesuffix = datetime.datetime.now().strftime("_%y_%m_%d_%I:%M:%S%p") 26 | if filename is None: 27 | filename = "data" 28 | 29 | results = ", ".join(map(str, results)) 30 | f = open(path+filename+timesuffix+'.txt','w') 31 | f.write(results) 32 | f.close() 33 | 34 | def saveBin(filenamePrefix, array): 35 | """Save one array to binary savez file in the results folder. The current current date and time are appended to the filename.""" 36 | 37 | timesuffix = datetime.datetime.now().strftime("_%y_%m_%d_%I:%M:%S%p") 38 | if filenamePrefix is None: 39 | filenamePrefix = "data" 40 | 41 | np.savez(path+filenamePrefix+timesuffix, array) 42 | 43 | def loadBin(filename=None): 44 | """Load binary file. 
Either use filename or call most recent npz file from results folder.""" 45 | import glob 46 | 47 | filelist = glob.glob(path+'*.npz') 48 | newest = max(filelist, key=lambda x: os.stat(x).st_mtime) 49 | data = np.load(newest) 50 | return data['arr_0'], newest # default name #TODO save any number of arrays by name 51 | 52 | if __name__ == '__main__': 53 | abc = np.arange(100) 54 | 55 | #saveBin(None, abc) 56 | print loadBin() 57 | 58 | 59 | -------------------------------------------------------------------------------- /world/basestation.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' The base station class 4 | 5 | File: basestation.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | from physicalentity import PhysicalEntity 17 | from utils import utils 18 | import numpy as np 19 | 20 | class BaseStation(PhysicalEntity): 21 | "Base station class" 22 | 23 | id_ = 0 24 | 25 | def __init__(self, position, typ='macro', p0=0, m=1, pS=0, antennas=2): # power in dBm 26 | PhysicalEntity.__init__(self, position) 27 | self.typ = typ 28 | self._cells = None # If sectored, the BS serves multiple. This is a list of cells. 29 | self.p0 = p0 # power consumption at zero transmission 30 | self.m = m # power consumption load factor. 
How consumption rises with transmission power 31 | self.pS = pS # power consumption in sleep mode 32 | 33 | self.id_ = BaseStation.id_ 34 | BaseStation.id_ += 1 35 | 36 | # custom print 37 | def __repr__(self): 38 | return ''.join([self.typ, " BS at ", str(self.position)]) 39 | 40 | # inform about unwanted changes to sector 41 | @property 42 | def sectors(self): 43 | raise DeprecationWarning 44 | return self.cells 45 | 46 | @sectors.setter 47 | def sectors(self, value): 48 | raise DeprecationWarning 49 | self.cells = value 50 | 51 | @property 52 | def cells(self): 53 | if self._cells is None: 54 | self._cells = [] 55 | return self._cells 56 | 57 | @cells.setter 58 | def cells(self, value): 59 | self._cells = value 60 | -------------------------------------------------------------------------------- /plotting/e_per_bit_analysis_plot_ICC2013.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Generate the energy per bit comparison plot for ICC 2013. 4 | x axis: sum rate of the center cell 5 | y axis: cell power consumption 6 | 7 | File: e_per_bit_analysis_plot_ICC2013.py 8 | ''' 9 | 10 | __author__ = "Hauke Holtkamp" 11 | __credits__ = "Hauke Holtkamp" 12 | __license__ = "unknown" 13 | __version__ = "unknown" 14 | __maintainer__ = "Hauke Holtkamp" 15 | __email__ = "h.holtkamp@gmail.com" 16 | __status__ = "Development" 17 | 18 | def plot(filename): 19 | """ Open data file, process, generate pdf and png""" 20 | 21 | import numpy as np 22 | import matplotlib.pyplot as plt 23 | from utils import utils 24 | 25 | # data comes in a csv 26 | data = np.genfromtxt(filename, delimiter=',') 27 | 28 | # first row is x-axis (number of users in cell). Each user has a fixed rate. 
29 | x = data[0]*2 30 | 31 | fig = plt.figure() 32 | ax1 = fig.add_subplot(111) 33 | # second row is BA 34 | ax1.plot(x, data[1]/x, '-rx', label='Sequential bandwidth adaptation') 35 | 36 | # third row is PF 37 | ax1.plot(x, data[2]/x, '-gd', label='Bandwidth-adapting proportional fair') 38 | 39 | # fourth row is RAPS 40 | ax1.plot(x, data[3]/x, '-bs', label='RAPS') 41 | 42 | # plt.axis( [5, 35, 100, 400]) 43 | plt.legend(loc='upper right') 44 | xlabel = 'Cell sum rate in Mpbs' 45 | ylabel = 'Microjoule per bit' 46 | title = 'Energy per bit over sum rate' 47 | ax1.set_xlabel(xlabel) 48 | ax1.set_ylabel(ylabel) 49 | plt.title(title) 50 | plt.savefig(filename+'_eperbit_'+'.pdf', format='pdf') 51 | plt.savefig(filename+'_eperbit_'+'.png', format='png') 52 | 53 | 54 | if __name__ == '__main__': 55 | import sys 56 | filename = sys.argv[1] 57 | plot(filename) 58 | -------------------------------------------------------------------------------- /utils/utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Utility functions, mostly static, like dbm2db etc. 
''' Utility functions, mostly static, like dbm2db etc.

File: utils.py
'''

__author__ = "Hauke Holtkamp"
__credits__ = "Hauke Holtkamp"
__license__ = "unknown"
__version__ = "unknown"
__maintainer__ = "Hauke Holtkamp"
__email__ = "h.holtkamp@gmail.com"
__status__ = "Development"

import numpy as np
import scipy.linalg


def dBmTomW(dBmVal):
    """Convert a power value in dBm to milliwatts."""
    return 10.0**((dBmVal - 30.0)/10.0)


def mWTodBm(mWVal):
    """Convert a power value in milliwatts to dBm."""
    return 10.0 * np.log10(mWVal) + 30.0


def dBToW(dBVal):
    """Convert a ratio in dB to linear scale."""
    return 10.0**(dBVal / 10.0)


def WTodB(WVal):
    """Convert a linear-scale value to dB."""
    return 10.0 * np.log10(WVal)


def db2dbm(db):
    """Convert dB to dBm (shift the reference by 30 dB)."""
    return db + 30


def dbm2db(dbm):
    """Convert dBm to dB (shift the reference by 30 dB)."""
    return dbm - 30


def rayleighChannel(dim1, dim2):
    """Return a (dim1, dim2) array of zero-mean circularly-symmetric complex
    Gaussian samples with unit variance, whose magnitudes are Rayleigh
    distributed.

    BUGFIX: the imaginary component previously used np.random.rand (uniform
    on [0, 1)) instead of np.random.randn (standard normal). Both the real
    and imaginary parts must be Gaussian for the envelope to be Rayleigh
    distributed, as the real part already was.
    """
    return np.sqrt(0.5)*(np.random.randn(dim1, dim2)
                         + 1j*np.random.randn(dim1, dim2))


def ergMIMOCapacityCDITCSIR(SINR, SNRrx):
    """Ergodic MIMO capacity with Equal Power precoding by the book
    MIMO wireless communications.

    Parameters:
    SINR  -- 2-D array of shape (M, N): M transmit by N receive antennas.
             NOTE(review): the sum below adds an N-by-N identity to SINR,
             so this appears to assume M == N -- confirm with callers.
    SNRrx -- receive SNR in linear scale.

    Raises ValueError if SINR has more than two dimensions.
    """
    if len(SINR.shape) > 2:
        raise ValueError('Too many dimensions')

    # transmit antennas
    M = SINR.shape[0]

    # receive antennas
    N = SINR.shape[1]

    capacity = np.log2( np.linalg.det( np.identity(N) + SNRrx/M * SINR ) )
    return capacity


def shift(arr, n):
    """ Shift a vector with wrap around. Useful for aranges in for loops."""
    return np.concatenate((arr[n:], arr[:n]))

if __name__ == '__main__':
    pass
# --- editor backup file /utils/utils.py~ (duplicate of the above) began here;
# --- its content continues in the next chunk.
4 | 5 | File: utils.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | import numpy as np 17 | import scipy.linalg 18 | 19 | #Converts dBm Watt 20 | def dBmTomW(dBmVal): 21 | return 10.0**((dBmVal - 30.0)/10.0) 22 | 23 | #Converts mW to dBm 24 | def mWTodBm(mWVal): 25 | return 10.0 * np.log10(mWVal) + 30.0 26 | 27 | #Converts dB to Watt 28 | def dBToW(dBVal): 29 | return 10.0**(dBVal / 10.0) 30 | 31 | #Converts Watt to dB 32 | def WTodB(WVal): 33 | return 10.0 * np.log10(WVal) 34 | 35 | #Converts dB to dBm 36 | def db2dbm(db): 37 | return db + 30 38 | 39 | #Converts dBm to dB 40 | def dbm2db(dbm): 41 | return dbm - 30 42 | 43 | # Rayleigh channel value 44 | def rayleighChannel(dim1, dim2): 45 | """Return array of Rayleigh distributed values""" 46 | return np.sqrt(0.5)*(np.random.randn(dim1,dim2)+1j*np.random.rand(dim1,dim2)) 47 | 48 | # Ergodic MIMO capacity 49 | def ergMIMOCapacityCDITCSIR(SINR, SNRrx): 50 | """Ergodic MIMO capacity with Equal Power precoding by the book MIMO wireless communications""" 51 | 52 | if len(SINR.shape) > 2: 53 | raise ValueError('Too many dimensions') 54 | 55 | # transmit anntenas 56 | M = SINR.shape[0] 57 | 58 | # receive antennas 59 | N = SINR.shape[1] 60 | 61 | capacity = np.log2( np.linalg.det( np.identity(N) + SNRrx/M * SINR ) ) 62 | return capacity 63 | 64 | def shift(arr, n): 65 | """ Shift a vector with wrap around. 
Useful for aranges in for loops.""" 66 | return np.concatenate((arr[n:], arr[:n])) 67 | 68 | if __name__ == '__main__': 69 | pass 70 | -------------------------------------------------------------------------------- /scripts/precomputeworld.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Precompute one world according to config file and store in uuid file. 4 | 5 | File: precomputeworld.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | import sys, getopt, os 17 | from world import world 18 | from configure import wconfig, phy 19 | import cPickle 20 | import uuid 21 | import shutil 22 | import logging 23 | logger = logging.getLogger('RAPS_script') # takes care of printing to std out 24 | 25 | 26 | def main(configfile, outfolder=os.path.join('out','worlds')): 27 | 28 | try: 29 | with open(configfile) as f: pass 30 | except IOError as e: 31 | print ' does not exist.' 
32 | sys.exit() 33 | 34 | outpath = os.path.join(outfolder, str(uuid.uuid4())+'.pkl') 35 | if not os.path.exists(outfolder): 36 | os.makedirs(outfolder) 37 | output = open(outpath, 'wb') 38 | 39 | shutil.copyfile(configfile, os.path.splitext(outpath)[0]+'.cfg') # save configuration 40 | 41 | wrld = generateWorld(configfile) 42 | cPickle.dump(wrld, output) 43 | 44 | output.close() 45 | 46 | print "=" * 44 47 | 48 | def generateWorld(configfile): 49 | """Generate a world according to configfile""" 50 | wrld = world.World(wconfig.Wconfig(configfile), phy.PHY(configfile)) 51 | wrld.associatePathlosses() 52 | wrld.calculateSINRs() 53 | wrld.fix_center_cell_users() # set 0 in settings file to disable 54 | return wrld 55 | 56 | if __name__ == "__main__": 57 | 58 | if len(sys.argv) == 2: 59 | configfile = str(sys.argv[1]) 60 | else: 61 | print "Usage: precomputeworlds.py " 62 | sys.exit() 63 | 64 | main(configfile) 65 | -------------------------------------------------------------------------------- /rcg/test_rcg.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Unit test for the RCG module 4 | 5 | File: test_rcg.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | import rcg 17 | import unittest 18 | import numpy as np 19 | 20 | class TestSequenceFunctions(unittest.TestCase): 21 | 22 | def setUp(self): 23 | self.costmap1 = np.array([[4,1,2,3],[7,6,3,5],[6,2,4,1],[5,8,1,8],[3,4,6,2],[8,7,8,6],[2,3,5,4],[1,5,7,7]]) # from the Kivanc paper 24 | self.result1 = np.array([3,1,0,1,2,0,2,3]) 25 | self.target1 = np.array([2,2,2,2]) 26 | 27 | self.costmap2 = np.array([[8,5,2,7],[7,6,3,5],[6,2,4,1],[5,8,1,8],[3,4,6,2],[3,7,8,4],[2,3,5,6],[1,1,7,3]]) # from Kivanc thesis 28 | self.result2 = 
np.array([[3,0,0,1,2,1,3,2]]) 29 | self.target2 = np.array([2,2,2,2]) 30 | 31 | def test_rcg1(self): 32 | # run the example from the Kivanc paper 33 | outMap, initial = rcg.rcg(self.costmap1, self.target1) 34 | self.assertTrue((outMap == self.result1).all()) 35 | 36 | def test_rcg2(self): 37 | # example from Kivanc thesis 38 | outMap, initial = rcg.rcg(self.costmap2, self.target2) 39 | self.assertTrue((outMap == self.result2).all()) 40 | 41 | def test_rcg3(self): 42 | # random numbers for large array 43 | users = 10 44 | subcarriers = 50 45 | subcarriermap = np.around(8*np.random.rand(subcarriers, users)) # rounding makes it readable 46 | target = np.repeat(np.array([subcarriers/users]), users) 47 | outMap, initial = rcg.rcg(subcarriermap, target) 48 | self.assertTrue((np.bincount(np.int32(outMap)) == target).all()) 49 | 50 | 51 | 52 | 53 | 54 | if __name__ == '__main__': 55 | unittest.main() 56 | -------------------------------------------------------------------------------- /results/rename_folders.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Rename folder according to some configurations from the settings file. Makes data analysis more human readable. 
When a folder already exists, increment the filename 4 | 5 | File: rename_folders.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | import glob 17 | import numpy as np 18 | import sys 19 | import os 20 | import ConfigParser 21 | 22 | def main(srcdir): 23 | """Walk srcdir and rename each dir that contains a settings file according to its contents.""" 24 | 25 | dirlist = [] 26 | for dirname, dirnames, filenames in os.walk(srcdir): 27 | for subdir in dirnames: 28 | for filename in glob.glob(os.path.join(dirname, subdir)+'/*settings*'): 29 | dirlist.append(os.path.join(dirname, subdir)) 30 | 31 | for subdir in dirlist: 32 | for filename in glob.glob(subdir+'/*settings*'): 33 | config = ConfigParser.RawConfigParser() 34 | config.read(filename) 35 | user_rate = config.get('General', 'user_rate') 36 | sleep_alignment = config.get('General', 'sleep_alignment') 37 | 38 | srchead, srctail = os.path.split(subdir) 39 | destbase = os.path.join(srchead, sleep_alignment + '_' + str(user_rate)) 40 | i = 0 41 | destname = destbase + '_' + str(i) 42 | 43 | while os.path.exists(destname): 44 | i += 1 45 | destname = destbase + '_' + str(i) 46 | 47 | print 'Renaming ' + subdir + ' to ' + destname + '...' 48 | os.rename(subdir, destname) 49 | 50 | 51 | 52 | 53 | if __name__ == '__main__': 54 | print "'Attempting file operation. 
Confirm with 'yes':" 55 | import readline 56 | txt = raw_input() 57 | if not txt == 'yes': 58 | sys.exit(1) 59 | 60 | srcdir = sys.argv[1] 61 | 62 | main(srcdir) 63 | -------------------------------------------------------------------------------- /plotting/sum_rate_analysis_plot_ICC2013.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Generate the sum rate comparison plot for ICC 2013. 4 | x axis: sum rate of the center cell 5 | y axis: cell power consumption 6 | second y axis: possibly power per bit 7 | 8 | File: sum_rate_analysis_plot_ICC2013.py 9 | ''' 10 | 11 | __author__ = "Hauke Holtkamp" 12 | __credits__ = "Hauke Holtkamp" 13 | __license__ = "unknown" 14 | __version__ = "unknown" 15 | __maintainer__ = "Hauke Holtkamp" 16 | __email__ = "h.holtkamp@gmail.com" 17 | __status__ = "Development" 18 | 19 | def plot(filename): 20 | """ Open data file, process, generate pdf and png""" 21 | 22 | import numpy as np 23 | import matplotlib.pyplot as plt 24 | from utils import utils 25 | 26 | # data comes in a csv 27 | data = np.genfromtxt(filename, delimiter=',') 28 | 29 | # first row is x-axis (number of users in cell). Each user has a fixed rate. 
30 | x = data[0] # Mbps 31 | 32 | fig = plt.figure() 33 | ax1 = fig.add_subplot(111) 34 | # second row is BA 35 | ax1.plot(x, data[1], '-k+', label='Sequential bandwidth adaptation', markersize=10) 36 | 37 | ax1.plot(x, data[2], '-ro', label='Sequential overlapping DTX', markersize=10, linewidth=5) 38 | 39 | ax1.plot(x, data[8], '-r^', label='Kivanc power control', markersize=10, linewidth=5) 40 | 41 | ax1.plot(x, data[3], '-b*', label='Sequential random shift DTX', markersize=10) 42 | 43 | # ax1.plot(x, data[4], '-cp', label='PF bandwidth adapting', markersize=10) 44 | 45 | # ax1.plot(x, data[5], '-ms', label='PF DTX', markersize=10) 46 | 47 | ax1.plot(x, data[6], '-yx', label='RAPS overlapping DTX', markersize=10) 48 | 49 | ax1.plot(x, data[7], '-gD', label='RAPS random shift DTX', markersize=10) 50 | 51 | 52 | plt.axis( [8, 41, 100, 440]) 53 | plt.legend(loc='upper left', prop={'size':10}) 54 | xlabel = 'Cell sum rate in Mpbs' 55 | ylabel = 'Average cell power consumption in Watt' 56 | title = 'Consumption over sum rate' 57 | ax1.set_xlabel(xlabel) 58 | ax1.set_ylabel(ylabel) 59 | plt.title(title) 60 | plt.savefig(filename+'.pdf', format='pdf') 61 | plt.savefig(filename+'.png', format='png') 62 | 63 | 64 | if __name__ == '__main__': 65 | import sys 66 | filename = sys.argv[1] 67 | plot(filename) 68 | -------------------------------------------------------------------------------- /plotting/plot_delivered_individual_seqDTX.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Generate the delivered rate plot. 
4 | x axis: iterations 5 | y axis: average delivered rate 6 | 7 | ''' 8 | 9 | __author__ = "Hauke Holtkamp" 10 | __credits__ = "Hauke Holtkamp" 11 | __license__ = "unknown" 12 | __version__ = "unknown" 13 | __maintainer__ = "Hauke Holtkamp" 14 | __email__ = "h.holtkamp@gmail.com" 15 | __status__ = "Development" 16 | 17 | def plot(filename): 18 | """ Open data file, process, generate pdf and png""" 19 | 20 | import numpy as np 21 | import matplotlib.pyplot as plt 22 | from utils import utils 23 | 24 | rate = 1 25 | 26 | # data comes in a csv 27 | data = np.genfromtxt(filename, delimiter=',') 28 | 29 | # first row is x-axis (number of users in cell). Each user has a fixed rate. 30 | x = data[0] # Mbps 31 | 32 | fig = plt.figure() 33 | ax1 = fig.add_subplot(111) 34 | # second row is BA 35 | ax1.plot(x, data[1], '-k+', label='Sequential alignment', markersize=10) 36 | 37 | # ax1.plot(x, data[2], '-ro', label='Random shift each iter', markersize=10) 38 | 39 | # ax1.plot(x, data[3], '-c^', label='Random shift once', markersize=10) 40 | 41 | ax1.plot(x, data[4], '-b*', label='Random alignment', markersize=10) 42 | 43 | # ax1.plot(x, data[4], '-cp', label='PF bandwidth adapting', markersize=10) 44 | 45 | 46 | # ax1.plot(x, data[5], '-yx', label='Random once', markersize=10) 47 | 48 | ax1.plot(x, data[6], '-gD', label='p-persistent SINR ranking', markersize=10) 49 | 50 | # ax1.plot(x, data[7], '-kp', label='Static Reuse 3', markersize=10) 51 | 52 | ax1.plot(x, data[8], '-ms', label='DTX alignment with memory', markersize=10) 53 | 54 | plt.axis( [0, 20, 0, 6e5]) 55 | plt.legend(loc='upper right', prop={'size':20}) 56 | plt.setp(ax1.get_xticklabels(), fontsize=20) 57 | plt.setp(ax1.get_yticklabels(), fontsize=20) 58 | xlabel = 'OFDMA frames' 59 | ylabel = 'Average achieved rate' 60 | title = 'Average number of cells where target rate was missed at ' + str(rate) + ' bps' 61 | ax1.set_xlabel(xlabel, size=20) 62 | ax1.set_ylabel(ylabel, size=20) 63 | # plt.title(title) 64 | 
plt.savefig(filename+'.pdf', format='pdf') 65 | plt.savefig(filename+'.png', format='png') 66 | 67 | 68 | if __name__ == '__main__': 69 | import sys 70 | filename = sys.argv[1] 71 | plot(filename) 72 | -------------------------------------------------------------------------------- /plotting/plot_miss_rate_analysis_seqDTX.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Generate the sum rate comparison plot for ICC 2013. 4 | x axis: iterations 5 | y axis: average miss rate 6 | 7 | ''' 8 | 9 | __author__ = "Hauke Holtkamp" 10 | __credits__ = "Hauke Holtkamp" 11 | __license__ = "unknown" 12 | __version__ = "unknown" 13 | __maintainer__ = "Hauke Holtkamp" 14 | __email__ = "h.holtkamp@gmail.com" 15 | __status__ = "Development" 16 | 17 | def plot(filename): 18 | """ Open data file, process, generate pdf and png""" 19 | 20 | import numpy as np 21 | import matplotlib.pyplot as plt 22 | from utils import utils 23 | 24 | rate = 1 25 | 26 | # data comes in a csv 27 | data = np.genfromtxt(filename, delimiter=',') 28 | 29 | # first row is x-axis (number of users in cell). Each user has a fixed rate. 
30 | x = data[0] # Mbps 31 | 32 | fig = plt.figure() 33 | ax1 = fig.add_subplot(111) 34 | # second row is BA 35 | ax1.plot(x, data[1], '-k+', label='Sequential alignment', markersize=10) 36 | 37 | # ax1.plot(x, data[2], '-ro', label='Random shift each iter', markersize=10) 38 | 39 | # ax1.plot(x, data[3], '-c^', label='Random shift once', markersize=10) 40 | 41 | ax1.plot(x, data[4], '-b*', label='Random alignment', markersize=10) 42 | 43 | # ax1.plot(x, data[4], '-cp', label='PF bandwidth adapting', markersize=10) 44 | 45 | 46 | # ax1.plot(x, data[5], '-yx', label='Random once', markersize=10) 47 | 48 | ax1.plot(x, data[6], '-gD', label='p-persistent SINR ranking', markersize=10) 49 | 50 | # ax1.plot(x, data[7], '-kp', label='Static Reuse 3', markersize=10) 51 | 52 | ax1.plot(x, data[8], '-ms', label='DTX alignment with memory', markersize=10) 53 | 54 | # plt.axis( [8, 41, 100, 440]) 55 | plt.legend(loc='lower right', prop={'size':20}) 56 | plt.setp(ax1.get_xticklabels(), fontsize=20) 57 | plt.setp(ax1.get_yticklabels(), fontsize=20) 58 | xlabel = 'OFDMA frames' 59 | ylabel = 'Average miss count' 60 | title = 'Average number of cells where target rate was missed at ' + str(rate) + ' bps' 61 | ax1.set_xlabel(xlabel, size=20) 62 | ax1.set_ylabel(ylabel, size=20) 63 | # plt.title(title) 64 | plt.savefig(filename+'.pdf', format='pdf') 65 | plt.savefig(filename+'.png', format='png') 66 | 67 | 68 | if __name__ == '__main__': 69 | import sys 70 | filename = sys.argv[1] 71 | plot(filename) 72 | -------------------------------------------------------------------------------- /plotting/plot_percentage_satisfied_over_target_rate_seqDTX.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Plot percentage of satisfied users over target rate 4 | x axis: target rate per user 5 | y axis: percentage of satisfied users 6 | 7 | ''' 8 | 9 | __author__ = "Hauke Holtkamp" 10 | __credits__ = "Hauke Holtkamp" 11 | 
__license__ = "unknown"
__version__ = "unknown"
__maintainer__ = "Hauke Holtkamp"
__email__ = "h.holtkamp@gmail.com"
__status__ = "Development"

def plot(filename):
    """ Open data file, process, generate pdf and png.

    The CSV at `filename` holds one scheme per row:
    row 0 -- user target rate, rows 1-4 -- fraction of satisfied users for
    sequential / random / p-persistent / memory DTX alignment respectively.
    Writes <filename>.pdf and <filename>.png next to the input file.
    """

    import numpy as np
    import matplotlib.pyplot as plt

    # data comes in a csv; scale every row by 100 so fractions become percent
    data = np.genfromtxt(filename, delimiter=',')*100

    # first row is the x-axis (user target rate). Row 0 was also scaled by
    # 100 above, hence /1e8 (not /1e6) to obtain Mbps.
    x = data[0]/1e8 # Mbps

    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    # second row is BA
    ax1.plot(x, data[1], '-k+', label='Sequential alignment', markersize=10)

    # ax1.plot(x, data[2], '-ro', label='Random shift each iter', markersize=10)

    # ax1.plot(x, data[3], '-c^', label='Random shift once', markersize=10)

    ax1.plot(x, data[2], '-b*', label='Random alignment', markersize=10)

    # ax1.plot(x, data[4], '-cp', label='PF bandwidth adapting', markersize=10)


    # ax1.plot(x, data[5], '-yx', label='Random once', markersize=10)

    ax1.plot(x, data[3], '-gD', label='P-persistent ranking', markersize=10)

    # ax1.plot(x, data[7], '-kp', label='Static Reuse 3', markersize=10)

    ax1.plot(x, data[4], '-ms', label='DTX alignment with memory', markersize=10)

    # y-axis runs to 108 so markers at 100 % are not clipped
    plt.axis( [1, 3, 0, 108])
    plt.legend(loc='lower left', prop={'size':20})
    plt.setp(ax1.get_xticklabels(), fontsize=20)
    plt.setp(ax1.get_yticklabels(), fontsize=20)
    xlabel = 'User target rate in Mbps'
    ylabel = 'Percentage of satisfied users'
    # removed unused `from utils import utils` and the dead `title` local
    # (a copy-paste leftover reading 'Consumption over sum rate'); the
    # corresponding plt.title call was already commented out.
    ax1.set_xlabel(xlabel,size=20)
    ax1.set_ylabel(ylabel,size=20)
    plt.savefig(filename+'.pdf', format='pdf')
    plt.savefig(filename+'.png', format='png')


if __name__ == '__main__':
    import sys
    filename = sys.argv[1]
plot(filename) 70 | -------------------------------------------------------------------------------- /plotting/plot_power_cons_per_iter_seqDTX.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Generate the sum rate comparison plot for ICC 2013. 4 | x axis: iterations 5 | y axis: average power consumption 6 | 7 | ''' 8 | 9 | __author__ = "Hauke Holtkamp" 10 | __credits__ = "Hauke Holtkamp" 11 | __license__ = "unknown" 12 | __version__ = "unknown" 13 | __maintainer__ = "Hauke Holtkamp" 14 | __email__ = "h.holtkamp@gmail.com" 15 | __status__ = "Development" 16 | 17 | def plot(filename): 18 | """ Open data file, process, generate pdf and png""" 19 | 20 | import numpy as np 21 | import matplotlib.pyplot as plt 22 | from utils import utils 23 | 24 | rate = 1 25 | 26 | # data comes in a csv 27 | data = np.genfromtxt(filename, delimiter=',') 28 | 29 | # first row is x-axis (number of users in cell). Each user has a fixed rate. 
30 | x = data[0] # Mbps 31 | 32 | fig = plt.figure() 33 | ax1 = fig.add_subplot(111) 34 | # second row is BA 35 | ax1.plot(x, data[1], '-k+', label='Sequential alignment', markersize=10) 36 | 37 | # ax1.plot(x, data[2], '-ro', label='Random shift each iter', markersize=10) 38 | 39 | # ax1.plot(x, data[3], '-c^', label='Random shift once', markersize=10) 40 | 41 | ax1.plot(x, data[4], '-b*', label='Random alignment (SotA)', markersize=10) 42 | 43 | # ax1.plot(x, data[4], '-cp', label='PF bandwidth adapting', markersize=10) 44 | 45 | 46 | # ax1.plot(x, data[5], '-yx', label='Random once', markersize=10) 47 | 48 | ax1.plot(x, data[6], '-gD', label='P-persistent ranking', markersize=10) 49 | 50 | # ax1.plot(x, data[7], '-kp', label='Static Reuse 3', markersize=10) 51 | 52 | ax1.plot(x, data[8], '-ms', label='DTX alignment with memory', markersize=10) 53 | 54 | plt.axis( [1, 20, 100, 400]) 55 | plt.setp(ax1.get_xticklabels(), fontsize=20) 56 | plt.setp(ax1.get_yticklabels(), fontsize=20) 57 | plt.legend(loc='upper right', prop={'size':20}) 58 | xlabel = 'OFDMA frames' 59 | ylabel = 'Cell power consumption in Watts' 60 | # title = 'Average number of cells where target rate was missed at ' + str(rate) + ' bps' 61 | ax1.set_xlabel(xlabel,size=20) 62 | ax1.set_ylabel(ylabel,size=20) 63 | # plt.title(title) 64 | plt.savefig(filename+'.pdf', format='pdf') 65 | plt.savefig(filename+'.png', format='png') 66 | 67 | 68 | if __name__ == '__main__': 69 | import sys 70 | filename = sys.argv[1] 71 | plot(filename) 72 | -------------------------------------------------------------------------------- /plotting/plot_variance_of_user_rate_over_target_user_rate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Plot standard deviation of delivered rate over target rate 4 | x axis: standard deviation delivered rate per user 5 | y axis: percentage of satisfied users 6 | 7 | ''' 8 | 9 | __author__ = "Hauke Holtkamp" 10 | 
__credits__ = "Hauke Holtkamp"
__license__ = "unknown"
__version__ = "unknown"
__maintainer__ = "Hauke Holtkamp"
__email__ = "h.holtkamp@gmail.com"
__status__ = "Development"

def plot(filename):
    """ Open data file, process, generate pdf and png.

    The CSV at `filename` holds one scheme per row:
    row 0 -- user target rate, rows 1-4 -- standard deviation of the
    delivered user rate for sequential / random / p-persistent / memory
    DTX alignment respectively. Writes <filename>.pdf and <filename>.png.
    """

    import numpy as np
    import matplotlib.pyplot as plt

    # data comes in a csv; divide every row by 1e6 to convert bps to Mbps
    data = np.genfromtxt(filename, delimiter=',')/1e6 # Mbps

    # first row is the x-axis (user target rate), already in Mbps after scaling
    x = data[0] # Mbps

    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    # second row is BA
    ax1.plot(x, data[1], '-k+', label='Sequential alignment', markersize=10)

    # ax1.plot(x, data[2], '-ro', label='Random shift each iter', markersize=10)

    # ax1.plot(x, data[3], '-c^', label='Random shift once', markersize=10)

    ax1.plot(x, data[2], '-b*', label='Random alignment', markersize=10)

    # ax1.plot(x, data[4], '-cp', label='PF bandwidth adapting', markersize=10)


    # ax1.plot(x, data[5], '-yx', label='Random once', markersize=10)

    ax1.plot(x, data[3], '-gD', label='P-persistent ranking', markersize=10)

    # ax1.plot(x, data[7], '-kp', label='Static Reuse 3', markersize=10)

    ax1.plot(x, data[4], '-ms', label='DTX alignment with memory', markersize=10)

    plt.axis( [1, 3, 0, 3])
    plt.legend(loc='upper right', prop={'size':20})
    plt.setp(ax1.get_xticklabels(), fontsize=20)
    plt.setp(ax1.get_yticklabels(), fontsize=20)
    xlabel = 'User target rate in Mbps'
    ylabel = 'Standard deviation of \n achieved rate in Mbps'
    # removed unused `from utils import utils` and the dead `title` local
    # (a copy-paste leftover reading 'Consumption over sum rate'); the
    # corresponding plt.title call was already commented out.
    ax1.set_xlabel(xlabel,size=20)
    ax1.set_ylabel(ylabel,size=20)
    # widen the left margin so the two-line y-label is not clipped
    plt.subplots_adjust(left=0.2)
    plt.savefig(filename+'.pdf', format='pdf')
    plt.savefig(filename+'.png', format='png')
__name__ == '__main__': 68 | import sys 69 | filename = sys.argv[1] 70 | plot(filename) 71 | -------------------------------------------------------------------------------- /plotting/plot_average_mobile_delivered_rate_over_target_rate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Plot average delivered rate over target rate 4 | x axis: average delivered rate per user 5 | y axis: percentage of satisfied users 6 | 7 | ''' 8 | 9 | __author__ = "Hauke Holtkamp" 10 | __credits__ = "Hauke Holtkamp" 11 | __license__ = "unknown" 12 | __version__ = "unknown" 13 | __maintainer__ = "Hauke Holtkamp" 14 | __email__ = "h.holtkamp@gmail.com" 15 | __status__ = "Development" 16 | 17 | def plot(filename): 18 | """ Open data file, process, generate pdf and png""" 19 | 20 | import numpy as np 21 | import matplotlib.pyplot as plt 22 | from utils import utils 23 | 24 | # data comes in a csv 25 | data = np.genfromtxt(filename, delimiter=',')/1e6 # Mbps 26 | 27 | # first row is x-axis (number of users in cell). Each user has a fixed rate. 
28 | x = data[0] # Mbps 29 | 30 | fig = plt.figure() 31 | ax1 = fig.add_subplot(111) 32 | # second row is BA 33 | ax1.plot(x, data[1], '-k+', label='Sequential alignment', markersize=10) 34 | 35 | # ax1.plot(x, data[2], '-ro', label='Random shift each iter', markersize=10) 36 | 37 | # ax1.plot(x, data[3], '-c^', label='Random shift once', markersize=10) 38 | 39 | ax1.plot(x, data[2], '-b*', label='Random alignment', markersize=10) 40 | 41 | # ax1.plot(x, data[4], '-cp', label='PF bandwidth adapting', markersize=10) 42 | 43 | 44 | # ax1.plot(x, data[5], '-yx', label='Random once', markersize=10) 45 | 46 | ax1.plot(x, data[3], '-gD', label='P-persistent ranking', markersize=10) 47 | 48 | # ax1.plot(x, data[7], '-kp', label='Static Reuse 3', markersize=10) 49 | 50 | ax1.plot(x, data[4], '-ms', label='DTX alignment with memory', markersize=10) 51 | 52 | # reference line 53 | ax1.plot(x, x, '--k', label='Reference') 54 | 55 | plt.axis( [1, 3, 0, 3]) 56 | plt.legend(loc='lower right', prop={'size':20}) 57 | plt.setp(ax1.get_xticklabels(), fontsize=20) 58 | plt.setp(ax1.get_yticklabels(), fontsize=20) 59 | xlabel = 'User target rate in Mbps' 60 | ylabel = 'Average achieved rate in Mbps' 61 | title = 'Consumption over sum rate' 62 | ax1.set_xlabel(xlabel,size=20) 63 | ax1.set_ylabel(ylabel,size=20) 64 | # plt.title(title) 65 | plt.savefig(filename+'.pdf', format='pdf') 66 | plt.savefig(filename+'.png', format='png') 67 | 68 | 69 | if __name__ == '__main__': 70 | import sys 71 | filename = sys.argv[1] 72 | plot(filename) 73 | -------------------------------------------------------------------------------- /plotting/power_consumption_over_target_sum_rate_seqDTX.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Generate the sum rate comparison plot for ICC 2013. 
4 | x axis: sum rate of the center cell 5 | y axis: cell power consumption 6 | second y axis: possibly power per bit 7 | 8 | File: sum_rate_analysis_plot_seqDTX.py 9 | ''' 10 | 11 | __author__ = "Hauke Holtkamp" 12 | __credits__ = "Hauke Holtkamp" 13 | __license__ = "unknown" 14 | __version__ = "unknown" 15 | __maintainer__ = "Hauke Holtkamp" 16 | __email__ = "h.holtkamp@gmail.com" 17 | __status__ = "Development" 18 | 19 | def plot(filename): 20 | """ Open data file, process, generate pdf and png""" 21 | 22 | import numpy as np 23 | import matplotlib.pyplot as plt 24 | from utils import utils 25 | 26 | # data comes in a csv 27 | data = np.genfromtxt(filename, delimiter=',') 28 | 29 | # first row is x-axis (number of users in cell). Each user has a fixed rate. 30 | x = data[0]/1e7 # Mbps 31 | 32 | fig = plt.figure() 33 | ax1 = fig.add_subplot(111) 34 | # second row is BA 35 | ax1.plot(x, data[1], '-k+', label='Sequential alignment', markersize=10) 36 | 37 | # ax1.plot(x, data[2], '-ro', label='Random shift each iter', markersize=10) 38 | 39 | # ax1.plot(x, data[3], '-c^', label='Random shift once', markersize=10) 40 | 41 | ax1.plot(x, data[4], '-b*', label='Random alignment (SotA)', markersize=10) 42 | 43 | # ax1.plot(x, data[4], '-cp', label='PF bandwidth adapting', markersize=10) 44 | 45 | 46 | # ax1.plot(x, data[5], '-yx', label='Random once', markersize=10) 47 | 48 | ax1.plot(x, data[6], '-gD', label='P-persistent ranking', markersize=10) 49 | 50 | # ax1.plot(x, data[7], '-kp', label='Static Reuse 3', markersize=10) 51 | 52 | ax1.plot(x, data[8], '-ms', label='DTX alignment with memory', markersize=10) 53 | 54 | plt.axis( [1, 3, 100, 440]) 55 | plt.legend(loc='upper left', prop={'size':20}) 56 | plt.setp(ax1.get_xticklabels(), fontsize=20) 57 | plt.setp(ax1.get_yticklabels(), fontsize=20) 58 | xlabel = 'User target rate in Mbps' 59 | ylabel = 'Average cell power consumption in Watts' 60 | title = 'Consumption over sum rate' 61 | ax1.set_xlabel(xlabel,size=20) 62 
| ax1.set_ylabel(ylabel,size=20) 63 | # plt.title(title) 64 | plt.savefig(filename+'.pdf', format='pdf') 65 | plt.savefig(filename+'.png', format='png') 66 | 67 | 68 | if __name__ == '__main__': 69 | import sys 70 | filename = sys.argv[1] 71 | plot(filename) 72 | -------------------------------------------------------------------------------- /plotting/JSACplot.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Recreates the central JSAC plot: Data rate per user vs supply power consumption. 4 | 5 | File: JSACplot.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | import results.resultshandler as rh 17 | import matplotlib 18 | import matplotlib.pyplot as plt 19 | import numpy as np 20 | import datetime 21 | 22 | def savePlot(filename): 23 | """Save MPL plot to pdf.""" 24 | timesuffix = datetime.datetime.now().strftime("_%y_%m_%d_%I-%M-%S%p") 25 | if filename is None: 26 | filename = "JSACplot" 27 | plt.savefig(filename+timesuffix+'.pdf', format='pdf') 28 | print filename+timesuffix+'.pdf saved' 29 | plt.savefig(filename+timesuffix+'.png', format='png') 30 | print filename+timesuffix+'.png saved' 31 | 32 | def showPlot(): 33 | """Draw to screen.""" 34 | plt.show() 35 | 36 | def plotFromData(filename=None): 37 | """Pull data and create plot""" 38 | import itertools 39 | colors = itertools.cycle(['r','g','b','c','m','y','k']) 40 | 41 | if filename is None: 42 | data, filename = rh.loadBin() # loads most recent npz file. 
data is 2d-array 43 | else: 44 | raise NotImplementedError('Cannot load particular filename yet') 45 | 46 | fig = plt.figure() 47 | ax = fig.add_subplot(111) 48 | plt.title('Power consumption as a function of user rate\n' + filename) 49 | plt.xlabel('User rate in bps') 50 | plt.ylabel('Supply power consumption in Watt') 51 | 52 | data = data.transpose() 53 | xdata = data[:,0] 54 | p = [] 55 | for i in np.arange(1,np.shape(data)[1] ): 56 | color = colors.next() 57 | p.append(plt.plot(xdata, data[:,i],color,label='test')[0]) 58 | 59 | plt.legend([p[0], p[1], p[2], p[3]], ['Theoretical bound','SOTA','After optimization','After quantization'], loc=4) # bottom right 60 | 61 | return ax, filename # TODO the return values are not used, as pyplot keeps track of 'current plot' internally 62 | 63 | 64 | if __name__ == '__main__': 65 | import sys 66 | # If this is called as a script, it plots the most recent results file. 67 | 68 | if len(sys.argv) >= 2: 69 | filename = sys.argv[1] 70 | else: 71 | filename = None 72 | 73 | path = 'plotting/' 74 | targetfilename = 'JSAC' 75 | 76 | 77 | # Create plot 78 | plot = plotFromData(filename) 79 | 80 | # Draw to screen and save 81 | savePlot(path+targetfilename) 82 | # showPlot() 83 | -------------------------------------------------------------------------------- /plotting/delivered_rate_over_target_sum_rate_seqDTX.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Generate the sum rate comparison plot for ICC 2013. 
4 | x axis: target sum rate of the center cell 5 | y axis: delivered rate of the center cell 6 | second y axis: possibly power per bit 7 | 8 | File: sum_rate_analysis_plot_seqDTX.py 9 | ''' 10 | 11 | __author__ = "Hauke Holtkamp" 12 | __credits__ = "Hauke Holtkamp" 13 | __license__ = "unknown" 14 | __version__ = "unknown" 15 | __maintainer__ = "Hauke Holtkamp" 16 | __email__ = "h.holtkamp@gmail.com" 17 | __status__ = "Development" 18 | 19 | def plot(filename): 20 | """ Open data file, process, generate pdf and png""" 21 | 22 | import numpy as np 23 | import matplotlib.pyplot as plt 24 | from utils import utils 25 | 26 | # data comes in a csv 27 | # y-data is bit load per user and OFDMA frame 28 | # x-data is target sum rate per cell and second. There are 10 OFDMA frames per second and 10 users in each cell. 29 | data = np.genfromtxt(filename, delimiter=',')/1e4 30 | # first row is x-axis (number of users in cell). Each user has a fixed rate. 31 | x = data[0]/1e2 # Mbps 32 | 33 | fig = plt.figure() 34 | ax1 = fig.add_subplot(111) 35 | # second row is BA 36 | ax1.plot(x, data[1], '-k+', label='Sequential alignment', markersize=10) 37 | 38 | # ax1.plot(x, data[2], '-ro', label='Random shift each iter', markersize=10) 39 | 40 | # ax1.plot(x, data[3], '-c^', label='Random shift once', markersize=10) 41 | 42 | ax1.plot(x, data[4], '-b*', label='Random alignment', markersize=10) 43 | 44 | # ax1.plot(x, data[4], '-cp', label='PF bandwidth adapting', markersize=10) 45 | 46 | 47 | # ax1.plot(x, data[5], '-yx', label='Random once', markersize=10) 48 | 49 | ax1.plot(x, data[6], '-gD', label='p-persistent SINR ranking', markersize=10) 50 | 51 | # ax1.plot(x, data[7], '-kp', label='Static Reuse 3', markersize=10) 52 | 53 | ax1.plot(x, data[8], '-ms', label='DTX alignment with memory', markersize=10) 54 | 55 | plt.axis( [10, 30, 10, 40]) 56 | plt.legend(loc='upper left', prop={'size':20}) 57 | plt.setp(ax1.get_xticklabels(), fontsize=20) 58 | plt.setp(ax1.get_yticklabels(), 
fontsize=20) 59 | xlabel = 'Target cell sum rate in Mbps' 60 | ylabel = 'Average delivered cell sum rate in Mbps' 61 | title = 'Consumption over sum rate' 62 | ax1.set_xlabel(xlabel,size=20) 63 | ax1.set_ylabel(ylabel,size=20) 64 | # plt.title(title) 65 | plt.savefig(filename+'.pdf', format='pdf') 66 | plt.savefig(filename+'.png', format='png') 67 | 68 | 69 | if __name__ == '__main__': 70 | import sys 71 | filename = sys.argv[1] 72 | plot(filename) 73 | -------------------------------------------------------------------------------- /configure/createconfigfile.py~: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ Creates a new config file, if none exist. 4 | Rename the file to reflect what you have in mind. 5 | 6 | File: createconfigfile.py 7 | """ 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | 17 | import ConfigParser 18 | 19 | config = ConfigParser.RawConfigParser() 20 | 21 | # When adding sections or items, add them in the reverse order of 22 | # how you want them to be displayed in the actual file. 23 | # In addition, please note that using RawConfigParser's and the raw 24 | # mode of ConfigParser's respective set functions, you can assign 25 | # non-string values to keys internally, but will receive an error 26 | # when attempting to write to a file or when you get it in non-raw 27 | # mode. SafeConfigParser does not allow such assignments to take place. 
28 | config.add_section('General') 29 | config.set('General', 'tiers', '3') # hexagonal tiers 30 | config.set('General', 'consideredTiers', '1') # hexagonal tiers considered for data 31 | config.set('General', 'interSiteDistance', '500') # ISD 32 | config.set('General', 'usersPerCell', '10') # average number per cell 33 | config.set('General', 'numcenterusers', '10') # average number per cell 34 | config.set('General', 'arrivalRate', '0.12') # for traffic model 35 | config.set('General', 'timeStep', 1) # simulator time step in seconds 36 | config.set('General', 'sectorsPerBS', 3) # sectors served per BS 37 | config.set('General', 'LNSSD', 8) # log normal shadowing standard deviation 38 | config.set('General', 'forbiddenDistance', 35) # meters. The distance ring around a BS where no mobiles are allowed 39 | config.set('General', 'iterations', 10) # simulation iterations for repeated OFDMA frames 40 | config.set('General', 'mobileVelocity', 0) # mobile velocity in m/s 41 | config.set('General', 'enableFrequencySelectiveFading', True) # mobile velocity in m/s 42 | config.set('General', 'numTimeslots', 10) 43 | config.set('General', 'numFreqChunks', 50) 44 | config.set('General', 'centerFrequency', 2e9) # Hz 45 | config.set('General', 'simulationTime', 0.1) # seconds 46 | config.set('General', 'systemBandwidth', 1e7) # Hz 47 | config.set('General', 'temperature', '290') # Kelvin 48 | config.set('General', 'Boltzmannconstant', '4e-23') # W/Hz 49 | config.set('General', 'p0', '200') # Idle power consumption of BS 50 | config.set('General', 'm', '3.75') # Load factor of BS 51 | config.set('General', 'pS', '90') # Sleep power consumption of BS 52 | 53 | # Writing our configuration file to 'settings.cfg' 54 | with open('configure/settings.cfg', 'wb') as configfile: 55 | config.write(configfile) 56 | -------------------------------------------------------------------------------- /configure/createconfigfile.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ Creates a new config file with default parameters. 4 | Rename the file to reflect what you have in mind. 5 | 6 | File: createconfigfile.py 7 | """ 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | 17 | import ConfigParser 18 | 19 | config = ConfigParser.RawConfigParser() 20 | 21 | # When adding sections or items, add them in the reverse order of 22 | # how you want them to be displayed in the actual file. 23 | # In addition, please note that using RawConfigParser's and the raw 24 | # mode of ConfigParser's respective set functions, you can assign 25 | # non-string values to keys internally, but will receive an error 26 | # when attempting to write to a file or when you get it in non-raw 27 | # mode. SafeConfigParser does not allow such assignments to take place. 28 | config.add_section('General') 29 | config.set('General', 'tiers', '3') # hexagonal tiers 30 | config.set('General', 'consideredTiers', '1') # hexagonal tiers considered for data 31 | config.set('General', 'interSiteDistance', '500') # ISD 32 | config.set('General', 'usersPerCell', '10') # average number per cell 33 | config.set('General', 'numcenterusers', '10') # average number per cell 34 | config.set('General', 'arrivalRate', '0.12') # for traffic model 35 | config.set('General', 'timeStep', 1) # simulator time step in seconds 36 | config.set('General', 'sectorsPerBS', 3) # sectors served per BS 37 | config.set('General', 'LNSSD', 8) # log normal shadowing standard deviation 38 | config.set('General', 'forbiddenDistance', 35) # meters. 
The distance ring around a BS where no mobiles are allowed 39 | config.set('General', 'iterations', 10) # simulation iterations for repeated OFDMA frames 40 | config.set('General', 'mobileVelocity', 0) # mobile velocity in m/s 41 | config.set('General', 'enableFrequencySelectiveFading', True) # toggle frequency-selective fading (previous comment here was a copy-paste of the line above) 42 | config.set('General', 'numTimeslots', 10) 43 | config.set('General', 'numFreqChunks', 50) 44 | config.set('General', 'centerFrequency', 2e9) # Hz 45 | config.set('General', 'simulationTime', 0.1) # seconds 46 | config.set('General', 'systemBandwidth', 1e7) # Hz 47 | config.set('General', 'temperature', '290') # Kelvin 48 | config.set('General', 'Boltzmannconstant', '4e-23') # W/Hz 49 | config.set('General', 'p0', '200') # Idle power consumption of BS 50 | config.set('General', 'm', '3.75') # Load factor of BS 51 | config.set('General', 'pS', '90') # Sleep power consumption of BS 52 | 53 | # Writing our configuration file to 'settings.cfg' 54 | with open('configure/settings.cfg', 'wb') as configfile: 55 | config.write(configfile) 56 | -------------------------------------------------------------------------------- /plotting/plot_percentage_satisfieder_over_iters_seqDTX.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Generate the delivered rate plot.
4 | x axis: iterations 5 | y axis: percentage of satisfied users 6 | 7 | ''' 8 | 9 | __author__ = "Hauke Holtkamp" 10 | __credits__ = "Hauke Holtkamp" 11 | __license__ = "unknown" 12 | __version__ = "unknown" 13 | __maintainer__ = "Hauke Holtkamp" 14 | __email__ = "h.holtkamp@gmail.com" 15 | __status__ = "Development" 16 | 17 | def plot(searchpath, rate): 18 | """ Open data file, process, generate pdf and png""" 19 | 20 | import numpy as np 21 | import matplotlib.pyplot as plt 22 | from utils import utils 23 | 24 | import glob 25 | for filename in glob.glob(searchpath+'/percentage_satisfied*'+rate+'.csv'): 26 | print filename 27 | if 'none' in filename: 28 | data_none = np.genfromtxt(filename, delimiter=',') 29 | elif 'dtxs' in filename: 30 | data_dtxs = np.genfromtxt(filename, delimiter=',') 31 | elif 'rand' in filename: 32 | data_rand = np.genfromtxt(filename, delimiter=',') 33 | elif 'sinr' in filename: 34 | data_sinr = np.genfromtxt(filename, delimiter=',') 35 | 36 | # data comes in a csv 37 | data = np.genfromtxt(filename, delimiter=',') 38 | 39 | # first row is x-axis 40 | x = np.arange(data_rand.shape[0]) 41 | 42 | fig = plt.figure() 43 | ax1 = fig.add_subplot(111) 44 | # second row is BA 45 | ax1.plot(x, data_none, '-k+', label='Sequential alignment', markersize=10) 46 | 47 | # ax1.plot(x, data[2], '-ro', label='Random shift each iter', markersize=10) 48 | 49 | # ax1.plot(x, data[3], '-c^', label='Random shift once', markersize=10) 50 | 51 | ax1.plot(x, data_rand, '-b*', label='Random alignment', markersize=10) 52 | 53 | # ax1.plot(x, data[4], '-cp', label='PF bandwidth adapting', markersize=10) 54 | 55 | 56 | # ax1.plot(x, data[5], '-yx', label='Random once', markersize=10) 57 | 58 | ax1.plot(x, data_sinr, '-gD', label='p-persistent SINR ranking', markersize=10) 59 | 60 | # ax1.plot(x, data[7], '-kp', label='Static Reuse 3', markersize=10) 61 | 62 | ax1.plot(x, data_dtxs, '-ms', label='DTX alignment with memory', markersize=10) 63 | 64 | # plt.axis( [0, 
20, 0, 6e5]) 65 | plt.legend(loc='upper right', prop={'size':20}) 66 | plt.setp(ax1.get_xticklabels(), fontsize=20) 67 | plt.setp(ax1.get_yticklabels(), fontsize=20) 68 | xlabel = 'OFDMA frames' 69 | ylabel = 'Average achieved rate' 70 | title = 'Average number of cells where target rate was missed at ' + str(rate) + ' bps' 71 | ax1.set_xlabel(xlabel, size=20) 72 | ax1.set_ylabel(ylabel, size=20) 73 | # plt.title(title) 74 | target = searchpath + '/percentage_satisfied_over_iter_' + rate 75 | plt.savefig(target+'.pdf', format='pdf') 76 | plt.savefig(target+'.png', format='png') 77 | 78 | 79 | if __name__ == '__main__': 80 | import sys 81 | searchpath = sys.argv[1] 82 | rate = sys.argv[2] 83 | plot(searchpath, rate) 84 | -------------------------------------------------------------------------------- /documentation/structure.txt: -------------------------------------------------------------------------------- 1 | This is a text file detailing the structure of the network simulator. 2 | 3 | Author: Hauke Holtkamp 4 | Email: h.holtkamp@gmail.com 5 | Year: 2013 6 | 7 | == General == 8 | The 'simulator' generally consists of scripts that do all the action (load files, manage repetitions, logging, world creation and execution, results storage). For example, see scripts/RAPSmulticell.py. 9 | 10 | Unit tests are located inside the module folders. testsuite.py collects all tests. Tests have been reasonably well maintained. 11 | 12 | = Folders = 13 | == configure == 14 | Configuration files 15 | == documentation == 16 | This file 17 | == fsf == 18 | Frequency selective fading generation 19 | == iwf == 20 | Inverse Water-filling (or Margin Adaptation) 21 | == optim == 22 | All calls to the ipopt library 23 | == out == 24 | Default output folder for simulation data, logging, etc. 25 | == plotting == 26 | Anything that calls matplotlib 27 | == quantmap == 28 | Quantization for raps. 
This should probably be moved into raps 29 | == raps == 30 | Resource Allocation using Power Control and Sleep as well as its benchmarks 31 | == rcg == 32 | Rate-craving greedy. Move to raps? 33 | == results == 34 | Results handlers, data collectors 35 | == scripts == 36 | Scripts to be called from the shell 37 | == utils == 38 | Static functions (e.g. dB conversion, etc.) 39 | == world == 40 | All world elements. Tried to keep world generation apart from other simulation parts. 41 | 42 | = Getting started = 43 | 1. Check configuration in config/settingsRAPSmulticell.cfg 44 | 2. Call script, e.g. 'python scripts/RAPSmulticell.py' 45 | 3. Follow std out 46 | 47 | 48 | 49 | 50 | =================================================================== 51 | Important simulator objects 52 | =================================================================== 53 | 54 | == World == 55 | The world is the master container. It contains physical or spatial objects (e.g. base 56 | stations, mobiles, hexagons, grids). It performs some actions like distributing 57 | these. The world needs to consist of at least one hexagon (tier 0). 58 | 59 | The world receives two configuration objects, one for the physical layer and one for world parameters. 60 | 61 | == Base stations == 62 | When a base station has 1 sector, it is in the center of a hexagon. When it 63 | serves three sectors (default), then it is on the edge of three hexagons. The 64 | first BS is placed on the north vertex of the central hexagon. Note the 65 | important difference between a hexagon and a BS. The map is created in the 66 | following way. 1. Place hexagons. 2. Place BS to cover the hexagons. 67 | 68 | A BS is a site. It has a position, a power consumption and a set of cells that are responsible for the actual transmission. 69 | 70 | == Cell == 71 | A cell is mainly a directional antenna. It has transmission power and mobiles are connected to it. 72 | 73 | == Mobile == 74 | Mobiles have a position and all fading information.
Since we simulate the downlink, the fading information at the mobile determines rates, scheduling, etc. 75 | 76 | 77 | -------------------------------------------------------------------------------- /configure/wconfig.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Parameters for the world module 4 | 5 | File: wconfig.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | import ConfigParser 17 | import sys 18 | import os 19 | 20 | class Wconfig(): 21 | 22 | def __init__(self, pathToSettingsFile): 23 | try: 24 | open(pathToSettingsFile) 25 | except IOError: 26 | print 'settings.cfg not found. Try running createconfig.py. Aborting...' 27 | sys.exit(0) 28 | config = ConfigParser.RawConfigParser() 29 | config.read(pathToSettingsFile) 30 | 31 | # getfloat() raises an exception if the value is not a float 32 | # getint() and getboolean() also do this for their respective types 33 | 34 | # hexmap tiers. Zeroth tier is the center cell. 
35 | self.hexTiers = config.getint('General', 'tiers') 36 | self.consideredTiers = config.getint('General', 'consideredTiers') 37 | self.sectorsPerBS = config.getint('General', 'sectorsPerBS') 38 | 39 | # pathloss related 40 | self.LNSSD = config.getint('General', 'LNSSD') 41 | # in meters 42 | self.interSiteDistance = config.getint('General', 'intersitedistance') 43 | self.forbiddenDistance = config.getint('General', 'forbiddenDistance') 44 | 45 | # mobile velocity in m/s 46 | self.mobileVelocity = config.getint('General', 'mobileVelocity') 47 | self.enableFrequencySelectiveFading = config.getboolean('General', 'enableFrequencySelectiveFading') 48 | 49 | # user distribution related 50 | self.usersPerCell = config.getint('General', 'userspercell') 51 | self.numcenterusers = config.getint('General', 'numcenterusers') 52 | 53 | # noise power 54 | self.N0 = config.getfloat('General', 'boltzmannconstant') 55 | self.systemBandwidth = config.getfloat('General', 'systemBandwidth') 56 | self.temperature = config.getfloat('General', 'temperature') 57 | self.systemNoisePower = self.N0 * self.systemBandwidth * self.temperature 58 | 59 | # for now, power consumption is equal in all BS and, thus, a world parameter 60 | self.p0 = config.getfloat('General', 'p0') 61 | self.m = config.getfloat('General', 'm') 62 | self.pS = config.getfloat('General', 'pS') 63 | self.initial_power = config.get('General', 'initial_power') 64 | 65 | # sleep slot alignment 66 | self.sleep_alignment = config.get('General', 'sleep_alignment') 67 | 68 | # possibly load worlds 69 | load_world = config.get('General', 'load_world') 70 | self.load_world = None 71 | if os.path.exists(load_world): 72 | self.load_world = load_world 73 | elif load_world != 'none': 74 | print 'Unrecognized load_world option: ' + load_world 75 | 76 | # same target rate for all users 77 | self.user_rate = config.getfloat('General', 'user_rate') 78 | 79 | 
-------------------------------------------------------------------------------- /plotting/convergence_analysis_plot_ICC2013.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Read from csv, plot one line for each row. The x-axis is always arange(len(data)) Save as pdf and png. 4 | 5 | File: plotArray.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | def plot_arr(filename, title, xlabel, ylabel): 17 | """Read file, create MPL plot with tile, and axis labels.""" 18 | 19 | import numpy as np 20 | import matplotlib.pyplot as plt 21 | import matplotlib.mlab as mlab 22 | data = mlab.csv2rec(filename, delimiter=',') 23 | 24 | fig = plt.figure() 25 | ax = fig.add_subplot(111) 26 | 27 | x = np.arange(len(data[0])) 28 | color = 'b' 29 | for i in np.arange(data.shape[0]): 30 | y = np.around(data[i].tolist(), decimals=2) # I don't get recarrays. This is a workaround. 
31 | if i > 11: 32 | color = '-xg' 33 | p2, = plt.plot(x, y, color) 34 | else: 35 | p1, = plt.plot(x, y, color) 36 | 37 | from matplotlib.patches import Ellipse 38 | el1 = Ellipse((8, 110), 16, 10, fill=False, color='w') 39 | ax.add_patch(el1) 40 | el2 = Ellipse((8, 120), 16, 10, fill=False, color='w') 41 | ax.add_patch(el2) 42 | el3 = Ellipse((8, 130), 16, 10, fill=False, color='w') 43 | ax.add_patch(el3) 44 | ax.annotate('Level at 115 W', xy=(6., 110), xycoords='data', 45 | xytext=(20, 120), textcoords='offset points', 46 | size=20, 47 | #bbox=dict(boxstyle="round", fc="0.8"), 48 | arrowprops=dict(arrowstyle="simple", 49 | fc="0.6", ec="none", 50 | patchB=el1, 51 | connectionstyle="arc3,rad=0.3"), 52 | ) 53 | ax.annotate('Level at 125 W', xy=(8., 120), xycoords='data', 54 | xytext=(20, 80), textcoords='offset points', 55 | size=20, 56 | #bbox=dict(boxstyle="round", fc="0.8"), 57 | arrowprops=dict(arrowstyle="simple", 58 | fc="0.6", ec="none", 59 | patchB=el2, 60 | connectionstyle="arc3,rad=0.3"), 61 | ) 62 | ax.annotate('Level at 135 W', xy=(10., 130), xycoords='data', 63 | xytext=(20, 40), textcoords='offset points', 64 | size=20, 65 | #bbox=dict(boxstyle="round", fc="0.8"), 66 | arrowprops=dict(arrowstyle="simple", 67 | fc="0.6", ec="none", 68 | patchB=el3, 69 | connectionstyle="arc3,rad=0.3"), 70 | ) 71 | 72 | plt.legend([p1,p2], ['RAPS', 'PF']) 73 | plt.xlabel(xlabel) 74 | plt.ylabel(ylabel) 75 | plt.title(title) 76 | plt.savefig(filename+'.pdf', format='pdf') 77 | plt.savefig(filename+'.png', format='png') 78 | 79 | if __name__ == '__main__': 80 | import sys 81 | filename = sys.argv[1] 82 | plot_arr(filename, 'Power consumption', 'Iterations', 'Average power consumption in Watt') 83 | -------------------------------------------------------------------------------- /world/pathloss.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' This file contains static functions related to pathloss 
calculations. 4 | 5 | File: pathloss.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | from numpy import * 17 | import hexfuns 18 | from utils import utils 19 | 20 | def correlatedLNSMap(numberOfUsers, numberOfBS, LNSSD): 21 | """Generates a correlation matrix containing the Log-Normal-Shadowing values for all user-BS-pairs. LNSSD is the LNS standard deviation in dB. Direct copy from Zubin's MATLAB solution.""" 22 | 23 | correlationMatrix = 0.5 * ones((numberOfBS, numberOfBS)) + diag(0.5*ones(numberOfBS)) 24 | correlationMatrixCholesky = linalg.cholesky(correlationMatrix).T 25 | uncorrelatedRandomValues = LNSSD * random.randn(numberOfUsers, numberOfBS) 26 | correlatedRandomValues = dot(uncorrelatedRandomValues , correlationMatrixCholesky) 27 | return correlatedRandomValues 28 | 29 | def pathloss(mobile, baseStation, cell): 30 | """ pathloss calculation. different models are possible. 31 | The pathloss consists of a distance loss, a loss due to disalignment with the antenna lobe and the shadowing loss (LNS). Returns: Pathloss in linear format. 
""" 32 | LNS = mobile.baseStations[baseStation]['LNS'] 33 | distance = mobile.baseStations[baseStation]['distance'] 34 | distancepathloss = 128.1 + 37.6*log10(distance/1e3) # distance in meters 35 | antennaG = antennaGain(getAngleUEBSHex(baseStation.position, cell.center, mobile.position)) 36 | pathloss = distancepathloss + LNS - antennaG 37 | #print "%.2f" % pathloss, '=', "%.2f" % distancepathloss, '+', "%.2f" % LNS, '-', "%.2f" % antennaG 38 | return utils.dBToW(-pathloss) 39 | 40 | def getAngleUEBSHex(BSPosition, hexCenter, mobilePosition): 41 | """ Returns the angle of the vectors between BS and mobile as well as BS and hex""" 42 | #print BSPosition, hexCenter, mobilePosition 43 | P12 = hexfuns.distance(BSPosition, hexCenter) 44 | P13 = hexfuns.distance(BSPosition, mobilePosition) 45 | P23 = hexfuns.distance(hexCenter, mobilePosition) 46 | if P12 < 1e-5: # BS is in the hex center. This is omnidirectional case. 47 | angle = 0 48 | else: 49 | angle = arccos((power(P12,2) + power(P13,2) - power(P23,2))/(2 * P12 * P13)) 50 | angle = angle*180/pi 51 | 52 | if angle > 180: 53 | raise ValueError('Angle larger than realistically possible.') 54 | return angle 55 | 56 | def antennaGain(UEBoresightAngle): 57 | """ Standard formula taken from BeFEMTO document. Input in degrees. Output in dB. """ 58 | angleSpread3dB = 70. # degrees 59 | antennaFront2BackRatio = 25. # dB 60 | boresightMaxGain = 14. # dBi #TODO: Is this true for omnidirectional? 61 | azimuthloss = - min(12. 
* power((UEBoresightAngle / angleSpread3dB), 2) , antennaFront2BackRatio ) 62 | gain = boresightMaxGain + azimuthloss 63 | return gain 64 | 65 | if __name__ == '__main__': 66 | 67 | print "Testing correlatedLNSMap:" 68 | numUsers = 2 69 | numBS = 3 70 | LNSSD = 8 71 | print correlatedLNSMap(numUsers, numBS, LNSSD) 72 | -------------------------------------------------------------------------------- /iwf/test_iwf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Unit test for the Inverse Waterfilling module 4 | 5 | File: test_iwf.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | import iwf 17 | import unittest 18 | import numpy as np 19 | from utils import utils 20 | import scipy.linalg 21 | 22 | class TestSequenceFunctions(unittest.TestCase): 23 | 24 | def setUp(self): 25 | pass 26 | 27 | def test_iwf_cap(self): 28 | # Test that the requested capacity comes out 29 | systemBandwidth = 12. 30 | systemTime = 0.1 31 | targetLoad = 1e2 32 | for i in range(10): 33 | H = utils.rayleighChannel(2,2) 34 | eigvls, eigvects = scipy.linalg.eig(np.dot(H,H.conj().T)) 35 | if i is 0: 36 | eigvals = eigvls 37 | else: 38 | eigvals = np.append(eigvals,eigvls) 39 | eigvals = np.real(eigvals) # clear numberical imaginary parts 40 | 41 | subcarriers = 1. 42 | timeslots = 1. 
43 | channelBandwidth = systemBandwidth / subcarriers 44 | transmissionTime = systemTime / timeslots 45 | noiseIfPowerPerChannel = np.ones(20) * 1e-10*systemBandwidth/subcarriers 46 | 47 | powerlvls, waterlvl, cap = iwf.inversewaterfill(eigvals, targetLoad, noiseIfPowerPerChannel, channelBandwidth, transmissionTime) 48 | np.testing.assert_almost_equal(targetLoad, cap) 49 | 50 | def test_iwf_even(self): 51 | # Test that power levels are equal if channels are equal 52 | systemBandwidth = 12. 53 | systemTime = 0.1 54 | subcarriers = 2. 55 | timeslots = 2. 56 | channelBandwidth = systemBandwidth / subcarriers 57 | transmissionTime = systemTime / timeslots 58 | noiseIfPowerPerChannel = 1e-10*systemBandwidth/subcarriers * np.ones(5) 59 | 60 | eigvals = np.repeat([0.5],5) 61 | targetLoad = 1e2 62 | powerlvls, waterlvl, cap = iwf.inversewaterfill(eigvals, targetLoad, noiseIfPowerPerChannel, channelBandwidth, transmissionTime) 63 | np.testing.assert_almost_equal(powerlvls[::-1],powerlvls) 64 | 65 | def test_iwf_known(self): 66 | # Test known outcome 67 | 68 | systemBandwidth = 12. 69 | systemTime = 0.1 70 | subcarriers = 2. 71 | timeslots = 2. 
72 | channelBandwidth = systemBandwidth / subcarriers 73 | transmissionTime = systemTime / timeslots 74 | noiseIfPowerPerChannel = 1e-10*systemBandwidth/subcarriers * np.ones(8) 75 | 76 | eigvals = np.array([ 0.2296, 0.0255 ,0.1810 ,0.1117 ,0.0129 ,0.2029 ,0.3114 ,0.0299]) * 1e-4 77 | targetLoad = 1.2 78 | powerlvls, waterlvl, cap = iwf.inversewaterfill(eigvals, targetLoad, noiseIfPowerPerChannel, channelBandwidth, transmissionTime) 79 | powerlvls_answer = 1e-4 * np.array([ 0.2688 ,0 ,0.1986 ,0 ,0 ,0.2344 ,0.3374 ,0]) 80 | waterlvl_answer = 1.2248e-4 81 | np.testing.assert_array_almost_equal(powerlvls, powerlvls_answer) 82 | np.testing.assert_array_almost_equal(waterlvl, waterlvl_answer) 83 | np.testing.assert_array_almost_equal(cap, targetLoad) 84 | 85 | 86 | if __name__ == '__main__': 87 | unittest.main() 88 | -------------------------------------------------------------------------------- /world/hexagon.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """Hexagonal shaped object for the visualisation and mapping of mobile 4 | communication networks. 5 | 6 | File: hexagon.py""" 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | 17 | import math 18 | from numpy import * 19 | 20 | #TODO: Handle all coordinates as array([x,y]) 21 | class EWHexagon(object): 22 | "East-West Hexagon object class." 
23 | 24 | id_ = 0 25 | 26 | def __init__(self, center, radius): 27 | 28 | innerRadius = 0.5 * math.sqrt(3) * radius 29 | x = center[0] 30 | y = center[1] 31 | 32 | self.west = array([x - radius, y ]) 33 | self.northWest = array([x - radius/2., y + innerRadius] ) 34 | self.northEast = array([x + radius/2., y + innerRadius]) 35 | self.east = array([x + radius, y ]) 36 | self.southEast = array([x + radius/2., y - innerRadius]) 37 | self.southWest = array([x - radius/2., y - innerRadius]) 38 | 39 | self.center = array(center) 40 | self.outerRadius = radius 41 | self.innerRadius = innerRadius 42 | 43 | self.id_ = EWHexagon.id_ 44 | EWHexagon.id_ += 1 45 | 46 | def border(self): 47 | "Returns all border points for plotting." 48 | return [self.west, self.northWest, self.northEast, self.east, 49 | self.southEast, self.southWest, self.west] 50 | 51 | def vertices(self): 52 | "Returns all vertices." 53 | return [self.west, self.northWest, self.northEast, self.east, 54 | self.southEast, self.southWest] 55 | 56 | class NSHexagon(object): 57 | "North-South Hexagon object class." 58 | 59 | id_ = 0 60 | 61 | def __init__(self, center, radius): 62 | 63 | innerRadius = 0.5 * math.sqrt(3) * radius 64 | x = center[0] 65 | y = center[1] 66 | 67 | self.north = [x, y + radius] 68 | self.northEast = [x + innerRadius, y + radius/2.0] 69 | self.southEast = [x + innerRadius, y - radius/2.0] 70 | self.south = [x, y - radius] 71 | self.southWest = [ x - innerRadius, y - radius/2.0] 72 | self.northWest = [ x - innerRadius, y + radius/2.0] 73 | 74 | self.center = center 75 | self.outerRadius = radius 76 | self.innerRadius = innerRadius 77 | 78 | self.id_ = NSHexagon.id_ 79 | NSHexagon.id_ += 1 80 | 81 | def border(self): 82 | "Returns all border points for plotting." 83 | return [self.north, self.northEast, self.southEast, self.south, 84 | self.southWest, self.northWest, self.north] 85 | 86 | def vertices(self): 87 | "Returns all vertices." 
88 | return [self.north, self.northEast, self.southEast, self.south, 89 | self.southWest, self.northWest] 90 | 91 | 92 | if __name__ == '__main__': 93 | hexNS = NSHexagon((0,0), 1) 94 | hexEW = EWHexagon((0,2), 1) 95 | # print hex.__dict__ 96 | 97 | # Plot using the Gnuplot package: 98 | import Gnuplot, Gnuplot.funcutils 99 | from numpy import * 100 | 101 | g = Gnuplot.Gnuplot() 102 | g.title('Testing functionality of hexagon mapping') 103 | g('set data style linespoint') 104 | 105 | g.plot(hexNS.border(), hexEW.border()) 106 | raw_input('Please press return to exit...\n') 107 | -------------------------------------------------------------------------------- /quantmap/quantmap.py~: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Maps the real-valued resource shares in mu on integer-valued m_k over N subcarriers and T slots. See my academic papers for details. 4 | Input: 5 | - mu is size(1,users+dtx). sum(mu) == 1. 6 | - N is the number of subcarriers. e.g. 50 on 10 MHz 7 | - T is the number of timeslots to consider, e.g. 20 in an LTE frame 8 | 9 | Output: 10 | - outMap is size(T, K). For each user and slot it contains the 11 | number of resource blocks assigned. sum(sum(outMap)) == N*(T-t_sleep). 12 | 13 | File: quantmap.py 14 | ''' 15 | 16 | __author__ = "Hauke Holtkamp" 17 | __credits__ = "Hauke Holtkamp" 18 | __license__ = "unknown" 19 | __version__ = "unknown" 20 | __maintainer__ = "Hauke Holtkamp" 21 | __email__ = "h.holtkamp@gmail.com" 22 | __status__ = "Development" 23 | 24 | from numpy import * 25 | 26 | def quantmap(alloc, N, T): 27 | """Fit alloc to N x T with some rounding. 
Output is how many resources each user should receive in each timeslot.""" 28 | 29 | rbmap = empty([N, T]) 30 | rbmap[:] = nan 31 | alloc = array(alloc) # just in case 32 | 33 | K = alloc.size-1 # users 34 | 35 | # Initial mapping over all RB 36 | if N*T*alloc[-1] >= K: # Otherwise sleep duration would be negative 37 | 38 | t_sleep = floor( (N * T * alloc[-1] - K ) / N ) 39 | t_active = T - t_sleep 40 | m_k = ceil( alloc[:-1] * N * T ) 41 | leftoverRBs = N*T - sum(m_k) - t_sleep * N 42 | 43 | else: #% high load. N*T*mu(end) < K 44 | t_sleep = 0 45 | t_active = T 46 | m_k = floor( alloc[:-1] * N * T ) 47 | leftoverRBs = N*T - sum(m_k) 48 | 49 | # add remaining RBs to users round robin 50 | # set index here so the round robin continues where it left off within the while loop. 51 | rnd = random.permutation(K) # random starting point 52 | index = rnd[0] # Can this be done in one line with the one above? 53 | 54 | 55 | # Note that it's possible that a user receives RBs who did not request any since we are overcompensating 56 | while leftoverRBs > 0: 57 | m_k[index] = m_k[index] + 1 58 | leftoverRBs = leftoverRBs - 1 59 | index = mod(index+1, K) # move to next user 60 | 61 | m_k_start = m_k # save value for comparison later 62 | 63 | # Mapping per slot (from budget) 64 | m_slot = empty([t_active, K]) 65 | m_slot[:] = nan 66 | # for each active time slot 67 | 68 | # set index here so the round robin continues where it left off within the while loop. 69 | indx = rnd[0] # Can this be done in one line with the one above? 70 | for slot in arange(t_active): 71 | # take first guess at allocation by floor() 72 | m_slot[slot, :] = floor( m_k/sum(m_k) * N ) 73 | 74 | # fill up the remaining 75 | remainder = N - sum(m_slot[slot, :]) 76 | while remainder > 0: 77 | if nansum(m_slot[:,indx]) < m_k_start[indx]: # only if there is room. 
otherwise there may be negative slot numbers 78 | m_slot[slot, indx] = m_slot[slot, indx] + 1 79 | remainder = remainder - 1 80 | indx = mod(indx+1, K) # move to next user 81 | 82 | # keep track 83 | m_k = m_k - m_slot[slot, :] 84 | 85 | # test validity 86 | if nansum( m_k + nansum( m_slot,axis=0)) != N * t_active: 87 | disp('Sum mismatch in quantMap.m!') 88 | 89 | # test validity 90 | if (sum(m_slot,axis=0) != m_k_start).all(): 91 | raise ValueError ('Assignment faulty in quantMap.m!') 92 | if any(m_slot<0): 93 | raise ValueError ('Negative assignment in quantMap.m!') 94 | 95 | outMap = empty([T, K]) 96 | outMap[:] = nan # sleep slots will remain nan 97 | outMap[:t_active, :] = m_slot 98 | 99 | return outMap 100 | -------------------------------------------------------------------------------- /quantmap/quantmap.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Maps the real-valued resource shares in mu on integer-valued m_k over N subcarriers and T slots. See my academic paper in JSAC for details. 4 | Input: 5 | - mu is size(1,users+dtx). sum(mu) == 1. 6 | - N is the number of subcarriers. e.g. 50 on 10 MHz 7 | - T is the number of timeslots to consider, e.g. 20 in an LTE frame 8 | 9 | Output: 10 | - outMap is size(T, K). For each user and slot it contains the 11 | number of resource blocks assigned. sum(sum(outMap)) == N*(T-t_sleep). 12 | 13 | File: quantmap.py 14 | ''' 15 | 16 | __author__ = "Hauke Holtkamp" 17 | __credits__ = "Hauke Holtkamp" 18 | __license__ = "unknown" 19 | __version__ = "unknown" 20 | __maintainer__ = "Hauke Holtkamp" 21 | __email__ = "h.holtkamp@gmail.com" 22 | __status__ = "Development" 23 | 24 | from numpy import * 25 | 26 | def quantmap(alloc, N, T): 27 | """Fit alloc to N x T with some rounding. 
Output is how many resources each user should receive in each timeslot.""" 28 | 29 | rbmap = empty([N, T]) 30 | rbmap[:] = nan 31 | alloc = array(alloc) # just in case 32 | 33 | K = alloc.size-1 # users 34 | 35 | # Initial mapping over all RB 36 | if N*T*alloc[-1] >= K: # Otherwise sleep duration would be negative 37 | 38 | t_sleep = floor( (N * T * alloc[-1] - K ) / N ) 39 | t_active = T - t_sleep 40 | m_k = ceil( alloc[:-1] * N * T ) 41 | leftoverRBs = N*T - sum(m_k) - t_sleep * N 42 | 43 | else: #% high load. N*T*mu(end) < K 44 | t_sleep = 0 45 | t_active = T 46 | m_k = floor( alloc[:-1] * N * T ) 47 | leftoverRBs = N*T - sum(m_k) 48 | 49 | # add remaining RBs to users round robin 50 | # set index here so the round robin continues where it left off within the while loop. 51 | rnd = random.permutation(K) # random starting point 52 | index = rnd[0] # Can this be done in one line with the one above? 53 | 54 | 55 | # Note that it's possible that a user receives RBs who did not request any since we are overcompensating 56 | while leftoverRBs > 0: 57 | m_k[index] = m_k[index] + 1 58 | leftoverRBs = leftoverRBs - 1 59 | index = mod(index+1, K) # move to next user 60 | 61 | m_k_start = m_k # save value for comparison later 62 | 63 | # Mapping per slot (from budget) 64 | m_slot = empty([t_active, K]) 65 | m_slot[:] = nan 66 | # for each active time slot 67 | 68 | # set index here so the round robin continues where it left off within the while loop. 69 | indx = rnd[0] # Can this be done in one line with the one above? 70 | for slot in arange(t_active): 71 | # take first guess at allocation by floor() 72 | m_slot[slot, :] = floor( m_k/sum(m_k) * N ) 73 | 74 | # fill up the remaining 75 | remainder = N - sum(m_slot[slot, :]) 76 | while remainder > 0: 77 | if nansum(m_slot[:,indx]) < m_k_start[indx]: # only if there is room. 
otherwise there may be negative slot numbers 78 | m_slot[slot, indx] = m_slot[slot, indx] + 1 79 | remainder = remainder - 1 80 | indx = mod(indx+1, K) # move to next user 81 | 82 | # keep track 83 | m_k = m_k - m_slot[slot, :] 84 | 85 | # test validity 86 | if nansum( m_k + nansum( m_slot,axis=0)) != N * t_active: 87 | disp('Sum mismatch in quantMap.m!') 88 | 89 | # test validity 90 | if (sum(m_slot,axis=0) != m_k_start).all(): 91 | raise ValueError ('Assignment faulty in quantMap.m!') 92 | if any(m_slot<0): 93 | raise ValueError ('Negative assignment in quantMap.m!') 94 | 95 | outMap = empty([T, K]) 96 | outMap[:] = nan # sleep slots will remain nan 97 | outMap[:t_active, :] = m_slot 98 | 99 | return outMap 100 | -------------------------------------------------------------------------------- /optim/test_optimMinPow2x2.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Unit test for the optimization module 2x2 MIMO 4 | 5 | File: test_optimMinPow2x22x2.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | from optim import optimMinPow2x2 17 | import unittest 18 | import numpy as np 19 | from utils import utils 20 | import scipy.linalg 21 | 22 | class TestSequenceFunctions(unittest.TestCase): 23 | 24 | def setUp(self): 25 | self.H = np.array([[[1.-1j,-1.],[-1.,1.]],[[1.-1j,1.],[-1.,1.]],[[0.5,1.j],[1.,-1.j]]]) # Some channel. 2x2, 3 users. 
26 | for k in np.arange(self.H.shape[0]): 27 | self.H[k,:,:] = scipy.dot(self.H[k,:,:], self.H[k,:,:].conj().T) 28 | self.x0 = np.array([0.1, 0.1, 0.1]) # 29 | self.n_tx = self.H.shape[1] 30 | self.n_rx = self.H.shape[2] 31 | self.users = self.H.shape[0] 32 | self.noisepower = np.ones(3) 33 | self.rate = 1 34 | self.linkBandwidth = 1 35 | self.p0 = 0 36 | self.m = 1 37 | self.mus = np.array([0.1,0.1,0.1]) 38 | self.pMax = 10 39 | 40 | pass 41 | 42 | def test_eval_f(self): 43 | obj = optimMinPow2x2.eval_f(self.x0, self.noisepower, self.H, self.rate, self.linkBandwidth, self.p0, self.m) 44 | answer = 12.8015 45 | np.testing.assert_approx_equal(obj, answer, significant=3) 46 | 47 | trivialH = np.ones([3,2,2]) 48 | for k in np.arange(self.H.shape[0]): 49 | trivialH[k,:,:] = scipy.dot(trivialH[k,:,:], trivialH[k,:,:].conj().T) 50 | obj = optimMinPow2x2.eval_f(self.x0, self.noisepower, trivialH, self.rate, self.linkBandwidth, self.p0, self.m) 51 | answer = 153.45 52 | np.testing.assert_approx_equal(obj, answer, significant=3) 53 | 54 | def test_eval_grad_f(self): 55 | ans = optimMinPow2x2.eval_grad_f(self.x0, self.noisepower, self.H, self.rate, self.linkBandwidth, self.p0, self.m) 56 | answer = np.array([-162.07682481, -71.55787938, -106.63302593]) 57 | np.testing.assert_array_almost_equal(ans, answer) 58 | 59 | def test_eval_g(self): 60 | """docstring for test_eval_g""" 61 | ans = optimMinPow2x2.eval_g(self.x0, self.noisepower, self.H, self.rate, self.linkBandwidth) 62 | answer = np.array([ 0.3, 59.16385275, 27.62516375, 41.22583897]) 63 | np.testing.assert_array_almost_equal(ans, answer) 64 | 65 | def test_eval_jac_g(self): 66 | """docstring for test_eval_jac_g""" 67 | ans = optimMinPow2x2.eval_jac_g(self.x0, self.noisepower, self.H, self.rate, self.linkBandwidth, 0) 68 | answer = np.array([ 1.00000000e+00, 1.00000000e+00, 1.00000000e+00, -2.21240678e+03, 69 | 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, -9.91830431e+02, 70 | 0.00000000e+00, 0.00000000e+00, 
0.00000000e+00, -1.47858865e+03]) 71 | np.testing.assert_array_almost_equal(ans, answer, decimal=5) 72 | 73 | def test_eval_jac_g_structure(self): 74 | ans = optimMinPow2x2.eval_jac_g(self.x0, self.noisepower, self.H, self.rate, self.linkBandwidth, 1) 75 | answer = (np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), np.array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2])) 76 | np.testing.assert_equal(ans, answer) 77 | 78 | def test_ergMIMOsinrCDITCSIR2x2(self): 79 | ans = optimMinPow2x2.ergMIMOsinrCDITCSIR2x2( 1./self.x0[0], self.H[0,:,:], self.noisepower[0]) 80 | np.testing.assert_approx_equal(ans, 59.16385, significant=5) 81 | 82 | def test_dissectH(self): 83 | """docstring for test_dissectH""" 84 | ans = optimMinPow2x2.dissectSINR(self.H[0,:,:]) # 5,2,2 85 | answer = np.array([5.,2.,2.]) 86 | np.testing.assert_array_almost_equal(ans, answer) 87 | 88 | def test_ptxOfMu(self): 89 | """docstring for test_ptxOfMu""" 90 | ans = optimMinPow2x2.ptxOfMu(0.1, self.rate, self.linkBandwidth, self.noisepower, self.H[0,:,:]) # 59.16 91 | answer = 59.16385275 92 | np.testing.assert_almost_equal(ans, answer) 93 | 94 | if __name__ == '__main__': 95 | unittest.main() 96 | 97 | -------------------------------------------------------------------------------- /rcg/rcg.py~: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Performs the RCG algorithm on the input map. 4 | 5 | This function performs the algorithm described in Kivanc 2003, 6 | Computationally efficient bandwidth allocation and power control for 7 | ofdma called Rate Craving Greedy. 8 | It allocates subcarriers between users via a nearest neighbor search if 9 | the final number of subcarriers per user is previously known. 
10 | Input: 11 | - costmap: A matrix of dimension (subcarriers, users) where the 12 | entry represents the cost value that is used to allocate 13 | subcarriers (higher is better) 14 | - targetUserAssignment: A vector of dimension (users, 1) that holds the 15 | number of subcarriers that each user should receive. 16 | sum(subcarrierCount) must equal the number of subcarriers. 17 | Output: 18 | - outMap: size = [N]. Each entry contains the user index to whom the resource is assigned. 19 | - initialMap: The allocation after the first step (for debugging) 20 | 21 | File: rcg.py 22 | ''' 23 | 24 | __author__ = "Hauke Holtkamp" 25 | __credits__ = "Hauke Holtkamp" 26 | __license__ = "unknown" 27 | __version__ = "unknown" 28 | __maintainer__ = "Hauke Holtkamp" 29 | __email__ = "h.holtkamp@gmail.com" 30 | __status__ = "Development" 31 | 32 | from numpy import * 33 | 34 | def rcg(costmp, targetUserAssignment): 35 | """Rate craving greedy subcarrier allocation""" 36 | 37 | users = costmp.shape[1] 38 | subcarriers = costmp.shape[0] 39 | costmap = float32(real(costmp)) 40 | 41 | # check input 42 | if len(targetUserAssignment) is not users: 43 | raise ValueError('rcg input mismatch') 44 | 45 | currentSubcarrierAssignment = empty([subcarriers]); currentSubcarrierAssignment[:] = nan 46 | currentUserAssignment = zeros([users]); #currentUserAssignment[:] = nan 47 | 48 | # an NaN array signifies a sleep mode slot 49 | if sum(isnan(targetUserAssignment)) == users: 50 | outMap = empty([subcarriers]) 51 | outMap[:] = nan 52 | initialMap = nan 53 | return outMap, initialMap 54 | 55 | # initial subcarrier assignment by strength regardless of count 56 | for sc in range(subcarriers): 57 | maxindex = argmax(costmap[sc,:]) 58 | currentSubcarrierAssignment[sc] = maxindex # save which user has the best value 59 | currentUserAssignment[maxindex] = currentUserAssignment[maxindex] + 1 60 | 61 | initialMap = currentUserAssignment.copy() 62 | # print initialMap 63 | 64 | # perform the RCG 
reassignment. Take from the overloaded and give to the dissatisfied. 65 | overloadedUsers = (targetUserAssignment-currentUserAssignment<0) # boolean array 66 | satisfiedUsers = (targetUserAssignment-currentUserAssignment<=0) # boolean array 67 | 68 | for olusrindex in arange(users): 69 | while overloadedUsers[olusrindex]: 70 | 71 | # find nearest neighbor 72 | subcarrierIndicesOfOlusr = (currentSubcarrierAssignment == olusrindex) 73 | subcarrierIndicesOfOtherUsers = (currentSubcarrierAssignment != olusrindex) 74 | 75 | diffmp = abs(diffmap(costmap.copy(), olusrindex)) # generate map of differences 76 | 77 | diffmp[:,where(satisfiedUsers==True)] = nan # clear out the satisfieds. We do not compare to them. 78 | minindx = nanargmin(diffmp[:,where(satisfiedUsers==False)],0) # subcarrier indices of useful values 79 | diffmp[subcarrierIndicesOfOtherUsers,:] = nan # clear out rest 80 | 81 | # trade subcarrier with nearest neighbor (nn) 82 | nnindex = unravel_index(nanargmin(diffmp), diffmp.shape) 83 | tradesc = nnindex[0] 84 | tousr = nnindex[1] 85 | 86 | # trade the nearest neighbor 87 | currentSubcarrierAssignment[tradesc] = tousr 88 | currentUserAssignment[tousr] = currentUserAssignment[tousr] + 1 89 | currentUserAssignment[olusrindex] = currentUserAssignment[olusrindex] - 1 90 | # print targetUserAssignment 91 | # print currentUserAssignment 92 | 93 | # keep track 94 | overloadedUsers = (targetUserAssignment-currentUserAssignment<0) # boolean array 95 | satisfiedUsers = (targetUserAssignment-currentUserAssignment<=0) # boolean array 96 | 97 | outMap = currentSubcarrierAssignment 98 | return outMap, initialMap 99 | 100 | def diffmap(costmap, userindex): 101 | """For a reference user, this function returns the differences rather than absolute values.""" 102 | uservalues = costmap[:,userindex].copy() 103 | uservalues.shape = (uservalues.size,1) # promote dimensions 104 | costmap[:,userindex] = nan # don't compare to self 105 | 106 | diffmap = -uservalues + costmap 107 | 
return diffmap 108 | 109 | if __name__ == '__main__': 110 | users = 4 111 | subcarriers = 5 112 | costmap = random.random([subcarriers, users]) 113 | subcarrierCount = array([1,2,2,0], dtype=float_) 114 | # subcarrierCount[:] = nan 115 | print rcg(costmap, subcarrierCount) 116 | 117 | -------------------------------------------------------------------------------- /rcg/rcg.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Performs the Kivanc RCG algorithm on the input map. 4 | 5 | This function performs the algorithm described in Kivanc 2003, 6 | Computationally efficient bandwidth allocation and power control for 7 | ofdma called Rate Craving Greedy. 8 | It allocates subcarriers between users via a nearest neighbor search if 9 | the final number of subcarriers per user is previously known. 10 | Input: 11 | - costmap: A matrix of dimension (subcarriers, users) where the 12 | entry represents the cost value that is used to allocate 13 | subcarriers (higher is better) 14 | - targetUserAssignment: A vector of dimension (users, 1) that holds the 15 | number of subcarriers that each user should receive. 16 | sum(subcarrierCount) must equal the number of subcarriers. 17 | Output: 18 | - outMap: size = [N]. Each entry contains the user index to whom the resource is assigned. 
19 | - initialMap: The allocation after the first step (for debugging) 20 | 21 | File: rcg.py 22 | ''' 23 | 24 | __author__ = "Hauke Holtkamp" 25 | __credits__ = "Hauke Holtkamp" 26 | __license__ = "unknown" 27 | __version__ = "unknown" 28 | __maintainer__ = "Hauke Holtkamp" 29 | __email__ = "h.holtkamp@gmail.com" 30 | __status__ = "Development" 31 | 32 | from numpy import * 33 | 34 | def rcg(costmp, targetUserAssignment): 35 | """Rate craving greedy subcarrier allocation""" 36 | 37 | users = costmp.shape[1] 38 | subcarriers = costmp.shape[0] 39 | costmap = float32(real(costmp)) 40 | 41 | # check input 42 | if len(targetUserAssignment) is not users: 43 | raise ValueError('rcg input mismatch') 44 | 45 | currentSubcarrierAssignment = empty([subcarriers]); currentSubcarrierAssignment[:] = nan 46 | currentUserAssignment = zeros([users]); #currentUserAssignment[:] = nan 47 | 48 | # an NaN array signifies a sleep mode slot 49 | if sum(isnan(targetUserAssignment)) == users: 50 | outMap = empty([subcarriers]) 51 | outMap[:] = nan 52 | initialMap = nan 53 | return outMap, initialMap 54 | 55 | # initial subcarrier assignment by strength regardless of count 56 | for sc in range(subcarriers): 57 | maxindex = argmax(costmap[sc,:]) 58 | currentSubcarrierAssignment[sc] = maxindex # save which user has the best value 59 | currentUserAssignment[maxindex] = currentUserAssignment[maxindex] + 1 60 | 61 | initialMap = currentUserAssignment.copy() 62 | # print initialMap 63 | 64 | # perform the RCG reassignment. Take from the overloaded and give to the dissatisfied. 
65 | overloadedUsers = (targetUserAssignment-currentUserAssignment<0) # boolean array 66 | satisfiedUsers = (targetUserAssignment-currentUserAssignment<=0) # boolean array 67 | 68 | for olusrindex in arange(users): 69 | while overloadedUsers[olusrindex]: 70 | 71 | # find nearest neighbor 72 | subcarrierIndicesOfOlusr = (currentSubcarrierAssignment == olusrindex) 73 | subcarrierIndicesOfOtherUsers = (currentSubcarrierAssignment != olusrindex) 74 | 75 | diffmp = abs(diffmap(costmap.copy(), olusrindex)) # generate map of differences 76 | 77 | diffmp[:,where(satisfiedUsers==True)] = nan # clear out the satisfieds. We do not compare to them. 78 | minindx = nanargmin(diffmp[:,where(satisfiedUsers==False)],0) # subcarrier indices of useful values 79 | diffmp[subcarrierIndicesOfOtherUsers,:] = nan # clear out rest 80 | 81 | # trade subcarrier with nearest neighbor (nn) 82 | nnindex = unravel_index(nanargmin(diffmp), diffmp.shape) 83 | tradesc = nnindex[0] 84 | tousr = nnindex[1] 85 | 86 | # trade the nearest neighbor 87 | currentSubcarrierAssignment[tradesc] = tousr 88 | currentUserAssignment[tousr] = currentUserAssignment[tousr] + 1 89 | currentUserAssignment[olusrindex] = currentUserAssignment[olusrindex] - 1 90 | # print targetUserAssignment 91 | # print currentUserAssignment 92 | 93 | # keep track 94 | overloadedUsers = (targetUserAssignment-currentUserAssignment<0) # boolean array 95 | satisfiedUsers = (targetUserAssignment-currentUserAssignment<=0) # boolean array 96 | 97 | outMap = currentSubcarrierAssignment 98 | return outMap, initialMap 99 | 100 | def diffmap(costmap, userindex): 101 | """For a reference user, this function returns the differences rather than absolute values.""" 102 | uservalues = costmap[:,userindex].copy() 103 | uservalues.shape = (uservalues.size,1) # promote dimensions 104 | costmap[:,userindex] = nan # don't compare to self 105 | 106 | diffmap = -uservalues + costmap 107 | return diffmap 108 | 109 | if __name__ == '__main__': 110 | users = 4 
111 | subcarriers = 5 112 | costmap = random.random([subcarriers, users]) 113 | subcarrierCount = array([1,2,2,0], dtype=float_) 114 | # subcarrierCount[:] = nan 115 | print rcg(costmap, subcarrierCount) 116 | 117 | -------------------------------------------------------------------------------- /results/collect_delivered_per_mobile_distribution_over_iterations.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' collects results data from an expected folder structure. Specifically for a comparison of sequential DTX data sets. 4 | Collect the delivered rates per mobile, grouped by technique and rates. This should be used for plotting distributions of delivered rates per mobile. 5 | 6 | File: collect_missrates_seqDTX.py 7 | ''' 8 | 9 | __author__ = "Hauke Holtkamp" 10 | __credits__ = "Hauke Holtkamp" 11 | __license__ = "unknown" 12 | __version__ = "unknown" 13 | __maintainer__ = "Hauke Holtkamp" 14 | __email__ = "h.holtkamp@gmail.com" 15 | __status__ = "Development" 16 | 17 | import glob 18 | import numpy as np 19 | import sys 20 | import os 21 | import ConfigParser 22 | from scipy.stats.stats import nanmean 23 | def main(searchpath, outpath): 24 | """Search all directories in searchpath for settings files. Handle them according to the settings found in those files. Eventually build a csv file for plotting the sum rate agains the power consumption. 
25 | Input: search path, output path 26 | """ 27 | 28 | data_types = 5 29 | rate = 1e6 * 2.0 # 2 Mbps 30 | iterations = None 31 | sweep_values = [] # we do not know beforehand how much data we have 32 | depth = None 33 | 34 | # enum 35 | axis_index = 0 36 | sequential_index = 1 37 | random_each_iter_index = 2 38 | sinr_index = 3 39 | dtx_segregation = 4 40 | 41 | data_str = {1:'none', 2:'rand', 3:'sinr', 4:'dtxs'} 42 | 43 | # initially, we only check how much data we have 44 | for dirname, dirnames, filenames in os.walk(searchpath): 45 | if depth is None: 46 | depth = len(dirnames) # worst case amount of data 47 | for subdirname in dirnames: 48 | for filename in glob.glob(os.path.join(dirname, subdirname)+'/*settings*'): 49 | config = ConfigParser.RawConfigParser() 50 | config.read(filename) 51 | iterations = int(config.getfloat('General', 'iterations')) 52 | sweep_values.append(int(config.getfloat('General', 'user_rate'))) 53 | numcenterusers = int(config.getfloat('General', 'numcenterusers')) 54 | 55 | sweep_values = sorted(set(sweep_values)) 56 | # list of lists 57 | result = [] # result[data_type][sweep_value] 58 | for i in np.arange(data_types): 59 | result.append([]) 60 | for j in np.arange(len(sweep_values)): 61 | result[i].append([]) 62 | result[i][j] = np.zeros([20,1]) # prepare dimensions 63 | 64 | count = np.zeros([len(sweep_values), data_types]) 65 | 66 | # now start filling the result 67 | dep = 0 68 | for dirname, dirnames, filenames in os.walk(searchpath): 69 | for subdirname in dirnames: 70 | for filename in glob.glob(os.path.join(dirname, subdirname)+'/*settings*.cfg'): 71 | 72 | config = ConfigParser.RawConfigParser() 73 | config.read(filename) 74 | rate = int(config.getfloat('General', 'user_rate')) 75 | sleep_alignment = config.get('General', 'sleep_alignment') 76 | initial_power = config.get('General', 'initial_power') 77 | 78 | filedata = np.genfromtxt(os.path.join(dirname, subdirname)+'/delivered_per_mobile.csv', delimiter=',') 79 | # seqDTX 
sequential 80 | if ('DTX' in filename) and (sleep_alignment == 'none'): 81 | index = sequential_index 82 | 83 | # random each iter 84 | elif ('DTX' in filename) and (sleep_alignment == 'random_iter'): 85 | index = random_each_iter_index 86 | 87 | # sinr ordering 88 | elif ('DTX' in filename) and (sleep_alignment == 'sinr'): 89 | index = sinr_index 90 | 91 | # dtx segregation 92 | elif ('DTX' in filename) and (sleep_alignment == 'dtx_segregation'): 93 | index = dtx_segregation 94 | 95 | else: 96 | print 'What is this folder?'+os.path.join(dirname, subdirname)+'/'+filename 97 | filedata = 0 98 | index = 0 99 | 100 | result[index][sweep_values.index(rate)] = np.append(result[index][sweep_values.index(rate)], filedata, axis=1) 101 | count[sweep_values.index(rate), index] += 1 102 | dep += 1 103 | 104 | # clear placeholder 105 | for i in np.arange(data_types): 106 | for j in np.arange(len(sweep_values)): 107 | result[i][j] = np.delete(result[i][j], 0, 1) 108 | 109 | for i, s in enumerate(sweep_values): 110 | for d in np.arange(1,data_types): 111 | target = outpath + '/delivered_per_mobile_distr_' + data_str[d] + '_' + str(s) + '.csv' 112 | np.savetxt(target, result[d][i], delimiter=',') 113 | 114 | print count.T 115 | 116 | 117 | if __name__ == '__main__': 118 | searchpath = sys.argv[1] 119 | outpath = sys.argv[2] 120 | if not os.path.exists(outpath): 121 | os.makedirs(outpath) 122 | main(searchpath, outpath) 123 | -------------------------------------------------------------------------------- /optim/optimMinPow2x2DTX.py~: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Optimization objective and constraints for 2x2 MIMO minimal power allocation with DTX 4 | 5 | File: optimMinPow2x2DTX.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = 
"h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | import scipy.linalg 17 | from numpy import * 18 | 19 | def eval_f(mus, noiseIfPower, SINR, rate, linkBandwidth, p0, m, pS ): 20 | """Objective function. Min power equal power 2x2 MIMO. 21 | Variable is the resource share in TDMA. Last entry in mu[:] is sleep time share. Returns scalar.""" 22 | result = 0 23 | for i in range(mus.size-1): 24 | Ptxi = ptxOfMu(mus[i], rate, linkBandwidth, noiseIfPower[i], SINR[i,:,:]) 25 | Ppm = (p0 + m*Ptxi) * mus[i] 26 | result = result + Ppm 27 | result = result + mus[-1] * pS 28 | return result 29 | 30 | def eval_grad_f(mus, noiseIfPower, SINR, rate, linkBandwidth, p0, m, pS): 31 | """Gradient of the objective function. Returns array of scalars, each one the partial derivative. Last entry in mu[:] is sleep time share. """ 32 | result = 0 33 | mus = array(mus) # allow iteration 34 | result = zeros((mus.size), dtype=float_) 35 | for i in range(mus.size-1): # the last derivative is different 36 | a,b,M = dissectSINR(SINR[i,:,:]) 37 | capacity = rate / (linkBandwidth * mus[i]) 38 | result[i] = p0 + m*M*noiseIfPower[i]*( ( ( a**2 / b + 2*2**capacity - 1/mus[i] * ( rate/linkBandwidth * log(2) * 2**capacity) - 2 ) / sqrt( a**2 + 2 * b * (2**capacity - 1) ) ) - a/b ) 39 | result[-1] = pS 40 | return result 41 | 42 | def eval_g(mus, noiseIfPower, SINR, rate, linkBandwidth): 43 | """Constraint functions. Returns an array.""" 44 | 45 | mus = array(mus) 46 | result = zeros((mus.size), dtype=float_) 47 | result[0] = sum(mus) # first constraint is the unit sum 48 | 49 | # Other constraints: Maximum transmission power limit 50 | for i in range(mus.size-1): 51 | result[i+1] = ptxOfMu(mus[i], rate, linkBandwidth, noiseIfPower[i], SINR[i,:,:]) 52 | 53 | #print result 54 | return result 55 | 56 | def eval_jac_g(mus, noiseIfPower, SINR, rate, linkBandwidth, flag): 57 | """Gradient of constraint function/Jacobian. min power equal power 2x2 MIMO. 58 | mus is the resource share in TDMA. 
Output is a numpy array with the nnzj rows.""" 59 | ncon = mus.size 60 | if flag: # The 'structure of the Jacobian' is the map of which return value refers to which constraint function. There are ncon*ncon constraints overall. There are ncon functions in eval_g, each of which has ncon partial derivatives. 61 | lineindex = array(range(ncon)).repeat(ncon) 62 | rowindex = tile(array(range(ncon)),ncon) 63 | return (lineindex,rowindex) # returns something like [0,0,0,1,1,1,2,2,2], [0,1,2,0,1,2,0,1,2]... 64 | 65 | else: 66 | index = 0 67 | mus = array(mus) # allow iteration 68 | result = zeros((ncon*ncon), dtype=float_) 69 | # The derivatives of the unit sum are just 1 70 | for i in range(ncon): 71 | result[index] = 1 72 | index = index + 1 73 | 74 | # The derivatives of each power constraint: 75 | for i in range(ncon-1): # the number of power constraints 76 | for j in range(ncon): # the number of partial derivatives per power constraint 77 | if i == j: # there is a partial derivative 78 | a,b,M = dissectSINR(SINR[i,:,:]) 79 | capacity = rate / (linkBandwidth * mus[i]) 80 | result[index] = M*noiseIfPower[i]* ( - (rate/linkBandwidth)* log(2) * 2**capacity) / (mus[i]**2 * sqrt( a**2 + 2*b*(2**capacity - 1))) 81 | else: # there is no partial derivative 82 | result[index] = 0 # partial derivative is zero 83 | 84 | index = index + 1 85 | 86 | return result 87 | 88 | def ergMIMOsinrCDITCSIR2x2(capacity, SINR, noiseIfPower): 89 | """Ergodic MIMO SNR as a function of achieved capacity and channel.""" 90 | a,b,M = dissectSINR(SINR) 91 | if capacity > 0.5e3: 92 | value = inf # avoid overflow warning 93 | else: 94 | value = noiseIfPower * (M / b) * ( -a + sqrt( a**2 + 2 * b * (2**capacity - 1) ) ) 95 | return value 96 | 97 | def dissectSINR(SINR): 98 | """Take apart SINR into some values that we need often. 
If SINR is trivial, one eigenvalue is zero.""" 99 | M = SINR.shape[0] 100 | #eigvals, eigvects = scipy.linalg.eig(scipy.dot(H,H.conj().T)) # THIS LINE DETERMINES WHETHER WE ARE WORKING IN CHANNEL STATE OR SINR 101 | eigvals, eigvects = scipy.linalg.eig(SINR) # SINR is a bad label. It is actually the effective channel 102 | e1 = eigvals[0].real 103 | e2 = eigvals[1].real 104 | a = e1 + e2 105 | b = 2*e1 * e2 106 | 107 | return (a,b,M) 108 | 109 | def ptxOfMu(mu, rate, linkBandwidth, noiseIfPower, SINR): 110 | """Returns transmission power needed for a certain channel capacity as a function of the MIMO channel and noise power.""" 111 | capacity = rate / (linkBandwidth * mu) 112 | return ergMIMOsinrCDITCSIR2x2(capacity, SINR, noiseIfPower) 113 | 114 | -------------------------------------------------------------------------------- /optim/optimMinPow2x2DTX.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Optimization objective and constraints for 2x2 MIMO minimal power allocation with DTX. See my papers for details. 4 | 5 | File: optimMinPow2x2DTX.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | import scipy.linalg 17 | from numpy import * 18 | 19 | def eval_f(mus, noiseIfPower, SINR, rate, linkBandwidth, p0, m, pS ): 20 | """Objective function. Min power equal power 2x2 MIMO. 21 | Variable is the resource share in TDMA. Last entry in mu[:] is sleep time share. 
Returns scalar.""" 22 | result = 0 23 | for i in range(mus.size-1): 24 | Ptxi = ptxOfMu(mus[i], rate, linkBandwidth, noiseIfPower[i], SINR[i,:,:]) 25 | Ppm = (p0 + m*Ptxi) * mus[i] 26 | result = result + Ppm 27 | result = result + mus[-1] * pS 28 | return result 29 | 30 | def eval_grad_f(mus, noiseIfPower, SINR, rate, linkBandwidth, p0, m, pS): 31 | """Gradient of the objective function. Returns array of scalars, each one the partial derivative. Last entry in mu[:] is sleep time share. """ 32 | result = 0 33 | mus = array(mus) # allow iteration 34 | result = zeros((mus.size), dtype=float_) 35 | for i in range(mus.size-1): # the last derivative is different 36 | a,b,M = dissectSINR(SINR[i,:,:]) 37 | capacity = rate / (linkBandwidth * mus[i]) 38 | result[i] = p0 + m*M*noiseIfPower[i]*( ( ( a**2 / b + 2*2**capacity - 1/mus[i] * ( rate/linkBandwidth * log(2) * 2**capacity) - 2 ) / sqrt( a**2 + 2 * b * (2**capacity - 1) ) ) - a/b ) 39 | result[-1] = pS 40 | return result 41 | 42 | def eval_g(mus, noiseIfPower, SINR, rate, linkBandwidth): 43 | """Constraint functions. Returns an array.""" 44 | 45 | mus = array(mus) 46 | result = zeros((mus.size), dtype=float_) 47 | result[0] = sum(mus) # first constraint is the unit sum 48 | 49 | # Other constraints: Maximum transmission power limit 50 | for i in range(mus.size-1): 51 | result[i+1] = ptxOfMu(mus[i], rate, linkBandwidth, noiseIfPower[i], SINR[i,:,:]) 52 | 53 | #print result 54 | return result 55 | 56 | def eval_jac_g(mus, noiseIfPower, SINR, rate, linkBandwidth, flag): 57 | """Gradient of constraint function/Jacobian. min power equal power 2x2 MIMO. 58 | mus is the resource share in TDMA. Output is a numpy array with the nnzj rows.""" 59 | ncon = mus.size 60 | if flag: # The 'structure of the Jacobian' is the map of which return value refers to which constraint function. There are ncon*ncon constraints overall. There are ncon functions in eval_g, each of which has ncon partial derivatives. 
61 | lineindex = array(range(ncon)).repeat(ncon) 62 | rowindex = tile(array(range(ncon)),ncon) 63 | return (lineindex,rowindex) # returns something like [0,0,0,1,1,1,2,2,2], [0,1,2,0,1,2,0,1,2]... 64 | 65 | else: 66 | index = 0 67 | mus = array(mus) # allow iteration 68 | result = zeros((ncon*ncon), dtype=float_) 69 | # The derivatives of the unit sum are just 1 70 | for i in range(ncon): 71 | result[index] = 1 72 | index = index + 1 73 | 74 | # The derivatives of each power constraint: 75 | for i in range(ncon-1): # the number of power constraints 76 | for j in range(ncon): # the number of partial derivatives per power constraint 77 | if i == j: # there is a partial derivative 78 | a,b,M = dissectSINR(SINR[i,:,:]) 79 | capacity = rate / (linkBandwidth * mus[i]) 80 | result[index] = M*noiseIfPower[i]* ( - (rate/linkBandwidth)* log(2) * 2**capacity) / (mus[i]**2 * sqrt( a**2 + 2*b*(2**capacity - 1))) 81 | else: # there is no partial derivative 82 | result[index] = 0 # partial derivative is zero 83 | 84 | index = index + 1 85 | 86 | return result 87 | 88 | def ergMIMOsinrCDITCSIR2x2(capacity, SINR, noiseIfPower): 89 | """Ergodic MIMO SNR as a function of achieved capacity and channel.""" 90 | a,b,M = dissectSINR(SINR) 91 | if capacity > 0.5e3: 92 | value = inf # avoid overflow warning 93 | else: 94 | value = noiseIfPower * (M / b) * ( -a + sqrt( a**2 + 2 * b * (2**capacity - 1) ) ) 95 | return value 96 | 97 | def dissectSINR(SINR): 98 | """Take apart SINR into some values that we need often. If SINR is trivial, one eigenvalue is zero.""" 99 | M = SINR.shape[0] 100 | #eigvals, eigvects = scipy.linalg.eig(scipy.dot(H,H.conj().T)) # THIS LINE DETERMINES WHETHER WE ARE WORKING IN CHANNEL STATE OR SINR 101 | eigvals, eigvects = scipy.linalg.eig(SINR) # SINR is a bad label. 
It is actually the effective channel 102 | e1 = eigvals[0].real 103 | e2 = eigvals[1].real 104 | a = e1 + e2 105 | b = 2*e1 * e2 106 | 107 | return (a,b,M) 108 | 109 | def ptxOfMu(mu, rate, linkBandwidth, noiseIfPower, SINR): 110 | """Returns transmission power needed for a certain channel capacity as a function of the MIMO channel and noise power.""" 111 | capacity = rate / (linkBandwidth * mu) 112 | return ergMIMOsinrCDITCSIR2x2(capacity, SINR, noiseIfPower) 113 | 114 | -------------------------------------------------------------------------------- /results/collect_average_user_rate_over_target_user_rate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | '''Collect the average user rate over the target rate 4 | y-axis: average user rate 5 | x-axis: target rate 6 | 7 | ''' 8 | 9 | __author__ = "Hauke Holtkamp" 10 | __credits__ = "Hauke Holtkamp" 11 | __license__ = "unknown" 12 | __version__ = "unknown" 13 | __maintainer__ = "Hauke Holtkamp" 14 | __email__ = "h.holtkamp@gmail.com" 15 | __status__ = "Development" 16 | 17 | import glob 18 | import numpy as np 19 | import sys 20 | import os 21 | import ConfigParser 22 | from scipy.stats.stats import nanmean 23 | def main(searchpath, outpath): 24 | """Search all directories in searchpath for settings files. Handle them according to the settings found in those files. Eventually, build a data csv for plotting. 
25 | Input: search path, output path 26 | """ 27 | 28 | data_types = 5 29 | rate = 1e6 * 2.0 # 2 Mbps 30 | iterations = None 31 | sweep_values = [] # we do not know beforehand how much data we have 32 | depth = None 33 | 34 | # enum 35 | axis_index = 0 36 | sequential_index = 1 37 | random_each_iter_index = 2 38 | sinr_index = 3 39 | dtx_segregation = 4 40 | 41 | data_str = {1:'none', 2:'rand', 3:'sinr', 4:'dtxs'} 42 | 43 | # initially, we only check how much data we have 44 | for dirname, dirnames, filenames in os.walk(searchpath): 45 | if depth is None: 46 | depth = len(dirnames) # worst case amount of data 47 | for subdirname in dirnames: 48 | for filename in glob.glob(os.path.join(dirname, subdirname)+'/*settings*'): 49 | config = ConfigParser.RawConfigParser() 50 | config.read(filename) 51 | iterations = int(config.getfloat('General', 'iterations')) 52 | sweep_values.append(int(config.getfloat('General', 'user_rate'))) 53 | numcenterusers = int(config.getfloat('General', 'numcenterusers')) 54 | 55 | sweep_values = sorted(set(sweep_values)) 56 | # list of lists 57 | result = [] # result[data_type][sweep_value] 58 | for i in np.arange(data_types): 59 | result.append([]) 60 | for j in np.arange(len(sweep_values)): 61 | result[i].append([]) 62 | result[i][j] = np.zeros([20,1]) # prepare dimensions 63 | 64 | count = np.zeros([len(sweep_values), data_types]) 65 | 66 | # now start filling the result 67 | dep = 0 68 | for dirname, dirnames, filenames in os.walk(searchpath): 69 | for subdirname in dirnames: 70 | for filename in glob.glob(os.path.join(dirname, subdirname)+'/*settings*.cfg'): 71 | 72 | config = ConfigParser.RawConfigParser() 73 | config.read(filename) 74 | rate = int(config.getfloat('General', 'user_rate')) 75 | sleep_alignment = config.get('General', 'sleep_alignment') 76 | initial_power = config.get('General', 'initial_power') 77 | iterations = config.get('General', 'iterations') 78 | 79 | filedata = np.genfromtxt(os.path.join(dirname, 
subdirname)+'/delivered_per_mobile.csv', delimiter=',') 80 | # seqDTX sequential 81 | if ('DTX' in filename) and (sleep_alignment == 'none'): 82 | index = sequential_index 83 | 84 | # random each iter 85 | elif ('DTX' in filename) and (sleep_alignment == 'random_iter'): 86 | index = random_each_iter_index 87 | 88 | # sinr ordering 89 | elif ('DTX' in filename) and (sleep_alignment == 'sinr'): 90 | index = sinr_index 91 | 92 | # dtx segregation 93 | elif ('DTX' in filename) and (sleep_alignment == 'dtx_segregation'): 94 | index = dtx_segregation 95 | 96 | else: 97 | print 'What is this folder?'+os.path.join(dirname, subdirname)+'/'+filename 98 | filedata = 0 99 | index = 0 100 | 101 | result[index][sweep_values.index(rate)] = np.append(result[index][sweep_values.index(rate)], filedata, axis=1) 102 | count[sweep_values.index(rate), index] += 1 103 | dep += 1 104 | 105 | 106 | # clear placeholder 107 | for i in np.arange(data_types): 108 | for j in np.arange(len(sweep_values)): 109 | result[i][j] = np.delete(result[i][j], 0, 1) 110 | 111 | # build percentages 112 | avg_rates = np.zeros([data_types, len(sweep_values)]) 113 | for dt in np.arange(1,data_types): 114 | for si, sv in enumerate(sweep_values): 115 | elements = 100*result[dt][si] # times 100 for users and timeslots per frame 116 | 117 | # percentage at last iteration 118 | avg_rates[dt,si] = np.mean(elements) # should be around the value of sv 119 | 120 | # x-axis 121 | avg_rates[0,:] = sweep_values 122 | 123 | target = outpath + '/average_user_rates_over_target_rate.csv' 124 | np.savetxt(target, avg_rates, delimiter=',') 125 | 126 | print avg_rates 127 | print count.T 128 | 129 | 130 | if __name__ == '__main__': 131 | searchpath = sys.argv[1] 132 | outpath = sys.argv[2] 133 | if not os.path.exists(outpath): 134 | os.makedirs(outpath) 135 | main(searchpath, outpath) 136 | -------------------------------------------------------------------------------- 
/results/collect_variance_of_user_rate_over_target_user_rate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | '''Collect the variance of user rate over the target rate 4 | y-axis: variance of user rate 5 | x-axis: target rate 6 | 7 | ''' 8 | 9 | __author__ = "Hauke Holtkamp" 10 | __credits__ = "Hauke Holtkamp" 11 | __license__ = "unknown" 12 | __version__ = "unknown" 13 | __maintainer__ = "Hauke Holtkamp" 14 | __email__ = "h.holtkamp@gmail.com" 15 | __status__ = "Development" 16 | 17 | import glob 18 | import numpy as np 19 | import sys 20 | import os 21 | import ConfigParser 22 | from scipy.stats.stats import nanmean 23 | def main(searchpath, outpath): 24 | """Search all directories in searchpath for settings files. Handle them according to the settings found in those files. Eventually, build a data csv for plotting. 25 | Input: search path, output path 26 | """ 27 | 28 | data_types = 5 29 | rate = 1e6 * 2.0 # 2 Mbps 30 | iterations = None 31 | sweep_values = [] # we do not know beforehand how much data we have 32 | depth = None 33 | 34 | # enum 35 | axis_index = 0 36 | sequential_index = 1 37 | random_each_iter_index = 2 38 | sinr_index = 3 39 | dtx_segregation = 4 40 | 41 | data_str = {1:'none', 2:'rand', 3:'sinr', 4:'dtxs'} 42 | 43 | # initially, we only check how much data we have 44 | for dirname, dirnames, filenames in os.walk(searchpath): 45 | if depth is None: 46 | depth = len(dirnames) # worst case amount of data 47 | for subdirname in dirnames: 48 | for filename in glob.glob(os.path.join(dirname, subdirname)+'/*settings*'): 49 | config = ConfigParser.RawConfigParser() 50 | config.read(filename) 51 | iterations = int(config.getfloat('General', 'iterations')) 52 | sweep_values.append(int(config.getfloat('General', 'user_rate'))) 53 | numcenterusers = int(config.getfloat('General', 'numcenterusers')) 54 | 55 | sweep_values = sorted(set(sweep_values)) 56 | # list of lists 57 | result = [] 
# result[data_type][sweep_value] 58 | for i in np.arange(data_types): 59 | result.append([]) 60 | for j in np.arange(len(sweep_values)): 61 | result[i].append([]) 62 | result[i][j] = np.zeros([20,1]) # prepare dimensions 63 | 64 | count = np.zeros([len(sweep_values), data_types]) 65 | 66 | # now start filling the result 67 | dep = 0 68 | for dirname, dirnames, filenames in os.walk(searchpath): 69 | for subdirname in dirnames: 70 | for filename in glob.glob(os.path.join(dirname, subdirname)+'/*settings*.cfg'): 71 | 72 | config = ConfigParser.RawConfigParser() 73 | config.read(filename) 74 | rate = int(config.getfloat('General', 'user_rate')) 75 | sleep_alignment = config.get('General', 'sleep_alignment') 76 | initial_power = config.get('General', 'initial_power') 77 | iterations = config.get('General', 'iterations') 78 | 79 | filedata = np.genfromtxt(os.path.join(dirname, subdirname)+'/delivered_per_mobile.csv', delimiter=',') 80 | # seqDTX sequential 81 | if ('DTX' in filename) and (sleep_alignment == 'none'): 82 | index = sequential_index 83 | 84 | # random each iter 85 | elif ('DTX' in filename) and (sleep_alignment == 'random_iter'): 86 | index = random_each_iter_index 87 | 88 | # sinr ordering 89 | elif ('DTX' in filename) and (sleep_alignment == 'sinr'): 90 | index = sinr_index 91 | 92 | # dtx segregation 93 | elif ('DTX' in filename) and (sleep_alignment == 'dtx_segregation'): 94 | index = dtx_segregation 95 | 96 | else: 97 | print 'What is this folder?'+os.path.join(dirname, subdirname)+'/'+filename 98 | filedata = 0 99 | index = 0 100 | 101 | result[index][sweep_values.index(rate)] = np.append(result[index][sweep_values.index(rate)], filedata, axis=1) 102 | count[sweep_values.index(rate), index] += 1 103 | dep += 1 104 | 105 | 106 | # clear placeholder 107 | for i in np.arange(data_types): 108 | for j in np.arange(len(sweep_values)): 109 | result[i][j] = np.delete(result[i][j], 0, 1) 110 | 111 | # build percentages 112 | avg_rates = np.zeros([data_types, 
len(sweep_values)]) 113 | for dt in np.arange(1,data_types): 114 | for si, sv in enumerate(sweep_values): 115 | elements = 100*result[dt][si] # times 100 for users and timeslots per frame. contains all user rates over all iterations 116 | 117 | # percentage at last iteration 118 | avg_rates[dt,si] = np.std(elements) # should be around the value of sv 119 | 120 | # x-axis 121 | avg_rates[0,:] = sweep_values 122 | 123 | target = outpath + '/standard_deviation_of_user_rate_over_target_user_rate.csv' 124 | np.savetxt(target, avg_rates, delimiter=',') 125 | 126 | print avg_rates 127 | print count.T 128 | 129 | 130 | if __name__ == '__main__': 131 | searchpath = sys.argv[1] 132 | outpath = sys.argv[2] 133 | if not os.path.exists(outpath): 134 | os.makedirs(outpath) 135 | main(searchpath, outpath) 136 | -------------------------------------------------------------------------------- /world/hexfuns.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' A variety of functions about hexagons and cells. 4 | 5 | File: hexfuns.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | from numpy import * 17 | import math 18 | from hexagon import * 19 | import basestation 20 | import mobile 21 | 22 | def cellsFromTiers(tiers): 23 | """Tiers are the distance from the central hexagon. This returns the number 24 | overall cells depending on the number of tiers.""" 25 | 26 | return sum(6*r_[1:tiers+1])+1 27 | 28 | def hexmap(tiers, interSiteDistance): 29 | """Returns list of xy-coordinates with unit outer radius for hexagonal 30 | cells""" 31 | 32 | # generate large square map of hex centre points. The even row vertices are 33 | # shifted by half a unit. Everything is normalized, we scale later. 
34 | maxDim = tiers * 2 + 1 35 | innerRadius = 0.5 * interSiteDistance 36 | outerRadius = 2 * innerRadius / math.sqrt(3) 37 | origin = [0,0] 38 | 39 | pointListMap = [] # Generic map 40 | pointList = [] # Only the desired points 41 | 42 | stepX = 2 * innerRadius 43 | stepY = 1.5 * outerRadius 44 | inclusionDistance = (( tiers + 0.1) * stepX ) 45 | 46 | for indexX in arange( stepX * -(maxDim+1)/2., stepX * (maxDim+1)/2.+1, stepX ): 47 | linecount = 1 # keeps track of parity 48 | for indexY in arange( stepY * -(maxDim+1)/2., stepY * (maxDim+1)/2.+1, stepY): 49 | linecount += 1 50 | if ((linecount+tiers)%2 == 0): # puts one point at origin 51 | pointListMap.append(array([indexX + innerRadius, indexY] )) 52 | else: 53 | pointListMap.append(array([indexX, indexY])) 54 | 55 | # Only keep points that are close enough 56 | for point in pointListMap: 57 | if (pointInHex(point, EWHexagon(origin, inclusionDistance))): 58 | pointList.append(point) 59 | 60 | return pointList 61 | 62 | def pointInHex(point, hexagon): 63 | "Tells whether point lies inside hexagon.""" 64 | # determine if a point is inside a given polygon or not 65 | # Polygon is a list of (x,y) pairs. 
66 | # Credit to ariel.com.au 67 | 68 | x = point[0] 69 | y = point[1] 70 | poly = hexagon.vertices() 71 | 72 | n = len(poly) 73 | inside =False 74 | 75 | p1x,p1y = poly[0] 76 | for i in range(n+1): 77 | p2x,p2y = poly[i % n] 78 | if y > min(p1y,p2y): 79 | if y <= max(p1y,p2y): 80 | if x <= max(p1x,p2x): 81 | if p1y != p2y: 82 | xinters = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x 83 | if p1x == p2x or x <= xinters: 84 | inside = not inside 85 | p1x,p1y = p2x,p2y 86 | 87 | return inside 88 | 89 | 90 | def distance(pointA, pointB): 91 | "Wrapper for linalg.norm" 92 | # be able to use default list and numpy types 93 | if pointA.__class__.__name__ == 'list': 94 | pointA = array(pointA) 95 | if pointB.__class__.__name__ == 'list': 96 | pointB = array(pointB) 97 | return linalg.norm(pointA - pointB) 98 | 99 | def uniformlyDistributedPointInHexagon(hexagon): 100 | radius = hexagon.outerRadius * 2. # radius is the side of the square 101 | 102 | # distribute uniformly over square and reroll if outside of hex 103 | while True: 104 | x = radius * random.random() - 0.5 * radius 105 | y = radius * random.random() - 0.5 * radius 106 | 107 | point = array([x,y]) + array(hexagon.center) 108 | 109 | if (pointInHex(point,hexagon)): 110 | break 111 | 112 | #print 'Point not in hex.' 
113 | 114 | return point 115 | 116 | def inner2OuterRadius(inner): 117 | return 2*inner/math.sqrt(3) 118 | 119 | def outer2InnerRadius(outer): 120 | return 0.5*math.sqrt(3)*outer 121 | 122 | 123 | if __name__ == '__main__': 124 | 125 | print 'Test pointInHex:' 126 | point1 = [0,0] 127 | point2 = [1,1] 128 | point3 = [10,10] 129 | hex1 = NSHexagon([0,0],9) 130 | print 'Point1 is in hex: ', pointInHex(point1, hex1) 131 | print 'Point2 is in hex: ', pointInHex(point2, hex1) 132 | print 'Point3 is in hex: ', pointInHex(point3, hex1) 133 | print 'Distance between point1 and point 2 is: ', distance(point1, point2) 134 | 135 | tiers = 2 136 | 137 | # if args were given, replace tiers 138 | import sys 139 | if len(sys.argv) > 1: 140 | tiers = int(sys.argv[1]) 141 | 142 | interSiteDistance = 100 143 | outerRadius = interSiteDistance / math.sqrt(3) 144 | 145 | pointList = hexmap(tiers, interSiteDistance) 146 | 147 | # Plot 148 | import Gnuplot, Gnuplot.funcutils 149 | g = Gnuplot.Gnuplot() 150 | g.title('Testing point distribution.') 151 | g('set data style points') 152 | 153 | # prepare data for plot 154 | hexagons = [] 155 | for point in pointList: 156 | hexagon = NSHexagon(point,outerRadius) 157 | hexagons.append(hexagon.border()) 158 | 159 | # test distribution function 160 | print uniformlyDistributedPointInHexagon(NSHexagon([0,0],outerRadius)) 161 | 162 | g.plot(Gnuplot.Data(pointList, with_='points'), Gnuplot.Data(hexagons, 163 | with_='lines')) 164 | 165 | raw_input('Press button to close...') 166 | 167 | 168 | -------------------------------------------------------------------------------- /results/collect_delivered_rate_over_iterations_seqDTX.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' collects delivered rates from an expected folder structure. Specifically for a comparison of sequential DTX data sets. 4 | outputs several csv files. One for each target sum rate. 
5 | y-axis: delivered rate 6 | x-axis: iterations 7 | 8 | File: collect_missrates_seqDTX.py 9 | ''' 10 | 11 | __author__ = "Hauke Holtkamp" 12 | __credits__ = "Hauke Holtkamp" 13 | __license__ = "unknown" 14 | __version__ = "unknown" 15 | __maintainer__ = "Hauke Holtkamp" 16 | __email__ = "h.holtkamp@gmail.com" 17 | __status__ = "Development" 18 | 19 | import glob 20 | import numpy as np 21 | import sys 22 | import os 23 | import ConfigParser 24 | from scipy.stats.stats import nanmean 25 | def main(searchpath, outpath): 26 | """Search all directories in searchpath for settings files. Handle them according to the settings found in those files. Eventually build a csv file for plotting the sum rate agains the power consumption. 27 | Input: search path, output path 28 | """ 29 | 30 | data_types = 9 31 | rate = 1e6 * 2.0 # 2 Mbps 32 | iterations = None 33 | sweep_values = [] # we do not know beforehand how much data we have 34 | depth = None 35 | 36 | # enum 37 | axis_index = 0 38 | sequential_index = 1 39 | random_each_iter_index = 4 40 | sinr_index = 6 41 | dtx_segregation = 8 42 | 43 | # initially, we only check how much data we have 44 | for dirname, dirnames, filenames in os.walk(searchpath): 45 | if depth is None: 46 | depth = len(dirnames) # worst case amount of data 47 | for subdirname in dirnames: 48 | for filename in glob.glob(os.path.join(dirname, subdirname)+'/*settings*'): 49 | config = ConfigParser.RawConfigParser() 50 | config.read(filename) 51 | iterations = int(config.getfloat('General', 'iterations')) 52 | sweep_values.append(int(config.getfloat('General', 'user_rate'))) 53 | 54 | sweep_values = sorted(set(sweep_values)) 55 | result = np.empty([iterations, len(sweep_values), data_types, depth]) 56 | result[:] = np.nan 57 | count = np.zeros([len(sweep_values), data_types]) 58 | 59 | # now start filling the result 60 | dep = 0 61 | for dirname, dirnames, filenames in os.walk(searchpath): 62 | for subdirname in dirnames: 63 | for filename in 
glob.glob(os.path.join(dirname, subdirname)+'/*settings*.cfg'): 64 | 65 | config = ConfigParser.RawConfigParser() 66 | config.read(filename) 67 | rate = int(config.getfloat('General', 'user_rate')) # sum rate 68 | sleep_alignment = config.get('General', 'sleep_alignment') 69 | 70 | # seqDTX sequential 71 | if ('DTX' in filename) and (sleep_alignment == 'none'): 72 | filedata = np.genfromtxt(os.path.join(dirname, subdirname)+'/delivered_individual.csv', delimiter=',') 73 | index = sequential_index 74 | result[:, sweep_values.index(rate), index, dep] = filedata[1,1:] 75 | count[sweep_values.index(rate), index] += 1 76 | 77 | # random each iter 78 | elif ('DTX' in filename) and (sleep_alignment == 'random_iter'): 79 | filedata = np.genfromtxt(os.path.join(dirname, subdirname)+'/delivered_individual.csv', delimiter=',') 80 | index = random_each_iter_index 81 | result[:, sweep_values.index(rate), index, dep] = filedata[1,1:] 82 | count[sweep_values.index(rate), index] += 1 83 | 84 | # sinr ordering 85 | elif ('DTX' in filename) and (sleep_alignment == 'sinr'): 86 | filedata = np.genfromtxt(os.path.join(dirname, subdirname)+'/delivered_individual.csv', delimiter=',') 87 | index = sinr_index 88 | result[:, sweep_values.index(rate), index, dep] = filedata[1,1:] 89 | count[sweep_values.index(rate), index] += 1 90 | 91 | # dtx segregation 92 | elif ('DTX' in filename) and (sleep_alignment == 'dtx_segregation'): 93 | filedata = np.genfromtxt(os.path.join(dirname, subdirname)+'/delivered_individual.csv', delimiter=',') 94 | index = dtx_segregation 95 | result[:, sweep_values.index(rate), index, dep] = filedata[1,1:] 96 | count[sweep_values.index(rate), index] += 1 97 | 98 | else: 99 | print 'What is this folder?'+os.path.join(dirname, subdirname)+'/'+filename 100 | 101 | if (filedata[1,-1]>2*rate/10).any(): 102 | # find outliers 103 | print filedata[1,-1], filename.split('/')[7] 104 | 105 | dep += 1 106 | 107 | #### 108 | result[np.where(result==0)]=np.nan # remove outage data 
109 | #### 110 | 111 | result = nanmean(result, axis=3) 112 | 113 | for i, s in enumerate(sweep_values): 114 | target = outpath + '/delivered_rate_' + str(s) + '.csv' 115 | res = result[:, i, :] 116 | res[:,axis_index] = np.arange(1, iterations+1) # provide x-axis 117 | res = res.T # expected format 118 | np.savetxt(target, res, delimiter=',') 119 | 120 | print count.T 121 | 122 | 123 | if __name__ == '__main__': 124 | searchpath = sys.argv[1] 125 | outpath = sys.argv[2] 126 | if not os.path.exists(outpath): 127 | os.makedirs(outpath) 128 | main(searchpath, outpath) 129 | -------------------------------------------------------------------------------- /iwf/iwf.py~: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' The inverse water-filling algorithm for bit loads. If it is required for rates, just set bandwidth and time to unity. 4 | The IWF allocates power levels to orthogonal channels such that the sum power consumption is minimized while fulfilling a rate or bit load constraint. 
5 | 6 | Input: 7 | channelValues - SINR on each channel 8 | targetLoad - overall bit load target (over all channels) 9 | noiseIfPowerPerChannel - deprecated 10 | channelBandwidth - bandwidth of each channel in Hz 11 | transmissionTime - duration of each 'channel' in seconds 12 | 13 | Output: 14 | powerlevels - power level in Watt for each input channel 15 | waterlevel - the waterlevel solution 16 | capacity - the achieved bit load to confirm the target load 17 | 18 | File: iwf.py 19 | ''' 20 | 21 | __author__ = "Hauke Holtkamp" 22 | __credits__ = "Hauke Holtkamp" 23 | __license__ = "unknown" 24 | __version__ = "unknown" 25 | __maintainer__ = "Hauke Holtkamp" 26 | __email__ = "h.holtkamp@gmail.com" 27 | __status__ = "Development" 28 | 29 | from numpy import * 30 | 31 | # TODO: channelValues will contain all channel information and noiseIfPower will be removed 32 | def inversewaterfill(channelValues, targetLoad, noiseIfPowerPerChannel, channelBandwidth, transmissionTime): 33 | """The inverse water-filling algorithm. 
34 | For each channel, returns a power level such that overall power is minimized and the target load is achieved""" 35 | 36 | K = channelValues.size # problem dimensions 37 | 38 | # Sort channels by descending quality 39 | channelValuesIndices = argsort(channelValues)[::-1] # we store the indices to reorder in the end 40 | channelValuesSorted = channelValues[channelValuesIndices] 41 | nifValuesSorted = noiseIfPowerPerChannel[channelValuesIndices] 42 | 43 | # Initial waterlevel for K = 0 44 | k = 1 45 | channelSet = channelValuesSorted[0:k] 46 | nifSet = nifValuesSorted[0:k] 47 | waterlevelExp = waterlevelExponent(targetLoad, channelBandwidth, transmissionTime, channelSet, k, nifSet) 48 | 49 | # Keep lowering the waterlevel until it hits the comparison wall 50 | while (waterlevelExp > log2(comparison(nifValuesSorted[k], transmissionTime, channelBandwidth, channelValuesSorted[k]))): 51 | k = k+1 52 | channelSet = channelValuesSorted[0:k] 53 | nifSet = nifValuesSorted[0:k] 54 | waterlevelExp = waterlevelExponent(targetLoad, channelBandwidth, transmissionTime, channelSet , k, nifSet) 55 | 56 | if k == K: 57 | break 58 | 59 | # Waterlevel has been found. Use it to find power levels. 
60 | waterlvl = waterlevel(waterlevelExp) 61 | powerlvls = powerlevels(waterlvl, channelBandwidth, transmissionTime, nifSet, channelSet) 62 | powerlvls = concatenate([powerlvls,zeros([K-powerlvls.size])]) #TODO: need to handle when k == 1 63 | 64 | # Sort back to match initial input 65 | powerlvlsOrig = ones_like(powerlvls) # allocate np array 66 | powerlvlsOrig[channelValuesIndices] = powerlvls 67 | 68 | # Finish up 69 | cap = capacity(channelBandwidth, transmissionTime, powerlvlsOrig, channelValues, noiseIfPowerPerChannel) # this really just confirms we haven't made a mistake 70 | return powerlvlsOrig, waterlvl, cap 71 | 72 | def waterlevel(exponent): 73 | """Converts the exponent to waterlevel.""" 74 | return 2**exponent 75 | 76 | def waterlevelExponent(targetLoad, channelBandwidth, transmissionTime, channelValues, k, noiseIfPowerPerChannel): 77 | """Numerically, it is advantageous to handle exponents rather than waterlevels. See our paper about the details.""" 78 | return targetLoad/(channelBandwidth*transmissionTime*(k)) - (1./(k))*sum(log2(channelValues*transmissionTime*channelBandwidth/(noiseIfPowerPerChannel*log(2)))) 79 | 80 | def powerlevels(waterlevel, channelBandwidth, transmissionTime, noiseIfPowerPerChannel, channelValues): 81 | """Finds powerlevels from waterlevel and channel values.""" 82 | return waterlevel * channelBandwidth * transmissionTime / log(2) - noiseIfPowerPerChannel / channelValues 83 | 84 | def comparison(noiseIfPowerPerChannel, transmissionTime, channelBandwidth, channelValue): 85 | """At one point in the algorithm, a check is required whether to proceed. See paper for why.""" 86 | return noiseIfPowerPerChannel * log(2) / (channelValue*channelBandwidth*transmissionTime) 87 | 88 | def capacity(channelBandwidth, transmissionTime, powerlvls, channelValues, noiseIfPowerPerChannel): 89 | """Returns the Shannon capacity over multiple channels.""" 90 | return channelBandwidth * transmissionTime * sum(log2( 1. 
+ powerlvls * channelValues / ( noiseIfPowerPerChannel))) 91 | 92 | if __name__ == '__main__': 93 | systemBandwidth = 1. 94 | systemTime = 1. 95 | targetLoad = 1e2 96 | channelStateAvg = 1e-5 97 | from utils import utils 98 | for i in range(10): 99 | H = utils.rayleighChannel(2,2) 100 | import scipy.linalg 101 | eigvls, eigvects = linalg.eig(dot(H,H.conj().T)) 102 | if i is 0: 103 | eigvals = eigvls 104 | else: 105 | eigvals = append(eigvals,eigvls) 106 | eigvals = real(eigvals) # clear numberical imaginary parts 107 | 108 | # DEBUG 109 | # eigvals = array([6,6,4,6,4,6])*0.1 110 | 111 | subcarriers = 1. 112 | timeslots = 1. 113 | channelBandwidth = systemBandwidth / subcarriers 114 | transmissionTime = systemTime / timeslots 115 | noiseIfPowerPerChannel = 1e-10 / subcarriers 116 | 117 | powerlvls, waterlvl, cap = inversewaterfill(eigvals, targetLoad, noiseIfPowerPerChannel, channelBandwidth, transmissionTime) 118 | print powerlvls, waterlvl, cap 119 | -------------------------------------------------------------------------------- /fsf/fsf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Frequency selective fading module. 4 | % frequencySelectiveFading(N, T, centerFrequency, totalTime, bandwidth, relativeVelocity) 5 | % Generates the frequency and time dependent channel response based on a model from the UoE. 6 | % The model was originally written by B. Ghimire in 2006, but documentation was not 7 | % available. This is a copy paste application. 8 | % The origin of this the WINNER project. 9 | % 10 | % Input: 11 | % - N: Number of frequency chunks, e.g. chunks or subcarriers 12 | % - T: Number of time chunks, e.g. subframes or time slots 13 | % - centerFrequency: frequency of the center chunk. In Hz. 14 | % - totalTime: total time considered. Each time chunk has duration 15 | % totalTime/T. In seconds. 16 | % - bandwidth: total transmission bandwidth. 
Each frequency chunk has 17 | % frequency spacing bandwidth/N. In Hz. 18 | % - relativeVelocity: in meters per second. Represents relative speed between 19 | % transmitter and receiver. Doppler frequency derived by relativeVelocity[m/s] * 20 | % centerFrequency / speedOfLight 21 | % 22 | % Output: 23 | % - H: size(T,N): complex value 24 | 25 | File: fsf.py 26 | ''' 27 | 28 | __author__ = "Hauke Holtkamp" 29 | __credits__ = "Hauke Holtkamp" 30 | __license__ = "unknown" 31 | __version__ = "unknown" 32 | __maintainer__ = "Hauke Holtkamp" 33 | __email__ = "h.holtkamp@gmail.com" 34 | __status__ = "Development" 35 | 36 | from numpy import * 37 | import numpy as np 38 | from utils import utils 39 | 40 | def fsf(N, T, centerFrequency, totalTime, bandwidth, relativeVelocity): 41 | """Returns frequency selective fading over a number of subcarriers and time slots.""" 42 | delay_taps = 1e-05 * array([0, 43 | 0.0060, 44 | 0.0075, 45 | 0.0145, 46 | 0.0150, 47 | 0.0155, 48 | 0.0190, 49 | 0.0220, 50 | 0.0225, 51 | 0.0230, 52 | 0.0335, 53 | 0.0370, 54 | 0.0430, 55 | 0.0510, 56 | 0.0685, 57 | 0.0725, 58 | 0.0735, 59 | 0.0800, 60 | 0.0960, 61 | 0.1020, 62 | 0.1100, 63 | 0.1210, 64 | 0.1845]) 65 | delay_taps.shape = (23,1) # promote 66 | tapGains_dB = array([-6.4000, 67 | -3.4000, 68 | -2.0000, 69 | -3.0000, 70 | -3.5500, 71 | -7.0000, 72 | -3.4000, 73 | -3.4000, 74 | -5.6000, 75 | -7.4000, 76 | -4.6000, 77 | -7.8000, 78 | -7.8000, 79 | -9.3000, 80 | -12.0000, 81 | -8.5000, 82 | -13.2000, 83 | -11.2000, 84 | -20.8000, 85 | -14.5000, 86 | -11.7000, 87 | -17.2000, 88 | -16.7000]) 89 | tapGains_dB.shape = (23,1) # promote 90 | 91 | startTime = 0 # no effect 92 | chunkwidth = bandwidth/N 93 | N_harmonics = 5 # magic number from model 94 | numTaps = delay_taps.size 95 | speedOfLight = 3e8 96 | dopplerFrequency = relativeVelocity * centerFrequency / speedOfLight 97 | 98 | chunkCenters = linspace(centerFrequency-bandwidth/2,centerFrequency+bandwidth/2-chunkwidth, num=N) 99 | timeStamp = 
linspace(startTime, startTime+totalTime*(1-1./T),num=T) 100 | 101 | tapGains = utils.dBToW(tapGains_dB) 102 | tapGainsNorm = tapGains/np.sum(tapGains) 103 | N_chunks = chunkCenters.size 104 | 105 | path_rand = random.rand(numTaps, N_harmonics) 106 | phase_rand = random.rand(numTaps, N_harmonics) 107 | 108 | H = empty([chunkCenters.size, timeStamp.size ],dtype=complex) 109 | H[:] = nan 110 | for t in range(timeStamp.size): 111 | disc_dopp_freq = dopplerFrequency * cos(2*math.pi*path_rand) 112 | disc_dopp_phase = 2*math.pi*phase_rand 113 | 114 | for f in range(chunkCenters.size): 115 | H[f,t] = instantChunkFading(timeStamp[t], chunkCenters[f], tapGainsNorm,disc_dopp_phase, disc_dopp_freq, delay_taps, N_harmonics) 116 | 117 | # normalize 118 | H[:,t] = H[:,t]/sum(abs(H[:,t]))*N_chunks 119 | 120 | return H, chunkCenters, timeStamp 121 | 122 | def instantChunkFading(t, f, tapGainsNorm,disc_dopp_phase, disc_dopp_freq, delay_taps, N_harmonics): 123 | """Fading model over a single frequency chunk.""" 124 | return ((np.sum(tapGainsNorm*np.sum(exp(1.j*disc_dopp_freq*t + 1.j*disc_dopp_phase - 1.j*2*math.pi*f*repeat(delay_taps,N_harmonics,axis=1)),axis=1).reshape(23,1),axis=0))/N_harmonics).reshape(1,1)[0][0] 125 | 126 | if __name__ == '__main__': 127 | N = 50 128 | T = 10 129 | centerFrequency = 2e9 130 | totalTime = 0.01 131 | bandwidth = 1e7 132 | relativeVelocity = 30 133 | H, chunkCenters, timeStamp = fsf(N, T, centerFrequency, totalTime, bandwidth, relativeVelocity) 134 | 135 | 136 | # Plotting 137 | from mpl_toolkits.mplot3d import axes3d, Axes3D 138 | import matplotlib.pyplot as plt 139 | 140 | # imports specific to the plots in this example 141 | import numpy as np 142 | from matplotlib import cm 143 | from mpl_toolkits.mplot3d.axes3d import get_test_data 144 | from matplotlib.ticker import LinearLocator, FormatStrFormatter 145 | 146 | # Twice as wide as it is tall. 
#!/usr/bin/env python

'''The margin adaptive or Inverse Water-Filling (IWF) algorithm for bit loads.
If it is required for rates, just set bandwidth and time to unity.
The IWF allocates power levels to orthogonal channels such that the sum power
consumption is minimized while fulfilling a rate or bit load constraint.

Input:
    channelValues - SINR on each channel
    targetLoad - overall bit load target (over all channels)
    noiseIfPowerPerChannel - noise-plus-interference power per channel (scalar or array)
    channelBandwidth - bandwidth of each channel in Hz
    transmissionTime - duration of each 'channel' in seconds

Output:
    powerlevels - power level in Watt for each input channel
    waterlevel - the waterlevel solution
    capacity - the achieved bit load to confirm the target load

File: iwf.py
'''

__author__ = "Hauke Holtkamp"
__credits__ = "Hauke Holtkamp"
__license__ = "unknown"
__version__ = "unknown"
__maintainer__ = "Hauke Holtkamp"
__email__ = "h.holtkamp@gmail.com"
__status__ = "Development"

import numpy as np


def inversewaterfill(channelValues, targetLoad, noiseIfPowerPerChannel, channelBandwidth, transmissionTime):
    """The inverse water-filling algorithm.

    For each channel, returns a power level such that overall power is
    minimized and the target load is achieved.

    Returns (powerlevels, waterlevel, capacity)."""
    channelValues = np.asarray(channelValues, dtype=float)
    K = channelValues.size  # problem dimension

    # Accept a scalar noise power for convenience (the __main__ demo passes one);
    # broadcast it to one value per channel. copy() because broadcast views are read-only.
    noiseIfPowerPerChannel = np.broadcast_to(
        np.asarray(noiseIfPowerPerChannel, dtype=float), channelValues.shape).copy()

    # Sort channels by descending quality; keep the permutation to undo it later.
    order = np.argsort(channelValues)[::-1]
    hSorted = channelValues[order]
    nifSorted = noiseIfPowerPerChannel[order]

    # Start with only the best channel active and keep admitting channels while
    # the waterlevel still exceeds the next channel's admission threshold.
    # BUGFIX: the `k < K` guard prevents an IndexError for K == 1 (the original
    # evaluated nifSorted[k] before its `k == K` break could trigger).
    k = 1
    wlExp = waterlevelExponent(targetLoad, channelBandwidth, transmissionTime,
                               hSorted[:k], k, nifSorted[:k])
    while k < K and wlExp > np.log2(comparison(nifSorted[k], transmissionTime,
                                               channelBandwidth, hSorted[k])):
        k += 1
        wlExp = waterlevelExponent(targetLoad, channelBandwidth, transmissionTime,
                                   hSorted[:k], k, nifSorted[:k])

    # Waterlevel found: power for the k active channels, zero for the rest.
    waterlvl = waterlevel(wlExp)
    activePowers = powerlevels(waterlvl, channelBandwidth, transmissionTime,
                               nifSorted[:k], hSorted[:k])
    powerlvlsSorted = np.concatenate([activePowers, np.zeros(K - k)])

    # Undo the sort so the output matches the input channel order.
    powerlvlsOrig = np.empty_like(powerlvlsSorted)
    powerlvlsOrig[order] = powerlvlsSorted

    # Confirms we haven't made a mistake: capacity should equal targetLoad.
    cap = capacity(channelBandwidth, transmissionTime, powerlvlsOrig,
                   channelValues, noiseIfPowerPerChannel)
    return powerlvlsOrig, waterlvl, cap


def waterlevel(exponent):
    """Converts the exponent to waterlevel."""
    return 2 ** exponent


def waterlevelExponent(targetLoad, channelBandwidth, transmissionTime, channelValues, k, noiseIfPowerPerChannel):
    """Waterlevel exponent for a candidate active set of k channels.
    Numerically it is advantageous to handle exponents rather than waterlevels."""
    return (targetLoad / (channelBandwidth * transmissionTime * k)
            - (1. / k) * np.sum(np.log2(channelValues * transmissionTime * channelBandwidth
                                        / (noiseIfPowerPerChannel * np.log(2)))))


def powerlevels(waterlevel, channelBandwidth, transmissionTime, noiseIfPowerPerChannel, channelValues):
    """Finds powerlevels from waterlevel and channel values."""
    return (waterlevel * channelBandwidth * transmissionTime / np.log(2)
            - noiseIfPowerPerChannel / channelValues)


def comparison(noiseIfPowerPerChannel, transmissionTime, channelBandwidth, channelValue):
    """Admission threshold: the algorithm keeps adding channels while the
    waterlevel exceeds this wall."""
    return noiseIfPowerPerChannel * np.log(2) / (channelValue * channelBandwidth * transmissionTime)


def capacity(channelBandwidth, transmissionTime, powerlvls, channelValues, noiseIfPowerPerChannel):
    """Returns the Shannon capacity (bit load) summed over all channels."""
    return channelBandwidth * transmissionTime * np.sum(
        np.log2(1. + powerlvls * channelValues / noiseIfPowerPerChannel))


if __name__ == '__main__':
    systemBandwidth = 1.
    systemTime = 1.
    targetLoad = 1e2
    from utils import utils
    import scipy.linalg
    eigvals = np.empty(0)
    for i in range(10):
        H = utils.rayleighChannel(2, 2)
        eigvls = scipy.linalg.eig(np.dot(H, H.conj().T))[0]
        eigvals = np.append(eigvals, eigvls)
    eigvals = np.real(eigvals)  # clear numerical imaginary parts

    subcarriers = 1.
    timeslots = 1.
    channelBandwidth = systemBandwidth / subcarriers
    transmissionTime = systemTime / timeslots
    noiseIfPowerPerChannel = 1e-10 / subcarriers

    powerlvls, waterlvl, cap = inversewaterfill(eigvals, targetLoad, noiseIfPowerPerChannel, channelBandwidth, transmissionTime)
    print(powerlvls, waterlvl, cap)
import numpy as np


def fsf(N, T, centerFrequency, totalTime, bandwidth, relativeVelocity):
    """Returns frequency selective fading over N frequency chunks and T time slots.

    Input:
        N - number of frequency chunks
        T - number of time chunks
        centerFrequency - frequency of the center chunk, Hz
        totalTime - total time considered, seconds (each slot is totalTime/T)
        bandwidth - total transmission bandwidth, Hz (each chunk is bandwidth/N)
        relativeVelocity - m/s, sets the Doppler frequency
    Output:
        (H, chunkCenters, timeStamp) where H is complex of shape (N, T).
    """
    # Tap delays/gains are fixed model parameters (WINNER-derived, per module docstring).
    delay_taps = 1e-05 * np.array(
        [0, 0.0060, 0.0075, 0.0145, 0.0150, 0.0155, 0.0190, 0.0220, 0.0225,
         0.0230, 0.0335, 0.0370, 0.0430, 0.0510, 0.0685, 0.0725, 0.0735,
         0.0800, 0.0960, 0.1020, 0.1100, 0.1210, 0.1845]).reshape(-1, 1)
    tapGains_dB = np.array(
        [-6.4000, -3.4000, -2.0000, -3.0000, -3.5500, -7.0000, -3.4000,
         -3.4000, -5.6000, -7.4000, -4.6000, -7.8000, -7.8000, -9.3000,
         -12.0000, -8.5000, -13.2000, -11.2000, -20.8000, -14.5000,
         -11.7000, -17.2000, -16.7000]).reshape(-1, 1)

    startTime = 0  # no effect
    chunkwidth = bandwidth / N
    N_harmonics = 5  # magic number from model
    numTaps = delay_taps.size
    speedOfLight = 3e8
    dopplerFrequency = relativeVelocity * centerFrequency / speedOfLight

    chunkCenters = np.linspace(centerFrequency - bandwidth / 2,
                               centerFrequency + bandwidth / 2 - chunkwidth, num=N)
    timeStamp = np.linspace(startTime, startTime + totalTime * (1 - 1. / T), num=T)

    # dB -> linear power (inlined standard conversion, was utils.dBToW), then normalize.
    tapGains = 10.0 ** (tapGains_dB / 10.0)
    tapGainsNorm = tapGains / np.sum(tapGains)
    N_chunks = chunkCenters.size

    path_rand = np.random.rand(numTaps, N_harmonics)
    phase_rand = np.random.rand(numTaps, N_harmonics)

    # BUGFIX: the original used math.pi without importing math (NameError); np.pi here.
    # Hoisted out of the time loop: both arrays are independent of t.
    disc_dopp_freq = dopplerFrequency * np.cos(2 * np.pi * path_rand)
    disc_dopp_phase = 2 * np.pi * phase_rand

    H = np.full([chunkCenters.size, timeStamp.size], np.nan, dtype=complex)
    for t in range(timeStamp.size):
        for f in range(chunkCenters.size):
            H[f, t] = instantChunkFading(timeStamp[t], chunkCenters[f], tapGainsNorm,
                                         disc_dopp_phase, disc_dopp_freq,
                                         delay_taps, N_harmonics)
        # Normalize each time slot to a mean chunk magnitude of one.
        H[:, t] = H[:, t] / np.sum(np.abs(H[:, t])) * N_chunks

    return H, chunkCenters, timeStamp


def instantChunkFading(t, f, tapGainsNorm, disc_dopp_phase, disc_dopp_freq, delay_taps, N_harmonics):
    """Fading model over a single frequency chunk at time t and frequency f.
    Sums the Doppler harmonics per tap, then the gain-weighted taps."""
    harmonics = np.exp(1j * disc_dopp_freq * t + 1j * disc_dopp_phase
                       - 1j * 2 * np.pi * f * np.repeat(delay_taps, N_harmonics, axis=1))
    perTap = np.sum(harmonics, axis=1).reshape(-1, 1)  # was hard-coded (23,1); generalized
    return complex(np.sum(tapGainsNorm * perTap) / N_harmonics)
147 | fig = plt.figure() 148 | 149 | #---- First subplot 150 | # ax = fig.gca(projection='3d') # changes in matplotlib 0.99x 151 | ax = Axes3D(fig) 152 | 153 | Y = chunkCenters 154 | X = timeStamp 155 | Z = abs(H) 156 | X, Y = np.meshgrid(X,Y) 157 | 158 | 159 | surf = ax.plot_surface(Y, X, Z, rstride=1, cstride=1, cmap=cm.jet, 160 | linewidth=0, antialiased=False) 161 | # ax.set_zlim3d(-1.01, 1.01) 162 | 163 | fig.colorbar(surf, shrink=0.5, aspect=10) 164 | # ax.zaxis.set_major_locator(LinearLocator(10)) 165 | # ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f')) 166 | 167 | plt.show() 168 | -------------------------------------------------------------------------------- /results/collect_percentage_over_satifsfied_users_over_delta_sumrate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | '''Collect the percentage of satisfied users over the target rate. 4 | y-axis: percentage of satisfied users 5 | x-axis: target rate 6 | 7 | ''' 8 | 9 | __author__ = "Hauke Holtkamp" 10 | __credits__ = "Hauke Holtkamp" 11 | __license__ = "unknown" 12 | __version__ = "unknown" 13 | __maintainer__ = "Hauke Holtkamp" 14 | __email__ = "h.holtkamp@gmail.com" 15 | __status__ = "Development" 16 | 17 | import glob 18 | import numpy as np 19 | import sys 20 | import os 21 | import ConfigParser 22 | from scipy.stats.stats import nanmean 23 | def main(searchpath, outpath, delta): 24 | """Search all directories in searchpath for settings files. Handle them according to the settings found in those files. Eventually, collect a percentage of achieving the target rate minus a delta. 25 | Input: search path, output path 26 | """ 27 | 28 | # delta factor. Change the evaluation of the target rate. 29 | # e.g. 
2300000 (1 + -0.5) = 1150000 30 | 31 | data_types = 5 32 | rate = 1e6 * 2.0 # 2 Mbps 33 | iterations = None 34 | sweep_values = [] # we do not know beforehand how much data we have 35 | depth = None 36 | 37 | # enum 38 | axis_index = 0 39 | sequential_index = 1 40 | random_each_iter_index = 2 41 | sinr_index = 3 42 | dtx_segregation = 4 43 | 44 | data_str = {1:'none', 2:'rand', 3:'sinr', 4:'dtxs'} 45 | 46 | # initially, we only check how much data we have 47 | for dirname, dirnames, filenames in os.walk(searchpath): 48 | if depth is None: 49 | depth = len(dirnames) # worst case amount of data 50 | for subdirname in dirnames: 51 | for filename in glob.glob(os.path.join(dirname, subdirname)+'/*settings*'): 52 | config = ConfigParser.RawConfigParser() 53 | config.read(filename) 54 | iterations = int(config.getfloat('General', 'iterations')) 55 | sweep_values.append(int(config.getfloat('General', 'user_rate'))) 56 | numcenterusers = int(config.getfloat('General', 'numcenterusers')) 57 | 58 | sweep_values = sorted(set(sweep_values)) # e.g. 
1000000, 2300000 59 | 60 | # list of lists 61 | result = [] # result[data_type][sweep_value] 62 | for i in np.arange(data_types): 63 | result.append([]) 64 | for j in np.arange(len(sweep_values)): 65 | result[i].append([]) 66 | result[i][j] = np.zeros([20,1]) # prepare dimensions 67 | 68 | count = np.zeros([len(sweep_values), data_types]) 69 | 70 | # now start filling the result 71 | dep = 0 72 | for dirname, dirnames, filenames in os.walk(searchpath): 73 | for subdirname in dirnames: 74 | for filename in glob.glob(os.path.join(dirname, subdirname)+'/*settings*.cfg'): 75 | 76 | config = ConfigParser.RawConfigParser() 77 | config.read(filename) 78 | rate = int(config.getfloat('General', 'user_rate')) 79 | sleep_alignment = config.get('General', 'sleep_alignment') 80 | initial_power = config.get('General', 'initial_power') 81 | iterations = config.get('General', 'iterations') 82 | 83 | filedata = np.genfromtxt(os.path.join(dirname, subdirname)+'/delivered_per_mobile.csv', delimiter=',') 84 | # seqDTX sequential 85 | if ('DTX' in filename) and (sleep_alignment == 'none'): 86 | index = sequential_index 87 | 88 | # random each iter 89 | elif ('DTX' in filename) and (sleep_alignment == 'random_iter'): 90 | index = random_each_iter_index 91 | 92 | # sinr ordering 93 | elif ('DTX' in filename) and (sleep_alignment == 'sinr'): 94 | index = sinr_index 95 | 96 | # dtx segregation 97 | elif ('DTX' in filename) and (sleep_alignment == 'dtx_segregation'): 98 | index = dtx_segregation 99 | 100 | else: 101 | print 'What is this folder?'+os.path.join(dirname, subdirname)+'/'+filename 102 | filedata = 0 103 | index = 0 104 | 105 | result[index][sweep_values.index(rate)] = np.append(result[index][sweep_values.index(rate)], filedata, axis=1) 106 | count[sweep_values.index(rate), index] += 1 107 | dep += 1 108 | 109 | # clear placeholder 110 | for i in np.arange(data_types): 111 | for j in np.arange(len(sweep_values)): 112 | result[i][j] = np.delete(result[i][j], 0, 1) 113 | 114 | # 
build percentages 115 | perc_tot = np.zeros([data_types, len(sweep_values)]) 116 | for dt in np.arange(1,data_types): 117 | for si, sv in enumerate(sweep_values): 118 | elements = 100*result[dt][si]/(sv * (1 + delta)) # times 100 for users and timeslots per frame 119 | 120 | # percentage at last iteration 121 | perc_tot[dt,si] = (elements[-1,:] > 1).sum()/float(len(elements[-1,:])) 122 | 123 | # x-axis 124 | perc_tot[0,:] = np.array(sweep_values) * (1+delta) 125 | 126 | target = outpath + '/percentage_satisfied_over_delta_rate' + str(delta) + '.csv' 127 | np.savetxt(target, perc_tot, delimiter=',') 128 | 129 | print perc_tot 130 | print count.T 131 | 132 | 133 | if __name__ == '__main__': 134 | searchpath = sys.argv[1] 135 | outpath = sys.argv[2] 136 | delta = float(sys.argv[3]) 137 | if not os.path.exists(outpath): 138 | os.makedirs(outpath) 139 | main(searchpath, outpath, delta) 140 | -------------------------------------------------------------------------------- /results/collect_percentage_over_satifsfied_users_over_target_sumrate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | '''Collect the percentage of satisfied users over the target rate. 4 | y-axis: percentage of satisfied users 5 | x-axis: target rate 6 | 7 | ''' 8 | 9 | __author__ = "Hauke Holtkamp" 10 | __credits__ = "Hauke Holtkamp" 11 | __license__ = "unknown" 12 | __version__ = "unknown" 13 | __maintainer__ = "Hauke Holtkamp" 14 | __email__ = "h.holtkamp@gmail.com" 15 | __status__ = "Development" 16 | 17 | import glob 18 | import numpy as np 19 | import sys 20 | import os 21 | import ConfigParser 22 | from scipy.stats.stats import nanmean 23 | def main(searchpath, outpath): 24 | """Search all directories in searchpath for settings files. Handle them according to the settings found in those files. Eventually build a csv file for plotting the sum rate agains the power consumption. 
25 | Input: search path, output path 26 | """ 27 | 28 | data_types = 5 29 | rate = 1e6 * 2.0 # 2 Mbps 30 | iterations = None 31 | sweep_values = [] # we do not know beforehand how much data we have 32 | depth = None 33 | 34 | # enum 35 | axis_index = 0 36 | sequential_index = 1 37 | random_each_iter_index = 2 38 | sinr_index = 3 39 | dtx_segregation = 4 40 | 41 | data_str = {1:'none', 2:'rand', 3:'sinr', 4:'dtxs'} 42 | 43 | # initially, we only check how much data we have 44 | for dirname, dirnames, filenames in os.walk(searchpath): 45 | if depth is None: 46 | depth = len(dirnames) # worst case amount of data 47 | for subdirname in dirnames: 48 | for filename in glob.glob(os.path.join(dirname, subdirname)+'/*settings*'): 49 | config = ConfigParser.RawConfigParser() 50 | config.read(filename) 51 | iterations = int(config.getfloat('General', 'iterations')) 52 | sweep_values.append(int(config.getfloat('General', 'user_rate'))) 53 | numcenterusers = int(config.getfloat('General', 'numcenterusers')) 54 | 55 | sweep_values = sorted(set(sweep_values)) 56 | # list of lists 57 | result = [] # result[data_type][sweep_value] 58 | for i in np.arange(data_types): 59 | result.append([]) 60 | for j in np.arange(len(sweep_values)): 61 | result[i].append([]) 62 | result[i][j] = np.zeros([20,1]) # prepare dimensions 63 | 64 | count = np.zeros([len(sweep_values), data_types]) 65 | 66 | # now start filling the result 67 | dep = 0 68 | for dirname, dirnames, filenames in os.walk(searchpath): 69 | for subdirname in dirnames: 70 | for filename in glob.glob(os.path.join(dirname, subdirname)+'/*settings*.cfg'): 71 | 72 | config = ConfigParser.RawConfigParser() 73 | config.read(filename) 74 | rate = int(config.getfloat('General', 'user_rate')) 75 | sleep_alignment = config.get('General', 'sleep_alignment') 76 | initial_power = config.get('General', 'initial_power') 77 | iterations = config.get('General', 'iterations') 78 | 79 | filedata = np.genfromtxt(os.path.join(dirname, 
subdirname)+'/delivered_per_mobile.csv', delimiter=',') 80 | # seqDTX sequential 81 | if ('DTX' in filename) and (sleep_alignment == 'none'): 82 | index = sequential_index 83 | 84 | # random each iter 85 | elif ('DTX' in filename) and (sleep_alignment == 'random_iter'): 86 | index = random_each_iter_index 87 | 88 | # sinr ordering 89 | elif ('DTX' in filename) and (sleep_alignment == 'sinr'): 90 | index = sinr_index 91 | 92 | # dtx segregation 93 | elif ('DTX' in filename) and (sleep_alignment == 'dtx_segregation'): 94 | index = dtx_segregation 95 | 96 | else: 97 | print 'What is this folder?'+os.path.join(dirname, subdirname)+'/'+filename 98 | filedata = 0 99 | index = 0 100 | 101 | result[index][sweep_values.index(rate)] = np.append(result[index][sweep_values.index(rate)], filedata, axis=1) 102 | count[sweep_values.index(rate), index] += 1 103 | dep += 1 104 | 105 | # clear placeholder 106 | for i in np.arange(data_types): 107 | for j in np.arange(len(sweep_values)): 108 | result[i][j] = np.delete(result[i][j], 0, 1) 109 | 110 | # build percentages 111 | perc_tot = np.zeros([data_types, len(sweep_values)]) 112 | for dt in np.arange(1,data_types): 113 | for si, sv in enumerate(sweep_values): 114 | elements = 100*result[dt][si]/sv # times 100 for users and timeslots per frame 115 | # percentages for each iteration 116 | mean_el = np.sum((elements>1),axis=1)/float(elements.shape[1]) 117 | target = outpath + 'percentage_satisfied_over_iters_' + data_str[dt] + '_' + str(sv) + '.csv' 118 | np.savetxt(target, mean_el, delimiter=',') 119 | 120 | # percentage at last iteration 121 | perc_tot[dt,si] = (elements[-1,:] > 1).sum()/float(len(elements[-1,:])) 122 | 123 | # x-axis 124 | perc_tot[0,:] = sweep_values 125 | 126 | target = outpath + '/percentage_satisfied_over_target_rate.csv' 127 | np.savetxt(target, perc_tot, delimiter=',') 128 | 129 | print perc_tot 130 | print count.T 131 | 132 | 133 | if __name__ == '__main__': 134 | searchpath = sys.argv[1] 135 | outpath = 
sys.argv[2] 136 | if not os.path.exists(outpath): 137 | os.makedirs(outpath) 138 | main(searchpath, outpath) 139 | -------------------------------------------------------------------------------- /optim/test_optim.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Unit test for the optimization module 4 | 5 | File: test_optim.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | from optim import optimMinPow, optimMinPow2x2DTX, optimMinPow2x2 17 | import unittest 18 | import numpy as np 19 | from utils import utils 20 | import scipy.linalg 21 | 22 | class TestSequenceFunctions(unittest.TestCase): 23 | 24 | def setUp(self): 25 | pass 26 | 27 | def test_optimPC(self): 28 | # test for simple problem with known outcome 29 | H = np.array([[[1.-1j,-1.],[-1.,1.]],[[1.-1j,1.],[-1.,1.]],[[0.5,1.j],[1.,-1.j]]]) 30 | for k in np.arange(3): 31 | H[k,:,:] = scipy.dot(H[k,:,:], H[k,:,:].conj().T) 32 | noisepower = np.ones(3) 33 | rate = 1 34 | linkBandwidth = 1 35 | p0 = 0 36 | m = 1 37 | pMax = 10 38 | 39 | obj, solution, status = optimMinPow.optimizePC(H, noisepower, rate, linkBandwidth, pMax, p0, m) 40 | answerObj = 2.0422355422276 41 | answerSol = np.array([ 0.3823792 , 0.28708598, 0.33053482]) 42 | np.testing.assert_almost_equal(obj, answerObj) 43 | np.testing.assert_almost_equal(solution, answerSol) 44 | 45 | def test_optimPCDTX_trivial(self): 46 | # test for simple problem with known outcome 47 | H = np.ones([3,2,2]) 48 | for k in np.arange(3): 49 | H[k,:,:] = scipy.dot(H[k,:,:], H[k,:,:].conj().T) 50 | noisepower = np.ones(3) 51 | rate = 1 52 | linkBandwidth = 1 53 | p0 = 10 54 | m = 2 55 | pS = 5 56 | pMax = 10 57 | 58 | obj, solution, status = optimMinPow.optimizePCDTX(H, noisepower, rate, 
linkBandwidth, pMax, p0, m, pS, 0) 59 | answerObj = 17.000000115485285 60 | answerSol = np.array([ 0.33333334, 0.33333334, 0.33333334, 0. ]) 61 | np.testing.assert_almost_equal(obj, answerObj) 62 | np.testing.assert_almost_equal(solution, answerSol) 63 | 64 | for k in np.arange(H.shape[0]): 65 | ptx = optimMinPow2x2DTX.ptxOfMu(solution[k], rate, linkBandwidth, noisepower[k], H[k,:,:]) 66 | rate_test = solution[k]*np.real(utils.ergMIMOCapacityCDITCSIR(H[k,:,:], ptx)) 67 | np.testing.assert_almost_equal(rate_test, rate) 68 | 69 | # TODO: Find out why this fails to find a solution 70 | # CSI_Optim = np.array([[[ 7.47e-04+0.j, 7.47e-04+0.j], 71 | # [ 7.47e-04+0.j, 7.47e-04+0.j]], 72 | # 73 | # [[ 6.31e-05+0.j, 6.31e-05+0.j], 74 | # [ 6.31e-05+0.j, 6.31e-05+0.j]], 75 | # 76 | # [[ 3.47e-05+0.j, 3.47e-05+0.j], 77 | # [ 3.47e-05+0.j, 3.47e-05+0.j]]]) 78 | # print np.real(scipy.linalg.eig(np.dot(CSI_Optim[0,:,:],CSI_Optim[0,:,:].conj().T))[0]) 79 | # import pdb; pdb.set_trace() 80 | # 81 | # PnoiseIf_Optim = np.array([ 4.00e-14, 4.00e-14, 4.00e-14]) 82 | # rate = 35000 83 | # bandwidth = 1e7 84 | # pMax = 40 85 | # p0 = 200 86 | # m = 3.75 87 | # pS = 90 88 | # pSupplyOptim, resourceAlloc, status = optimMinPow.optimizePCDTX(CSI_Optim, PnoiseIf_Optim, rate, bandwidth, pMax, p0, m, pS) 89 | 90 | def test_optimPCDTX(self): 91 | # test for simple problem with known outcome 92 | H = np.array([[[1.-1j,-1.],[-1.,1.]],[[1.-1j,1.],[-1.,1.]],[[0.5,1.j],[1.,-1.j]]]) 93 | for k in np.arange(3): 94 | H[k,:,:] = scipy.dot(H[k,:,:], H[k,:,:].conj().T) 95 | noisepower = np.ones(3) 96 | rate = 1 97 | linkBandwidth = 1 98 | p0 = 10 99 | m = 2 100 | pS = 5 101 | pMax = 10 102 | 103 | obj, solution, status = optimMinPow.optimizePCDTX(H, noisepower, rate, linkBandwidth, pMax, p0, m, pS, 0) 104 | answerObj = 13.9204261 105 | answerSol = np.array([ 0.32342002, 0.24371824, 0.27855287, 0.15430887]) 106 | np.testing.assert_almost_equal(obj, answerObj) 107 | np.testing.assert_almost_equal(solution, 
answerSol) 108 | 109 | for k in np.arange(H.shape[0]): 110 | ptx = optimMinPow2x2DTX.ptxOfMu(solution[k], rate, linkBandwidth, noisepower[k], H[k,:,:]) 111 | rate_test = solution[k]*np.real(utils.ergMIMOCapacityCDITCSIR(H[k,:,:], ptx)) 112 | np.testing.assert_almost_equal(rate_test, rate) 113 | 114 | def test_optimPCDTXrandomChannel(self): 115 | # test for simple problem with known outcome 116 | users = 22 117 | n_tx = 2 118 | n_rx = 2 119 | H = np.empty([users, n_tx, n_rx],dtype=complex) # very important to make it complex! 120 | for k in np.arange(users): 121 | H[k,:,:] = 10e-7*utils.rayleighChannel(n_tx,n_rx) 122 | H[k,:,:] = scipy.dot(H[k,:,:], H[k,:,:].conj().T) 123 | noisepower = np.ones(users) * 4e-14 124 | rate = 1.2e7/users # bps 125 | linkBandwidth = 1e7 126 | p0 = 100 127 | m = 2.4 128 | pS = 50 129 | pMax = 40 130 | 131 | obj, solution, status = optimMinPow.optimizePCDTX(H, noisepower, rate, linkBandwidth, pMax, p0, m, pS, 0) 132 | 133 | # Test that all calls were correct and their order. What goes in must come out. 134 | for k in np.arange(users): 135 | ptx = optimMinPow2x2DTX.ptxOfMu(solution[k], rate, linkBandwidth, noisepower[k], H[k,:,:]) # power as a function of the MIMO link 136 | rate_test = solution[k]*np.real(utils.ergMIMOCapacityCDITCSIR(H[k,:,:], ptx/noisepower[k]))*linkBandwidth # bps 137 | np.testing.assert_almost_equal(rate_test, rate) 138 | 139 | 140 | 141 | 142 | if __name__ == '__main__': 143 | unittest.main() 144 | -------------------------------------------------------------------------------- /plotting/channelplotter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' This module provides a range of plots to visualize channel states 4 | Plots are either displayed using plt.show() or written to pdf file. 
__author__ = "Hauke Holtkamp"
__credits__ = "Hauke Holtkamp"
__license__ = "unknown"
__version__ = "unknown"
__maintainer__ = "Hauke Holtkamp"
__email__ = "h.holtkamp@gmail.com"
__status__ = "Development"

import matplotlib
import matplotlib.pyplot as plt
import world
import numpy as np


def randomColorArray(length):
    """Generate an RGB color array of `length` random colors."""
    from numpy.random import rand
    return rand(length, 3)


# Module-level palette so colors stay consistent over one run.
colors = randomColorArray(10000)


def bar(channels, title, filename, log=False):
    '''Simple bar chart of channel magnitudes, written to a pdf file.
    Input: channels (1d, may be complex), title, filename, log (plot log10).'''
    fig = plt.figure()
    ax = fig.add_subplot(111)

    channels = np.abs(channels)  # in case it's complex data
    if log:
        channels = np.log10(channels)

    idx = np.arange(len(channels))
    ax.bar(idx, channels)

    ax.set_title(title)
    ax.grid(True)
    plt.savefig(filename, format='pdf')
    # plt.show()
    plt.close(fig)  # avoid accumulating open figures over repeated calls


def OFDMAchannel(channel, title, filename, log=False):
    '''3D surf of an OFDMA frame, one pdf page (and one png) per user.
    channel is an np-array of dimensions [freq, time, users].'''
    channel = np.abs(channel)
    if log:
        channel = np.log10(channel)

    channel = np.atleast_3d(channel)  # so the loop works also for 2d

    from mpl_toolkits.mplot3d import axes3d, Axes3D
    from matplotlib import cm
    from matplotlib.backends.backend_pdf import PdfPages
    pp = PdfPages(filename + '.pdf')

    for k in np.arange(channel.shape[2]):
        fig = plt.figure()
        ax = Axes3D(fig)

        Y = np.arange(channel.shape[0])
        X = np.arange(channel.shape[1])
        Z = np.abs(channel[:, :, k])
        X, Y = np.meshgrid(X, Y)

        surf = ax.plot_surface(Y, X, Z, rstride=1, cstride=1, cmap=cm.jet, linewidth=0, antialiased=False)
        ax.set_xlabel('Frequency chunks')
        ax.set_ylabel('Time slots')
        ax.set_zlabel('unit-power SINR')
        plt.suptitle(title)  # no effect

        fig.colorbar(surf, shrink=0.5, aspect=10)
        plt.savefig(filename + '.png', format='png')  # not multipage, but good enough for a powerpoint illustration
        plt.savefig(pp, format='pdf')
        plt.close()

    pp.close()


def hist3d(data, title, filename, colorindextot='b'):
    """Plot a 3d histogram. Useful for plots of OFDMA power allocation.
    Input:
        data: (N,T) or (N,T,users)
        title: plot title
        filename: file name (pdf and png are written)
        colorindextot: color index for coloring bars differently
    Output:
        None"""

    data = np.atleast_3d(data)  # so the loop works also for 2d
    colorindextot = np.atleast_3d(colorindextot)
    # BUGFIX: np.isnan raises TypeError on non-float (e.g. the default string
    # 'b') arrays; only scrub NaNs when the color data is actually numeric.
    if colorindextot.dtype.kind == 'f':
        colorindextot[np.isnan(colorindextot)] = 22222  # suppose we don't have that many users in a cell
    colorindextot = colorindextot.astype('S7')

    # PDFs cannot be created if a data value is zero. Workaround.
    data = data + 1e-20

    N = data.shape[0]  # x
    T = data.shape[1]  # y
    from mpl_toolkits.mplot3d import Axes3D
    from matplotlib.backends.backend_pdf import PdfPages
    pp = PdfPages(filename + '.pdf')

    for k in np.arange(data.shape[2]):

        # map each distinct color index to a stable color from the palette
        colorindex = colorindextot[:, :, k]
        for idx in np.arange(len(np.unique(colorindex))):
            colorindex[colorindex == np.unique(colorindex)[idx]] = matplotlib.colors.rgb2hex(colors[idx])

        fig = plt.figure()
        ax = Axes3D(fig)

        xedges = np.linspace(0.5, N - 0.5, num=N)
        yedges = np.linspace(0.5, T - 0.5, num=T)

        xpos, ypos = np.meshgrid(xedges + 0.1, yedges + 0.1)
        xpos = xpos.flatten('F')
        ypos = ypos.flatten('F')
        zpos = np.zeros(xpos.shape)
        dx = 0.8 * np.ones_like(zpos)
        dy = dx.copy()
        dz = data[:, :, k].flatten()
        colorindex = colorindex.flatten().repeat(6)  # six faces per bar

        if colorindex.size == 0:
            colorindex = np.empty(xpos.shape[0] * 6, dtype=str)
            colorindex[:] = 'b'
            colorindex[0:5] = 'r'

        ax.bar3d(xpos, ypos, zpos, dx, dy, dz, color=colorindex)
        ax.view_init(None, 45)  # hand picked favorite view
        ax.set_xlabel('Frequency chunks')
        ax.set_ylabel('Time slots')
        ax.set_zlabel('Transmission power in dBm')

        ax.plot([0], [0], [30], 'w')  # invisible point scales the axis

        plt.suptitle(title)  # no effect
        plt.savefig(filename + '.png', format='png')  # not multipage, but good enough for a powerpoint illustration
        plt.savefig(pp, format='pdf')
        plt.close()
    pp.close()


if __name__ == '__main__':
    filename = 'test'
    title = 'test'
    data = np.array([[0, 2], [3, 4], [5, 6], [7, 8], [9, 10]])
    # BUGFIX: the original instantiated a nonexistent ChannelPlotter class;
    # hist3d is a module-level function.
    hist3d(data, title, filename)
#!/usr/bin/env python

''' Optimization objective and constraints for 2x2 MIMO minimal power allocation

File: optimMinPow2x2.py
'''

__author__ = "Hauke Holtkamp"
__credits__ = "Hauke Holtkamp"
__license__ = "unknown"
__version__ = "unknown"
__maintainer__ = "Hauke Holtkamp"
__email__ = "h.holtkamp@gmail.com"
__status__ = "Development"

import scipy.linalg
from numpy import *


def eval_f(mus, noiseIfPower, SINR, rate, linkBandwidth, p0, m):
    """Objective function. Min power equal power 2x2 MIMO.
    Variable is the resource share in TDMA. Returns the scalar supply power."""

    if shape(noiseIfPower) != shape(mus):
        raise ValueError('Shape mismatch')

    # BUGFIX: `mus.size is 1` compared identity with an int literal (implementation
    # defined); use == here and in the other eval_* functions.
    if mus.size == 1:  # mus is a scalar share
        return mus * (p0 + m * ptxOfMu(mus, rate, linkBandwidth, noiseIfPower, SINR[0, :, :]))

    result = 0
    for i in range(mus.size):
        Ptxi = ptxOfMu(mus[i], rate, linkBandwidth, noiseIfPower[i], SINR[i, :, :])
        result = result + (p0 + m * Ptxi) * mus[i]
    return result


def eval_grad_f(mus, noiseIfPower, SINR, rate, linkBandwidth, p0, m):
    """Gradient of the objective function.
    Returns an array of scalars, each one a partial derivative w.r.t. one share."""
    mus = array(mus)  # allow iteration
    if mus.size == 1:
        a, b, M = dissectSINR(SINR[0, :, :])
        capacity = rate / (linkBandwidth * mus)
        return p0 + m*M*noiseIfPower*( ( ( a**2 / b + 2*2**capacity - 1/mus * ( rate/linkBandwidth* log(2) * 2**capacity) - 2 ) / sqrt( a**2 + 2 * b * (2**capacity - 1) ) ) - a / b )

    result = zeros((mus.size), dtype=float64)  # float64: numpy 2.0 removed float_
    for i in range(mus.size):
        a, b, M = dissectSINR(SINR[i, :, :])
        capacity = rate / (linkBandwidth * mus[i])
        result[i] = p0 + m*M*noiseIfPower[i]*( ( ( a**2 / b + 2*2**capacity - 1/mus[i] * ( rate/linkBandwidth * log(2) * 2**capacity) - 2 ) / sqrt( a**2 + 2 * b * (2**capacity - 1) ) ) - a/b )
    return result


def eval_g(mus, noiseIfPower, SINR, rate, linkBandwidth):
    """Constraint functions: [unit sum of shares, per-user transmission power].
    Returns an array of length mus.size + 1."""
    mus = array(mus)
    result = zeros((mus.size + 1), dtype=float64)
    result[0] = sum(mus)  # first constraint is the unit sum
    # Other constraints: maximum transmission power limit per user
    if mus.size == 1:
        result[1] = ptxOfMu(mus, rate, linkBandwidth, noiseIfPower, SINR[0, :, :])
        return result
    for i in range(mus.size):
        result[i + 1] = ptxOfMu(mus[i], rate, linkBandwidth, noiseIfPower[i], SINR[i, :, :])
    return result


def eval_jac_g(mus, noiseIfPower, SINR, rate, linkBandwidth, flag):
    """Gradient of constraint function/Jacobian. Min power equal power 2x2 MIMO.
    mus is the resource share in TDMA.
    If flag is set, returns the Jacobian sparsity structure (row/col indices);
    otherwise a flat array with the nnzj derivative values."""
    if mus.size == 1:
        a, b, M = dissectSINR(SINR[0, :, :])
        capacity = rate / (linkBandwidth * mus)
        result = M*noiseIfPower* ( - (rate/linkBandwidth)* log(2) * 2**capacity) / (mus**2 * sqrt( a**2 + 2*b*(2**capacity - 1)))
        return result

    nvar = mus.size
    if flag:
        # Structure of the Jacobian: 1 + nvar constraint functions, each with
        # nvar partial derivatives -> nvar*(1+nvar) entries overall.
        lineindex = array(range(1 + nvar)).repeat(nvar)
        rowindex = tile(array(range(nvar)), nvar + 1)
        return (lineindex, rowindex)

    index = 0
    mus = array(mus)  # allow iteration
    result = zeros((mus.size * (mus.size + 1)), dtype=float64)
    # The derivatives of the unit-sum constraint are all 1.
    for i in range(mus.size):
        result[index] = 1
        index = index + 1

    # The derivatives of each power constraint:
    for i in range(mus.size):       # the number of power constraints
        for j in range(mus.size):   # partial derivatives per power constraint
            if i == j:  # diagonal: the only nonzero partial derivative
                a, b, M = dissectSINR(SINR[i, :, :])
                capacity = rate / (linkBandwidth * mus[i])
                result[index] = M*noiseIfPower[i]* ( - (rate/linkBandwidth)* log(2) * 2**capacity) / (mus[i]**2 * sqrt( a**2 + 2*b*(2**capacity - 1)))
            else:
                result[index] = 0  # partial derivative is zero
            index = index + 1

    return result


def ergMIMOsinrCDITCSIR2x2(capacity, SINR, noiseIfPower):
    """Ergodic MIMO SNR as a function of achieved capacity and channel."""
    a, b, M = dissectSINR(SINR)
    return noiseIfPower * (M / b) * (-a + sqrt(a**2 + 2 * b * (2**capacity - 1)))
SINR.shape[0] 120 | # eigvals, eigvects = scipy.linalg.eig(scipy.dot(SINR,SINR.conj().T)) 121 | eigvals, eigvects = scipy.linalg.eig(SINR) 122 | e1 = eigvals[0].real 123 | e2 = eigvals[1].real 124 | a = e1 + e2 125 | b = 2*e1 * e2 126 | 127 | return (a,b,M) 128 | 129 | def ptxOfMu(mu, rate, linkBandwidth, noiseIfPower, SINR): 130 | """Returns transmission power needed for a certain channel capacity as a function of the MIMO channel and noise power.""" 131 | capacity = rate / (linkBandwidth * mu) 132 | return ergMIMOsinrCDITCSIR2x2(capacity, SINR, noiseIfPower) 133 | 134 | -------------------------------------------------------------------------------- /optim/optimMinPow2x2.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Optimization objective and constraints for 2x2 MIMO minimal power allocation. See my academic papers for documentation. 4 | 5 | File: optimMinPow2x2.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | import scipy.linalg 17 | from numpy import * 18 | 19 | def eval_f(mus, noiseIfPower, SINR, rate, linkBandwidth, p0, m ): 20 | """Objective function. Min power equal power 2x2 MIMO. 21 | Variable is the resource share in TDMA. 
Returns scalar.""" 22 | 23 | if shape(noiseIfPower) != shape(mus): 24 | raise ValueError('Shape mismatch') 25 | 26 | result = 0 27 | 28 | if mus.size is 1: # mus is integer 29 | return mus*(p0 + m*ptxOfMu(mus, rate, linkBandwidth, noiseIfPower, SINR[0,:,:])) 30 | else: 31 | for i in range(mus.size): 32 | Ptxi = ptxOfMu(mus[i], rate, linkBandwidth, noiseIfPower[i], SINR[i,:,:]) 33 | Ppm = (p0 + m*Ptxi) * mus[i] 34 | result = result + Ppm 35 | 36 | #print result 37 | return result 38 | 39 | def eval_grad_f(mus, noiseIfPower, SINR, rate, linkBandwidth, p0, m): 40 | """Gradient of the objective function. Returns array of scalars, each one the partial derivative.""" 41 | result = 0 42 | mus = array(mus) # allow iteration 43 | if mus.size is 1: 44 | a,b,M = dissectSINR(SINR[0,:,:]) 45 | capacity = rate / (linkBandwidth * mus) 46 | return p0 + m*M*noiseIfPower*( ( ( a**2 / b + 2*2**capacity - 1/mus * ( rate/linkBandwidth* log(2) * 2**capacity) - 2 ) / sqrt( a**2 + 2 * b * (2**capacity - 1) ) ) - a / b ) 47 | else: 48 | result = zeros((mus.size), dtype=float_) 49 | for i in range(mus.size): 50 | a,b,M = dissectSINR(SINR[i,:,:]) 51 | capacity = rate / (linkBandwidth * mus[i]) 52 | result[i] = p0 + m*M*noiseIfPower[i]*( ( ( a**2 / b + 2*2**capacity - 1/mus[i] * ( rate/linkBandwidth * log(2) * 2**capacity) - 2 ) / sqrt( a**2 + 2 * b * (2**capacity - 1) ) ) - a/b ) 53 | #print result 54 | return result 55 | 56 | def eval_g(mus, noiseIfPower, SINR, rate, linkBandwidth): 57 | """Constraint functions. 
Returns an array.""" 58 | 59 | mus = array(mus) 60 | result = zeros((mus.size+1), dtype=float_) 61 | result[0] = sum(mus) # first constraint is the unit sum 62 | # Other constraints: Maximum transmission power limit 63 | if mus.size is 1: 64 | result[1] = ptxOfMu(mus, rate, linkBandwidth, noiseIfPower, SINR[0,:,:]) 65 | return result 66 | else: 67 | for i in range(mus.size): 68 | result[i+1] = ptxOfMu(mus[i], rate, linkBandwidth, noiseIfPower[i], SINR[i,:,:]) 69 | 70 | #print result 71 | return result 72 | 73 | def eval_jac_g(mus, noiseIfPower, SINR, rate, linkBandwidth, flag): 74 | """Gradient of constraint function/Jacobian. min power equal power 2x2 MIMO. 75 | mus is the resource share in TDMA. Output is a numpy array with the nnzj rows.""" 76 | if mus.size is 1: 77 | a,b,M = dissectSINR(SINR[0,:,:]) 78 | capacity = rate / (linkBandwidth * mus) 79 | result = M*noiseIfPower* ( - (rate/linkBandwidth)* log(2) * 2**capacity) / (mus**2 * sqrt( a**2 + 2*b*(2**capacity - 1))) 80 | return result 81 | 82 | nvar = mus.size 83 | if flag: # The 'structure of the Jacobian' is the map of which return value refers to which constraint function. There are nvar*(1+nvar) constraints overall. There are 1+nvar functions in eval_g, each of which has nvar partial derivatives. 
84 | lineindex = array(range(1+nvar)).repeat(nvar) 85 | rowindex = tile(array(range(nvar)),nvar+1) 86 | return (lineindex,rowindex) 87 | 88 | else: 89 | index = 0 90 | mus = array(mus) # allow iteration 91 | result = zeros((mus.size*(mus.size+1)), dtype=float_) 92 | # The derivatives of the unit sum are just 1 93 | for i in range(mus.size): 94 | result[index] = 1 95 | index = index + 1 96 | 97 | # The derivatives of each power constraint: 98 | for i in range(mus.size): # the number of power constraints 99 | for j in range(mus.size): # the number of partial derivatives per power constraint 100 | if i == j: # there is a partial derivative 101 | a,b,M = dissectSINR(SINR[i,:,:]) 102 | capacity = rate / (linkBandwidth * mus[i]) 103 | result[index] = M*noiseIfPower[i]* ( - (rate/linkBandwidth)* log(2) * 2**capacity) / (mus[i]**2 * sqrt( a**2 + 2*b*(2**capacity - 1))) 104 | else: # there is no partial derivative 105 | result[index] = 0 # partial derivative is zero 106 | 107 | index = index + 1 108 | 109 | #print result 110 | return result 111 | 112 | def ergMIMOsinrCDITCSIR2x2(capacity, SINR, noiseIfPower): 113 | """Ergodic MIMO SNR as a function of achieved capacity and channel.""" 114 | a,b,M = dissectSINR(SINR) 115 | return noiseIfPower * (M / b) * ( -a + sqrt( a**2 + 2 * b * (2**capacity - 1) ) ) 116 | 117 | def dissectSINR(SINR): 118 | """Take apart SINR into some values that we need often.""" 119 | M = SINR.shape[0] 120 | # eigvals, eigvects = scipy.linalg.eig(scipy.dot(SINR,SINR.conj().T)) 121 | eigvals, eigvects = scipy.linalg.eig(SINR) 122 | e1 = eigvals[0].real 123 | e2 = eigvals[1].real 124 | a = e1 + e2 125 | b = 2*e1 * e2 126 | 127 | return (a,b,M) 128 | 129 | def ptxOfMu(mu, rate, linkBandwidth, noiseIfPower, SINR): 130 | """Returns transmission power needed for a certain channel capacity as a function of the MIMO channel and noise power.""" 131 | capacity = rate / (linkBandwidth * mu) 132 | return ergMIMOsinrCDITCSIR2x2(capacity, SINR, noiseIfPower) 133 | 
134 | -------------------------------------------------------------------------------- /fsf/test_fsf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Unit test for the Frequency Selective Fading module 4 | 5 | File: test_fsf.py 6 | ''' 7 | 8 | __author__ = "Hauke Holtkamp" 9 | __credits__ = "Hauke Holtkamp" 10 | __license__ = "unknown" 11 | __version__ = "unknown" 12 | __maintainer__ = "Hauke Holtkamp" 13 | __email__ = "h.holtkamp@gmail.com" 14 | __status__ = "Development" 15 | 16 | import fsf 17 | import unittest 18 | import numpy as np 19 | from utils import utils 20 | 21 | class TestSequenceFunctions(unittest.TestCase): 22 | 23 | def setUp(self): 24 | pass 25 | 26 | def test_fsf_mean(self): 27 | """ Test unity mean. """ 28 | N = 50 29 | T = 10 30 | centerFrequency = 2e9 31 | totalTime = 0.01 32 | bandwidth = 1e7 33 | relativeVelocity = 3 34 | 35 | H, chunkCenters, timeStamp = fsf.fsf(N, T, centerFrequency, totalTime, bandwidth, relativeVelocity) 36 | ans = np.mean(np.abs(H)) 37 | result = np.ones(T) 38 | np.testing.assert_array_almost_equal(ans, result) 39 | 40 | def test_instantChunkFading(self): 41 | tapGainsNorm = np.array([ 42 | 0.0449, 43 | 0.0895, 44 | 0.1235, 45 | 0.0981, 46 | 0.0865, 47 | 0.0391, 48 | 0.0895, 49 | 0.0895, 50 | 0.0539, 51 | 0.0356, 52 | 0.0679, 53 | 0.0325, 54 | 0.0325, 55 | 0.0230, 56 | 0.0124, 57 | 0.0277, 58 | 0.0094, 59 | 0.0149, 60 | 0.0016, 61 | 0.0069, 62 | 0.0132, 63 | 0.0037, 64 | 0.0042]) 65 | tapGainsNorm.shape = (23,1) # promote 66 | t = 0.004 67 | f = 2.0048e9 68 | disc_dopp_freq = np.array([ 69 | [-15.5606, -19.6372, 15.2520, 0.1108, 19.3279], 70 | [-6.5683, 11.1790, -8.6475, -1.8778, -14.0363], 71 | [15.2916, 1.3191, 4.4370, -18.4350, 19.9959], 72 | [19.1631, -11.8624, 6.5684, 5.3569, 1.0966], 73 | [-5.2384, 18.0584, -16.9274, 19.8232, 17.3193], 74 | [19.1614, -6.3499, 2.1299, -1.6424, -19.9360], 75 | [ 1.3774, 2.7174, 4.5710, -16.2640, 13.4078], 76 | 
[18.7846, -5.7717, -16.2102, -4.9658, -10.7869], 77 | [19.6819, 1.5025, 10.7101, -12.6416, 18.1574], 78 | [17.9368, -19.3611, 7.6408, -11.9789, 17.8282], 79 | [-13.7449, -14.3416, 12.6645, 19.3312, 19.3916], 80 | [ 19.9011, -13.4247, -19.9076, -19.9032, 19.9660], 81 | [-19.9948, 19.9393, -19.6215, 19.9349, -6.2241], 82 | [ 18.9880, 3.3680, 19.9890, 9.4799, 12.9395], 83 | [ 18.2249, -4.1318, 0.9548, -14.0051, 9.8152], 84 | [ 18.8720, 10.9931, -19.9261, -15.5387, -7.3034], 85 | [ 17.1861, -1.4958, -18.6823, -5.4596, 11.6007], 86 | [-19.9944, -19.4111, 19.7429, -19.5167, 2.5668], 87 | [ 7.1822, -13.0292, -11.7887, 18.5423, 14.1513], 88 | [-14.6792, -14.8353, 6.4546, -18.7910, 18.9747], 89 | [ 14.1708, -7.2981, -19.8194, 18.2591, -12.7953], 90 | [-17.5187, -13.4457, 15.3212, 0.2499, 17.6333], 91 | [ 18.1196, 3.6307, -18.6036, 19.5076, 2.5944]]) 92 | disc_dopp_phase = np.array([ 93 | [3.5006, 0.6506, 1.0861, 5.0804, 3.0689], 94 | [0.5910, 1.7973, 0.0347, 0.4820, 5.3503], 95 | [4.5771, 3.7784, 1.3634, 1.8850, 0.4282], 96 | [2.6396, 5.4569, 3.3608, 0.6612, 3.3064], 97 | [4.2151, 5.1251, 2.1518, 4.4165, 2.0659], 98 | [5.0685, 2.3557, 1.0414, 2.8444, 2.1495], 99 | [5.5907, 3.0346, 3.8443, 2.1966, 4.5931], 100 | [4.1985, 4.8872, 1.7899, 5.0602, 5.2349], 101 | [5.2085, 1.3760, 1.8995, 0.3599, 4.4470], 102 | [6.0822, 0.9750, 3.1461, 1.1830, 0.9937], 103 | [4.7097, 1.5513, 3.3219, 1.7564, 6.1375], 104 | [3.6515, 6.1396, 0.0890, 4.7316, 3.5173], 105 | [5.9846, 3.3977, 0.7420, 2.5553, 2.9666], 106 | [1.3498, 4.1761, 0.8743, 2.0005, 2.4837], 107 | [1.2943, 4.2291, 4.7883, 2.7557, 0.5300], 108 | [6.1913, 3.8556, 1.3004, 2.2014, 1.7365], 109 | [3.2449, 0.4187, 3.1637, 2.8953, 2.3058], 110 | [1.4966, 2.8960, 1.4687, 2.5900, 5.0088], 111 | [2.4267, 2.2054, 4.1603, 5.9509, 0.0743], 112 | [1.3372, 3.8027, 4.0326, 5.9558, 1.4432], 113 | [1.8749, 3.8463, 5.3386, 2.8930, 4.3170], 114 | [4.3231, 1.9115, 5.4909, 2.3259, 0.2305], 115 | [3.9121, 5.2721, 5.9491, 2.1157, 4.9408]]) 116 | 
delay_taps = 1e-05 * np.array([0, 117 | 0.0060, 118 | 0.0075, 119 | 0.0145, 120 | 0.0150, 121 | 0.0155, 122 | 0.0190, 123 | 0.0220, 124 | 0.0225, 125 | 0.0230, 126 | 0.0335, 127 | 0.0370, 128 | 0.0430, 129 | 0.0510, 130 | 0.0685, 131 | 0.0725, 132 | 0.0735, 133 | 0.0800, 134 | 0.0960, 135 | 0.1020, 136 | 0.1100, 137 | 0.1210, 138 | 0.1845]) 139 | delay_taps.shape = (23,1) # promot 140 | N_harmonics = 5 141 | 142 | ans = fsf.instantChunkFading(t, f, tapGainsNorm,disc_dopp_phase, disc_dopp_freq, delay_taps, N_harmonics) 143 | 144 | result = 0.0616 - 0.2216j 145 | np.testing.assert_approx_equal(np.real(ans), np.real(result), significant=3) 146 | np.testing.assert_approx_equal(np.imag(ans), np.imag(result), significant=3) 147 | 148 | 149 | if __name__ == '__main__': 150 | unittest.main() 151 | -------------------------------------------------------------------------------- /optim/optimMinPow.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' Optimization entry point. Interface with PyIPOPT. 4 | File: optimMinPow.py 5 | ''' 6 | 7 | __author__ = "Hauke Holtkamp" 8 | __credits__ = "Hauke Holtkamp" 9 | __license__ = "unknown" 10 | __version__ = "unknown" 11 | __maintainer__ = "Hauke Holtkamp" 12 | __email__ = "h.holtkamp@gmail.com" 13 | __status__ = "Development" 14 | 15 | from numpy import * 16 | from optim import optimMinPow2x2 17 | from optim import optimMinPow2x2DTX 18 | import pyipopt 19 | 20 | def optimizePC(channel, noiseIfPower, rate, linkBandwidth, pMax, p0, m, verbosity=0): 21 | ''' Uses channel values, PHY parameters and power consumption characteristics to find minimal resource allocation. Returns resource allocation, objective value and IPOPT status. 22 | Input: 23 | channel - 3d array. 
0d users, 1d n_tx, 2d n_rx 24 | noiseIfPower - total noise power over the linkbandwidth 25 | rate - target rate in bps 26 | pMax - maximum allowed transmission power 27 | p0 - power consumption at zero transmission (not sleep) 28 | m - power consumption load factor 29 | verbosity - IPOPT verbosity level 30 | Output: 31 | obj - solution objective value 32 | solution - resource share per user 33 | status - IPOPT status ''' 34 | 35 | # the channel dimensions tell some more parameters 36 | users = channel.shape[0] 37 | n_tx = channel.shape[1] 38 | n_rx = channel.shape[2] 39 | 40 | # preparing IPOPT parameters 41 | nvar = users # for readability 42 | x_L = zeros((nvar), dtype=float_) * 0.0 43 | x_U = ones((nvar), dtype=float_) * 1.0 44 | ncon = nvar + 1 # transmit power constraints and the unit sum 45 | g_L = zeros(1+nvar) # unit sum and all power constraints 46 | g_L[0] = 1. 47 | g_U = pMax * ones(1+nvar) # unit sum and all power constraints 48 | g_U[0] = 1. 49 | nnzj = nvar * (1+nvar) 50 | nnzh = 0 #used? 
51 | x0 = repeat([1./(nvar+1)],nvar) # Starting point 52 | 53 | # IPOPT requires single parameter functions 54 | if n_tx is 2 and n_rx is 2: 55 | eval_f = lambda mus: optimMinPow2x2.eval_f(mus, noiseIfPower, channel, rate, linkBandwidth, p0, m) 56 | eval_grad_f = lambda mus: optimMinPow2x2.eval_grad_f(mus, noiseIfPower, channel, rate, linkBandwidth, p0, m) 57 | eval_g = lambda mus: optimMinPow2x2.eval_g(mus, noiseIfPower, channel, rate, linkBandwidth) 58 | eval_jac_g = lambda mus, flag: optimMinPow2x2.eval_jac_g(mus, noiseIfPower, channel, rate, linkBandwidth, flag) 59 | else: 60 | raise NotImplementedError # other combinations may be needed later 61 | 62 | # Call solve() 63 | pyipopt.set_loglevel(min([2,verbosity])) # verbose 64 | nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, eval_f, eval_grad_f, eval_g, eval_jac_g) 65 | #nlp.int_option("max_iter", 3000) 66 | #nlp.num_option("tol", 1e-8) 67 | #nlp.num_option("acceptable_tol", 1e-2) 68 | #nlp.int_option("acceptable_iter", 0) 69 | nlp.str_option("derivative_test", "first-order") 70 | nlp.str_option("derivative_test_print_all", "no") 71 | #nlp.str_option("print_options_documentation", "yes") 72 | nlp.int_option("print_level", min([verbosity,12])) # maximum is 12 73 | nlp.str_option("print_user_options", "yes") 74 | 75 | solution, zl, zu, obj, status = nlp.solve(x0) 76 | nlp.close() 77 | 78 | return obj, solution, status 79 | 80 | def optimizePCDTX(channel, noiseIfPower, rate, linkBandwidth, pMax, p0, m, pS, verbosity=0): 81 | ''' Uses channel values, PHY parameters and power consumption characteristics to find minimal resource allocation under power control with DTX. Returns resource allocation, objective value and IPOPT status. 82 | Input: 83 | channel - 3d array. 
0d users, 1d n_tx, 2d n_rx 84 | noiseIfPower - total noise power over the linkbandwidth 85 | rate - target rate in bps 86 | pMax - maximum allowed transmission power 87 | p0 - power consumption at zero transmission (not sleep) 88 | pS - power consumption during sleep mode 89 | m - power consumption load factor 90 | verbosity - IPOPT verbosity level 91 | Output: 92 | obj - solution objective value 93 | solution - resource share per user 94 | status - IPOPT status ''' 95 | 96 | # the channel dimensions tell some more parameters 97 | users = channel.shape[0] 98 | n_tx = channel.shape[1] 99 | n_rx = channel.shape[2] 100 | 101 | # preparing IPOPT parameters 102 | nvar = users + 1 # sleep mode is integrated as the last parameter 103 | x_L = zeros((nvar), dtype=float_) * 0.0 104 | x_U = ones((nvar), dtype=float_) * 1.0 105 | ncon = users + 1 # transmit power constraints and the unit sum 106 | g_L = zeros(ncon) # unit sum and all power constraints 107 | g_L[0] = 1. 108 | g_U = pMax * ones(ncon) # unit sum and all power constraints 109 | g_U[0] = 1. 
110 | nnzj = ncon * ncon 111 | nnzh = 0 # tell that there is no hessian (Hessian approximation) 112 | x0 = repeat([1./(nvar + 1)], nvar) # Starting point 113 | 114 | # IPOPT requires single parameter functions 115 | if n_tx is 2 and n_rx is 2: 116 | eval_f = lambda mus: optimMinPow2x2DTX.eval_f(mus, noiseIfPower, channel, rate, linkBandwidth, p0, m, pS) 117 | eval_grad_f = lambda mus: optimMinPow2x2DTX.eval_grad_f(mus, noiseIfPower, channel, rate, linkBandwidth, p0, m, pS) 118 | eval_g = lambda mus: optimMinPow2x2DTX.eval_g(mus, noiseIfPower, channel, rate, linkBandwidth) 119 | eval_jac_g = lambda mus, flag: optimMinPow2x2DTX.eval_jac_g(mus, noiseIfPower, channel, rate, linkBandwidth, flag) 120 | else: 121 | raise NotImplementedError # other combinations may be needed later 122 | 123 | pyipopt.set_loglevel(min([verbosity, 2])) # verbose 124 | nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh, eval_f, eval_grad_f, eval_g, eval_jac_g) 125 | #nlp.int_option("max_iter", 3000) 126 | #nlp.num_option("tol", 1e-8) 127 | #nlp.num_option("acceptable_tol", 1e-2) 128 | #nlp.int_option("acceptable_iter", 0) 129 | nlp.str_option("derivative_test", "first-order") 130 | nlp.str_option("derivative_test_print_all", "yes") 131 | #nlp.str_option("print_options_documentation", "yes") 132 | nlp.int_option("print_level", min([verbosity, 12])) # maximum is 12 133 | nlp.str_option("print_user_options", "yes") 134 | 135 | solution, zl, zu, obj, status = nlp.solve(x0) 136 | nlp.close() 137 | 138 | if sum(solution) > 1.0001 or status is not 0: 139 | print 'Sum of solution:', sum(solution) 140 | print 'Status:', status 141 | raise ValueError('Invalid solution') 142 | 143 | return obj, solution, status 144 | --------------------------------------------------------------------------------