├── chapter9 ├── hazusData.mat ├── hazardEpistemicData.mat ├── demonstrate_HAZUS_loss.m ├── illustrate_vulnerability.m ├── risk_metrics_epistemic.m ├── risk_target.m ├── illustrate_fragility.m ├── risk_metrics.m ├── fn_HAZUS_loss.m └── demonstrate_HAZUS_fragility.m ├── utils ├── gmm_eval.m ├── gmm_bjf97.m └── gmm_cy2014.m ├── LICENSE ├── README.md ├── chapter6 ├── psha_example_calcs_two_rup.m ├── fn_PSHA_given_M_lambda.m └── psha_example_calcs_GR.m ├── chapter4 └── empiricalGroundMotionPredictionExample.m └── chapter5 └── SimplifiedHfMethodExample.m /chapter9/hazusData.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bakerjw/seismic-hazard-and-risk/HEAD/chapter9/hazusData.mat -------------------------------------------------------------------------------- /chapter9/hazardEpistemicData.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bakerjw/seismic-hazard-and-risk/HEAD/chapter9/hazardEpistemicData.mat -------------------------------------------------------------------------------- /utils/gmm_eval.m: -------------------------------------------------------------------------------- 1 | function [sa, sigma] = gmm_eval(T, M, rup, gmpeFlag) 2 | % master function to call a relevant GMPE and get median Sa and log 3 | % standard deviation 4 | % 5 | % INPUTS 6 | % 7 | % T IM period of interest 8 | % M rupture magnitude 9 | % rup data structure with rupture parameters 10 | % gmpeFlag =1 for BJF97, =2 for CY14 11 | % 12 | % sa median spectral acceleration, given rup 13 | % sigma log standard deviation, given rup 14 | % 15 | 16 | if gmpeFlag == 1 % BJF 1997 model 17 | [sa, sigma] = gmm_bjf97(M, rup.R, T, rup.Fault_Type, rup.Vs30); 18 | 19 | elseif gmpeFlag == 2 % CY 2014 model 20 | [sa, sigma] = gmm_cy2014( M, T, rup.R, rup.R, rup.R, rup.Ztor, rup.delta, rup.rupLambda, rup.Z10, rup.Vs30, rup.Fhw, rup.FVS30, rup.region); 21 | 22 | else 23 | fprintf('Invalid gmpeFlag \n') 24 | end 25 | 26 | 27 | end 28 | 29 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Jack Baker 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Seismic Hazard and Risk Analysis calculations 2 | Supporting calculations for the textbook Seismic Hazard and Risk Analysis 3 | 4 | Baker, J. W., Bradley, B. A., and Stafford, P. J. (2021). Seismic Hazard and Risk Analysis. Cambridge University Press, Cambridge, England. www.cambridge.org/9781108425056 5 | 6 | Additional resources available at www.pshabook.com 7 | 8 | # Disclaimer 9 | These are simple demonstration calculations to further illustrate the results from the book. 10 | If you think there is an error in any of these materials, feel free to file a New Issue in the repository. We are, however, unable to respond to individual inquiries regarding any of these calculations. 11 | 12 | You are welcome to download and use any of these materials, as long as you acknowledge the above-referenced book as the source of the data. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 17 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 19 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 20 | SOFTWARE. Please review the license file associated with this repository for further conditions. 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /chapter9/demonstrate_HAZUS_loss.m: -------------------------------------------------------------------------------- 1 | % make illustrative loss ratio predictions using HAZUS 2 | 3 | % compute loss predictions using HAZUS MH 2.1 4 | % Jack Baker 5 | % 12/20/2017 6 | % modified 1/7/2021 to allow optional color figures 7 | 8 | clear; close all; clc; 9 | 10 | colorspec{1} = [56 95 150]/255; 11 | colorspec{2} = [207 89 33]/255; 12 | colorspec{3} = [158 184 219]/255; 13 | colorspec{4} = [231 184 0]/255; 14 | colorspec{5} = [128 0 0]/255; 15 | 16 | linespec{1} = '-'; 17 | linespec{2} = '-'; 18 | linespec{3} = '-'; 19 | linespec{4} = '-'; 20 | linespec{5} = '-'; 21 | 22 | 23 | 24 | pgaVals = 0.01:0.01:1.5; % PGA values of interest 25 | 26 | % specify a test case (see fn_HAZUS_loss for allowable options) 27 | analysisCase.codeLevel = 1; 28 | analysisCase.buildingType = 'C2L'; 29 | analysisCase.occType = 'COM1'; 30 | 31 | % perform calculations 32 | [lossRatio, caseLabel, structLossRatio, nonStructAccLossRatio, nonStructDriftLossRatio] = fn_HAZUS_loss(analysisCase, pgaVals); 33 | 34 | %% plot contributions of each type of damage 35 | figure 36 | plot(pgaVals, lossRatio, linespec{1}, 'linewidth', 2, 'color', colorspec{1}) 37 | hold on 38 | plot(pgaVals, nonStructAccLossRatio, linespec{2}, 'linewidth', 2, 'color', colorspec{2}) 39 | plot(pgaVals, structLossRatio, linespec{3}, 'linewidth', 2, 'color', colorspec{3}) 40 | plot(pgaVals, nonStructDriftLossRatio, linespec{4}, 'linewidth', 2, 'color', colorspec{4}) 41 | xlabel('Peak Ground Acceleration, PGA [g]') 42 | ylabel('Loss ratio') 43 | legend('Total', 'Nonstructural (Acceleration Sensitive)', 'Structural loss', 'Nonstructural (Drift Sensitive)', 'location', 'Northwest') 44 | 45 | 46 | %% perform calculations for four code levels 47 | clear lossRatio 
caseLabel 48 | 49 | figure 50 | for i=1:4 51 | analysisCase.codeLevel = 5-i; % flip the ordering 52 | [lossRatio(:,i), caseLabel{i}] = fn_HAZUS_loss(analysisCase, pgaVals); 53 | end 54 | 55 | % manually make shorter labels 56 | shortLabel{1} = 'Pre-code'; 57 | shortLabel{2} = 'Low-code'; 58 | shortLabel{3} = 'Moderate-code'; 59 | shortLabel{4} = 'High-code'; 60 | 61 | 62 | % plot total loss ratios 63 | plot(pgaVals, lossRatio(:,1), linespec{1}, 'linewidth', 2, 'color', colorspec{1}) 64 | hold on 65 | plot(pgaVals, lossRatio(:,2), linespec{2}, 'linewidth', 2, 'color', colorspec{2}) 66 | plot(pgaVals, lossRatio(:,3), linespec{3}, 'linewidth', 2, 'color', colorspec{3}) 67 | plot(pgaVals, lossRatio(:,4), linespec{4}, 'linewidth', 2, 'color', colorspec{4}) 68 | 69 | xlabel('Peak Ground Acceleration, PGA [g]') 70 | ylabel('Loss ratio') 71 | legend(shortLabel, 'location', 'southeast') 72 | 73 | -------------------------------------------------------------------------------- /chapter6/psha_example_calcs_two_rup.m: -------------------------------------------------------------------------------- 1 | % PSHA calculation considering two ruptures. 2 | % The calculation follows the example of Section 6.3.2 3 | % 4 | % Created by Jack Baker 5 | 6 | clear; close all; clc 7 | addpath('../utils/') 8 | 9 | % basic setup 10 | x = logspace(log10(0.001), log10(2), 100); % IM values to consider 11 | T = 1; % period of interest 12 | IM_label = 'SA(1 s)'; 13 | gmpeFlag = 1; % =1 for BJF97, =2 for CY14 14 | 15 | % specify colors and line styles for plots 16 | colorspec{1} = [56 95 150]/255; 17 | colorspec{2} = [207 89 33]/255; 18 | colorspec{3} = [158 184 219]/255; 19 | 20 | 21 | % seismicity parameters 22 | Fault_Type = 1; % 1 is strike slip 23 | Vs30 = 500; 24 | 25 | % plotting parameters 26 | figureAxisLimits = [0.05 max(x) 1e-5 1e-1]; 27 | figureXTickVals = [0.05 0.1 0.5 1 2]; 28 | 29 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 30 | %% first rupture 31 | 32 | lambda_A = 1/100; 33 | M_A = 6.5; 34 | R_A = 10; 35 | 36 | % compute rates (and intermediate results) for specific IM levels 37 | [medianIM, sigmaIM] = gmm_bjf97(M_A, R_A, T, Fault_Type, Vs30); 38 | imLevel(1) = 0.2; 39 | imLevel(2) = 0.5; 40 | imProbabilitiesA = 1 - normcdf(log(imLevel),log(medianIM),sigmaIM) 41 | imRateA = lambda_A * imProbabilitiesA; % get rates for two example cases 42 | 43 | % compute rates for a range of IM levels 44 | p_A = 1 - normcdf(log(x),log(medianIM),sigmaIM); 45 | lambda_IM_A = lambda_A * p_A; % IM rates from rup_1 46 | 47 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 48 | %% second rupture 49 | 50 | % define second fault 51 | lambda_B = 1/500; 52 | M_B = 7.5; 53 | R_B = 10; 54 | 55 | % compute rates (and intermediate results) for specific IM levels 56 | [medianIM, sigmaIM] = gmm_bjf97(M_B, R_B, T, Fault_Type, Vs30); 57 | imProbabilitiesB = 1 - normcdf(log(imLevel),log(medianIM),sigmaIM) 58 | imRateB = lambda_B * imProbabilitiesB; % get rates for two example cases 59 | imRateTot = imRateA + imRateB; 60 | 61 | % compute rates for a range of IM levels 62 | p_B = 1 - normcdf(log(x),log(medianIM),sigmaIM); 63 | lambda_IM_B = lambda_B * p_B; % IM rates from rup_2 64 | 65 | lambda_IM_Tot = lambda_IM_A + lambda_IM_B; 66 | 67 | figure 68 | loglog(x, lambda_IM_Tot,'-', 'linewidth', 2, 'color', colorspec{1}) 69 | hold on 70 | loglog(x, lambda_IM_A,'-', 'linewidth', 2, 'Color', colorspec{2}) 71 | loglog(x, lambda_IM_B,'-', 'linewidth', 2, 'Color', colorspec{3}) 72 | 73 | 
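% Illustrative addition, not part of the original script: under the usual
% Poisson occurrence assumption, an annual exceedance rate lambda converts to a
% probability of exceedance in t years as P = 1 - exp(-lambda*t), as also done
% in chapter9/risk_metrics.m. The 50-year horizon below is an assumed example.
tYears = 50;
probExceedIn50yrs = 1 - exp(-imRateTot * tYears) % P(exceedance in 50 years) for the two example IM levels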
plot(imLevel, imRateTot, 'o', 'MarkerEdgeColor', colorspec{1}) 74 | plot(imLevel, imRateA, 'o', 'MarkerEdgeColor', colorspec{2}) 75 | plot(imLevel, imRateB, 'o', 'MarkerEdgeColor', colorspec{3}) 76 | 77 | % annotate text results for example cases 78 | text1 = ['\lambda(' IM_label ' > ' num2str(imLevel(1)) ' g) = ' num2str(imRateTot(1),3)]; 79 | text2 = ['\lambda(' IM_label ' > ' num2str(imLevel(2)) ' g) = ' num2str(imRateTot(2),3)]; 80 | text(imLevel(1)*1.1, imRateTot(1)*1.1,text1) 81 | text(imLevel(2)*1.05, imRateTot(2)*1.2,text2) 82 | 83 | xlabel(['Spectral Acceleration, ' IM_label ' [g]']) 84 | ylabel('Annual rate of exceedance, \lambda') 85 | axis(figureAxisLimits) 86 | legend('Total hazard', 'rup_1', 'rup_2'); 87 | 88 | 89 | 90 | 91 | 92 | -------------------------------------------------------------------------------- /chapter4/empiricalGroundMotionPredictionExample.m: -------------------------------------------------------------------------------- 1 | %Evaluate the BJF97 model for several M, R cases and write the results (Figure 4.26, Table 4.1) 2 | %Brendon Bradley 3 | %May 2020 4 | 5 | T=1; %Vibration period to consider (s) 6 | if T > 2; disp('Error: max(T) = 2s for the BJF97 model'); return; end 7 | 8 | %earthquake scenario(s) to consider 9 | M = 6.5; %moment magnitude 10 | R = [3,10,30]; %source-to-site distances (km) 11 | Sa1pt0_threshold = 0.4; %IM threshold to compute exceedance probabilities for 12 | 13 | 14 | %get the GMM prediction - note this requires to GMM to be in the same 15 | %directory as this file, or use the 'addpath' function to point to another 16 | %directory. more information on this function can be found at: 17 | %https://github.com/bakerjw/seismic-hazard-and-risk/tree/main/utils/gmm_bjf97.m 18 | Fault_Type=1; 19 | Vs=500; 20 | for i=1:length(R) 21 | [sa(i), sigma(i)] = gmm_bjf97(M, R(i), T, Fault_Type, Vs); 22 | end 23 | 24 | %output the numerical values to the command line 25 | fprintf('Output values \n'); 26 | fprintf('--------------------------------------------------------- \n'); 27 | fprintf(' R[km] mu_lnIM im_50 [g] sigma_lnIM Prob[SA(1s) > 0.4g] \n'); 28 | fprintf('--------------------------------------------------------- \n'); 29 | for i=1:length(R) 30 | fprintf('%10.3f %10.3f %10.3e %10.3f %10.3e \n',R(i),log(sa(i)),sa(i),sigma(i),1-normcdf(log(Sa1pt0_threshold),log(sa(i)),sigma(i))); 31 | end 32 | fprintf('--------------------------------------------------------- \n'); 33 | 34 | %Plotting preparation 35 | %get for the full R value range to make plot 36 | R_plot=1:1:100; 37 | for j=1:length(R_plot) 38 | [sa_plot(j), sigma_plot(j)] = gmm_bjf97(M, R_plot(j), T, Fault_Type, Vs); 39 | end 40 | 41 | %get the necessary details for plotting the pdf distribution 42 | eps_range=-3.7:0.01:3.7; %range of epsilon values to plot 43 | for i=1:length(R) 44 | sa_vals(:,i)=sa(i)*exp(eps_range*sigma(i)); 45 | sa_pdf(:,i)=normpdf(log(sa_vals(:,i)),log(sa(i)),sigma(i)); 46 | sa_pdf_norm(:,i)=0.6*sa_pdf(:,i)/max(sa_pdf(:,i)); 47 | end 48 | 49 | %details for markers 50 | Colors=[1 0 0;0 0 1;0 1 0]; % 51 | Markers={'o';'sq';'>'}; 52 | 53 | %make figure 54 | figure; 55 | %plot BJF97 prediction for full R range 56 | h(2)=loglog(R_plot,sa_plot,'-k','LineWidth',2); hold on; 57 | h(3)=loglog(R_plot,sa_plot.*exp(sigma_plot),'--k','LineWidth',2); 58 | loglog(R_plot,sa_plot.*exp(-sigma_plot),'--k','LineWidth',2); 59 | %plot the SA(1s) threshold 60 | h(1)=loglog(R_plot,ones(length(R_plot),1)*Sa1pt0_threshold,':','Color',[0.5 0.5 0.5],'LineWidth',2); 61 | %plot the lognormal 
distributions 62 | for i=1:length(R) 63 | loglog(R(i)*ones(length(eps_range),1),sa_vals(:,i),'-','Color',[0.5 0.5 0.5],'LineWidth',1); 64 | loglog(R(i)*(1+sa_pdf_norm(:,i)),sa_vals(:,i),'-','Color',[0.5 0.5 0.5],'LineWidth',1); 65 | %fill in the values which exceed the threshold 66 | for j=1:length(sa_pdf_norm) 67 | if (sa_vals(j,i)>Sa1pt0_threshold) 68 | loglog(R(i)*[1 1+sa_pdf_norm(j,i)],sa_vals(j,i)*[1 1],'-','Color',[0.5 0.5 0.5]); 69 | end 70 | end 71 | end 72 | 73 | %add text 74 | set(groot,'defaultLegendInterpreter','latex') 75 | legend(h,'Target SA(1 s)','$\mu_{\ln SA}$','$\mu_{\ln SA} \pm \sigma_{\ln SA}$','Location','SouthWest'); 76 | 77 | %axes formatting 78 | xlim([1 100]); 79 | ylim([0.01 2]); 80 | xlabel('Source-to-site distance, R [km]'); ylabel('Spectral acceleration, SA(1 s) [g]'); 81 | 82 | 83 | -------------------------------------------------------------------------------- /chapter6/fn_PSHA_given_M_lambda.m: -------------------------------------------------------------------------------- 1 | function [lambda, example_output, disagg] = fn_PSHA_given_M_lambda(lambda_M, M_vals, T, x, x_example, rup, gmpeFlag) 2 | 3 | % Compute PSHA, with rupture rates for each M precomputed 4 | 5 | % Created by Jack Baker 6 | 7 | %%%%%%% INPUTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 8 | % 9 | % lambda_M exceedance rate of EQs for each M 10 | % M_vals values of M corresponding to lambda_M 11 | % x IM values of interest 12 | % x_example example IM value for table 13 | % rup data structure with rupture parameters 14 | % gmpeFlag =1 for BJF97, =2 for CY14 15 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 16 | 17 | 18 | 19 | 20 | lambda_occur = [-diff(lambda_M) lambda_M(end)]; % find occurence rates from exceedance rates 21 | 22 | % p(exceeding each x threshold value | M) 23 | for j = 1:length(x) 24 | for i = 1:length(M_vals) 25 | [sa, sigma] = gmm_eval(T, M_vals(i), rup, gmpeFlag); 26 | p_given_M(i) = 1 - normcdf(log(x(j)),log(sa),sigma); 27 | end 28 | 29 | lambda.x(j) = sum(lambda_occur .* p_given_M); % rate of exceeding x 30 | disagg.all(j,:) = (lambda_occur .* p_given_M) / lambda.x(j); 31 | 32 | end 33 | 34 | 35 | % calcs for example IM case 36 | for i = 1:length(M_vals) 37 | [sa, sigma] = gmm_eval(T, M_vals(i), rup, gmpeFlag); 38 | p_ex(i) = 1 - normcdf(log(x_example),log(sa),sigma); 39 | end 40 | 41 | example_output = [[1:length(M_vals)]' M_vals' lambda_occur' p_ex' (lambda_occur' .* p_ex')]; 42 | lambda.example = sum(lambda_occur .* p_ex); 43 | disagg.example = (lambda_occur .* p_ex) / lambda.example; 44 | disagg.Mbar = sum(M_vals.*disagg.example); 45 | 46 | % disagg conditional on occurence for example IM case 47 | xInc = x_example*1.02; % do computations at an increment on x 48 | for i = 1:length(M_vals) 49 | [sa, sigma] = gmm_eval(T, M_vals(i), rup, gmpeFlag); 50 | pInc(i) = 1 - normcdf(log(xInc),log(sa),sigma); 51 | end 52 | lambdaInc = sum(lambda_occur .* pInc); 53 | disagg.equal = ((lambda_occur .* p_ex) - (lambda_occur .* pInc)) / (lambda.example-lambdaInc); 54 | disagg.equalMbar = sum(M_vals.*disagg.equal); 55 | 56 | 57 | % disaggs with epsilon 58 | deltaEps = 1; % final binning 59 | epsVals = -3:deltaEps:3; % epsilon bins 60 | 61 | deltaEpsFine = 0.1; % initial finer binning 62 | epsValsFine = -3.5:deltaEpsFine:3.5; % midpoints of bins 63 | p_eps = normpdf(epsValsFine) * deltaEpsFine; % estimate PDF using a PMF with discrete epsilon increments 64 | lambda_M_and_eps = lambda_occur' * p_eps; % rate of events with a given magnitude and epsilon 65 | 66 | 67 | for i = 1:length(M_vals) 68 | 
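% loop over magnitudes: evaluate the GMM for each M, then flag which
% (M, epsilon) combinations push the predicted IM above x_example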
[sa, sigma] = gmm_eval(T, M_vals(i), rup, gmpeFlag); 69 | Ind(i,:) = (log(sa) + epsValsFine*sigma > log(x_example)); % indicator that the M/epsilon value causes IM > x_example 70 | end 71 | exceedRatesFine = Ind .* lambda_M_and_eps; % rates of given M/epsilon values exceeding IM 72 | lambdaExceed = sum(sum(exceedRatesFine)); % this is close to lambda.example, but may differ by a few percent due to epsilon discretization 73 | 74 | % compute mean epsilon 75 | epsDeagg = sum(exceedRatesFine) ./ sum(sum(exceedRatesFine)); 76 | disagg.epsBar = sum(epsValsFine.*epsDeagg); 77 | 78 | % aggregate results to coarser epsilon bins 79 | for j=1:length(epsVals) 80 | idx = epsValsFine >= (epsVals(j)-deltaEps/2) & epsValsFine < (epsVals(j)+deltaEps/2); 81 | exceedRates(:,j) = sum(exceedRatesFine(:,idx),2); 82 | end 83 | 84 | disagg.epsVals = epsVals; % return bin midpoints 85 | disagg.M_Eps = exceedRates / lambdaExceed; % magnitude and epsilon disaggregation 86 | disagg.eps = sum(exceedRates) / lambdaExceed; % epsilon disaggregation 87 | 88 | 89 | disagg.equalMbar = sum(M_vals.*disagg.equal); 90 | 91 | -------------------------------------------------------------------------------- /chapter9/illustrate_vulnerability.m: -------------------------------------------------------------------------------- 1 | % Vulnerability function illustrations 2 | % Jack Baker 3 | % 3/28/2018 4 | % Four-parameter Beta discretization updated 10/21/2020 5 | % color figure option added 12/31/2020 6 | 7 | clear; close all; clc; 8 | 9 | colorspec{1} = [56 95 150]/255; 10 | colorspec{2} = [207 89 33]/255; 11 | colorspec{3} = [158 184 219]/255; 12 | colorspec{4} = [231 184 0]/255; 13 | colorspec{5} = [128 0 0]/255; 14 | 15 | linespec{1} = '-'; 16 | linespec{2} = '-'; 17 | linespec{3} = '-'; 18 | linespec{4} = '-'; 19 | linespec{5} = '-'; 20 | 21 | 22 | 23 | %% settings 24 | im = logspace(log10(0.001), log10(2), 100); % IM values to consider 25 | c = logspace(log10(0.02), log10(380), 200); % consequence levels to consider 26 | 27 | 28 | %% power-law vulnerability 29 | a = 100; 30 | b = 1.3; 31 | betaIM = 0.4; 32 | 33 | medianC = a.*im.^b; 34 | 35 | % PDF 36 | imLevel = 1.; % example IM of interest 37 | cLevel = 150; % cost threshold of interest 38 | c = sort([c cLevel]); % include this specific threshold in the consequence vector, for help with plot 39 | pdfSF = 30; % scale factor to apply to PDF in figure (to size it appropriately for plotting) 40 | pdfC = lognpdf(c,log(a.*imLevel.^b),betaIM); 41 | 42 | probOfExceed = 1 - logncdf(cLevel,log(a.*imLevel.^b),betaIM) % numerical probability of shaded area 43 | 44 | % patch of PDF exceedance area 45 | idx = find(c>=cLevel); 46 | patchX = [imLevel [imLevel*ones(size(idx)) - pdfSF*pdfC(idx)] imLevel]; % x values for PDF patch 47 | patchY = [cLevel c(idx) cLevel]; 48 | 49 | figure 50 | plot(im, medianC, '-k', 'linewidth', 2) 51 | hold on 52 | plot(im, medianC*exp(betaIM), '-', 'linewidth', 2, 'color', colorspec{3}) 53 | plot(im, medianC/exp(betaIM), '-', 'linewidth', 2, 'color', colorspec{3}) 54 | plot(im, cLevel*ones(size(im)), ':k') 55 | plot(imLevel*ones(size(c)) - pdfSF*pdfC, c, '-', 'linewidth', 1, 'color', colorspec{2}) 56 | plot(imLevel*ones(size(c)), c, '-', 'linewidth', 1, 'color', colorspec{2}) 57 | p = patch(patchX, patchY, 'k'); 58 | set(p,'facecolor', [0.7 0.7 0.7]) 59 | 60 | % labels 61 | text(1.6, 230,'Median','FontSize',7) 62 | text(1.6, 310,'84%','FontSize',7) 63 | text(1.6, 110,'16%','FontSize',7) 64 | text(0.65, 200, sprintf('PDF, given \nIM = 1'),'FontSize',7) 65 | 66 | 
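% Illustrative addition, not part of the original script: for the power-law
% vulnerability model above, the mean consequence at this IM follows from the
% mean of a lognormal distribution, as also used later in risk_metrics.m
meanC_atIM = a*imLevel^b * exp(0.5*betaIM^2) % E[C | IM = imLevel]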
67 | xlabel('Intensity Measure, IM') 68 | ylabel('Consequence, C') 69 | yticks([0:100:400]) 70 | axis([0 2 0 400]) 71 | 72 | %% beta distribution vulnerability 73 | 74 | % consequence values 75 | x=0:0.01:1; 76 | 77 | % parameter values 78 | r = [0.5 2 4 7 4 ]; 79 | q = [4 7 4 2 0.5]; 80 | 81 | 82 | 83 | 84 | for i = 1:4 85 | f_x(:,i) = betapdf(x,r(i),q(i)); 86 | legendtext{i} = ['r = ' num2str(r(i)) ', q = ' num2str(q(i))']; 87 | end 88 | 89 | figure 90 | for i = 1:4 91 | plot(x, f_x(:,i), linespec{i}, 'linewidth', 2, 'color', colorspec{i}) ; 92 | hold on 93 | end 94 | xlabel('Consequence, C'); 95 | ylabel('f_C(c)'); 96 | legend(legendtext, 'location', 'northeast') 97 | axis([0 1 0 6]) 98 | 99 | %% four-parameter beta 100 | 101 | dx = 0.001; 102 | x=0:dx:1; 103 | 104 | % parameter values 105 | r = 2; 106 | q = 7; 107 | p_0 = 0.2; 108 | p_1 = 0.1; 109 | 110 | % continuous part of the distribution 111 | f_z = (1-p_0-p_1) * betapdf(x,r,q); 112 | 113 | % discretize 114 | dxDisc = 0.05; 115 | xDisc = 0.025:dxDisc:0.975; 116 | for i=1:length(xDisc) % aggregate probabilities 117 | idx = ( (x>xDisc(i)-0.5*dxDisc) & (x<=xDisc(i)+0.5*dxDisc)); % relevant range of x values 118 | f_zCont(i) = sum(f_z(idx))*dx; 119 | end 120 | 121 | % add discrete probabilities to appropriate bins 122 | f_zCont(1) = f_zCont(1) + p_0; 123 | f_zCont(end) = f_zCont(end) + p_1; 124 | f_z(1) = p_0; 125 | f_z(2) = p_1; 126 | 127 | 128 | figure 129 | bar(xDisc, f_zCont, 1, 'FaceColor',colorspec{3}) 130 | hold on 131 | xlabel('Consequence, C'); 132 | ylabel('Probability'); 133 | % axis([-0.05 1.05 0 0.25]) 134 | axis([0 1 0 0.25]) 135 | 136 | 137 | 138 | 139 | 140 | -------------------------------------------------------------------------------- /chapter9/risk_metrics_epistemic.m: -------------------------------------------------------------------------------- 1 | % Logic tree calculation for failure rate 2 | % Jack Baker 3 | % 8/3/2018 4 | % modified 1/7/2021 to allow optional color figures 5 | 6 | clear; close all; clc; 7 | 8 | colorspec{1} = [56 95 150]/255; 9 | colorspec{2} = [207 89 33]/255; 10 | colorspec{3} = [158 184 219]/255; 11 | colorspec{4} = [231 184 0]/255; 12 | colorspec{5} = [128 0 0]/255; 13 | 14 | linespec{1} = '-'; 15 | linespec{2} = '-'; 16 | linespec{3} = '-'; 17 | linespec{4} = '-'; 18 | linespec{5} = '-'; 19 | 20 | 21 | %% load hazard curve data from Chapter 6 (ch6/calculations/PSHA_calc_w_epistemic.m) 22 | load hazardEpistemicData 23 | wtHaz = wt; % rename weight vector to avoid ambiguity 24 | clear wt 25 | x = logspace(log10(0.05), log10(2), 100); % IM values to consider 26 | 27 | 28 | 29 | %% fragility function specification 30 | betaIM = 0.4; 31 | thetaIM = [0.4 0.6]; 32 | wtTheta = [0.5 0.5]; 33 | 34 | for j = 1:length(thetaIM) 35 | fragility(j,:) = normcdf(log(x), log(thetaIM(j)), betaIM); 36 | end 37 | 38 | 39 | %% risk calculations over all logic tree branches 40 | 41 | idx = 0; % running index for the outputs 42 | for i=1:length(wtHaz) % loop over hazard curve branches 43 | dLambda = abs([diff(lambda_x(i,:)) 0]); % derivative of hazard curve 44 | 45 | for j=1:length(thetaIM) % loop over fragility branches 46 | idx = idx + 1; % increment index 47 | 48 | failRate(idx) = sum(fragility(j,:) .* dLambda); 49 | wtMaster(idx,:) = wtHaz(i) * wtTheta(j); 50 | end 51 | end 52 | 53 | %% mean hazard and mean fragility 54 | for k = 1:length(x) 55 | lambdaImMean(:,k) = sum(lambda_x(:,k) .* wtHaz); % mean hazard 56 | fragilityMean(:,k) = sum(fragility(:,k) .* wtTheta'); % mean fragility 57 | end 58 | 59 | 
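% Illustrative addition, not part of the original script: the weighted mean of
% the branch failure rates should match failRateMeanInputs computed below,
% because the failure rate is linear in both the hazard curve and the fragility
failRateMeanBranches = sum(failRate(:) .* wtMaster(:)) % mean failure rate over all branches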
dLambdaMean = abs([diff(lambdaImMean) 0]); % derivative of mean hazard curve 60 | failRateMeanInputs = sum(fragilityMean .* dLambdaMean); % failure rate using mean hazard and mean fragility 61 | 62 | 63 | 64 | %% fractiles of failure rate 65 | % fractiles of hazard 66 | [failRateSort, dataIDX] = sort(failRate); % order branches from lowest rate to highest 67 | weightCum = cumsum(wtMaster(dataIDX)); % order weights appropriately, and cumulatively sum 68 | 69 | 70 | figure 71 | stairs(failRateSort, weightCum, 'linewidth', 2, 'color', colorspec{3}) 72 | hold on 73 | plot(failRateMeanInputs*[1 1], [0 1], '-', 'linewidth', 2, 'color', [0 0 0]) 74 | legend('Empirical CDF from logic tree', '\lambda(F) from mean inputs', 'location', 'southeast') 75 | set(gca, 'ylim', [0 1]) 76 | xlabel('Annual failure rate, \lambda(F)') 77 | ylabel('Cumulative probability') 78 | 79 | %% bar chart of failure rate 80 | 81 | xInt = 0.2e-3; % width of intervals to plot 82 | xPlot = xInt/2:xInt:13*xInt; % IM intervals to plot 83 | for i=1:length(xPlot) 84 | idx = find( (failRate>=xPlot(i)-xInt/2) & (failRatex)') 83 | legend(['\beta = ' num2str(betaIM)], ['\beta = ' num2str(betaIM2)], ['\beta = ' num2str(betaIM3)], 'Ground-motion hazard', 'location', 'southeast') 84 | xlim([0 1]) 85 | 86 | 87 | 88 | -------------------------------------------------------------------------------- /chapter9/illustrate_fragility.m: -------------------------------------------------------------------------------- 1 | % Fragility function illustrations 2 | % Jack Baker 3 | % 12/8/2017 4 | % modified 1/7/2021 to allow optional color figures 5 | 6 | clear; close all; clc; 7 | 8 | colorspec{1} = [56 95 150]/255; 9 | colorspec{2} = [207 89 33]/255; 10 | colorspec{3} = [158 184 219]/255; 11 | colorspec{4} = [231 184 0]/255; 12 | colorspec{5} = [128 0 0]/255; 13 | 14 | linespec{1} = '-'; 15 | linespec{2} = '-'; 16 | linespec{3} = '-'; 17 | linespec{4} = '-'; 18 | linespec{5} = '-'; 19 | 20 | 21 | %% load previous hazard curve 22 | x = [logspace(log10(0.001), log10(2), 100) 2.1]; % IM values to consider 23 | 24 | % mean hazard from simplified logic tree (ch6/PSHA_calc_w_epistemic.m) 25 | lambdaIM = 
[0.0251694954594308,0.0243005973346253,0.0234365472558623,0.0225792462468511,0.0217305158448367,0.0208920837822666,0.0200655715142511,0.0192524837495085,0.0184542000865126,0.0176719688005284,0.0169069027730509,0.0161599775046000,0.0154320311063565,0.0147237661269249,0.0140357530384293,0.0133684351816874,0.0127221349535275,0.0120970610102702,0.0114933162595579,0.0109109064174252,0.0103497489179256,0.00980968197780037,0.00929047363756543,0.00879183062195665,0.00831340688589137,0.00785481173602961,0.00741561744179956,0.00699536627266170,0.00659357691982798,0.00620975028017252,0.00584337459734840,0.00549392996997187,0.00516089224907525,0.00484373635688936,0.00454193906650127,0.00425498128721521,0.00398234990373885,0.00372353921886845,0.00347805204941328,0.00324540052394014,0.00302510662878499,0.00281670254590154,0.00261973082270939,0.00243374441035183,0.00225830660283430,0.00209299090552312,0.00193738085754465,0.00179106982882404,0.00165366080890037,0.00152476620129790,0.00140400763414932,0.00129101579497441,0.00118543029502282,0.00108689956639214,0.000995080793225799,0.000909639876668399,0.000830251431896055,0.000756598814428752,0.000688374172055584,0.000625278518042679,0.000567021820830418,0.000513323105142555,0.000463910559307276,0.000418521643610899,0.000376903194651442,0.000338811520914285,0.000304012485139082,0.000272281569469359,0.000243403919858480,0.000217174366732617,0.000193397419468867,0.000171887232821039,0.000152467544004570,0.000134971579723542,0.000119241932976582,0.000105130410005028,9.24978482376873e-05,8.12139065351012e-05,7.11568294363794e-05,6.22131874592354e-05,5.42775957953396e-05,4.72524139769621e-05,4.10474292661717e-05,3.55795266353024e-05,3.07723482686310e-05,2.65559455227739e-05,2.28664262410104e-05,1.96456002286791e-05,1.68406255681954e-05,1.44036582884222e-05,1.22915077099856e-05,1.04652995717140e-05,8.89014880956513e-06,7.53484361431181e-06,6.37154214337735e-06,5.37548301094735e-06,4.52471043365022e-06,3.79981467097529e-06,3.18368817390031e-06,2.66129764494389e-06, 10e-10]; 26 | 27 | 28 | 29 | %% illustrate fragility 30 | thetaIM = 0.5; % assumed fragility median 31 | betaIM = 0.4; % assumed fragility beta 32 | 33 | pFail = normcdf(log(x), log(thetaIM), betaIM); 34 | 35 | 36 | figure 37 | plot(x, pFail, '-k', 'linewidth', 2, 'color', colorspec{1}) 38 | hold on 39 | plot([0.01 thetaIM thetaIM], [0.5 0.5 0], ':k') 40 | xlabel('Intensity Measure, IM') 41 | ylabel('P(F | IM = x)') 42 | axis([0 1.5 0 1]) 43 | 44 | 45 | %% failure rate example 46 | 47 | 48 | dLambda = abs([diff(lambdaIM) 0]); % derivative of hazard curve 49 | failContrib = pFail .* dLambda; 50 | failRate = sum(failContrib) 51 | 52 | % second fragility 53 | thetaIM2 = thetaIM*1.2; 54 | pFail2 = normcdf(log(x), log(thetaIM2), betaIM); 55 | failRate2 = sum(pFail2 .* dLambda) 56 | failRateRatio = failRate2/failRate 57 | 58 | % plot hazard and fragility 59 | figure 60 | % yyaxis(axes1,'left'); 61 | yyaxis left 62 | set(gca,'YColor',[0 0 0]); 63 | 64 | plot(x, pFail, '-', 'linewidth', 2, 'color', colorspec{1}) 65 | hold on 66 | plot(x, pFail2, '-', 'linewidth', 2, 'color', colorspec{2}) 67 | % ylabel('P(F | SA(1s)=x)') 68 | ylabel('Failure probability, P(F | IM = x)') 69 | yyaxis right 70 | set(gca,'YColor',[0 0 0]); 71 | plot(x, lambdaIM, '-', 'linewidth', 2, 'color', colorspec{3}) 72 | ylim([0 0.002]) 73 | % xlabel('SA(1s) [g]') 74 | xlabel('SA(1 s) [g]') 75 | % ylabel('\lambda_{SA(1s)>x}') 76 | ylabel('Annual rate of exceedance, \lambda(IM>x)') 77 | xlim([0 1.5]) 78 | legend('Fragility, \theta = 0.5 g', 
'Fragility, \theta = 0.6 g', 'Ground-motion hazard', 'location', 'southeast') 79 | 80 | 81 | %% discretize down for tabular output 82 | xShort = [0.001 0.01 0.1:0.1:1.5]'; 83 | lambdaShort = interp1(x,lambdaIM,xShort); 84 | dLambdaShort = abs([diff(lambdaShort); lambdaShort(end)]); % derivative of hazard curve 85 | pFailShort = normcdf(log(xShort), log(thetaIM), betaIM); 86 | pFail2Short = normcdf(log(xShort), log(thetaIM2), betaIM); 87 | 88 | failRate = sum(pFailShort .* dLambdaShort) 89 | failRate2 = sum(pFail2Short .* dLambdaShort) 90 | failRateRatio = failRate2/failRate 91 | 92 | 93 | %% 94 | figure 95 | bar(xShort, pFailShort .* dLambdaShort, 11,'FaceColor', colorspec{3}) 96 | 97 | xlabel('SA(1 s) [g]') 98 | ylabel('P(F | IM = x_i) \Delta\lambda_i') 99 | xlim([0 1.6]) 100 | ylim([0 8e-6]) 101 | 102 | -------------------------------------------------------------------------------- /chapter9/risk_metrics.m: -------------------------------------------------------------------------------- 1 | % Calculations for risk metrics 2 | % Jack Baker 3 | % 4/6/2018 4 | % modified 1/7/2021 to allow optional color figures 5 | 6 | clear; close all; clc; 7 | 8 | colorspec{1} = [56 95 150]/255; 9 | colorspec{2} = [207 89 33]/255; 10 | colorspec{3} = [158 184 219]/255; 11 | colorspec{4} = [231 184 0]/255; 12 | colorspec{5} = [128 0 0]/255; 13 | 14 | linespec{1} = '-'; 15 | linespec{2} = '-'; 16 | linespec{3} = '-'; 17 | linespec{4} = '-'; 18 | linespec{5} = '-'; 19 | 20 | %% load previous hazard curve 21 | x = logspace(log10(0.001), log10(2), 100); % IM values to consider 22 | lambdaIM = [0.0499999999999835,0.0499999999999526,0.0499999999998669,0.0499999999996345,0.0499999999990176,0.0499999999974144,0.0499999999933375,0.0499999999831917,0.0499999999584814,0.0499999998995826,0.0499999997621849,0.0499999994484950,0.0499999987475601,0.0499999972146465,0.0499999939334784,0.0499999870593386,0.0499999729633268,0.0499999446707956,0.0499998890860822,0.0499997821909026,0.0499995809611956,0.0499992101351074,0.0499985411650159,0.0499973597118274,0.0499953169575537,0.0499918589711437,0.0499861276172992,0.0499768264207973,0.0499620458657973,0.0499390453471067,0.0499039938718262,0.0498516789024106,0.0497752023107870,0.0496656935777901,0.0495120817263140,0.0493009769325844,0.0490167177301840,0.0486416375228157,0.0481565925664885,0.0475417716599423,0.0467777762788488,0.0458469217463872,0.0447346702805233,0.0434310719208310,0.0419320663410720,0.0402404933156301,0.0383666756121702,0.0363284753012917,0.0341507789548300,0.0318644315090385,0.0295047030249437,0.0271094271027446,0.0247169857853305,0.0223643281392259,0.0200851972516911,0.0179087063031132,0.0158583552476435,0.0139515239705416,0.0121994242741281,0.0106074489453449,0.00917582621898981,0.00790047393385860,0.00677394856623091,0.00578639701584792,0.00492643928524683,0.00418193363858863,0.00354059865814674,0.00299048611434400,0.00252031323130196,0.00211967238204122,0.00177914094532868,0.00149031496570583,0.00124578853774161,0.00103909759475864,0.000864642934085106,0.000717603509591866,0.000593847667147087,0.000489847276838750,0.000402597661454561,0.000329544772875408,0.000268520118584374,0.000217683367966791,0.000175472258661902,0.000140559281498994,0.000111814575336414,8.82744592427963e-05,6.91150373044643e-05,5.36303153811853e-05,4.12142659388887e-05,3.13462711919490e-05,2.35793746149071e-05,1.75307851282250e-05,1.28741130601395e-05,9.33287420074566e-06,6.67487504051842e-06,4.70718178205709e-06,3.27146869982854e-06,2.23962826983520e-06,1.50959805555399e-06,1.0014123
1896129e-06]; 23 | dLambda = abs([diff([lambdaIM 0])]); % derivative of hazard curve 24 | 25 | %% base vulnerability model 26 | a = 100; 27 | b = 1.3; 28 | betaIM = 0.4; 29 | 30 | %% loss exceedance curve 31 | dLoss = 0.1; 32 | loss = [0.00 dLoss:dLoss:100]; 33 | 34 | for i = 1:length(loss) 35 | pExceed = 1-logncdf(loss(i),log(a.*x.^b),betaIM); % p(L>loss(i)|IM) 36 | lambdaExceed(i) = sum(pExceed .* dLambda); % lambda(L>loss(i) 37 | end 38 | 39 | %% expected annual loss 40 | 41 | meanLoss = (a.*x.^b) * exp(0.5*betaIM^2); % mean loss given IM (using formula for mean of a lognormal distribution) 42 | AAL = sum(meanLoss .* dLambda) % sum mean loss given IM * dlamda 43 | 44 | AAL2 = sum(lambdaExceed*dLoss) % use area under exceedance curve 45 | 46 | figure 47 | loglog(loss, lambdaExceed, '-', 'linewidth', 2, 'color', colorspec{1}) 48 | ylabel('Annual rate of exceedance, \lambda(C>c)') 49 | xlabel('Consequence, C') 50 | set(gca, 'xtick', [0.1 1 10 100]) 51 | set(gca, 'xticklabel', {0.1, 1, 10, 100}) 52 | 53 | %% coarsely discretized AAL 54 | 55 | % input hazTable from ch6/psha_example_calcs_GR.m (also table 56 | % tab:psha_gr_ex_results) 57 | xCoarse = [0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1]'; 58 | dxCoarse = 0.1; % step size of IM intervals 59 | lambdaIMCoarse = [0.009200766 0.0021162 0.000819076 0.000399591 0.000220265 0.00013068 8.15E-05 5.27E-05 3.50E-05 2.38E-05]'; 60 | 61 | dLambdaCoarse = abs([diff([lambdaIMCoarse; 0])]); % derivative of hazard curve 62 | meanC = (a.*(xCoarse+dxCoarse/2).^b) * exp(0.5*betaIM^2); 63 | product = dLambdaCoarse .* meanC; 64 | AAL3 = sum(product) 65 | 66 | aalTable = [xCoarse lambdaIMCoarse dLambdaCoarse meanC product]; 67 | 68 | 69 | % evaluate discretization 70 | figure 71 | subplot(1,2,1) 72 | bar(xCoarse, dLambdaCoarse .* meanC/dxCoarse, 1, 'FaceColor', colorspec{3}, 'EdgeColor','none') 73 | axis([0 1.5 0 1.2]) 74 | xlabel('Intensity Measure, IM') 75 | ylabel('E[C | IM=x_i] \Delta \lambda_i / \Delta x') 76 | 77 | % re-discretize the fine-discretization case to linear spacing 78 | dx = 0.01; 79 | xLin = 0.01:0.01:2; % IM values to consider 80 | lambdaIMLin = interp1(x,lambdaIM, xLin); 81 | dLambdaLin = abs([diff([lambdaIMLin 0])]); % derivative of hazard curve 82 | meanLossLin = (a.*xLin.^b) * exp(0.5*betaIM^2); % mean loss given IM (using formula for mean of a lognormal distribution) 83 | AAL4 = sum(meanLossLin .* dLambdaLin) % sum mean loss given IM * dlamda 84 | text(-0.1,-0.07,'(a)','Units', 'Normalized', 'VerticalAlignment', 'Top') 85 | 86 | 87 | subplot(1,2,2) 88 | bar(xLin, meanLossLin .* dLambdaLin / dx, 1, 'FaceColor',colorspec{3}, 'EdgeColor','none') 89 | % bar(xLin, meanLossLin .* dLambdaLin / dx, 1, 'FaceColor',colorspec{3}, 'EdgeColor','k', 'edgewidth', 0.25) 90 | axis([0 1.5 0 1.2]) 91 | xlabel('Intensity Measure, IM') 92 | ylabel('E[C | IM=x_i] \Delta \lambda_i / \Delta x') 93 | text(-0.1,-0.07,'(b)','Units', 'Normalized', 'VerticalAlignment', 'Top') 94 | 95 | 96 | %% Exceedance Probability simple example 97 | 98 | IM = 0.1:0.1:0.3; 99 | rates = [0.06 0.016 0.003]; 100 | 101 | P_Ex = [0.1 0.4 0.6]; % newly assumed values 102 | duration = 1; 103 | 104 | dLambdaSimple = abs([diff([rates 0])]); 105 | 106 | % equation~\ref{eq:risk_exceedance_probability_curve_discrete} 107 | lambdaExceed = sum(P_Ex .* dLambdaSimple) 108 | PExceed = 1 - exp(-lambdaExceed .* duration); 109 | 110 | % approximate approach 111 | P_IM = 1 - exp(-dLambdaSimple .* duration) 112 | 113 | compareRates = [dLambdaSimple.*duration; P_IM] 114 | 115 | PExceedApprox = sum(P_Ex 
.* P_IM); 116 | 117 | comparePExceed = [PExceed; PExceedApprox] 118 | 119 | %% revised Exceedance Probability numbers 120 | rates = 20*rates; 121 | 122 | dLambdaSimple = abs([diff([rates 0])]); 123 | 124 | % equation~\ref{eq:risk_exceedance_probability_curve_discrete} 125 | lambdaExceed = sum(P_Ex .* dLambdaSimple) 126 | PExceed = 1 - exp(-lambdaExceed .* duration); 127 | 128 | % approximate approach 129 | P_IM = 1 - exp(-dLambdaSimple .* duration) 130 | 131 | compareRates = [dLambdaSimple.*duration; P_IM] 132 | 133 | PExceedApprox = sum(P_Ex .* P_IM); 134 | 135 | comparePExceed = [PExceed; PExceedApprox] 136 | 137 | 138 | 139 | 140 | 141 | 142 | 143 | -------------------------------------------------------------------------------- /chapter6/psha_example_calcs_GR.m: -------------------------------------------------------------------------------- 1 | % PSHA calculation using a Gutenberg-Richter magnitude distribution. 2 | % The calculation follows the example of Section 6.3.3 3 | % 4 | % Created by Jack Baker 5 | 6 | 7 | clear; close all; clc 8 | addpath('../utils/') 9 | 10 | 11 | 12 | % basic setup 13 | x = logspace(log10(0.001), log10(2), 100); % IM values to consider 14 | T = 1; % period of interest 15 | IM_label = 'SA(1 s)'; 16 | Axis_label = 'Spectral Acceleration, SA(1 s) [g]'; 17 | gmpeFlag = 1; % =1 for BJF97, =2 for CY14 18 | 19 | % specify colors and line styles for plots 20 | colorspec{1} = [56 95 150]/255; 21 | colorspec{2} = [207 89 33]/255; 22 | colorspec{3} = [158 184 219]/255; 23 | 24 | 25 | % seismicity parameters 26 | R = 10; 27 | Rrup = R; Rjb = R; 28 | rup.Fault_Type = 1; % 1 is strike slip 29 | rup.Vs30 = 500; 30 | rup.R = 10; 31 | % CY parameters 32 | rup.Ztor = 0; 33 | rup.delta = 90; 34 | rup.rupLambda = 0; 35 | rup.Z10 = 999; 36 | rup.Fhw = 0; 37 | rup.FVS30 = 0; 38 | rup.region = 1; 39 | 40 | 41 | % plotting parameters 42 | figureAxisLimits = [0.05 max(x) 1\0.99e-5 1e-1]; 43 | figureXTickVals = [0.05 0.1 0.5 1 2]; 44 | 45 | 46 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 47 | %% point source with G-R magnitudes 48 | 49 | M = [5 6 7 8]; 50 | 51 | % From Table 3.5, fixed rate of M>5, M_max = 8 52 | lambda_M = [0.05 0.03153 0.01988 0.01252 0.007882 0.004955 0.003108 0.001942 0.001207 0.0007432 0.0004505 0.0002657 0.0001492 7.57E-05 2.93E-05]; 53 | M_vals = [5.1 5.3 5.5 5.7 5.9 6.1 6.3 6.5 6.7 6.9 7.1 7.3 7.5 7.7 7.9]; 54 | 55 | x_example = 0.2; % example values for table 56 | [lambda, example_output, disagg] = fn_PSHA_given_M_lambda(lambda_M, M_vals, T, x, x_example, rup, gmpeFlag); 57 | 58 | x_example2 = 0.5; % output results for a second threshold 59 | [lambda2, example_output2, disagg2] = fn_PSHA_given_M_lambda(lambda_M, M_vals, T, x, x_example2, rup, gmpeFlag); 60 | 61 | 62 | %% hazard curve 63 | figure 64 | loglog(x, lambda.x,'-', 'linewidth', 2, 'color', colorspec{1}) 65 | hold on 66 | plot(x_example, lambda.example, 'o', 'color', colorspec{1}) 67 | plot(x_example2, lambda2.example, 'o', 'color', colorspec{1}) 68 | 69 | % annotate text results for example cases 70 | text1 = ['\lambda(' IM_label ' > ' num2str(x_example) ' g) = ' num2str(lambda.example,3)]; 71 | text2 = {['\lambda(' IM_label ' > ' num2str(x_example2) ' g) ']; [' = ' num2str(lambda2.example,3)]}; 72 | 73 | text(x_example*1.1, lambda.example*1.2,text1) 74 | text(x_example2*1.1, lambda2.example*1.2,text2) 75 | 76 | xlabel(Axis_label) 77 | ylabel('Annual rate of exceedance, \lambda') 78 | axis(figureAxisLimits) 79 | set(gca, 'xtick', figureXTickVals) 80 | 81 | %% output 
a subset of the hazard curve for use in a table 82 | imSmall = [1e-3 0.1:0.1:1]; 83 | ratesSmall = exp(interp1(log(x),log(lambda.x),log(imSmall))); %loglog interpolate 84 | hazTable = [imSmall' ratesSmall']; 85 | 86 | 87 | 88 | %% disaggregation 89 | figure 90 | subplot(1,2,1) 91 | bar(M_vals, disagg.example, 1,'FaceColor', colorspec{3}) 92 | hold on 93 | plot( disagg.Mbar*[1 1], [0 1], ':k', 'linewidth', 2) % plot mean magnitude 94 | hx = xlabel('Magnitude, M'); 95 | hy = ylabel(['P(m | ' IM_label ' > ' num2str(x_example) ' g)']); 96 | axis([5 8 0 0.2]) 97 | 98 | subplot(1,2,2) 99 | bar(M_vals, disagg2.example, 1,'FaceColor', colorspec{3}) 100 | hold on 101 | plot( disagg2.Mbar*[1 1], [0 1], ':k', 'linewidth', 2) % plot mean magnitude 102 | hx = xlabel('Magnitude, M'); 103 | hy = ylabel(['P(m | ' IM_label ' > ' num2str(x_example2) ' g)']); 104 | axis([5 8 0 0.2]) 105 | 106 | mBar = [disagg.Mbar disagg2.Mbar] 107 | 108 | % tabulate output 109 | disagg_table = [M_vals' disagg.example' disagg2.example']; 110 | 111 | 112 | 113 | %% Metrics to evaluate calculations and figure 114 | 115 | % im with given rate 116 | rateTarg = 1/1000; 117 | imTarg = interp1(ratesSmall, imSmall, rateTarg) % linear interpolation 118 | imTarg = exp(interp1(log(ratesSmall), log(imSmall), log(rateTarg))) % log interpolation 119 | 120 | lnImManual = ( (log (0.2) - log (0.3)) * (log (1E-3) - log (6.81E-4)) ) / (log (2.7E-3) - log (6.81E-4)) + log(0.3) % manual log interpolation 121 | imManual = exp(lnImManual) 122 | 123 | % hazard curves slope 124 | imSlope = [0.2 0.3]; 125 | rateSlope = exp(interp1(log(x),log(lambda.x),log(imSlope))); %loglog interpolate 126 | kEst = - (log(rateSlope(1)) - log(rateSlope(2)))/ (log(imSlope(1))- log(imSlope(2))); 127 | k0Est = rateSlope(1) / exp(-kEst * log(imSlope(1))); 128 | lambdaPowerLaw = k0Est * exp(-kEst*log(x)); 129 | 130 | %% hazard curve derivative 131 | 132 | dLambda = -diff([ratesSmall 0]); 133 | 134 | figure 135 | subplot(1,2,1); 136 | bar(imSmall+0.05, dLambda, 1,'FaceColor', colorspec{3}) 137 | axis([0 1 0 0.05]) 138 | text(-0.1,-0.07,'(a)','Units', 'Normalized', 'VerticalAlignment', 'Top') 139 | xlabel(Axis_label) 140 | ylabel('\Delta \lambda_i ') 141 | text(-0.1,-0.07,'(a)','Units', 'Normalized', 'VerticalAlignment', 'Top') 142 | 143 | % finer discretization 144 | xFine = 0.01:0.01:1; 145 | lambdaFine = exp(interp1(log(x),log(lambda.x),log(xFine))); %loglog interpolate 146 | dLambdaFine = -diff([lambdaFine 0]); 147 | 148 | subplot(1,2,2); 149 | bar(xFine+0.005, dLambdaFine, 1,'FaceColor', colorspec{3}) 150 | axis([0 1 0 0.008]) 151 | text(-0.1,-0.07,'(b)','Units', 'Normalized', 'VerticalAlignment', 'Top') 152 | 153 | xlabel(Axis_label) 154 | ylabel('\Delta \lambda_i ') 155 | text(-0.1,-0.07,'(b)','Units', 'Normalized', 'VerticalAlignment', 'Top') 156 | 157 | 158 | 159 | %% summary plot 160 | figure 161 | h1 = loglog(x, lambda.x,'-', 'linewidth', 2, 'color', colorspec{1}); 162 | hold on 163 | plot(imTarg, rateTarg, 'ok') 164 | h2 = plot(x, lambdaPowerLaw,'-', 'linewidth', 2, 'color', colorspec{3}); 165 | plot([0.01 imTarg imTarg], [rateTarg rateTarg 1e-10], ':k', 'linewidth', 1) 166 | % annotate text results for example cases 167 | text1 = ['\lambda(' IM_label ' > ' num2str(x_example) ' g) = ' num2str(lambda.example,3)]; 168 | text2 = ['\lambda(' IM_label ' > ' num2str(imTarg,3) ' g) = ' num2str(rateTarg,3)]; 169 | % text3 = ['\lambda(M \geq M_{min}) = ' num2str(lambda_M(1))]; 170 | text3 = ['\Sigma_i \lambda(rup_i) = ' num2str(lambda_M(1))]; 171 | % text(x_example*1.05, 
lambda.example*1.2,text1) 172 | text(imTarg*1.05, rateTarg*1.5,text2) 173 | text(0.01*1.05, lambda_M(1)*1.25,text3) 174 | legend([h1 h2], 'Original hazard curve', 'Fitted power-law hazard curve', 'location', 'southwest') 175 | xlabel(Axis_label) 176 | ylabel('Annual rate of exceedance, \lambda') 177 | axis([0.01 figureAxisLimits(2:4)]) % include lower IM values 178 | set(gca, 'xtick', figureXTickVals) 179 | 180 | 181 | 182 | 183 | -------------------------------------------------------------------------------- /utils/gmm_bjf97.m: -------------------------------------------------------------------------------- 1 | function [sa, sigma] = gmm_bjf97(M, R, T, Fault_Type, Vs) 2 | 3 | % by Jack Baker, 2/1/05 4 | % Stanford University 5 | % bakerjw@stanford.edu 6 | % 7 | % Modified 6/21/2016 to add optional sigmaFactor input 8 | % 9 | % Make median and standard deviation predictions using the following model: 10 | % 11 | % Boore, D. M., Joyner, W. B., and Fumal, T. E. (1997). ?Equations for 12 | % Estimating Horizontal Response Spectra and Peak Acceleration from 13 | % Western North American Earthquakes: A Summary of Recent Work.? 14 | % Seismological Research Letters, 68(1), 128?153. 15 | % 16 | % This script includes standard deviations for either 17 | % arbitrary or average components of ground motion 18 | % 19 | % This script has also been modified to correct an error in the original 20 | % publication. See Boore, DM (2005). "Erratum: Equations for Estimating 21 | % Horizontal Response Spectra and Peak Acceleration from Western North 22 | % American Earthquakes: A Summary of Recent Work." Seismological Research 23 | % Letters, 76(3), 368-369. 24 | 25 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 26 | % INPUT 27 | % 28 | % M = Moment Magnitude 29 | % R = joyner boore distance 30 | % T = period (0.001 to 2s) 31 | % Fault_Type = 1 for strike-slip fault 32 | % = 2 for reverse-slip fault 33 | % = 0 for non-specified mechanism 34 | % Vs = shear wave velocity averaged over top 30 m (use 310 for soil, 620 for rock) 35 | % sigmaFactor = arbitrary factor by which to scale sigma (for sensitivity 36 | % studies) 37 | % 38 | % OUTPUT 39 | % 40 | % sa = median spectral acceleration prediction 41 | % sigma = logarithmic standard deviation of spectral acceleration 42 | % prediction FOR AN ARBITRARY OR AVERAGE COMPONENT 43 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 44 | 45 | 46 | % coefficients 47 | % note: period = 0.001 is the PGA case (coded here as non-zero to faciliate 48 | % log-interpolation) 49 | period = [ 0.001 0.1 0.11 0.12 0.13 0.14 0.15 0.16 0.17 0.18 0.19 0.2 0.22 0.24 0.26 0.28 0.3 0.32 0.34 0.36 0.38 0.4 0.42 0.44 0.46 0.48 0.5 0.55 0.6 0.65 0.7 0.75 0.8 0.85 0.9 0.95 1 1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 2 ]; 50 | B1ss = [ -0.313 1.006 1.072 1.109 1.128 1.135 1.128 1.112 1.09 1.063 1.032 0.999 0.925 0.847 0.764 0.681 0.598 0.518 0.439 0.361 0.286 0.212 0.14 0.073 0.005 -0.058 -0.122 -0.268 -0.401 -0.523 -0.634 -0.737 -0.829 -0.915 -0.993 -1.066 -1.133 -1.249 -1.345 -1.428 -1.495 -1.552 -1.598 -1.634 -1.663 -1.685 -1.699 ]; 51 | B1rv = [ -0.117 1.087 1.164 1.215 1.246 1.261 1.264 1.257 1.242 1.222 1.198 1.17 1.104 1.033 0.958 0.881 0.803 0.725 0.648 0.57 0.495 0.423 0.352 0.282 0.217 0.151 0.087 -0.063 -0.203 -0.331 -0.452 -0.562 -0.666 -0.761 -0.848 -0.932 -1.009 -1.145 -1.265 -1.37 -1.46 -1.538 -1.608 -1.668 -1.718 -1.763 -1.801 ]; 52 | B1all = [ -0.242 1.059 1.13 1.174 1.2 1.208 1.204 1.192 1.173 1.151 1.122 1.089 1.019 
0.941 0.861 0.78 0.7 0.619 0.54 0.462 0.385 0.311 0.239 0.169 0.102 0.036 -0.025 -0.176 -0.314 -0.44 -0.555 -0.661 -0.76 -0.851 -0.933 -1.01 -1.08 -1.208 -1.315 -1.407 -1.483 -1.55 -1.605 -1.652 -1.689 -1.72 -1.743 ]; 53 | B2 = [ 0.527 0.753 0.732 0.721 0.711 0.707 0.702 0.702 0.702 0.705 0.709 0.711 0.721 0.732 0.744 0.758 0.769 0.783 0.794 0.806 0.82 0.831 0.84 0.852 0.863 0.873 0.884 0.907 0.928 0.946 0.962 0.979 0.992 1.006 1.018 1.027 1.036 1.052 1.064 1.073 1.08 1.085 1.087 1.089 1.087 1.087 1.085 ]; 54 | B3 = [ 0 -0.226 -0.23 -0.233 -0.233 -0.23 -0.228 -0.226 -0.221 -0.216 -0.212 -0.207 -0.198 -0.189 -0.18 -0.168 -0.161 -0.152 -0.143 -0.136 -0.127 -0.12 -0.113 -0.108 -0.101 -0.097 -0.09 -0.078 -0.069 -0.06 -0.053 -0.046 -0.041 -0.037 -0.035 -0.032 -0.032 -0.03 -0.032 -0.035 -0.039 -0.044 -0.051 -0.058 -0.067 -0.074 -0.085 ]; 55 | B5 = [ -0.778 -0.934 -0.937 -0.939 -0.939 -0.938 -0.937 -0.935 -0.933 -0.93 -0.927 -0.924 -0.918 -0.912 -0.906 -0.899 -0.893 -0.888 -0.882 -0.877 -0.872 -0.867 -0.862 -0.858 -0.854 -0.85 -0.846 -0.837 -0.83 -0.823 -0.818 -0.813 -0.809 -0.805 -0.802 -0.8 -0.798 -0.795 -0.794 -0.793 -0.794 -0.796 -0.798 -0.801 -0.804 -0.808 -0.812 ]; 56 | Bv = [ -0.371 -0.212 -0.211 -0.215 -0.221 -0.228 -0.238 -0.248 -0.258 -0.27 -0.281 -0.292 -0.315 -0.338 -0.36 -0.381 -0.401 -0.42 -0.438 -0.456 -0.472 -0.487 -0.502 -0.516 -0.529 -0.541 -0.553 -0.579 -0.602 -0.622 -0.639 -0.653 -0.666 -0.676 -0.685 -0.692 -0.698 -0.706 -0.71 -0.711 -0.709 -0.704 -0.697 -0.689 -0.679 -0.667 -0.655 ]; 57 | Va = [ 1396 1112 1291 1452 1596 1718 1820 1910 1977 2037 2080 2118 2158 2178 2173 2158 2133 2104 2070 2032 1995 1954 1919 1884 1849 1816 1782 1710 1644 1592 1545 1507 1476 1452 1432 1416 1406 1396 1400 1416 1442 1479 1524 1581 1644 1714 1795 ]; 58 | h = [ 5.57 6.27 6.65 6.91 7.08 7.18 7.23 7.24 7.21 7.16 7.1 7.02 6.83 6.62 6.39 6.17 5.94 5.72 5.5 5.3 5.1 4.91 4.74 4.57 4.41 4.26 4.13 3.82 3.57 3.36 3.2 3.07 2.98 2.92 2.89 2.88 2.9 2.99 3.14 3.36 3.62 3.92 4.26 4.62 5.01 5.42 5.85 ]; 59 | sigma1 = [ 0.431 0.44 0.437 0.437 0.435 0.435 0.435 0.435 0.435 0.435 0.435 0.435 0.437 0.437 0.437 0.44 0.44 0.442 0.444 0.444 0.447 0.447 0.449 0.449 0.451 0.451 0.454 0.456 0.458 0.461 0.463 0.465 0.467 0.467 0.47 0.472 0.474 0.477 0.479 0.481 0.484 0.486 0.488 0.49 0.493 0.493 0.495 ]; 60 | sigmac = [0.160 0.134 0.141 0.148 0.153 0.158 0.163 0.166 0.169 0.173 0.176 0.177 0.182 0.185 0.189 0.192 0.195 0.197 0.199 0.200 0.202 0.204 0.205 0.206 0.209 0.210 0.211 0.214 0.216 0.218 0.220 0.221 0.223 0.226 0.228 0.230 0.230 0.233 0.236 0.239 0.241 0.244 0.246 0.249 0.251 0.254 0.256]; 61 | sigmar = [ 0.460 0.460 0.459 0.461 0.461 0.463 0.465 0.466 0.467 0.468 0.469 0.470 0.473 0.475 0.476 0.480 0.481 0.484 0.487 0.487 0.491 0.491 0.494 0.494 0.497 0.497 0.501 0.504 0.506 0.510 0.513 0.515 0.518 0.519 0.522 0.525 0.527 0.531 0.534 0.537 0.541 0.544 0.546 0.550 0.553 0.555 0.557]; 62 | sigmae = [ 0.184 0 0 0 0 0 0 0 0 0.002 0.005 0.009 0.016 0.025 0.032 0.039 0.048 0.055 0.064 0.071 0.078 0.085 0.092 0.099 0.104 0.111 0.115 0.129 0.143 0.154 0.166 0.175 0.184 0.191 0.2 0.207 0.214 0.226 0.235 0.244 0.251 0.256 0.262 0.267 0.269 0.274 0.276 ]; 63 | sigmalny = [ 0.495 0.460 0.459 0.461 0.461 0.463 0.465 0.466 0.467 0.468 0.469 0.470 0.474 0.475 0.477 0.482 0.484 0.487 0.491 0.492 0.497 0.499 0.502 0.504 0.508 0.510 0.514 0.520 0.526 0.533 0.539 0.544 0.549 0.553 0.559 0.564 0.569 0.577 0.583 0.590 0.596 0.601 0.606 0.611 0.615 0.619 0.622]; 64 | 65 | % interpolate between periods if neccesary 66 | if 
(isempty(find(period == T, 1))) 67 | index_low = sum(period High Code 22 | % 2 --> Moderate Code 23 | % 3 --> Low Code 24 | % 4 --> Pre-Code 25 | % 26 | % analysisCase.buildingType % 2- or 3-letter code for construction type. Allowable options: 27 | % W1 Wood, Light Frame (< 5,000 sq. ft.) 28 | % W2 Wood, Commercial and Industrial (> 5,000 sq. ft.) 29 | % S1L Steel Moment Frame 30 | % S1M Steel Moment Frame 31 | % S1H Steel Moment Frame 32 | % S2L Steel Braced Frame 33 | % S2M Steel Braced Frame 34 | % S2H Steel Braced Frame 35 | % S3 Steel Light Frame 36 | % S4L Steel Frame with Cast-in-Place Concrete Shear Walls 37 | % S4M Steel Frame with Cast-in-Place Concrete Shear Walls 38 | % S4H Steel Frame with Cast-in-Place Concrete Shear Walls 39 | % S5L Steel Frame with Unreinforced Masonry Infill Walls 40 | % S5M Steel Frame with Unreinforced Masonry Infill Walls 41 | % S5H Steel Frame with Unreinforced Masonry Infill Walls 42 | % C1L Concrete Moment Frame 43 | % C1M Concrete Moment Frame 44 | % C1H Concrete Moment Frame 45 | % C2L Concrete Shear Walls 46 | % C2M Concrete Shear Walls 47 | % C2H Concrete Shear Walls 48 | % C3L Concrete Frame with Unreinforced Masonry Infill Walls 49 | % C3M Concrete Frame with Unreinforced Masonry Infill Walls 50 | % C3H Concrete Frame with Unreinforced Masonry Infill Walls 51 | % PC1 Precast Concrete Tilt-Up Walls 52 | % PC2L Precast Concrete Frames with Concrete Shear Walls 53 | % PC2M Precast Concrete Frames with Concrete Shear Walls 54 | % PC2H Precast Concrete Frames with Concrete Shear Walls 55 | % RM1L Reinforced Masonry Bearing Walls with Wood or Metal Deck Diaphragms 56 | % RM1M Reinforced Masonry Bearing Walls with Wood or Metal Deck Diaphragms 57 | % RM2L Reinforced Masonry Bearing Walls with Precast Concrete Diaphragms 58 | % RM2M Reinforced Masonry Bearing Walls with Precast Concrete Diaphragms 59 | % RM2H Reinforced Masonry Bearing Walls with Precast Concrete Diaphragms 60 | % URML Unreinforced Masonry Bearing Walls 61 | % URMM Unreinforced Masonry Bearing Walls 62 | % MH Mobile Homes 63 | % 64 | % analysisCase.occType % 4- or 5-letter code for occupancy type. 
Allowable options: 65 | % RES1 Single Family Dwelling 66 | % RES2 Mobile Home 67 | % RES3 Multi Family Dwelling 68 | % RES4 Temporary Lodging 69 | % RES5 Institutional Dormitory 70 | % RES6 Nursing Home 71 | % COM1 Retail Trade 72 | % COM2 Wholesale Trade 73 | % COM3 Personal and Repair Services 74 | % COM4 Professional/Technical/ Business Services 75 | % COM5 Banks/Financial Institutions 76 | % COM6 Hospital 77 | % COM7 Medical Office/Clinic 78 | % COM8 Entertainment & Recreation 79 | % COM9 Theaters 80 | % COM10 Parking 81 | % IND1 Heavy 82 | % IND2 Light 83 | % IND3 Food/Drugs/Chemicals 84 | % IND4 Metals/Minerals Processing 85 | % IND5 High Technology 86 | % IND6 Construction 87 | % AGR1 Agriculture 88 | % REL1 Church/Membership/Organization 89 | % GOV1 General Services 90 | % GOV2 Emergency Response 91 | % EDU1 Schools/Libraries 92 | % EDU2 Colleges/Universities 93 | % 94 | % pgaVals PGA values for which to compute loss ratios 95 | % 96 | % OUTPUT VARIABLES: 97 | % 98 | % lossRatio loss ratio (total loss) for each PGA 99 | % 100 | % caseLabel text label describing the analysis case 101 | % 102 | % structLossRatio loss ratio (structural) for each PGA 103 | % 104 | % nonStructAccLossRatio loss ratio (nonstructural acceleration 105 | % sensitive) for each PGA 106 | % 107 | % nonStructDriftLossRatio loss ratio (nonstructural drift 108 | % sensitive) for each PGA 109 | % 110 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 111 | 112 | 113 | load hazusData % load data created by import_HAZUS_data.m 114 | 115 | % find index for building type 116 | idxBldg = find(strcmp(analysisCase.buildingType, hazusData.buildingTypeCode)); 117 | assert(length(idxBldg)==1, ['Error with specified buiding type, ' analysisCase.buildingType]) % make sure an appropriate building type was specified 118 | 119 | % find index for occupancy type 120 | idxOcc = find(strcmp(analysisCase.occType, hazusData.occCode)); 121 | assert(length(idxOcc)==1, ['Error with specified occupancy type, ' analysisCase.occType]) % make sure an appropriate occupancy type was specified 122 | 123 | % make sure pgaVals is a column 124 | pgaVals = pgaVals(:); 125 | 126 | % get fragility parameters for the given structure type and code level 127 | medianDS = hazusData.medians{analysisCase.codeLevel}(idxBldg,:); 128 | betaDS = hazusData.betas{analysisCase.codeLevel}(idxBldg,:); 129 | assert(~isnan(medianDS(1)), ['Error, this building type and code level is not allowed']) % make sure an appropriate occupancy type was specified 130 | 131 | % get loss ratios for the given occupancy type 132 | lossStruct = hazusData.lossStruct(idxOcc,:); 133 | lossAccNS = hazusData.lossAccNS(idxOcc,:); 134 | lossDriftNS = hazusData.lossDriftNS(idxOcc,:); 135 | 136 | % damage state exceedance probabilities per pgaVals 137 | for i = 1:length(pgaVals) 138 | pDsExceed(i,:) = normcdf( log(pgaVals(i)./medianDS)./betaDS); 139 | end 140 | pDsExceed = [pDsExceed zeros(size(pgaVals))]; % pad with zeros for probability of exceeding DS 5, to help with differentiation in the next step 141 | 142 | for j = 1:4 143 | pDsEqual(:,j) = pDsExceed(:,j) - pDsExceed(:,j+1); 144 | end 145 | 146 | % compute loss ratios per PGA 147 | structLossRatio = sum(pDsEqual .* (ones(size(pgaVals)) * lossStruct ),2)/100; 148 | nonStructAccLossRatio = sum(pDsEqual .* (ones(size(pgaVals)) * lossAccNS ),2)/100; 149 | nonStructDriftLossRatio = sum(pDsEqual .* (ones(size(pgaVals)) * lossDriftNS),2)/100; 150 | 151 | lossRatio = structLossRatio + nonStructAccLossRatio + 
nonStructDriftLossRatio; 152 | 153 | % make a label for the analysis case 154 | caseLabel = [analysisCase.buildingType ', ' analysisCase.occType ', ' hazusData.codeLevel{analysisCase.codeLevel} '-code']; 155 | 156 | end 157 | 158 | -------------------------------------------------------------------------------- /chapter9/demonstrate_HAZUS_fragility.m: -------------------------------------------------------------------------------- 1 | % Use HAZUS fragility parameters to plot example fragility functions 2 | % Jack Baker 3 | % modified 1/7/2021 to allow optional color figures 4 | 5 | % 6 | % INPUT VARIABLES: 7 | % 8 | % analysisCase.codeLevel % flag for code level: 9 | % 1 --> High Code 10 | % 2 --> Moderate Code 11 | % 3 --> Low Code 12 | % 4 --> Pre-Code 13 | % 14 | % analysisCase.buildingType % 2- or 3-letter code for construction type. Allowable options: 15 | % W1 Wood, Light Frame (< 5,000 sq. ft.) 16 | % W2 Wood, Commercial and Industrial (> 5,000 sq. ft.) 17 | % S1L Steel Moment Frame 18 | % S1M Steel Moment Frame 19 | % S1H Steel Moment Frame 20 | % S2L Steel Braced Frame 21 | % S2M Steel Braced Frame 22 | % S2H Steel Braced Frame 23 | % S3 Steel Light Frame 24 | % S4L Steel Frame with Cast?in?Place Concrete Shear Walls 25 | % S4M Steel Frame with Cast?in?Place Concrete Shear Walls 26 | % S4H Steel Frame with Cast?in?Place Concrete Shear Walls 27 | % S5L Steel Frame with Unreinforced Masonry Infill Walls 28 | % S5M Steel Frame with Unreinforced Masonry Infill Walls 29 | % S5H Steel Frame with Unreinforced Masonry Infill Walls 30 | % C1L Concrete Moment Frame 31 | % C1M Concrete Moment Frame 32 | % C1H Concrete Moment Frame 33 | % C2L Concrete Shear Walls 34 | % C2M Concrete Shear Walls 35 | % C2H Concrete Shear Walls 36 | % C3L Concrete Frame with Unreinforced Masonry Infill Walls 37 | % C3M Concrete Frame with Unreinforced Masonry Infill Walls 38 | % C3H Concrete Frame with Unreinforced Masonry Infill Walls 39 | % PC1 Precast Concrete Tilt?Up Walls 40 | % PC2L Precast Concrete Frames with Concrete Shear Walls 41 | % PC2M Precast Concrete Frames with Concrete Shear Walls 42 | % PC2H Precast Concrete Frames with Concrete Shear Walls 43 | % RM1L Reinforced Masonry Bearing Walls with Wood or Metal Deck Diaphragms 44 | % RM1M Reinforced Masonry Bearing Walls with Wood or Metal Deck Diaphragms 45 | % RM2L Reinforced Masonry Bearing Walls with Precast Concrete Diaphragms 46 | % RM2M Reinforced Masonry Bearing Walls with Precast Concrete Diaphragms 47 | % RM2H Reinforced Masonry Bearing Walls with Precast Concrete Diaphragms 48 | % URML Unreinforced Masonry Bearing Walls 49 | % URMM Unreinforced Masonry Bearing Walls 50 | % MH Mobile Homes 51 | % 52 | % analysisCase.occType % 4- or 5-letter code for construction type. 
Allowable options: 53 | % RES1 Single Family Dwelling 54 | % RES2 Mobile Home 55 | % RES3 Multi Family Dwelling 56 | % RES4 Temporary Lodging 57 | % RES5 Institutional Dormitory 58 | % RES6 Nursing Home 59 | % COM1 Retail Trade 60 | % COM2 Wholesale Trade 61 | % COM3 Personal and Repair Services 62 | % COM4 Professional/Technical/ Business Services 63 | % COM5 Banks/Financial Institutions 64 | % COM6 Hospital 65 | % COM7 Medical Office/Clinic 66 | % COM8 Entertainment & Recreation 67 | % COM9 Theaters 68 | % COM10 Parking 69 | % IND1 Heavy 70 | % IND2 Light 71 | % IND3 Food/Drugs/Chemicals 72 | % IND4 Metals/Minerals Processing 73 | % IND5 High Technology 74 | % IND6 Construction 75 | % AGR1 Agriculture 76 | % REL1 Church/Membership/Organization 77 | % GOV1 General Services 78 | % GOV2 Emergency Response 79 | % EDU1 Schools/Libraries 80 | % EDU2 Colleges/Universities 81 | % 82 | % pgaVals PGA values for which to compute loss ratios 83 | % 84 | 85 | colorspec{1} = [56 95 150]/255; 86 | colorspec{2} = [207 89 33]/255; 87 | colorspec{3} = [158 184 219]/255; 88 | colorspec{4} = [231 184 0]/255; 89 | colorspec{5} = [128 0 0]/255; 90 | 91 | linespec{1} = '-'; 92 | linespec{2} = '-'; 93 | linespec{3} = '-'; 94 | linespec{4} = '-'; 95 | linespec{5} = '-'; 96 | 97 | 98 | % specify a text case (see fn_HAZUS_loss for allowable options) 99 | analysisCase.codeLevel = 1; 100 | analysisCase.buildingType = 'C2L'; 101 | analysisCase.occType = 'COM1'; 102 | pgaVals = 0.01:0.01:1.5; % PGA values of interest 103 | pgaVals = pgaVals(:); % make sure pgaVals is a column 104 | pgaEx = 0.5; % example value 105 | 106 | 107 | load hazusData % load data created by import_HAZUS_data.m 108 | 109 | % find index for building type 110 | idxBldg = find(strcmp(analysisCase.buildingType, hazusData.buildingTypeCode)); 111 | % find index for occupancy type 112 | idxOcc = find(strcmp(analysisCase.occType, hazusData.occCode)); 113 | 114 | 115 | % get fragility parameters for the given structure type and code level 116 | medianDS = hazusData.medians{analysisCase.codeLevel}(idxBldg,:); 117 | betaDS = hazusData.betas{analysisCase.codeLevel}(idxBldg,:); 118 | assert(~isnan(medianDS(1)), ['Error, this building type and code level is not allowed']) % make sure an appropriate occupancy type was specified 119 | 120 | % example numbers 121 | p1 = normcdf(log(pgaEx), log(medianDS(1)), betaDS(1)) 122 | p2 = normcdf(log(pgaEx), log(medianDS(2)), betaDS(2)) 123 | p1_Equal = p1-p2 124 | 125 | 126 | figure 127 | hold on 128 | for i = 1:4 129 | plot(pgaVals, normcdf(log(pgaVals), log(medianDS(i)), betaDS(i)), linespec{i}, 'linewidth', 2, 'color', colorspec{i}) 130 | end 131 | plot([pgaEx pgaEx], [0 1], ':k') 132 | plot([0 pgaEx], [p1 p1], ':k') 133 | plot([0 pgaEx], [p2 p2], ':k') 134 | legend('ds_1', 'ds_2', 'ds_3', 'ds_4', 'location', 'Northeast') 135 | xlabel('Peak Ground Acceleration, PGA [g]') 136 | ylabel('P(DS \geq ds_i | PGA = x)') 137 | axis([0 1.5 0 1]) 138 | 139 | % annotate text results for example cases 140 | text1 = ['P(DS \geq ds_1) = ' num2str(p1,2)]; 141 | text2 = ['P(DS \geq ds_2) = ' num2str(p2,2)]; 142 | text(.05, p1+0.03,text1,'FontSize',7) 143 | text(.05, p2+0.03,text2,'FontSize',7) 144 | text(.55, p2+0.15,['P(DS = ds_1) = ' num2str(p1_Equal,1)],'FontSize',7) 145 | 146 | 147 | 148 | 149 | %% loss ratios for the given occupancy type 150 | lossStruct = hazusData.lossStruct(idxOcc,:); 151 | lossAccNS = hazusData.lossAccNS(idxOcc,:); 152 | lossDriftNS = hazusData.lossDriftNS(idxOcc,:); 153 | 154 | % example numbers for text 
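% A minimal illustrative cross-check of the fragility and loss numbers above
% (pExceedEx, pEqualEx, and lossCheckEx are new names used only in this aside;
% the calculation mirrors the one performed inside fn_HAZUS_loss):
pExceedEx = normcdf(log(pgaEx./medianDS)./betaDS);  % P(DS >= ds_i | PGA = pgaEx), i = 1..4
% prepending 1 and appending 0 to the exceedance probabilities and differencing
% gives the probability of each discrete outcome; the first entry is P(no damage)
pEqualEx = -diff([1 pExceedEx 0]);                  % [P(none) P(ds_1) ... P(ds_4)]
% expected total loss ratio at pgaEx: probability-weighted HAZUS loss ratios
% (structural plus both nonstructural categories), converted from percent
lossCheckEx = sum( pEqualEx(2:end) .* (lossStruct + lossAccNS + lossDriftNS)/100 )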
155 | lossTotal = (lossStruct + lossAccNS + lossDriftNS)/100 % loss ratios given DS 156 | 157 | diff(-[1 normcdf(log(pgaEx), log(medianDS), betaDS) 0]) % probabilities of each DS, given PGA 158 | 159 | 160 | % calculate loss ratio for all PGAs 161 | [lossRatio] = fn_HAZUS_loss(analysisCase, pgaVals); 162 | lossEx = fn_HAZUS_loss(analysisCase, pgaEx) % value for specific case (without rounding) 163 | 164 | % plot loss ratio 165 | figure 166 | % plot(pgaVals, structLossRatio, '--') 167 | % hold on 168 | % plot(pgaVals, nonStructAccLossRatio, ':') 169 | % plot(pgaVals, nonStructDriftLossRatio, '-.') 170 | plot(pgaVals, lossRatio, '-', 'linewidth', 2, 'color', colorspec{1}) 171 | hold on 172 | plot([pgaEx pgaEx 0], [0 lossEx lossEx], ':k', 'linewidth', 1) 173 | plot(pgaEx, lossEx, 'ok', 'linewidth', 1) 174 | xlabel('Peak Ground Acceleration, PGA [g]') 175 | ylabel('Mean loss ratio, E[C | PGA]') 176 | text(0.56, 0.12, sprintf('PGA = %.2f g \nLoss Ratio = %.2f', pgaEx, lossEx), 'FontSize', 9) 177 | 178 | 179 | 180 | 181 | -------------------------------------------------------------------------------- /chapter5/SimplifiedHfMethodExample.m: -------------------------------------------------------------------------------- 1 | function SimplifiedHfMethodExample 2 | %Plot several figures (Figure 5.15, .16, .17, .19) which illustrate the 3 | %essential components of the simplified-physics 'stochastic' method. 4 | %Brendon Bradley 5 | %June 2020 6 | 7 | %figure to produce 8 | runtype=4; %1=source - Figure 5.15; 9 | % 2=path - Figure 5.16; 10 | % 3=site - Figure 5.17; 11 | % 4=convolution to create complete FAS and time series - Figure 5.19 12 | 13 | 14 | if runtype==1 %source function 15 | 16 | %magnitude and stress parameter values 17 | M=[6.0 6.0 7.5 7.5]; 18 | DSigma=[50 100 50 100]; %bar 19 | 20 | for i=1:length(M) 21 | dSigma=DSigma(i); 22 | f=logspace(-2,2,100); 23 | E=sourceSpectra(f,M(i),dSigma); 24 | 25 | end 26 | 27 | %Plotting details 28 | figure 29 | %colors and line styles 30 | Colors=[0.5 0.5 0 0]; 31 | LineStyles={'-';'--';'-';'--'}; 32 | %plot source spectra 33 | for i=1:length(M) 34 | loglog(f,E,'LineStyle',LineStyles{i},'Color',Colors(i)*[1 1 1],'LineWidth',2); hold on; 35 | end 36 | xlabel('Frequency, f [Hz]'); ylabel('Fourier Acceleration Spectrum [m/s^2/Hz]'); 37 | xlims=[0.01 10]; ylims=[1e-2 2.5e1]; 38 | xlim(xlims); ylim(ylims); 39 | text(1.3e-2,3e-1,'M7.5','HorizontalAlignment','left','VerticalAlignment','middle','FontSize',14,'Color',[0 0 0]); 40 | text(1e-1,5e-2,'M6.0','HorizontalAlignment','right','VerticalAlignment','middle','FontSize',14,'Color',[0 0 0]); 41 | text(2,3.0,'\Delta\sigma=10 MPa','HorizontalAlignment','left','VerticalAlignment','bottom','FontSize',14,'Color',[0 0 0]); 42 | text(2,1.6,'\Delta\sigma=5 MPa','HorizontalAlignment','left','VerticalAlignment','top','FontSize',14,'Color',[0 0 0]); 43 | 44 | elseif runtype==2 %path attenuation 45 | %parameters for calculation 46 | f=[0.1 5]; %frequencies of interest 47 | V=3.6; %shear-wave velocity at source location (km/s) 48 | Q=[180 371]; %anelastic attenuation values considered for the respective frequencies 49 | region=2; %1=CEUS; 2=WUS 50 | 51 | %source-to-site distances to consider 52 | R=logspace(log10(1.25),log10(1000/1.25),100); 53 | 54 | %get geo spreading function 55 | Z=geospreading(R,region); 56 | 57 | %get path attenuation 58 | for i=1:length(f) 59 | P(:,i)=Z.*exp(-pi*f(i)*R/(Q(i)*V)); 60 | end 61 | 62 | %plotting 63 | Colors=[0 0.3 0.6]; 64 | LineStyles={'-';'--';'-.'}; 65 | figure; 66 | 
loglog(R,Z,'LineStyle',LineStyles{1},'Color',Colors(i)*[1 1 1],'LineWidth',2); hold on; 67 | for i=1:length(f) 68 | loglog(R,P(:,i),'LineStyle',LineStyles{i+1},'Color',Colors(i+1)*[1 1 1],'LineWidth',2); hold on; 69 | end 70 | xlabel('Source-to-site distance, R [km]'); ylabel('Path attenuation, P'); 71 | xlims=[1 1e3]; ylims=[1.e-3 1.e0]; 72 | xlim(xlims); ylim(ylims); 73 | %plot text 74 | text(35,4e-2,{'Geometric';'spreading'},'HorizontalAlignment','left','VerticalAlignment','middle','FontSize',14,'Color',[0 0 0]); 75 | bodyWaveLineX=[10 15]; 76 | plot(bodyWaveLineX,1./bodyWaveLineX*1.2,'-k'); 77 | text(geomean(bodyWaveLineX),geomean(1./bodyWaveLineX)*1.2,'1/R','HorizontalAlignment','left','VerticalAlignment','bottom','FontSize',14,'Color',[0 0 0],'Interpreter','latex'); 78 | surfWaveLineX=[180 180*1.5]; 79 | plot(surfWaveLineX,(1/70)*(130./surfWaveLineX).^0.5*1.2,'-k'); 80 | text(geomean(surfWaveLineX),geomean((1/70)*(130./surfWaveLineX).^0.5)*1.2,'$1/{\surd}R$','HorizontalAlignment','left','VerticalAlignment','bottom','FontSize',14,'Color',[0 0 0],'Interpreter','latex'); 81 | text(600,4e-3,{'f=0.1 Hz';'Q=180'},'HorizontalAlignment','right','VerticalAlignment','top','FontSize',14,'Color',[0 0 0]); 82 | text(100,4e-3,{'f=5 Hz';'Q=371'},'HorizontalAlignment','right','VerticalAlignment','top','FontSize',14,'Color',[0 0 0]); 83 | 84 | elseif runtype==3 %site amplification 85 | 86 | %crustal velocity profile 87 | [z,Vs,rho]=booreSiteAmpProfile; 88 | %QWL-based amplification 89 | Vsrc=3.5; rhosrc=2.8; %velocity and density at the source location (km/s and t/m^3, resp.) 90 | [f,Amp]=QWL(z,Vs,rho,Vsrc,rhosrc); %amplification based on quarter-wavelength theory 91 | 92 | %add dimuniation due to kappa 93 | kappa0=[0 0.01 0.02 0.04 0.08]; 94 | for i=1:length(kappa0) 95 | G(:,i) = Amp.*exp(-pi*kappa0(i)*f); 96 | end 97 | 98 | %plot the amplification 99 | figure; 100 | loglog(f,G,'-k','LineWidth',2); hold on; 101 | xlabel('Frequency, f [Hz]'); ylabel('Site Amplification, S'); 102 | xlims=[5e-2 1e2]; ylims=[0.5 5]; 103 | xlim(xlims); ylim(ylims); 104 | YTicks=[0.5:0.1:5]; 105 | YTickLabels=cell(length(YTicks),1); 106 | YTickLabels{6}='1'; YTickLabels{16}='2';YTickLabels{26}='3'; 107 | YTickLabels{36}='4';YTickLabels{46}='5'; 108 | set(gca,'YTick',YTicks,'YTickLabel',YTickLabels); 109 | text(20,2.5,'$\kappa_0=0.0$','HorizontalAlignment','left','VerticalAlignment','middle','FontSize',14,'Color',[0 0 0],'Interpreter','latex'); 110 | text(17,1.65,'0.01','HorizontalAlignment','left','VerticalAlignment','bottom','FontSize',14,'Color',[0 0 0],'Interpreter','latex'); 111 | text(9,1.5,'0.02','HorizontalAlignment','left','VerticalAlignment','middle','FontSize',14,'Color',[0 0 0],'Interpreter','latex'); 112 | text(5,1.3,'0.04','HorizontalAlignment','left','VerticalAlignment','middle','FontSize',14,'Color',[0 0 0],'Interpreter','latex'); 113 | text(2,1.05,'0.08','HorizontalAlignment','left','VerticalAlignment','middle','FontSize',14,'Color',[0 0 0],'Interpreter','latex'); 114 | 115 | elseif runtype==4 %combined source+path+site for scenario rupture 116 | 117 | %step 1: Compute the FAS model 118 | %source 119 | M=6; 120 | dSigma=50; 121 | f=logspace(-2,2,100); 122 | E=sourceSpectra(f,M,dSigma); 123 | %path 124 | R=20; 125 | region=2; %1=CEUS; 2=WUS 126 | G=geospreading(R,region); 127 | V=3.5; 128 | for i=1:length(f) 129 | if f(i)<1 130 | Q=180; 131 | else 132 | Q=180*f(i)^0.45; 133 | end 134 | end 135 | D=exp(-pi*f*R./(Q*V)); 136 | P=G.*D; 137 | 138 | %site 139 | %get profile 140 | [z,Vs,rho]=booreSiteAmpProfile; 
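% For context, a rough sketch of the omega-square source scaling that a
% sourceSpectra-type routine is commonly built on (sourceSpectra itself is
% defined elsewhere in this file and is not reproduced here; M0_dynecm and
% f0_Hz are illustrative names used only in this aside). With the stress
% parameter in bars, the source shear-wave velocity in km/s, and the seismic
% moment in dyne-cm (e.g., Boore 2003):
M0_dynecm = 10^(1.5*M + 16.05);                  % seismic moment for moment magnitude M
f0_Hz = 4.906e6 * V * (dSigma/M0_dynecm)^(1/3);  % Brune corner frequency, roughly 0.28 Hz here
% below f0 the Fourier acceleration spectrum scales as f^2 and above f0 it is
% roughly flat, until the kappa diminution applied in the site term below
% rolls off the high frequencies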
141 | %get QWL-based amplification 142 | Vsrc=3.5; rhosrc=2.8; 143 | [f_amp,Amp]=QWL(z,Vs,rho,Vsrc,rhosrc); 144 | %interpolate onto f array 145 | [I]=interpAmpFn(f_amp,Amp,f); 146 | %add dimuniation due to kappa 147 | kappa0=0.045; 148 | K=exp(-pi*kappa0*f); 149 | S = I.*K; 150 | 151 | %total spectrum 152 | A=E.*P.*S; 153 | 154 | %step 2: white noise generation, and windowing 155 | %use Boore and Thompson (2014) to get time series duration 156 | fa = 10^(2.181-0.496*M); 157 | Ds=1/(2*fa); 158 | Dp=PathDurationBT14(R); 159 | Tgm=Ds+Dp; 160 | 161 | %create the time domain white noise 162 | dt=0.005; 163 | t=0:dt:min(3*Tgm,26); 164 | tshift=3; 165 | rng(1); %set random seed to be repeatable. 166 | noise=randn(1,length(t)); 167 | 168 | %Saragoni and Hart windowing function 169 | eps=0.2; eta=0.05; 170 | b= -(eps*log(eta))/(1+eps*(log(eps)-1)); 171 | c = b/eps; 172 | a = (exp(1)/eps)^b; 173 | ftgm=2; 174 | teta=ftgm*Tgm; 175 | w=a*(t/teta).^b.*exp(-c.*(t/teta)); 176 | 177 | %now plot windowed noise 178 | figure; 179 | normCoeff=max(abs(noise)); 180 | plot(t,w,'LineWidth',2,'Color',0.7*[1 1 1]); hold on; 181 | plot(t,-w,'LineWidth',2,'Color',0.7*[1 1 1]); 182 | plot(t,noise.*w/normCoeff,'k','LineWidth',1.); hold on 183 | plot([-1.0 0],[0 0],'-k'); 184 | % xlabel('Time, t [s]'); 185 | ylabel('Acceleration (unscaled)'); 186 | text(8,0.5,{'Windowing function'},'HorizontalAlignment','left','VerticalAlignment','middle','FontSize',14,'Color',0.7*[1 1 1]); 187 | text(24.5,0.85,{'Step 2: Windowed white noise'},'HorizontalAlignment','right','VerticalAlignment','middle','FontSize',14,'Color',[0 0 0]); 188 | xlims=[-1.5 25]; ylims=[-1.05 1.05]; 189 | xlim(xlims); ylim(ylims); 190 | set(gca,'XTickLabel',{}); 191 | 192 | %step 3: shape the windowed noise and convert back to time domain 193 | %first convert into fourier spectrum 194 | [f_,fas]=getFourierSpectra(noise.*w,dt); 195 | norm=sqrt(sum(real(fas).^2)/length(fas)); 196 | fasN=fas/norm; 197 | 198 | %now the inverse fft to get the time series 199 | nfft=2^ceil(log(length(noise.*w))/log(2)); 200 | Fas=fft(noise.*w,nfft); 201 | Fas_orig=Fas; 202 | rms_norm=sqrt(sum(real(Fas).^2)/length(Fas)); 203 | df=(1/dt)*(1)/nfft; 204 | for i=2:nfft/2 205 | f_(i)=(i-1)*df; 206 | [A_(i)]=interpFourierSpectrum(f,A,f_(i)); 207 | sfact(i)=A_(i)/rms_norm; 208 | Fas(i)=sfact(i)*Fas(i); 209 | Fas(nfft+1-i)=sfact(i)*Fas(nfft-i); 210 | end 211 | sfact(1)=0; 212 | Fas(1)=0; 213 | fNq=1/(2*dt); 214 | [ANq]=interpFourierSpectrum(f,A,fNq); 215 | Fas(nfft)=ANq*Fas(nfft)/norm; 216 | %plot 217 | figure 218 | loglog(f_(2:end),abs(Fas(2:nfft/2)),'k','LineWidth',1.); hold on; 219 | loglog(f,A,'LineWidth',2,'Color',0.7*[1 1 1]); hold on; 220 | xlabel('Frequency, f [Hz]'); ylabel('Fourier Acceleration Spectrum [m/s^2/Hz]'); 221 | xlims=[0.01 100]; ylims=[5e-6 5e-1]; 222 | xlim(xlims); ylim(ylims); 223 | text(xlims(1)*1.2,ylims(2)*0.7,{'Step 4: Spectrum-scaled noise'},'HorizontalAlignment','left','VerticalAlignment','middle','FontSize',14,'Color',[0 0 0]); 224 | text(0.04,1e-3,{'Modelled spectrum, A(f)'},'HorizontalAlignment','left','VerticalAlignment','middle','FontSize',14,'Color',0.7*[1 1 1]); 225 | 226 | %comparison plot; 227 | % figure; 228 | % plot([1:nfft/2],sfact); 229 | % semilogy([1:nfft],abs(Fas_orig),[1:nfft],abs(Fas)); hold on; 230 | 231 | % figure; plot(f_,A_); 232 | % Atotal=[A_ flipud(A_)]; 233 | % figure; plot([f_ (200-f_)],Atotal); 234 | acc_scaled=(1/dt)*ifft(Fas,nfft); 235 | t2=[0:1:nfft-1]*dt; 236 | figure; 237 | 
plot(t2(1:length(t)),acc_scaled(1:length(t)),'-k','LineWidth',1.); hold on; 238 | plot([-1 0],[0 0],'-k','LineWidth',1.); 239 | xlabel('Time, t [s]'); ylabel('Acceleration, a [m/s^2]'); 240 | xlims=[-1.5 25]; ylims=[-0.7 0.7]; 241 | xlim(xlims); ylim(ylims); 242 | text(24.5,0.6,{'Step 5: Simulated ground motion'},'HorizontalAlignment','right','VerticalAlignment','middle','FontSize',14,'Color',[0 0 0]); 243 | 244 | 245 | end 246 | 247 | end 248 | 249 | function [A_target]=interpFourierSpectrum(f,A,f_target) 250 | %interpolate with constants at bounds 251 | 252 | for i=1:length(f_target) 253 | if f_target(i)f(end) 256 | A_target(i)=A(end); 257 | else 258 | A_target(i)=interp1(f,A,f_target(i)); 259 | end 260 | end 261 | 262 | end 263 | 264 | function [f,fas]=getFourierSpectra(acc,dt) 265 | 266 | nfft=2^ceil(log(length(acc))/log(2)); 267 | Fas=fft(acc,nfft); 268 | % figure;plot([1:1:nfft],abs(Fas)); 269 | f=(1/dt)*(0:nfft/2)/nfft; 270 | fas=Fas(1:length(f)); 271 | %now remove the first point for f=0 272 | f(1)=[]; 273 | fas(1)=[]; 274 | 275 | %testing; 276 | % acc2=ifft(Fas); 277 | % A=1; 278 | 279 | end 280 | 281 | function Dp=PathDurationBT14(R) 282 | 283 | tabulated=[ 284 | 0 0 285 | 7 2.4 286 | 45 8.4 287 | 125 10.9 288 | 175 17.4 289 | 270 34.2 290 | ]; 291 | 292 | for i=1:length(R) 293 | if R(i)<270 294 | Dp(i)=interp1(tabulated(:,1),tabulated(:,2),R(i)); 295 | else 296 | Dp(i)=34.2+0.156*(R(i)-270); 297 | end 298 | end 299 | 300 | end 301 | 302 | 303 | 304 | function [A]=interpAmpFn(f_amp,Amp,f) 305 | %interpolate with constants at bounds 306 | 307 | for i=1:length(f) 308 | if f(i)>f_amp(2) 309 | A(i)=Amp(2); 310 | elseif f(i)= 30 & lambda <= 150; % frv: 1 for lambda between 30 and 150, 0 otherwise 55 | fnm = lambda >= -120 & lambda <= -60; % fnm: 1 for lambda between -120 and -60, 0 otherwise 56 | 57 | if Fhw == 1 58 | HW = 1; 59 | elseif Fhw == 0 60 | HW = 0; 61 | else 62 | HW = Rx>=0; 63 | end 64 | 65 | d_DPP=0; % for median calculatio, d_DPP=0. 
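% Note on the period handling below (descriptive comments only; no variables
% are added): T == 1000 is used as a flag meaning "return Sa and sigma at
% every period in the model's tabulated period vector, excluding its first
% two entries"; in that case the loop runs from index 3, and index 2 is
% evaluated separately to obtain PGA for the short-period comparison. For a
% single requested period Ti that is not one of the tabulated values,
% CY_2014_sub is evaluated at the two tabulated periods bracketing Ti and the
% results are interpolated: Sa linearly in log(Sa) versus log(T), and sigma
% (already a logarithmic standard deviation) linearly versus log(T).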
66 | 67 | if length (T) == 1 && T == 1000; % Compute Sa and sigma with pre-defined period 68 | Sa=zeros(1,length(period)-2); 69 | sigma=zeros(1,length(period)-2); 70 | period1=period(3:end); 71 | for ip=3:length(period) 72 | [Sa(ip-2),sigma(ip-2)]=CY_2014_sub(M, ip, Rup, Rjb, Rx, Ztor, delta, frv, fnm, HW, Z10, Vs30, FVS30,region, d_DPP); 73 | [PGA,sigma_PGA]=CY_2014_sub(M,2,Rup, Rjb, Rx, Ztor, delta, frv, fnm, HW, Z10, Vs30, FVS30,region, d_DPP); 74 | if Sa(ip-2) Ti)); 87 | ip_low = find(period==T_low); 88 | ip_high = find(period==T_high); 89 | 90 | [Sa_low, sigma_low] = CY_2014_sub(M, ip_low, Rup, Rjb, Rx, Ztor, delta, frv, fnm, HW, Z10, Vs30, FVS30, region,d_DPP); 91 | [Sa_high, sigma_high] = CY_2014_sub(M, ip_high, Rup, Rjb, Rx, Ztor, delta, frv, fnm, HW, Z10, Vs30, FVS30, region, d_DPP); 92 | [PGA, sigma_PGA]=CY_2014_sub(M,2,Rup, Rjb, Rx, Ztor, delta, frv, fnm, HW, Z10, Vs30, FVS30, region, d_DPP); 93 | 94 | x = [log(T_low) log(T_high)]; 95 | Y_sa = [log(Sa_low) log(Sa_high)]; 96 | Y_sigma = [sigma_low sigma_high]; 97 | Sa(i) = exp(interp1(x, Y_sa, log(Ti))); 98 | sigma(i) = interp1(x, Y_sigma, log(Ti)); 99 | if Sa(i)6 && M<6.9 182 | term10= gamma_JP_IT (ip)*term10; 183 | end 184 | end 185 | if region == 3 186 | term10 = gamma_Wn(ip)* term10; 187 | end 188 | 189 | %% Style of faulting term 190 | term2=(c1_a(ip)+c1_c(ip)/(cosh(2*max(M-4.5,0))))*F_RV; 191 | term3=(c1_b(ip)+c1_d(ip)/cosh(2*max(M-4.5,0)))*F_NM; 192 | 193 | 194 | %% Ztor term 195 | if F_RV==1 196 | E_Ztor = (max(2.704-1.226*max(M-5.849,0),0))^2; 197 | else 198 | E_Ztor = (max(2.673-1.136*max(M-4.970,0),0))^2; 199 | end 200 | 201 | if Ztor == 999 202 | Ztor = E_Ztor; 203 | end 204 | delta_ZTOR=Ztor-E_Ztor; 205 | 206 | term4=(c7(ip)+c7_b(ip)/cosh(2*max(M-4.5,0)))*delta_ZTOR; 207 | %% Hanging wall term 208 | term12=c9(ip)*HW*cos(delta)*(c9_a(ip)+(1-c9_a(ip))*tanh(Rx/c9_b(ip)))... 209 | *(1-sqrt(R_JB^2+Ztor^2)/(R_RUP+1)); 210 | 211 | %% Basin Depth term 212 | % Z1.0 (m) ~ Vs30 (m/s) relationship 213 | 214 | if region ~= 2 % in California and non-Japan region 215 | z_1= exp(-7.15/4*log((Vs30^4+570.94^4)/(1360^4+570.94^4))); 216 | else 217 | z_1= exp(-5.23/2*log((Vs30^2+412.39^2)/(1360^2+412.39^2))); 218 | end 219 | 220 | if Z10 ==999 221 | d_Z1 = 0; 222 | else 223 | d_Z1=Z10*1000-z_1; 224 | end 225 | %% Dip term 226 | term5=(c11(ip)+c11_b(ip)/cosh(2*max(M-4.5,0)))*(cos(delta)^2); 227 | 228 | %% Directivity 229 | term11=c8(ip)*max(1-max(R_RUP-40,0)/30,0)*min(max(M-5.5,0)/0.8,1)... 230 | *exp(-c8_a(ip)*(M-c8_b(ip))^2)*d_DPP; 231 | 232 | term1=c1(ip); 233 | 234 | ln_yrefij=term1+term2+term3+term4+term5+term6+term7+term8+term9+term10+... 235 | term11+term12; 236 | 237 | yrefij=exp(ln_yrefij); 238 | 239 | %% Site response 240 | term14=phi1(ip)*min(log(Vs30/1130),0); 241 | term15=phi2(ip)*(exp(phi3(ip)*(min(Vs30,1130)-360))-exp(phi3(ip)*(1130-360)))*log((yrefij+phi4(ip))/phi4(ip)); 242 | term16=phi5(ip)*(1-exp(-d_Z1/phi6(ip))); 243 | 244 | Sa= yrefij*exp(term14+term15+term16); 245 | %% Compute standard deviation 246 | Finferred=(FVS30==0); % 1: Vs30 is inferred from geology. 247 | Fmeasured=(FVS30==1); % 1: Vs30 is measured. 
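% Descriptive comments for the standard deviation calculation below: NL0 is
% the slope of the nonlinear site-response term with respect to ln(yrefij);
% sigmaNL0 is the within-event standard deviation, interpolated between
% sigma1 and sigma2 for magnitudes between 5 and 6.5, with sigma3 applying to
% inferred Vs30 and 0.7 to measured Vs30, and with the site nonlinearity
% entering through the (1+NL0)^2 term; tau is the magnitude-dependent
% between-event standard deviation. The reported sigma combines the two as
% sqrt((1+NL0)^2*tau^2 + sigmaNL0^2).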
248 | 249 | 250 | NL0=phi2(ip)*(exp(phi3(ip)*(min(Vs30,1130)-360))-exp(phi3(ip)*(1130-360)))*(yrefij/(yrefij+phi4(ip))); 251 | sigmaNL0 = (sigma1(ip)+(sigma2(ip) - sigma1(ip))/1.5*(min(max(M,5),6.5)-5))*sqrt((sigma3(ip)*Finferred + 0.7* Fmeasured) + (1+NL0)^2); 252 | 253 | tau = tau1(ip) + (tau2(ip)-tau1(ip))/1.5 * (min(max(M,5),6.5)-5); 254 | sigma=sqrt((1+NL0)^2*tau^2+sigmaNL0^2); 255 | 256 | 257 | 258 | 259 | 260 | --------------------------------------------------------------------------------
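As a closing usage sketch, the vulnerability function excerpted above can be convolved numerically with a hazard curve to estimate a mean annual loss ratio. This is only an illustration: the hazard curve lambdaPGA below is a placeholder rather than output from any calculation in this repository, and pgaGrid, dLambda, and meanAnnualLossRatio are hypothetical names; only fn_HAZUS_loss and its input structure come from the code above.

% placeholder hazard curve: annual rate of exceeding each PGA level
pgaGrid   = 0.01:0.01:1.5;              % PGA values [g]
lambdaPGA = 0.02*exp(-4*pgaGrid);       % illustrative lambda(PGA > x) [1/yr], not a real site

% vulnerability: expected loss ratio given PGA, from fn_HAZUS_loss above
analysisCase.codeLevel    = 1;          % High code
analysisCase.buildingType = 'C2L';
analysisCase.occType      = 'COM1';
lossRatio = fn_HAZUS_loss(analysisCase, pgaGrid);

% discretize the hazard curve into occurrence rates of each PGA bin and take
% the rate-weighted sum of the loss ratios (a Riemann-sum approximation of
% integrating E[C | PGA = x] against |d lambda / dx|)
dLambda = -diff([lambdaPGA(:); 0]);
meanAnnualLossRatio = sum(lossRatio(:) .* dLambda)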