├── README.md ├── LICENSE.md └── vcftidy.py /README.md: -------------------------------------------------------------------------------- 1 | vcftidy 2 | ======= 3 | 4 | bring some order to the *fruit salad* that is VCF. 5 | 6 | About 7 | ----- 8 | 9 | VCF is the standard for representing variants, but different variant callers use different conventions for key pieces of information, e.g. alt depths in the sample fields. vcftidy aims to improve this by: 10 | 11 | 1. putting ref and alt read depths into the AD field for each genotype (a la GATK) and setting Number=A in the header. 12 | 2. splitting multi-allelic variants into separate records. 13 | 3. normalizing (trimming and left-aligning) the variants. 14 | 15 | Steps 2 and 3 should greatly reduce false negatives due to incorrect annotations. If you have a common error in a VCF, please open an issue so that we can address it in `vcftidy`. 16 | 17 | Use 18 | --- 19 | 20 | $ python vcftidy.py $VCF $REFERENCE\_FASTA > $TIDY\_VCF 21 | 22 | See Also 23 | -------- 24 | 25 | + [vt](https://github.com/atks/vt) and the associated [paper](http://t.co/J48Irh4wEe) do a nice job of *decomposing* and *normalizing* variants. 26 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 quinlan-lab 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /vcftidy.py: -------------------------------------------------------------------------------- 1 | """ 2 | This implements: 3 | 4 | 1. allele splitting: ref=A, alt=AT,AAT,ACT becomes 3 separate variants. 5 | Adjusts PL (or GL) and removes AF and DP fields as needed. 6 | 7 | 2. left alignment as described in Tan et al., 'Unified Representation of Genetic Variants', 8 | Bioinformatics (2015): btv112. 9 | 10 | 3. simplify variants: pos=123, ref=AAT, alt=AT becomes pos=124, ref=AT, alt=T 11 | 12 | 4. When splitting alleles, it pulls out the correct subset of annotations from 13 | SnpEff (ANN= or EFF= tags) and VEP. 14 | 15 | 16 | It adjusts AD for GATK and RO,AO for freebayes. It should also parse the header and 17 | keep fields with Number=R or Number=A or Number=G, adjust those, and discard 18 | others from the format field.
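For example, when no reference fasta is supplied only trimming is applied
(see `leftnorm` and `normalize` below):

    >>> leftnorm('chr1', 123, 'CC', 'CCT')
    (124, 'C', 'CT')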
19 | """ 20 | 21 | from collections import OrderedDict 22 | import sys 23 | 24 | INFO_EXCLUDE = ('AF',) 25 | GT_EXCLUDE = ('DP',) 26 | 27 | def snpeff_ann(ann, alt, alts): 28 | # in these, the annos are separated by "," and the fields within each anno 29 | # are the alleles. They are just the allele itself 30 | ann = ann.split(",") 31 | return ",".join(x for x in ann if x.startswith("%s|" % alt)) 32 | 33 | def snpeff_eff(eff, alt, alts): 34 | """ 35 | >>> eff = 'CODON_CHANGE_PLUS_CODON_DELETION(MODERATE||gaggaa/gaa|EE28E|196|MICAL3|protein_coding|CODING|ENST00000498573|1|1|WARNING_TRANSCRIPT_NO_START_CODON),CODON_CHANGE_PLUS_CODON_DELETION(MODERATE||gaggaa/gaa|EE4E|188|MICAL3|protein_coding|CODING|ENST00000578984|1|1|WARNING_TRANSCRIPT_INCOMPLETE),CODON_CHANGE_PLUS_CODON_DELETION(MODERATE||gaggaa/gaa|EE998E|2002|MICAL3|protein_coding|CODING|ENST00000441493|21|1),CODON_INSERTION(MODERATE||gag/gaGGAg|E28EE|196|MICAL3|protein_coding|CODING|ENST00000498573|1|2|WARNING_TRANSCRIPT_NO_START_CODON),CODON_INSERTION(MODERATE||gag/gaGGAg|E4EE|188|MICAL3|protein_coding|CODING|ENST00000578984|1|2|WARNING_TRANSCRIPT_INCOMPLETE),CODON_INSERTION(MODERATE||gag/gaGGAg|E998EE|2002|MICAL3|protein_coding|CODING|ENST00000441493|21|2),DOWNSTREAM(MODIFIER||2412||966|MICAL3|protein_coding|CODING|ENST00000400561||2),DOWNSTREAM(MODIFIER||2415||966|MICAL3|protein_coding|CODING|ENST00000400561||1),DOWNSTREAM(MODIFIER||2422||966|MICAL3|protein_coding|CODING|ENST00000444520||2)' 36 | >>> snpeff_eff(eff, 'TTT', ['CTTCT', 'TTT']) 37 | 'CODON_INSERTION(MODERATE||gag/gaGGAg|E28EE|196|MICAL3|protein_coding|CODING|ENST00000498573|1|2|WARNING_TRANSCRIPT_NO_START_CODON),CODON_INSERTION(MODERATE||gag/gaGGAg|E4EE|188|MICAL3|protein_coding|CODING|ENST00000578984|1|2|WARNING_TRANSCRIPT_INCOMPLETE),CODON_INSERTION(MODERATE||gag/gaGGAg|E998EE|2002|MICAL3|protein_coding|CODING|ENST00000441493|21|2),DOWNSTREAM(MODIFIER||2412||966|MICAL3|protein_coding|CODING|ENST00000400561||2),DOWNSTREAM(MODIFIER||2422||966|MICAL3|protein_coding|CODING|ENST00000444520||2)' 38 | 39 | >>> snpeff_eff(eff, 'CTTCT', ['CTTCT', 'TTT']) 40 | 'CODON_CHANGE_PLUS_CODON_DELETION(MODERATE||gaggaa/gaa|EE28E|196|MICAL3|protein_coding|CODING|ENST00000498573|1|1|WARNING_TRANSCRIPT_NO_START_CODON),CODON_CHANGE_PLUS_CODON_DELETION(MODERATE||gaggaa/gaa|EE4E|188|MICAL3|protein_coding|CODING|ENST00000578984|1|1|WARNING_TRANSCRIPT_INCOMPLETE),CODON_CHANGE_PLUS_CODON_DELETION(MODERATE||gaggaa/gaa|EE998E|2002|MICAL3|protein_coding|CODING|ENST00000441493|21|1),DOWNSTREAM(MODIFIER||2415||966|MICAL3|protein_coding|CODING|ENST00000400561||1)' 41 | 42 | """ 43 | # http://snpeff.sourceforge.net/SnpEff_manual.version_4_0.html#input 44 | eff = eff.split(",") 45 | 46 | # this version of snpeff uses the 1-based index of the alt. 47 | # it stores that index in the 10th position of the EFF field. 
48 | idx = str(alts.index(alt) + 1) 49 | subset = [e for e in eff if e.split("|")[10].rstrip(")") == idx] 50 | 51 | return ",".join(subset) 52 | 53 | 54 | def vep_csq(csq, alt, alts): 55 | """ 56 | >>> csq = 'intron_variant|||ENSG00000047056|WDR37|ENST00000263150||||-/494|protein_coding|1,downstream_gene_variant|||ENSG00000047056|WDR37|ENST00000436154||||-/209|protein_coding|1,intron_variant|||ENSG00000047056|WDR37|ENST00000358220||||-/494|protein_coding|1,stop_lost|Tga/Cga|*/R|ENSG00000047056|WDR37|ENST00000381329|9/9|||250/249|protein_coding|1' 57 | >>> vep_csq(csq, 'T', 'T') == csq 58 | True 59 | >>> vep_csq(csq, 'T', ['T', 'A']) == csq 60 | True 61 | 62 | >>> vep_csq(csq, 'A', ['T', 'A']) == '' 63 | True 64 | >>> extra = csq.split(",")[-1] 65 | 66 | # change its source 67 | >>> extra = extra[:-1] + "2" 68 | >>> vep_csq(csq + "," + extra, 'A', ['T', 'A']) == extra 69 | True 70 | >>> vep_csq(csq + "," + extra, 'T', ['T', 'A']) == csq 71 | True 72 | """ 73 | 74 | csq = csq.split(',') 75 | idx = str(alts.index(alt) + 1) 76 | subset = [c for c in csq if c.split('|')[-1] == idx] 77 | return ",".join(subset) 78 | 79 | # these have the signature fn(info_dict, alt, alts). 80 | # e.g. fn(info, 'A', ['A', 'C', 'T']) 81 | EFFECT_PARSERS = {'ANN': snpeff_ann, 82 | 'EFF': snpeff_eff, 83 | 'CSQ': vep_csq} 84 | 85 | def fix_info(info, alt, alts): 86 | """ 87 | this just dispatches to the correct function from EFFECT_PARSERS 88 | and sets the field 89 | """ 90 | info = info.copy() 91 | assert isinstance(info, dict) 92 | try: 93 | # check if e.g. EFF/ANN/SQR are present in INFO and use the 94 | # correct func. 95 | field, fixer_fn = next((k, EFFECT_PARSERS[k]) for k in EFFECT_PARSERS if k in info) 96 | except StopIteration: 97 | return info 98 | info[field] = fixer_fn(info[field], alt, alts) 99 | return info 100 | 101 | def infostr(infodict, exclude=INFO_EXCLUDE): 102 | if isinstance(infodict, basestring): 103 | return infodict 104 | for k in infodict: 105 | if isinstance(infodict[k], list): 106 | infodict[k] = ",".join(map(str, infodict[k])) 107 | s = ";".join(k + (('=' + str(v)) if v is not None else '') for 108 | k, v in infodict.iteritems() if not k in exclude) 109 | return s 110 | 111 | import copy 112 | 113 | def fix_genos(genos, alt_idx, exclude=GT_EXCLUDE): 114 | """ 115 | This example is taken from the Vt wiki for documenting the vt decompose 116 | tools which performs a similar task. 117 | 118 | >>> genos = [{'GT': ['1', '2'], 'PL': '281,5,9,58,0,115,338,46,116,809', 'DP': '81'}, 119 | ... {'GT': ['0', '0'], 'PL': '0,30,323,31,365,483,38,291,325,567', 'DP': '86'}] 120 | 121 | >>> fix_genos(genos, 3) 122 | [{'GT': ['.', '.'], 'PL': '281,338,809'}, {'GT': ['0', '0'], 'PL': '0,38,567'}] 123 | 124 | >>> fix_genos(genos, 2) 125 | [{'GT': ['.', '1'], 'PL': '281,58,115'}, {'GT': ['0', '0'], 'PL': '0,31,483'}] 126 | 127 | >>> fix_genos(genos, 1) 128 | [{'GT': ['1', '.'], 'PL': '281,5,9'}, {'GT': ['0', '0'], 'PL': '0,30,323'}] 129 | 130 | >>> genos = [{'GT': ['0', '1'], 'PL': '281,5,9,58,0,115,338,46,116,809', 'DP': '81', 'AO': ['22', '33']}, 131 | ... 
{'GT': ['0', '2'], 'PL': '0,30,323,31,365,483,38,291,325,567', 'DP': '86', 'AO': ['122', '133']}] 132 | 133 | >>> fix_genos(genos, 1) 134 | [{'GT': ['0', '1'], 'AO': '22', 'PL': '281,5,9'}, {'GT': ['0', '.'], 'AO': '122', 'PL': '0,30,323'}] 135 | 136 | >>> fix_genos(genos, 2) 137 | [{'GT': ['0', '.'], 'AO': '33', 'PL': '281,58,115'}, {'GT': ['0', '1'], 'AO': '133', 'PL': '0,31,483'}] 138 | 139 | 140 | >>> genos = [{'GT': ['0', '0'], 'PL': '281,5,9,58,0,115,338,46,116,809', 'GL': '281,5,9,58,0,115,338,46,116,809', 'DP': '81', 'AO': '22'}] 141 | >>> fix_genos(genos, 1) 142 | [{'GT': ['0', '0'], 'GL': '281,5,9', 'AO': '22', 'PL': '281,5,9'}] 143 | 144 | >>> fix_genos(genos, 2) 145 | [{'GT': ['0', '0'], 'GL': '281,58,115', 'AO': '-1', 'PL': '281,58,115'}] 146 | 147 | >>> genos = [{'GT': ['0', '2']}, {'GT': ['0', '1']}, {'GT': ['0', '1']}] 148 | >>> fix_genos(genos, 1) 149 | [{'GT': ['0', '.']}, {'GT': ['0', '1']}, {'GT': ['0', '1']}] 150 | 151 | >>> fix_genos(genos, 2) 152 | [{'GT': ['0', '1']}, {'GT': ['0', '.']}, {'GT': ['0', '.']}] 153 | 154 | 155 | """ 156 | 157 | assert int(alt_idx) > 0 158 | # this copy needed since we are modifying in place. 159 | genos = [copy.deepcopy(x) for x in genos] 160 | 161 | alt_idx = str(alt_idx) 162 | for n, geno in enumerate(genos): 163 | # this copy *also* needed since we are modifying in place. 164 | geno['GT'] = geno['GT'][:] 165 | for ex in exclude: 166 | geno.pop(ex, None) 167 | 168 | # pull out only the appropriate likelihoods. 169 | # according to VCF spec, these will be in the order: 170 | # 0/0, 0/1, 1/1, 0/2, 1/2, 2/2 171 | # ordering given by: Order(j/k) = (k*(k+1)/2)+j. 172 | 173 | a, b = 0, int(alt_idx) 174 | for key in ('PL', 'GL'): 175 | if not key in geno: continue 176 | 177 | vals = geno[key].split(",") 178 | # so do this for (a, a), (a, b), (b, b) 179 | likelihood = [] 180 | for j, k in ((a, a), (a, b), (b, b)): 181 | order = (k * (k + 1) / 2) + j 182 | #print >>sys.stderr, a, b, (j, k), 'order:' + str(order), vals#, 'value:' + str(vals[order]) 183 | try: 184 | likelihood.append(vals[order]) 185 | except IndexError: 186 | # needed for older freebayes. 187 | likelihood.append('-1') 188 | geno[key] = ",".join(likelihood) 189 | 190 | for key in ('AO', 'PAO', 'TYPE', 'QA', 'PQA', 'SAF'): 191 | if key in geno: 192 | #assert "," in geno[key] or isinstance(geno[key], list), (key, geno) 193 | if not isinstance(geno[key], list): 194 | vals = geno[key].split(",") 195 | else: 196 | vals = map(str, geno[key]) 197 | try: 198 | geno[key] = vals[int(alt_idx) - 1] 199 | except IndexError: 200 | # required for older versions of freebayes which didn't 201 | # correctly support variants with number of alts > 2. 202 | geno[key] = '-1' 203 | 204 | # these are split by comma and include the ref as the first. 205 | for key in ('AD',): 206 | if key in geno: 207 | vals = geno[key] 208 | if isinstance(vals, basestring): 209 | vals = vals.split(",") 210 | # don't subtract 1 from vals index since the ref is 0 index. 211 | geno[key] = ",".join((vals[0], vals[int(alt_idx)])) 212 | 213 | # set the current allele to 1, keep 0 at 0 and set all others to '.' 214 | for i, allele in enumerate(geno['GT']): 215 | 216 | if allele == alt_idx: 217 | geno['GT'][i] = '1' 218 | elif allele != '0': 219 | geno['GT'][i] = '.' 220 | 221 | # adjust the depth to be alt-specific. 
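        # (freebayes-style records: DP for the split record becomes RO + the
        # per-alt AO selected above, or -1 if either was unavailable;
        # GATK-style records: DP becomes the sum of the two AD values kept
        # for this alt.)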
222 | if 'RO' in geno and 'AO' in geno: 223 | if '-1' in (geno['RO'], geno['AO']): 224 | geno['DP'] = '-1' 225 | else: 226 | geno['DP'] = str(int(geno['AO']) + int(geno['RO'])) 227 | 228 | if 'AD' in geno: 229 | vals = map(int, geno['AD'].split(",")) 230 | geno['DP'] = str(sum(vals)) 231 | 232 | return genos 233 | 234 | def fmt_genos(genos): 235 | for g in genos: 236 | if 'GT' in g: 237 | sep = g.pop('__sep', '/') 238 | g['GT'] = sep.join(g['GT']) 239 | vals = [":".join(str(v) for v in f.values()) for f in genos] 240 | return vals 241 | 242 | 243 | def varsplit(ref, alts, info, frmt, gts): 244 | """ 245 | Split a variant with multiple alternate alleles into separate records to aid 246 | in annotation. 247 | 248 | >>> n = varsplit('TA', ['TAA', 'TAAA', 'T'], 'AF=0.2,0.3,0.1;KKKK', 249 | ... 'GT:DP:PL', ['1/2:81:281,5,9,58,0,115,338,46,116,809', 250 | ... '0/0:86:0,30,323,31,365,483,38,291,325,567']) 251 | >>> next(n) 252 | ('TA', 'TAA', 'KKKK;ORIG_ALLELES=TA/TAA,TAAA,T;ORIG_ALLELE_i=1', 'GT:PL', ['1/.:281,5,9', '0/0:0,30,323']) 253 | 254 | >>> next(n) 255 | ('TA', 'TAAA', 'KKKK;ORIG_ALLELES=TA/TAA,TAAA,T;ORIG_ALLELE_i=2', 'GT:PL', ['./1:281,58,115', '0/0:0,31,483']) 256 | 257 | >>> next(n) 258 | ('TA', 'T', 'KKKK;ORIG_ALLELES=TA/TAA,TAAA,T;ORIG_ALLELE_i=3', 'GT:PL', ['./.:281,338,809', '0/0:0,38,567']) 259 | 260 | 261 | # test new snp eff ANN= field 262 | >>> n = varsplit('G', ['A', 'C', 'T'], 263 | ... 'ANN=A|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.||||||,A|.|.|.|.|.|.|.|.|.||||||,A|.|.|.|.|.|.|.|.|.||||||,A|.|.|.|.|.|.|.|.|.||||||,A|.|.|.|.|.|.|.|.|.||||||,A|.|.|.|.|.|.|.|.|.||||||,A|.|.|.|.|.|.|.|.|.||||||,A|.|.|.|.|.|.|.|.|.||||||,A|.|.|.|.|.|.|.|.|.||||||,A|.|.|.|.|.|.|.|.|.||||||,A|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.|||||||', 264 | ... 
'GT:DP:PL', ['1/2:81:281,5,9,58,0,115,338,46,116,809']) 265 | 266 | >>> next(n) 267 | ('G', 'A', 'ANN=A|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.|.||||||,A|.|.|.|.|.|.|.|.|.||||||,A|.|.|.|.|.|.|.|.|.||||||,A|.|.|.|.|.|.|.|.|.||||||,A|.|.|.|.|.|.|.|.|.||||||,A|.|.|.|.|.|.|.|.|.||||||,A|.|.|.|.|.|.|.|.|.||||||,A|.|.|.|.|.|.|.|.|.||||||,A|.|.|.|.|.|.|.|.|.||||||,A|.|.|.|.|.|.|.|.|.||||||,A|.|.|.|.|.|.|.|.|.||||||;ORIG_ALLELES=G/A,C,T;ORIG_ALLELE_i=1', 'GT:PL', ['1/.:281,5,9']) 268 | 269 | >>> next(n) 270 | ('G', 'C', 'ANN=C|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||,C|.|.|.|.|.|.|.|.|.||||||;ORIG_ALLELES=G/A,C,T;ORIG_ALLELE_i=2', 'GT:PL', ['./1:281,58,115']) 271 | 272 | >>> next(n) 273 | ('G', 'T', 'ANN=T|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.||||||,T|.|.|.|.|.|.|.|.|.|||||||;ORIG_ALLELES=G/A,C,T;ORIG_ALLELE_i=3', 'GT:PL', ['./.:281,338,809']) 274 | 275 | >>> next(n) 276 | Traceback (most recent call last): 277 | ... 278 | StopIteration 279 | 280 | # test older snpeff EFF= field 281 | >>> n = varsplit('TTCC', ['T', 'TTCCTCC'], 'EFF=CODON_CHANGE_PLUS_CODON_DELETION(MODERATE||gaggaa/gaa|EE28E|196|MICAL3|protein_coding|CODING|ENST00000498573|1|1|WARNING_TRANSCRIPT_NO_START_CODON),CODON_CHANGE_PLUS_CODON_DELETION(MODERATE||gaggaa/gaa|EE4E|188|MICAL3|protein_coding|CODING|ENST00000578984|1|1|WARNING_TRANSCRIPT_INCOMPLETE),CODON_CHANGE_PLUS_CODON_DELETION(MODERATE||gaggaa/gaa|EE998E|2002|MICAL3|protein_coding|CODING|ENST00000441493|21|1),CODON_INSERTION(MODERATE||gag/gaGGAg|E28EE|196|MICAL3|protein_coding|CODING|ENST00000498573|1|2|WARNING_TRANSCRIPT_NO_START_CODON),CODON_INSERTION(MODERATE||gag/gaGGAg|E4EE|188|MICAL3|protein_coding|CODING|ENST00000578984|1|2|WARNING_TRANSCRIPT_INCOMPLETE),CODON_INSERTION(MODERATE||gag/gaGGAg|E998EE|2002|MICAL3|protein_coding|CODING|ENST00000441493|21|2),DOWNSTREAM(MODIFIER||2412||966|MICAL3|protein_coding|CODING|ENST00000400561||2),DOWNSTREAM(MODIFIER||2415||966|MICAL3|protein_coding|CODING|ENST00000400561||1),DOWNSTREAM(MODIFIER||2422||966|MICAL3|protein_coding|CODING|ENST00000444520||2),DOWNSTREAM(MODIFIER||2425||966|MICAL3|protein_coding|CODING|ENST00000444520||1)', 'GT:DP:PL', ['1/2:81:281,5,9,58,0,115,338']) 282 | >>> next(n) 283 | ('TTCC', 'T', 'EFF=CODON_CHANGE_PLUS_CODON_DELETION(MODERATE||gaggaa/gaa|EE28E|196|MICAL3|protein_coding|CODING|ENST00000498573|1|1|WARNING_TRANSCRIPT_NO_START_CODON),CODON_CHANGE_PLUS_CODON_DELETION(MODERATE||gaggaa/gaa|EE4E|188|MICAL3|protein_coding|CODING|ENST00000578984|1|1|WARNING_TRANSCRIPT_INCOMPLETE),CODON_CHANGE_PLUS_CODON_DELETION(MODERATE||gaggaa/gaa|EE998E|2002|MICAL3|protein_coding|CODING|ENST00000441493|21|1),DOWNSTREAM(MODIFIER||2415||966|MICAL3|protein_coding|CODING|ENST00000400561||1),DOWNSTREAM(MODIFIER||2425||966|MICAL3|protein_coding|CODING|ENST00000444520||1);ORIG_ALLELES=TTCC/T,TTCCTCC;ORIG_ALLELE_i=1', 'GT:PL', ['1/.:281,5,9']) 284 | 285 | >>> next(n) 286 | ('TTCC', 'TTCCTCC', 
'EFF=CODON_INSERTION(MODERATE||gag/gaGGAg|E28EE|196|MICAL3|protein_coding|CODING|ENST00000498573|1|2|WARNING_TRANSCRIPT_NO_START_CODON),CODON_INSERTION(MODERATE||gag/gaGGAg|E4EE|188|MICAL3|protein_coding|CODING|ENST00000578984|1|2|WARNING_TRANSCRIPT_INCOMPLETE),CODON_INSERTION(MODERATE||gag/gaGGAg|E998EE|2002|MICAL3|protein_coding|CODING|ENST00000441493|21|2),DOWNSTREAM(MODIFIER||2412||966|MICAL3|protein_coding|CODING|ENST00000400561||2),DOWNSTREAM(MODIFIER||2422||966|MICAL3|protein_coding|CODING|ENST00000444520||2);ORIG_ALLELES=TTCC/T,TTCCTCC;ORIG_ALLELE_i=2', 'GT:PL', ['./1:281,58,115']) 287 | 288 | >>> next(n) 289 | Traceback (most recent call last): 290 | ... 291 | StopIteration 292 | 293 | 294 | """ 295 | if len(alts) == 1: 296 | yield ref, alts[0], infostr(info, ()), frmt, gts 297 | raise StopIteration 298 | 299 | if not isinstance(info, dict): 300 | info = OrderedDict((kv[0], (kv[1] if len(kv) > 1 else None)) for kv in (x.split('=') for x in info.split(';'))) 301 | 302 | fmts = frmt.split(':') 303 | gts = [OrderedDict(zip(fmts, x.split(':'))) for x in gts] 304 | 305 | for i, g in enumerate(gts): 306 | if 'GT' in g: 307 | gts[i]['__sep'] = "|" if "|" in g['GT'] else "/" 308 | gts[i]['GT'] = g['GT'].split(gts[i]['__sep']) 309 | 310 | for i, alt in enumerate(alts, start=1): 311 | if any('GT' in g for g in gts): 312 | fixed_genos = fix_genos(gts, i) 313 | else: 314 | fixed_genos = gts 315 | if fixed_genos: 316 | fields = [f for f in fixed_genos[0].keys() if f != "__sep"] # ordered 317 | else: 318 | fields = [] 319 | info_sub = fix_info(info, alt, alts) 320 | # now we temporarily replace info[field] with ann 321 | 322 | info_sub['ORIG_ALLELES'] = '%s/%s' % (ref, ",".join(alts)) 323 | info_sub['ORIG_ALLELE_i'] = str(i) 324 | ret = ref, alt, infostr(info_sub), ":".join(fields), fmt_genos(fixed_genos) 325 | yield ret 326 | 327 | 328 | def leftalign(chrom, pos, ref, alt, fa, max_shift=1000): 329 | seq = fa[chrom][max(0, pos - max_shift - 1):pos + len(ref) - 1] 330 | assert seq.endswith(ref), (chrom, pos, ref, alt, seq[-10:]) 331 | return _leftalign(pos, ref, alt, seq)[:3] 332 | 333 | 334 | def _leftalign(pos, ref, alt, seq): 335 | """ 336 | 337 | simple implementation from the vt paper: 338 | # actual variant is 2-base CA insertion. 339 | Last argument indicates whether we ran out of sequence and therefore did not 340 | finish left-aligning before running out of sequence. (False is bad). 341 | 342 | >>> _leftalign(123, 'CAC', 'C', 'GGGCACACAC') 343 | (118, 'GCA', 'G', True) 344 | 345 | # run out of sequence! 
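    # (the trailing False means the provided sequence was exhausted before
    # left-alignment could finish; as noted above, False is bad)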
346 | >>> _leftalign(123, 'CAC', 'C', 'CACACAC') 347 | (119, 'CAC', 'C', False) 348 | 349 | >>> _leftalign(123, 'CCA', 'CAA', 'ACCCCCCA') 350 | (123, 'CC', 'CA', True) 351 | 352 | # have to left-trim after left-align 353 | >>> normalize(*_leftalign(123, 'CCA', 'CAA', 'ACCCCCCA')[:3], left_only=True) 354 | (124, 'C', 'A') 355 | 356 | >>> _leftalign(123, 'C', 'A', 'ACCCCCC') 357 | (123, 'C', 'A', True) 358 | 359 | """ 360 | assert seq.endswith(ref) 361 | assert ref != alt 362 | seq = seq[:-len(ref)] 363 | ref, alt = list(ref), list(alt) 364 | j = 0 365 | 366 | quit = False 367 | while j < len(seq) and not quit: 368 | quit = True 369 | 370 | if ref[-1] == alt[-1]: 371 | ref, alt = ref[:-1], alt[:-1] 372 | quit = False 373 | 374 | if len(ref) == 0 or len(alt) == 0: 375 | j += 1 376 | ref = [seq[-j]] + ref 377 | alt = [seq[-j]] + alt 378 | quit = False 379 | 380 | return pos - j, "".join(ref), "".join(alt), quit 381 | 382 | def normalize(pos, ref, alt, left_only=False): 383 | """simplify a ref/alt a la vt normalize so that ref=CC, alt=CCT becomes 384 | ref=C, alt=CT. this helps in annotating variants. 385 | 386 | This code relies on the observation by Eric Minikel that many annotation 387 | misses can be addressed by removing common suffix and prefixes. 388 | (http://www.cureffi.org/2014/04/24/converting-genetic-variants-to-their-minimal-representation/) 389 | 390 | >>> normalize(123, 'T', 'C') 391 | (123, 'T', 'C') 392 | 393 | >>> normalize(123, 'CC', 'CCT') 394 | (124, 'C', 'CT') 395 | 396 | >>> normalize(123, 'TCCCCT', 'CCCCT') 397 | (123, 'TC', 'C') 398 | 399 | >>> normalize(123, 'TCCCCTA', 'CCCCT') 400 | (123, 'TCCCCTA', 'CCCCT') 401 | 402 | >>> normalize(123, 'TCCCCTA', 'CCCCTA') 403 | (123, 'TC', 'C') 404 | 405 | >>> normalize(123, 'AAATCCCCTA', 'AAACCCCTA') 406 | (125, 'AT', 'A') 407 | 408 | >>> normalize(123, 'CAAATCCCCTAG', 'AAACCCCTA') 409 | (123, 'CAAATCCCCTAG', 'AAACCCCTA') 410 | """ 411 | if len(ref) == len(alt) == 1: 412 | return pos, ref, alt 413 | 414 | # logic for trimming from either end is the same so we just reverse the 415 | # string to trim the suffix (i == 0) and correct position when doing prefix 416 | # (i == 1). To support alleles that have already been right-trimmed from 417 | # _leftalign, we allow the left_only argument to just do prefix-trimming. 418 | if left_only: 419 | sides = (1,) 420 | else: 421 | sides = (0, 1) 422 | for i in sides: 423 | if i == 0: # suffix so flip 424 | ref, alt = ref[::-1], alt[::-1] 425 | 426 | n, min_len = 0, min(len(ref), len(alt)) 427 | while n + 1 < min_len and alt[n] == ref[n]: 428 | n += 1 429 | 430 | alt, ref = alt[n:], ref[n:] 431 | if i == 0: # flip back 432 | ref, alt = ref[::-1], alt[::-1] 433 | else: # add to position since we stripped the prefix 434 | pos += n 435 | 436 | return pos, ref, alt 437 | 438 | 439 | def leftnorm(chrom, pos, ref, alt, fa=None): 440 | """ 441 | this is the normalization function that should be used. 442 | if no fasta is present, then it just normalizes. Otherwise 443 | it left-aligns and then normalizes. 444 | """ 445 | if fa is None: 446 | return normalize(pos, ref, alt) 447 | 448 | return normalize(*leftalign(chrom, pos, ref, alt, fa), left_only=True) 449 | 450 | 451 | def main(fh, fa=None, buffer_size=5000): 452 | 453 | # heap so that variants remain in sorted order even after normalizing. 
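    # (normalization can shift POS, so records are pushed through a bounded
    # min-heap keyed on (CHROM, POS, len(REF)) and popped in sorted order;
    # the buffer is flushed whenever the chromosome changes.)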
454 | import heapq 455 | 456 | if fa: 457 | from pyfaidx import Fasta 458 | fa = Fasta(fa, as_raw=True, read_ahead=40000) 459 | 460 | def gen_variants(fh): 461 | for line in fh: 462 | fields = line.rstrip().split("\t") 463 | pos, ref, alts, info = (int(fields[1]), fields[3], fields[4].split(","), fields[7]) 464 | if len(fields) > 8: 465 | frmt, gts = fields[8], fields[9:] 466 | else: 467 | frmt, gts = '', [] 468 | 469 | if len(alts) > 1: 470 | for sref, salt, sinfo, sfmt, sgts in varsplit(ref, alts, info, frmt, gts): 471 | 472 | fields[1], fields[3], fields[4] = leftnorm(fields[0], pos, sref, salt, fa) 473 | fields[7] = sinfo 474 | if len(fields) > 8: 475 | fields[8], fields[9:] = sfmt, sgts 476 | nfields = fields[:] 477 | nfields.insert(2, len(fields[3])) 478 | yield nfields 479 | else: 480 | fields[1], fields[3], fields[4] = leftnorm(fields[0], pos, ref, alts[0], fa) 481 | fields.insert(2, len(fields[3])) 482 | yield fields 483 | def _print(ov): 484 | ov[1] = str(ov[1]) 485 | ov.pop(2) 486 | print "\t".join(ov) 487 | 488 | var_buffer = [] 489 | last_chrom = None 490 | 491 | info_seen = False 492 | for line in fh: 493 | # just output header 494 | if line[0] == "#": 495 | print line.rstrip() 496 | if not info_seen and line.startswith('##INFO'): 497 | print '##INFO=<ID=ORIG_ALLELES,Number=1,Type=String,Description="REF/ALT alleles of the original record before splitting">' 498 | print '##INFO=<ID=ORIG_ALLELE_i,Number=1,Type=Integer,Description="1-based index of this alt allele in the original record">' 499 | info_seen = True 500 | 501 | if line.startswith("#CHROM\t"): break 502 | 503 | for v in gen_variants(fh): 504 | # side-step chromosome ordering problems: flush the buffer at each new chromosome. 505 | if v[0] != last_chrom: 506 | while var_buffer: 507 | _print(heapq.heappop(var_buffer)) 508 | 509 | if len(var_buffer) == buffer_size: 510 | ov = heapq.heappushpop(var_buffer, v) 511 | _print(ov) 512 | else: 513 | heapq.heappush(var_buffer, v) 514 | last_chrom = v[0] 515 | 516 | while var_buffer: 517 | _print(heapq.heappop(var_buffer)) 518 | 519 | if __name__ == "__main__": 520 | import sys, os 521 | import doctest 522 | sys.stderr.write(str(doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS | doctest.REPORT_ONLY_FIRST_FAILURE, 523 | verbose=0)) + "\n") 524 | import gzip 525 | 526 | if len(sys.argv) >= 2 and (os.path.isfile(sys.argv[1]) or sys.argv[1] == "-"): 527 | if sys.argv[1] == "-": fh = sys.stdin 528 | elif sys.argv[1].endswith(".gz"): fh = gzip.open(sys.argv[1]) 529 | else: fh = open(sys.argv[1]) 530 | 531 | ref = None 532 | if len(sys.argv) == 3 and os.path.isfile(sys.argv[2]): 533 | ref = sys.argv[2] 534 | 535 | sys.exit(main(fh, ref)) 536 | 537 | --------------------------------------------------------------------------------