├── Makefile ├── README.md ├── arch ├── arch.h ├── vector_alias.h ├── wscript ├── x86_64_avx2 │ ├── arch_util.h │ ├── v16i8.h │ ├── v2i32.h │ ├── v2i64.h │ ├── v32i16.h │ ├── v32i8.h │ └── vector.h └── x86_64_sse41 │ ├── arch_util.h │ ├── v16i8.h │ ├── v2i32.h │ ├── v2i64.h │ ├── v32i16.h │ ├── v32i8.h │ └── vector.h ├── aw.c ├── aw.h ├── bench.h ├── comb.c ├── comb.h ├── examples ├── graph1.gfa ├── graph2.gfa ├── linear1.fa └── linear2.fa ├── fna.c ├── fna.h ├── gaba.c ├── gaba.h ├── gaba_wrap.c ├── ggsea.c ├── ggsea.h ├── gref.c ├── gref.h ├── hmap.c ├── hmap.h ├── kopen.c ├── kopen.h ├── kvec.h ├── lmm.h ├── log.h ├── mem.h ├── ngx_rbtree.c ├── ngx_rbtree.h ├── psort.c ├── psort.h ├── psort_radix_internal.c ├── ptask.c ├── ptask.h ├── queue.c ├── queue.h ├── queue_internal.c ├── queue_internal.h ├── sassert.h ├── sr.c ├── sr.h ├── tree.c ├── tree.h ├── unittest.c ├── unittest.h ├── waf ├── wscript ├── zf.c └── zf.h /Makefile: -------------------------------------------------------------------------------- 1 | 2 | CC=gcc 3 | PREFIX=/usr/local 4 | 5 | all: build 6 | 7 | configure: 8 | python waf configure CC=${CC} --prefix=${PREFIX} 9 | 10 | build: configure 11 | python waf build 12 | 13 | clean: configure 14 | python waf clean 15 | 16 | install: 17 | python waf configure CC=${CC} --prefix=${PREFIX} install 18 | 19 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Comb aligner 2 | 3 | Comb aligner is a prototype implementation of a graphical local alignment algorithm, calculating a set of high-scoring paths between two nucleotide string graphs. The aligner accepts FASTA, FASTQ, and [GFA](https://github.com/GFA-spec/GFA-spec) format for the input (query and reference) sequences and dumps alignments in the [SAM](https://github.com/samtools/hts-specs) or the [Graphical Pairwise Alignment (GPA)](https://github.com/ocxtal/gpa) format. 
4 | 5 | 6 | ## Overview of the algorithm 7 | 8 | The algorithmic design basically follows the seed-and-extend method. Input files are parsed with [libfna](https://github.com/ocxtal/libfna) into individual sequence segments and links. The set of parsed sequences and links are stored in [gref](https://github.com/ocxtal/libgref) objects, then indexed by k-mer hashing. [Libggsea](https://github.com/ocxtal/libggsea), which takes two gref objects, iterates all the k-mers over one object and matches them on the other one. The matched seeds are extended upwards and downwards with [libgaba](https://github.com/ocxtal/libgaba) in an adaptive banded way, where the graphs are dynamically expanded into trees with their root at the seed positions and traversed in a breadth-first order. The alignment paths are generated between pairs of maximum score positions in the trees and dumped into the SAM or GPA format with [libaw](https://github.com/ocxtal/libaw). 9 | 10 | 11 | ## Build 12 | 13 | Python (>= 2.7 or 3.3) is required to run the build script dependent on the waf build framework. The programs, entirely written in C99, can be compiled with gcc-compatible compilers passing the CC option to the waf configure argument. 14 | 15 | ``` 16 | make CC=clang PREFIX=/usr/local 17 | sudo make install 18 | ``` 19 | 20 | ## Usage 21 | 22 | ``` 23 | comb [options] -o 24 | ``` 25 | 26 | ### Options 27 | 28 | #### Indexing options 29 | 30 | * **-k\** k-mer and seed length 31 | 32 | #### Scoring options 33 | 34 | A penalty for a gap with length k is represented in a Gi + k*Ge form. 
35 | 36 | * **-a\** match award M in a positive integer 37 | * **-b\** mismatch penalty X in a positive integer 38 | * **-p\** gap open penalty Gi in a positive integer or zero 39 | * **-q\** gap extension penalty Ge in a positive integer 40 | * **-x\** xdrop threshold in a positive integer 41 | * **-m\** minimum score for reporting 42 | 43 | #### Others 44 | 45 | * **-h** print help 46 | * **-v** print version info 47 | * **-t** number of threads 48 | 49 | 50 | ## Examples 51 | 52 | ### Linear-to-linear 53 | 54 | Linear-to-linear alignment is the normal, conventional alignment concept that is implemented in many programs like the BLAST, BWA and so on... The comb aligner can handle the similar tasks, taking FASTA reference and FASTA/Q reads then generating SAM output. 55 | 56 | ``` 57 | comb examples/linear1.fa examples/linear2.fa -o test.sam 58 | ``` 59 | 60 | ### Linear-to-graph 61 | 62 | The GFA format is acceptable as an input reference sequence object. The sequence segments in the GFA file are indexed in the same way as the linear references. The alignments are reported in the GPA format by default. 63 | 64 | ``` 65 | comb examples/graph1.gfa examples/linear2.fa -o test.gpa 66 | ``` 67 | 68 | ### Graph-to-graph 69 | 70 | The comb aligner can find a set of high-score paths between two graphical sequence objects. 71 | 72 | ``` 73 | comb examples/graph1.gfa examples/graph2.gfa -o test.gpa 74 | ``` 75 | 76 | ## Notes 77 | 78 | ### Issues 79 | 80 | * The software is not stable. It may report wrong results and segfaults in an unexpected way. 81 | * The libfna parser library and libgref graph indexing library cannot handle links with overlaps (links with non-0M cigar in the GFA format). 82 | 83 | ### TODO 84 | 85 | * Fix known bugs listed above. 86 | * Add VCF parser to enable SNP and short indel modifications. It also requires implementing two functions, `append_snp` and `split_segment` in the gref library. 87 | * Add seed filtering to improve performance. 
88 | * Add matrix merge to reduce computational complexity. 89 | 90 | 91 | ## License 92 | 93 | Apache v2. 94 | 95 | Copyright (c) 2016, Hajime Suzuki 96 | -------------------------------------------------------------------------------- /arch/arch.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file arch.h 4 | */ 5 | #ifndef _ARCH_H_INCLUDED 6 | #define _ARCH_H_INCLUDED 7 | 8 | 9 | #ifdef __x86_64__ 10 | # if defined(__AVX2__) 11 | # include "x86_64_avx2/arch_util.h" 12 | # include "x86_64_avx2/vector.h" 13 | # elif defined(__SSE4_1__) 14 | # include "x86_64_sse41/arch_util.h" 15 | # include "x86_64_sse41/vector.h" 16 | # else 17 | # error "No SIMD instruction set enabled. Check if SSE4.1 or AVX2 instructions are available and add `-msse4.1' or `-mavx2' to CFLAGS." 18 | # endif 19 | 20 | /* map reverse-complement sequence out of the canonical-formed address */ 21 | #define GREF_SEQ_LIM ( (uint8_t const *)0x800000000000 ) 22 | 23 | #endif 24 | 25 | #ifdef AARCH64 26 | 27 | /* use x86_64 default */ 28 | #define GREF_SEQ_LIM ( (uint8_t const *)0x800000000000 ) 29 | 30 | #endif 31 | 32 | #ifdef PPC64 33 | 34 | /* use x86_64 default */ 35 | #define GREF_SEQ_LIM ( (uint8_t const *)0x800000000000 ) 36 | 37 | #endif 38 | 39 | #if !defined(_ARCH_UTIL_H_INCLUDED) || !defined(_VECTOR_H_INCLUDED) 40 | # error "No SIMD environment detected. Check CFLAGS." 41 | #endif 42 | 43 | #ifndef GREF_SEQ_LIM 44 | # error "No architecuture detected. Check CFLAGS." 
45 | #endif 46 | 47 | 48 | /* elem_t and move definitions */ 49 | #define _rd(p) ( *((elem_t *)p) ) 50 | #define _wr(p, k) { *((elem_t *)p) = (k); } 51 | #define _ex(k, p) ( ((k)>>((p)*8)) & (WCR_OCC_SIZE-1) ) 52 | #define _p(v) ( (elem_t)(v) ) 53 | #define _e(v) ( (uint64_t)(v) ) 54 | 55 | 56 | #endif /* #ifndef _ARCH_H_INCLUDED */ 57 | /** 58 | * end of arch.h 59 | */ 60 | -------------------------------------------------------------------------------- /arch/vector_alias.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file vector_alias.h 4 | * 5 | * @brief make alias to vector macros 6 | */ 7 | #ifndef _VECTOR_ALIAS_PREFIX 8 | #error "_VECTOR_ALIAS_PREFIX must be defined when alias.h is included." 9 | #else /* _VECTOR_ALIAS_PREFIX */ 10 | 11 | #define vector_prefix _VECTOR_ALIAS_PREFIX 12 | #define vector_alias_join_intl(a,b) a##b 13 | #define vector_alias_join(a,b) vector_alias_join_intl(a,b) 14 | 15 | #define vec_t vector_alias_join(vector_prefix, _t) 16 | #define vec_mask_t vector_alias_join(vector_prefix, _mask_t) 17 | #define vec_masku_t vector_alias_join(vector_prefix, _masku_t) 18 | 19 | /* address */ 20 | #define _pv vector_alias_join(_pv_, vector_prefix) 21 | 22 | /* load and store */ 23 | #define _load vector_alias_join(_load_, vector_prefix) 24 | #define _loadu vector_alias_join(_loadu_, vector_prefix) 25 | #define _store vector_alias_join(_store_, vector_prefix) 26 | #define _storeu vector_alias_join(_storeu_, vector_prefix) 27 | 28 | /* broadcast */ 29 | #define _set vector_alias_join(_set_, vector_prefix) 30 | #define _zero vector_alias_join(_zero_, vector_prefix) 31 | #define _seta vector_alias_join(_seta_, vector_prefix) 32 | #define _swap vector_alias_join(_swap_, vector_prefix) 33 | 34 | /* logics */ 35 | #define _not vector_alias_join(_not_, vector_prefix) 36 | #define _and vector_alias_join(_and_, vector_prefix) 37 | #define _or vector_alias_join(_or_, vector_prefix) 38 | #define _xor 
vector_alias_join(_xor_, vector_prefix) 39 | #define _andn vector_alias_join(_andn_, vector_prefix) 40 | 41 | /* arithmetics */ 42 | #define _add vector_alias_join(_add_, vector_prefix) 43 | #define _sub vector_alias_join(_sub_, vector_prefix) 44 | #define _adds vector_alias_join(_adds_, vector_prefix) 45 | #define _subs vector_alias_join(_subs_, vector_prefix) 46 | #define _max vector_alias_join(_max_, vector_prefix) 47 | #define _min vector_alias_join(_min_, vector_prefix) 48 | 49 | /* shuffle */ 50 | #define _shuf vector_alias_join(_shuf_, vector_prefix) 51 | 52 | /* blend */ 53 | #define _sel vector_alias_join(_sel_, vector_prefix) 54 | 55 | /* compare */ 56 | #define _eq vector_alias_join(_eq_, vector_prefix) 57 | #define _lt vector_alias_join(_lt_, vector_prefix) 58 | #define _gt vector_alias_join(_gt_, vector_prefix) 59 | 60 | /* insert and extract */ 61 | #define _ins vector_alias_join(_ins_, vector_prefix) 62 | #define _ext vector_alias_join(_ext_, vector_prefix) 63 | 64 | /* shift */ 65 | #define _bsl vector_alias_join(_bsl_, vector_prefix) 66 | #define _bsr vector_alias_join(_bsr_, vector_prefix) 67 | #define _shl vector_alias_join(_shl_, vector_prefix) 68 | #define _shr vector_alias_join(_shr_, vector_prefix) 69 | #define _sal vector_alias_join(_sal_, vector_prefix) 70 | #define _sar vector_alias_join(_sar_, vector_prefix) 71 | 72 | /* mask */ 73 | #define _mask vector_alias_join(_mask_, vector_prefix) 74 | 75 | /* broadcast */ 76 | #define _from_v16i8 vector_alias_join(_from_v16i8_, vector_prefix) 77 | #define _from_v32i8 vector_alias_join(_from_v32i8_, vector_prefix) 78 | #define _to_v16i8 vector_alias_join(_to_v16i8_, vector_prefix) 79 | #define _to_v32i8 vector_alias_join(_to_v32i8_, vector_prefix) 80 | 81 | /* print */ 82 | #define _print vector_alias_join(_print_, vector_prefix) 83 | 84 | #endif /* _VECTOR_ALIAS_PREFIX */ 85 | /** 86 | * end of vector_alias.h 87 | */ 88 | 
-------------------------------------------------------------------------------- /arch/wscript: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | # encoding: utf-8 3 | 4 | def options(opt): 5 | pass 6 | 7 | def configure(conf): 8 | # first check if AVX2 intrinsics are available on the compiler 9 | # note: check not the cpu but the compiler thus inline rdtsc is not appropriate here 10 | if(conf.check_cc(fragment=''' 11 | #include 12 | int main(void) { 13 | volatile __m256i a = _mm256_set1_epi8(1); 14 | volatile __m256i b = _mm256_set1_epi8(-2); 15 | volatile __m256i c = _mm256_add_epi8(a, b); 16 | return 0; 17 | } 18 | ''', 19 | execute = True, 20 | cflags = '-march=core-avx2' if conf.env.CC_NAME == 'icc' else '-mavx2', 21 | mandatory = False, 22 | msg = 'Checking for AVX2 instructions')): 23 | 24 | # sse4.1 instructions are also available on the avx2 enabled machines. 25 | conf.env.append_value('CFLAGS', 26 | ['-march=core-avx2', '-axcore-avx2'] if conf.env.CC_NAME == 'icc' else '-mavx2') 27 | 28 | # next, check if SSE4 intrinsics are availavle. 
29 | elif(conf.check_cc(fragment=''' 30 | #include 31 | int main(void) { 32 | volatile __m128i a = _mm_set1_epi8(1); 33 | volatile __m128i b = _mm_set1_epi8(-2); 34 | volatile __m128i c = _mm_add_epi8(a, b); 35 | return 0; 36 | } 37 | ''', 38 | execute = True, 39 | cflags = '-msse4.1', 40 | mandatory = False, 41 | msg = 'Checking for SSE4.1 instructions')): 42 | 43 | conf.env.append_value('CFLAGS', 44 | ['-msse4.1', '-axsse4.1'] if conf.env.CC_NAME == 'icc' else '-msse4.1') 45 | 46 | def build(bld): 47 | pass 48 | -------------------------------------------------------------------------------- /arch/x86_64_avx2/arch_util.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file arch_util.h 4 | * 5 | * @brief architecture-dependent utilities devided from util.h 6 | */ 7 | #ifndef _ARCH_UTIL_H_INCLUDED 8 | #define _ARCH_UTIL_H_INCLUDED 9 | 10 | #include "vector.h" 11 | #include 12 | #include 13 | 14 | /** 15 | * misc bit operations (popcnt, tzcnt, and lzcnt) 16 | */ 17 | 18 | /** 19 | * @macro popcnt 20 | */ 21 | #if 1 22 | #define popcnt(x) ( (uint64_t)_mm_popcnt_u64(x) ) 23 | #else 24 | static inline 25 | int popcnt(uint64_t n) 26 | { 27 | uint64_t c = 0; 28 | c = (n & 0x5555555555555555) + ((n>>1) & 0x5555555555555555); 29 | c = (c & 0x3333333333333333) + ((c>>2) & 0x3333333333333333); 30 | c = (c & 0x0f0f0f0f0f0f0f0f) + ((c>>4) & 0x0f0f0f0f0f0f0f0f); 31 | c = (c & 0x00ff00ff00ff00ff) + ((c>>8) & 0x00ff00ff00ff00ff); 32 | c = (c & 0x0000ffff0000ffff) + ((c>>16) & 0x0000ffff0000ffff); 33 | c = (c & 0x00000000ffffffff) + ((c>>32) & 0x00000000ffffffff); 34 | return(c); 35 | } 36 | #endif 37 | 38 | /** 39 | * @macro tzcnt 40 | * @brief trailing zero count (count #continuous zeros from LSb) 41 | */ 42 | #if 1 43 | /** immintrin.h is already included */ 44 | #define tzcnt(x) ( (uint64_t)_tzcnt_u64(x) ) 45 | #else 46 | static inline 47 | int tzcnt(uint64_t n) 48 | { 49 | n |= n<<1; 50 | n |= n<<2; 51 | n |= n<<4; 52 | n 
|= n<<8; 53 | n |= n<<16; 54 | n |= n<<32; 55 | return(64-popcnt(n)); 56 | } 57 | #endif 58 | 59 | /** 60 | * @macro lzcnt 61 | * @brief leading zero count (count #continuous zeros from MSb) 62 | */ 63 | #if 1 64 | #define lzcnt(x) ( (uint64_t)_lzcnt_u64(x) ) 65 | #else 66 | static inline 67 | int lzcnt(uint64_t n) 68 | { 69 | n |= n>>1; 70 | n |= n>>2; 71 | n |= n>>4; 72 | n |= n>>8; 73 | n |= n>>16; 74 | n |= n>>32; 75 | return(64-popcnt(n)); 76 | } 77 | #endif 78 | 79 | /** 80 | * @macro _loadu_u64, _storeu_u64 81 | */ 82 | #define _loadu_u64(p) ( *((uint64_t *)(p)) ) 83 | #define _storeu_u64(p, e) { *((uint64_t *)(p)) = (e); } 84 | 85 | /** 86 | * @macro _aligned_block_memcpy 87 | * 88 | * @brief copy size bytes from src to dst. 89 | * 90 | * @detail 91 | * src and dst must be aligned to 16-byte boundary. 92 | * copy must be multipe of 16. 93 | */ 94 | #define _ymm_rd_a(src, n) (ymm##n) = _mm256_load_si256((__m256i *)(src) + (n)) 95 | #define _ymm_rd_u(src, n) (ymm##n) = _mm256_loadu_si256((__m256i *)(src) + (n)) 96 | #define _ymm_wr_a(dst, n) _mm256_store_si256((__m256i *)(dst) + (n), (ymm##n)) 97 | #define _ymm_wr_u(dst, n) _mm256_storeu_si256((__m256i *)(dst) + (n), (ymm##n)) 98 | #define _memcpy_blk_intl(dst, src, size, _wr, _rd) { \ 99 | /** duff's device */ \ 100 | uint8_t *_src = (uint8_t *)(src), *_dst = (uint8_t *)(dst); \ 101 | int64_t const _nreg = 16; /** #ymm registers == 16 */ \ 102 | int64_t const _tcnt = (size) / sizeof(__m256i); \ 103 | int64_t const _offset = ((_tcnt - 1) & (_nreg - 1)) - (_nreg - 1); \ 104 | int64_t _jmp = _tcnt & (_nreg - 1); \ 105 | int64_t _lcnt = (_tcnt + _nreg - 1) / _nreg; \ 106 | register __m256i ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7; \ 107 | register __m256i ymm8, ymm9, ymm10, ymm11, ymm12, ymm13, ymm14, ymm15; \ 108 | _src += _offset * sizeof(__m256i); \ 109 | _dst += _offset * sizeof(__m256i); \ 110 | switch(_jmp) { \ 111 | case 0: do { _rd(_src, 0); \ 112 | case 15: _rd(_src, 1); \ 113 | case 14: _rd(_src, 
2); \ 114 | case 13: _rd(_src, 3); \ 115 | case 12: _rd(_src, 4); \ 116 | case 11: _rd(_src, 5); \ 117 | case 10: _rd(_src, 6); \ 118 | case 9: _rd(_src, 7); \ 119 | case 8: _rd(_src, 8); \ 120 | case 7: _rd(_src, 9); \ 121 | case 6: _rd(_src, 10); \ 122 | case 5: _rd(_src, 11); \ 123 | case 4: _rd(_src, 12); \ 124 | case 3: _rd(_src, 13); \ 125 | case 2: _rd(_src, 14); \ 126 | case 1: _rd(_src, 15); \ 127 | switch(_jmp) { \ 128 | case 0: _wr(_dst, 0); \ 129 | case 15: _wr(_dst, 1); \ 130 | case 14: _wr(_dst, 2); \ 131 | case 13: _wr(_dst, 3); \ 132 | case 12: _wr(_dst, 4); \ 133 | case 11: _wr(_dst, 5); \ 134 | case 10: _wr(_dst, 6); \ 135 | case 9: _wr(_dst, 7); \ 136 | case 8: _wr(_dst, 8); \ 137 | case 7: _wr(_dst, 9); \ 138 | case 6: _wr(_dst, 10); \ 139 | case 5: _wr(_dst, 11); \ 140 | case 4: _wr(_dst, 12); \ 141 | case 3: _wr(_dst, 13); \ 142 | case 2: _wr(_dst, 14); \ 143 | case 1: _wr(_dst, 15); \ 144 | } \ 145 | _src += _nreg * sizeof(__m256i); \ 146 | _dst += _nreg * sizeof(__m256i); \ 147 | _jmp = 0; \ 148 | } while(--_lcnt > 0); \ 149 | } \ 150 | } 151 | #define _memcpy_blk_aa(dst, src, len) _memcpy_blk_intl(dst, src, len, _ymm_wr_a, _ymm_rd_a) 152 | #define _memcpy_blk_au(dst, src, len) _memcpy_blk_intl(dst, src, len, _ymm_wr_a, _ymm_rd_u) 153 | #define _memcpy_blk_ua(dst, src, len) _memcpy_blk_intl(dst, src, len, _ymm_wr_u, _ymm_rd_a) 154 | #define _memcpy_blk_uu(dst, src, len) _memcpy_blk_intl(dst, src, len, _ymm_wr_u, _ymm_rd_u) 155 | #define _memset_blk_intl(dst, a, size, _wr) { \ 156 | uint8_t *_dst = (uint8_t *)(dst); \ 157 | __m256i const ymm0 = _mm256_set1_epi8((int8_t)a); \ 158 | int64_t i; \ 159 | for(i = 0; i < size / sizeof(__m256i); i++) { \ 160 | _wr(_dst, 0); _dst += sizeof(__m256i); \ 161 | } \ 162 | } 163 | #define _memset_blk_a(dst, a, size) _memset_blk_intl(dst, a, size, _ymm_wr_a) 164 | #define _memset_blk_u(dst, a, size) _memset_blk_intl(dst, a, size, _ymm_wr_u) 165 | 166 | 167 | /** 168 | * substitution matrix abstraction 169 | 
*/ 170 | /* store */ 171 | #define _store_sb(_scv, sv16) { _store_v32i8((_scv).v1, _from_v16i8_v32i8(sv16)); } 172 | 173 | /* load */ 174 | #define _load_sb(scv) ( _from_v32i8(_load_v32i8((scv).v1)) ) 175 | 176 | /** 177 | * gap penalty vector abstraction macros 178 | */ 179 | /* store */ 180 | #define _make_gap(_e1, _e2, _e3, _e4) ( \ 181 | (v16i8_t){ _mm_set_epi8( \ 182 | (_e4), (_e4), (_e4), (_e4), \ 183 | (_e3), (_e3), (_e3), (_e3), \ 184 | (_e2), (_e2), (_e2), (_e2), \ 185 | (_e1), (_e1), (_e1), (_e1)) \ 186 | } \ 187 | ) 188 | #define _store_adjh(_scv, _adjh, _adjv, _ofsh, _ofsv) { \ 189 | _store_v32i8((_scv).v3, _from_v16i8_v32i8(_make_gap(_adjh, _adjv, _ofsh, _ofsv))) \ 190 | } 191 | #define _store_adjv(_scv, _adjh, _adjv, _ofsh, _ofsv) { \ 192 | /* nothing to do */ \ 193 | /*_store_v32i8((_scv).v3, _from_v16i8_v32i8(_make_gap(_adjh, _adjv, _ofsh, _ofsv)))*/ \ 194 | } 195 | #define _store_ofsh(_scv, _adjh, _adjv, _ofsh, _ofsv) { \ 196 | /* nothing to do */ \ 197 | /* _store_v32i8((_scv).v5, _from_v16i8_v32i8(_make_gap(_adjh, _adjv, _ofsh, _ofsv)))*/ \ 198 | } 199 | #define _store_ofsv(_scv, _adjh, _adjv, _ofsh, _ofsv) { \ 200 | /* nothing to do */ \ 201 | /*_store_v32i8((_scv).v5, _from_v16i8_v32i8(_make_gap(_adjh, _adjv, _ofsh, _ofsv)))*/ \ 202 | } 203 | 204 | /* load */ 205 | #define _load_gap(_ptr, _idx) ( \ 206 | (v32i8_t){ _mm256_shuffle_epi32(_mm256_load_si256((__m256i const *)(_ptr)), (_idx)) } \ 207 | ) 208 | 209 | #define _load_adjh(_scv) ( _from_v32i8(_load_gap((_scv).v3, 0x00)) ) 210 | #define _load_adjv(_scv) ( _from_v32i8(_load_gap((_scv).v3, 0x55)) ) 211 | #define _load_ofsh(_scv) ( _from_v32i8(_load_gap((_scv).v3, 0xaa)) ) 212 | #define _load_ofsv(_scv) ( _from_v32i8(_load_gap((_scv).v3, 0xff)) ) 213 | 214 | 215 | 216 | /* cache line operation */ 217 | #define WCR_BUF_SIZE ( 128 ) /** two cache lines in x86_64 */ 218 | #define memcpy_buf(_dst, _src) { \ 219 | register __m256i *_s = (__m256i *)(_src); \ 220 | register __m256i *_d = (__m256i 
*)(_dst); \ 221 | __m256i ymm0 = _mm256_load_si256(_s); \ 222 | __m256i ymm1 = _mm256_load_si256(_s + 1); \ 223 | __m256i ymm2 = _mm256_load_si256(_s + 2); \ 224 | __m256i ymm3 = _mm256_load_si256(_s + 3); \ 225 | _mm256_stream_si256(_d, ymm0); \ 226 | _mm256_stream_si256(_d + 1, ymm1); \ 227 | _mm256_stream_si256(_d + 2, ymm2); \ 228 | _mm256_stream_si256(_d + 3, ymm3); \ 229 | } 230 | 231 | /* 128bit register operation */ 232 | #define elem_128_t __m128i 233 | #define rd_128(_ptr) ( _mm_load_si128((__m128i *)(_ptr)) ) 234 | #define wr_128(_ptr, _e) { _mm_store_si128((__m128i *)(_ptr), (_e)); } 235 | #define _ex_128(k, h) ( _mm_extract_epi64((elem_128_t)k, h) ) 236 | #define ex_128(k, p) ( ((((p)>>3) ? _ex_128(k, 1) : _ex_128(k, 0))>>(((p) & 0x07)<<3)) & (WCR_OCC_SIZE-1) ) 237 | #define p_128(v) ( _mm_cvtsi64_si128((uint64_t)(v)) ) 238 | #define e_128(v) ( (uint64_t)_mm_cvtsi128_si64((__m128i)(v)) ) 239 | 240 | 241 | #endif /* #ifndef _ARCH_UTIL_H_INCLUDED */ 242 | /** 243 | * end of arch_util.h 244 | */ 245 | -------------------------------------------------------------------------------- /arch/x86_64_avx2/v16i8.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file v16i8.h 4 | * 5 | * @brief struct and _Generic based vector class implementation 6 | */ 7 | #ifndef _V16I8_H_INCLUDED 8 | #define _V16I8_H_INCLUDED 9 | 10 | /* include header for intel / amd sse2 instruction sets */ 11 | #include 12 | 13 | /* 8bit 32cell */ 14 | typedef struct v16i8_s { 15 | __m128i v1; 16 | } v16i8_t; 17 | 18 | /* expanders (without argument) */ 19 | #define _e_x_v16i8_1(u) 20 | #define _e_x_v16i8_2(u) 21 | 22 | /* expanders (without immediate) */ 23 | #define _e_v_v16i8_1(a) (a).v1 24 | #define _e_v_v16i8_2(a) (a).v1 25 | #define _e_vv_v16i8_1(a, b) (a).v1, (b).v1 26 | #define _e_vv_v16i8_2(a, b) (a).v1, (b).v1 27 | #define _e_vvv_v16i8_1(a, b, c) (a).v1, (b).v1, (c).v1 28 | #define _e_vvv_v16i8_2(a, b, c) (a).v1, (b).v1, (c).v1 
29 | 30 | /* expanders with immediate */ 31 | #define _e_i_v16i8_1(imm) (imm) 32 | #define _e_i_v16i8_2(imm) (imm) 33 | #define _e_vi_v16i8_1(a, imm) (a).v1, (imm) 34 | #define _e_vi_v16i8_2(a, imm) (a).v1, (imm) 35 | #define _e_vvi_v16i8_1(a, b, imm) (a).v1, (b).v1, (imm) 36 | #define _e_vvi_v16i8_2(a, b, imm) (a).v1, (b).v1, (imm) 37 | 38 | /* address calculation macros */ 39 | #define _addr_v16i8_1(imm) ( (__m128i *)(imm) ) 40 | #define _addr_v16i8_2(imm) ( (__m128i *)(imm) ) 41 | #define _pv_v16i8(ptr) ( _addr_v16i8_1(ptr) ) 42 | /* expanders with pointers */ 43 | #define _e_p_v16i8_1(ptr) _addr_v16i8_1(ptr) 44 | #define _e_p_v16i8_2(ptr) _addr_v16i8_2(ptr) 45 | #define _e_pv_v16i8_1(ptr, a) _addr_v16i8_1(ptr), (a).v1 46 | #define _e_pv_v16i8_2(ptr, a) _addr_v16i8_2(ptr), (a).v1 47 | 48 | /* expand intrinsic name */ 49 | #define _i_v16i8(intrin) _mm_##intrin##_epi8 50 | #define _i_v16i8x(intrin) _mm_##intrin##_si128 51 | 52 | /* apply */ 53 | #define _a_v16i8(intrin, expander, ...) ( \ 54 | (v16i8_t) { \ 55 | _i_v16i8(intrin)(expander##_v16i8_1(__VA_ARGS__)) \ 56 | } \ 57 | ) 58 | #define _a_v16i8x(intrin, expander, ...) ( \ 59 | (v16i8_t) { \ 60 | _i_v16i8x(intrin)(expander##_v16i8_1(__VA_ARGS__)) \ 61 | } \ 62 | ) 63 | #define _a_v16i8xv(intrin, expander, ...) { \ 64 | _i_v16i8x(intrin)(expander##_v16i8_1(__VA_ARGS__)); \ 65 | } 66 | 67 | /* load and store */ 68 | #define _load_v16i8(...) _a_v16i8x(load, _e_p, __VA_ARGS__) 69 | #define _loadu_v16i8(...) _a_v16i8x(loadu, _e_p, __VA_ARGS__) 70 | #define _store_v16i8(...) _a_v16i8xv(store, _e_pv, __VA_ARGS__) 71 | #define _storeu_v16i8(...) _a_v16i8xv(storeu, _e_pv, __VA_ARGS__) 72 | 73 | /* broadcast */ 74 | #define _set_v16i8(...) 
_a_v16i8(set1, _e_i, __VA_ARGS__) 75 | #define _zero_v16i8() _a_v16i8x(setzero, _e_x, _unused) 76 | 77 | /* swap (reverse) */ 78 | #define _swap_idx_v16i8() ( \ 79 | _mm_set_epi8( \ 80 | 0, 1, 2, 3, 4, 5, 6, 7, \ 81 | 8, 9, 10, 11, 12, 13, 14, 15) \ 82 | ) 83 | #define _swap_v16i8(a) ( \ 84 | (v16i8_t) { \ 85 | _mm_shuffle_epi8((a).v1, _swap_idx_v16i8()) \ 86 | } \ 87 | ) 88 | 89 | /* logics */ 90 | #define _not_v16i8(...) _a_v16i8x(not, _e_v, __VA_ARGS__) 91 | #define _and_v16i8(...) _a_v16i8x(and, _e_vv, __VA_ARGS__) 92 | #define _or_v16i8(...) _a_v16i8x(or, _e_vv, __VA_ARGS__) 93 | #define _xor_v16i8(...) _a_v16i8x(xor, _e_vv, __VA_ARGS__) 94 | #define _andn_v16i8(...) _a_v16i8x(andnot, _e_vv, __VA_ARGS__) 95 | 96 | /* arithmetics */ 97 | #define _add_v16i8(...) _a_v16i8(add, _e_vv, __VA_ARGS__) 98 | #define _sub_v16i8(...) _a_v16i8(sub, _e_vv, __VA_ARGS__) 99 | #define _adds_v16i8(...) _a_v16i8(adds, _e_vv, __VA_ARGS__) 100 | #define _subs_v16i8(...) _a_v16i8(subs, _e_vv, __VA_ARGS__) 101 | #define _max_v16i8(...) _a_v16i8(max, _e_vv, __VA_ARGS__) 102 | #define _min_v16i8(...) _a_v16i8(min, _e_vv, __VA_ARGS__) 103 | 104 | /* shuffle */ 105 | #define _shuf_v16i8(...) _a_v16i8(shuffle, _e_vv, __VA_ARGS__) 106 | 107 | /* blend */ 108 | // #define _sel_v16i8(...) _a_v16i8(blendv, _e_vvv, __VA_ARGS__) 109 | 110 | /* compare */ 111 | #define _eq_v16i8(...) _a_v16i8(cmpeq, _e_vv, __VA_ARGS__) 112 | #define _lt_v16i8(...) _a_v16i8(cmplt, _e_vv, __VA_ARGS__) 113 | #define _gt_v16i8(...) 
_a_v16i8(cmpgt, _e_vv, __VA_ARGS__) 114 | 115 | /* insert and extract */ 116 | #define _ins_v16i8(a, val, imm) { \ 117 | (a).v1 = _i_v16i8(insert)((a).v1, (val), (imm)); \ 118 | } 119 | #define _ext_v16i8(a, imm) ( \ 120 | (int8_t)_i_v16i8(extract)((a).v1, (imm)) \ 121 | ) 122 | 123 | /* shift */ 124 | #define _bsl_v16i8(a, imm) ( \ 125 | (v16i8_t) { \ 126 | _i_v16i8x(slli)((a).v1, (imm)) \ 127 | } \ 128 | ) 129 | #define _bsr_v16i8(a, imm) ( \ 130 | (v16i8_t) { \ 131 | _i_v16i8x(srli)((a).v1, (imm)) \ 132 | } \ 133 | ) 134 | #define _shl_v16i8(a, imm) ( \ 135 | (v16i8_t) { \ 136 | _mm_slli_epi32((a).v1, (imm)) \ 137 | } \ 138 | ) 139 | #define _shr_v16i8(a, imm) ( \ 140 | (v16i8_t) { \ 141 | _mm_srli_epi32((a).v1, (imm)) \ 142 | } \ 143 | ) 144 | #define _sal_v16i8(a, imm) ( \ 145 | (v16i8_t) { \ 146 | _mm_slai_epi32((a).v1, (imm)) \ 147 | } \ 148 | ) 149 | #define _sar_v16i8(a, imm) ( \ 150 | (v16i8_t) { \ 151 | _mm_srai_epi32((a).v1, (imm)) \ 152 | } \ 153 | ) 154 | 155 | /* mask */ 156 | #define _mask_v16i8(a) ( \ 157 | (v16_mask_t) { \ 158 | .m1 = _i_v16i8(movemask)((a).v1) \ 159 | } \ 160 | ) 161 | 162 | /* debug print */ 163 | #ifdef _LOG_H_INCLUDED 164 | #define _print_v16i8(a) { \ 165 | debug("(v16i8_t) %s(%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d)", \ 166 | #a, \ 167 | _ext_v16i8(a, 15), \ 168 | _ext_v16i8(a, 14), \ 169 | _ext_v16i8(a, 13), \ 170 | _ext_v16i8(a, 12), \ 171 | _ext_v16i8(a, 11), \ 172 | _ext_v16i8(a, 10), \ 173 | _ext_v16i8(a, 9), \ 174 | _ext_v16i8(a, 8), \ 175 | _ext_v16i8(a, 7), \ 176 | _ext_v16i8(a, 6), \ 177 | _ext_v16i8(a, 5), \ 178 | _ext_v16i8(a, 4), \ 179 | _ext_v16i8(a, 3), \ 180 | _ext_v16i8(a, 2), \ 181 | _ext_v16i8(a, 1), \ 182 | _ext_v16i8(a, 0)); \ 183 | } 184 | #else 185 | #define _print_v16i8(x) ; 186 | #endif 187 | 188 | #endif /* _V16I8_H_INCLUDED */ 189 | /** 190 | * end of v16i8.h 191 | */ 192 | -------------------------------------------------------------------------------- 
/arch/x86_64_avx2/v2i32.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file v2i32.h 4 | * 5 | * @brief struct and _Generic based vector class implementation 6 | */ 7 | #ifndef _V2I32_H_INCLUDED 8 | #define _V2I32_H_INCLUDED 9 | 10 | /* include header for intel / amd sse2 instruction sets */ 11 | #include 12 | 13 | /* 8bit 32cell */ 14 | typedef struct v2i32_s { 15 | __m128i v1; 16 | } v2i32_t; 17 | 18 | /* expanders (without argument) */ 19 | #define _e_x_v2i32_1(u) 20 | #define _e_x_v2i32_2(u) 21 | 22 | /* expanders (without immediate) */ 23 | #define _e_v_v2i32_1(a) (a).v1 24 | #define _e_v_v2i32_2(a) (a).v1 25 | #define _e_vv_v2i32_1(a, b) (a).v1, (b).v1 26 | #define _e_vv_v2i32_2(a, b) (a).v1, (b).v1 27 | #define _e_vvv_v2i32_1(a, b, c) (a).v1, (b).v1, (c).v1 28 | #define _e_vvv_v2i32_2(a, b, c) (a).v1, (b).v1, (c).v1 29 | 30 | /* expanders with immediate */ 31 | #define _e_i_v2i32_1(imm) (imm) 32 | #define _e_i_v2i32_2(imm) (imm) 33 | #define _e_vi_v2i32_1(a, imm) (a).v1, (imm) 34 | #define _e_vi_v2i32_2(a, imm) (a).v1, (imm) 35 | #define _e_vvi_v2i32_1(a, b, imm) (a).v1, (b).v1, (imm) 36 | #define _e_vvi_v2i32_2(a, b, imm) (a).v1, (b).v1, (imm) 37 | 38 | /* address calculation macros */ 39 | #define _addr_v2i32_1(imm) ( (__m128i *)(imm) ) 40 | #define _addr_v2i32_2(imm) ( (__m128i *)(imm) ) 41 | #define _pv_v2i32(ptr) ( _addr_v2i32_1(ptr) ) 42 | /* expanders with pointers */ 43 | #define _e_p_v2i32_1(ptr) _addr_v2i32_1(ptr) 44 | #define _e_p_v2i32_2(ptr) _addr_v2i32_2(ptr) 45 | #define _e_pv_v2i32_1(ptr, a) _addr_v2i32_1(ptr), (a).v1 46 | #define _e_pv_v2i32_2(ptr, a) _addr_v2i32_2(ptr), (a).v1 47 | 48 | /* expand intrinsic name */ 49 | #define _i_v2i32(intrin) _mm_##intrin##_epi32 50 | #define _i_v2i32e(intrin) _mm_##intrin##_epi64 51 | #define _i_v2i32x(intrin) _mm_##intrin##_si128 52 | 53 | /* apply */ 54 | #define _a_v2i32(intrin, expander, ...) 
( \ 55 | (v2i32_t) { \ 56 | _i_v2i32(intrin)(expander##_v2i32_1(__VA_ARGS__)) \ 57 | } \ 58 | ) 59 | #define _a_v2i32e(intrin, expander, ...) ( \ 60 | (v2i32_t) { \ 61 | _i_v2i32e(intrin)(expander##_v2i32_1(__VA_ARGS__)) \ 62 | } \ 63 | ) 64 | #define _a_v2i32ev(intrin, expander, ...) { \ 65 | _i_v2i32e(intrin)(expander##_v2i32_1(__VA_ARGS__)); \ 66 | } 67 | #define _a_v2i32x(intrin, expander, ...) ( \ 68 | (v2i32_t) { \ 69 | _i_v2i32x(intrin)(expander##_v2i32_1(__VA_ARGS__)) \ 70 | } \ 71 | ) 72 | #define _a_v2i32xv(intrin, expander, ...) { \ 73 | _i_v2i32x(intrin)(expander##_v2i32_1(__VA_ARGS__)); \ 74 | } 75 | 76 | /* load and store */ 77 | #define _load_v2i32(...) _a_v2i32e(loadl, _e_p, __VA_ARGS__) 78 | #define _loadu_v2i32(...) _a_v2i32e(loadl, _e_p, __VA_ARGS__) 79 | #define _store_v2i32(...) _a_v2i32ev(storel, _e_pv, __VA_ARGS__) 80 | #define _storeu_v2i32(...) _a_v2i32ev(storel, _e_pv, __VA_ARGS__) 81 | 82 | /* broadcast */ 83 | #define _set_v2i32(...) _a_v2i32(set1, _e_i, __VA_ARGS__) 84 | #define _zero_v2i32() _a_v2i32x(setzero, _e_x, _unused) 85 | #define _seta_v2i32(x, y) ( \ 86 | (v2i32_t) { \ 87 | _mm_cvtsi64_si128((((uint64_t)(x))<<32) | ((uint32_t)(y))) \ 88 | } \ 89 | ) 90 | #define _swap_v2i32(x) ( \ 91 | (v2i32_t) { \ 92 | _mm_shuffle_epi32((x).v1, 0x01) \ 93 | } \ 94 | ) 95 | 96 | /* logics */ 97 | #define _not_v2i32(...) _a_v2i32x(not, _e_v, __VA_ARGS__) 98 | #define _and_v2i32(...) _a_v2i32x(and, _e_vv, __VA_ARGS__) 99 | #define _or_v2i32(...) _a_v2i32x(or, _e_vv, __VA_ARGS__) 100 | #define _xor_v2i32(...) _a_v2i32x(xor, _e_vv, __VA_ARGS__) 101 | #define _andn_v2i32(...) _a_v2i32x(andnot, _e_vv, __VA_ARGS__) 102 | 103 | /* arithmetics */ 104 | #define _add_v2i32(...) _a_v2i32(add, _e_vv, __VA_ARGS__) 105 | #define _sub_v2i32(...) _a_v2i32(sub, _e_vv, __VA_ARGS__) 106 | #define _adds_v2i32(...) _a_v2i32(adds, _e_vv, __VA_ARGS__) 107 | #define _subs_v2i32(...) _a_v2i32(subs, _e_vv, __VA_ARGS__) 108 | #define _max_v2i32(...) 
_a_v2i32(max, _e_vv, __VA_ARGS__) 109 | #define _min_v2i32(...) _a_v2i32(min, _e_vv, __VA_ARGS__) 110 | 111 | /* blend */ 112 | #define _sel_v2i32(mask, a, b) ( \ 113 | (v2i64_t) { \ 114 | _mm_blendv_epi8((b).v1, (a).v1, (mask).v1) \ 115 | } \ 116 | ) 117 | 118 | /* compare */ 119 | #define _eq_v2i32(...) _a_v2i32(cmpeq, _e_vv, __VA_ARGS__) 120 | #define _lt_v2i32(...) _a_v2i32(cmplt, _e_vv, __VA_ARGS__) 121 | #define _gt_v2i32(...) _a_v2i32(cmpgt, _e_vv, __VA_ARGS__) 122 | 123 | /* insert and extract */ 124 | #define _ins_v2i32(a, val, imm) { \ 125 | (a).v1 = _i_v2i32((a).v1, (val), (imm)); \ 126 | } 127 | #define _ext_v2i32(a, imm) ( \ 128 | (int32_t)_i_v2i32(extract)((a).v1, (imm)) \ 129 | ) 130 | 131 | /* shift */ 132 | #define _sal_v2i32(a, imm) ( \ 133 | (v2i32_t) {_i_v2i32(slai)((a).v1, (imm))} \ 134 | ) 135 | #define _sar_v2i32(a, imm) ( \ 136 | (v2i32_t) {_i_v2i32(srai)((a).v1, (imm))} \ 137 | ) 138 | 139 | /* mask */ 140 | #define _mask_v2i32(a) ( \ 141 | (uint32_t) (0xff & _mm_movemask_epi8((a).v1)) \ 142 | ) 143 | #define V2I32_MASK_00 ( 0x00 ) 144 | #define V2I32_MASK_01 ( 0x0f ) 145 | #define V2I32_MASK_10 ( 0xf0 ) 146 | #define V2I32_MASK_11 ( 0xff ) 147 | 148 | /* transpose */ 149 | #define _lo_v2i32(a, b) ( \ 150 | (v2i32_t) { \ 151 | _mm_unpacklo_epi32((a).v1, (b).v1) \ 152 | } \ 153 | ) 154 | #define _hi_v2i32(a, b) ( \ 155 | (v2i32_t) { \ 156 | _mm_shuffle_epi32(_mm_unpacklo_epi32((a).v1, (b).v1), 0x0e) \ 157 | } \ 158 | ) 159 | 160 | /* debug print */ 161 | #ifdef _LOG_H_INCLUDED 162 | #define _print_v2i32(a) { \ 163 | debug("(v2i32_t) %s(%d, %d)", #a, _ext_v2i32(a, 1), _ext_v2i32(a, 0)); \ 164 | } 165 | #else 166 | #define _print_v2i32(x) ; 167 | #endif 168 | 169 | #endif /* _V2I32_H_INCLUDED */ 170 | /** 171 | * end of v2i32.h 172 | */ 173 | -------------------------------------------------------------------------------- /arch/x86_64_avx2/v2i64.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | 
* @file v2i64.h 4 | * 5 | * @brief struct and _Generic based vector class implementation 6 | */ 7 | #ifndef _V2I64_H_INCLUDED 8 | #define _V2I64_H_INCLUDED 9 | 10 | /* include header for intel / amd sse2 instruction sets */ 11 | #include 12 | 13 | /* 8bit 32cell */ 14 | typedef struct v2i64_s { 15 | __m128i v1; 16 | } v2i64_t; 17 | 18 | /* expanders (without argument) */ 19 | #define _e_x_v2i64_1(u) 20 | #define _e_x_v2i64_2(u) 21 | 22 | /* expanders (without immediate) */ 23 | #define _e_v_v2i64_1(a) (a).v1 24 | #define _e_v_v2i64_2(a) (a).v1 25 | #define _e_vv_v2i64_1(a, b) (a).v1, (b).v1 26 | #define _e_vv_v2i64_2(a, b) (a).v1, (b).v1 27 | #define _e_vvv_v2i64_1(a, b, c) (a).v1, (b).v1, (c).v1 28 | #define _e_vvv_v2i64_2(a, b, c) (a).v1, (b).v1, (c).v1 29 | 30 | /* expanders with immediate */ 31 | #define _e_i_v2i64_1(imm) (imm) 32 | #define _e_i_v2i64_2(imm) (imm) 33 | #define _e_vi_v2i64_1(a, imm) (a).v1, (imm) 34 | #define _e_vi_v2i64_2(a, imm) (a).v1, (imm) 35 | #define _e_vvi_v2i64_1(a, b, imm) (a).v1, (b).v1, (imm) 36 | #define _e_vvi_v2i64_2(a, b, imm) (a).v1, (b).v1, (imm) 37 | 38 | /* address calculation macros */ 39 | #define _addr_v2i64_1(imm) ( (__m128i *)(imm) ) 40 | #define _addr_v2i64_2(imm) ( (__m128i *)(imm) ) 41 | #define _pv_v2i64(ptr) ( _addr_v2i64_1(ptr) ) 42 | /* expanders with pointers */ 43 | #define _e_p_v2i64_1(ptr) _addr_v2i64_1(ptr) 44 | #define _e_p_v2i64_2(ptr) _addr_v2i64_2(ptr) 45 | #define _e_pv_v2i64_1(ptr, a) _addr_v2i64_1(ptr), (a).v1 46 | #define _e_pv_v2i64_2(ptr, a) _addr_v2i64_2(ptr), (a).v1 47 | 48 | /* expand intrinsic name */ 49 | #define _i_v2i64(intrin) _mm_##intrin##_epi64 50 | #define _i_v2i64x(intrin) _mm_##intrin##_si128 51 | 52 | /* apply */ 53 | #define _a_v2i64(intrin, expander, ...) ( \ 54 | (v2i64_t) { \ 55 | _i_v2i64(intrin)(expander##_v2i64_1(__VA_ARGS__)) \ 56 | } \ 57 | ) 58 | #define _a_v2i64x(intrin, expander, ...) 
( \ 59 | (v2i64_t) { \ 60 | _i_v2i64x(intrin)(expander##_v2i64_1(__VA_ARGS__)) \ 61 | } \ 62 | ) 63 | #define _a_v2i64xv(intrin, expander, ...) { \ 64 | _i_v2i64x(intrin)(expander##_v2i64_1(__VA_ARGS__)); \ 65 | } 66 | 67 | /* load and store */ 68 | #define _load_v2i64(...) _a_v2i64x(load, _e_p, __VA_ARGS__) 69 | #define _loadu_v2i64(...) _a_v2i64x(loadu, _e_p, __VA_ARGS__) 70 | #define _store_v2i64(...) _a_v2i64xv(store, _e_pv, __VA_ARGS__) 71 | #define _storeu_v2i64(...) _a_v2i64xv(storeu, _e_pv, __VA_ARGS__) 72 | 73 | /* broadcast */ 74 | // #define _set_v2i64(...) _a_v2i64(set1, _e_i, __VA_ARGS__) 75 | #define _set_v2i64(x) ( (v2i64_t) { _mm_set1_epi64x(x) } ) 76 | #define _zero_v2i64() _a_v2i64x(setzero, _e_x, _unused) 77 | #define _seta_v2i64(x, y) ( (v2i64_t) { _mm_set_epi64x(x, y) } ) 78 | /* swap the two 64-bit lanes; imm 0x4e selects (1,0,3,2) so each 64-bit element stays intact (the previous 0x1b also reversed the 32-bit halves within each element), and the result type is v2i64_t (was mistyped v2i32_t) */ #define _swap_v2i64(x) ( \ 79 | (v2i64_t) { \ 80 | _mm_shuffle_epi32((x).v1, 0x4e) \ 81 | } \ 82 | ) 83 | 84 | /* logics */ 85 | #define _not_v2i64(...) _a_v2i64x(not, _e_v, __VA_ARGS__) 86 | #define _and_v2i64(...) _a_v2i64x(and, _e_vv, __VA_ARGS__) 87 | #define _or_v2i64(...) _a_v2i64x(or, _e_vv, __VA_ARGS__) 88 | #define _xor_v2i64(...) _a_v2i64x(xor, _e_vv, __VA_ARGS__) 89 | #define _andn_v2i64(...) _a_v2i64x(andnot, _e_vv, __VA_ARGS__) 90 | 91 | /* arithmetics; NOTE(review): _adds_/_subs_ expand to _mm_adds_epi64/_mm_subs_epi64, which SSE/AVX2 do not provide -- confirm these are never instantiated */ 92 | #define _add_v2i64(...) _a_v2i64(add, _e_vv, __VA_ARGS__) 93 | #define _sub_v2i64(...) _a_v2i64(sub, _e_vv, __VA_ARGS__) 94 | #define _adds_v2i64(...) _a_v2i64(adds, _e_vv, __VA_ARGS__) 95 | #define _subs_v2i64(...) _a_v2i64(subs, _e_vv, __VA_ARGS__) 96 | // #define _max_v2i64(...) _a_v2i64(max, _e_vv, __VA_ARGS__) 97 | // #define _min_v2i64(...) _a_v2i64(min, _e_vv, __VA_ARGS__) 98 | /* NOTE(review): true epi64 min/max need AVX-512; these epi32 substitutes compare 32-bit lanes independently and are only safe while values fit in the low 32 bits -- confirm callers */ #define _max_v2i64(a, b) ( (v2i64_t) { _mm_max_epi32(a.v1, b.v1) } ) 99 | #define _min_v2i64(a, b) ( (v2i64_t) { _mm_min_epi32(a.v1, b.v1) } ) 100 | 101 | /* shuffle */ 102 | // #define _shuf_v2i64(...)
_a_v2i64(shuffle, _e_vv, __VA_ARGS__) 103 | 104 | /* blend */ 105 | #define _sel_v2i64(mask, a, b) ( \ 106 | (v2i64_t) { \ 107 | _mm_blendv_epi8((b).v1, (a).v1, (mask).v1) \ 108 | } \ 109 | ) 110 | 111 | /* compare; NOTE(review): _lt_ expands to _mm_cmplt_epi64, which does not exist (only cmpgt_epi64, SSE4.2) -- confirm it is never instantiated */ 112 | #define _eq_v2i64(...) _a_v2i64(cmpeq, _e_vv, __VA_ARGS__) 113 | #define _lt_v2i64(...) _a_v2i64(cmplt, _e_vv, __VA_ARGS__) 114 | #define _gt_v2i64(...) _a_v2i64(cmpgt, _e_vv, __VA_ARGS__) 115 | 116 | /* insert and extract; _ins_ was missing the intrinsic-name argument (now expands to _mm_insert_epi64, cf. _ext_ below) */ 117 | #define _ins_v2i64(a, val, imm) { \ 118 | (a).v1 = _i_v2i64(insert)((a).v1, (val), (imm)); \ 119 | } 120 | #define _ext_v2i64(a, imm) ( \ 121 | (int64_t)_i_v2i64(extract)((a).v1, (imm)) \ 122 | ) 123 | 124 | /* mask */ 125 | #define _mask_v2i64(a) ( \ 126 | (uint32_t) (_mm_movemask_epi8((a).v1)) \ 127 | ) 128 | #define V2I64_MASK_00 ( 0x0000 ) 129 | #define V2I64_MASK_01 ( 0x00ff ) 130 | #define V2I64_MASK_10 ( 0xff00 ) 131 | #define V2I64_MASK_11 ( 0xffff ) 132 | 133 | /* convert */ 134 | #define _cvt_v2i32_v2i64(a) ( \ 135 | (v2i64_t) { \ 136 | _mm_cvtepi32_epi64((a).v1) \ 137 | } \ 138 | ) 139 | 140 | /* transpose */ 141 | #define _lo_v2i64(a, b) ( \ 142 | (v2i64_t) { \ 143 | _mm_unpacklo_epi64((a).v1, (b).v1) \ 144 | } \ 145 | ) 146 | #define _hi_v2i64(a, b) ( \ 147 | (v2i64_t) { \ 148 | _mm_unpackhi_epi64((a).v1, (b).v1) \ 149 | } \ 150 | ) 151 | 152 | /* debug print */ 153 | #ifdef _LOG_H_INCLUDED 154 | #define _print_v2i64(a) { \ 155 | debug("(v2i64_t) %s(%lld, %lld)", #a, _ext_v2i64(a, 1), _ext_v2i64(a, 0)); \ 156 | } 157 | #else 158 | #define _print_v2i64(x) ; 159 | #endif 160 | 161 | #endif /* _V2I64_H_INCLUDED */ 162 | /** 163 | * end of v2i64.h 164 | */ 165 | -------------------------------------------------------------------------------- /arch/x86_64_avx2/v32i16.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file v32i16.h 4 | * 5 | * @brief struct and _Generic based vector class implementation 6 | */ 7 | #ifndef _V32I16_H_INCLUDED 8 | #define
_V32I16_H_INCLUDED 9 | 10 | /* include header for intel / amd sse2 instruction sets */ 11 | #include 12 | 13 | /* 8bit 32cell */ 14 | typedef struct v32i16_s { 15 | __m256i v1; 16 | __m256i v2; 17 | } v32i16_t; 18 | 19 | /* expanders (without argument) */ 20 | #define _e_x_v32i16_1(u) 21 | #define _e_x_v32i16_2(u) 22 | 23 | /* expanders (without immediate) */ 24 | #define _e_v_v32i16_1(a) (a).v1 25 | #define _e_v_v32i16_2(a) (a).v2 26 | #define _e_vv_v32i16_1(a, b) (a).v1, (b).v1 27 | #define _e_vv_v32i16_2(a, b) (a).v2, (b).v2 28 | #define _e_vvv_v32i16_1(a, b, c) (a).v1, (b).v1, (c).v1 29 | #define _e_vvv_v32i16_2(a, b, c) (a).v2, (b).v2, (c).v2 30 | 31 | /* expanders with immediate */ 32 | #define _e_i_v32i16_1(imm) (imm) 33 | #define _e_i_v32i16_2(imm) (imm) 34 | #define _e_vi_v32i16_1(a, imm) (a).v1, (imm) 35 | #define _e_vi_v32i16_2(a, imm) (a).v2, (imm) 36 | #define _e_vvi_v32i16_1(a, b, imm) (a).v1, (b).v1, (imm) 37 | #define _e_vvi_v32i16_2(a, b, imm) (a).v2, (b).v2, (imm) 38 | 39 | /* address calculation macros */ 40 | #define _addr_v32i16_1(imm) ( (__m256i *)(imm) ) 41 | #define _addr_v32i16_2(imm) ( (__m256i *)(imm) + 1 ) 42 | #define _pv_v32i16(ptr) ( _addr_v32i16_1(ptr) ) 43 | /* expanders with pointers */ 44 | #define _e_p_v32i16_1(ptr) _addr_v32i16_1(ptr) 45 | #define _e_p_v32i16_2(ptr) _addr_v32i16_2(ptr) 46 | #define _e_pv_v32i16_1(ptr, a) _addr_v32i16_1(ptr), (a).v1 47 | #define _e_pv_v32i16_2(ptr, a) _addr_v32i16_2(ptr), (a).v2 48 | 49 | /* expand intrinsic name */ 50 | #define _i_v32i16(intrin) _mm256_##intrin##_epi16 51 | #define _i_v32i16x(intrin) _mm256_##intrin##_si256 52 | 53 | /* apply */ 54 | #define _a_v32i16(intrin, expander, ...) ( \ 55 | (v32i16_t) { \ 56 | _i_v32i16(intrin)(expander##_v32i16_1(__VA_ARGS__)), \ 57 | _i_v32i16(intrin)(expander##_v32i16_2(__VA_ARGS__)) \ 58 | } \ 59 | ) 60 | #define _a_v32i16x(intrin, expander, ...) 
( \ 61 | (v32i16_t) { \ 62 | _i_v32i16x(intrin)(expander##_v32i16_1(__VA_ARGS__)), \ 63 | _i_v32i16x(intrin)(expander##_v32i16_2(__VA_ARGS__)) \ 64 | } \ 65 | ) 66 | #define _a_v32i16xv(intrin, expander, ...) { \ 67 | _i_v32i16x(intrin)(expander##_v32i16_1(__VA_ARGS__)); \ 68 | _i_v32i16x(intrin)(expander##_v32i16_2(__VA_ARGS__)); \ 69 | } 70 | 71 | /* load and store */ 72 | #define _load_v32i16(...) _a_v32i16x(load, _e_p, __VA_ARGS__) 73 | #define _loadu_v32i16(...) _a_v32i16x(loadu, _e_p, __VA_ARGS__) 74 | #define _store_v32i16(...) _a_v32i16xv(store, _e_pv, __VA_ARGS__) 75 | #define _storeu_v32i16(...) _a_v32i16xv(storeu, _e_pv, __VA_ARGS__) 76 | 77 | /* broadcast */ 78 | #define _set_v32i16(...) _a_v32i16(set1, _e_i, __VA_ARGS__) 79 | #define _zero_v32i16() _a_v32i16x(setzero, _e_x, _unused) 80 | 81 | /* logics */ 82 | #define _not_v32i16(...) _a_v32i16x(not, _e_v, __VA_ARGS__) 83 | #define _and_v32i16(...) _a_v32i16x(and, _e_vv, __VA_ARGS__) 84 | #define _or_v32i16(...) _a_v32i16x(or, _e_vv, __VA_ARGS__) 85 | #define _xor_v32i16(...) _a_v32i16x(xor, _e_vv, __VA_ARGS__) 86 | #define _andn_v32i16(...) _a_v32i16x(andnot, _e_vv, __VA_ARGS__) 87 | 88 | /* arithmetics */ 89 | #define _add_v32i16(...) _a_v32i16(add, _e_vv, __VA_ARGS__) 90 | #define _sub_v32i16(...) _a_v32i16(sub, _e_vv, __VA_ARGS__) 91 | #define _adds_v32i16(...) _a_v32i16(adds, _e_vv, __VA_ARGS__) 92 | #define _subs_v32i16(...) _a_v32i16(subs, _e_vv, __VA_ARGS__) 93 | #define _max_v32i16(...) _a_v32i16(max, _e_vv, __VA_ARGS__) 94 | #define _min_v32i16(...) _a_v32i16(min, _e_vv, __VA_ARGS__) 95 | 96 | /* compare */ 97 | #define _eq_v32i16(...) _a_v32i16(cmpeq, _e_vv, __VA_ARGS__) 98 | #define _lt_v32i16(...) _a_v32i16(cmplt, _e_vv, __VA_ARGS__) 99 | #define _gt_v32i16(...) 
_a_v32i16(cmpgt, _e_vv, __VA_ARGS__) 100 | 101 | /* insert and extract; _ins_ must use the 16-bit-lane insert (_i_v32i16) -- the original called _i_v32i8(insert), i.e. _mm256_insert_epi8, with 16-bit lane indices */ 102 | #define _ins_v32i16(a, val, imm) { \ 103 | if((imm) < sizeof(__m256i)/sizeof(int16_t)) { \ 104 | (a).v1 = _i_v32i16(insert)((a).v1, (val), (imm)); \ 105 | } else if((imm) < 2*sizeof(__m256i)/sizeof(int16_t)) { \ 106 | (a).v2 = _i_v32i16(insert)((a).v2, (val), (imm) - sizeof(__m256i)/sizeof(int16_t)); \ 107 | } \ 108 | } 109 | #define _ext_v32i16(a, imm) ( \ 110 | (int16_t)(((imm) < sizeof(__m256i)/sizeof(int16_t)) \ 111 | ? _i_v32i16(extract)((a).v1, (imm)) \ 112 | : _i_v32i16(extract)((a).v2, (imm) - sizeof(__m256i)/sizeof(int16_t))) \ 113 | ) 114 | 115 | /* mask */ 116 | #define _mask_v32i16(a) ( \ 117 | (v32_mask_t) { \ 118 | .m1 = _mm256_movemask_epi8( \ 119 | _mm256_permute4x64_epi64( \ 120 | _mm256_packs_epi16((a).v1, (a).v2), 0xd8)) \ 121 | } \ 122 | ) 123 | 124 | /* horizontal max (reduction max) */ 125 | #define _hmax_v32i16(a) ({ \ 126 | __m256i _vmax = _mm256_max_epi16((a).v1, (a).v2); \ 127 | _vmax = _mm256_max_epi16(_vmax, \ 128 | _mm256_castsi128_si256(_mm256_extracti128_si256(_vmax, 1))); \ 129 | _vmax = _mm256_max_epi16(_vmax, \ 130 | _mm256_srli_si256(_vmax, 8)); \ 131 | _vmax = _mm256_max_epi16(_vmax, \ 132 | _mm256_srli_si256(_vmax, 4)); \ 133 | _vmax = _mm256_max_epi16(_vmax, \ 134 | _mm256_srli_si256(_vmax, 2)); \ 135 | (int16_t)_mm256_extract_epi16(_vmax, 0); \ 136 | }) 137 | 138 | #define _cvt_v32i8_v32i16(a) ( \ 139 | (v32i16_t) { \ 140 | _mm256_cvtepi8_epi16(_mm256_castsi256_si128((a).v1)), \ 141 | _mm256_cvtepi8_epi16(_mm256_extracti128_si256((a).v1, 1)) \ 142 | } \ 143 | ) 144 | 145 | /* debug print */ 146 | #ifdef _LOG_H_INCLUDED 147 | #define _print_v32i16(a) { \ 148 | debug("(v32i16_t) %s(%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, " \ 149 | "%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d)", \ 150 | #a, \ 151 | _ext_v32i16(a, 31), \ 152 | _ext_v32i16(a, 30), \ 153 | _ext_v32i16(a, 29), \ 154 | _ext_v32i16(a, 28), \ 155 |
_ext_v32i16(a, 27), \ 156 | _ext_v32i16(a, 26), \ 157 | _ext_v32i16(a, 25), \ 158 | _ext_v32i16(a, 24), \ 159 | _ext_v32i16(a, 23), \ 160 | _ext_v32i16(a, 22), \ 161 | _ext_v32i16(a, 21), \ 162 | _ext_v32i16(a, 20), \ 163 | _ext_v32i16(a, 19), \ 164 | _ext_v32i16(a, 18), \ 165 | _ext_v32i16(a, 17), \ 166 | _ext_v32i16(a, 16), \ 167 | _ext_v32i16(a, 15), \ 168 | _ext_v32i16(a, 14), \ 169 | _ext_v32i16(a, 13), \ 170 | _ext_v32i16(a, 12), \ 171 | _ext_v32i16(a, 11), \ 172 | _ext_v32i16(a, 10), \ 173 | _ext_v32i16(a, 9), \ 174 | _ext_v32i16(a, 8), \ 175 | _ext_v32i16(a, 7), \ 176 | _ext_v32i16(a, 6), \ 177 | _ext_v32i16(a, 5), \ 178 | _ext_v32i16(a, 4), \ 179 | _ext_v32i16(a, 3), \ 180 | _ext_v32i16(a, 2), \ 181 | _ext_v32i16(a, 1), \ 182 | _ext_v32i16(a, 0)); \ 183 | } 184 | #else 185 | #define _print_v32i16(x) ; 186 | #endif 187 | 188 | #endif /* _V32I16_H_INCLUDED */ 189 | /** 190 | * end of v32i16.h 191 | */ 192 | -------------------------------------------------------------------------------- /arch/x86_64_avx2/v32i8.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file v32i8.h 4 | * 5 | * @brief struct and _Generic based vector class implementation 6 | */ 7 | #ifndef _V32I8_H_INCLUDED 8 | #define _V32I8_H_INCLUDED 9 | 10 | /* include header for intel / amd sse2 instruction sets */ 11 | #include 12 | 13 | /* 8bit 32cell */ 14 | typedef struct v32i8_s { 15 | __m256i v1; 16 | } v32i8_t; 17 | 18 | /* expanders (without argument) */ 19 | #define _e_x_v32i8_1(u) 20 | 21 | /* expanders (without immediate) */ 22 | #define _e_v_v32i8_1(a) (a).v1 23 | #define _e_vv_v32i8_1(a, b) (a).v1, (b).v1 24 | #define _e_vvv_v32i8_1(a, b, c) (a).v1, (b).v1, (c).v1 25 | 26 | /* expanders with immediate */ 27 | #define _e_i_v32i8_1(imm) (imm) 28 | #define _e_vi_v32i8_1(a, imm) (a).v1, (imm) 29 | #define _e_vvi_v32i8_1(a, b, imm) (a).v1, (b).v1, (imm) 30 | 31 | /* address calculation macros */ 32 | #define _addr_v32i8_1(imm) ( (__m256i 
*)(imm) ) 33 | #define _pv_v32i8(ptr) ( _addr_v32i8_1(ptr) ) 34 | /* expanders with pointers */ 35 | #define _e_p_v32i8_1(ptr) _addr_v32i8_1(ptr) 36 | #define _e_pv_v32i8_1(ptr, a) _addr_v32i8_1(ptr), (a).v1 37 | 38 | /* expand intrinsic name */ 39 | #define _i_v32i8(intrin) _mm256_##intrin##_epi8 40 | #define _i_v32i8x(intrin) _mm256_##intrin##_si256 41 | 42 | /* apply */ 43 | #define _a_v32i8(intrin, expander, ...) ( \ 44 | (v32i8_t) { \ 45 | _i_v32i8(intrin)(expander##_v32i8_1(__VA_ARGS__)) \ 46 | } \ 47 | ) 48 | #define _a_v32i8x(intrin, expander, ...) ( \ 49 | (v32i8_t) { \ 50 | _i_v32i8x(intrin)(expander##_v32i8_1(__VA_ARGS__)) \ 51 | } \ 52 | ) 53 | #define _a_v32i8xv(intrin, expander, ...) { \ 54 | _i_v32i8x(intrin)(expander##_v32i8_1(__VA_ARGS__)); \ 55 | } 56 | 57 | /* load and store */ 58 | #define _load_v32i8(...) _a_v32i8x(load, _e_p, __VA_ARGS__) 59 | #define _loadu_v32i8(...) _a_v32i8x(loadu, _e_p, __VA_ARGS__) 60 | #define _store_v32i8(...) _a_v32i8xv(store, _e_pv, __VA_ARGS__) 61 | #define _storeu_v32i8(...) _a_v32i8xv(storeu, _e_pv, __VA_ARGS__) 62 | 63 | /* broadcast */ 64 | #define _set_v32i8(...) _a_v32i8(set1, _e_i, __VA_ARGS__) 65 | #define _zero_v32i8() _a_v32i8x(setzero, _e_x, _unused) 66 | 67 | /* swap (reverse) */ 68 | #define _swap_idx_v32i8() ( \ 69 | _mm256_broadcastsi128_si256(_mm_set_epi8( \ 70 | 0, 1, 2, 3, 4, 5, 6, 7, \ 71 | 8, 9, 10, 11, 12, 13, 14, 15)) \ 72 | ) 73 | #define _swap_v32i8(a) ( \ 74 | (v32i8_t) { \ 75 | _mm256_permute2x128_si256( \ 76 | _mm256_shuffle_epi8((a).v1, _swap_idx_v32i8()), \ 77 | _mm256_shuffle_epi8((a).v1, _swap_idx_v32i8()), \ 78 | 0x01) \ 79 | } \ 80 | ) 81 | 82 | /* logics */ 83 | #define _not_v32i8(...) _a_v32i8x(not, _e_v, __VA_ARGS__) 84 | #define _and_v32i8(...) _a_v32i8x(and, _e_vv, __VA_ARGS__) 85 | #define _or_v32i8(...) _a_v32i8x(or, _e_vv, __VA_ARGS__) 86 | #define _xor_v32i8(...) _a_v32i8x(xor, _e_vv, __VA_ARGS__) 87 | #define _andn_v32i8(...) 
_a_v32i8x(andnot, _e_vv, __VA_ARGS__) 88 | 89 | /* arithmetics */ 90 | #define _add_v32i8(...) _a_v32i8(add, _e_vv, __VA_ARGS__) 91 | #define _sub_v32i8(...) _a_v32i8(sub, _e_vv, __VA_ARGS__) 92 | #define _adds_v32i8(...) _a_v32i8(adds, _e_vv, __VA_ARGS__) 93 | #define _subs_v32i8(...) _a_v32i8(subs, _e_vv, __VA_ARGS__) 94 | #define _max_v32i8(...) _a_v32i8(max, _e_vv, __VA_ARGS__) 95 | #define _min_v32i8(...) _a_v32i8(min, _e_vv, __VA_ARGS__) 96 | 97 | /* shuffle */ 98 | #define _shuf_v32i8(...) _a_v32i8(shuffle, _e_vv, __VA_ARGS__) 99 | 100 | /* blend */ 101 | // #define _sel_v32i8(...) _a_v32i8(blendv, _e_vvv, __VA_ARGS__) 102 | 103 | /* compare */ 104 | #define _eq_v32i8(...) _a_v32i8(cmpeq, _e_vv, __VA_ARGS__) 105 | #define _lt_v32i8(...) _a_v32i8(cmplt, _e_vv, __VA_ARGS__) 106 | #define _gt_v32i8(...) _a_v32i8(cmpgt, _e_vv, __VA_ARGS__) 107 | 108 | /* insert and extract */ 109 | #define _ins_v32i8(a, val, imm) { \ 110 | (a).v1 = _i_v32i8(insert)((a).v1, (val), (imm)); \ 111 | } 112 | #define _ext_v32i8(a, imm) ( \ 113 | (int8_t)_i_v32i8(extract)((a).v1, (imm)) \ 114 | ) 115 | 116 | /* shift; NOTE(review): _bsl_/_bsr_ take an imm argument but hard-code a one-byte shift (alignr 15 / 1) -- confirm callers only ever pass 1 */ 117 | #define _bsl_v32i8(a, imm) ( \ 118 | (v32i8_t) { \ 119 | _mm256_alignr_epi8( \ 120 | (a).v1, \ 121 | _mm256_permute2x128_si256((a).v1, (a).v1, 0x08), \ 122 | 15) \ 123 | } \ 124 | ) 125 | #define _bsr_v32i8(a, imm) ( \ 126 | (v32i8_t) { \ 127 | _mm256_alignr_epi8( \ 128 | _mm256_castsi128_si256( \ 129 | _mm256_extracti128_si256((a).v1, 1)), \ 130 | (a).v1, \ 131 | 1) \ 132 | } \ 133 | ) 134 | #define _shl_v32i8(a, imm) ( \ 135 | (v32i8_t) { \ 136 | _mm256_slli_epi32((a).v1, (imm)) \ 137 | } \ 138 | ) 139 | #define _shr_v32i8(a, imm) ( \ 140 | (v32i8_t) { \ 141 | _mm256_srli_epi32((a).v1, (imm)) \ 142 | } \ 143 | ) 144 | /* no _mm256_slai_epi32 exists -- arithmetic left shift equals logical, so use slli */ #define _sal_v32i8(a, imm) ( \ 145 | (v32i8_t) { \ 146 | _mm256_slli_epi32((a).v1, (imm)) \ 147 | } \ 148 | ) 149 | #define _sar_v32i8(a, imm) ( \ 150 | (v32i8_t) { \ 151 | _mm256_srai_epi32((a).v1, (imm)) \ 152 | } \ 153 | ) 154 | 155 | /* mask */
156 | #define _mask_v32i8(a) ( \ 157 | (v32_mask_t) { \ 158 | .m1 = _i_v32i8(movemask)((a).v1) \ 159 | } \ 160 | ) 161 | 162 | /* debug print */ 163 | #ifdef _LOG_H_INCLUDED 164 | #define _print_v32i8(a) { \ 165 | debug("(v32i8_t) %s(%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, " \ 166 | "%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d)", \ 167 | #a, \ 168 | _ext_v32i8(a, 31), \ 169 | _ext_v32i8(a, 30), \ 170 | _ext_v32i8(a, 29), \ 171 | _ext_v32i8(a, 28), \ 172 | _ext_v32i8(a, 27), \ 173 | _ext_v32i8(a, 26), \ 174 | _ext_v32i8(a, 25), \ 175 | _ext_v32i8(a, 24), \ 176 | _ext_v32i8(a, 23), \ 177 | _ext_v32i8(a, 22), \ 178 | _ext_v32i8(a, 21), \ 179 | _ext_v32i8(a, 20), \ 180 | _ext_v32i8(a, 19), \ 181 | _ext_v32i8(a, 18), \ 182 | _ext_v32i8(a, 17), \ 183 | _ext_v32i8(a, 16), \ 184 | _ext_v32i8(a, 15), \ 185 | _ext_v32i8(a, 14), \ 186 | _ext_v32i8(a, 13), \ 187 | _ext_v32i8(a, 12), \ 188 | _ext_v32i8(a, 11), \ 189 | _ext_v32i8(a, 10), \ 190 | _ext_v32i8(a, 9), \ 191 | _ext_v32i8(a, 8), \ 192 | _ext_v32i8(a, 7), \ 193 | _ext_v32i8(a, 6), \ 194 | _ext_v32i8(a, 5), \ 195 | _ext_v32i8(a, 4), \ 196 | _ext_v32i8(a, 3), \ 197 | _ext_v32i8(a, 2), \ 198 | _ext_v32i8(a, 1), \ 199 | _ext_v32i8(a, 0)); \ 200 | } 201 | #else 202 | #define _print_v32i8(x) ; 203 | #endif 204 | 205 | #endif /* _V32I8_H_INCLUDED */ 206 | /** 207 | * end of v32i8.h 208 | */ 209 | -------------------------------------------------------------------------------- /arch/x86_64_avx2/vector.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file vector.h 4 | * 5 | * @brief header for various vector (SIMD) macros 6 | */ 7 | #ifndef _VECTOR_H_INCLUDED 8 | #define _VECTOR_H_INCLUDED 9 | 10 | /** 11 | * @struct v32_mask_s 12 | * 13 | * @brief common 32cell-wide mask type 14 | */ 15 | typedef struct v32_mask_s { 16 | uint32_t m1; 17 | } v32_mask_t; 18 | typedef struct v32_mask_s v32i8_mask_t; 19 | 20 | /** 21 | * @union 
v32_mask_u 22 | */ 23 | typedef union v32_mask_u { 24 | v32_mask_t mask; 25 | uint32_t all; 26 | } v32_masku_t; 27 | typedef union v32_mask_u v32i8_masku_t; 28 | 29 | /** 30 | * @struct v16_mask_s 31 | * 32 | * @brief common 16cell-wide mask type 33 | */ 34 | typedef struct v16_mask_s { 35 | uint16_t m1; 36 | } v16_mask_t; 37 | typedef struct v16_mask_s v16i8_mask_t; 38 | 39 | /** 40 | * @union v16_mask_u 41 | */ 42 | typedef union v16_mask_u { 43 | v16_mask_t mask; 44 | uint16_t all; 45 | } v16_masku_t; 46 | typedef union v16_mask_u v16i8_masku_t; 47 | 48 | /** 49 | * abstract vector types 50 | * 51 | * v2i32_t, v2i64_t for pair of 32-bit, 64-bit signed integers. Mainly for 52 | * a pair of coordinates. Conversion between the two types are provided. 53 | * 54 | * v16i8_t is a unit vector for substitution matrices and gap vectors. 55 | * Broadcast to v16i8_t and v32i8_t are provided. 56 | * 57 | * v32i8_t is a unit vector for small differences in banded alignment. v16i8_t 58 | * vector can be broadcasted to high and low 16 elements of v32i8_t. It can 59 | * also expanded to v32i16_t. 60 | * 61 | * v32i16_t is for middle differences in banded alignment. 
It can be converted 62 | * from v32i8_t 63 | */ 64 | #include "v2i32.h" 65 | #include "v2i64.h" 66 | #include "v16i8.h" 67 | #include "v32i8.h" 68 | #include "v32i16.h" 69 | 70 | /* conversion and cast between vector types */ 71 | #define _from_v16i8_v32i8(x) (v32i8_t){ _mm256_broadcastsi128_si256((x).v1) } 72 | #define _from_v32i8_v32i8(x) (v32i8_t){ (x).v1 } 73 | #define _from_v16i8_v16i8(x) (v16i8_t){ (x).v1 } 74 | #define _from_v32i8_v16i8(x) (v16i8_t){ _mm256_castsi256_si128((x).v1) } 75 | 76 | /* inversed alias */ 77 | #define _to_v32i8_v16i8(x) (v32i8_t){ _mm256_broadcastsi128_si256((x).v1) } 78 | #define _to_v32i8_v32i8(x) (v32i8_t){ (x).v1 } 79 | #define _to_v16i8_v16i8(x) (v16i8_t){ (x).v1 } 80 | #define _to_v16i8_v32i8(x) (v16i8_t){ _mm256_castsi256_si128((x).v1) } 81 | 82 | #define _cast_v2i64_v2i32(x) (v2i32_t){ (x).v1 } 83 | #define _cast_v2i32_v2i64(x) (v2i64_t){ (x).v1 } 84 | 85 | #endif /* _VECTOR_H_INCLUDED */ 86 | /** 87 | * end of vector.h 88 | */ 89 | -------------------------------------------------------------------------------- /arch/x86_64_sse41/arch_util.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file arch_util.h 4 | * 5 | * @brief architecture-dependent utilities devided from util.h 6 | */ 7 | #ifndef _ARCH_UTIL_H_INCLUDED 8 | #define _ARCH_UTIL_H_INCLUDED 9 | 10 | #include 11 | #include 12 | 13 | /** 14 | * misc bit operations (popcnt, tzcnt, and lzcnt) 15 | */ 16 | 17 | /** 18 | * @macro popcnt 19 | */ 20 | #ifdef __POPCNT__ 21 | #define popcnt(x) ( (uint64_t)_mm_popcnt_u64(x) ) 22 | #else 23 | // #warning "popcnt instruction is not enabled." 
24 | static inline 25 | uint64_t popcnt(uint64_t n) 26 | { 27 | uint64_t c = 0; 28 | c = (n & 0x5555555555555555) + ((n>>1) & 0x5555555555555555); 29 | c = (c & 0x3333333333333333) + ((c>>2) & 0x3333333333333333); 30 | c = (c & 0x0f0f0f0f0f0f0f0f) + ((c>>4) & 0x0f0f0f0f0f0f0f0f); 31 | c = (c & 0x00ff00ff00ff00ff) + ((c>>8) & 0x00ff00ff00ff00ff); 32 | c = (c & 0x0000ffff0000ffff) + ((c>>16) & 0x0000ffff0000ffff); 33 | c = (c & 0x00000000ffffffff) + ((c>>32) & 0x00000000ffffffff); 34 | return(c); 35 | } 36 | #endif 37 | 38 | /** 39 | * @macro tzcnt 40 | * @brief trailing zero count (count #continuous zeros from LSb) 41 | */ 42 | #ifdef __BMI__ 43 | /** immintrin.h is already included */ 44 | #define tzcnt(x) ( (uint64_t)_tzcnt_u64(x) ) 45 | #else 46 | // #warning "tzcnt instruction is not enabled." 47 | static inline 48 | uint64_t tzcnt(uint64_t n) 49 | { 50 | #ifdef __POPCNT__ 51 | return(popcnt(~n & (n - 1))); 52 | #else 53 | if(n == 0) { 54 | return(64); 55 | } else { 56 | int64_t res; 57 | __asm__( "bsfq %1, %0" : "=r"(res) : "r"(n) ); 58 | return(res); 59 | } 60 | #endif 61 | } 62 | #endif 63 | 64 | /** 65 | * @macro lzcnt 66 | * @brief leading zero count (count #continuous zeros from MSb) 67 | */ 68 | #ifdef __LZCNT__ 69 | #define lzcnt(x) ( (uint64_t)_lzcnt_u64(x) ) 70 | #else 71 | // #warning "lzcnt instruction is not enabled." 72 | static inline 73 | uint64_t lzcnt(uint64_t n) 74 | { 75 | if(n == 0) { 76 | return(64); 77 | } else { 78 | int64_t res; 79 | __asm__( "bsrq %1, %0" : "=r"(res) : "r"(n) ); 80 | return(63 - res); 81 | } 82 | } 83 | #endif 84 | 85 | /** 86 | * @macro _loadu_u64, _storeu_u64 87 | */ 88 | #define _loadu_u64(p) ( *((uint64_t *)(p)) ) 89 | #define _storeu_u64(p, e) { *((uint64_t *)(p)) = (e); } 90 | 91 | /** 92 | * @macro _aligned_block_memcpy 93 | * 94 | * @brief copy size bytes from src to dst. 95 | * 96 | * @detail 97 | * src and dst must be aligned to 16-byte boundary. 98 | * copy must be multipe of 16. 
99 | */ 100 | #define _xmm_rd_a(src, n) (xmm##n) = _mm_load_si128((__m128i *)(src) + (n)) 101 | #define _xmm_rd_u(src, n) (xmm##n) = _mm_loadu_si128((__m128i *)(src) + (n)) 102 | #define _xmm_wr_a(dst, n) _mm_store_si128((__m128i *)(dst) + (n), (xmm##n)) 103 | #define _xmm_wr_u(dst, n) _mm_storeu_si128((__m128i *)(dst) + (n), (xmm##n)) 104 | #define _memcpy_blk_intl(dst, src, size, _wr, _rd) { \ 105 | /** duff's device */ \ 106 | uint8_t *_src = (uint8_t *)(src), *_dst = (uint8_t *)(dst); \ 107 | int64_t const _nreg = 16; /** #xmm registers == 16 */ \ 108 | int64_t const _tcnt = (size) / sizeof(__m128i); \ 109 | int64_t const _offset = ((_tcnt - 1) & (_nreg - 1)) - (_nreg - 1); \ 110 | int64_t _jmp = _tcnt & (_nreg - 1); \ 111 | int64_t _lcnt = (_tcnt + _nreg - 1) / _nreg; \ 112 | register __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7; \ 113 | register __m128i xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15; \ 114 | _src += _offset * sizeof(__m128i); \ 115 | _dst += _offset * sizeof(__m128i); \ 116 | switch(_jmp) { \ 117 | case 0: do { _rd(_src, 0); \ 118 | case 15: _rd(_src, 1); \ 119 | case 14: _rd(_src, 2); \ 120 | case 13: _rd(_src, 3); \ 121 | case 12: _rd(_src, 4); \ 122 | case 11: _rd(_src, 5); \ 123 | case 10: _rd(_src, 6); \ 124 | case 9: _rd(_src, 7); \ 125 | case 8: _rd(_src, 8); \ 126 | case 7: _rd(_src, 9); \ 127 | case 6: _rd(_src, 10); \ 128 | case 5: _rd(_src, 11); \ 129 | case 4: _rd(_src, 12); \ 130 | case 3: _rd(_src, 13); \ 131 | case 2: _rd(_src, 14); \ 132 | case 1: _rd(_src, 15); \ 133 | switch(_jmp) { \ 134 | case 0: _wr(_dst, 0); \ 135 | case 15: _wr(_dst, 1); \ 136 | case 14: _wr(_dst, 2); \ 137 | case 13: _wr(_dst, 3); \ 138 | case 12: _wr(_dst, 4); \ 139 | case 11: _wr(_dst, 5); \ 140 | case 10: _wr(_dst, 6); \ 141 | case 9: _wr(_dst, 7); \ 142 | case 8: _wr(_dst, 8); \ 143 | case 7: _wr(_dst, 9); \ 144 | case 6: _wr(_dst, 10); \ 145 | case 5: _wr(_dst, 11); \ 146 | case 4: _wr(_dst, 12); \ 147 | case 3: _wr(_dst, 13); \ 
148 | case 2: _wr(_dst, 14); \ 149 | case 1: _wr(_dst, 15); \ 150 | } \ 151 | _src += _nreg * sizeof(__m128i); \ 152 | _dst += _nreg * sizeof(__m128i); \ 153 | _jmp = 0; \ 154 | } while(--_lcnt > 0); \ 155 | } \ 156 | } 157 | #define _memcpy_blk_aa(dst, src, len) _memcpy_blk_intl(dst, src, len, _xmm_wr_a, _xmm_rd_a) 158 | #define _memcpy_blk_au(dst, src, len) _memcpy_blk_intl(dst, src, len, _xmm_wr_a, _xmm_rd_u) 159 | #define _memcpy_blk_ua(dst, src, len) _memcpy_blk_intl(dst, src, len, _xmm_wr_u, _xmm_rd_a) 160 | #define _memcpy_blk_uu(dst, src, len) _memcpy_blk_intl(dst, src, len, _xmm_wr_u, _xmm_rd_u) 161 | #define _memset_blk_intl(dst, a, size, _wr) { \ 162 | uint8_t *_dst = (uint8_t *)(dst); \ 163 | __m128i const xmm0 = _mm_set1_epi8((int8_t)a); \ 164 | int64_t i; \ 165 | for(i = 0; i < size / sizeof(__m128i); i++) { \ 166 | _wr(_dst, 0); _dst += sizeof(__m128i); \ 167 | } \ 168 | } 169 | #define _memset_blk_a(dst, a, size) _memset_blk_intl(dst, a, size, _xmm_wr_a) 170 | #define _memset_blk_u(dst, a, size) _memset_blk_intl(dst, a, size, _xmm_wr_u) 171 | 172 | /** 173 | * substitution matrix abstraction 174 | */ 175 | /* store */ 176 | #define _store_sb(_scv, sv16) { _store_v16i8((_scv).v1, (sv16)); } 177 | 178 | /* load */ 179 | #define _load_sb(scv) ( _from_v16i8(_load_v16i8((scv).v1)) ) 180 | 181 | 182 | /** 183 | * gap penalty vector abstraction macros 184 | */ 185 | /* store */ 186 | #define _make_gap(_e1, _e2, _e3, _e4) ( \ 187 | (v16i8_t){ _mm_set_epi8( \ 188 | (_e4), (_e4), (_e4), (_e4), \ 189 | (_e3), (_e3), (_e3), (_e3), \ 190 | (_e2), (_e2), (_e2), (_e2), \ 191 | (_e1), (_e1), (_e1), (_e1)) \ 192 | } \ 193 | ) 194 | #define _store_adjh(_scv, _adjh, _adjv, _ofsh, _ofsv) { \ 195 | _store_v16i8((_scv).v2, _make_gap(_adjh, _adjv, _ofsh, _ofsv)) \ 196 | } 197 | #define _store_adjv(_scv, _adjh, _adjv, _ofsh, _ofsv) { \ 198 | _store_v16i8((_scv).v3, _make_gap(_adjh, _adjv, _ofsh, _ofsv)) \ 199 | } 200 | #define _store_ofsh(_scv, _adjh, _adjv, _ofsh, _ofsv) 
{ \ 201 | _store_v16i8((_scv).v4, _make_gap(_adjh, _adjv, _ofsh, _ofsv)) \ 202 | } 203 | #define _store_ofsv(_scv, _adjh, _adjv, _ofsh, _ofsv) { \ 204 | _store_v16i8((_scv).v5, _make_gap(_adjh, _adjv, _ofsh, _ofsv)) \ 205 | } 206 | 207 | /* load */ 208 | #define _load_gap(_ptr, _idx) ( \ 209 | (v16i8_t){ _mm_shuffle_epi32(_mm_load_si128((__m128i const *)(_ptr)), (_idx)) } \ 210 | ) 211 | 212 | #define _load_adjh(_scv) ( _from_v16i8(_load_gap((_scv).v2, 0x00)) ) 213 | #define _load_adjv(_scv) ( _from_v16i8(_load_gap((_scv).v2, 0x55)) ) 214 | #define _load_ofsh(_scv) ( _from_v16i8(_load_gap((_scv).v2, 0xaa)) ) 215 | #define _load_ofsv(_scv) ( _from_v16i8(_load_gap((_scv).v2, 0xff)) ) 216 | 217 | 218 | 219 | /* cache line operation */ 220 | #define WCR_BUF_SIZE ( 128 ) /** two cache lines in x86_64 */ 221 | #define memcpy_buf(_dst, _src) { \ 222 | register __m128i *_s = (__m128i *)(_src); \ 223 | register __m128i *_d = (__m128i *)(_dst); \ 224 | __m128i xmm0 = _mm_load_si128(_s); \ 225 | __m128i xmm1 = _mm_load_si128(_s + 1); \ 226 | __m128i xmm2 = _mm_load_si128(_s + 2); \ 227 | __m128i xmm3 = _mm_load_si128(_s + 3); \ 228 | __m128i xmm4 = _mm_load_si128(_s + 4); \ 229 | __m128i xmm5 = _mm_load_si128(_s + 5); \ 230 | __m128i xmm6 = _mm_load_si128(_s + 6); \ 231 | __m128i xmm7 = _mm_load_si128(_s + 7); \ 232 | _mm_stream_si128(_d, xmm0); \ 233 | _mm_stream_si128(_d + 1, xmm1); \ 234 | _mm_stream_si128(_d + 2, xmm2); \ 235 | _mm_stream_si128(_d + 3, xmm3); \ 236 | _mm_stream_si128(_d + 4, xmm4); \ 237 | _mm_stream_si128(_d + 5, xmm5); \ 238 | _mm_stream_si128(_d + 6, xmm6); \ 239 | _mm_stream_si128(_d + 7, xmm7); \ 240 | } 241 | 242 | /* 128bit register operation */ 243 | #define elem_128_t __m128i 244 | #define rd_128(_ptr) ( _mm_load_si128((__m128i *)(_ptr)) ) 245 | #define wr_128(_ptr, _e) { _mm_store_si128((__m128i *)(_ptr), (_e)); } 246 | #define _ex_128(k, h) ( _mm_extract_epi64((elem_128_t)k, h) ) 247 | #define ex_128(k, p) ( ((((p)>>3) ? 
_ex_128(k, 1) : (_ex_128(k, 0))>>(((p) & 0x07)<<3)) & (WCR_OCC_SIZE-1)) ) 248 | #define p_128(v) ( _mm_cvtsi64_si128((uint64_t)(v)) ) 249 | #define e_128(v) ( (uint64_t)_mm_cvtsi128_si64((__m128i)(v)) ) 250 | 251 | 252 | #endif /* #ifndef _ARCH_UTIL_H_INCLUDED */ 253 | /** 254 | * end of arch_util.h 255 | */ 256 | -------------------------------------------------------------------------------- /arch/x86_64_sse41/v16i8.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file v16i8.h 4 | * 5 | * @brief struct and _Generic based vector class implementation 6 | */ 7 | #ifndef _V16I8_H_INCLUDED 8 | #define _V16I8_H_INCLUDED 9 | 10 | /* include header for intel / amd sse2 instruction sets */ 11 | #include 12 | 13 | /* 8bit 32cell */ 14 | typedef struct v16i8_s { 15 | __m128i v1; 16 | } v16i8_t; 17 | 18 | /* expanders (without argument) */ 19 | #define _e_x_v16i8_1(u) 20 | #define _e_x_v16i8_2(u) 21 | 22 | /* expanders (without immediate) */ 23 | #define _e_v_v16i8_1(a) (a).v1 24 | #define _e_v_v16i8_2(a) (a).v1 25 | #define _e_vv_v16i8_1(a, b) (a).v1, (b).v1 26 | #define _e_vv_v16i8_2(a, b) (a).v1, (b).v1 27 | #define _e_vvv_v16i8_1(a, b, c) (a).v1, (b).v1, (c).v1 28 | #define _e_vvv_v16i8_2(a, b, c) (a).v1, (b).v1, (c).v1 29 | 30 | /* expanders with immediate */ 31 | #define _e_i_v16i8_1(imm) (imm) 32 | #define _e_i_v16i8_2(imm) (imm) 33 | #define _e_vi_v16i8_1(a, imm) (a).v1, (imm) 34 | #define _e_vi_v16i8_2(a, imm) (a).v1, (imm) 35 | #define _e_vvi_v16i8_1(a, b, imm) (a).v1, (b).v1, (imm) 36 | #define _e_vvi_v16i8_2(a, b, imm) (a).v1, (b).v1, (imm) 37 | 38 | /* address calculation macros */ 39 | #define _addr_v16i8_1(imm) ( (__m128i *)(imm) ) 40 | #define _addr_v16i8_2(imm) ( (__m128i *)(imm) ) 41 | #define _pv_v16i8(ptr) ( _addr_v16i8_1(ptr) ) 42 | /* expanders with pointers */ 43 | #define _e_p_v16i8_1(ptr) _addr_v16i8_1(ptr) 44 | #define _e_p_v16i8_2(ptr) _addr_v16i8_2(ptr) 45 | #define _e_pv_v16i8_1(ptr, a) 
_addr_v16i8_1(ptr), (a).v1 46 | #define _e_pv_v16i8_2(ptr, a) _addr_v16i8_2(ptr), (a).v1 47 | 48 | /* expand intrinsic name */ 49 | #define _i_v16i8(intrin) _mm_##intrin##_epi8 50 | #define _i_v16i8x(intrin) _mm_##intrin##_si128 51 | 52 | /* apply */ 53 | #define _a_v16i8(intrin, expander, ...) ( \ 54 | (v16i8_t) { \ 55 | _i_v16i8(intrin)(expander##_v16i8_1(__VA_ARGS__)) \ 56 | } \ 57 | ) 58 | #define _a_v16i8x(intrin, expander, ...) ( \ 59 | (v16i8_t) { \ 60 | _i_v16i8x(intrin)(expander##_v16i8_1(__VA_ARGS__)) \ 61 | } \ 62 | ) 63 | #define _a_v16i8xv(intrin, expander, ...) { \ 64 | _i_v16i8x(intrin)(expander##_v16i8_1(__VA_ARGS__)); \ 65 | } 66 | 67 | /* load and store */ 68 | #define _load_v16i8(...) _a_v16i8x(load, _e_p, __VA_ARGS__) 69 | #define _loadu_v16i8(...) _a_v16i8x(loadu, _e_p, __VA_ARGS__) 70 | #define _store_v16i8(...) _a_v16i8xv(store, _e_pv, __VA_ARGS__) 71 | #define _storeu_v16i8(...) _a_v16i8xv(storeu, _e_pv, __VA_ARGS__) 72 | 73 | /* broadcast */ 74 | #define _set_v16i8(...) _a_v16i8(set1, _e_i, __VA_ARGS__) 75 | #define _zero_v16i8() _a_v16i8x(setzero, _e_x, _unused) 76 | 77 | /* swap (reverse) */ 78 | #define _swap_idx_v16i8() ( \ 79 | _mm_set_epi8( \ 80 | 0, 1, 2, 3, 4, 5, 6, 7, \ 81 | 8, 9, 10, 11, 12, 13, 14, 15) \ 82 | ) 83 | #define _swap_v16i8(a) ( \ 84 | (v16i8_t) { \ 85 | _mm_shuffle_epi8((a).v1, _swap_idx_v16i8()) \ 86 | } \ 87 | ) 88 | 89 | /* logics */ 90 | #define _not_v16i8(...) _a_v16i8x(not, _e_v, __VA_ARGS__) 91 | #define _and_v16i8(...) _a_v16i8x(and, _e_vv, __VA_ARGS__) 92 | #define _or_v16i8(...) _a_v16i8x(or, _e_vv, __VA_ARGS__) 93 | #define _xor_v16i8(...) _a_v16i8x(xor, _e_vv, __VA_ARGS__) 94 | #define _andn_v16i8(...) _a_v16i8x(andnot, _e_vv, __VA_ARGS__) 95 | 96 | /* arithmetics */ 97 | #define _add_v16i8(...) _a_v16i8(add, _e_vv, __VA_ARGS__) 98 | #define _sub_v16i8(...) _a_v16i8(sub, _e_vv, __VA_ARGS__) 99 | #define _adds_v16i8(...) _a_v16i8(adds, _e_vv, __VA_ARGS__) 100 | #define _subs_v16i8(...) 
_a_v16i8(subs, _e_vv, __VA_ARGS__) 101 | #define _max_v16i8(...) _a_v16i8(max, _e_vv, __VA_ARGS__) 102 | #define _min_v16i8(...) _a_v16i8(min, _e_vv, __VA_ARGS__) 103 | 104 | /* shuffle */ 105 | #define _shuf_v16i8(...) _a_v16i8(shuffle, _e_vv, __VA_ARGS__) 106 | 107 | /* blend */ 108 | // #define _sel_v16i8(...) _a_v16i8(blendv, _e_vvv, __VA_ARGS__) 109 | 110 | /* compare */ 111 | #define _eq_v16i8(...) _a_v16i8(cmpeq, _e_vv, __VA_ARGS__) 112 | #define _lt_v16i8(...) _a_v16i8(cmplt, _e_vv, __VA_ARGS__) 113 | #define _gt_v16i8(...) _a_v16i8(cmpgt, _e_vv, __VA_ARGS__) 114 | 115 | /* insert and extract */ 116 | #define _ins_v16i8(a, val, imm) { \ 117 | (a).v1 = _i_v16i8(insert)((a).v1, (val), (imm)); \ 118 | } 119 | #define _ext_v16i8(a, imm) ( \ 120 | (int8_t)_i_v16i8(extract)((a).v1, (imm)) \ 121 | ) 122 | 123 | /* shift */ 124 | #define _bsl_v16i8(a, imm) ( \ 125 | (v16i8_t) { \ 126 | _i_v16i8x(slli)((a).v1, (imm)) \ 127 | } \ 128 | ) 129 | #define _bsr_v16i8(a, imm) ( \ 130 | (v16i8_t) { \ 131 | _i_v16i8x(srli)((a).v1, (imm)) \ 132 | } \ 133 | ) 134 | #define _shl_v16i8(a, imm) ( \ 135 | (v16i8_t) { \ 136 | _mm_slli_epi32((a).v1, (imm)) \ 137 | } \ 138 | ) 139 | #define _shr_v16i8(a, imm) ( \ 140 | (v16i8_t) { \ 141 | _mm_srli_epi32((a).v1, (imm)) \ 142 | } \ 143 | ) 144 | #define _sal_v16i8(a, imm) ( \ 145 | (v16i8_t) { \ 146 | _mm_slai_epi32((a).v1, (imm)) \ 147 | } \ 148 | ) 149 | #define _sar_v16i8(a, imm) ( \ 150 | (v16i8_t) { \ 151 | _mm_srai_epi32((a).v1, (imm)) \ 152 | } \ 153 | ) 154 | 155 | /* mask */ 156 | #define _mask_v16i8(a) ( \ 157 | (v16_mask_t) { \ 158 | .m1 = _i_v16i8(movemask)((a).v1) \ 159 | } \ 160 | ) 161 | 162 | /* debug print */ 163 | #ifdef _LOG_H_INCLUDED 164 | #define _print_v16i8(a) { \ 165 | debug("(v16i8_t) %s(%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d)", \ 166 | #a, \ 167 | _ext_v16i8(a, 15), \ 168 | _ext_v16i8(a, 14), \ 169 | _ext_v16i8(a, 13), \ 170 | _ext_v16i8(a, 12), \ 171 | _ext_v16i8(a, 11), \ 172 | 
_ext_v16i8(a, 10), \ 173 | _ext_v16i8(a, 9), \ 174 | _ext_v16i8(a, 8), \ 175 | _ext_v16i8(a, 7), \ 176 | _ext_v16i8(a, 6), \ 177 | _ext_v16i8(a, 5), \ 178 | _ext_v16i8(a, 4), \ 179 | _ext_v16i8(a, 3), \ 180 | _ext_v16i8(a, 2), \ 181 | _ext_v16i8(a, 1), \ 182 | _ext_v16i8(a, 0)); \ 183 | } 184 | #else 185 | #define _print_v16i8(x) ; 186 | #endif 187 | 188 | #endif /* _V16I8_H_INCLUDED */ 189 | /** 190 | * end of v16i8.h 191 | */ 192 | -------------------------------------------------------------------------------- /arch/x86_64_sse41/v2i32.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file v2i32.h 4 | * 5 | * @brief struct and _Generic based vector class implementation 6 | */ 7 | #ifndef _V2I32_H_INCLUDED 8 | #define _V2I32_H_INCLUDED 9 | 10 | /* include header for intel / amd sse2 instruction sets */ 11 | #include 12 | 13 | /* 8bit 32cell */ 14 | typedef struct v2i32_s { 15 | __m128i v1; 16 | } v2i32_t; 17 | 18 | /* expanders (without argument) */ 19 | #define _e_x_v2i32_1(u) 20 | #define _e_x_v2i32_2(u) 21 | 22 | /* expanders (without immediate) */ 23 | #define _e_v_v2i32_1(a) (a).v1 24 | #define _e_v_v2i32_2(a) (a).v1 25 | #define _e_vv_v2i32_1(a, b) (a).v1, (b).v1 26 | #define _e_vv_v2i32_2(a, b) (a).v1, (b).v1 27 | #define _e_vvv_v2i32_1(a, b, c) (a).v1, (b).v1, (c).v1 28 | #define _e_vvv_v2i32_2(a, b, c) (a).v1, (b).v1, (c).v1 29 | 30 | /* expanders with immediate */ 31 | #define _e_i_v2i32_1(imm) (imm) 32 | #define _e_i_v2i32_2(imm) (imm) 33 | #define _e_vi_v2i32_1(a, imm) (a).v1, (imm) 34 | #define _e_vi_v2i32_2(a, imm) (a).v1, (imm) 35 | #define _e_vvi_v2i32_1(a, b, imm) (a).v1, (b).v1, (imm) 36 | #define _e_vvi_v2i32_2(a, b, imm) (a).v1, (b).v1, (imm) 37 | 38 | /* address calculation macros */ 39 | #define _addr_v2i32_1(imm) ( (__m128i *)(imm) ) 40 | #define _addr_v2i32_2(imm) ( (__m128i *)(imm) ) 41 | #define _pv_v2i32(ptr) ( _addr_v2i32_1(ptr) ) 42 | /* expanders with pointers */ 43 | #define 
_e_p_v2i32_1(ptr) _addr_v2i32_1(ptr) 44 | #define _e_p_v2i32_2(ptr) _addr_v2i32_2(ptr) 45 | #define _e_pv_v2i32_1(ptr, a) _addr_v2i32_1(ptr), (a).v1 46 | #define _e_pv_v2i32_2(ptr, a) _addr_v2i32_2(ptr), (a).v1 47 | 48 | /* expand intrinsic name */ 49 | #define _i_v2i32(intrin) _mm_##intrin##_epi32 50 | #define _i_v2i32e(intrin) _mm_##intrin##_epi64 51 | #define _i_v2i32x(intrin) _mm_##intrin##_si128 52 | 53 | /* apply */ 54 | #define _a_v2i32(intrin, expander, ...) ( \ 55 | (v2i32_t) { \ 56 | _i_v2i32(intrin)(expander##_v2i32_1(__VA_ARGS__)) \ 57 | } \ 58 | ) 59 | #define _a_v2i32e(intrin, expander, ...) ( \ 60 | (v2i32_t) { \ 61 | _i_v2i32e(intrin)(expander##_v2i32_1(__VA_ARGS__)) \ 62 | } \ 63 | ) 64 | #define _a_v2i32ev(intrin, expander, ...) { \ 65 | _i_v2i32e(intrin)(expander##_v2i32_1(__VA_ARGS__)); \ 66 | } 67 | #define _a_v2i32x(intrin, expander, ...) ( \ 68 | (v2i32_t) { \ 69 | _i_v2i32x(intrin)(expander##_v2i32_1(__VA_ARGS__)) \ 70 | } \ 71 | ) 72 | #define _a_v2i32xv(intrin, expander, ...) { \ 73 | _i_v2i32x(intrin)(expander##_v2i32_1(__VA_ARGS__)); \ 74 | } 75 | 76 | /* load and store */ 77 | #define _load_v2i32(...) _a_v2i32e(loadl, _e_p, __VA_ARGS__) 78 | #define _loadu_v2i32(...) _a_v2i32e(loadl, _e_p, __VA_ARGS__) 79 | #define _store_v2i32(...) _a_v2i32ev(storel, _e_pv, __VA_ARGS__) 80 | #define _storeu_v2i32(...) _a_v2i32ev(storel, _e_pv, __VA_ARGS__) 81 | 82 | /* broadcast */ 83 | #define _set_v2i32(...) _a_v2i32(set1, _e_i, __VA_ARGS__) 84 | #define _zero_v2i32() _a_v2i32x(setzero, _e_x, _unused) 85 | #define _seta_v2i32(x, y) ( \ 86 | (v2i32_t) { \ 87 | _mm_cvtsi64_si128((((uint64_t)(x))<<32) | ((uint32_t)(y))) \ 88 | } \ 89 | ) 90 | #define _swap_v2i32(x) ( \ 91 | (v2i32_t) { \ 92 | _mm_shuffle_epi32((x).v1, 0x01) \ 93 | } \ 94 | ) 95 | 96 | /* logics */ 97 | #define _not_v2i32(...) _a_v2i32x(not, _e_v, __VA_ARGS__) 98 | #define _and_v2i32(...) _a_v2i32x(and, _e_vv, __VA_ARGS__) 99 | #define _or_v2i32(...) 
_a_v2i32x(or, _e_vv, __VA_ARGS__) 100 | #define _xor_v2i32(...) _a_v2i32x(xor, _e_vv, __VA_ARGS__) 101 | #define _andn_v2i32(...) _a_v2i32x(andnot, _e_vv, __VA_ARGS__) 102 | 103 | /* arithmetics */ 104 | #define _add_v2i32(...) _a_v2i32(add, _e_vv, __VA_ARGS__) 105 | #define _sub_v2i32(...) _a_v2i32(sub, _e_vv, __VA_ARGS__) 106 | #define _adds_v2i32(...) _a_v2i32(adds, _e_vv, __VA_ARGS__) 107 | #define _subs_v2i32(...) _a_v2i32(subs, _e_vv, __VA_ARGS__) 108 | #define _max_v2i32(...) _a_v2i32(max, _e_vv, __VA_ARGS__) 109 | #define _min_v2i32(...) _a_v2i32(min, _e_vv, __VA_ARGS__) 110 | 111 | /* blend: (mask & b) | (~mask & a) */ 112 | #define _sel_v2i32(mask, a, b) ( \ 113 | (v2i64_t) { \ 114 | _mm_blendv_epi8((b).v1, (a).v1, (mask).v1) \ 115 | } \ 116 | ) 117 | 118 | /* compare */ 119 | #define _eq_v2i32(...) _a_v2i32(cmpeq, _e_vv, __VA_ARGS__) 120 | #define _lt_v2i32(...) _a_v2i32(cmplt, _e_vv, __VA_ARGS__) 121 | #define _gt_v2i32(...) _a_v2i32(cmpgt, _e_vv, __VA_ARGS__) 122 | 123 | /* insert and extract */ 124 | #define _ins_v2i32(a, val, imm) { \ 125 | (a).v1 = _i_v2i32((a).v1, (val), (imm)); \ 126 | } 127 | #define _ext_v2i32(a, imm) ( \ 128 | (int32_t)_i_v2i32(extract)((a).v1, (imm)) \ 129 | ) 130 | 131 | /* shift */ 132 | #define _sal_v2i32(a, imm) ( \ 133 | (v2i32_t) {_i_v2i32(slai)((a).v1, (imm))} \ 134 | ) 135 | #define _sar_v2i32(a, imm) ( \ 136 | (v2i32_t) {_i_v2i32(srai)((a).v1, (imm))} \ 137 | ) 138 | 139 | /* mask */ 140 | #define _mask_v2i32(a) ( \ 141 | (uint32_t) (0xff & _mm_movemask_epi8((a).v1)) \ 142 | ) 143 | #define V2I32_MASK_00 ( 0x00 ) 144 | #define V2I32_MASK_01 ( 0x0f ) 145 | #define V2I32_MASK_10 ( 0xf0 ) 146 | #define V2I32_MASK_11 ( 0xff ) 147 | 148 | /* transpose */ 149 | #define _lo_v2i32(a, b) ( \ 150 | (v2i32_t) { \ 151 | _mm_unpacklo_epi32((a).v1, (b).v1) \ 152 | } \ 153 | ) 154 | #define _hi_v2i32(a, b) ( \ 155 | (v2i32_t) { \ 156 | _mm_shuffle_epi32(_mm_unpacklo_epi32((a).v1, (b).v1), 0x0e) \ 157 | } \ 158 | ) 159 | 160 | /* 
debug print */ 161 | #ifdef _LOG_H_INCLUDED 162 | #define _print_v2i32(a) { \ 163 | debug("(v2i32_t) %s(%d, %d)", #a, _ext_v2i32(a, 1), _ext_v2i32(a, 0)); \ 164 | } 165 | #else 166 | #define _print_v2i32(x) ; 167 | #endif 168 | 169 | #endif /* _V2I32_H_INCLUDED */ 170 | /** 171 | * end of v2i32.h 172 | */ 173 | -------------------------------------------------------------------------------- /arch/x86_64_sse41/v2i64.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file v2i64.h 4 | * 5 | * @brief struct and _Generic based vector class implementation 6 | */ 7 | #ifndef _V2I64_H_INCLUDED 8 | #define _V2I64_H_INCLUDED 9 | 10 | /* include header for intel / amd sse2 instruction sets */ 11 | #include 12 | 13 | /* 8bit 32cell */ 14 | typedef struct v2i64_s { 15 | __m128i v1; 16 | } v2i64_t; 17 | 18 | /* expanders (without argument) */ 19 | #define _e_x_v2i64_1(u) 20 | #define _e_x_v2i64_2(u) 21 | 22 | /* expanders (without immediate) */ 23 | #define _e_v_v2i64_1(a) (a).v1 24 | #define _e_v_v2i64_2(a) (a).v1 25 | #define _e_vv_v2i64_1(a, b) (a).v1, (b).v1 26 | #define _e_vv_v2i64_2(a, b) (a).v1, (b).v1 27 | #define _e_vvv_v2i64_1(a, b, c) (a).v1, (b).v1, (c).v1 28 | #define _e_vvv_v2i64_2(a, b, c) (a).v1, (b).v1, (c).v1 29 | 30 | /* expanders with immediate */ 31 | #define _e_i_v2i64_1(imm) (imm) 32 | #define _e_i_v2i64_2(imm) (imm) 33 | #define _e_vi_v2i64_1(a, imm) (a).v1, (imm) 34 | #define _e_vi_v2i64_2(a, imm) (a).v1, (imm) 35 | #define _e_vvi_v2i64_1(a, b, imm) (a).v1, (b).v1, (imm) 36 | #define _e_vvi_v2i64_2(a, b, imm) (a).v1, (b).v1, (imm) 37 | 38 | /* address calculation macros */ 39 | #define _addr_v2i64_1(imm) ( (__m128i *)(imm) ) 40 | #define _addr_v2i64_2(imm) ( (__m128i *)(imm) ) 41 | #define _pv_v2i64(ptr) ( _addr_v2i64_1(ptr) ) 42 | /* expanders with pointers */ 43 | #define _e_p_v2i64_1(ptr) _addr_v2i64_1(ptr) 44 | #define _e_p_v2i64_2(ptr) _addr_v2i64_2(ptr) 45 | #define _e_pv_v2i64_1(ptr, a) 
_addr_v2i64_1(ptr), (a).v1 46 | #define _e_pv_v2i64_2(ptr, a) _addr_v2i64_2(ptr), (a).v1 47 | 48 | /* expand intrinsic name */ 49 | #define _i_v2i64(intrin) _mm_##intrin##_epi64 50 | #define _i_v2i64x(intrin) _mm_##intrin##_si128 51 | 52 | /* apply */ 53 | #define _a_v2i64(intrin, expander, ...) ( \ 54 | (v2i64_t) { \ 55 | _i_v2i64(intrin)(expander##_v2i64_1(__VA_ARGS__)) \ 56 | } \ 57 | ) 58 | #define _a_v2i64x(intrin, expander, ...) ( \ 59 | (v2i64_t) { \ 60 | _i_v2i64x(intrin)(expander##_v2i64_1(__VA_ARGS__)) \ 61 | } \ 62 | ) 63 | #define _a_v2i64xv(intrin, expander, ...) { \ 64 | _i_v2i64x(intrin)(expander##_v2i64_1(__VA_ARGS__)); \ 65 | } 66 | 67 | /* load and store */ 68 | #define _load_v2i64(...) _a_v2i64x(load, _e_p, __VA_ARGS__) 69 | #define _loadu_v2i64(...) _a_v2i64x(loadu, _e_p, __VA_ARGS__) 70 | #define _store_v2i64(...) _a_v2i64xv(store, _e_pv, __VA_ARGS__) 71 | #define _storeu_v2i64(...) _a_v2i64xv(storeu, _e_pv, __VA_ARGS__) 72 | 73 | /* broadcast */ 74 | // #define _set_v2i64(...) _a_v2i64(set1, _e_i, __VA_ARGS__) 75 | #define _set_v2i64(x) ( (v2i64_t) { _mm_set1_epi64x(x) } ) 76 | #define _zero_v2i64() _a_v2i64x(setzero, _e_x, _unused) 77 | #define _seta_v2i64(x, y) ( (v2i64_t) { _mm_set_epi64x(x, y) } ) 78 | #define _swap_v2i64(x) ( \ 79 | (v2i32_t) { \ 80 | _mm_shuffle_epi32((x).v1, 0x1b) \ 81 | } \ 82 | ) 83 | 84 | /* logics */ 85 | #define _not_v2i64(...) _a_v2i64x(not, _e_v, __VA_ARGS__) 86 | #define _and_v2i64(...) _a_v2i64x(and, _e_vv, __VA_ARGS__) 87 | #define _or_v2i64(...) _a_v2i64x(or, _e_vv, __VA_ARGS__) 88 | #define _xor_v2i64(...) _a_v2i64x(xor, _e_vv, __VA_ARGS__) 89 | #define _andn_v2i64(...) _a_v2i64x(andnot, _e_vv, __VA_ARGS__) 90 | 91 | /* arithmetics */ 92 | #define _add_v2i64(...) _a_v2i64(add, _e_vv, __VA_ARGS__) 93 | #define _sub_v2i64(...) _a_v2i64(sub, _e_vv, __VA_ARGS__) 94 | #define _adds_v2i64(...) _a_v2i64(adds, _e_vv, __VA_ARGS__) 95 | #define _subs_v2i64(...) 
_a_v2i64(subs, _e_vv, __VA_ARGS__) 96 | // #define _max_v2i64(...) _a_v2i64(max, _e_vv, __VA_ARGS__) 97 | // #define _min_v2i64(...) _a_v2i64(min, _e_vv, __VA_ARGS__) 98 | #define _max_v2i64(a, b) ( (v2i64_t) { _mm_max_epi32(a.v1, b.v1) } ) 99 | #define _min_v2i64(a, b) ( (v2i64_t) { _mm_min_epi32(a.v1, b.v1) } ) 100 | 101 | /* shuffle */ 102 | // #define _shuf_v2i64(...) _a_v2i64(shuffle, _e_vv, __VA_ARGS__) 103 | 104 | /* blend */ 105 | #define _sel_v2i64(mask, a, b) ( \ 106 | (v2i64_t) { \ 107 | _mm_blendv_epi8((b).v1, (a).v1, (mask).v1) \ 108 | } \ 109 | ) 110 | 111 | /* compare */ 112 | #define _eq_v2i64(...) _a_v2i64(cmpeq, _e_vv, __VA_ARGS__) 113 | #define _lt_v2i64(...) _a_v2i64(cmplt, _e_vv, __VA_ARGS__) 114 | #define _gt_v2i64(...) _a_v2i64(cmpgt, _e_vv, __VA_ARGS__) 115 | 116 | /* insert and extract */ 117 | #define _ins_v2i64(a, val, imm) { \ 118 | (a).v1 = _i_v2i64((a).v1, (val), (imm)); \ 119 | } 120 | #define _ext_v2i64(a, imm) ( \ 121 | (int64_t)_i_v2i64(extract)((a).v1, (imm)) \ 122 | ) 123 | 124 | /* mask */ 125 | #define _mask_v2i64(a) ( \ 126 | (uint32_t) (_mm_movemask_epi8((a).v1)) \ 127 | ) 128 | #define V2I64_MASK_00 ( 0x0000 ) 129 | #define V2I64_MASK_01 ( 0x00ff ) 130 | #define V2I64_MASK_10 ( 0xff00 ) 131 | #define V2I64_MASK_11 ( 0xffff ) 132 | 133 | /* convert */ 134 | #define _cvt_v2i32_v2i64(a) ( \ 135 | (v2i64_t) { \ 136 | _mm_cvtepi32_epi64((a).v1) \ 137 | } \ 138 | ) 139 | 140 | /* transpose */ 141 | #define _lo_v2i64(a, b) ( \ 142 | (v2i64_t) { \ 143 | _mm_unpacklo_epi64((a).v1, (b).v1) \ 144 | } \ 145 | ) 146 | #define _hi_v2i64(a, b) ( \ 147 | (v2i64_t) { \ 148 | _mm_unpackhi_epi64((a).v1, (b).v1) \ 149 | } \ 150 | ) 151 | 152 | /* debug print */ 153 | #ifdef _LOG_H_INCLUDED 154 | #define _print_v2i64(a) { \ 155 | debug("(v2i64_t) %s(%lld, %lld)", #a, _ext_v2i64(a, 1), _ext_v2i64(a, 0)); \ 156 | } 157 | #else 158 | #define _print_v2i64(x) ; 159 | #endif 160 | 161 | #endif /* _V2I64_H_INCLUDED */ 162 | /** 163 | * end of v2i64.h 
164 | */ 165 | -------------------------------------------------------------------------------- /arch/x86_64_sse41/v32i16.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file v32i16.h 4 | * 5 | * @brief struct and _Generic based vector class implementation 6 | */ 7 | #ifndef _V32I16_H_INCLUDED 8 | #define _V32I16_H_INCLUDED 9 | 10 | /* include header for intel / amd sse2 instruction sets */ 11 | #include 12 | 13 | /* 8bit 32cell */ 14 | typedef struct v32i16_s { 15 | __m128i v1; 16 | __m128i v2; 17 | __m128i v3; 18 | __m128i v4; 19 | } v32i16_t; 20 | 21 | /* expanders (without argument) */ 22 | #define _e_x_v32i16_1(u) 23 | #define _e_x_v32i16_2(u) 24 | #define _e_x_v32i16_3(u) 25 | #define _e_x_v32i16_4(u) 26 | 27 | /* expanders (without immediate) */ 28 | #define _e_v_v32i16_1(a) (a).v1 29 | #define _e_v_v32i16_2(a) (a).v2 30 | #define _e_v_v32i16_3(a) (a).v3 31 | #define _e_v_v32i16_4(a) (a).v4 32 | #define _e_vv_v32i16_1(a, b) (a).v1, (b).v1 33 | #define _e_vv_v32i16_2(a, b) (a).v2, (b).v2 34 | #define _e_vv_v32i16_3(a, b) (a).v3, (b).v3 35 | #define _e_vv_v32i16_4(a, b) (a).v4, (b).v4 36 | #define _e_vvv_v32i16_1(a, b, c) (a).v1, (b).v1, (c).v1 37 | #define _e_vvv_v32i16_2(a, b, c) (a).v2, (b).v2, (c).v2 38 | #define _e_vvv_v32i16_3(a, b, c) (a).v3, (b).v3, (c).v3 39 | #define _e_vvv_v32i16_4(a, b, c) (a).v4, (b).v4, (c).v4 40 | 41 | /* expanders with immediate */ 42 | #define _e_i_v32i16_1(imm) (imm) 43 | #define _e_i_v32i16_2(imm) (imm) 44 | #define _e_i_v32i16_3(imm) (imm) 45 | #define _e_i_v32i16_4(imm) (imm) 46 | #define _e_vi_v32i16_1(a, imm) (a).v1, (imm) 47 | #define _e_vi_v32i16_2(a, imm) (a).v2, (imm) 48 | #define _e_vi_v32i16_3(a, imm) (a).v3, (imm) 49 | #define _e_vi_v32i16_4(a, imm) (a).v4, (imm) 50 | #define _e_vvi_v32i16_1(a, b, imm) (a).v1, (b).v1, (imm) 51 | #define _e_vvi_v32i16_2(a, b, imm) (a).v2, (b).v2, (imm) 52 | #define _e_vvi_v32i16_3(a, b, imm) (a).v3, (b).v3, (imm) 53 | #define 
_e_vvi_v32i16_4(a, b, imm) (a).v4, (b).v4, (imm) 54 | 55 | /* address calculation macros */ 56 | #define _addr_v32i16_1(imm) ( (__m128i *)(imm) ) 57 | #define _addr_v32i16_2(imm) ( (__m128i *)(imm) + 1 ) 58 | #define _addr_v32i16_3(imm) ( (__m128i *)(imm) + 2 ) 59 | #define _addr_v32i16_4(imm) ( (__m128i *)(imm) + 3 ) 60 | #define _pv_v32i16(ptr) ( _addr_v32i16_1(ptr) ) 61 | /* expanders with pointers */ 62 | #define _e_p_v32i16_1(ptr) _addr_v32i16_1(ptr) 63 | #define _e_p_v32i16_2(ptr) _addr_v32i16_2(ptr) 64 | #define _e_p_v32i16_3(ptr) _addr_v32i16_3(ptr) 65 | #define _e_p_v32i16_4(ptr) _addr_v32i16_4(ptr) 66 | #define _e_pv_v32i16_1(ptr, a) _addr_v32i16_1(ptr), (a).v1 67 | #define _e_pv_v32i16_2(ptr, a) _addr_v32i16_2(ptr), (a).v2 68 | #define _e_pv_v32i16_3(ptr, a) _addr_v32i16_3(ptr), (a).v3 69 | #define _e_pv_v32i16_4(ptr, a) _addr_v32i16_4(ptr), (a).v4 70 | 71 | /* expand intrinsic name */ 72 | #define _i_v32i16(intrin) _mm_##intrin##_epi16 73 | #define _i_v32i16x(intrin) _mm_##intrin##_si128 74 | 75 | /* apply */ 76 | #define _a_v32i16(intrin, expander, ...) ( \ 77 | (v32i16_t) { \ 78 | _i_v32i16(intrin)(expander##_v32i16_1(__VA_ARGS__)), \ 79 | _i_v32i16(intrin)(expander##_v32i16_2(__VA_ARGS__)), \ 80 | _i_v32i16(intrin)(expander##_v32i16_3(__VA_ARGS__)), \ 81 | _i_v32i16(intrin)(expander##_v32i16_4(__VA_ARGS__)) \ 82 | } \ 83 | ) 84 | #define _a_v32i16x(intrin, expander, ...) ( \ 85 | (v32i16_t) { \ 86 | _i_v32i16x(intrin)(expander##_v32i16_1(__VA_ARGS__)), \ 87 | _i_v32i16x(intrin)(expander##_v32i16_2(__VA_ARGS__)), \ 88 | _i_v32i16x(intrin)(expander##_v32i16_3(__VA_ARGS__)), \ 89 | _i_v32i16x(intrin)(expander##_v32i16_4(__VA_ARGS__)) \ 90 | } \ 91 | ) 92 | #define _a_v32i16xv(intrin, expander, ...) 
{ \ 93 | _i_v32i16x(intrin)(expander##_v32i16_1(__VA_ARGS__)); \ 94 | _i_v32i16x(intrin)(expander##_v32i16_2(__VA_ARGS__)); \ 95 | _i_v32i16x(intrin)(expander##_v32i16_3(__VA_ARGS__)); \ 96 | _i_v32i16x(intrin)(expander##_v32i16_4(__VA_ARGS__)); \ 97 | } 98 | 99 | /* load and store */ 100 | #define _load_v32i16(...) _a_v32i16x(load, _e_p, __VA_ARGS__) 101 | #define _loadu_v32i16(...) _a_v32i16x(loadu, _e_p, __VA_ARGS__) 102 | #define _store_v32i16(...) _a_v32i16xv(store, _e_pv, __VA_ARGS__) 103 | #define _storeu_v32i16(...) _a_v32i16xv(storeu, _e_pv, __VA_ARGS__) 104 | 105 | /* broadcast */ 106 | #define _set_v32i16(...) _a_v32i16(set1, _e_i, __VA_ARGS__) 107 | #define _zero_v32i16() _a_v32i16x(setzero, _e_x, _unused) 108 | 109 | /* logics */ 110 | #define _not_v32i16(...) _a_v32i16x(not, _e_v, __VA_ARGS__) 111 | #define _and_v32i16(...) _a_v32i16x(and, _e_vv, __VA_ARGS__) 112 | #define _or_v32i16(...) _a_v32i16x(or, _e_vv, __VA_ARGS__) 113 | #define _xor_v32i16(...) _a_v32i16x(xor, _e_vv, __VA_ARGS__) 114 | #define _andn_v32i16(...) _a_v32i16x(andnot, _e_vv, __VA_ARGS__) 115 | 116 | /* arithmetics */ 117 | #define _add_v32i16(...) _a_v32i16(add, _e_vv, __VA_ARGS__) 118 | #define _sub_v32i16(...) _a_v32i16(sub, _e_vv, __VA_ARGS__) 119 | #define _adds_v32i16(...) _a_v32i16(adds, _e_vv, __VA_ARGS__) 120 | #define _subs_v32i16(...) _a_v32i16(subs, _e_vv, __VA_ARGS__) 121 | #define _max_v32i16(...) _a_v32i16(max, _e_vv, __VA_ARGS__) 122 | #define _min_v32i16(...) _a_v32i16(min, _e_vv, __VA_ARGS__) 123 | 124 | /* compare */ 125 | #define _eq_v32i16(...) _a_v32i16(cmpeq, _e_vv, __VA_ARGS__) 126 | #define _lt_v32i16(...) _a_v32i16(cmplt, _e_vv, __VA_ARGS__) 127 | #define _gt_v32i16(...) 
_a_v32i16(cmpgt, _e_vv, __VA_ARGS__) 128 | 129 | /* insert and extract */ 130 | #define _ins_v32i16(a, val, imm) { \ 131 | if((imm) < sizeof(__m128i)/sizeof(int16_t)) { \ 132 | (a).v1 = _i_v32i8(insert)((a).v1, (val), (imm)); \ 133 | } else if((imm) < 2*sizeof(__m128i)/sizeof(int16_t)) { \ 134 | (a).v2 = _i_v32i8(insert)((a).v2, (val), (imm) - sizeof(__m128i)/sizeof(int16_t)); \ 135 | } else if((imm) < 3*sizeof(__m128i)/sizeof(int16_t)) { \ 136 | (a).v3 = _i_v32i8(insert)((a).v3, (val), (imm) - 2*sizeof(__m128i)/sizeof(int16_t)); \ 137 | } else { \ 138 | (a).v4 = _i_v32i8(insert)((a).v4, (val), (imm) - 3*sizeof(__m128i)/sizeof(int16_t)); \ 139 | } \ 140 | } 141 | #define _ext_v32i16(a, imm) ( \ 142 | (int16_t)(((imm) < sizeof(__m128i)/sizeof(int16_t)) \ 143 | ? _i_v32i16(extract)((a).v1, (imm)) \ 144 | : (((imm) < 2*sizeof(__m128i)/sizeof(int16_t)) \ 145 | ? _i_v32i16(extract)((a).v2, (imm) - sizeof(__m128i)/sizeof(int16_t)) \ 146 | : (((imm) < 3*sizeof(__m128i)/sizeof(int16_t)) \ 147 | ? 
_i_v32i16(extract)((a).v3, (imm) - 2*sizeof(__m128i)/sizeof(int16_t)) \ 148 | : _i_v32i16(extract)((a).v4, (imm) - 3*sizeof(__m128i)/sizeof(int16_t))))) \ 149 | ) 150 | 151 | /* mask */ 152 | #define _mask_v32i16(a) ( \ 153 | (v32_mask_t) { \ 154 | .m1 = _mm_movemask_epi8( \ 155 | _mm_packs_epi16((a).v1, (a).v2)), \ 156 | .m2 = _mm_movemask_epi8( \ 157 | _mm_packs_epi16((a).v3, (a).v4)) \ 158 | } \ 159 | ) 160 | 161 | /* horizontal max (reduction max) */ 162 | #define _hmax_v32i16(a) ({ \ 163 | __m128i _vmax = _mm_max_epi16( \ 164 | _mm_max_epi16((a).v1, (a).v2), \ 165 | _mm_max_epi16((a).v3, (a).v4)); \ 166 | _vmax = _mm_max_epi16(_vmax, \ 167 | _mm_srli_si128(_vmax, 8)); \ 168 | _vmax = _mm_max_epi16(_vmax, \ 169 | _mm_srli_si128(_vmax, 4)); \ 170 | _vmax = _mm_max_epi16(_vmax, \ 171 | _mm_srli_si128(_vmax, 2)); \ 172 | (int16_t)_mm_extract_epi16(_vmax, 0); \ 173 | }) 174 | 175 | #define _cvt_v32i8_v32i16(a) ( \ 176 | (v32i16_t) { \ 177 | _mm_cvtepi8_epi16((a).v1), \ 178 | _mm_cvtepi8_epi16(_mm_srli_si128((a).v1, 8)), \ 179 | _mm_cvtepi8_epi16((a).v2), \ 180 | _mm_cvtepi8_epi16(_mm_srli_si128((a).v2, 8)) \ 181 | } \ 182 | ) 183 | 184 | /* debug print */ 185 | #ifdef _LOG_H_INCLUDED 186 | #define _print_v32i16(a) { \ 187 | debug("(v32i16_t) %s(%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, " \ 188 | "%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d)", \ 189 | #a, \ 190 | _ext_v32i16(a, 31), \ 191 | _ext_v32i16(a, 30), \ 192 | _ext_v32i16(a, 29), \ 193 | _ext_v32i16(a, 28), \ 194 | _ext_v32i16(a, 27), \ 195 | _ext_v32i16(a, 26), \ 196 | _ext_v32i16(a, 25), \ 197 | _ext_v32i16(a, 24), \ 198 | _ext_v32i16(a, 23), \ 199 | _ext_v32i16(a, 22), \ 200 | _ext_v32i16(a, 21), \ 201 | _ext_v32i16(a, 20), \ 202 | _ext_v32i16(a, 19), \ 203 | _ext_v32i16(a, 18), \ 204 | _ext_v32i16(a, 17), \ 205 | _ext_v32i16(a, 16), \ 206 | _ext_v32i16(a, 15), \ 207 | _ext_v32i16(a, 14), \ 208 | _ext_v32i16(a, 13), \ 209 | _ext_v32i16(a, 12), \ 210 | 
_ext_v32i16(a, 11), \ 211 | _ext_v32i16(a, 10), \ 212 | _ext_v32i16(a, 9), \ 213 | _ext_v32i16(a, 8), \ 214 | _ext_v32i16(a, 7), \ 215 | _ext_v32i16(a, 6), \ 216 | _ext_v32i16(a, 5), \ 217 | _ext_v32i16(a, 4), \ 218 | _ext_v32i16(a, 3), \ 219 | _ext_v32i16(a, 2), \ 220 | _ext_v32i16(a, 1), \ 221 | _ext_v32i16(a, 0)); \ 222 | } 223 | #else 224 | #define _print_v32i16(x) ; 225 | #endif 226 | 227 | #endif /* _V32I16_H_INCLUDED */ 228 | /** 229 | * end of v32i16.h 230 | */ 231 | -------------------------------------------------------------------------------- /arch/x86_64_sse41/v32i8.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file v32i8.h 4 | * 5 | * @brief struct and _Generic based vector class implementation 6 | */ 7 | #ifndef _V32I8_H_INCLUDED 8 | #define _V32I8_H_INCLUDED 9 | 10 | /* include header for intel / amd sse2 instruction sets */ 11 | #include 12 | 13 | /* 8bit 32cell */ 14 | typedef struct v32i8_s { 15 | __m128i v1; 16 | __m128i v2; 17 | } v32i8_t; 18 | 19 | /* expanders (without argument) */ 20 | #define _e_x_v32i8_1(u) 21 | #define _e_x_v32i8_2(u) 22 | 23 | /* expanders (without immediate) */ 24 | #define _e_v_v32i8_1(a) (a).v1 25 | #define _e_v_v32i8_2(a) (a).v2 26 | #define _e_vv_v32i8_1(a, b) (a).v1, (b).v1 27 | #define _e_vv_v32i8_2(a, b) (a).v2, (b).v2 28 | #define _e_vvv_v32i8_1(a, b, c) (a).v1, (b).v1, (c).v1 29 | #define _e_vvv_v32i8_2(a, b, c) (a).v2, (b).v2, (c).v2 30 | 31 | /* expanders with immediate */ 32 | #define _e_i_v32i8_1(imm) (imm) 33 | #define _e_i_v32i8_2(imm) (imm) 34 | #define _e_vi_v32i8_1(a, imm) (a).v1, (imm) 35 | #define _e_vi_v32i8_2(a, imm) (a).v2, (imm) 36 | #define _e_vvi_v32i8_1(a, b, imm) (a).v1, (b).v1, (imm) 37 | #define _e_vvi_v32i8_2(a, b, imm) (a).v2, (b).v2, (imm) 38 | 39 | /* address calculation macros */ 40 | #define _addr_v32i8_1(imm) ( (__m128i *)(imm) ) 41 | #define _addr_v32i8_2(imm) ( (__m128i *)(imm) + 1 ) 42 | #define _pv_v32i8(ptr) ( 
_addr_v32i8_1(ptr) ) 43 | /* expanders with pointers */ 44 | #define _e_p_v32i8_1(ptr) _addr_v32i8_1(ptr) 45 | #define _e_p_v32i8_2(ptr) _addr_v32i8_2(ptr) 46 | #define _e_pv_v32i8_1(ptr, a) _addr_v32i8_1(ptr), (a).v1 47 | #define _e_pv_v32i8_2(ptr, a) _addr_v32i8_2(ptr), (a).v2 48 | 49 | /* expand intrinsic name */ 50 | #define _i_v32i8(intrin) _mm_##intrin##_epi8 51 | #define _i_v32i8x(intrin) _mm_##intrin##_si128 52 | 53 | /* apply */ 54 | #define _a_v32i8(intrin, expander, ...) ( \ 55 | (v32i8_t) { \ 56 | _i_v32i8(intrin)(expander##_v32i8_1(__VA_ARGS__)), \ 57 | _i_v32i8(intrin)(expander##_v32i8_2(__VA_ARGS__)) \ 58 | } \ 59 | ) 60 | #define _a_v32i8x(intrin, expander, ...) ( \ 61 | (v32i8_t) { \ 62 | _i_v32i8x(intrin)(expander##_v32i8_1(__VA_ARGS__)), \ 63 | _i_v32i8x(intrin)(expander##_v32i8_2(__VA_ARGS__)) \ 64 | } \ 65 | ) 66 | #define _a_v32i8xv(intrin, expander, ...) { \ 67 | _i_v32i8x(intrin)(expander##_v32i8_1(__VA_ARGS__)); \ 68 | _i_v32i8x(intrin)(expander##_v32i8_2(__VA_ARGS__)); \ 69 | } 70 | 71 | /* load and store */ 72 | #define _load_v32i8(...) _a_v32i8x(load, _e_p, __VA_ARGS__) 73 | #define _loadu_v32i8(...) _a_v32i8x(loadu, _e_p, __VA_ARGS__) 74 | #define _store_v32i8(...) _a_v32i8xv(store, _e_pv, __VA_ARGS__) 75 | #define _storeu_v32i8(...) _a_v32i8xv(storeu, _e_pv, __VA_ARGS__) 76 | 77 | /* broadcast */ 78 | #define _set_v32i8(...) _a_v32i8(set1, _e_i, __VA_ARGS__) 79 | #define _zero_v32i8() _a_v32i8x(setzero, _e_x, _unused) 80 | 81 | /* swap (reverse) */ 82 | #define _swap_idx_v32i8() ( \ 83 | _mm_set_epi8( \ 84 | 0, 1, 2, 3, 4, 5, 6, 7, \ 85 | 8, 9, 10, 11, 12, 13, 14, 15) \ 86 | ) 87 | #define _swap_v32i8(a) ( \ 88 | (v32i8_t) { \ 89 | _mm_shuffle_epi8((a).v2, _swap_idx_v32i8()), \ 90 | _mm_shuffle_epi8((a).v1, _swap_idx_v32i8()) \ 91 | } \ 92 | ) 93 | 94 | /* logics */ 95 | #define _not_v32i8(...) _a_v32i8x(not, _e_v, __VA_ARGS__) 96 | #define _and_v32i8(...) _a_v32i8x(and, _e_vv, __VA_ARGS__) 97 | #define _or_v32i8(...) 
_a_v32i8x(or, _e_vv, __VA_ARGS__) 98 | #define _xor_v32i8(...) _a_v32i8x(xor, _e_vv, __VA_ARGS__) 99 | #define _andn_v32i8(...) _a_v32i8x(andnot, _e_vv, __VA_ARGS__) 100 | 101 | /* arithmetics */ 102 | #define _add_v32i8(...) _a_v32i8(add, _e_vv, __VA_ARGS__) 103 | #define _sub_v32i8(...) _a_v32i8(sub, _e_vv, __VA_ARGS__) 104 | #define _adds_v32i8(...) _a_v32i8(adds, _e_vv, __VA_ARGS__) 105 | #define _subs_v32i8(...) _a_v32i8(subs, _e_vv, __VA_ARGS__) 106 | #define _max_v32i8(...) _a_v32i8(max, _e_vv, __VA_ARGS__) 107 | #define _min_v32i8(...) _a_v32i8(min, _e_vv, __VA_ARGS__) 108 | 109 | /* shuffle */ 110 | #define _shuf_v32i8(...) _a_v32i8(shuffle, _e_vv, __VA_ARGS__) 111 | 112 | /* blend */ 113 | // #define _sel_v32i8(...) _a_v32i8(blendv, _e_vvv, __VA_ARGS__) 114 | 115 | /* compare */ 116 | #define _eq_v32i8(...) _a_v32i8(cmpeq, _e_vv, __VA_ARGS__) 117 | #define _lt_v32i8(...) _a_v32i8(cmplt, _e_vv, __VA_ARGS__) 118 | #define _gt_v32i8(...) _a_v32i8(cmpgt, _e_vv, __VA_ARGS__) 119 | 120 | /* insert and extract */ 121 | #define _ins_v32i8(a, val, imm) { \ 122 | if((imm) < sizeof(__m128i)) { \ 123 | (a).v1 = _i_v32i8(insert)((a).v1, (val), (imm)); \ 124 | } else { \ 125 | (a).v2 = _i_v32i8(insert)((a).v2, (val), (imm) - sizeof(__m128i)); \ 126 | } \ 127 | } 128 | #define _ext_v32i8(a, imm) ( \ 129 | (int8_t)(((imm) < sizeof(__m128i)) \ 130 | ? 
_i_v32i8(extract)((a).v1, (imm)) \ 131 | : _i_v32i8(extract)((a).v2, (imm) - sizeof(__m128i))) \ 132 | ) 133 | 134 | /* shift */ 135 | #define _bsl_v32i8(a, imm) ( \ 136 | (v32i8_t) { \ 137 | _i_v32i8x(slli)((a).v1, (imm)), \ 138 | _i_v32i8(alignr)((a).v2, (a).v1, sizeof(__m128i) - (imm)) \ 139 | } \ 140 | ) 141 | #define _bsr_v32i8(a, imm) ( \ 142 | (v32i8_t) { \ 143 | _i_v32i8(alignr)((a).v2, (a).v1, (imm)), \ 144 | _i_v32i8x(srli)((a).v2, (imm)) \ 145 | } \ 146 | ) 147 | #define _shl_v32i8(a, imm) ( \ 148 | (v32i8_t) { \ 149 | _mm_slli_epi32((a).v1, (imm)), \ 150 | _mm_slli_epi32((a).v2, (imm)) \ 151 | } \ 152 | ) 153 | #define _shr_v32i8(a, imm) ( \ 154 | (v32i8_t) { \ 155 | _mm_srli_epi32((a).v1, (imm)), \ 156 | _mm_srli_epi32((a).v2, (imm)) \ 157 | } \ 158 | ) 159 | #define _sal_v32i8(a, imm) ( \ 160 | (v32i8_t) { \ 161 | _mm_slai_epi32((a).v1, (imm)), \ 162 | _mm_slai_epi32((a).v2, (imm)) \ 163 | } \ 164 | ) 165 | #define _sar_v32i8(a, imm) ( \ 166 | (v32i8_t) { \ 167 | _mm_srai_epi32((a).v1, (imm)), \ 168 | _mm_srai_epi32((a).v2, (imm)) \ 169 | } \ 170 | ) 171 | 172 | /* mask */ 173 | #define _mask_v32i8(a) ( \ 174 | (v32_mask_t) { \ 175 | .m1 = _i_v32i8(movemask)((a).v1), \ 176 | .m2 = _i_v32i8(movemask)((a).v2) \ 177 | } \ 178 | ) 179 | 180 | /* debug print */ 181 | #ifdef _LOG_H_INCLUDED 182 | #define _print_v32i8(a) { \ 183 | debug("(v32i8_t) %s(%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, " \ 184 | "%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d)", \ 185 | #a, \ 186 | _ext_v32i8(a, 31), \ 187 | _ext_v32i8(a, 30), \ 188 | _ext_v32i8(a, 29), \ 189 | _ext_v32i8(a, 28), \ 190 | _ext_v32i8(a, 27), \ 191 | _ext_v32i8(a, 26), \ 192 | _ext_v32i8(a, 25), \ 193 | _ext_v32i8(a, 24), \ 194 | _ext_v32i8(a, 23), \ 195 | _ext_v32i8(a, 22), \ 196 | _ext_v32i8(a, 21), \ 197 | _ext_v32i8(a, 20), \ 198 | _ext_v32i8(a, 19), \ 199 | _ext_v32i8(a, 18), \ 200 | _ext_v32i8(a, 17), \ 201 | _ext_v32i8(a, 16), \ 202 | _ext_v32i8(a, 15), \ 203 | 
_ext_v32i8(a, 14), \ 204 | _ext_v32i8(a, 13), \ 205 | _ext_v32i8(a, 12), \ 206 | _ext_v32i8(a, 11), \ 207 | _ext_v32i8(a, 10), \ 208 | _ext_v32i8(a, 9), \ 209 | _ext_v32i8(a, 8), \ 210 | _ext_v32i8(a, 7), \ 211 | _ext_v32i8(a, 6), \ 212 | _ext_v32i8(a, 5), \ 213 | _ext_v32i8(a, 4), \ 214 | _ext_v32i8(a, 3), \ 215 | _ext_v32i8(a, 2), \ 216 | _ext_v32i8(a, 1), \ 217 | _ext_v32i8(a, 0)); \ 218 | } 219 | #else 220 | #define _print_v32i8(x) ; 221 | #endif 222 | 223 | #endif /* _V32I8_H_INCLUDED */ 224 | /** 225 | * end of v32i8.h 226 | */ 227 | -------------------------------------------------------------------------------- /arch/x86_64_sse41/vector.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file vector.h 4 | * 5 | * @brief header for various vector (SIMD) macros 6 | */ 7 | #ifndef _VECTOR_H_INCLUDED 8 | #define _VECTOR_H_INCLUDED 9 | 10 | /** 11 | * @struct v32_mask_s 12 | * 13 | * @brief common 32cell-wide mask type 14 | */ 15 | typedef struct v32_mask_s { 16 | uint16_t m1; 17 | uint16_t m2; 18 | } v32_mask_t; 19 | typedef struct v32_mask_s v32i8_mask_t; 20 | 21 | /** 22 | * @union v32_mask_u 23 | */ 24 | typedef union v32_mask_u { 25 | v32_mask_t mask; 26 | uint32_t all; 27 | } v32_masku_t; 28 | typedef union v32_mask_u v32i8_masku_t; 29 | 30 | /** 31 | * @struct v16_mask_s 32 | * 33 | * @brief common 16cell-wide mask type 34 | */ 35 | typedef struct v16_mask_s { 36 | uint16_t m1; 37 | } v16_mask_t; 38 | typedef struct v16_mask_s v16i8_mask_t; 39 | 40 | /** 41 | * @union v16_mask_u 42 | */ 43 | typedef union v16_mask_u { 44 | v16_mask_t mask; 45 | uint16_t all; 46 | } v16_masku_t; 47 | typedef union v16_mask_u v16i8_masku_t; 48 | 49 | /** 50 | * abstract vector types 51 | * 52 | * v2i32_t, v2i64_t for pair of 32-bit, 64-bit signed integers. Mainly for 53 | * a pair of coordinates. Conversion between the two types are provided. 54 | * 55 | * v16i8_t is a unit vector for substitution matrices and gap vectors. 
56 | * Broadcast to v16i8_t and v32i8_t are provided. 57 | * 58 | * v32i8_t is a unit vector for small differences in banded alignment. v16i8_t 59 | * vector can be broadcasted to high and low 16 elements of v32i8_t. It can 60 | * also expanded to v32i16_t. 61 | * 62 | * v32i16_t is for middle differences in banded alignment. It can be converted 63 | * from v32i8_t 64 | */ 65 | #include "v2i32.h" 66 | #include "v2i64.h" 67 | #include "v16i8.h" 68 | #include "v32i8.h" 69 | #include "v32i16.h" 70 | 71 | /* conversion and cast between vector types */ 72 | #define _from_v16i8_v32i8(x) (v32i8_t){ (x).v1, (x).v1 } 73 | #define _from_v32i8_v32i8(x) (v32i8_t){ (x).v1, (x).v2 } 74 | #define _from_v16i8_v16i8(x) (v16i8_t){ (x).v1 } 75 | #define _from_v32i8_v16i8(x) (v16i8_t){ (x).v1 } 76 | 77 | /* inversed alias */ 78 | #define _to_v32i8_v16i8(x) (v32i8_t){ (x).v1, (x).v1 } 79 | #define _to_v32i8_v32i8(x) (v32i8_t){ (x).v1, (x).v2 } 80 | #define _to_v16i8_v16i8(x) (v16i8_t){ (x).v1 } 81 | #define _to_v16i8_v32i8(x) (v16i8_t){ (x).v1 } 82 | 83 | #define _cast_v2i64_v2i32(x) (v2i32_t){ (x).v1 } 84 | #define _cast_v2i32_v2i64(x) (v2i64_t){ (x).v1 } 85 | 86 | #endif /* _VECTOR_H_INCLUDED */ 87 | /** 88 | * end of vector.h 89 | */ 90 | -------------------------------------------------------------------------------- /aw.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file aw.h 4 | * 5 | * @brief alignment writer 6 | */ 7 | #ifndef _AW_H_INCLUDED 8 | #define _AW_H_INCLUDED 9 | 10 | #include 11 | 12 | 13 | /** 14 | * @enum aw_file_format 15 | */ 16 | enum aw_file_format { 17 | AW_SAM = 16, 18 | AW_BAM = 17, 19 | AW_MAF = 18, 20 | AW_GPA = 19 /* graphical pairwise alignment format */ 21 | }; 22 | 23 | /** 24 | * @enum aw_sam_flags 25 | */ 26 | enum aw_sam_flags { 27 | /** flags written to the sam file */ 28 | SAM_MULTIPLE_SEGMENTS = 0x0001, 29 | SAM_PROPERLY_ALIGNED = 0x0002, 30 | SAM_UNMAPPED = 0x0004, 31 | SAM_NEXT_UNMAPPED = 
0x0008, 32 | SAM_REVCOMP = 0x0010, 33 | SAM_NEXT_REVCOMP = 0x0020, 34 | SAM_FIRST_SEGMENT = 0x0040, 35 | SAM_LAST_SEGMENT = 0x0080, 36 | SAM_SECONDARY = 0x0100, 37 | SAM_SUPPLEMENTARY = 0x0800 38 | }; 39 | 40 | /** 41 | * @struct aw_params_s 42 | */ 43 | struct aw_params_s { 44 | uint8_t format; 45 | char clip; 46 | uint8_t pad[2]; 47 | uint32_t program_id; 48 | char const *program_name; 49 | char const *command; 50 | char const *name_prefix; 51 | }; 52 | typedef struct aw_params_s aw_params_t; 53 | #define AW_PARAMS(...) ( &((aw_params_t const){ __VA_ARGS__ }) ) 54 | 55 | /** 56 | * @type aw_t 57 | */ 58 | typedef struct aw_s aw_t; 59 | 60 | /** 61 | * @fn aw_init 62 | * 63 | * @brief create a alignment writer context 64 | */ 65 | aw_t *aw_init( 66 | char const *path, 67 | gref_idx_t const *idx, 68 | aw_params_t const *params); 69 | 70 | /** 71 | * @fn aw_clean 72 | */ 73 | void aw_clean(aw_t *aw); 74 | 75 | /** 76 | * @fn aw_append_alignment 77 | */ 78 | void aw_append_alignment( 79 | aw_t *aw, 80 | gref_idx_t const *ref, 81 | gref_acv_t const *query, 82 | struct gaba_alignment_s const *const *aln, 83 | uint64_t cnt); 84 | 85 | /** 86 | * @fn aw_append_unmapped 87 | */ 88 | void aw_append_unmapped( 89 | aw_t *aw, 90 | gref_idx_t const *ref, 91 | gref_acv_t const *query); 92 | 93 | 94 | #endif /* _SAM_H_INCLUDED */ 95 | /** 96 | * end of sam.h 97 | */ 98 | -------------------------------------------------------------------------------- /bench.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file bench.h 4 | * 5 | * @brief benchmarking utils 6 | * 7 | * @detail 8 | * usage: 9 | * bench_t b; 10 | * bench_init(b); // clear accumulator 11 | * 12 | * bench_start(b); 13 | * // execution time between bench_start and bench_end is accumulated 14 | * bench_end(b); 15 | * 16 | * printf("%lld us\n", bench_get(b)); // in us 17 | */ 18 | #ifndef _BENCH_H_INCLUDED 19 | #define _BENCH_H_INCLUDED 20 | 21 | #include 22 | 
#include 23 | 24 | /** 25 | * benchmark macros 26 | */ 27 | #ifdef BENCH 28 | #include 29 | 30 | /** 31 | * @struct _bench 32 | * @brief benchmark variable container 33 | */ 34 | struct _bench { 35 | struct timeval s; /** start */ 36 | int64_t a; /** accumulator */ 37 | }; 38 | typedef struct _bench bench_t; 39 | 40 | /** 41 | * @macro bench_init 42 | */ 43 | #define bench_init(b) { \ 44 | memset(&(b).s, 0, sizeof(struct timeval)); \ 45 | (b).a = 0; \ 46 | } 47 | 48 | /** 49 | * @macro bench_start 50 | */ 51 | #define bench_start(b) { \ 52 | gettimeofday(&(b).s, NULL); \ 53 | } 54 | 55 | /** 56 | * @macro bench_end 57 | */ 58 | #define bench_end(b) { \ 59 | struct timeval _e; \ 60 | gettimeofday(&_e, NULL); \ 61 | (b).a += ( (_e.tv_sec - (b).s.tv_sec ) * 1000000000 \ 62 | + (_e.tv_usec - (b).s.tv_usec) * 1000); \ 63 | } 64 | 65 | /** 66 | * @macro bench_get 67 | */ 68 | #define bench_get(b) ( \ 69 | (b).a \ 70 | ) 71 | 72 | #else /* #ifdef BENCH */ 73 | 74 | /** disable bench */ 75 | struct _bench { uint64_t _unused; }; 76 | typedef struct _bench bench_t; 77 | #define bench_init(b) ; 78 | #define bench_start(b) ; 79 | #define bench_end(b) ; 80 | #define bench_get(b) ( 0LL ) 81 | 82 | #endif /* #ifdef BENCH */ 83 | 84 | #endif /* #ifndef _BENCH_H_INCLUDED */ 85 | /** 86 | * end of bench.h 87 | */ 88 | -------------------------------------------------------------------------------- /comb.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file comb.h 4 | * 5 | * @brief comb aligner API header 6 | */ 7 | #ifndef _COMB_H_INCLUDED 8 | #define _COMB_H_INCLUDED 9 | 10 | 11 | #endif 12 | /** 13 | * end of comb.h 14 | */ 15 | -------------------------------------------------------------------------------- /examples/graph1.gfa: -------------------------------------------------------------------------------- 1 | H VN:Z:1.0 2 | S 11 ACCCGCGGGGTTGAGTATTTGTCGAATCTAAGCAGAGCAAGAGCGGGTTCCAACGCACTCTAGCAGGAGATATATAACTT 3 | S 12 
CCCCTGCCGGAGGCCGACGTGTTATTCTGATCTAGGGAGGCTTCGAAACCGGAGTGAGTAGATGGTGCGGAAACCAAAGT 4 | S 13 AGGTTTTTTAATCGAATCTACTCCCTCTCCCATAGGCGTA 5 | L 11 + 12 + 0M 6 | L 12 + 13 + 0M 7 | L 11 + 13 + 0M -------------------------------------------------------------------------------- /examples/graph2.gfa: -------------------------------------------------------------------------------- 1 | H VN:Z:1.0 2 | S 14 ACCCGCGGGGTTGAGTATTTGTCGAATCTAAGCAGAGCAAGAGCGGGTTCCAACGCACTCTAGCAGGAGATATATAACTTCCCCTGCCGG 3 | S 15 AGGCCGACGTGTTATTCTGATCTAGGGAGGCTTCGAAACCGGAGTGAGTAGATGGTGCGG 4 | S 16 AAACCAAAGTAGGTTTTTTAATCGAATCTACTCCCTCTCCCATAGGCGTA 5 | L 14 + 15 + 0M 6 | L 15 + 16 + 0M 7 | L 14 + 16 + 0M -------------------------------------------------------------------------------- /examples/linear1.fa: -------------------------------------------------------------------------------- 1 | > seq1 2 | ACCCGCGGGGTTGAGTATTTGTCGAATCTAAGCAGAGCAAGAGCGGGTTCCAACGCACTCTAGCAGGAGATATATAACTT 3 | CCCCTGCCGGAGGCCGACGTGTTATTCTGATCTAGGGAGGCTTCGAAACCGGAGTGAGTAGATGGTGCGGAAACCAAAGT 4 | AGGTTTTTTAATCGAATCTACTCCCTCTCCCATAGGCGTA 5 | -------------------------------------------------------------------------------- /examples/linear2.fa: -------------------------------------------------------------------------------- 1 | > read1 2 | CCCCTGCCGGAGGCCGACGTGTTATTCTGATCTAGGGAGGCTTCGAAACCGGAGTGAGTAGATGGTGCGGAAACCAAAGT 3 | > read2 4 | CAACGCACTCTAGCAGGAGATATATAACTTCCCCTGCCGGAGGCCGACGTGTTATTCTGATCTAGGGAGGCTTCGAAACC 5 | > read3 6 | GGAGTGAGTAGATGGTGCGGAAACCAAAGTAGGTTTTTTAATCGAATCTACTCCCTCTCCCATAGGCGTA -------------------------------------------------------------------------------- /fna.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file fna.h 4 | * 5 | * @brief FASTA / FASTQ reader implementation. 6 | * 7 | * @author Hajime Suzuki 8 | * @date 2015/05/23 9 | * @license Apache v2. 
10 | * 11 | * @detail 12 | * Supported formats: 13 | * FASTA (raw and gzipped) 14 | * FASTQ (raw and gzipped) 15 | * FAST5 (unsupported for now!!!) 16 | * 17 | * List of APIs: 18 | * Basic readers: 19 | * fna_t *fna_init(char const *path, int pack); 20 | * fna_seq_t *fna_read(fna_t const *fna, fna_seq_t *seq); 21 | * void fna_seq_free(fna_seq_t *seq); 22 | * void fna_close(fna_t *fna); 23 | * 24 | * Sequence duplicators: 25 | * fna_seq_t *fna_duplicate(fna_seq_t const *seq); 26 | * fna_seq_t *fna_revcomp(fna_seq_t const *seq); 27 | * 28 | * Sequence modifiers: 29 | * void fna_append(fna_seq_t *dst, fna_seq_t const *src); 30 | * void fna_append_revcomp(fna_seq_t *dst, fna_seq_t const *src); 31 | * 32 | * Types and members: 33 | * fna_t (alias to struct _fna): sequence reader instance container. 34 | * path: path to the file. 35 | * fna_seq_t (alias to struct _seq): sequence container. 36 | * name: sequence name container (kvec_t(char) instance) 37 | * name.a: pointer to the sequence name (null-terminated ASCII). 
38 | * seq: sequence container (kvec_t(uint8_t) or kpvec_t(uint8_t) instance) 39 | * seq.a: pointer to the sequence (null-terminated when fna->pack == FNA_RAW) 40 | */ 41 | #ifndef _FNA_H_INCLUDED 42 | #define _FNA_H_INCLUDED 43 | 44 | #include 45 | 46 | /** 47 | * @enum fna_flag_encode 48 | */ 49 | enum fna_flag_encode { 50 | FNA_RAW = 0, 51 | FNA_ASCII = 0, 52 | FNA_2BIT = 1, 53 | FNA_2BITPACKED = 2, 54 | FNA_4BIT = 3, 55 | FNA_4BITPACKED = 4, 56 | }; 57 | 58 | /** 59 | * @enum fna_format 60 | * 61 | * @brief format flag constant 62 | */ 63 | enum fna_format { 64 | FNA_UNKNOWN = 0, 65 | FNA_FASTA = 1, 66 | FNA_FASTQ = 2, 67 | FNA_FAST5 = 3, 68 | FNA_GFA = 4 69 | }; 70 | 71 | /** 72 | * @enum fna_options 73 | */ 74 | enum fna_options { 75 | FNA_SKIP_QUAL = 1 76 | }; 77 | 78 | /** 79 | * @enum fna_seq_type 80 | * @brief distinguish struct fna_seq_s with struct fna_link_s 81 | */ 82 | enum fna_seq_type { 83 | FNA_SEGMENT = 1, 84 | FNA_LINK = 2 85 | }; 86 | 87 | /** 88 | * @enum fna_status 89 | */ 90 | enum fna_status { 91 | FNA_SUCCESS = 0, 92 | FNA_ERROR_FILE_OPEN = 1, 93 | FNA_ERROR_UNKNOWN_FORMAT = 2, 94 | FNA_ERROR_BROKEN_FORMAT = 3, 95 | FNA_ERROR_OUT_OF_MEM = 4, 96 | FNA_ERROR_UNSUPPORTED_VERSION = 5, 97 | FNA_EOF = -1 98 | }; 99 | 100 | /** 101 | * @struct fna_params_s 102 | * @brief options 103 | */ 104 | struct fna_params_s { 105 | uint8_t file_format; /** see enum fna_format */ 106 | uint8_t seq_encode; /** see enum fna_flag_encode */ 107 | uint16_t options; /** see enum fna_options */ 108 | uint16_t head_margin; /** margin at the head of fna_seq_t */ 109 | uint16_t tail_margin; /** margin at the tail of fna_seq_t */ 110 | uint16_t seq_head_margin; /** margin at the head of seq buffer */ 111 | uint16_t seq_tail_margin; /** margin at the tail of seq buffer */ 112 | uint16_t reserved[2]; 113 | void *lmm; /** lmm memory manager */ 114 | }; 115 | typedef struct fna_params_s fna_params_t; 116 | 117 | #define FNA_PARAMS(...) 
( &((struct fna_params_s const) { __VA_ARGS__ }) ) 118 | 119 | /** 120 | * @struct fna_s 121 | * 122 | * @brief a struct for fna context 123 | */ 124 | struct fna_s { 125 | void *reserved1; 126 | char *path; 127 | uint8_t file_format; /** see enum fna_format */ 128 | uint8_t seq_encode; /** see enum fna_flag_encode */ 129 | uint16_t options; 130 | int32_t status; /** see enum fna_status */ 131 | uint32_t reserved2[7]; 132 | }; 133 | typedef struct fna_s fna_t; 134 | 135 | /** 136 | * @struct fna_str_s 137 | */ 138 | struct fna_str_s { 139 | char const *ptr; 140 | int32_t len; 141 | }; 142 | 143 | /** 144 | * @struct fna_sarr_s 145 | */ 146 | struct fna_sarr_s { 147 | uint8_t const *ptr; 148 | int64_t len; 149 | }; 150 | 151 | /** 152 | * @struct fna_cigar_s 153 | */ 154 | struct fna_cigar_s { 155 | char const *ptr; 156 | int64_t len; 157 | }; 158 | 159 | /** 160 | * @struct fna_segment_s 161 | */ 162 | struct fna_segment_s { 163 | struct fna_str_s name; 164 | struct fna_str_s comment; 165 | struct fna_sarr_s seq; 166 | struct fna_sarr_s qual; 167 | }; 168 | 169 | /** 170 | * @struct fna_link_s 171 | */ 172 | struct fna_link_s { 173 | struct fna_str_s src; 174 | struct fna_str_s dst; 175 | int32_t src_ori; /** 0: forward, 1: reverse */ 176 | int32_t dst_ori; /** 0: forward, 1: reverse */ 177 | struct fna_cigar_s cigar; 178 | int32_t _pad[2]; 179 | }; 180 | 181 | /** 182 | * @struct fna_seq_s 183 | * 184 | * @brief a struct to contain parsed sequence. 185 | */ 186 | struct fna_seq_s { 187 | void *reserved1; 188 | uint8_t type; 189 | uint8_t seq_encode; /** one of fna_flag_encode */ 190 | uint16_t options; 191 | union fna_seq_body_u { 192 | struct fna_segment_s segment; 193 | struct fna_link_s link; 194 | } s; 195 | uint16_t reserved3[4]; 196 | }; 197 | typedef struct fna_seq_s fna_seq_t; 198 | 199 | /** 200 | * @fn fna_init 201 | * 202 | * @brief create a sequence reader context 203 | * 204 | * @param[in] path : a path to file to open. 
205 | * @param[in] pack : see struct fna_params_s 206 | * 207 | * @return a pointer to the context, NULL if an error occurred (may be invalid path or invalid format) 208 | */ 209 | fna_t *fna_init(char const *path, fna_params_t const *params); 210 | 211 | /** 212 | * @fn fna_close 213 | * 214 | * @brief clean up sequence reader context 215 | */ 216 | void fna_close(fna_t *fna); 217 | 218 | /** 219 | * @fn fna_set_lmm 220 | * 221 | * @brief replace local memory manager context 222 | */ 223 | void *fna_set_lmm(fna_t *fna, void *lmm); 224 | 225 | /** 226 | * @fn fna_read 227 | * 228 | * @brief read a sequence 229 | * 230 | * @param[in] fna : a pointer to the context 231 | * 232 | * @return a pointer to a sequence object, NULL if the file pointer reached the end. 233 | */ 234 | fna_seq_t *fna_read(fna_t *fna); 235 | 236 | /** 237 | * @fn fna_append 238 | * 239 | * @brief concatenate src sesquence after dst sequence 240 | */ 241 | void fna_append(fna_seq_t *dst, fna_seq_t const *src); 242 | 243 | /** 244 | * @fn fna_duplicate 245 | * 246 | * @brief duplicate sequence 247 | */ 248 | fna_seq_t *fna_duplicate(fna_seq_t const *seq); 249 | 250 | /** 251 | * @fn fna_append_revcomp 252 | * 253 | * @brief append reverse complemented sequence after the given sequence 254 | */ 255 | void fna_append_revcomp(fna_seq_t *seq, fna_seq_t const *src); 256 | 257 | /** 258 | * @fn fna_revcomp 259 | * 260 | * @brief make reverse complemented sequence 261 | */ 262 | fna_seq_t *fna_revcomp(fna_seq_t const *seq); 263 | 264 | /** 265 | * @fn fna_seq_free 266 | * 267 | * @brief clean up sequence object 268 | */ 269 | void fna_seq_free(fna_seq_t *seq); 270 | 271 | #endif /* #ifndef _FNA_H_INCLUDED */ 272 | /** 273 | * end of fna.h 274 | */ 275 | -------------------------------------------------------------------------------- /gaba.h: -------------------------------------------------------------------------------- 1 | 2 | 3 | /** 4 | * @file gaba.h 5 | * 6 | * @brief C header of the libgaba 
(libsea3) API 7 | * 8 | * @author Hajime Suzuki 9 | * @date 2014/12/29 10 | * @license Apache v2 11 | * 12 | * @detail 13 | * a header for libgaba (libsea3): a fast banded seed-and-extend alignment library. 14 | * 15 | * from C: 16 | * Include this header file as #include . This will enable you to 17 | * use all the APIs in the gaba_init, and gaba_align form. 18 | * 19 | * from C++: 20 | * Include this header as #include . The C APIs are wrapped with 21 | * namespace sea and the C++ class AlignmentContext and AlignmentResult 22 | * are added. See example.cpp for the detail of the usage in C++. 23 | */ 24 | 25 | #ifndef _GABA_H_INCLUDED 26 | #define _GABA_H_INCLUDED 27 | 28 | #include /** NULL and size_t */ 29 | #include /** uint8_t, int32_t, int64_t */ 30 | 31 | /** 32 | * @enum gaba_error 33 | * 34 | * @brief (API) error flags. see gaba_init function and status member in the gaba_alignment structure for more details. 35 | */ 36 | enum gaba_error { 37 | GABA_SUCCESS = 0, /*!< success!! */ 38 | GABA_TERMINATED = 1, /*!< (internal code) success */ 39 | GABA_ERROR = -1, /*!< unknown error */ 40 | /** errors which occur in an alignment function */ 41 | GABA_ERROR_INVALID_MEM = -2, /*!< invalid pointer to memory */ 42 | GABA_ERROR_INVALID_CONTEXT = -3, /*!< invalid pointer to the alignment context */ 43 | GABA_ERROR_OUT_OF_BAND = -4, /*!< traceback failure. using wider band may resolve this type of error. */ 44 | GABA_ERROR_OUT_OF_MEM = -5, /*!< out of memory error. mostly caused by exessively long queries. */ 45 | GABA_ERROR_OVERFLOW = -6, /*!< cell overflow error */ 46 | GABA_ERROR_INVALID_ARGS = -7, /*!< inproper input arguments. */ 47 | /** errors which occur in an initialization function */ 48 | GABA_ERROR_UNSUPPORTED_ALG = -8, /*!< unsupported combination of algorithm and processor options. use naive implementations instead. 
*/ 49 | GABA_ERROR_INVALID_COST = -9 /*!< invalid alignment cost */ 50 | }; 51 | 52 | /** 53 | * @enum gaba_clip_type 54 | */ 55 | enum gaba_clip_type { 56 | GABA_CLIP_SOFT = 'S', 57 | GABA_CLIP_HARD = 'H' 58 | }; 59 | 60 | /** 61 | * @struct gaba_score_s 62 | * @brief score container 63 | */ 64 | struct gaba_score_s { 65 | int8_t score_sub[4][4]; 66 | int8_t score_gi_a, score_ge_a; 67 | int8_t score_gi_b, score_ge_b; 68 | }; 69 | typedef struct gaba_score_s gaba_score_t; 70 | 71 | /** 72 | * @struct gaba_params_s 73 | * @brief input parameters of gaba_init 74 | */ 75 | struct gaba_params_s { 76 | /** output options */ 77 | int16_t head_margin; /** margin at the head of gaba_res_t */ 78 | int16_t tail_margin; /** margin at the tail of gaba_res_t */ 79 | 80 | /** filtering options */ 81 | int16_t filter_thresh; /** popcnt filter threshold, set zero if you want to disable it */ 82 | 83 | /** score parameters */ 84 | int16_t xdrop; 85 | gaba_score_t const *score_matrix; 86 | }; 87 | typedef struct gaba_params_s gaba_params_t; 88 | 89 | /** 90 | * @macro GABA_PARAMS 91 | * @brief utility macro for gaba_init, see example on header. 92 | */ 93 | #define GABA_PARAMS(...) ( &((struct gaba_params_s const) { __VA_ARGS__ }) ) 94 | 95 | /** 96 | * @macro GABA_SCORE_SIMPLE 97 | * @brief utility macro for constructing score parameters. 98 | */ 99 | #define GABA_SCORE_SIMPLE(m, x, gi, ge) ( \ 100 | &((gaba_score_t const) { \ 101 | .score_sub = { \ 102 | {m, -(x), -(x), -(x)}, \ 103 | {-(x), m, -(x), -(x)}, \ 104 | {-(x), -(x), m, -(x)}, \ 105 | {-(x), -(x), -(x), m} \ 106 | }, \ 107 | .score_gi_a = gi, \ 108 | .score_ge_a = ge, \ 109 | .score_gi_b = gi, \ 110 | .score_ge_b = ge \ 111 | }) \ 112 | ) 113 | 114 | /** 115 | * @type gaba_t 116 | * 117 | * @brief (API) an alias to `struct gaba_context_s'. 
118 | */ 119 | typedef struct gaba_context_s gaba_t; 120 | 121 | /** 122 | * @type gaba_stack_t 123 | * 124 | * @brief stack context container 125 | */ 126 | typedef struct gaba_stack_s gaba_stack_t; 127 | 128 | /** 129 | * @struct gaba_section_s 130 | * 131 | * @brief section container, a tuple of (id, length, head position). 132 | */ 133 | struct gaba_section_s { 134 | uint32_t id; /** (4) section id */ 135 | uint32_t len; /** (4) length of the seq */ 136 | uint8_t const *base; /** (8) pointer to the head of the sequence */ 137 | }; 138 | typedef struct gaba_section_s gaba_section_t; 139 | #define gaba_build_section(_id, _base, _len) ( \ 140 | (struct gaba_section_s){ \ 141 | .id = (_id), \ 142 | .base = (_base), \ 143 | .len = (_len) \ 144 | } \ 145 | ) 146 | #define gaba_rev(pos, len) ( (len) + (uint64_t)(len) - (uint64_t)(pos) - 1 ) 147 | 148 | /** 149 | * @type gaba_dp_t 150 | * 151 | * @brief an alias to `struct gaba_dp_context_s`. 152 | */ 153 | typedef struct gaba_dp_context_s gaba_dp_t; 154 | 155 | /** 156 | * @struct gaba_fill_s 157 | */ 158 | struct gaba_fill_s { 159 | /* coordinates */ 160 | int64_t psum; /** (8) global p-coordinate of the tail of the section */ 161 | int32_t p; /** (4) local p-coordinate of the tail of the section */ 162 | uint32_t ssum; /** (4) */ 163 | 164 | /* status and max scores */ 165 | int64_t max; /** (8) max */ 166 | uint32_t status; /** (4) */ 167 | 168 | uint8_t _pad[36]; 169 | }; 170 | typedef struct gaba_fill_s gaba_fill_t; 171 | 172 | /** 173 | * @enum gaba_status 174 | */ 175 | enum gaba_status { 176 | GABA_STATUS_CONT = 0, 177 | GABA_STATUS_UPDATE = 0x100, 178 | GABA_STATUS_UPDATE_A = 0x0f, 179 | GABA_STATUS_UPDATE_B = 0xf0, 180 | GABA_STATUS_TERM = 0x200 181 | }; 182 | 183 | /** 184 | * @struct gaba_pos_pair_s 185 | */ 186 | struct gaba_pos_pair_s { 187 | uint32_t apos, bpos; 188 | }; 189 | typedef struct gaba_pos_pair_s gaba_pos_pair_t; 190 | 191 | /** 192 | * @struct gaba_path_section_s 193 | */ 194 | struct 
gaba_path_section_s { 195 | uint32_t aid, bid; /** (8) id of the sections */ 196 | uint32_t apos, bpos; /** (8) pos in the sections */ 197 | uint32_t alen, blen; /** (8) length of the segments */ 198 | int64_t ppos; /** (8) path string position (offset) */ 199 | // uint32_t plen; /** (4) path string length */ 200 | // uint32_t reserved; /** (4) */ 201 | }; 202 | typedef struct gaba_path_section_s gaba_path_section_t; 203 | #define gaba_plen(sec) ( (sec)->alen + (sec)->blen ) 204 | 205 | /** 206 | * @struct gaba_path_s 207 | */ 208 | struct gaba_path_s { 209 | int64_t len; /** (8) path length (= array bit length) */ 210 | uint32_t array[]; /** () path array */ 211 | }; 212 | typedef struct gaba_path_s gaba_path_t; 213 | 214 | /** 215 | * @struct gaba_alignment_s 216 | */ 217 | struct gaba_alignment_s { 218 | void *lmm; 219 | int64_t score; 220 | uint32_t reserved1, reserved2; 221 | uint32_t rapos, rbpos; 222 | uint32_t rppos; /** (4) local path index in the root section */ 223 | uint32_t rsidx; /** (4) index of the root section */ 224 | uint32_t reserved3; 225 | uint32_t slen; 226 | struct gaba_path_section_s const *sec; 227 | struct gaba_path_s const *path; 228 | }; 229 | typedef struct gaba_alignment_s gaba_alignment_t; 230 | 231 | /** 232 | * @fn gaba_init 233 | * @brief (API) gaba_init new API 234 | */ 235 | gaba_t *gaba_init(gaba_params_t const *params); 236 | 237 | /** 238 | * @fn gaba_clean 239 | * 240 | * @brief (API) clean up the alignment context structure. 241 | * 242 | * @param[in] ctx : a pointer to the alignment structure. 243 | * 244 | * @return none. 
245 | * 246 | * @sa gaba_init 247 | */ 248 | void gaba_clean( 249 | gaba_t *ctx); 250 | 251 | /** 252 | * @fn gaba_dp_init 253 | */ 254 | gaba_dp_t *gaba_dp_init( 255 | gaba_t const *ctx, 256 | uint8_t const *alim, 257 | uint8_t const *blim); 258 | 259 | /** 260 | * @fn gaba_dp_flush 261 | * @brief flush stack (flush all if NULL) 262 | */ 263 | void gaba_dp_flush( 264 | gaba_dp_t *dp, 265 | uint8_t const *alim, 266 | uint8_t const *blim); 267 | 268 | /** 269 | * @fn gaba_dp_save_stack 270 | */ 271 | gaba_stack_t const *gaba_dp_save_stack( 272 | gaba_dp_t *dp); 273 | 274 | /** 275 | * @fn gaba_dp_flush_stack 276 | */ 277 | void gaba_dp_flush_stack( 278 | gaba_dp_t *dp, 279 | gaba_stack_t const *stack); 280 | 281 | /** 282 | * @fn gaba_dp_clean 283 | */ 284 | void gaba_dp_clean( 285 | gaba_dp_t *dp); 286 | 287 | /** 288 | * @fn gaba_dp_fill_root 289 | */ 290 | gaba_fill_t *gaba_dp_fill_root( 291 | gaba_dp_t *dp, 292 | gaba_section_t const *a, 293 | uint32_t apos, 294 | gaba_section_t const *b, 295 | uint32_t bpos); 296 | 297 | /** 298 | * @fn gaba_dp_fill 299 | * @brief fill dp matrix inside section pairs 300 | */ 301 | gaba_fill_t *gaba_dp_fill( 302 | gaba_dp_t *dp, 303 | gaba_fill_t const *prev_sec, 304 | gaba_section_t const *a, 305 | gaba_section_t const *b); 306 | 307 | /** 308 | * @fn gaba_dp_merge 309 | */ 310 | gaba_fill_t *gaba_dp_merge( 311 | gaba_dp_t *dp, 312 | gaba_fill_t const *sec_list, 313 | uint64_t sec_list_len); 314 | 315 | /** 316 | * @fn gaba_dp_search_max 317 | */ 318 | gaba_pos_pair_t gaba_dp_search_max( 319 | gaba_dp_t *dp, 320 | gaba_fill_t const *sec); 321 | 322 | /** 323 | * @struct gaba_trace_params_s 324 | */ 325 | struct gaba_trace_params_s { 326 | void *lmm; 327 | struct gaba_path_section_s const *sec; 328 | uint16_t slen; 329 | uint16_t k; 330 | char seq_a_head_type; 331 | char seq_a_tail_type; 332 | char seq_b_head_type; 333 | char seq_b_tail_type; 334 | }; 335 | typedef struct gaba_trace_params_s gaba_trace_params_t; 336 | 337 | /** 
338 | * @macro GABA_TRACE_PARAMS 339 | */ 340 | #define GABA_TRACE_PARAMS(...) ( &((struct gaba_trace_params_s const) { __VA_ARGS__ }) ) 341 | #define GABA_TRACE_NONE ( NULL ) 342 | 343 | /** 344 | * @type gaba_alignment_writer 345 | * @brief pointer to putchar-compatible writer 346 | */ 347 | typedef int (*gaba_alignment_writer)(int c); 348 | 349 | /** 350 | * @fn gaba_dp_trace 351 | * 352 | * @brief generate alignment result string 353 | */ 354 | gaba_alignment_t *gaba_dp_trace( 355 | gaba_dp_t *dp, 356 | gaba_fill_t const *fw_tail, 357 | gaba_fill_t const *rv_tail, 358 | gaba_trace_params_t const *params); 359 | 360 | /** 361 | * @fn gaba_dp_recombine 362 | * 363 | * @brief recombine two alignments x and y at xsid and ysid. 364 | */ 365 | gaba_alignment_t *gaba_dp_recombine( 366 | gaba_dp_t *dp, 367 | gaba_alignment_t *x, 368 | uint32_t xsid, 369 | gaba_alignment_t *y, 370 | uint32_t ysid); 371 | 372 | /** 373 | * @fn gaba_dp_res_free 374 | */ 375 | void gaba_dp_res_free( 376 | gaba_alignment_t *aln); 377 | 378 | /** 379 | * @fn gaba_dp_print_cigar_forward 380 | * 381 | * @brief convert path string to cigar. 382 | * @detail 383 | * fprintf must accept ("%" PRId64 "M") and ("%" PRId64 "%c") format string 384 | * otherwise can be ignored. 
385 | */ 386 | typedef int (*gaba_dp_fprintf_t)(void *, char const *, ...); 387 | uint64_t gaba_dp_print_cigar_forward( 388 | gaba_dp_fprintf_t fprintf, 389 | void *fp, 390 | uint32_t const *path, 391 | uint32_t offset, 392 | uint32_t len); 393 | 394 | /** 395 | * @fn gaba_dp_print_cigar_reverse 396 | * 397 | * @brief convert path string to cigar in reverse direction 398 | */ 399 | uint64_t gaba_dp_print_cigar_reverse( 400 | gaba_dp_fprintf_t fprintf, 401 | void *fp, 402 | uint32_t const *path, 403 | uint32_t offset, 404 | uint32_t len); 405 | 406 | /** 407 | * @fn gaba_dp_dump_cigar_forward 408 | */ 409 | uint64_t gaba_dp_dump_cigar_forward( 410 | char *buf, 411 | uint64_t buf_size, 412 | uint32_t const *path, 413 | uint32_t offset, 414 | uint32_t len); 415 | 416 | /** 417 | * @fn gaba_dp_dump_cigar_reverse 418 | */ 419 | uint64_t gaba_dp_dump_cigar_reverse( 420 | char *buf, 421 | uint64_t buf_size, 422 | uint32_t const *path, 423 | uint32_t offset, 424 | uint32_t len); 425 | 426 | #endif /* #ifndef _GABA_H_INCLUDED */ 427 | 428 | /* 429 | * end of gaba.h 430 | */ 431 | -------------------------------------------------------------------------------- /ggsea.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file ggsea.h 4 | * 5 | * @brief Graph-to-Graph Seed-and-Extend Alignment 6 | * 7 | * @author Hajime Suzuki 8 | * @date 2016/4/12 9 | */ 10 | #ifndef _GGSEA_H_INCLUDED 11 | #define _GGSEA_H_INCLUDED 12 | 13 | #include 14 | #include "gref.h" 15 | #include "gaba.h" 16 | 17 | 18 | /* types */ 19 | /** 20 | * @type ggsea_conf_t 21 | */ 22 | typedef struct ggsea_conf_s ggsea_conf_t; 23 | 24 | /** 25 | * @type ggsea_ctx_t 26 | */ 27 | typedef struct ggsea_ctx_s ggsea_ctx_t; 28 | 29 | /** 30 | * @struct ggsea_params_s 31 | */ 32 | struct ggsea_params_s { 33 | /* local memory manager */ 34 | void *lmm; 35 | 36 | /* score parameters */ 37 | int16_t xdrop; 38 | gaba_score_t const *score_matrix; 39 | 40 | /* repetitive 
kmer filter */ 41 | int64_t k; 42 | int64_t kmer_cnt_thresh; /* kmer count threshold */ 43 | 44 | /* overlap filter thresh */ 45 | int64_t overlap_thresh; /* depth */ 46 | 47 | /* popcnt filter thresh */ 48 | int64_t gapless_thresh; /* threshold */ 49 | 50 | /* score thresh */ 51 | int64_t score_thresh; 52 | }; 53 | typedef struct ggsea_params_s ggsea_params_t; 54 | 55 | /** 56 | * @macro GGSEA_PARAMS 57 | * @brief utility macro for gaba_init, see example on header. 58 | */ 59 | #define GGSEA_PARAMS(...) ( &((struct ggsea_params_s const) { __VA_ARGS__ }) ) 60 | 61 | /** 62 | * @struct ggsea_result_s 63 | */ 64 | struct ggsea_result_s { 65 | void *reserved1; 66 | gref_idx_t const *ref; 67 | gref_acv_t const *query; 68 | struct gaba_alignment_s const *const *aln; 69 | uint32_t cnt; 70 | uint32_t reserved2; 71 | }; 72 | typedef struct ggsea_result_s ggsea_result_t; 73 | 74 | 75 | /* functions */ 76 | 77 | /** 78 | * @fn ggsea_conf_init 79 | * @brief create configuration object 80 | */ 81 | ggsea_conf_t *ggsea_conf_init( 82 | ggsea_params_t const *params); 83 | 84 | /** 85 | * @fn ggsea_conf_clean 86 | * @brief cleanup configuration object 87 | */ 88 | void ggsea_conf_clean( 89 | ggsea_conf_t *conf); 90 | 91 | /** 92 | * @fn ggsea_ctx_init 93 | * @brief initialize thread-local context with const reference index object 94 | */ 95 | ggsea_ctx_t *ggsea_ctx_init( 96 | ggsea_conf_t const *conf, 97 | gref_idx_t const *ref); 98 | 99 | /** 100 | * @fn ggsea_ctx_clean 101 | * @brief cleanup thread-local context 102 | */ 103 | void ggsea_ctx_clean( 104 | ggsea_ctx_t *ctx); 105 | 106 | /** 107 | * @fn ggsea_align 108 | * @brief do pairwise local alignment between reference in the context and given query 109 | */ 110 | ggsea_result_t *ggsea_align( 111 | ggsea_ctx_t *_ctx, 112 | gref_acv_t const *query, 113 | gref_iter_t *iter, 114 | void *lmm); 115 | 116 | /** 117 | * @fn ggsea_aln_free 118 | */ 119 | void ggsea_aln_free( 120 | ggsea_result_t *aln); 121 | 122 | 123 | #endif /* 
_GGSEA_H_INCLUDED */ 124 | /** 125 | * end of ggsea.h 126 | */ 127 | -------------------------------------------------------------------------------- /gref.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file gref.h 4 | * 5 | * @brief a header of gref.c 6 | * 7 | * @author Hajime Suzuki 8 | * @date 2015/6/23 9 | * @license Apache v2. 10 | * 11 | * @detail 12 | * Ref is a hash-based sequence indexer and exact matcher. 13 | */ 14 | 15 | #ifndef _GREF_H_INCLUDED 16 | #define _GREF_H_INCLUDED 17 | 18 | #include 19 | 20 | /** 21 | * @enum gref_error 22 | * @brief error flags 23 | */ 24 | enum gref_error { 25 | /** error codes */ 26 | GREF_SUCCESS = 0, 27 | GREF_ERROR = 1, 28 | GREF_ERROR_INVALID_CONTEXT = 2, 29 | GREF_ERROR_INVALID_ARGS = 3, 30 | GREF_ERROR_OVERWRITE = 4, 31 | GREF_ERROR_FILE_NOT_FOUND = 5, 32 | GREF_ERROR_BROKEN_FILE = 6, 33 | 34 | /** return values */ 35 | GREF_INDEX_VALID = 0, 36 | GREF_INDEX_INVALID = -1 37 | }; 38 | 39 | /** 40 | * @enum gref_seq_direction 41 | */ 42 | enum gref_seq_direction { 43 | GREF_FW_ONLY = 1, 44 | GREF_FW_RV = 2 45 | }; 46 | 47 | /** 48 | * @enum gref_format 49 | */ 50 | enum gref_format_flags { 51 | GREF_ASCII = 1, 52 | GREF_4BIT = 2, 53 | }; 54 | 55 | /** 56 | * @enum gref_copy_mode 57 | * 58 | * @brief sequences passed to the gref object must remain in the same 59 | * location until the object is destoryed if GREF_NOCOPY is specified. 
60 | */ 61 | enum gref_copy_mode { 62 | GREF_COPY = 1, 63 | GREF_NOCOPY = 2 64 | }; 65 | 66 | /** 67 | * @type gref_t 68 | */ 69 | typedef struct gref_s gref_t; 70 | 71 | /** 72 | * @type gref_pool_t 73 | * @brief mutable sequence pool 74 | */ 75 | typedef struct gref_s gref_pool_t; 76 | 77 | /** 78 | * @type gref_acv_t 79 | * @brief immutable sequence pool, providing kmer iterator, converted from gref_pool_t 80 | */ 81 | typedef struct gref_s gref_acv_t; 82 | 83 | /** 84 | * @type gref_idx_t 85 | * @brief immutable sequence pool with kmer index, converted from gref_acv_t 86 | */ 87 | typedef struct gref_s gref_idx_t; 88 | 89 | /** 90 | * @type gref_iter_t 91 | */ 92 | typedef struct gref_iter_s gref_iter_t; 93 | 94 | /** 95 | * @struct gref_params_s 96 | */ 97 | struct gref_params_s { 98 | uint8_t k; /* kmer length */ 99 | uint8_t seq_direction; 100 | uint8_t seq_format; 101 | uint8_t copy_mode; 102 | uint16_t num_threads; 103 | uint16_t reserved; 104 | uint32_t hash_size; 105 | uint16_t seq_head_margin; 106 | uint16_t seq_tail_margin; 107 | void *lmm; 108 | }; 109 | typedef struct gref_params_s gref_params_t; 110 | #define GREF_PARAMS(...) 
( &((struct gref_params_s const) { __VA_ARGS__ }) ) 111 | 112 | /** 113 | * k max and min 114 | */ 115 | #define GREF_K_MIN_BASE ( 2 ) 116 | #define GREF_K_MIN ( 1<>1 ) 187 | #define gref_dir(_gid) ( (_gid) & 0x01 ) 188 | 189 | /** 190 | * @struct gref_match_res_s 191 | */ 192 | struct gref_match_res_s { 193 | struct gref_gid_pos_s *gid_pos_arr; 194 | int64_t len; 195 | }; 196 | typedef struct gref_match_res_s gref_match_res_t; 197 | 198 | /** 199 | * @fn gref_init_pool 200 | * @brief initialize mutable reference object (reference index precursor) 201 | */ 202 | gref_pool_t *gref_init_pool( 203 | gref_params_t const *params); 204 | 205 | /** 206 | * @fn gref_freeze_pool 207 | */ 208 | gref_acv_t *gref_freeze_pool( 209 | gref_pool_t *pool); 210 | 211 | /** 212 | * @fn gref_melt_archive 213 | */ 214 | gref_pool_t *gref_melt_archive( 215 | gref_acv_t *acv); 216 | 217 | /** 218 | * @fn gref_build_index 219 | * @brief build index 220 | */ 221 | gref_idx_t *gref_build_index( 222 | gref_acv_t *acv); 223 | 224 | /** 225 | * @fn gref_disable_index 226 | */ 227 | gref_acv_t *gref_disable_index( 228 | gref_idx_t *idx); 229 | 230 | /** 231 | * @fn gref_clean 232 | * @brief cleanup object. gref can be pool, acv, or idx. 233 | */ 234 | void gref_clean( 235 | gref_t *gref); 236 | 237 | /** 238 | * @fn gref_append_segment 239 | * @brief append a sequence block to the context. 
240 | */ 241 | int gref_append_segment( 242 | gref_pool_t *pool, 243 | char const *name, 244 | int32_t name_len, 245 | uint8_t const *seq, 246 | int64_t seq_len); 247 | 248 | /** 249 | * @fn gref_append_link 250 | * 251 | * @brief append a edge on graph 252 | */ 253 | int gref_append_link( 254 | gref_pool_t *pool, 255 | char const *src, 256 | int32_t src_len, 257 | int32_t src_ori, 258 | char const *dst, 259 | int32_t dst_len, 260 | int32_t dst_ori); 261 | 262 | /** 263 | * @fn gref_append_snp 264 | * @brief not implemented yet (;_;) 265 | */ 266 | int gref_append_snp( 267 | gref_pool_t *_pool, 268 | char const *name, 269 | int32_t name_len, 270 | int64_t pos, 271 | uint8_t snp); 272 | 273 | /** 274 | * @fn gref_split_segment 275 | * @brief not implemented yet (;_;) 276 | */ 277 | int gref_split_segment( 278 | gref_pool_t *_pool, 279 | char const *base, 280 | int32_t base_len, 281 | int64_t pos, 282 | char const *splitted, 283 | int32_t splitted_len); 284 | 285 | #if 0 286 | /** 287 | * @fn gref_load_index 288 | */ 289 | gref_idx_t *gref_load_index( 290 | zf_t *fp); 291 | 292 | /** 293 | * @fn gref_dump_index 294 | */ 295 | int gref_dump_index( 296 | gref_idx_t const *gref, 297 | zf_t *fp); 298 | #endif 299 | 300 | /** 301 | * @fn gref_iter_init, gref_iter_next, gref_iter_clean 302 | * 303 | * @brief kmer iterator 304 | */ 305 | gref_iter_t *gref_iter_init( 306 | gref_acv_t const *gref, 307 | gref_iter_params_t const *params); 308 | 309 | /** 310 | * @fn gref_iter_next 311 | */ 312 | #define GREF_ITER_KMER_TERM ( (uint64_t)0xffffffffffffffff ) 313 | gref_kmer_tuple_t gref_iter_next( 314 | gref_iter_t *iter); 315 | 316 | /** 317 | * @fn gref_iter_clean 318 | */ 319 | void gref_iter_clean( 320 | gref_iter_t *iter); 321 | 322 | /** 323 | * @fn gref_match 324 | * 325 | * @brief index must be build with kmer-hash mode. 
326 | */ 327 | struct gref_match_res_s gref_match( 328 | gref_idx_t const *gref, 329 | uint8_t const *seq); 330 | struct gref_match_res_s gref_match_2bitpacked( 331 | gref_idx_t const *gref, 332 | uint64_t seq); 333 | 334 | /** 335 | * @fn gref_get_section_count 336 | */ 337 | int64_t gref_get_section_count( 338 | gref_t const *gref); 339 | 340 | /** 341 | * @fn gref_get_section 342 | */ 343 | struct gref_section_s const *gref_get_section( 344 | gref_acv_t const *gref, 345 | uint32_t gid); 346 | 347 | /** 348 | * @fn gref_get_link 349 | */ 350 | struct gref_link_s gref_get_link( 351 | gref_t const *gref, 352 | uint32_t gid); 353 | 354 | /** 355 | * @fn gref_get_name 356 | */ 357 | struct gref_str_s gref_get_name( 358 | gref_t const *gref, 359 | uint32_t gid); 360 | 361 | #if 0 362 | /* deprecated */ 363 | /** 364 | * @fn gref_get_ptr 365 | */ 366 | uint8_t const *gref_get_ptr( 367 | gref_t const *gref); 368 | #endif 369 | 370 | /** 371 | * @fn gref_get_total_len 372 | */ 373 | int64_t gref_get_total_len( 374 | gref_t const *gref); 375 | 376 | /** 377 | * @fn gref_get_lim 378 | */ 379 | uint8_t const *gref_get_lim( 380 | gref_t const *gref); 381 | #define gref_rev_ptr(ptr, lim) ( (uint8_t const *)(lim) + (uint64_t)(lim) - (uint64_t)(ptr) - 1 ) 382 | 383 | #if 0 384 | /** 385 | * @fn gref_is_amb 386 | */ 387 | int64_t gref_is_amb( 388 | gref_t const *gref, 389 | int64_t lb, int64_t ub); 390 | #endif 391 | 392 | #endif /** #ifndef _GREF_H_INCLUDED */ 393 | /** 394 | * end of gref.h 395 | */ 396 | -------------------------------------------------------------------------------- /hmap.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file hmap.h 4 | * 5 | * @brief string to object hashmap 6 | */ 7 | #ifndef _HMAP_H_INCLUDED 8 | #define _HMAP_H_INCLUDED 9 | 10 | #include 11 | 12 | /** 13 | * @type hmap_header_t 14 | * @brief object must have a hmap_header_t field at the head. 
15 | */ 16 | struct hmap_header_s { 17 | uint64_t reserved; 18 | }; 19 | typedef struct hmap_header_s hmap_header_t; 20 | 21 | /** 22 | * @struct hmap_params_s 23 | */ 24 | struct hmap_params_s { 25 | uint64_t hmap_size; 26 | void *lmm; 27 | }; 28 | typedef struct hmap_params_s hmap_params_t; 29 | #define HMAP_PARAMS(...) ( &((struct hmap_params_s const){ __VA_ARGS__ }) ) 30 | 31 | /** 32 | * @type hmap_t 33 | */ 34 | typedef struct hmap_s hmap_t; 35 | 36 | /** 37 | * @struct hmap_key_s 38 | * @brief return value container for get_key function 39 | */ 40 | struct hmap_key_s { 41 | char const *ptr; 42 | uint32_t len; 43 | }; 44 | typedef struct hmap_key_s hmap_key_t; 45 | 46 | /** 47 | * @fn hmap_init 48 | */ 49 | hmap_t *hmap_init( 50 | uint64_t object_size, 51 | hmap_params_t const *params); 52 | 53 | /** 54 | * @fn hmap_clean 55 | */ 56 | void hmap_clean( 57 | hmap_t *hmap); 58 | 59 | /** 60 | * @fn hmap_flush 61 | */ 62 | void hmap_flush( 63 | hmap_t *hmap); 64 | 65 | /** 66 | * @fn hmap_get_id 67 | * @brief returns index in the object array 68 | */ 69 | uint32_t hmap_get_id( 70 | hmap_t *hmap, 71 | char const *str, 72 | uint32_t len); 73 | 74 | /** 75 | * @fn hmap_get_key 76 | */ 77 | struct hmap_key_s hmap_get_key( 78 | hmap_t *hmap, 79 | uint32_t id); 80 | 81 | /** 82 | * @fn hmap_get_object 83 | */ 84 | void *hmap_get_object( 85 | hmap_t *hmap, 86 | uint32_t id); 87 | 88 | /** 89 | * @fn hmap_get_count 90 | */ 91 | uint32_t hmap_get_count( 92 | hmap_t *hmap); 93 | 94 | #endif /* _HMAP_H_INCLUDED */ 95 | /** 96 | * end of hmap.h 97 | */ 98 | -------------------------------------------------------------------------------- /kopen.c: -------------------------------------------------------------------------------- 1 | 2 | 3 | /* for compatibility with -std=c99 (2016/4/26 by Hajime Suzuki) */ 4 | #ifndef _POSIX_C_SOURCE 5 | #define _POSIX_C_SOURCE 200112L 6 | #endif 7 | 8 | #ifndef _BSD_SOURCE 9 | #define _BSD_SOURCE 10 | #endif 11 | /* end */ 12 | 13 | #include 
14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include /** for waitpid */ 23 | #include /** with -D_POSIX_C_SOURCE=200112L */ 24 | #ifndef _WIN32 25 | #include 26 | #include 27 | #include 28 | #endif 29 | 30 | #ifdef _WIN32 31 | #define _KO_NO_NET 32 | #endif 33 | 34 | #ifndef _KO_NO_NET 35 | static int socket_wait(int fd, int is_read) 36 | { 37 | fd_set fds, *fdr = 0, *fdw = 0; 38 | struct timeval tv; 39 | int ret; 40 | tv.tv_sec = 5; tv.tv_usec = 0; // 5 seconds time out 41 | FD_ZERO(&fds); 42 | FD_SET(fd, &fds); 43 | if (is_read) fdr = &fds; 44 | else fdw = &fds; 45 | ret = select(fd+1, fdr, fdw, 0, &tv); 46 | if (ret == -1) perror("select"); 47 | return ret; 48 | } 49 | 50 | static int socket_connect(const char *host, const char *port) 51 | { 52 | #define __err_connect(func) do { perror(func); freeaddrinfo(res); return -1; } while (0) 53 | 54 | int on = 1, fd; 55 | struct linger lng = { 0, 0 }; 56 | struct addrinfo hints, *res = 0; 57 | memset(&hints, 0, sizeof(struct addrinfo)); 58 | hints.ai_family = AF_UNSPEC; 59 | hints.ai_socktype = SOCK_STREAM; 60 | if (getaddrinfo(host, port, &hints, &res) != 0) __err_connect("getaddrinfo"); 61 | if ((fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol)) == -1) __err_connect("socket"); 62 | if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) == -1) __err_connect("setsockopt"); 63 | if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &lng, sizeof(lng)) == -1) __err_connect("setsockopt"); 64 | if (connect(fd, res->ai_addr, res->ai_addrlen) != 0) __err_connect("connect"); 65 | freeaddrinfo(res); 66 | return fd; 67 | #undef __err_connect 68 | } 69 | 70 | static int http_open(const char *fn) 71 | { 72 | char *p, *proxy, *q, *http_host, *host, *port, *path, *buf; 73 | int fd, ret, l; 74 | 75 | /* parse URL; adapted from khttp_parse_url() in knetfile.c */ 76 | if (strstr(fn, "http://") != fn) return 0; 77 | // set ->http_host 78 | for (p = 
(char*)fn + 7; *p && *p != '/'; ++p); 79 | l = p - fn - 7; 80 | http_host = calloc(l + 1, 1); 81 | strncpy(http_host, fn + 7, l); 82 | http_host[l] = 0; 83 | for (q = http_host; *q && *q != ':'; ++q); 84 | if (*q == ':') *q++ = 0; 85 | // get http_proxy 86 | proxy = getenv("http_proxy"); 87 | // set host, port and path 88 | if (proxy == 0) { 89 | host = strdup(http_host); // when there is no proxy, server name is identical to http_host name. 90 | port = strdup(*q? q : "80"); 91 | path = strdup(*p? p : "/"); 92 | } else { 93 | host = (strstr(proxy, "http://") == proxy)? strdup(proxy + 7) : strdup(proxy); 94 | for (q = host; *q && *q != ':'; ++q); 95 | if (*q == ':') *q++ = 0; 96 | port = strdup(*q? q : "80"); 97 | path = strdup(fn); 98 | } 99 | 100 | /* connect; adapted from khttp_connect() in knetfile.c */ 101 | l = 0; 102 | fd = socket_connect(host, port); 103 | buf = calloc(0x10000, 1); // FIXME: I am lazy... But in principle, 64KB should be large enough. 104 | l += sprintf(buf + l, "GET %s HTTP/1.0\r\nHost: %s\r\n", path, http_host); 105 | l += sprintf(buf + l, "\r\n"); 106 | if (write(fd, buf, l) == -1) { 107 | // success 108 | l = 0; 109 | while (read(fd, buf + l, 1)) { // read HTTP header; FIXME: bad efficiency 110 | if (buf[l] == '\n' && l >= 3) 111 | if (strncmp(buf + l - 3, "\r\n\r\n", 4) == 0) break; 112 | ++l; 113 | } 114 | } 115 | buf[l] = 0; 116 | if (l < 14) { // prematured header 117 | close(fd); 118 | fd = -1; 119 | } 120 | ret = strtol(buf + 8, &p, 0); // HTTP return code 121 | if (ret != 200) { 122 | close(fd); 123 | fd = -1; 124 | } 125 | free(buf); free(http_host); free(host); free(port); free(path); 126 | return fd; 127 | } 128 | 129 | typedef struct { 130 | int max_response, ctrl_fd; 131 | char *response; 132 | } ftpaux_t; 133 | 134 | static int kftp_get_response(ftpaux_t *aux) 135 | { 136 | unsigned char c; 137 | int n = 0; 138 | char *p; 139 | if (socket_wait(aux->ctrl_fd, 1) <= 0) return 0; 140 | while (read(aux->ctrl_fd, &c, 1)) { // 
FIXME: this is *VERY BAD* for unbuffered I/O 141 | if (n >= aux->max_response) { 142 | aux->max_response = aux->max_response? aux->max_response<<1 : 256; 143 | aux->response = realloc(aux->response, aux->max_response); 144 | } 145 | aux->response[n++] = c; 146 | if (c == '\n') { 147 | if (n >= 4 && isdigit(aux->response[0]) && isdigit(aux->response[1]) && isdigit(aux->response[2]) 148 | && aux->response[3] != '-') break; 149 | n = 0; 150 | continue; 151 | } 152 | } 153 | if (n < 2) return -1; 154 | aux->response[n-2] = 0; 155 | return strtol(aux->response, &p, 0); 156 | } 157 | 158 | static int kftp_send_cmd(ftpaux_t *aux, const char *cmd, int is_get) 159 | { 160 | if (socket_wait(aux->ctrl_fd, 0) <= 0) return -1; // socket is not ready for writing 161 | if (write(aux->ctrl_fd, cmd, strlen(cmd)) == -1) return -1; 162 | return is_get? kftp_get_response(aux) : 0; 163 | } 164 | 165 | static int ftp_open(const char *fn) 166 | { 167 | char *p, *host = 0, *port = 0, *retr = 0; 168 | char host2[80], port2[10]; 169 | int v[6], l, fd = -1, ret, pasv_port, pasv_ip[4]; 170 | ftpaux_t aux; 171 | 172 | /* parse URL */ 173 | if (strstr(fn, "ftp://") != fn) return 0; 174 | for (p = (char*)fn + 6; *p && *p != '/'; ++p); 175 | if (*p != '/') return 0; 176 | l = p - fn - 6; 177 | port = strdup("21"); 178 | host = calloc(l + 1, 1); 179 | strncpy(host, fn + 6, l); 180 | retr = calloc(strlen(p) + 8, 1); 181 | sprintf(retr, "RETR %s\r\n", p); 182 | 183 | /* connect to ctrl */ 184 | memset(&aux, 0, sizeof(ftpaux_t)); 185 | aux.ctrl_fd = socket_connect(host, port); 186 | if (aux.ctrl_fd == -1) goto ftp_open_end; /* fail to connect ctrl */ 187 | 188 | /* connect to the data stream */ 189 | kftp_get_response(&aux); 190 | kftp_send_cmd(&aux, "USER anonymous\r\n", 1); 191 | kftp_send_cmd(&aux, "PASS kopen@\r\n", 1); 192 | kftp_send_cmd(&aux, "TYPE I\r\n", 1); 193 | kftp_send_cmd(&aux, "PASV\r\n", 1); 194 | for (p = aux.response; *p && *p != '('; ++p); 195 | if (*p != '(') goto ftp_open_end; 
196 | ++p; 197 | sscanf(p, "%d,%d,%d,%d,%d,%d", &v[0], &v[1], &v[2], &v[3], &v[4], &v[5]); 198 | memcpy(pasv_ip, v, 4 * sizeof(int)); 199 | pasv_port = (v[4]<<8&0xff00) + v[5]; 200 | kftp_send_cmd(&aux, retr, 0); 201 | sprintf(host2, "%d.%d.%d.%d", pasv_ip[0], pasv_ip[1], pasv_ip[2], pasv_ip[3]); 202 | sprintf(port2, "%d", pasv_port); 203 | fd = socket_connect(host2, port2); 204 | if (fd == -1) goto ftp_open_end; 205 | ret = kftp_get_response(&aux); 206 | if (ret != 150) { 207 | close(fd); 208 | fd = -1; 209 | } 210 | close(aux.ctrl_fd); 211 | 212 | ftp_open_end: 213 | free(host); free(port); free(retr); free(aux.response); 214 | return fd; 215 | } 216 | #endif /* !defined(_KO_NO_NET) */ 217 | 218 | static char **cmd2argv(const char *cmd) 219 | { 220 | int i, beg, end, argc; 221 | char **argv, *str; 222 | end = strlen(cmd); 223 | for (i = end - 1; i >= 0; --i) 224 | if (!isspace(cmd[i])) break; 225 | end = i + 1; 226 | for (beg = 0; beg < end; ++beg) 227 | if (!isspace(cmd[beg])) break; 228 | if (beg == end) return 0; 229 | for (i = beg + 1, argc = 0; i < end; ++i) 230 | if (isspace(cmd[i]) && !isspace(cmd[i-1])) 231 | ++argc; 232 | argv = (char**)calloc(argc + 2, sizeof(void*)); 233 | argv[0] = str = (char*)calloc(end - beg + 1, 1); 234 | strncpy(argv[0], cmd + beg, end - beg); 235 | for (i = argc = 1; i < end - beg; ++i) 236 | if (isspace(str[i])) str[i] = 0; 237 | else if (str[i] && str[i-1] == 0) argv[argc++] = &str[i]; 238 | return argv; 239 | } 240 | 241 | #define KO_STDIN 1 242 | #define KO_FILE 2 243 | #define KO_PIPE 3 244 | #define KO_HTTP 4 245 | #define KO_FTP 5 246 | 247 | typedef struct { 248 | int type, fd; 249 | pid_t pid; 250 | } koaux_t; 251 | 252 | void *kopen(const char *fn, int *_fd) 253 | { 254 | koaux_t *aux = 0; 255 | *_fd = -1; 256 | if (strstr(fn, "http://") == fn) { 257 | aux = calloc(1, sizeof(koaux_t)); 258 | aux->type = KO_HTTP; 259 | *_fd = aux->fd = http_open(fn); 260 | } else if (strstr(fn, "ftp://") == fn) { 261 | aux = calloc(1, 
sizeof(koaux_t)); 262 | aux->type = KO_FTP; 263 | *_fd = aux->fd = ftp_open(fn); 264 | } else if (strcmp(fn, "-") == 0) { 265 | aux = calloc(1, sizeof(koaux_t)); 266 | aux->type = KO_STDIN; 267 | *_fd = aux->fd = STDIN_FILENO; 268 | } else { 269 | const char *p, *q; 270 | for (p = fn; *p; ++p) 271 | if (!isspace(*p)) break; 272 | if (*p == '<') { // pipe open 273 | int need_shell, pfd[2]; 274 | pid_t pid; 275 | // a simple check to see if we need to invoke a shell; not always working 276 | for (q = p + 1; *q; ++q) 277 | if (ispunct(*q) && *q != '.' && *q != '_' && *q != '-' && *q != ':') 278 | break; 279 | need_shell = (*q != 0); 280 | if (pipe(pfd) != 0) { 281 | return 0; 282 | } 283 | pid = vfork(); 284 | if (pid == -1) { /* vfork() error */ 285 | close(pfd[0]); close(pfd[1]); 286 | return 0; 287 | } 288 | if (pid == 0) { /* the child process */ 289 | char **argv; /* FIXME: I do not know if this will lead to a memory leak */ 290 | close(pfd[0]); 291 | dup2(pfd[1], STDOUT_FILENO); 292 | close(pfd[1]); 293 | if (!need_shell) { 294 | argv = cmd2argv(p + 1); 295 | execvp(argv[0], argv); 296 | free(argv[0]); free(argv); 297 | } else execl("/bin/sh", "sh", "-c", p + 1, NULL); 298 | exit(1); 299 | } else { /* parent process */ 300 | close(pfd[1]); 301 | aux = calloc(1, sizeof(koaux_t)); 302 | aux->type = KO_PIPE; 303 | *_fd = aux->fd = pfd[0]; 304 | aux->pid = pid; 305 | } 306 | } else { 307 | #ifdef _WIN32 308 | *_fd = open(fn, O_RDONLY | O_BINARY); 309 | #else 310 | *_fd = open(fn, O_RDONLY); 311 | #endif 312 | if (*_fd > 0) { 313 | aux = calloc(1, sizeof(koaux_t)); 314 | aux->type = KO_FILE; 315 | aux->fd = *_fd; 316 | } 317 | } 318 | } 319 | // *_fd = aux->fd; 320 | return aux; 321 | } 322 | 323 | int kclose(void *a) 324 | { 325 | koaux_t *aux = (koaux_t*)a; 326 | if (aux->type == KO_PIPE) { 327 | int status; 328 | pid_t pid; 329 | pid = waitpid(aux->pid, &status, WNOHANG); 330 | if (pid != aux->pid) kill(aux->pid, 15); 331 | } 332 | free(aux); 333 | return 0; 334 | 
} 335 | 336 | #ifdef _KO_MAIN 337 | #define BUF_SIZE 0x10000 338 | int main(int argc, char *argv[]) 339 | { 340 | void *x; 341 | int l, fd; 342 | unsigned char buf[BUF_SIZE]; 343 | FILE *fp; 344 | if (argc == 1) { 345 | fprintf(stderr, "Usage: kopen \n"); 346 | return 1; 347 | } 348 | x = kopen(argv[1], &fd); 349 | fp = fdopen(fd, "r"); 350 | if (fp == 0) { 351 | fprintf(stderr, "ERROR: fail to open the input\n"); 352 | return 1; 353 | } 354 | do { 355 | if ((l = fread(buf, 1, BUF_SIZE, fp)) != 0) 356 | fwrite(buf, 1, l, stdout); 357 | } while (l == BUF_SIZE); 358 | fclose(fp); 359 | kclose(x); 360 | return 0; 361 | } 362 | #endif 363 | -------------------------------------------------------------------------------- /kopen.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file kopen.h 4 | * 5 | * @brief a header for kopen and kclose 6 | */ 7 | #ifndef _KOPEN_H_INCLUDED 8 | #define _KOPEN_H_INCLUDED 9 | 10 | /** 11 | * @fn kopen 12 | */ 13 | void *kopen(const char *fn, int *_fd); 14 | 15 | /** 16 | * @fn kclose 17 | */ 18 | int kclose(void *a); 19 | 20 | 21 | #endif /** #ifndef _KOPEN_H_INCLUDED */ 22 | /** 23 | * end of kopen.h 24 | */ 25 | -------------------------------------------------------------------------------- /kvec.h: -------------------------------------------------------------------------------- 1 | /* The MIT License 2 | 3 | Copyright (c) 2008, by Attractive Chaos 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining 6 | a copy of this software and associated documentation files (the 7 | "Software"), to deal in the Software without restriction, including 8 | without limitation the rights to use, copy, modify, merge, publish, 9 | distribute, sublicense, and/or sell copies of the Software, and to 10 | permit persons to whom the Software is furnished to do so, subject to 11 | the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be 14 | 
included in all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 19 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 20 | BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 21 | ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 22 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 | SOFTWARE. 24 | */ 25 | 26 | /* 27 | An example: 28 | 29 | #include "kvec.h" 30 | int main() { 31 | kvec_t(int) array; 32 | kv_init(array); 33 | kv_push(int, array, 10); // append 34 | kv_a(int, array, 20) = 5; // dynamic 35 | kv_A(array, 20) = 4; // static 36 | kv_destroy(array); 37 | return 0; 38 | } 39 | */ 40 | 41 | /* 42 | 43 | 2016-0410 44 | * add kv_pushm 45 | 46 | 2016-0401 47 | 48 | * modify kv_init to return object 49 | * add kv_pusha to append arbitrary type element 50 | * add kv_roundup 51 | * change init size to 256 52 | 53 | 2015-0307 54 | 55 | * add packed vector. (Hajime Suzuki) 56 | 57 | 2008-09-22 (0.1.0): 58 | 59 | * The initial version. 60 | 61 | */ 62 | 63 | #ifndef AC_KVEC_H 64 | #define AC_KVEC_H 65 | 66 | #include 67 | #include 68 | #include 69 | 70 | // #define kv_roundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x)) 71 | #define kv_roundup(x, base) ( (((x) + (base) - 1) / (base)) * (base) ) 72 | #define kv_max2(a, b) ( ((a) < (b)) ? (b) : (a) ) 73 | #define kv_min2(a, b) ( ((a) < (b)) ? 
(a) : (b) ) 74 | 75 | #define KVEC_INIT_SIZE ( 256 ) 76 | 77 | /** 78 | * basic vectors (kv_*) 79 | */ 80 | #define kvec_t(type) struct { uint64_t n, m; type *a; } 81 | #define kv_init(v) ({ (v).n = 0; (v).m = KVEC_INIT_SIZE; (v).a = calloc((v).m, sizeof(*(v).a)); (v); }) 82 | #define kv_destroy(v) { free((v).a); (v).a = NULL; } 83 | // #define kv_A(v, i) ( (v).a[(i)] ) 84 | #define kv_pop(v) ( (v).a[--(v).n] ) 85 | #define kv_size(v) ( (v).n ) 86 | #define kv_max(v) ( (v).m ) 87 | 88 | #define kv_clear(v) ( (v).n = 0 ) 89 | #define kv_resize(v, s) ({ \ 90 | uint64_t _size = kv_max2(KVEC_INIT_SIZE, (s)); \ 91 | (v).m = _size; \ 92 | (v).n = kv_min2((v).n, _size); \ 93 | (v).a = realloc((v).a, sizeof(*(v).a) * (v).m); \ 94 | }) 95 | 96 | #define kv_reserve(v, s) ( \ 97 | (v).m > (s) ? 0 : ((v).m = (s), (v).a = realloc((v).a, sizeof(*(v).a) * (v).m), 0) ) 98 | 99 | #define kv_copy(v1, v0) do { \ 100 | if ((v1).m < (v0).n) kv_resize(v1, (v0).n); \ 101 | (v1).n = (v0).n; \ 102 | memcpy((v1).a, (v0).a, sizeof(*(v).a) * (v0).n); \ 103 | } while (0) 104 | 105 | #define kv_push(v, x) do { \ 106 | if ((v).n == (v).m) { \ 107 | (v).m = (v).m * 2; \ 108 | (v).a = realloc((v).a, sizeof(*(v).a) * (v).m); \ 109 | } \ 110 | (v).a[(v).n++] = (x); \ 111 | } while (0) 112 | 113 | #define kv_pushp(v) ( \ 114 | ((v).n == (v).m) ? 
\ 115 | ((v).m = (v).m * 2, \ 116 | (v).a = realloc((v).a, sizeof(*(v).a) * (v).m), 0) \ 117 | : 0), ( (v).a + ((v).n++) ) 118 | 119 | /* kv_pusha will not check the alignment of elem_t */ 120 | #define kv_pusha(elem_t, v, x) do { \ 121 | uint64_t size = kv_roundup(sizeof(elem_t), sizeof(*(v).a)); \ 122 | if(sizeof(*(v).a) * ((v).m - (v).n) < size) { \ 123 | (v).m = kv_max2((v).m * 2, (v).n + (size)); \ 124 | (v).a = realloc((v).a, sizeof(*(v).a) * (v).m); \ 125 | } \ 126 | *((elem_t *)&((v).a[(v).n])) = (x); \ 127 | (v).n += size / sizeof(*(v).a); \ 128 | } while(0) 129 | 130 | #define kv_pushm(v, arr, size) do { \ 131 | if(sizeof(*(v).a) * ((v).m - (v).n) < (uint64_t)(size)) { \ 132 | (v).m = kv_max2((v).m * 2, (v).n + (size)); \ 133 | (v).a = realloc((v).a, sizeof(*(v).a) * (v).m); \ 134 | } \ 135 | for(uint64_t _i = 0; _i < (uint64_t)(size); _i++) { \ 136 | (v).a[(v).n + _i] = (arr)[_i]; \ 137 | } \ 138 | (v).n += (uint64_t)(size); \ 139 | } while(0) 140 | 141 | #define kv_a(v, i) ( \ 142 | ((v).m <= (size_t)(i) ? \ 143 | ((v).m = (v).n = (i) + 1, kv_roundup((v).m, 32), \ 144 | (v).a = realloc((v).a, sizeof(*(v).a) * (v).m), 0) \ 145 | : (v).n <= (size_t)(i) ? 
(v).n = (i) + 1 \ 146 | : 0), (v).a[(i)]) 147 | 148 | /** bound-unchecked accessor */ 149 | #define kv_at(v, i) ( (v).a[(i)] ) 150 | #define kv_ptr(v) ( (v).a ) 151 | 152 | /** heap queue : elements in v must be orderd in heap */ 153 | #define kv_hq_init(v) { kv_init(v); (v).n = 1; } 154 | #define kv_hq_destroy(v) kv_destroy(v) 155 | #define kv_hq_size(v) ( kv_size(v) - 1 ) 156 | #define kv_hq_max(v) ( kv_max(v) - 1 ) 157 | #define kv_hq_clear(v) ( (v).n = 1 ) 158 | 159 | #define kv_hq_resize(v, s) ( kv_resize(v, (s) + 1) ) 160 | #define kv_hq_reserve(v, s) ( kv_reserve(v, (s) + 1) ) 161 | 162 | #define kv_hq_copy(v1, v0) kv_copy(v1, v0) 163 | 164 | #define kv_hq_n(v, i) ( *((int64_t *)&v.a[i]) ) 165 | #define kv_hq_push(v, x) { \ 166 | /*debug("push, n(%llu), m(%llu)", (v).n, (v).m);*/ \ 167 | kv_push(v, x); \ 168 | uint64_t i = (v).n - 1; \ 169 | while(i > 1 && (kv_hq_n(v, i>>1) > kv_hq_n(v, i))) { \ 170 | (v).a[0] = (v).a[i>>1]; \ 171 | (v).a[i>>1] = (v).a[i]; \ 172 | (v).a[i] = (v).a[0]; \ 173 | i >>= 1; \ 174 | } \ 175 | } 176 | #define kv_hq_pop(v) ({ \ 177 | /*debug("pop, n(%llu), m(%llu)", (v).n, (v).m);*/ \ 178 | uint64_t i = 1, j = 2; \ 179 | (v).a[0] = (v).a[i]; \ 180 | (v).a[i] = (v).a[--(v).n]; \ 181 | (v).a[(v).n] = (v).a[0]; \ 182 | while(j < (v).n) { \ 183 | uint64_t k; \ 184 | k = (j + 1 < (v).n && kv_hq_n(v, j + 1) < kv_hq_n(v, j)) ? (j + 1) : j; \ 185 | k = (kv_hq_n(v, k) < kv_hq_n(v, i)) ? k : 0; \ 186 | if(k == 0) { break; } \ 187 | (v).a[0] = (v).a[k]; \ 188 | (v).a[k] = (v).a[i]; \ 189 | (v).a[i] = (v).a[0]; \ 190 | i = k; j = k<<1; \ 191 | } \ 192 | v.a[v.n]; \ 193 | }) 194 | 195 | /** 196 | * 2-bit packed vectors (kpv_*) 197 | * v.m must be multiple of kpv_elems(v). 198 | */ 199 | #define _BITS ( 2 ) 200 | 201 | /** 202 | * `sizeof(*((v).a)) * 8 / _BITS' is a number of packed elements in an array element. 
203 | */ 204 | #define kpv_elems(v) ( sizeof(*((v).a)) * 8 / _BITS ) 205 | #define kpv_base(v, i) ( ((i) % kpv_elems(v)) * _BITS ) 206 | #define kpv_mask(v, e) ( (e) & ((1<<_BITS) - 1) ) 207 | 208 | #define kpvec_t(type) struct { uint64_t n, m; type *a; } 209 | #define kpv_init(v) ( (v).n = 0, (v).m = KVEC_INIT_SIZE * kpv_elems(v), (v).a = calloc((v).m, sizeof(*(v).a)) ) 210 | #define kpv_destroy(v) { free((v).a); (v).a = NULL; } 211 | 212 | // #define kpv_A(v, i) ( kpv_mask(v, (v).a[(i) / kpv_elems(v)]>>kpv_base(v, i)) ) 213 | #define kpv_pop(v) ( (v).n--, kpv_mask(v, (v).a[(v).n / kpv_elems(v)]>>kpv_base(v, (v).n)) ) 214 | #define kpv_size(v) ( (v).n ) 215 | #define kpv_max(v) ( (v).m ) 216 | 217 | /** 218 | * the length of the array is ((v).m + kpv_elems(v) - 1) / kpv_elems(v) 219 | */ 220 | #define kpv_amax(v) ( ((v).m + kpv_elems(v) - 1) / kpv_elems(v) ) 221 | #define kpv_asize(v) ( ((v).n + kpv_elems(v) - 1) / kpv_elems(v) ) 222 | 223 | #define kpv_clear(v) ( (v).n = 0 ) 224 | #define kpv_resize(v, s) ({ \ 225 | uint64_t _size = kv_max2(KVEC_INIT_SIZE, (s)); \ 226 | (v).m = _size; \ 227 | (v).n = kv_min2((v).n, _size); \ 228 | (v).a = realloc((v).a, sizeof(*(v).a) * kpv_amax(v)); \ 229 | }) 230 | 231 | #define kpv_reserve(v, s) ( \ 232 | (v).m > (s) ? 
0 : ((v).m = (s), (v).a = realloc((v).a, sizeof(*(v).a) * kpv_amax(v)), 0) ) 233 | 234 | #define kpv_copy(v1, v0) do { \ 235 | if ((v1).m < (v0).n) kpv_resize(v1, (v0).n); \ 236 | (v1).n = (v0).n; \ 237 | memcpy((v1).a, (v0).a, kpv_amax(v)); \ 238 | } while (0) 239 | /* 240 | #define kpv_push(v, x) do { \ 241 | if ((v).n == (v).m) { \ 242 | (v).a = realloc((v).a, 2 * sizeof(*(v).a) * kpv_amax(v)); \ 243 | memset((v).a + kpv_amax(v), 0, kpv_amax(v)); \ 244 | (v).m = (v).m * 2; \ 245 | } \ 246 | if(kpv_base(v, (v).n) == 0) { \ 247 | (v).a[(v).n / kpv_elems(v)] = 0; \ 248 | } \ 249 | (v).a[(v).n / kpv_elems(v)] |= kpv_mask(v, x)<>kpv_base(v, i)) ) 283 | 284 | /** bound-unchecked accessor */ 285 | #define kpv_at(v, i) ( kpv_mask(v, (v).a[(i) / kpv_elems(v)]>>kpv_base(v, i)) ) 286 | #define kpv_ptr(v) ( (v).a ) 287 | 288 | #endif 289 | /** 290 | * end of kvec.h 291 | */ 292 | -------------------------------------------------------------------------------- /log.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file log.h 4 | * 5 | * @brief log handler 6 | */ 7 | #ifndef _LOG_H_INCLUDED 8 | #define _LOG_H_INCLUDED 9 | 10 | #include 11 | #include 12 | 13 | /** 14 | * color outputs 15 | */ 16 | #define RED(x) "\x1b[31m" \ 17 | x \ 18 | "\x1b[39m" 19 | #define GREEN(x) "\x1b[32m" \ 20 | x \ 21 | "\x1b[39m" 22 | #define YELLOW(x) "\x1b[33m" \ 23 | x \ 24 | "\x1b[39m" 25 | #define BLUE(x) "\x1b[34m" \ 26 | x \ 27 | "\x1b[39m" 28 | #define MAGENTA(x) "\x1b[35m" \ 29 | x \ 30 | "\x1b[39m" 31 | #define CYAN(x) "\x1b[36m" \ 32 | x \ 33 | "\x1b[39m" 34 | #define WHITE(x) "\x1b[37m" \ 35 | x \ 36 | "\x1b[39m" 37 | 38 | /** 39 | * @macro DEBUG 40 | */ 41 | // #define DEBUG ( 1 ) 42 | 43 | /** 44 | * @macro dbprintf 45 | */ 46 | #ifdef DEBUG 47 | 48 | #define dbprintf(fmt, ...) { \ 49 | fprintf(stderr, fmt, __VA_ARGS__); \ 50 | } 51 | 52 | #else 53 | 54 | #define dbprintf(fmt, ...) 
{} 55 | 56 | #endif 57 | 58 | /** 59 | * @macro debug 60 | */ 61 | #ifdef DEBUG 62 | 63 | #define debug(...) { \ 64 | debug_impl(__VA_ARGS__, ""); \ 65 | } 66 | #define debug_impl(fmt, ...) { \ 67 | dbprintf("[%s: %s(%d)] " fmt "%s\n", __FILE__, __func__, __LINE__, __VA_ARGS__); \ 68 | } 69 | 70 | #else 71 | 72 | #define debug(...) {} 73 | 74 | #endif 75 | 76 | /** 77 | * @macro print_lane 78 | */ 79 | #ifdef DEBUG 80 | 81 | #define print_lane(p1, p2) { \ 82 | cell_t *p = p1, *t = p2; \ 83 | char *str = NULL; \ 84 | int len = 0, size = 256; \ 85 | str = malloc(size); \ 86 | len += sprintf(str+len, "["); \ 87 | while(p != t) { \ 88 | if(*--t <= CELL_MIN) { \ 89 | len += sprintf(str+len, "-oo,"); \ 90 | } else if(*t >= CELL_MAX) { \ 91 | len += sprintf(str+len, "oo,"); \ 92 | } else { \ 93 | len += sprintf(str+len, "%d,", *t); \ 94 | } \ 95 | if(len > (size - 20)) { \ 96 | size *= 2; \ 97 | str = realloc(str, size); \ 98 | } \ 99 | } \ 100 | str[len == 1 ? 1 : len-1] = ']'; \ 101 | str[len == 1 ? 2 : len] = '\0'; \ 102 | debug("lane(%s)", str); \ 103 | free(str); \ 104 | } 105 | 106 | #else 107 | 108 | #define print_lane(p1, p2) {} 109 | 110 | #endif 111 | 112 | /** 113 | * @macro logprintf 114 | */ 115 | #define logprintf(fmt, ...) { \ 116 | fprintf(stderr, fmt, __VA_ARGS__); \ 117 | } 118 | 119 | /** 120 | * @macro log 121 | */ 122 | #define log(...) { \ 123 | log_impl(__VA_ARGS__, ""); \ 124 | } 125 | #define log_impl(fmt, ...) { \ 126 | logprintf("[%s] " fmt "%s\n", __func__, __VA_ARGS__); \ 127 | } 128 | #define log_nr(...) { \ 129 | log_nr_impl(__VA_ARGS__, ""); \ 130 | } 131 | #define log_nr_impl(fmt, ...) { \ 132 | logprintf("[%s] " fmt "%s", __func__, __VA_ARGS__); \ 133 | } 134 | 135 | /** 136 | * @macro log_error 137 | */ 138 | #define log_error(...) { \ 139 | log_error_impl(__VA_ARGS__, ""); \ 140 | } 141 | #define log_error_impl(fmt, ...) 
{ \ 142 | logprintf("[%s] ERROR: " fmt "%s\n", __func__, __VA_ARGS__); \ 143 | } 144 | 145 | /** 146 | * @macro log_error_abort 147 | */ 148 | #define log_error_abort(...) { \ 149 | log_error_impl(__VA_ARGS__, ""); \ 150 | exit(1); \ 151 | } 152 | 153 | /** 154 | * @macro msg 155 | */ 156 | #define msg(...) { \ 157 | msg_impl(__VA_ARGS__, ""); \ 158 | } 159 | #define msg_impl(fmt, ...) { \ 160 | logprintf(fmt "%s\n", __VA_ARGS__); \ 161 | } 162 | 163 | /** 164 | * @macro dump 165 | */ 166 | #ifndef dump 167 | 168 | #if DEBUG 169 | /* compatible with dump in unittest.h */ 170 | #define ut_dump(ptr, len) ({ \ 171 | uint64_t size = (((len) + 15) / 16 + 1) * \ 172 | (strlen("0x0123456789abcdef:") + 16 * strlen(" 00a") + strlen(" \n+ margin")) \ 173 | + strlen(#ptr) + strlen("\n`' len: 100000000"); \ 174 | uint8_t *_ptr = (uint8_t *)(ptr); \ 175 | char *_str = alloca(size); \ 176 | char *_s = _str; \ 177 | /* make header */ \ 178 | _s += sprintf(_s, "\n`%s' len: %" PRId64 "\n", #ptr, (int64_t)len); \ 179 | _s += sprintf(_s, " "); \ 180 | for(int64_t i = 0; i < 16; i++) { \ 181 | _s += sprintf(_s, " %02x", (uint8_t)i); \ 182 | } \ 183 | _s += sprintf(_s, "\n"); \ 184 | for(int64_t i = 0; i < ((len) + 15) / 16; i++) { \ 185 | _s += sprintf(_s, "0x%016" PRIx64 ":", (uint64_t)_ptr); \ 186 | for(int64_t j = 0; j < 16; j++) { \ 187 | _s += sprintf(_s, " %02x", (uint8_t)_ptr[j]); \ 188 | } \ 189 | _s += sprintf(_s, " "); \ 190 | for(int64_t j = 0; j < 16; j++) { \ 191 | _s += sprintf(_s, "%c", isprint(_ptr[j]) ? 
_ptr[j] : ' '); \ 192 | } \ 193 | _s += sprintf(_s, "\n"); \ 194 | _ptr += 16; \ 195 | } \ 196 | (char const *)_str; \ 197 | }) 198 | #else 199 | 200 | #define dump(ptr, len) ; 201 | 202 | #endif 203 | #endif /* #ifndef dump */ 204 | 205 | #endif /* #ifndef _LOG_H_INCLUDED */ 206 | /** 207 | * end of log.h 208 | */ 209 | -------------------------------------------------------------------------------- /mem.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file mem.h 4 | */ 5 | #include 6 | #include 7 | #include 8 | 9 | /** 10 | * @fn mem_dump_file 11 | */ 12 | static inline 13 | char *mem_dump_file( 14 | FILE *fp) 15 | { 16 | uint64_t used = 0, size = 1024; 17 | char *buf = malloc(size); 18 | if(buf == NULL) { return(NULL); } 19 | 20 | #define push(x) { \ 21 | if(used == size) { \ 22 | buf = realloc(buf, size *= 2); \ 23 | if(buf == NULL) { return(NULL); } \ 24 | } \ 25 | buf[used++] = (x); \ 26 | } 27 | 28 | int c; 29 | while((c = getc(fp)) != EOF) { push(c); } 30 | 31 | /* push terminator (double '\0') */ 32 | push('\0'); 33 | push('\0'); 34 | 35 | #undef push 36 | return(buf); 37 | } 38 | 39 | 40 | #ifdef __linux__ 41 | /* dump /proc/meminfo, sum MemFree and Cached */ 42 | 43 | /** 44 | * @fn mem_dump_meminfo 45 | */ 46 | static inline 47 | char *mem_dump_vm_stat( 48 | void) 49 | { 50 | FILE *fp = NULL; 51 | char *res = NULL; 52 | 53 | /* open */ 54 | if((fp = fopen("/proc/meminfo", "r")) == NULL) { 55 | goto _mem_dump_meminfo_error_handler; 56 | } 57 | 58 | /* dump */ 59 | if((res = mem_dump_file(fp)) == NULL) { 60 | goto _mem_dump_meminfo_error_handler; 61 | } 62 | 63 | /* close file */ 64 | if(fclose(fp) != 0) { 65 | goto _mem_dump_meminfo_error_handler; 66 | } 67 | return(res); 68 | 69 | _mem_dump_meminfo_error_handler: 70 | if(fp != NULL) { fclose(fp); } 71 | if(res != NULL) { free(res); } 72 | return(NULL); 73 | } 74 | 75 | /** 76 | * @fn mem_estimate_free_size 77 | */ 78 | static inline 79 | uint64_t 
mem_estimate_free_size( 80 | void) 81 | { 82 | char *s = mem_dump_vm_stat(); 83 | if(s == NULL) { 84 | return(0); 85 | } 86 | 87 | char const *labels[] = { 88 | "MemFree:", 89 | "Cached:", 90 | NULL 91 | }; 92 | 93 | /* supposing s is terminated with double '\0's */ 94 | uint64_t mem = 0; 95 | char *p = s; 96 | while(*p != '\0') { 97 | 98 | char *t = p; 99 | while(*t != '\n' && *t != '\0') { t++; } 100 | 101 | for(char const **l = labels; *l != NULL; l++) { 102 | if(strncmp(p, *l, strlen(*l)) == 0) { 103 | t[-strlen(" kB")] = '\0'; 104 | mem += atoi(p + strlen(*l)); 105 | } 106 | } 107 | 108 | p = t + 1; 109 | } 110 | 111 | free(s); 112 | return(mem * 1024); 113 | } 114 | 115 | #elif __APPLE__ 116 | /* dump vm_stat, sum free, inactive, speculative, and purgable */ 117 | 118 | /** 119 | * @fn mem_dump_vm_stat 120 | */ 121 | static inline 122 | char *mem_dump_vm_stat( 123 | void) 124 | { 125 | FILE *fp = NULL; 126 | char *res = NULL; 127 | 128 | /* open */ 129 | if((fp = popen("vm_stat", "r")) == NULL) { 130 | goto _mem_dump_vm_stat_error_handler; 131 | } 132 | 133 | /* dump */ 134 | if((res = mem_dump_file(fp)) == NULL) { 135 | goto _mem_dump_vm_stat_error_handler; 136 | } 137 | 138 | /* close file */ 139 | if(pclose(fp) != 0) { 140 | goto _mem_dump_vm_stat_error_handler; 141 | } 142 | return(res); 143 | 144 | _mem_dump_vm_stat_error_handler: 145 | if(fp != NULL) { pclose(fp); } 146 | if(res != NULL) { free(res); } 147 | return(NULL); 148 | } 149 | 150 | /** 151 | * @fn mem_estimate_free_size 152 | */ 153 | static inline 154 | uint64_t mem_estimate_free_size( 155 | void) 156 | { 157 | char *s = mem_dump_vm_stat(); 158 | if(s == NULL) { 159 | return(0); 160 | } 161 | 162 | char const *labels[] = { 163 | "Pages free:", 164 | // "Pages active:", 165 | "Pages inactive:", 166 | "Pages speculative:", 167 | // "Pages throttled:", 168 | // "Pages wired down:", 169 | "Pages purgeable:", 170 | NULL 171 | }; 172 | 173 | /* supposing s is terminated with double '\0's */ 174 | 
uint64_t mem = 0; 175 | char *p = s; 176 | while(*p != '\0') { 177 | 178 | char *t = p; 179 | while(*t != '\n' && *t != '\0') { t++; } 180 | 181 | for(char const **l = labels; *l != NULL; l++) { 182 | if(strncmp(p, *l, strlen(*l)) == 0) { 183 | *t = '\0'; 184 | mem += atoi(p + strlen(*l)); 185 | } 186 | } 187 | 188 | p = t + 1; 189 | } 190 | 191 | free(s); 192 | return(mem * sysconf(_SC_PAGESIZE)); 193 | } 194 | #endif 195 | 196 | /** 197 | * end of mem.h 198 | */ 199 | -------------------------------------------------------------------------------- /ngx_rbtree.h: -------------------------------------------------------------------------------- 1 | 2 | /* 3 | * Copyright (C) Igor Sysoev 4 | * Copyright (C) Nginx, Inc. 5 | */ 6 | 7 | 8 | #ifndef _NGX_RBTREE_H_INCLUDED_ 9 | #define _NGX_RBTREE_H_INCLUDED_ 10 | 11 | 12 | // #include 13 | // #include 14 | 15 | #include // for uint64_t 16 | 17 | // typedef ngx_uint_t ngx_rbtree_key_t; 18 | // typedef ngx_int_t ngx_rbtree_key_int_t; 19 | 20 | /** 21 | * modified to hold 64bit key-value pairs 22 | */ 23 | // typedef int64_t ngx_rbtree_key_t; 24 | 25 | 26 | typedef struct ngx_rbtree_node_s ngx_rbtree_node_t; 27 | 28 | struct ngx_rbtree_node_s { 29 | ngx_rbtree_node_t *parent; 30 | ngx_rbtree_node_t *left; 31 | ngx_rbtree_node_t *right; 32 | uint8_t color; 33 | uint8_t data; 34 | uint8_t pad[6]; 35 | int64_t key; 36 | }; 37 | 38 | 39 | typedef struct ngx_rbtree_s ngx_rbtree_t; 40 | 41 | typedef void (*ngx_rbtree_insert_pt) (ngx_rbtree_node_t *root, 42 | ngx_rbtree_node_t *node, ngx_rbtree_node_t *sentinel); 43 | 44 | struct ngx_rbtree_s { 45 | ngx_rbtree_node_t *root; 46 | ngx_rbtree_node_t *sentinel; 47 | // ngx_rbtree_insert_pt insert; 48 | }; 49 | 50 | 51 | #define ngx_rbtree_init(tree, s, i) \ 52 | ngx_rbtree_sentinel_init(s); \ 53 | (tree)->root = s; \ 54 | (tree)->sentinel = s; 55 | 56 | 57 | void ngx_rbtree_insert(ngx_rbtree_t *tree, ngx_rbtree_node_t *node); 58 | void ngx_rbtree_delete(ngx_rbtree_t *tree, 
ngx_rbtree_node_t *node); 59 | 60 | /* 61 | * search functions 62 | * find_key return the leftmost node 63 | * added 2015/11/06 64 | */ 65 | ngx_rbtree_node_t *ngx_rbtree_find_key(ngx_rbtree_t *tree, int64_t key); 66 | ngx_rbtree_node_t *ngx_rbtree_find_key_left(ngx_rbtree_t *tree, int64_t key); 67 | ngx_rbtree_node_t *ngx_rbtree_find_key_right(ngx_rbtree_t *tree, int64_t key); 68 | ngx_rbtree_node_t *ngx_rbtree_find_left(ngx_rbtree_t *tree, ngx_rbtree_node_t *node); 69 | ngx_rbtree_node_t *ngx_rbtree_find_right(ngx_rbtree_t *tree, ngx_rbtree_node_t *node); 70 | 71 | typedef void (*ngx_rbtree_walk_pt) (ngx_rbtree_node_t **node, ngx_rbtree_node_t *sentinel, void *ctx); 72 | void ngx_rbtree_walk(ngx_rbtree_t *tree, ngx_rbtree_walk_pt walk, void *ctx); 73 | 74 | #define ngx_rbt_red(node) ((node)->color = 1) 75 | #define ngx_rbt_black(node) ((node)->color = 0) 76 | #define ngx_rbt_is_red(node) ((node)->color) 77 | #define ngx_rbt_is_black(node) (!ngx_rbt_is_red(node)) 78 | #define ngx_rbt_copy_color(n1, n2) (n1->color = n2->color) 79 | 80 | 81 | /* a sentinel must be black */ 82 | 83 | #define ngx_rbtree_sentinel_init(node) ngx_rbt_black(node) 84 | 85 | 86 | /* interval tree (augumented tree) */ 87 | 88 | typedef struct ngx_ivtree_node_s ngx_ivtree_node_t; 89 | 90 | struct ngx_ivtree_node_s { 91 | ngx_ivtree_node_t *parent; 92 | ngx_ivtree_node_t *left; 93 | ngx_ivtree_node_t *right; 94 | uint8_t color; 95 | uint8_t data; 96 | uint8_t pad[6]; 97 | int64_t lkey; 98 | int64_t rkey; 99 | int64_t rkey_max; 100 | }; 101 | 102 | 103 | typedef struct ngx_ivtree_s ngx_ivtree_t; 104 | 105 | 106 | struct ngx_ivtree_s { 107 | ngx_ivtree_node_t *root; 108 | ngx_ivtree_node_t *sentinel; 109 | // ngx_rbtree_insert_pt insert; 110 | }; 111 | 112 | 113 | #define ngx_ivtree_init(tree, s, i) ngx_rbtree_init(tree, s, i) 114 | 115 | 116 | void ngx_ivtree_insert(ngx_ivtree_t *tree, ngx_ivtree_node_t *node); 117 | void ngx_ivtree_delete(ngx_ivtree_t *tree, ngx_ivtree_node_t *node); 118 | 119 
| 120 | #endif /* _NGX_RBTREE_H_INCLUDED_ */ 121 | -------------------------------------------------------------------------------- /psort.c: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file psort.c 4 | * 5 | * @brief parallel sort library frontend implementation 6 | * 7 | * @author Hajime Suzuki 8 | * @date 2016/3/20 9 | * @license MIT 10 | */ 11 | 12 | /* import unittest */ 13 | #define UNITTEST_UNIQUE_ID 200 14 | #define UNITTEST 1 15 | 16 | #include "unittest.h" 17 | 18 | #include 19 | #include 20 | #include "arch/arch.h" 21 | #include "psort.h" 22 | #include "ptask.h" /* pthread parallel task execution library */ 23 | #include "log.h" 24 | #include "sassert.h" 25 | 26 | 27 | /* constants */ 28 | #define WCR_OCC_SIZE ( 1<<8 ) /** 8bit */ 29 | 30 | /** 31 | * @macro _likely, _unlikely 32 | * @brief branch prediction hint for gcc-compatible compilers 33 | */ 34 | #define _likely(x) __builtin_expect(!!(x), 1) 35 | #define _unlikely(x) __builtin_expect(!!(x), 0) 36 | 37 | /** 38 | * @macro _force_inline 39 | * @brief inline directive for gcc-compatible compilers 40 | */ 41 | #define _force_inline inline 42 | // #define _force_inline 43 | 44 | /* max / min */ 45 | #define MAX2(x, y) ( (x) < (y) ? (y) : (x) ) 46 | #define MIN2(x, y) ( (x) > (y) ? 
(y) : (x) ) 47 | 48 | /** 49 | * @struct psort_occ_s 50 | */ 51 | struct psort_occ_s { 52 | uint64_t occ[WCR_OCC_SIZE]; 53 | }; 54 | 55 | /** 56 | * @struct psort_buffer_counter_s 57 | */ 58 | struct psort_buffer_counter_s { 59 | uint8_t cnt[WCR_OCC_SIZE]; 60 | }; 61 | 62 | /** 63 | * @struct psort_buffer_s 64 | */ 65 | struct psort_buffer_s { 66 | uint8_t buf[WCR_OCC_SIZE][WCR_BUF_SIZE]; 67 | }; 68 | 69 | /** 70 | * @struct psort_thread_context_s 71 | */ 72 | struct psort_thread_context_s { 73 | // struct psort_occ_s *occ; 74 | // struct psort_buffer_counter_s *cnt; 75 | 76 | /* aligned on 64byte boundary */ 77 | uint64_t occ[WCR_OCC_SIZE]; /* (2048) */ 78 | uint8_t cnt[WCR_OCC_SIZE]; /* (256) */ 79 | 80 | /* aligned on 64byte boundary (2304) */ 81 | uint8_t buf[WCR_OCC_SIZE][WCR_BUF_SIZE];/* (8192) */ 82 | 83 | /* aligned on 64byte boundary (10496) */ 84 | uint32_t digit; /* (4) */ 85 | uint32_t num_threads; /* (4) */ 86 | void *src; /* (8) */ 87 | void *dst; /* (8) */ 88 | uint64_t from, to; /* (16) */ 89 | uint64_t _pad[3]; /* (24) */ 90 | 91 | /* aligned on 64byte boundary (10560) */ 92 | }; 93 | _static_assert(sizeof(struct psort_thread_context_s) == 2368 + WCR_OCC_SIZE * WCR_BUF_SIZE); 94 | 95 | /** 96 | * @fn aligned_malloc 97 | * 98 | * @brief an wrapper of posix_memalign function 99 | */ 100 | static _force_inline 101 | void *aligned_malloc(size_t size, size_t align) 102 | { 103 | void *ptr; 104 | if(posix_memalign(&ptr, align, size) != 0) { 105 | return(NULL); 106 | } 107 | return(ptr); 108 | } 109 | 110 | /** 111 | * @fn psort_dispatcher 112 | */ 113 | static 114 | void *psort_dispatcher( 115 | void *arg, 116 | void *item) 117 | { 118 | debug("arg(%p), item(%p)", arg, item); 119 | ((void (*)(struct psort_thread_context_s *))item)( 120 | (struct psort_thread_context_s *)arg); 121 | return(NULL); 122 | } 123 | 124 | /* instanciate radixsort and quicksort */ 125 | #define _join(a, b) a##b 126 | #define join(a, b) _join(a, b) 127 | 128 | #undef 
UNITTEST_UNIQUE_ID 129 | 130 | /* 16bit */ 131 | #define elem_t uint16_t 132 | #define SUFFIX 16 133 | #define rd _rd 134 | #define wr _wr 135 | #define ex _ex 136 | #define p _p 137 | #define e _e 138 | #define UNITTEST_UNIQUE_ID SUFFIX 139 | #include "psort_radix_internal.c" 140 | // #include "psort_quick_intl.c" 141 | 142 | /* 32bit */ 143 | #define elem_t uint32_t 144 | #define SUFFIX 32 145 | #define rd _rd 146 | #define wr _wr 147 | #define ex _ex 148 | #define p _p 149 | #define e _e 150 | #define UNITTEST_UNIQUE_ID SUFFIX 151 | #include "psort_radix_internal.c" 152 | // #include "psort_quick_intl.c" 153 | 154 | /* 64bit */ 155 | #define elem_t uint64_t 156 | #define SUFFIX 64 157 | #define rd _rd 158 | #define wr _wr 159 | #define ex _ex 160 | #define p _p 161 | #define e _e 162 | #define UNITTEST_UNIQUE_ID SUFFIX 163 | #include "psort_radix_internal.c" 164 | // #include "psort_quick_intl.c" 165 | 166 | /* 128bit */ 167 | #define elem_t elem_128_t 168 | #define SUFFIX 128 169 | #define rd rd_128 170 | #define wr wr_128 171 | #define ex ex_128 172 | #define p p_128 173 | #define e e_128 174 | #define UNITTEST_UNIQUE_ID SUFFIX 175 | #include "psort_radix_internal.c" 176 | // #include "psort_quick_intl.c" 177 | 178 | /** 179 | * @fn psort_full 180 | */ 181 | int psort_full( 182 | void *arr, 183 | uint64_t len, 184 | uint64_t elem_size, 185 | uint64_t num_threads) 186 | { 187 | switch(elem_size) { 188 | case 2: psort_partialsort_parallel_16(arr, len, num_threads, 0, 2); return(0); 189 | case 4: psort_partialsort_parallel_32(arr, len, num_threads, 0, 4); return(0); 190 | case 8: psort_partialsort_parallel_64(arr, len, num_threads, 0, 8); return(0); 191 | case 16: psort_partialsort_parallel_128(arr, len, num_threads, 0, 16); return(0); 192 | default: return(-1); 193 | } 194 | return(-1); 195 | } 196 | 197 | /** 198 | * @fn psort_half 199 | */ 200 | int psort_half( 201 | void *arr, 202 | uint64_t len, 203 | uint64_t elem_size, 204 | uint64_t num_threads) 205 | { 
206 | switch(elem_size) { 207 | case 2: psort_partialsort_parallel_16(arr, len, num_threads, 0, 1); return(0); 208 | case 4: psort_partialsort_parallel_32(arr, len, num_threads, 0, 2); return(0); 209 | case 8: psort_partialsort_parallel_64(arr, len, num_threads, 0, 4); return(0); 210 | case 16: psort_partialsort_parallel_128(arr, len, num_threads, 0, 8); return(0); 211 | default: return(-1); 212 | } 213 | return(-1); 214 | } 215 | 216 | /** 217 | * @fn psort_partial 218 | */ 219 | int psort_partial( 220 | void *arr, 221 | uint64_t len, 222 | uint64_t elem_size, 223 | uint64_t num_threads, 224 | uint64_t from, 225 | uint64_t to) 226 | { 227 | switch(elem_size) { 228 | case 2: psort_partialsort_parallel_16(arr, len, num_threads, from, to); return(0); 229 | case 4: psort_partialsort_parallel_32(arr, len, num_threads, from, to); return(0); 230 | case 8: psort_partialsort_parallel_64(arr, len, num_threads, from, to); return(0); 231 | case 16: psort_partialsort_parallel_128(arr, len, num_threads, from, to); return(0); 232 | default: return(-1); 233 | } 234 | return(-1); 235 | } 236 | 237 | /* unittest */ 238 | #include 239 | 240 | #define UNITTEST_UNIQUE_ID 61 241 | unittest_config( 242 | .name = "psort", 243 | .depends_on = { "psort_radix_internal" } 244 | ); 245 | 246 | #define UNITTEST_ARR_LEN 10000 247 | 248 | /* srand */ 249 | unittest() 250 | { 251 | srand(time(NULL)); 252 | } 253 | 254 | /* full sort 16bit */ 255 | unittest() 256 | { 257 | /* init */ 258 | uint16_t *arr = (uint16_t *)malloc(sizeof(uint16_t) * UNITTEST_ARR_LEN); 259 | for(int64_t i = 0; i < UNITTEST_ARR_LEN; i++) { 260 | arr[i] = rand() % UINT16_MAX; 261 | } 262 | 263 | /* sort */ 264 | psort_full(arr, UNITTEST_ARR_LEN, sizeof(uint16_t), 4); 265 | 266 | /* check */ 267 | for(int64_t i = 1; i < UNITTEST_ARR_LEN; i++) { 268 | assert(arr[i - 1] <= arr[i], "%lld, %d, %d", i, arr[i - 1], arr[i]); 269 | } 270 | free(arr); 271 | } 272 | 273 | /* full sort 32bit */ 274 | unittest() 275 | { 276 | /* init */ 
277 | uint32_t *arr = (uint32_t *)malloc(sizeof(uint32_t) * UNITTEST_ARR_LEN); 278 | for(int64_t i = 0; i < UNITTEST_ARR_LEN; i++) { 279 | arr[i] = rand() % UINT32_MAX; 280 | } 281 | 282 | /* sort */ 283 | psort_full(arr, UNITTEST_ARR_LEN, sizeof(uint32_t), 4); 284 | 285 | /* check */ 286 | for(int64_t i = 1; i < UNITTEST_ARR_LEN; i++) { 287 | assert(arr[i - 1] <= arr[i], "%lld, %d, %d", i, arr[i - 1], arr[i]); 288 | } 289 | free(arr); 290 | } 291 | 292 | /* full sort 64bit */ 293 | unittest() 294 | { 295 | /* init */ 296 | uint64_t *arr = (uint64_t *)malloc(sizeof(uint64_t) * UNITTEST_ARR_LEN); 297 | for(int64_t i = 0; i < UNITTEST_ARR_LEN; i++) { 298 | arr[i] = (uint64_t)rand() + ((uint64_t)rand()<<32); 299 | } 300 | 301 | /* sort */ 302 | psort_full(arr, UNITTEST_ARR_LEN, sizeof(uint64_t), 4); 303 | 304 | /* check */ 305 | for(int64_t i = 1; i < UNITTEST_ARR_LEN; i++) { 306 | assert(arr[i - 1] <= arr[i], "%lld, %lld, %lld", i, arr[i - 1], arr[i]); 307 | } 308 | free(arr); 309 | } 310 | 311 | /* half sort 16bit */ 312 | unittest() 313 | { 314 | /* init */ 315 | uint16_t *arr = (uint16_t *)malloc(sizeof(uint16_t) * UNITTEST_ARR_LEN); 316 | for(int64_t i = 0; i < UNITTEST_ARR_LEN; i++) { 317 | arr[i] = rand() % UINT16_MAX; 318 | } 319 | 320 | /* sort */ 321 | psort_half(arr, UNITTEST_ARR_LEN, sizeof(uint16_t), 4); 322 | 323 | /* check */ 324 | for(int64_t i = 1; i < UNITTEST_ARR_LEN; i++) { 325 | assert((0xff & arr[i - 1]) <= (0xff & arr[i]), 326 | "%lld, %d, %d", i, 0xff & arr[i - 1], 0xff & arr[i]); 327 | } 328 | free(arr); 329 | } 330 | 331 | /* half sort 32bit */ 332 | unittest() 333 | { 334 | /* init */ 335 | uint32_t *arr = (uint32_t *)malloc(sizeof(uint32_t) * UNITTEST_ARR_LEN); 336 | for(int64_t i = 0; i < UNITTEST_ARR_LEN; i++) { 337 | arr[i] = rand() % UINT32_MAX; 338 | } 339 | 340 | /* sort */ 341 | psort_half(arr, UNITTEST_ARR_LEN, sizeof(uint32_t), 4); 342 | 343 | /* check */ 344 | for(int64_t i = 1; i < UNITTEST_ARR_LEN; i++) { 345 | assert((0xffff & 
arr[i - 1]) <= (0xffff & arr[i]), 346 | "%lld, %d, %d", i, 0xffff & arr[i - 1], 0xffff & arr[i]); 347 | } 348 | free(arr); 349 | } 350 | 351 | /* half sort 64bit */ 352 | unittest() 353 | { 354 | /* init */ 355 | uint64_t *arr = (uint64_t *)malloc(sizeof(uint64_t) * UNITTEST_ARR_LEN); 356 | for(int64_t i = 0; i < UNITTEST_ARR_LEN; i++) { 357 | arr[i] = (uint64_t)rand() + ((uint64_t)rand()<<32); 358 | } 359 | 360 | /* sort */ 361 | psort_half(arr, UNITTEST_ARR_LEN, sizeof(uint64_t), 4); 362 | 363 | /* check */ 364 | for(int64_t i = 1; i < UNITTEST_ARR_LEN; i++) { 365 | assert((0xffffffff & arr[i - 1]) <= (0xffffffff & arr[i]), 366 | "%lld, %lld, %lld", i, 0xffffffff & arr[i - 1], 0xffffffff & arr[i]); 367 | } 368 | free(arr); 369 | } 370 | 371 | /* partial sort 16bit */ 372 | unittest() 373 | { 374 | /* init */ 375 | uint16_t *arr = (uint16_t *)malloc(sizeof(uint16_t) * UNITTEST_ARR_LEN); 376 | for(int64_t i = 0; i < UNITTEST_ARR_LEN; i++) { 377 | arr[i] = rand() % UINT16_MAX; 378 | } 379 | 380 | /* sort */ 381 | psort_partial(arr, UNITTEST_ARR_LEN, sizeof(uint16_t), 4, 1, 2); 382 | 383 | /* check */ 384 | for(int64_t i = 1; i < UNITTEST_ARR_LEN; i++) { 385 | assert((0xff00 & arr[i - 1]) <= (0xff00 & arr[i]), 386 | "%lld, %d, %d", i, 0xff00 & arr[i - 1], 0xff00 & arr[i]); 387 | } 388 | free(arr); 389 | } 390 | 391 | /* partial sort 32bit */ 392 | unittest() 393 | { 394 | /* init */ 395 | uint32_t *arr = (uint32_t *)malloc(sizeof(uint32_t) * UNITTEST_ARR_LEN); 396 | for(int64_t i = 0; i < UNITTEST_ARR_LEN; i++) { 397 | arr[i] = rand() % UINT32_MAX; 398 | } 399 | 400 | /* sort */ 401 | psort_partial(arr, UNITTEST_ARR_LEN, sizeof(uint32_t), 4, 2, 4); 402 | 403 | /* check */ 404 | for(int64_t i = 1; i < UNITTEST_ARR_LEN; i++) { 405 | assert((0xffff0000 & arr[i - 1]) <= (0xffff0000 & arr[i]), 406 | "%lld, %d, %d", i, 0xffff0000 & arr[i - 1], 0xffff0000 & arr[i]); 407 | } 408 | free(arr); 409 | } 410 | 411 | /* partial sort 64bit */ 412 | unittest() 413 | { 414 | /* init 
*/ 415 | uint64_t *arr = (uint64_t *)malloc(sizeof(uint64_t) * UNITTEST_ARR_LEN); 416 | for(int64_t i = 0; i < UNITTEST_ARR_LEN; i++) { 417 | arr[i] = (uint64_t)rand() + ((uint64_t)rand()<<32); 418 | } 419 | 420 | /* sort */ 421 | psort_partial(arr, UNITTEST_ARR_LEN, sizeof(uint64_t), 4, 4, 8); 422 | 423 | /* check */ 424 | uint64_t const mask = 0xffffffff00000000; 425 | for(int64_t i = 1; i < UNITTEST_ARR_LEN; i++) { 426 | assert((mask & arr[i - 1]) <= (mask & arr[i]), 427 | "%lld, %lld, %lld", i, mask & arr[i - 1], mask & arr[i]); 428 | } 429 | free(arr); 430 | } 431 | 432 | /** 433 | * end of psort.c 434 | */ 435 | -------------------------------------------------------------------------------- /psort.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file psort.h 4 | * 5 | * @brief parallel integer sort (radixsort) library 6 | * 7 | * @author Hajime Suzuki 8 | * @date 2016/3/20 9 | * @license MIT 10 | */ 11 | #ifndef _PSORT_H_INCLUDED 12 | #define _PSORT_H_INCLUDED 13 | 14 | /** 15 | * @fn psort_full 16 | * @brief integer sort 17 | */ 18 | int psort_full( 19 | void *ptr, 20 | uint64_t len, 21 | uint64_t elem_size, 22 | uint64_t num_threads); 23 | 24 | /** 25 | * @fn psort_half 26 | * @brief key sort (sort the lower half of the element) 27 | */ 28 | int psort_half( 29 | void *ptr, 30 | uint64_t len, 31 | uint64_t elem_size, 32 | uint64_t num_threads); 33 | 34 | /** 35 | * @fn psort_partial 36 | */ 37 | int psort_partial( 38 | void *arr, 39 | uint64_t len, 40 | uint64_t elem_size, 41 | uint64_t num_threads, 42 | uint64_t from, 43 | uint64_t to); 44 | 45 | #endif /* _PSORT_H_INCLUDED */ 46 | /** 47 | * end of psort.h 48 | */ 49 | -------------------------------------------------------------------------------- /ptask.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file ptask.h 4 | * 5 | * @brief parallel task manager 6 | * 7 | * @author Hajime Suzuki 8 | * 
@date 2016/3/22 9 | * @license MIT 10 | */ 11 | #ifndef _PTASK_H_INCLUDED 12 | #define _PTASK_H_INCLUDED 13 | 14 | #include 15 | 16 | /** 17 | * @enum ptask_status 18 | */ 19 | enum ptask_status { 20 | PTASK_SUCCESS = 0, 21 | PTASK_ERROR = 1 22 | }; 23 | 24 | /** 25 | * @type ptask_t 26 | * @brief context container 27 | */ 28 | typedef struct ptask_context_s ptask_t; 29 | 30 | /** 31 | * @fn ptask_init 32 | * @brief initialize threads 33 | */ 34 | ptask_t *ptask_init( 35 | void *(*worker)(void *arg, void *item), 36 | void *worker_arg[], 37 | int64_t num_threads, 38 | int64_t queue_size); 39 | 40 | /** 41 | * @fn ptask_clean 42 | */ 43 | void ptask_clean( 44 | ptask_t *ctx); 45 | 46 | /** 47 | * @fn ptask_parallel 48 | */ 49 | int ptask_parallel( 50 | ptask_t *ctx, 51 | void *items[], 52 | void *results[]); 53 | 54 | /** 55 | * @fn ptask_stream 56 | * @brief get an item from source, throw it to worker, and gather the results into drain. 57 | */ 58 | int ptask_stream( 59 | ptask_t *ctx, 60 | void *(*source)(void *arg), 61 | void *source_arg, 62 | void (*drain)(void *arg, void *result), 63 | void *drain_arg, 64 | int64_t bulk_elems); 65 | 66 | #endif /* _PTASK_H_INCLUDED */ 67 | /** 68 | * end of ptask.h 69 | */ 70 | -------------------------------------------------------------------------------- /queue.c: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (C) 2011 by Tobias Thiel 3 | * Permission is hereby granted, free of charge, to any person obtaining a copy 4 | * of this software and associated documentation files (the "Software"), to deal 5 | * in the Software without restriction, including without limitation the rights 6 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | * copies of the Software, and to permit persons to whom the Software is 8 | * furnished to do so, subject to the following conditions: 9 | * 10 | * The above copyright notice and this permission notice shall be 
included in 11 | * all copies or substantial portions of the Software. 12 | * 13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | * THE SOFTWARE. 20 | */ 21 | 22 | #include "queue.h" 23 | #include "queue_internal.h" 24 | 25 | queue_t *queue_create(void) { 26 | queue_t *q = (queue_t *)malloc(sizeof(queue_t)); 27 | if(q != NULL) { 28 | q->mutex = (pthread_mutex_t *)malloc(sizeof(pthread_mutex_t)); 29 | if(q->mutex == NULL) { 30 | free(q); 31 | return NULL; 32 | } 33 | pthread_mutex_init(q->mutex, NULL); 34 | 35 | q->cond_get = (pthread_cond_t *)malloc(sizeof(pthread_cond_t)); 36 | if(q->cond_get == NULL) { 37 | pthread_mutex_destroy(q->mutex); 38 | free(q->mutex); 39 | free(q); 40 | return NULL; 41 | } 42 | pthread_cond_init(q->cond_get, NULL); 43 | 44 | q->cond_put = (pthread_cond_t *)malloc(sizeof(pthread_cond_t)); 45 | if(q->cond_put == NULL) { 46 | pthread_cond_destroy(q->cond_get); 47 | free(q->cond_get); 48 | pthread_mutex_destroy(q->mutex); 49 | free(q->mutex); 50 | free(q); 51 | return NULL; 52 | } 53 | pthread_cond_init(q->cond_put, NULL); 54 | 55 | q->first_el = NULL; 56 | q->last_el = NULL; 57 | q->num_els = 0; 58 | q->max_els = 0; 59 | q->new_data = 1; 60 | q->sort = 0; 61 | q->asc_order = 1; 62 | q->cmp_el = NULL; 63 | } 64 | 65 | return q; 66 | } 67 | 68 | queue_t *queue_create_limited(uintX_t max_elements) { 69 | queue_t *q = queue_create(); 70 | if(q != NULL) 71 | q->max_els = max_elements; 72 | 73 | return q; 74 | } 75 | 76 | queue_t *queue_create_sorted(int8_t asc, int (*cmp)(void *, void *)) { 77 | if(cmp == NULL) 
78 | return NULL; 79 | 80 | queue_t *q = queue_create(); 81 | if(q != NULL) { 82 | q->sort = 1; 83 | q->asc_order = asc; 84 | q->cmp_el = cmp; 85 | } 86 | 87 | return q; 88 | } 89 | 90 | queue_t *queue_create_limited_sorted(uintX_t max_elements, int8_t asc, int (*cmp)(void *, void *)) { 91 | if(cmp == NULL) 92 | return NULL; 93 | 94 | queue_t *q = queue_create(); 95 | if(q != NULL) { 96 | q->max_els = max_elements; 97 | q->sort = 1; 98 | q->asc_order = asc; 99 | q->cmp_el = cmp; 100 | } 101 | 102 | return q; 103 | } 104 | 105 | int8_t queue_destroy(queue_t *q) { 106 | if(q == NULL) 107 | return Q_ERR_INVALID; 108 | return queue_destroy_internal(q, 0, NULL); 109 | } 110 | 111 | int8_t queue_destroy_complete(queue_t *q, void (*ff)(void *)) { 112 | if(q == NULL) 113 | return Q_ERR_INVALID; 114 | return queue_destroy_internal(q, 1, ff); 115 | } 116 | 117 | int8_t queue_flush(queue_t *q) { 118 | if(q == NULL) 119 | return Q_ERR_INVALID; 120 | if (0 != queue_lock_internal(q)) 121 | return Q_ERR_LOCK; 122 | 123 | int8_t r = queue_flush_internal(q, 0, NULL); 124 | 125 | if (0 != queue_unlock_internal(q)) 126 | return Q_ERR_LOCK; 127 | return r; 128 | } 129 | 130 | int8_t queue_flush_complete(queue_t *q, void (*ff)(void *)) { 131 | if(q == NULL) 132 | return Q_ERR_INVALID; 133 | if (0 != queue_lock_internal(q)) 134 | return Q_ERR_LOCK; 135 | 136 | int8_t r = queue_flush_internal(q, 1, ff); 137 | 138 | if (0 != queue_unlock_internal(q)) 139 | return Q_ERR_LOCK; 140 | return r; 141 | } 142 | 143 | uintX_t queue_elements(queue_t *q) { 144 | uintX_t ret = UINTX_MAX; 145 | if(q == NULL) 146 | return ret; 147 | if (0 != queue_lock_internal(q)) 148 | return ret; 149 | 150 | ret = q->num_els; 151 | 152 | if (0 != queue_unlock_internal(q)) 153 | return Q_ERR_LOCK; 154 | return ret; 155 | } 156 | 157 | int8_t queue_empty(queue_t *q) { 158 | if(q == NULL) 159 | return Q_ERR_INVALID; 160 | if (0 != queue_lock_internal(q)) 161 | return Q_ERR_LOCK; 162 | 163 | uint8_t ret; 164 | 
if(q->first_el == NULL || q->last_el == NULL) 165 | ret = 1; 166 | else 167 | ret = 0; 168 | 169 | if (0 != queue_unlock_internal(q)) 170 | return Q_ERR_LOCK; 171 | return ret; 172 | } 173 | 174 | int8_t queue_set_new_data(queue_t *q, uint8_t v) { 175 | if(q == NULL) 176 | return Q_ERR_INVALID; 177 | if (0 != queue_lock_internal(q)) 178 | return Q_ERR_LOCK; 179 | q->new_data = v; 180 | if (0 != queue_unlock_internal(q)) 181 | return Q_ERR_LOCK; 182 | 183 | if(q->new_data == 0) { 184 | // notify waiting threads, when new data isn't accepted 185 | pthread_cond_broadcast(q->cond_get); 186 | pthread_cond_broadcast(q->cond_put); 187 | } 188 | 189 | return Q_OK; 190 | } 191 | 192 | uint8_t queue_get_new_data(queue_t *q) { 193 | if(q == NULL) 194 | return 0; 195 | if (0 != queue_lock_internal(q)) 196 | return 0; 197 | 198 | uint8_t r = q->new_data; 199 | 200 | if (0 != queue_unlock_internal(q)) 201 | return 0; 202 | return r; 203 | } 204 | 205 | int8_t queue_put(queue_t *q, void *el) { 206 | if(q == NULL) 207 | return Q_ERR_INVALID; 208 | if (0 != queue_lock_internal(q)) 209 | return Q_ERR_LOCK; 210 | 211 | int8_t r = queue_put_internal(q, el, NULL); 212 | 213 | if (0 != queue_unlock_internal(q)) 214 | return Q_ERR_LOCK; 215 | return r; 216 | } 217 | 218 | int8_t queue_put_wait(queue_t *q, void *el) { 219 | if(q == NULL) 220 | return Q_ERR_INVALID; 221 | if (0 != queue_lock_internal(q)) 222 | return Q_ERR_LOCK; 223 | 224 | int8_t r = queue_put_internal(q, el, pthread_cond_wait); 225 | 226 | if (0 != queue_unlock_internal(q)) 227 | return Q_ERR_LOCK; 228 | return r; 229 | } 230 | 231 | int8_t queue_get(queue_t *q, void **e) { 232 | *e = NULL; 233 | if(q == NULL) 234 | return Q_ERR_INVALID; 235 | if (0 != queue_lock_internal(q)) 236 | return Q_ERR_LOCK; 237 | 238 | int8_t r = queue_get_internal(q, e, NULL, NULL, NULL); 239 | 240 | if (0 != queue_unlock_internal(q)) 241 | return Q_ERR_LOCK; 242 | return r; 243 | } 244 | 245 | int8_t queue_get_wait(queue_t *q, void **e) { 246 
| *e = NULL; 247 | if(q == NULL) 248 | return Q_ERR_INVALID; 249 | if (0 != queue_lock_internal(q)) 250 | return Q_ERR_LOCK; 251 | 252 | int8_t r = queue_get_internal(q, e, pthread_cond_wait, NULL, NULL); 253 | 254 | if (0 != queue_unlock_internal(q)) 255 | return Q_ERR_LOCK; 256 | return r; 257 | } 258 | 259 | int8_t queue_get_filtered(queue_t *q, void **e, int (*cmp)(void *, void *), void *cmpel) { 260 | *e = NULL; 261 | if(q == NULL) 262 | return Q_ERR_INVALID; 263 | if (0 != queue_lock_internal(q)) 264 | return Q_ERR_LOCK; 265 | 266 | int8_t r = queue_get_internal(q, e, NULL, cmp, cmpel); 267 | 268 | if (0 != queue_unlock_internal(q)) 269 | return Q_ERR_LOCK; 270 | return r; 271 | } 272 | 273 | int8_t queue_flush_put(queue_t *q, void (*ff)(void *), void *e) { 274 | if(q == NULL) 275 | return Q_ERR_INVALID; 276 | if (0 != queue_lock_internal(q)) 277 | return Q_ERR_LOCK; 278 | 279 | int8_t r = queue_flush_internal(q, 0, NULL); 280 | r = queue_put_internal(q, e, NULL); 281 | 282 | if (0 != queue_unlock_internal(q)) 283 | return Q_ERR_LOCK; 284 | return r; 285 | } 286 | 287 | int8_t queue_flush_complete_put(queue_t *q, void (*ff)(void *), void *e) { 288 | if(q == NULL) 289 | return Q_ERR_INVALID; 290 | if (0 != queue_lock_internal(q)) 291 | return Q_ERR_LOCK; 292 | 293 | int8_t r = queue_flush_internal(q, 1, ff); 294 | r = queue_put_internal(q, e, NULL); 295 | 296 | if (0 != queue_unlock_internal(q)) 297 | return Q_ERR_LOCK; 298 | return r; 299 | } 300 | -------------------------------------------------------------------------------- /queue.h: -------------------------------------------------------------------------------- 1 | #ifndef __QUEUE_H__ 2 | #define __QUEUE_H__ 3 | 4 | /** 5 | * Copyright (C) 2011 by Tobias Thiel 6 | * Permission is hereby granted, free of charge, to any person obtaining a copy 7 | * of this software and associated documentation files (the "Software"), to deal 8 | * in the Software without restriction, including without limitation the 
rights 9 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | * copies of the Software, and to permit persons to whom the Software is 11 | * furnished to do so, subject to the following conditions: 12 | * 13 | * The above copyright notice and this permission notice shall be included in 14 | * all copies or substantial portions of the Software. 15 | * 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | * THE SOFTWARE. 23 | */ 24 | 25 | /* for compatibility with -std=c99 (2016/4/26 by Hajime Suzuki) */ 26 | #ifndef _POSIX_C_SOURCE 27 | #define _POSIX_C_SOURCE 200112L 28 | #endif 29 | 30 | #ifndef _BSD_SOURCE 31 | #define _BSD_SOURCE 32 | #endif 33 | /* end */ 34 | 35 | 36 | #include 37 | #include 38 | #include /* (u)intX_t */ 39 | #ifndef _WIN32 40 | #include /* usleep */ 41 | #else 42 | #include /* Sleep */ 43 | #endif 44 | #include /* EBUSY */ 45 | 46 | #include /* pthread_mutex_t, pthread_cond_t */ 47 | 48 | #ifdef _WIN32 49 | #define sleepmilli(x) Sleep(x) 50 | #else 51 | #define sleepmilli(x) usleep(x * 1000) 52 | #endif 53 | 54 | /** 55 | * type which is used for counting the number of elements. 
 * needed for limited queues
 */
#ifndef UINTX_MAX
typedef uint16_t uintX_t;
#define UINTX_MAX UINT16_MAX
#endif

/**
 * simple macros to reduce necessary casting to void *
 * or function pointers
 */
#define DEFINE_Q_DESTROY(fname, type) int8_t fname(queue_t *q, void (*ff)(type *)) { return queue_destroy_complete(q, (void (*)(void *))ff); }
#define DEFINE_Q_FLUSH(fname, type) int8_t fname(queue_t *q, void (*ff)(type *)) { return queue_flush_complete(q, (void (*)(void *))ff); }
#define DEFINE_Q_GET(fname, type) int8_t fname(queue_t *q, type **e) { return queue_get(q, (void **)e); }
#define DEFINE_Q_GET_WAIT(fname, type) int8_t fname(queue_t *q, type **e) { return queue_get_wait(q, (void **)e); }
#define DEFINE_Q_PUT(fname, type) int8_t fname(queue_t *q, type *e) { return queue_put(q, (void *)e); }
#define DEFINE_Q_PUT_WAIT(fname, type) int8_t fname(queue_t *q, type *e) { return queue_put_wait(q, (void *)e); }
#define DEFINE_Q_FLUSH_PUT(fname, type) int8_t fname(queue_t *q, void (*ff)(type *), type *e) { return queue_flush_complete_put(q, (void (*)(void *))ff, (void *)e); }

/**
 * returned error codes, everything except Q_OK should be < 0
 */
/* NOTE(review): the enum tag "queue_erros_e" is an upstream typo; kept as-is
 * for source compatibility — refer to the type as queue_errors_t */
typedef enum queue_erros_e {
	Q_OK = 0,
	Q_ERR_INVALID = -1,
	Q_ERR_LOCK = -2,
	Q_ERR_MEM = -3,
	Q_ERR_NONEWDATA = -4,
	Q_ERR_INVALID_ELEMENT = -5,
	Q_ERR_INVALID_CB = -6,
	Q_ERR_NUM_ELEMENTS = -7
} queue_errors_t;

/* singly-linked list node wrapping one user data pointer */
typedef struct queue_element_s {
	void *data;
	struct queue_element_s *next;
} queue_element_t;

typedef struct queue_s {
	queue_element_t *first_el, *last_el;
	// (max.) number of elements; max_els == 0 means "unlimited"
	uintX_t num_els;
	uintX_t max_els;
	// no new data allowed when set to 0
	uint8_t new_data;
	// sorted queue
	int8_t sort;
	int8_t asc_order;
	int (*cmp_el)(void *, void *);
	// multithreaded
	pthread_mutex_t *mutex;
	pthread_cond_t *cond_get;
	pthread_cond_t *cond_put;
} queue_t;

/**
 * initializes and allocates a queue with unlimited elements
 *
 * returns NULL on error, or a pointer to the queue
 */
queue_t *queue_create(void);

/**
 * initializes and allocates a queue
 *
 * max_elements - maximum number of elements which are allowed in the queue, == 0 for "unlimited"
 *
 * returns NULL on error, or a pointer to the queue
 */
queue_t *queue_create_limited(uintX_t max_elements);

/**
 * just like queue_create()
 * additionally you can specify a comparator function so that your elements are ordered in the queue
 * elements will only be ordered if you create the queue with this method
 * the compare function should return 0 if both elements are the same, < 0 if the first is smaller
 * and > 0 if the second is smaller
 *
 * asc - sort in ascending order if not 0
 * cmp - comparator function, NULL will create an error
 *
 * returns NULL on error, or a pointer to the queue
 */
queue_t *queue_create_sorted(int8_t asc, int (*cmp)(void *, void *));

/**
 * see queue_create_limited() and queue_create_sorted()
 */
queue_t *queue_create_limited_sorted(uintX_t max_elements, int8_t asc, int (*cmp)(void *, void *));

/**
 * releases the memory internally allocated and destroys the queue
 * you have to release the memory the elements in the queue use
 *
 * q - the queue to be destroyed
 */
int8_t queue_destroy(queue_t *q);

/**
 * in addition to queue_destroy(), this function will also free the memory of your elements
 *
 * q - the queue to be destroyed
 * ff - the free function to be used for the elements (free() if NULL)
 */
int8_t queue_destroy_complete(queue_t *q, void (*ff)(void *));

/**
 * deletes all the elements from the queue, but does not release their memory
 *
 * q - the queue
 */
int8_t queue_flush(queue_t *q);

/**
 * just like queue_flush(), but releases the memory of the elements
 *
 * q - the queue
 * ff - the free function to be used for the elements (free() if NULL)
 */
int8_t queue_flush_complete(queue_t *q, void (*ff)(void *));

/**
 * just like queue_flush(), followed by a queue_put() atomically
 *
 * q - the queue
 * ff - the free function to be used for the elements (free() if NULL)
 * e - the element
 */
int8_t queue_flush_put(queue_t *q, void (*ff)(void *), void *e);

/**
 * just like queue_flush_complete(), followed by a queue_put() atomically
 *
 * q - the queue
 * ff - the free function to be used for the elements (free() if NULL)
 * e - the element
 */
int8_t queue_flush_complete_put(queue_t *q, void (*ff)(void *), void *e);

/**
 * returns the number of elements in the queue
 *
 * q - the queue
 *
 * returns number of elements or UINTX_MAX if an error occurred
 */
uintX_t queue_elements(queue_t *q);

/**
 * returns whether the queue is empty
 * returns empty if there was an error
 *
 * q - the queue
 *
 * returns zero if queue is NOT empty, < 0 => error
 */
int8_t queue_empty(queue_t *q);

/**
 * put a new element at the end of the queue
 * will produce an error if you called queue_set_new_data(q, 0)
 *
 * q - the queue
 * e - the element
 *
 * returns 0 if everything worked, > 0 if max_elements is reached, < 0 if error occurred
 * NOTE(review): queue_put_internal() signals a full queue with Q_ERR_NUM_ELEMENTS,
 * which is < 0, so the "> 0" case above looks unreachable — verify against queue.c
 */
int8_t queue_put(queue_t *q, void *e);

/**
 * the same as queue_put(), but will wait if max_elements is reached,
 * until queue_set_new_data(q, 0) is called or elements are removed
 *
 * q - the queue
 * e - the element
 *
 * returns 0 if everything worked, < 0 if error occurred
 */
int8_t queue_put_wait(queue_t *q, void *e);

/**
 * get the first element of the queue
 *
 * q - the queue
 * e - pointer which will be set to the element
 *
 * returns 0 if everything worked, > 0 if no elements in queue, < 0 if error occurred
 * NOTE(review): queue_get_internal() reports an empty queue with Q_ERR_NUM_ELEMENTS,
 * which is < 0, so the "> 0" case above looks unreachable — verify against queue.c
 */
int8_t queue_get(queue_t *q, void **e);

/**
 * the same as queue_get(), but will wait if no elements are in the queue,
 * until queue_set_new_data(q, 0) is called or new elements are added
 *
 * q - the queue
 * e - pointer which will be set to the element
 *
 * returns 0 if everything worked, < 0 if error occurred
 */
int8_t queue_get_wait(queue_t *q, void **e);

/**
 * gets the first element for which the given compare function returns 0 (equal)
 * does NOT wait if no elements in the queue
 * the compare function should return 0 if both elements are the same, < 0 if the first is smaller
 * and > 0 if the second is smaller
 *
 * q - the queue
 * e - pointer which will be set to the element
 * cmp - comparator function, NULL will create an error
 * cmpel - element with which should be compared
 *
 * returns 0 if everything worked, < 0 => error, Q_ERR_NUM_ELEMENTS(<0) if no element fulfills the requirement
 * NOTE(review): queue_get_internal() actually returns Q_ERR_INVALID_ELEMENT when
 * no element matches the filter — confirm which constant callers rely on
 */
int8_t queue_get_filtered(queue_t *q, void **e, int (*cmp)(void *, void *), void *cmpel);

/**
 * sets whether the queue will accept new data
 * defaults to 1 on creation
 * you should use this function when you're done and queue_put_wait() and queue_get_wait() should return
 * queue_put()/queue_put_wait() won't have any effect when new data isn't accepted.
 *
 * q - the queue
 * v - 0 new data NOT accepted
 */
int8_t queue_set_new_data(queue_t *q, uint8_t v);

/**
 * returns whether the queue will accept new data
 * also returns 0, if there was an error
 *
 * q - the queue
 *
 * return 0 if new data is NOT accepted
 */
uint8_t queue_get_new_data(queue_t *q);

#endif /* __QUEUE_H__ */
--------------------------------------------------------------------------------
/queue_internal.c:
--------------------------------------------------------------------------------
/**
 * Copyright (C) 2011 by Tobias Thiel
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
20 | */ 21 | 22 | #include "queue.h" 23 | #include "queue_internal.h" 24 | 25 | int8_t queue_lock_internal(queue_t *q) { 26 | if (q == NULL) 27 | return Q_ERR_INVALID; 28 | // all errors are unrecoverable for us 29 | if(0 != pthread_mutex_lock(q->mutex)) 30 | return Q_ERR_LOCK; 31 | return Q_OK; 32 | } 33 | 34 | int8_t queue_unlock_internal(queue_t *q) { 35 | if (q == NULL) 36 | return Q_ERR_INVALID; 37 | // all errors are unrecoverable for us 38 | if(0 != pthread_mutex_unlock(q->mutex)) 39 | return Q_ERR_LOCK; 40 | return Q_OK; 41 | } 42 | 43 | int8_t queue_destroy_internal(queue_t *q, uint8_t fd, void (*ff)(void *)) { 44 | // this method will not immediately return on error, 45 | // it will try to release all the memory that was allocated. 46 | int error = Q_OK; 47 | // make sure no new data comes and wake all waiting threads 48 | error = queue_set_new_data(q, 0); 49 | error = queue_lock_internal(q); 50 | 51 | // release internal element memory 52 | error = queue_flush_internal(q, fd, ff); 53 | 54 | // destroy lock and queue etc 55 | error = pthread_cond_destroy(q->cond_get); 56 | free(q->cond_get); 57 | error = pthread_cond_destroy(q->cond_put); 58 | free(q->cond_put); 59 | 60 | error = queue_unlock_internal(q); 61 | while(EBUSY == (error = pthread_mutex_destroy(q->mutex))) 62 | sleepmilli(100); 63 | free(q->mutex); 64 | 65 | // destroy queue 66 | free(q); 67 | return error; 68 | } 69 | 70 | int8_t queue_flush_internal(queue_t *q, uint8_t fd, void (*ff)(void *)) { 71 | if(q == NULL) 72 | return Q_ERR_INVALID; 73 | 74 | queue_element_t *qe = q->first_el; 75 | queue_element_t *nqe = NULL; 76 | while(qe != NULL) { 77 | nqe = qe->next; 78 | if(fd != 0 && ff == NULL) { 79 | free(qe->data); 80 | } else if(fd != 0 && ff != NULL) { 81 | ff(qe->data); 82 | } 83 | free(qe); 84 | qe = nqe; 85 | } 86 | q->first_el = NULL; 87 | q->last_el = NULL; 88 | q->num_els = 0; 89 | 90 | return Q_OK; 91 | } 92 | 93 | int8_t queue_put_internal(queue_t *q , void *el, int 
(*action)(pthread_cond_t *, pthread_mutex_t *)) { 94 | if(q == NULL) // queue not valid 95 | return Q_ERR_INVALID; 96 | 97 | if(q->new_data == 0) { // no new data allowed 98 | return Q_ERR_NONEWDATA; 99 | } 100 | 101 | // max_elements already reached? 102 | // if condition _needs_ to be in sync with while loop below! 103 | if(q->num_els == (UINTX_MAX - 1) || (q->max_els != 0 && q->num_els == q->max_els)) { 104 | if(action == NULL) { 105 | return Q_ERR_NUM_ELEMENTS; 106 | } else { 107 | while ((q->num_els == (UINTX_MAX - 1) || (q->max_els != 0 && q->num_els == q->max_els)) && q->new_data != 0) 108 | action(q->cond_put, q->mutex); 109 | if(q->new_data == 0) { 110 | return Q_ERR_NONEWDATA; 111 | } 112 | } 113 | } 114 | 115 | queue_element_t *new_el = (queue_element_t *)malloc(sizeof(queue_element_t)); 116 | if(new_el == NULL) { // could not allocate memory for new elements 117 | return Q_ERR_MEM; 118 | } 119 | new_el->data = el; 120 | new_el->next = NULL; 121 | 122 | if(q->sort == 0 || q->first_el == NULL) { 123 | // insert at the end when we don't want to sort or the queue is empty 124 | if(q->last_el == NULL) 125 | q->first_el = new_el; 126 | else 127 | q->last_el->next = new_el; 128 | q->last_el = new_el; 129 | } else { 130 | // search appropriate place to sort element in 131 | queue_element_t *s = q->first_el; // s != NULL, because of if condition above 132 | queue_element_t *t = NULL; 133 | int asc_first_el = q->asc_order != 0 && q->cmp_el(s->data, el) >= 0; 134 | int desc_first_el = q->asc_order == 0 && q->cmp_el(s->data, el) <= 0; 135 | 136 | if(asc_first_el == 0 && desc_first_el == 0) { 137 | // element will be inserted between s and t 138 | for(s = q->first_el, t = s->next; s != NULL && t != NULL; s = t, t = t->next) { 139 | if(q->asc_order != 0 && q->cmp_el(s->data, el) <= 0 && q->cmp_el(el, t->data) <= 0) { 140 | // asc: s <= e <= t 141 | break; 142 | } else if(q->asc_order == 0 && q->cmp_el(s->data, el) >= 0 && q->cmp_el(el, t->data) >= 0) { 143 | // desc: 
s >= e >= t 144 | break; 145 | } 146 | } 147 | // actually insert 148 | s->next = new_el; 149 | new_el->next = t; 150 | if(t == NULL) 151 | q->last_el = new_el; 152 | } else if(asc_first_el != 0 || desc_first_el != 0) { 153 | // add at front 154 | new_el->next = q->first_el; 155 | q->first_el = new_el; 156 | } 157 | } 158 | q->num_els++; 159 | // notify only one waiting thread, so that we don't have to check and fall to sleep because we were to slow 160 | pthread_cond_signal(q->cond_get); 161 | 162 | return Q_OK; 163 | } 164 | 165 | int8_t queue_get_internal(queue_t *q, void **e, int (*action)(pthread_cond_t *, pthread_mutex_t *), int (*cmp)(void *, void *), void *cmpel) { 166 | if(q == NULL) { // queue not valid 167 | *e = NULL; 168 | return Q_ERR_INVALID; 169 | } 170 | 171 | // are elements in the queue? 172 | if(q->num_els == 0) { 173 | if(action == NULL) { 174 | *e = NULL; 175 | return Q_ERR_NUM_ELEMENTS; 176 | } else { 177 | while(q->num_els == 0 && q->new_data != 0) 178 | action(q->cond_get, q->mutex); 179 | if (q->num_els == 0 && q->new_data == 0) 180 | return Q_ERR_NONEWDATA; 181 | } 182 | } 183 | 184 | // get first element (which fulfills the requirements) 185 | queue_element_t *el_prev = NULL, *el = q->first_el; 186 | while(cmp != NULL && el != NULL && 0 != cmp(el, cmpel)) { 187 | el_prev = el; 188 | el = el->next; 189 | } 190 | 191 | if(el != NULL && el_prev == NULL) { 192 | // first element is removed 193 | q->first_el = el->next; 194 | if(q->first_el == NULL) 195 | q->last_el = NULL; 196 | q->num_els--; 197 | *e = el->data; 198 | free(el); 199 | } else if(el != NULL && el_prev != NULL) { 200 | // element in the middle is removed 201 | el_prev->next = el->next; 202 | q->num_els--; 203 | *e = el->data; 204 | free(el); 205 | } else { 206 | // element is invalid 207 | *e = NULL; 208 | return Q_ERR_INVALID_ELEMENT; 209 | } 210 | 211 | // notify only one waiting thread 212 | pthread_cond_signal(q->cond_put); 213 | 214 | return Q_OK; 215 | } 216 | 
-------------------------------------------------------------------------------- /queue_internal.h: -------------------------------------------------------------------------------- 1 | #ifndef __QUEUE_INTERNAL_H__ 2 | #define __QUEUE_INTERNAL_H__ 3 | 4 | /** 5 | * Copyright (C) 2011 by Tobias Thiel 6 | * Permission is hereby granted, free of charge, to any person obtaining a copy 7 | * of this software and associated documentation files (the "Software"), to deal 8 | * in the Software without restriction, including without limitation the rights 9 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | * copies of the Software, and to permit persons to whom the Software is 11 | * furnished to do so, subject to the following conditions: 12 | * 13 | * The above copyright notice and this permission notice shall be included in 14 | * all copies or substantial portions of the Software. 15 | * 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | * THE SOFTWARE. 23 | */ 24 | 25 | #include 26 | #include 27 | 28 | #include "queue.h" 29 | 30 | /** 31 | * ATTENTION: 32 | * these functions are internal and should not be used directly. 
33 | * they may _not_ lock properly, expecting the caller to do so 34 | */ 35 | 36 | /** 37 | * locks the queue 38 | * returns 0 on success, else not usable 39 | */ 40 | int8_t queue_lock_internal(queue_t *q); 41 | 42 | /** 43 | * unlocks the queue 44 | * returns 0 on success, else not usable 45 | */ 46 | int8_t queue_unlock_internal(queue_t *q); 47 | 48 | /** 49 | * adds an element to the queue. 50 | * when action is NULL the function returns with an error code. 51 | * queue _has_ to be locked. 52 | * 53 | * q - the queue 54 | * el - the element 55 | * action - specifies what should be executed if max_elements is reached. 56 | * 57 | * returns < 0 => error, 0 okay 58 | */ 59 | int8_t queue_put_internal(queue_t *q, void *el, int (*action)(pthread_cond_t *, pthread_mutex_t *)); 60 | 61 | /** 62 | * gets the first element in the queue. 63 | * when action is NULL the function returns with an error code. 64 | * queue _has_ to be locked. 65 | * 66 | * q - the queue 67 | * e - element pointer 68 | * action - specifies what should be executed if there are no elements in the queue 69 | * cmp - comparator function, NULL will create an error 70 | * cmpel - element with which should be compared 71 | * 72 | * returns < 0 => error, 0 okay 73 | */ 74 | int8_t queue_get_internal(queue_t *q, void **e, int (*action)(pthread_cond_t *, pthread_mutex_t *), int (*cmp)(void *, void *), void *cmpel); 75 | 76 | /** 77 | * destroys a queue. 78 | * queue will be locked. 79 | * 80 | * q - the queue 81 | * fd - should element data be freed? 0 => No, Otherwise => Yes 82 | * ff - function to release the memory, NULL => free() 83 | */ 84 | int8_t queue_destroy_internal(queue_t *q, uint8_t fd, void (*ff)(void *)); 85 | 86 | /** 87 | * flushes a queue. 88 | * deletes all elements in the queue. 89 | * queue _has_ to be locked. 90 | * 91 | * q - the queue 92 | * fd - should element data be freed? 
0 => No, Otherwise => Yes 93 | * ff - function to release the memory, NULL => free() 94 | */ 95 | int8_t queue_flush_internal(queue_t *q, uint8_t fd, void (*ff)(void *)); 96 | 97 | #endif /* __QUEUE_INTERNAL_H__ */ 98 | -------------------------------------------------------------------------------- /sassert.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file sassert.h 4 | * 5 | * @brief static assertion 6 | */ 7 | #ifndef _SASSERT_H_INCLUDED 8 | #define _SASSERT_H_INCLUDED 9 | 10 | #include 11 | 12 | /** 13 | * assert macros 14 | */ 15 | #define _sa_cat_intl(x, y) x##y 16 | #define _sa_cat(x, y) _sa_cat_intl(x, y) 17 | /* static assert */ 18 | #define _static_assert(expr) typedef char _sa_cat(_st, __LINE__)[(expr) ? 1 : -1] 19 | /* check offset equality of elements in two structs */ 20 | #define _static_assert_offset(st1, mb1, st2, mb2, ofs) \ 21 | _static_assert(offsetof(st1, mb1) == offsetof(st2, mb2) + ofs) 22 | 23 | 24 | #endif /* #ifndef _SASSERT_H_INCLUDED */ 25 | /** 26 | * end of sassert.h 27 | */ 28 | -------------------------------------------------------------------------------- /sr.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file sr.h 4 | * 5 | * @brief sequence reader implementation 6 | */ 7 | #ifndef _SR_H_INCLUDED 8 | #define _SR_H_INCLUDED 9 | 10 | #include 11 | #include "gref.h" 12 | 13 | /** 14 | * @type sr_t 15 | */ 16 | typedef struct sr_s sr_t; 17 | 18 | /** 19 | * @enum sr_format 20 | * @brief format flag constant 21 | */ 22 | enum sr_format { 23 | SR_UNKNOWN = 0, 24 | SR_FASTA = 1, 25 | SR_FASTQ = 2, 26 | SR_FAST5 = 3, 27 | SR_GFA = 4 28 | }; 29 | 30 | /** 31 | * @enum sr_revcomp 32 | */ 33 | enum sr_revcomp { 34 | SR_FW_ONLY = 1, 35 | SR_FW_RV = 2 36 | }; 37 | 38 | /** 39 | * @struct sr_params_s 40 | */ 41 | struct sr_params_s { 42 | uint8_t k; /* kmer length */ 43 | uint8_t seq_direction; /* FW_ONLY or FW_RV */ 44 | uint8_t 
format; /* equal to fna_params_t.file_format */ 45 | uint8_t reserved1; 46 | uint16_t num_threads; 47 | uint16_t reserved2; 48 | uint32_t pool_size; 49 | uint32_t read_mem_size; 50 | void *lmm; /* lmm memory manager */ 51 | }; 52 | typedef struct sr_params_s sr_params_t; 53 | 54 | #define SR_PARAMS(...) ( &((struct sr_params_s const){ __VA_ARGS__ }) ) 55 | 56 | /** 57 | * @struct sr_gref_s 58 | * @brief gref and iter container 59 | */ 60 | struct sr_gref_s { 61 | void *lmm; 62 | char const *path; 63 | gref_t const *gref; 64 | gref_iter_t *iter; 65 | void *reserved1[2]; 66 | uint32_t reserved2[2]; 67 | }; 68 | 69 | /** 70 | * @fn sr_init 71 | */ 72 | sr_t *sr_init( 73 | char const *path, 74 | sr_params_t const *params); 75 | 76 | /** 77 | * @fn sr_clean 78 | */ 79 | void sr_clean( 80 | sr_t *sr); 81 | 82 | /** 83 | * @fn sr_get_index 84 | */ 85 | struct sr_gref_s *sr_get_index( 86 | sr_t *sr); 87 | 88 | /** 89 | * @fn sr_get_iter 90 | */ 91 | struct sr_gref_s *sr_get_iter( 92 | sr_t *sr); 93 | 94 | /** 95 | * @fn sr_gref_free 96 | */ 97 | void sr_gref_free( 98 | struct sr_gref_s *gref); 99 | 100 | #endif /* _SR_H_INCLUDED */ 101 | /** 102 | * end of sr.h 103 | */ 104 | -------------------------------------------------------------------------------- /tree.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file tree.h 4 | * 5 | * @brief an wrapper of ngx_rbtree.c in nginx (https://nginx.org/) core library 6 | */ 7 | #ifndef _TREE_H_INCLUDED 8 | #define _TREE_H_INCLUDED 9 | 10 | #include 11 | 12 | 13 | /** 14 | * @type rbtree_t 15 | */ 16 | typedef struct rbtree_s rbtree_t; 17 | 18 | /** 19 | * @struct rbtree_node_s 20 | * @brief object must have a rbtree_node_t field at the head. 
21 | */ 22 | struct rbtree_node_s { 23 | uint8_t pad[24]; 24 | int64_t zero; /* must be zeroed if external memory is used */ 25 | int64_t key; 26 | }; 27 | typedef struct rbtree_node_s rbtree_node_t; 28 | 29 | /** 30 | * @struct rbtree_params_s 31 | */ 32 | struct rbtree_params_s { 33 | void *lmm; 34 | }; 35 | typedef struct rbtree_params_s rbtree_params_t; 36 | #define RBTREE_PARAMS(...) ( &((struct rbtree_params_s const) { __VA_ARGS__ }) ) 37 | 38 | /** 39 | * @fn rbtree_init 40 | */ 41 | rbtree_t *rbtree_init(uint64_t object_size, rbtree_params_t const *params); 42 | 43 | /** 44 | * @fn rbtree_clean 45 | */ 46 | void rbtree_clean(rbtree_t *tree); 47 | 48 | /** 49 | * @fn rbtree_flush 50 | */ 51 | void rbtree_flush(rbtree_t *tree); 52 | 53 | /** 54 | * @fn rbtree_create_node 55 | * @brief create a new node (not inserted in the tree) 56 | */ 57 | rbtree_node_t *rbtree_create_node(rbtree_t *tree); 58 | 59 | /** 60 | * @fn rbtree_insert 61 | * @brief insert a node 62 | */ 63 | void rbtree_insert(rbtree_t *tree, rbtree_node_t *node); 64 | 65 | /** 66 | * @fn rbtree_remove 67 | * @brief remove a node, automatically freed if malloc'd with rbtree_reserve_node 68 | */ 69 | void rbtree_remove(rbtree_t *tree, rbtree_node_t *node); 70 | 71 | /** 72 | * @fn rbtree_search_key 73 | * @brief search a node by key, returning the leftmost node 74 | */ 75 | rbtree_node_t *rbtree_search_key(rbtree_t *tree, int64_t key); 76 | 77 | /** 78 | * @fn rbtree_search_key_left 79 | * @brief search a node by key. returns the nearest node in the left half of the tree if key was not found. 80 | */ 81 | rbtree_node_t *rbtree_search_key_left(rbtree_t *tree, int64_t key); 82 | 83 | /** 84 | * @fn rbtree_search_key_right 85 | * @brief search a node by key. returns the nearest node in the right half of the tree if key was not found. 
86 | */ 87 | rbtree_node_t *rbtree_search_key_right(rbtree_t *tree, int64_t key); 88 | 89 | /** 90 | * @fn rbtree_left 91 | * @brief returns the left next node 92 | */ 93 | rbtree_node_t *rbtree_left(rbtree_t *tree, rbtree_node_t const *node); 94 | 95 | /** 96 | * @fn rbtree_right 97 | * @brief returns the right next node 98 | */ 99 | rbtree_node_t *rbtree_right(rbtree_t *tree, rbtree_node_t const *node); 100 | 101 | /** 102 | * @fn rbtree_walk 103 | * @breif iterate over tree 104 | */ 105 | typedef void (*rbtree_walk_t)(rbtree_node_t *node, void *ctx); 106 | void rbtree_walk(rbtree_t *tree, rbtree_walk_t fn, void *ctx); 107 | 108 | 109 | 110 | 111 | /* interval tree implementation */ 112 | 113 | 114 | /** 115 | * @type ivtree_t 116 | */ 117 | typedef struct rbtree_s ivtree_t; 118 | 119 | /** 120 | * @struct ivtree_node_s 121 | */ 122 | struct ivtree_node_s { 123 | uint8_t pad[24]; 124 | int64_t zero; /* must be zeroed if external memory is used */ 125 | int64_t lkey; 126 | int64_t rkey; 127 | int64_t reserved; 128 | }; 129 | typedef struct ivtree_node_s ivtree_node_t; 130 | 131 | /** 132 | * @type ivtree_iter_t 133 | */ 134 | typedef struct ivtree_iter_s ivtree_iter_t; 135 | 136 | /** 137 | * @type ivtree_params_s 138 | */ 139 | typedef struct rvtree_params_s ivtree_params_t; 140 | #define IVTREE_PARAMS(...) 
( &((struct rbtree_params_s const) { __VA_ARGS__ }) ) 141 | 142 | /** 143 | * @fn ivtree_init 144 | */ 145 | ivtree_t *ivtree_init(uint64_t object_size, ivtree_params_t const *params); 146 | 147 | /** 148 | * @fn ivtree_clean 149 | */ 150 | void ivtree_clean(ivtree_t *tree); 151 | 152 | /** 153 | * @fn ivtree_flush 154 | */ 155 | void ivtree_flush(ivtree_t *tree); 156 | 157 | /** 158 | * @fn ivtree_create_node 159 | * @brief create a new node (not inserted in the tree) 160 | */ 161 | ivtree_node_t *ivtree_create_node(ivtree_t *tree); 162 | 163 | /** 164 | * @fn ivtree_insert 165 | * @brief insert a node 166 | */ 167 | void ivtree_insert(ivtree_t *tree, ivtree_node_t *node); 168 | 169 | /** 170 | * @fn ivtree_remove 171 | * @brief remove a node, automatically freed if malloc'd with ivtree_reserve_node 172 | */ 173 | void ivtree_remove(ivtree_t *tree, ivtree_node_t *node); 174 | 175 | /** 176 | * @fn ivtree_contained 177 | * @brief return a set of sections contained in [lkey, rkey) 178 | */ 179 | ivtree_iter_t *ivtree_contained(ivtree_t *tree, int64_t lkey, int64_t rkey); 180 | 181 | /** 182 | * @fn ivtree_containing 183 | * @brief return a set of sections containing [lkey, rkey) 184 | */ 185 | ivtree_iter_t *ivtree_containing(ivtree_t *tree, int64_t lkey, int64_t rkey); 186 | 187 | /** 188 | * @fn ivtree_intersect 189 | * @brief return a set of sections intersect with [lkey, rkey) 190 | */ 191 | ivtree_iter_t *ivtree_intersect(ivtree_t *tree, int64_t lkey, int64_t rkey); 192 | 193 | /** 194 | * @fn ivtree_next 195 | */ 196 | ivtree_node_t *ivtree_next(ivtree_iter_t *iter); 197 | 198 | /** 199 | * @fn ivtree_iter_clean 200 | */ 201 | void ivtree_iter_clean(ivtree_iter_t *iter); 202 | 203 | /** 204 | * @fn ivtree_walk 205 | * @breif iterate over tree 206 | */ 207 | typedef void (*ivtree_walk_t)(ivtree_node_t *node, void *ctx); 208 | void ivtree_walk(ivtree_t *tree, ivtree_walk_t fn, void *ctx); 209 | 210 | 211 | #endif 212 | /** 213 | * end of tree.h 214 | */ 215 | 
-------------------------------------------------------------------------------- /unittest.c: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file unittest.c 4 | * @brief unittest function caller 5 | */ 6 | #define UNITTEST 1 7 | 8 | #include "unittest.h" 9 | 10 | int main(int argc, char *argv[]) 11 | { 12 | return(unittest_main(argc, argv)); 13 | } 14 | 15 | /** 16 | * end of unittest.c 17 | */ 18 | -------------------------------------------------------------------------------- /waf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocxtal/comb/1175a58d84038a4e2426daed29001c1cf7d53ef0/waf -------------------------------------------------------------------------------- /wscript: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | # encoding: utf-8 3 | 4 | 5 | def isxdigit(string): 6 | try: 7 | int(string, 16) 8 | return True 9 | except ValueError: 10 | return False 11 | 12 | def check_output(*args): 13 | import subprocess 14 | try: 15 | process = subprocess.Popen(stdout = subprocess.PIPE, *args) 16 | output, unused_err = process.communicate() 17 | retcode = process.poll() 18 | return(output if retcode == 0 else None) 19 | except: 20 | return(None) 21 | 22 | def get_hash(default_version_string): 23 | s = check_output(['git', 'rev-parse', 'HEAD']) 24 | return(s.decode().split('\n')[0] if s is not None else default_version_string) 25 | 26 | def get_tag(hash): 27 | s = check_output(['git', 'show-ref', '--tags']) 28 | return(filter(lambda a: a == hash, s.decode().split('\n')) if s is not None else '') 29 | 30 | def get_version_string(default_version_string): 31 | hash = get_hash(default_version_string) 32 | tag = get_tag(hash) 33 | hash = hash[0:7] if isxdigit(hash) == True else hash 34 | return('"%s"' % (tag if tag != '' else hash)) 35 | 36 | 37 | def options(opt): 38 | opt.load('compiler_c') 
39 | opt.recurse('arch') 40 | 41 | def configure(conf): 42 | conf.load('ar') 43 | conf.load('compiler_c') 44 | 45 | conf.recurse('arch') 46 | 47 | if 'LIB_Z' not in conf.env: 48 | conf.check_cc( 49 | lib = 'z', 50 | defines = ['HAVE_Z'], 51 | mandatory = False) 52 | 53 | if 'LIB_BZ2' not in conf.env: 54 | conf.check_cc( 55 | lib = 'bz2', 56 | defines = ['HAVE_BZ2'], 57 | mandatory = False) 58 | 59 | if 'LIB_PTHREAD' not in conf.env: 60 | conf.check_cc(lib = 'pthread') 61 | 62 | conf.env.append_value('CFLAGS', '-Wall') 63 | # conf.env.append_value('CFLAGS', '-Wextra') 64 | # conf.env.append_value('CFLAGS', '-Wno-missing-field-initializers') 65 | # conf.env.append_value('CFLAGS', '-Wno-unused-parameter') 66 | conf.env.append_value('CFLAGS', '-Wno-unused-function') 67 | 68 | 69 | if conf.env.CC_NAME == 'icc': 70 | conf.env.append_value('CFLAGS', '-inline-max-size=20000') 71 | conf.env.append_value('CFLAGS', '-inline-max-total-size=50000') 72 | 73 | conf.env.append_value('CFLAGS', '-O3') 74 | conf.env.append_value('CFLAGS', '-std=c99') 75 | conf.env.append_value('CFLAGS', '-march=native') 76 | 77 | conf.env.append_value('LIBS', conf.env.LIB_Z + conf.env.LIB_BZ2 + conf.env.LIB_PTHREAD) 78 | conf.env.append_value('DEFINES', conf.env.DEFINES_Z + conf.env.DEFINES_BZ2 + ['COMB_VERSION_STRING=' + get_version_string("0.0.1")]) 79 | conf.env.append_value('OBJS', 80 | ['aw.o', 'fna.o', 'gaba_linear.o', 'gaba_affine.o', 'gaba_wrap.o', 'ggsea.o', 'gref.o', 'hmap.o', 'kopen.o', 'ngx_rbtree.o', 'psort.o', 'ptask.o', 'queue.o', 'queue_internal.o', 'sr.o', 'tree.o', 'zf.o']) 81 | 82 | 83 | def build(bld): 84 | bld.recurse('arch') 85 | 86 | bld.objects(source = 'aw.c', target = 'aw.o') 87 | bld.objects(source = 'fna.c', target = 'fna.o') 88 | bld.objects(source = 'gaba.c', target = 'gaba_linear.o', defines = ['SUFFIX', 'MODEL=LINEAR']) 89 | bld.objects(source = 'gaba.c', target = 'gaba_affine.o', defines = ['SUFFIX', 'MODEL=AFFINE']) 90 | bld.objects(source = 'gaba_wrap.c', target = 
'gaba_wrap.o') 91 | bld.objects(source = 'ggsea.c', target = 'ggsea.o') 92 | bld.objects(source = 'gref.c', target = 'gref.o') 93 | bld.objects(source = 'hmap.c', target = 'hmap.o') 94 | bld.objects(source = 'kopen.c', target = 'kopen.o') 95 | bld.objects(source = 'ngx_rbtree.c', target = 'ngx_rbtree.o') 96 | bld.objects(source = 'psort.c', target = 'psort.o') 97 | bld.objects(source = 'ptask.c', target = 'ptask.o') 98 | bld.objects(source = 'queue.c', target = 'queue.o') 99 | bld.objects(source = 'queue_internal.c', target = 'queue_internal.o') 100 | bld.objects(source = 'sr.c', target = 'sr.o') 101 | bld.objects(source = 'tree.c', target = 'tree.o') 102 | bld.objects(source = 'zf.c', target = 'zf.o') 103 | 104 | bld.program( 105 | source = ['comb.c'], 106 | target = 'comb', 107 | use = bld.env.OBJS, 108 | lib = bld.env.LIBS, 109 | defines = ['MAIN'], 110 | install_path = '${PREFIX}/bin') 111 | -------------------------------------------------------------------------------- /zf.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file zf.h 4 | * 5 | * @brief zlib file API compatible I/O wrapper library 6 | * 7 | * @author Hajime Suzuki 8 | * @date 2016/3/30 9 | * @license MIT 10 | */ 11 | #ifndef _ZF_H_INCLUDED 12 | #define _ZF_H_INCLUDED 13 | 14 | #include 15 | 16 | /* type aliases */ 17 | struct zf_s { 18 | char const *path; 19 | char const *mode; 20 | int reserved1[2]; 21 | void *reserved2[10]; 22 | int64_t reserved3[3]; 23 | 24 | }; 25 | typedef struct zf_s zf_t; 26 | 27 | 28 | /** 29 | * @fn zfopen 30 | * @brief open file, similar to fopen / gzopen, 31 | * compression format can be explicitly specified adding an extension to `mode', e.g. "w+.bz2". 
32 | */ 33 | zf_t *zfopen( 34 | char const *path, 35 | char const *mode); 36 | 37 | /** 38 | * @fn zfclose 39 | * @brief close file, similar to fclose / gzclose 40 | */ 41 | int zfclose( 42 | zf_t *zf); 43 | 44 | /** 45 | * @fn zfread 46 | * @brief read from file, similar to gzread 47 | */ 48 | size_t zfread( 49 | zf_t *zf, 50 | void *ptr, 51 | size_t len); 52 | 53 | /** 54 | * @fn zfpeek 55 | * @brief read len (< 512k) without advancing pointer 56 | */ 57 | size_t zfpeek( 58 | zf_t *zf, 59 | void *ptr, 60 | size_t len); 61 | 62 | /** 63 | * @fn zfwrite 64 | * @brief write to file, similar to gzwrite 65 | */ 66 | size_t zfwrite( 67 | zf_t *zf, 68 | void *ptr, 69 | size_t len); 70 | 71 | /** 72 | * @fn zfgetc 73 | */ 74 | int zfgetc( 75 | zf_t *zf); 76 | 77 | /** 78 | * @fn zfungetc 79 | */ 80 | int zfungetc( 81 | zf_t *zf, 82 | int c); 83 | 84 | /** 85 | * @fn zfeof 86 | */ 87 | int zfeof( 88 | zf_t *zf); 89 | 90 | /** 91 | * @fn zfputc 92 | */ 93 | int zfputc( 94 | zf_t *zf, 95 | int c); 96 | 97 | /** 98 | * @fn zfputs 99 | */ 100 | int zfputs( 101 | zf_t *zf, 102 | char const *s); 103 | 104 | /** 105 | * @fn zfprintf 106 | */ 107 | int zfprintf( 108 | zf_t *zf, 109 | char const *format, 110 | ...); 111 | 112 | #endif /* _ZF_H_INCLUDED */ 113 | /** 114 | * end of zf.h 115 | */ 116 | --------------------------------------------------------------------------------