├── .gitignore
├── README.md
├── pom.xml
├── src
│   ├── main
│   │   ├── java
│   │   │   └── com
│   │   │       └── aliyun
│   │   │           └── adb
│   │   │               └── contest
│   │   │                   ├── AnalysisDB.java
│   │   │                   ├── BucketFile.java
│   │   │                   ├── DiskRaceEngine.java
│   │   │                   ├── RaceAnalyticDB.java
│   │   │                   ├── SimpleAnalyticDB.java
│   │   │                   ├── Util.java
│   │   │                   └── spi
│   │   │                       └── AnalyticDB.java
│   │   └── resources
│   │       └── META-INF
│   │           └── services
│   │               └── com.aliyun.adb.contest.spi.AnalyticDB
│   └── test
│       └── java
│           └── com
│               └── aliyun
│                   └── adb
│                       └── contest
│                           └── TestSimpleAnalyticDB.java
├── test_data
│   └── lineitem
└── test_result
    └── results
/.gitignore:
--------------------------------------------------------------------------------
1 | target
2 | .idea
3 | *.iml
4 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
Please fork this project and set it to private, then implement the `AnalyticDB` interface and submit the address of your repository to complete the contest.

Be sure to read the **Contest Notes** section below.


# Test Environment
Contestant code runs inside a container limited to **4 CPU cores**. Only one contestant program runs on a physical machine at a time, so programs do not interfere with each other.
The container has enough memory for the system inside it to run normally; the memory available to contestants is bounded by the JVM settings. The test data is stored on PMem, and
contestants also get a separate PMem volume for storing the data files they generate.

### JVM Version
```
openjdk version "1.8.0_292"
OpenJDK Runtime Environment (build 1.8.0_292-b10)
OpenJDK 64-Bit Server VM (build 25.292-b10, mixed mode)
```

### JVM Options
```
-Xmx4g -Xms4g -XX:MaxDirectMemorySize=256m -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled
```
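
For scale, the `-XX:MaxDirectMemorySize=256m` cap is the relevant bound for designs that keep one direct write buffer per bucket file: in this repository, each `BucketFile` allocates a 128 KB direct buffer, so `RaceAnalyticDB`'s 12 threads × 64 partitions × 2 columns amount to 1536 buffers, roughly 192 MB, which stays under the 256 MB limit.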

# Test Data
The preliminary round uses a single table, lineitem, with only two columns, L_ORDERKEY and L_PARTKEY, both of type bigint.
There are 300 million rows, in the following format (see the parsing sketch below):
```
L_ORDERKEY,L_PARTKEY
2876385239627262908,3426163450417145920
5164669269206349835,310661817109752352
4036124977818879891,1783468637370676778
```
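
Each row is plain ASCII, so a line can be split on the comma and parsed with `Long.parseLong`, which is what `SimpleAnalyticDB` does. A minimal, illustrative sketch (the class name `RowParseSketch` is not part of the project):

```java
public class RowParseSketch {
    public static void main(String[] args) {
        String row = "2876385239627262908,3426163450417145920";
        String[] cols = row.split(",");          // two bigint columns per row
        long orderKey = Long.parseLong(cols[0]); // L_ORDERKEY
        long partKey = Long.parseLong(cols[1]);  // L_PARTKEY
        System.out.println(orderKey + " " + partKey);
    }
}
```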

To avoid wrong answers caused by precision issues, we guarantee that the requested percentile falls exactly on an integer rank.
In other words, for N rows and a percentile P (0 <= P <= 1.0), `N * P` is guaranteed to be an integer.
See the `SimpleAnalyticDB` implementation for details, and the sketch below.

- The test data is random and uniformly distributed; duplicates are extremely unlikely, and each evaluation may use a different data set.
- The `test_data` directory contains one sample data set and its answers, for reference only.

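A minimal sketch of how that guarantee maps a percentile onto a sorted column, mirroring `SimpleAnalyticDB.quantile` (the class name `QuantileSketch` is only for illustration):

```java
import java.util.Arrays;
import java.util.List;

public class QuantileSketch {
    // values must be sorted ascending; N * percentile is guaranteed to be an integer
    static String quantile(List<Long> values, double percentile) {
        int rank = (int) Math.round(values.size() * percentile); // 1-based rank
        return values.get(rank - 1).toString();
    }

    public static void main(String[] args) {
        List<Long> values = Arrays.asList(1L, 2L, 3L, 4L, 5L);
        System.out.println(quantile(values, 0.2)); // prints 1, matching the AnalyticDB javadoc example
    }
}
```
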
# Test Rules
- If an evaluation fails for any reason, it does not consume a submission attempt.
- A contestant's total time is measured from process start to completion, so it includes both data loading and the queries; ranking is by total time.
- A contestant program may run for at most 10 minutes.
- The best submission counts as the contestant's leaderboard score.

# Cheating Rules
- Hard-coding any values from the test data into your code is treated as a hack.
- A program that only works on one specific data set and fails on another is treated as a hack.
- After the contest ends, the code of the top 10 contestants is re-judged (correctness only) to confirm it runs on multiple data sets. Re-judging uses the code version of the contestant's best submission and a new test data set.
- If the code version of the best submission no longer exists, the result is treated as invalid.

# Dependencies & Build
- Only the standard library may be used; external dependencies are not allowed, and the evaluation harness forcibly overwrites the pom file when compiling.
- JNI and Unsafe-related techniques are not allowed.
- Do not modify the Maven `pom.xml` file; doing so may break the build.
- Do not modify the `AnalyticDB` interface file; doing so may break the build.
- Run `mvn clean package -DskipTests` before submitting to make sure the project compiles.
- Register your implementation class in `META-INF/services/com.aliyun.adb.contest.spi.AnalyticDB`, as shown in the sketch below.

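In this repository the service file registers `RaceAnalyticDB`:

```
com.aliyun.adb.contest.RaceAnalyticDB
```

A sketch of how such a registration is typically discovered through the standard `ServiceLoader` (the evaluation harness itself is not part of this repository, so this is only illustrative):

```java
import java.util.ServiceLoader;

import com.aliyun.adb.contest.spi.AnalyticDB;

public class SpiProbe {
    public static void main(String[] args) {
        // Iterates over the implementations listed in META-INF/services/com.aliyun.adb.contest.spi.AnalyticDB
        for (AnalyticDB db : ServiceLoader.load(AnalyticDB.class)) {
            System.out.println("Found implementation: " + db.getClass().getName());
        }
    }
}
```
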
# Contest Notes
- Do not commit the data files you generate to your repository; otherwise pulling your code may take too long and time out.
- Do not print too many logs, or the run may time out because of logging; only 10 MB of logs are captured and uploaded.
- Add `analyticdb_support` as a Reporter of your project so the evaluation harness has permission to pull your code.
- Do not make your project public during the contest.
- The submitted code path must be a git-format address, e.g. git@code.aliyun.com:xxx/xxx.git.
- Any malicious or cheating behavior results in a permanent ban.

# FAQ
1. Why can't I see my own logs after a timed-out run?

When a run times out, the contestant program is killed immediately rather than exiting normally, so some log output may not be flushed in time and is lost. If logs printed before the timeout are still missing,
try calling `System.out.flush()`. Otherwise, the program may have been stuck the whole time and never printed anything.


2. Why can't I find the `analyticdb_support` account in search?

Pasting the full `analyticdb_support` account name may not trigger any search results; try typing it by hand. Type `analyticdb`, then pause, and the account should appear.
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.aliyun.adb.contest</groupId>
    <artifactId>adb-contest</artifactId>
    <packaging>jar</packaging>
    <version>1.0-SNAPSHOT</version>
    <name>adb-contest</name>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>8</source>
                    <target>8</target>
                </configuration>
            </plugin>
        </plugins>
    </build>

    <dependencies>
        <dependency>
            <groupId>com.carrotsearch</groupId>
            <artifactId>hppc</artifactId>
            <version>0.8.1</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.13.2</version>
            <scope>test</scope>
        </dependency>
    </dependencies>
</project>
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/adb/contest/AnalysisDB.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.adb.contest;
2 |
3 | import java.io.BufferedReader;
4 | import java.io.File;
5 | import java.io.FileReader;
6 | import java.io.IOException;
7 | import java.io.RandomAccessFile;
8 | import java.nio.ByteBuffer;
9 | import java.nio.MappedByteBuffer;
10 | import java.nio.channels.FileChannel;
11 | import java.nio.channels.FileChannel.MapMode;
12 | import java.util.ArrayList;
13 | import java.util.HashMap;
14 | import java.util.List;
15 | import java.util.Map;
16 | import java.util.Map.Entry;
17 | import java.util.concurrent.CountDownLatch;
18 |
19 | import com.aliyun.adb.contest.spi.AnalyticDB;
20 |
21 | /**
22 | * @author jingfeng.xjf
23 | * @date 2021-06-24
24 | */
25 | public class AnalysisDB implements AnalyticDB {
26 |
27 | private final Map<String, List<Integer>> data = new HashMap<>();
28 |
29 | private Map<Byte, Integer> byte2IntMap;
30 |
31 | /**
32 | * The implementation must contain a public no-argument constructor.
33 | */
34 | public AnalysisDB() {
35 | byte2IntMap = new HashMap<>();
36 | for (int i = 0; i < 10; i++) {
37 | byte2IntMap.put((byte)(i + '0'), i);
38 | }
39 | }
40 |
41 | @Override
42 | public void load(String tpchDataFileDir, String workspaceDir) throws Exception {
43 |
44 | File dir = new File(tpchDataFileDir);
45 | for (File dataFile : dir.listFiles()) {
46 | if ("lineitem".equals(dataFile.getName())) {
47 | //testRead(dataFile, 12, true);
48 | //testRead(dataFile, 12, true);
49 | //testRead(dataFile, 12, false);
50 | //testRead(dataFile, 12, false);
51 | }
52 | }
53 |
54 | testWrite(workspaceDir, 12, 128, 1);
55 | testWrite(workspaceDir, 12, 64, 2);
56 | //testWriteWithCpuOp(workspaceDir, 12, 128, 2);
57 | //testWritex3(workspaceDir, 12, 128, 3);
58 |
59 | }
60 |
61 | @Override
62 | public String quantile(String table, String column, double percentile) throws Exception {
63 | return "12345";
64 | }
65 |
66 | private void loadInMemory(File dataFile) throws IOException {
67 | BufferedReader reader = new BufferedReader(new FileReader(dataFile));
68 | String table = dataFile.getName();
69 | String[] columns = reader.readLine().split(",");
70 |
71 | for (String column : columns) {
72 | ArrayList<Integer> countArray = new ArrayList<>();
73 | for (int i = 0; i < 101; i++) {
74 | countArray.add(0);
75 | }
76 | data.put(tableColumnKey(table, column), countArray);
77 | }
78 |
79 | String rawRow;
80 | while ((rawRow = reader.readLine()) != null) {
81 | String[] row = rawRow.split(",");
82 |
83 | for (int i = 0; i < columns.length; i++) {
84 | List<Integer> countArray = data.get(tableColumnKey(table, columns[i]));
85 | int partition = head2BitPartition(row[i]);
86 | countArray.set(partition, countArray.get(partition) + 1);
87 | }
88 | }
89 | for (Entry<String, List<Integer>> entries : data.entrySet()) {
90 | String key = entries.getKey();
91 | List value = entries.getValue();
92 | System.out.println("tableColumn : " + key);
93 | for (int i = 0; i < 101; i++) {
94 | System.out.println("partition " + i + " nums: " + value.get(i));
95 | }
96 | }
97 |
98 | }
99 |
100 | private String tableColumnKey(String table, String column) {
101 | return (table + "." + column).toLowerCase();
102 | }
103 |
104 | private int head2BitPartition(String val) {
105 | if (val.length() <= 17) {
106 | return 0;
107 | } else if (val.length() == 18) {
108 | return val.charAt(0) - '0';
109 | } else if (val.length() == 19) {
110 | return (val.charAt(0) - '0') * 10 + (val.charAt(1) - '0');
111 | }
112 | return 100;
113 | }
114 |
115 | private void testWrite(String workDir, final int threadNum, final int fileNum, final int testRound)
116 | throws Exception {
117 | long totalWriteNum = 6_0000_0000;
118 | CountDownLatch countDownLatch = new CountDownLatch(threadNum);
119 | long start = System.currentTimeMillis();
120 | for (int i = 0; i < threadNum; i++) {
121 | final long threadWriteNum = totalWriteNum / threadNum;
122 | final int threadNo = i;
123 | new Thread(() -> {
124 | try {
125 | File file = new File(
126 | workDir + File.separator + testRound + "_" + threadNum + "_" + threadNo + "_" + testRound);
127 | RandomAccessFile rw = new RandomAccessFile(file, "rw");
128 | FileChannel fileChannel = rw.getChannel();
129 | ByteBuffer byteBuffer = ByteBuffer.allocateDirect(128 * 1024);
130 | long position = 0;
131 | for (long val = 0; val < threadWriteNum; val++) {
132 | byteBuffer.putLong(val);
133 | if (byteBuffer.remaining() < 8) {
134 | byteBuffer.flip();
135 | fileChannel.write(byteBuffer, position);
136 | position += 128 * 1024;
137 | byteBuffer.clear();
138 | }
139 | }
140 | countDownLatch.countDown();
141 | } catch (Exception e) {
142 | e.printStackTrace();
143 | }
144 | }).start();
145 | }
146 | countDownLatch.await();
147 | System.out.println(
148 | "threadNum " + threadNum + " fileNum " + fileNum + " write 6_0000_0000 cost " + (
149 | System.currentTimeMillis()
150 | - start) + " ms");
151 | }
152 |
153 | private void testWriteWithCpuOp(String workDir, final int threadNum, final int fileNum, final int testRound)
154 | throws Exception {
155 | long totalWriteNum = 6_0000_0000;
156 | CountDownLatch countDownLatch = new CountDownLatch(threadNum);
157 | long start = System.currentTimeMillis();
158 | for (int i = 0; i < threadNum; i++) {
159 | String valStr = "600000000000000000";
160 | final long threadWriteNum = totalWriteNum / threadNum;
161 | final int threadNo = i;
162 | new Thread(() -> {
163 | try {
164 | FileChannel[] fileChannels = new FileChannel[fileNum];
165 | for (int j = 0; j < fileNum; j++) {
166 | File file = new File(workDir + File.separator + testRound + "_" + threadNo + "_" + j);
167 | RandomAccessFile rw = new RandomAccessFile(file, "rw");
168 | FileChannel fileChannel = rw.getChannel();
169 | fileChannels[j] = fileChannel;
170 | }
171 | ByteBuffer byteBuffer = ByteBuffer.allocate(128 * 1024);
172 | int round = 0;
173 | for (long val = 0; val < threadWriteNum; val++) {
174 | long temp = 0;
175 | for (int j = 0; j < valStr.length(); j++) {
176 | temp += temp * 10 + valStr.charAt(j) - '0';
177 | }
178 | //int offset = 64 - 8 + 1;
179 | //byte t = (byte)((temp >> offset) & 0xff);
180 | byteBuffer.putLong(val);
181 | if (byteBuffer.remaining() < 8) {
182 | byteBuffer.flip();
183 | fileChannels[round % fileNum].write(byteBuffer, fileChannels[round % fileNum].position());
184 | round++;
185 | byteBuffer.clear();
186 | }
187 | }
188 | countDownLatch.countDown();
189 | } catch (Exception e) {
190 | e.printStackTrace();
191 | }
192 | }).start();
193 | }
194 | countDownLatch.await();
195 | System.out.println(
196 | "threadNum " + threadNum + " fileNum " + fileNum + " write 6_0000_0000 cost " + (
197 | System.currentTimeMillis()
198 | - start) + " ms");
199 | }
200 |
201 | private void testWritex3(String workDir, final int threadNum, final int fileNum, final int testRound)
202 | throws Exception {
203 | long totalWriteNum = 6_0000_0000;
204 | CountDownLatch countDownLatch = new CountDownLatch(threadNum);
205 | long start = System.currentTimeMillis();
206 | for (int i = 0; i < threadNum; i++) {
207 | final long threadWriteNum = totalWriteNum / threadNum;
208 | final int threadNo = i;
209 | new Thread(() -> {
210 | try {
211 | FileChannel[] fileChannels = new FileChannel[fileNum];
212 | for (int j = 0; j < fileNum; j++) {
213 | File file = new File(workDir + File.separator + testRound + "_" + threadNo + "_" + j);
214 | RandomAccessFile rw = new RandomAccessFile(file, "rw");
215 | FileChannel fileChannel = rw.getChannel();
216 | fileChannels[j] = fileChannel;
217 | }
218 | ByteBuffer byteBuffer = ByteBuffer.allocate(128 * 1024);
219 | int round = 0;
220 | for (long val = 0; val < threadWriteNum; val++) {
221 | byteBuffer.putLong(val);
222 | byteBuffer.putLong(val);
223 | byteBuffer.putLong(val);
224 | if (byteBuffer.remaining() < 8 * 3) {
225 | byteBuffer.flip();
226 | fileChannels[round % fileNum].write(byteBuffer, fileChannels[round % fileNum].position());
227 | round++;
228 | byteBuffer.clear();
229 | }
230 | }
231 | countDownLatch.countDown();
232 | } catch (Exception e) {
233 | e.printStackTrace();
234 | }
235 | }).start();
236 | }
237 | countDownLatch.await();
238 | System.out.println(
239 | "threadNum " + threadNum + " fileNum " + fileNum + " write 6_0000_0000 cost " + (
240 | System.currentTimeMillis()
241 | - start) + " ms");
242 | }
243 |
244 | private void testRead(File dataFile, int threadNum, boolean flag) throws Exception {
245 | long start = System.currentTimeMillis();
246 | RandomAccessFile randomAccessFile = new RandomAccessFile(dataFile, "r");
247 | FileChannel fileChannel = randomAccessFile.getChannel();
248 | long totalSize = fileChannel.size();
249 | long[] readThreadPosition = new long[threadNum];
250 | // align the start of each of the n read slices to a row boundary (the byte after a newline)
251 | readThreadPosition[0] = 21;
252 | for (int i = 1; i < threadNum; i++) {
253 | long paddingPosition = totalSize / threadNum * i;
254 | MappedByteBuffer mappedByteBuffer = fileChannel.map(MapMode.READ_ONLY, paddingPosition, 100);
255 | for (int j = 0; j < 100; j++) {
256 | byte aByte = mappedByteBuffer.get();
257 | if (aByte == (byte)'\n') {
258 | paddingPosition += j + 1;
259 | break;
260 | }
261 | }
262 | readThreadPosition[i] = paddingPosition;
263 | }
264 |
265 | CountDownLatch countDownLatch = new CountDownLatch(threadNum);
266 | for (int k = 0; k < threadNum; k++) {
267 | final int threadNo = k;
268 | new Thread(() -> {
269 | long threadStart = System.currentTimeMillis();
270 | try {
271 | long readBufferSize = 1024 * 64;
272 | ByteBuffer byteBuffer = ByteBuffer.allocate((int)readBufferSize);
273 | byte[] readBufferArray = new byte[(int)readBufferSize];
274 | long readPosition = readThreadPosition[threadNo];
275 | long partitionTotalSize;
276 | if (threadNo == threadNum - 1) {
277 | partitionTotalSize = totalSize;
278 | } else {
279 | partitionTotalSize = readThreadPosition[threadNo + 1];
280 | }
281 | long blockReadPosition = 0;
282 | while (readPosition < partitionTotalSize - 1) {
283 | long size = Math.min(readBufferSize, partitionTotalSize - readPosition);
284 | if (size < readBufferSize) {
285 | readBufferArray = new byte[(int)size];
286 | }
287 | fileChannel.read(byteBuffer, readPosition);
288 | byteBuffer.flip();
289 | byteBuffer.get(readBufferArray);
290 | byteBuffer.clear();
291 | long val = 0;
292 | for (int i = 0; i < size; i++) {
293 | if (readBufferArray[i] != '\n' && readBufferArray[i] != ',') {
294 | if (flag) {
295 | val = val * 10 + convertByte(readBufferArray[i]);
296 | } else {
297 | val = val * 10 + readBufferArray[i] - '0';
298 | }
299 |
300 | } else if (readBufferArray[i] == ',') {
301 | //orderRaceEngine.add(threadNo, val);
302 | val = 0;
303 | blockReadPosition = i + 1;
304 | } else if (readBufferArray[i] == '\n') {
305 | //partRaceEngine.add(threadNo, val);
306 | val = 0;
307 | blockReadPosition = i + 1;
308 | }
309 | }
310 | readPosition += blockReadPosition;
311 | }
312 | } catch (Exception e) {
313 | e.printStackTrace();
314 | }
315 | countDownLatch.countDown();
316 | System.out.println("thread cost " + (System.currentTimeMillis() - threadStart) + " ms");
317 | }).start();
318 | }
319 | countDownLatch.await();
320 | System.out.println(
321 | "threadNum " + threadNum + " read + analysis cost " + (System.currentTimeMillis() - start) + " ms");
322 | }
323 |
324 | private void testMmapWrite(String workDir, final int threadNum) throws Exception {
325 | long totalWriteNum = 6_0000_0000;
326 | CountDownLatch countDownLatch = new CountDownLatch(threadNum);
327 | long start = System.currentTimeMillis();
328 | for (int i = 0; i < threadNum; i++) {
329 | final long threadWriteNum = totalWriteNum / threadNum;
330 | final int threadNo = i;
331 | new Thread(() -> {
332 | try {
333 | File file = new File(workDir + File.separator + threadNum + "_" + threadNo);
334 | RandomAccessFile rw = new RandomAccessFile(file, "rw");
335 | FileChannel fileChannel = rw.getChannel();
336 | MappedByteBuffer mappedByteBuffer = fileChannel.map(MapMode.READ_WRITE, 0, 800 * 1024 * 1024);
337 | for (long val = 0; val < threadWriteNum; val++) {
338 | mappedByteBuffer.putLong(val);
339 | }
340 | countDownLatch.countDown();
341 | } catch (Exception e) {
342 | e.printStackTrace();
343 | }
344 | }).start();
345 | }
346 | countDownLatch.await();
347 | System.out.println(
348 | "threadNum " + threadNum + " write 6_0000_0000 cost " + (System.currentTimeMillis() - start) + " ms");
349 | }
350 |
351 | public int convertByte(byte b) {
352 | return byte2IntMap.get(b);
353 | }
354 |
355 | }
356 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/adb/contest/BucketFile.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.adb.contest;
2 |
3 | import java.io.File;
4 | import java.io.IOException;
5 | import java.io.RandomAccessFile;
6 | import java.nio.ByteBuffer;
7 | import java.nio.channels.FileChannel;
8 | import java.util.concurrent.Future;
9 |
10 | import static com.aliyun.adb.contest.RaceAnalyticDB.WRITE_BUFFER_SIZE;
11 | import static com.aliyun.adb.contest.RaceAnalyticDB.executorService;
12 |
13 | /**
14 | * @author jingfeng.xjf
15 | * @date 2021-06-21
16 | */
17 | public class BucketFile {
18 |
19 | private FileChannel fileChannel;
20 | private ByteBuffer byteBuffer;
21 | private long writePosition;
22 | private int bufferIndex;
23 | private int bufferSize;
24 |
25 | public BucketFile(String fileName) {
26 | File file = new File(fileName);
27 | try {
28 | file.createNewFile();
29 | } catch (IOException e) {
30 | System.out.println(e.getMessage());
31 | }
32 | writePosition = 0;
33 | try {
34 | RandomAccessFile randomAccessFile = new RandomAccessFile(file, "rw");
35 | fileChannel = randomAccessFile.getChannel();
36 | byteBuffer = ByteBuffer.allocateDirect(WRITE_BUFFER_SIZE);
37 | bufferIndex = 0;
38 | bufferSize = WRITE_BUFFER_SIZE / 8;
39 | } catch (Exception e) {
40 | System.out.println(e.getMessage());
41 | }
42 | }
43 |
44 | public void add(long longVal) throws Exception {
45 | byteBuffer.putLong(longVal);
46 | bufferIndex++;
47 | if (bufferIndex != bufferSize) {
48 | // branch prediction: keep the common not-yet-full case as the empty fast path
49 | } else {
50 | flush();
51 | }
52 | }
53 |
54 | public void flush() throws Exception {
55 | if (bufferIndex == 0) {
56 | return;
57 | }
58 | byteBuffer.flip();
59 | fileChannel.write(byteBuffer, writePosition);
60 | writePosition += bufferIndex * 8;
61 | byteBuffer.clear();
62 | bufferIndex = 0;
63 | }
64 |
65 | public int getDataNum() {
66 | return (int)(writePosition / 8) + bufferIndex;
67 | }
68 |
69 | public Future loadAsync(final long[][] nums, final int[] index) {
70 | Future future = executorService.submit(() -> {
71 | byteBuffer.clear();
72 | int readNo = (int)(writePosition / WRITE_BUFFER_SIZE) + (writePosition % WRITE_BUFFER_SIZE == 0 ? 0 : 1);
73 | long readPosition = 0;
74 | for (int i = 0; i < readNo; i++) {
75 | int readSize = 0;
76 | try {
77 | readSize = fileChannel.read(byteBuffer, readPosition);
78 | } catch (IOException e) {
79 | e.printStackTrace();
80 | }
81 | readPosition += WRITE_BUFFER_SIZE;
82 | byteBuffer.flip();
83 | for (int j = 0; j < readSize / 8; j++) {
84 | byteBuffer.position(j * 8);
85 | long longVal = byteBuffer.getLong();
86 | int p = (int)((longVal >> 54) & 0x07);
87 | nums[p][index[p]++] = longVal;
88 | }
89 | byteBuffer.clear();
90 | }
91 | return true;
92 | });
93 | return future;
94 | }
95 |
96 | public void load(long[] nums, int offset) throws Exception {
97 | byteBuffer.clear();
98 | int readNo = (int)(writePosition / WRITE_BUFFER_SIZE) + (writePosition % WRITE_BUFFER_SIZE == 0 ? 0 : 1);
99 | long readPosition = 0;
100 | int n = offset;
101 | for (int i = 0; i < readNo; i++) {
102 | int readSize = fileChannel.read(byteBuffer, readPosition);
103 | readPosition += WRITE_BUFFER_SIZE;
104 | byteBuffer.flip();
105 | for (int j = 0; j < readSize / 8; j++) {
106 | byteBuffer.position(j * 8);
107 | nums[n] = byteBuffer.getLong();
108 | n++;
109 | }
110 | byteBuffer.clear();
111 | }
112 | }
113 |
114 | }
115 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/adb/contest/DiskRaceEngine.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.adb.contest;
2 |
3 | import java.io.File;
4 | import java.util.concurrent.Future;
5 |
6 | import static com.aliyun.adb.contest.RaceAnalyticDB.OFFSET;
7 | import static com.aliyun.adb.contest.RaceAnalyticDB.PARTITION;
8 | import static com.aliyun.adb.contest.RaceAnalyticDB.THREAD_NUM;
9 |
10 | /**
11 | * @author jingfeng.xjf
12 | * @date 2021-06-21
13 | */
14 | public class DiskRaceEngine {
15 |
16 | private String tableColumnName;
17 | public BucketFile[][] bucketFiles;
18 | private int threadNum;
19 | private String workDir;
20 | private String filePrefix;
21 |
22 | public DiskRaceEngine(String workDir, String filePrefix, int threadNum) {
23 | this.workDir = workDir;
24 | this.filePrefix = filePrefix;
25 | this.threadNum = threadNum;
26 | bucketFiles = new BucketFile[threadNum][PARTITION];
27 | }
28 |
29 | public void init(int threadNo) {
30 | for (int j = 0; j < PARTITION; j++) {
31 | bucketFiles[threadNo][j] = new BucketFile(workDir + File.separator + filePrefix + "_" + threadNo + "_" + j);
32 | }
33 | }
34 |
35 | public void add(int threadNo, long longVal) throws Exception {
36 | // route the value to the bucket file of the partition derived from its key
37 | BucketFile bucket = chooseBucketByKey(threadNo, longVal);
38 | bucket.add(longVal);
39 | }
40 |
41 | private BucketFile chooseBucketByKey(int threadNo, long longVal) {
42 | int partition = getPartition(longVal);
43 | return bucketFiles[threadNo][partition];
44 | }
45 |
46 | public static int getPartition(long values) {
47 | int offset = 64 - OFFSET;
48 | return (byte)(values >> offset);
49 | }
50 |
51 | public long quantile(double percentile) throws Exception {
52 | long ioStart = System.currentTimeMillis();
53 | //int totalNumber = getTotalNumber();
54 | int totalNumber = 3_0000_0000;
55 | int rank = (int)Math.round(totalNumber * percentile);
56 | // rank is the n-th value, counted from 1; index is the array subscript, counted from 0
57 | int currentBucketBeginRank = 1;
58 | int currentBucketEndRank;
59 | int blockIndex = 0;
60 | int hitPartition = -1;
61 | for (int i = 0; i < PARTITION; i++) {
62 | int dataNum = 0;
63 | for (int j = 0; j < threadNum; j++) {
64 | dataNum += bucketFiles[j][i].getDataNum();
65 | }
66 | currentBucketEndRank = currentBucketBeginRank + dataNum;
67 | if (currentBucketBeginRank <= rank && rank < currentBucketEndRank) {
68 | blockIndex = rank - currentBucketBeginRank;
69 | hitPartition = i;
70 | break;
71 | }
72 | currentBucketBeginRank = currentBucketEndRank;
73 | }
74 |
75 | long[][][] nums = Util.sharedBuffers;
76 | int[][] sharedIndex = Util.sharedIndex;
77 | for (int i = 0; i < THREAD_NUM; i++) {
78 | for (int j = 0; j < 8; j++) {
79 | sharedIndex[i][j] = 0;
80 | }
81 | }
82 | Future[] futures = new Future[threadNum];
83 | for (int i = 0; i < threadNum; i++) {
84 | bucketFiles[i][hitPartition].flush();
85 | futures[i] = bucketFiles[i][hitPartition].loadAsync(nums[i], sharedIndex[i]);
86 | }
87 | for (Future future : futures) {
88 | try {
89 | future.get();
90 | } catch (Exception e) {
91 | // skewed data can overflow the fixed-size shared buffers here
92 | return 0;
93 | }
94 | }
95 |
96 | int currentBlockBeginIndex = 0;
97 | int currentBlockEndIndex;
98 | int resultIndex = 0;
99 | int hitBlock = -1;
100 | for (int i = 0; i < 8; i++) {
101 | int dataNum = 0;
102 | for (int j = 0; j < threadNum; j++) {
103 | dataNum += sharedIndex[j][i];
104 | }
105 | currentBlockEndIndex = currentBlockBeginIndex + dataNum;
106 | if (currentBlockBeginIndex <= blockIndex && blockIndex < currentBlockEndIndex) {
107 | resultIndex = blockIndex - currentBlockBeginIndex;
108 | hitBlock = i;
109 | break;
110 | }
111 | currentBlockBeginIndex = currentBlockEndIndex;
112 | }
113 |
114 | System.out.println("io cost " + (System.currentTimeMillis() - ioStart) + " ms");
115 |
116 | long[] sharedBuffer = Util.sharedBuffer;
117 | int sharedBufferIndex = 0;
118 | for (int i = 0; i < threadNum; i++) {
119 | for (int j = 0; j < sharedIndex[i][hitBlock]; j++) {
120 | sharedBuffer[sharedBufferIndex++] = nums[i][hitBlock][j];
121 | }
122 | }
123 |
124 | //long copyStart = System.currentTimeMillis();
125 | //long[][][] pnums = Util.sharedBuffers;
126 | //int[] sharedIndex = Util.sharedIndex;
127 | //Arrays.fill(sharedIndex, 0);
128 | //for (int i = 0; i < partitionDataNum; i++) {
129 | // int p = (int)((nums[i] >> 54) & 0x07);
130 | // pnums[p][sharedIndex[p]++] = nums[i];
131 | //}
132 | //System.out.println("copy cost " + (System.currentTimeMillis() - copyStart) + " ms");
133 |
134 | //int[] a = new int[10];
135 | //Arrays.fill(a, 0);
136 | //for (int i = 0; i < partitionDataNum; i++) {
137 | // a[(int)((nums[i] >> 56) & 0x01)]++;
138 | //}
139 | //for (int i = 0; i < 2; i++) {
140 | // System.out.printf("%d ", a[i]);
141 | //}
142 | //System.out.println();
143 | //Arrays.fill(a, 0);
144 | //for (int i = 0; i < partitionDataNum; i++) {
145 | // a[(int)((nums[i] >> 55) & 0x03)]++;
146 | //}
147 | //for (int i = 0; i < 4; i++) {
148 | // System.out.printf("%d ", a[i]);
149 | //}
150 | //System.out.println();
151 | //Arrays.fill(a, 0);
152 | //for (int i = 0; i < partitionDataNum; i++) {
153 | // a[(int)((nums[i] >> 54) & 0x07)]++;
154 | //}
155 | //for (int i = 0; i < 8; i++) {
156 | // System.out.printf("%d ", a[i]);
157 | //}
158 | //System.out.println();
159 |
160 | long start = System.currentTimeMillis();
161 | long result = Util.quickSelect(sharedBuffer, 0, sharedBufferIndex - 1, sharedBufferIndex - resultIndex);
162 | System.out.println("sort cost " + (System.currentTimeMillis() - start) + " ms");
163 | return result;
164 | }
165 |
166 | private int partition(long a[], int i, int j) {
167 | long tmp = a[j];
168 | int index = i;
169 | if (i < j) {
170 | for (int k = i; k < j; k++) {
171 | if (a[k] >= tmp) {
172 | swap(a, index++, k);
173 | }
174 | }
175 | swap(a, index, j);
176 | return index;
177 | }
178 | return index;
179 | }
180 |
181 | private long search(long a[], int i, int j, int k) {
182 | int m = partition(a, i, j);
183 | if (k == m - i + 1) { return a[m]; } else if (k < m - i + 1) {
184 | return search(a, i, m - 1, k);
185 | }
187 | // second half
188 | else {
189 | // recurse into the second half: now look for the (k - (m - i + 1))-th largest
189 | return search(a, m + 1, j, k - (m - i + 1));
190 | }
191 | }
192 |
193 | // swap the elements at index1 and index2 of array
194 | private void swap(long[] array, int index1, int index2) {
195 | long temp = array[index1];
196 | array[index1] = array[index2];
197 | array[index2] = temp;
198 | }
199 |
200 | private int getTotalNumber() {
201 | int sum = 0;
202 | for (int i = 0; i < threadNum; i++) {
203 | for (int j = 0; j < PARTITION; j++) {
204 | sum += bucketFiles[i][j].getDataNum();
205 | }
206 | }
207 | return sum;
208 | }
209 |
210 | public void flush(int threadNo) throws Exception {
211 | for (int j = 0; j < PARTITION; j++) {
212 | bucketFiles[threadNo][j].flush();
213 | }
214 | }
215 |
216 | public void flushPartition(int partition) throws Exception {
217 | for (int j = 0; j < threadNum; j++) {
218 | bucketFiles[j][partition].flush();
219 | }
220 | }
221 | }
222 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/adb/contest/RaceAnalyticDB.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.adb.contest;
2 |
3 | import java.io.File;
4 | import java.io.RandomAccessFile;
5 | import java.nio.ByteBuffer;
6 | import java.nio.MappedByteBuffer;
7 | import java.nio.channels.FileChannel;
8 | import java.nio.channels.FileChannel.MapMode;
9 | import java.util.HashMap;
10 | import java.util.Map;
11 | import java.util.concurrent.CountDownLatch;
12 | import java.util.concurrent.ExecutorService;
13 | import java.util.concurrent.Executors;
14 | import java.util.concurrent.atomic.AtomicInteger;
15 |
16 | import com.aliyun.adb.contest.spi.AnalyticDB;
17 |
18 | public class RaceAnalyticDB implements AnalyticDB {
19 |
20 | private final Map columnName2EngineMap = new HashMap<>();
21 |
22 | public static final int THREAD_NUM = 12;
23 |
24 | public static final int PARTITION_OVER_PARTITION = 8;
25 |
26 | public static final int PARTITION = 64;
27 |
28 | public static final int OFFSET = 7;
29 |
30 | public static final int WRITE_BUFFER_SIZE = 1024 * 128;
31 |
32 | public static ExecutorService executorService = Executors.newFixedThreadPool(THREAD_NUM);
33 |
34 |
35 |
36 | /**
37 | * The implementation must contain a public no-argument constructor.
38 | */
39 | public RaceAnalyticDB() {
40 | }
41 |
42 | @Override
43 | public void load(String tpchDataFileDir, String workspaceDir) throws Exception {
44 | File dir = new File(tpchDataFileDir);
45 | for (File dataFile : dir.listFiles()) {
46 | if ("lineitem".equals(dataFile.getName())) {
47 | final int threadNum = THREAD_NUM;
48 |
49 | // 初始化引擎
50 | long createEngineStart = System.currentTimeMillis();
51 | String orderKey = tableColumnKey("lineitem", "L_ORDERKEY");
52 | DiskRaceEngine orderRaceEngine = new DiskRaceEngine(workspaceDir, orderKey, threadNum);
53 | columnName2EngineMap.put(orderKey, orderRaceEngine);
54 | String partKey = tableColumnKey("lineitem", "L_PARTKEY");
55 | DiskRaceEngine partRaceEngine = new DiskRaceEngine(workspaceDir, partKey, threadNum);
56 | columnName2EngineMap.put(partKey, partRaceEngine);
57 | System.out.println(
58 | "create engine cost " + (System.currentTimeMillis() - createEngineStart) + " ms");
59 |
60 | long start = System.currentTimeMillis();
61 | RandomAccessFile randomAccessFile = new RandomAccessFile(dataFile, "r");
62 | FileChannel fileChannel = randomAccessFile.getChannel();
63 | long totalSize = fileChannel.size();
64 | long[] readThreadPosition = new long[threadNum];
65 | // align the start of each of the n read slices to a row boundary (the byte after a newline)
66 | readThreadPosition[0] = 21;
67 | for (int i = 1; i < threadNum; i++) {
68 | long paddingPosition = totalSize / threadNum * i;
69 | MappedByteBuffer mappedByteBuffer = fileChannel.map(MapMode.READ_ONLY, paddingPosition, 100);
70 | for (int j = 0; j < 100; j++) {
71 | byte aByte = mappedByteBuffer.get();
72 | if (aByte == (byte)'\n') {
73 | paddingPosition += j + 1;
74 | break;
75 | }
76 | }
77 | readThreadPosition[i] = paddingPosition;
78 | }
79 |
80 | CountDownLatch countDownLatch = new CountDownLatch(threadNum);
81 | for (int k = 0; k < threadNum; k++) {
82 | final int threadNo = k;
83 | new Thread(() -> {
84 | //long threadStart = System.currentTimeMillis();
85 | orderRaceEngine.init(threadNo);
86 | partRaceEngine.init(threadNo);
87 | try {
88 | int readBufferSize = 1024 * 1024 * 2;
89 | ByteBuffer byteBuffer = ByteBuffer.allocate(readBufferSize);
90 | byte[] readBufferArray;
91 | long readPosition = readThreadPosition[threadNo];
92 | long partitionTotalSize;
93 | if (threadNo == threadNum - 1) {
94 | partitionTotalSize = totalSize;
95 | } else {
96 | partitionTotalSize = readThreadPosition[threadNo + 1];
97 | }
98 | while (readPosition < partitionTotalSize - 1) {
99 | int size = (int)Math.min(readBufferSize, partitionTotalSize - readPosition);
100 | byteBuffer.clear();
101 | fileChannel.read(byteBuffer, readPosition);
102 | readBufferArray = byteBuffer.array();
103 | long val = 0;
104 | while (size > 0) {
105 | if (readBufferArray[size - 1] == '\n') {
106 | break;
107 | }
108 | size--;
109 | }
110 | for (int i = 0; i < size; i++) {
111 | //if (readBufferArray[i] == '\n') {
112 | // partRaceEngine.add(threadNo, val);
113 | // val = 0;
114 | // blockReadPosition = i + 1;
115 | //} else if(readBufferArray[i] == ',') {
116 | // orderRaceEngine.add(threadNo, val);
117 | // val = 0;
118 | // blockReadPosition = i + 1;
119 | //} else {
120 | // val = val * 10 + (readBufferArray[i] - '0');
121 | //}
122 | byte temp = readBufferArray[i];
123 | do {
124 | val = val * 10 + (temp - '0');
125 | temp = readBufferArray[++i];
126 | } while (temp != ',');
127 | orderRaceEngine.add(threadNo, val);
128 | val = 0;
129 | // skip ,
130 | i++;
131 | temp = readBufferArray[i];
132 | do {
133 | val = val * 10 + (temp - '0');
134 | temp = readBufferArray[++i];
135 | } while (temp != '\n');
136 | partRaceEngine.add(threadNo, val);
137 | val = 0;
138 | // skip \n
139 | }
140 | readPosition += size;
141 | }
142 |
143 | orderRaceEngine.flush(threadNo);
144 | partRaceEngine.flush(threadNo);
145 |
146 | } catch (Exception e) {
147 | e.printStackTrace();
148 | }
149 | countDownLatch.countDown();
150 | //System.out.println("thread cost " + (System.currentTimeMillis() - threadStart) + " ms");
151 | }).start();
152 | }
153 |
154 | countDownLatch.await();
155 | //System.out.println("read + analysis cost " + (System.currentTimeMillis() - start) + " ms");
156 |
157 | //for (int j = 0; j < 128; j++) {
158 | // long dataNum = 0;
159 | // for (int i = 0; i < threadNum; i++) {
160 | // dataNum += orderRaceEngine.bucketFiles[i][j].getDataNum();
161 | // }
162 | // System.out.println("partition " + j + " has " + dataNum + " nums");
163 | //}
164 | //
165 | //for (int i = 0; i < threadNum; i++) {
166 | // for (int j = 0; j < 128; j++) {
167 | // System.out.println(" partRaceEngine | thread " + i + " partition " + j + " nums " +
168 | // partRaceEngine.bucketFiles[i][j].getDataNum());
169 | // }
170 | //}
171 |
172 | }
173 | }
174 | }
175 |
176 | //AtomicInteger atomicInteger = new AtomicInteger();
177 |
178 | @Override
179 | public String quantile(String table, String column, double percentile) throws Exception {
180 | //long start = System.currentTimeMillis();
181 | DiskRaceEngine diskRaceEngine = columnName2EngineMap.get(tableColumnKey(table, column));
182 | long ans = diskRaceEngine.quantile(percentile);
183 | //long cost = System.currentTimeMillis() - start;
184 | //System.out.println(
185 | // "Query:" + table + ", " + column + ", " + percentile + " Answer:" + ans + ", Cost " + cost + " ms");
186 | //if (atomicInteger.incrementAndGet() == 9) {
187 | // return "12345";
188 | //}
189 | return ans + "";
190 | }
191 |
192 | private String tableColumnKey(String table, String column) {
193 | return (table + "." + column).toLowerCase();
194 | }
195 |
196 | }
197 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/adb/contest/SimpleAnalyticDB.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.adb.contest;
2 |
3 | import com.aliyun.adb.contest.spi.AnalyticDB;
4 |
5 | import java.io.*;
6 | import java.util.ArrayList;
7 | import java.util.HashMap;
8 | import java.util.List;
9 | import java.util.Map;
10 |
11 | public class SimpleAnalyticDB implements AnalyticDB {
12 |
13 | private final Map<String, List<Long>> data = new HashMap<>();
14 |
15 | /**
16 | *
17 | * The implementation must contain a public no-argument constructor.
18 | *
19 | */
20 | public SimpleAnalyticDB() {
21 | }
22 |
23 | @Override
24 | public void load(String tpchDataFileDir, String workspaceDir) throws Exception {
25 | File dir = new File(tpchDataFileDir);
26 |
27 | for (File dataFile : dir.listFiles()) {
28 | System.out.println("Start loading table " + dataFile.getName());
29 |
30 | // You can write data to workspaceDir
31 | File yourDataFile = new File(workspaceDir, dataFile.getName());
32 | yourDataFile.createNewFile();
33 |
34 | loadInMemory(dataFile);
35 | }
36 |
37 | }
38 |
39 | @Override
40 | public String quantile(String table, String column, double percentile) throws Exception {
41 |
42 | List<Long> values = data.get(tableColumnKey(table, column));
43 |
44 | if (values == null) {
45 | throw new IllegalArgumentException();
46 | }
47 |
48 | int rank = (int) Math.round(values.size() * percentile);
49 | String ans = values.get(rank-1).toString();
50 |
51 | System.out.println("Query:" + table + ", " + column + ", " + percentile + " Answer:" + rank + ", " + ans);
52 |
53 | return ans;
54 | }
55 |
56 | private void loadInMemory(File dataFile) throws IOException {
57 | BufferedReader reader = new BufferedReader(new FileReader(dataFile));
58 | String table = dataFile.getName();
59 | String[] columns = reader.readLine().split(",");
60 |
61 | for (String column : columns) {
62 | data.put(tableColumnKey(table, column), new ArrayList<>());
63 | }
64 |
65 | String rawRow;
66 | while ((rawRow = reader.readLine()) != null) {
67 | String[] row = rawRow.split(",");
68 |
69 | for (int i = 0; i < columns.length; i++) {
70 | data.get(tableColumnKey(table, columns[i])).add(Long.parseLong(row[i]));
71 | }
72 | }
73 |
74 | data.forEach((tableColumn, values) -> {
75 | values.sort(Long::compareTo);
76 | System.out.println("Finish loading column " + tableColumn);
77 | });
78 |
79 | }
80 |
81 | private String tableColumnKey(String table, String column) {
82 | return (table + "." + column).toLowerCase();
83 | }
84 |
85 | }
86 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/adb/contest/Util.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.adb.contest;
2 |
3 | import static com.aliyun.adb.contest.RaceAnalyticDB.PARTITION_OVER_PARTITION;
4 | import static com.aliyun.adb.contest.RaceAnalyticDB.THREAD_NUM;
5 |
6 | /**
7 | * @author jingfeng.xjf
8 | * @date 2021-06-28
9 | */
10 | public class Util {
11 |
12 | //public static long[] sharedBuffer = new long[498_3700];
13 | //public static long[] sharedBuffer = new long[260_0000];
14 | //public static long[] sharedBuffer = new long[260_0000];
15 |
16 | public static long[] sharedBuffer = new long[60_0000];
17 | public static long[][][] sharedBuffers = new long[THREAD_NUM][PARTITION_OVER_PARTITION][5_0000];
18 | public static int[][] sharedIndex = new int[THREAD_NUM][PARTITION_OVER_PARTITION];
19 | // quickselect: returns the k-th largest value (1-based k) in nums[start..end], partially reordering the array in place
20 | public static long quickSelect(long[] nums, int start, int end, int k) {
21 | if (start == end) {
22 | return nums[start];
23 | }
24 | int left = start;
25 | int right = end;
26 | long pivot = nums[(start + end) / 2];
27 | while (left <= right) {
28 | while (left <= right && nums[left] > pivot) {
29 | left++;
30 | }
31 | while (left <= right && nums[right] < pivot) {
32 | right--;
33 | }
34 | if (left <= right) {
35 | long temp = nums[left];
36 | nums[left] = nums[right];
37 | nums[right] = temp;
38 | left++;
39 | right--;
40 | }
41 | }
42 | if (start + k - 1 <= right) {
43 | return quickSelect(nums, start, right, k);
44 | }
45 | if (start + k - 1 >= left) {
46 | return quickSelect(nums, left, end, k - (left - start));
47 | }
48 | return nums[right + 1];
49 | }
50 | }
51 |
--------------------------------------------------------------------------------
/src/main/java/com/aliyun/adb/contest/spi/AnalyticDB.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.adb.contest.spi;
2 |
3 | /**
4 | *
5 | * DO NOT modify this file.
6 | *
7 | */
8 | public interface AnalyticDB {
9 |
10 | /**
11 | *
12 | * Load test data.
13 | *
14 | * @param tpchDataFileDir A read-only directory
15 | * @param workspaceDir A directory for players to use
16 | */
17 | void load(String tpchDataFileDir, String workspaceDir) throws Exception;
18 |
19 |
20 | /**
21 | *
22 | * Quantile definition:
23 | *
24 | * Consider a table T which contains a column C in it. C contains five values {1, 2, 3, 4, 5}.
25 | * quantile(T, C, 0.2) = values[5*0.2-1] = values[0] = 1
26 | *
27 | * We make sure that the result (percentile*values.length) is an integer.
28 | *
29 | * @param table TPC-H table
30 | * @param column A column that belong to this table.
31 | * @param percentile [0, 1.0], such as 0.25, 0.125
32 | * @return A string representing the answer
33 | */
34 | String quantile(String table, String column, double percentile) throws Exception;
35 |
36 | }
37 |
--------------------------------------------------------------------------------
/src/main/resources/META-INF/services/com.aliyun.adb.contest.spi.AnalyticDB:
--------------------------------------------------------------------------------
1 | com.aliyun.adb.contest.RaceAnalyticDB
--------------------------------------------------------------------------------
/src/test/java/com/aliyun/adb/contest/TestSimpleAnalyticDB.java:
--------------------------------------------------------------------------------
1 | package com.aliyun.adb.contest;
2 |
3 | import com.aliyun.adb.contest.spi.AnalyticDB;
4 | import org.junit.Assert;
5 | import org.junit.Test;
6 |
7 | import java.io.BufferedReader;
8 | import java.io.File;
9 | import java.io.FileReader;
10 |
11 | public class TestSimpleAnalyticDB {
12 |
13 | @Test
14 | public void testCorrectness() throws Exception {
15 | File testDataDir = new File("./test_data");
16 | File testWorkspaceDir = new File("./target");
17 | File testResultsFile = new File("./test_result/results");
18 | AnalyticDB analyticDB = new RaceAnalyticDB();
19 |
20 | // Step #1: load data
21 | analyticDB.load(testDataDir.getAbsolutePath(), testWorkspaceDir.getAbsolutePath());
22 |
23 | // Step #2: test quantile function
24 | try (BufferedReader resReader = new BufferedReader(new FileReader(testResultsFile))) {
25 | String line;
26 |
27 | while ((line = resReader.readLine()) != null) {
28 | String[] resultStr = line.split(" ");
29 | String table = resultStr[0];
30 | String column = resultStr[1];
31 | double percentile = Double.valueOf(resultStr[2]);
32 | String answer = resultStr[3];
33 |
34 | Assert.assertEquals(answer, analyticDB.quantile(table, column, percentile));
35 | }
36 | }
37 | }
38 |
39 | }
40 |
--------------------------------------------------------------------------------
/test_result/results:
--------------------------------------------------------------------------------
1 | lineitem L_ORDERKEY 0.15 1389661994001906474
2 | lineitem L_ORDERKEY 0.16 1470939429013645136
3 | lineitem L_ORDERKEY 0.17 1564063400646807391
4 | lineitem L_ORDERKEY 0.18 1644410611553175244
5 | lineitem L_ORDERKEY 0.26 2402495117539867507
6 | lineitem L_PARTKEY 0.89 8224850914152207186
7 | lineitem L_PARTKEY 0.90 8307213892786221587
8 | lineitem L_PARTKEY 0.91 8396016236643654337
9 | lineitem L_PARTKEY 0.92 8484641999680979649
10 | lineitem L_PARTKEY 0.93 8580841529337283852
11 |
--------------------------------------------------------------------------------