├── .gitignore
├── LICENSE.md
├── README.md
├── dfx.json
├── downloading_vessel.md
├── images
├── Binary_search_tree.svg
├── Singly-linked-list.svg
├── array.png
└── hash-table.svg
├── module-1.md
├── module-2.md
├── module-3.md
├── module-4.md
├── package-set.dhall
├── src
├── BigMapBloomFilter
│ ├── BigMapBloomFilter.mo
│ ├── Main.mo
│ └── Utils.mo
├── BinarySearchTree
│ ├── BST.mo
│ ├── Main.mo
│ └── Types.mo
└── BloomFilter
│ ├── BloomFilter.mo
│ └── Main.mo
├── vendor
├── ATTRIBUTION.md
└── motoko-bigmap
│ ├── LICENSE
│ ├── NOTICE
│ ├── app
│ ├── Main.mo
│ └── SegCan.mo
│ ├── dfx.json
│ ├── src
│ ├── BigMap.mo
│ ├── BigTest
│ │ ├── Batch.mo
│ │ ├── Call.mo
│ │ ├── Eval.mo
│ │ ├── README.md
│ │ └── Types.mo
│ ├── DebugOff.mo
│ ├── Order.mo
│ ├── SegKey.mo
│ ├── Segment.mo
│ └── Types.mo
│ └── test
│ ├── BigTestPutGet.mo
│ └── PutGet.mo
└── vessel.dhall
/.gitignore:
--------------------------------------------------------------------------------
1 | # Various IDEs and Editors
2 | .vscode/
3 | .idea/
4 | **/*~
5 |
6 | # Mac OSX temporary files
7 | .DS_Store
8 | **/.DS_Store
9 |
10 | # dfx temporary files
11 | .dfx/
12 |
13 | # vessel temporary files
14 | .vessel/
15 |
16 | # frontend code
17 | node_modules/
18 | dist/
19 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | Creative Commons Attribution-NonCommercial 4.0 International Public License
2 |
3 | By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-NonCommercial 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions.
4 |
5 | Section 1 – Definitions.
6 |
7 | (a) Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image.
8 | (b) Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License.
9 | (c) Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights.
10 | (d) Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements.
11 | (e) Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material.
12 | (f) Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License.
13 | (g) Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license.
14 | (h) Licensor means the individual(s) or entity(ies) granting rights under this Public License.
15 | (i) NonCommercial means not primarily intended for or directed towards commercial advantage or monetary compensation. For purposes of this Public License, the exchange of the Licensed Material for other material subject to Copyright and Similar Rights by digital file-sharing or similar means is NonCommercial provided there is no payment of monetary compensation in connection with the exchange.
16 | (j) Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them.
17 | (k) Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world.
18 | (l) You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning.
19 |
20 | Section 2 – Scope.
21 |
22 | (a) License grant.
23 | 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to:
24 | A. reproduce and Share the Licensed Material, in whole or in part, for NonCommercial purposes only; and
25 | B. produce, reproduce, and Share Adapted Material for NonCommercial purposes only.
26 | 2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions.
27 | 3. Term. The term of this Public License is specified in Section 6(a).
28 | 4. Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material.
29 | 5. Downstream recipients.
30 | A. Offer from the Licensor – Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License.
31 | B. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. 6. No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i).
32 | (b) Other rights.
33 | 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise.
34 | 2. Patent and trademark rights are not licensed under this Public License.
35 | 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties, including when the Licensed Material is used other than for NonCommercial purposes.
36 |
37 | Section 3 – License Conditions.
38 |
39 | Your exercise of the Licensed Rights is expressly made subject to the following conditions.
40 | (a) Attribution.
41 | 1. If You Share the Licensed Material (including in modified form), You must:
42 | A. retain the following if it is supplied by the Licensor with the Licensed Material:
43 | i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated);
44 | ii. a copyright notice;
45 | iii. a notice that refers to this Public License;
46 | iv. a notice that refers to the disclaimer of warranties;
47 | v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License.
48 | 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information.
49 | 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable.
50 | 4. If You Share Adapted Material You produce, the Adapter's License You apply must not prevent recipients of the Adapted Material from complying with this Public License.
51 |
52 | Section 4 – Sui Generis Database Rights.
53 |
54 | Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material:
55 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database for NonCommercial purposes only;
56 | b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material; and
57 | c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights.
58 |
59 | Section 5 – Disclaimer of Warranties and Limitation of Liability.
60 | a. Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.
61 | b. To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.
62 | c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability.
63 |
64 | Section 6 – Term and Termination.
65 |
66 | a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates:
67 | 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or
68 | 2. upon express reinstatement by the Licensor. For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License.
69 | c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License.
70 |
71 | Section 7 – Other Terms and Conditions.
72 |
73 | a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed.
74 | b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License.
75 |
76 | Section 8 – Interpretation.
77 |
78 | a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License.
79 | b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions.
80 | c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor.
81 | d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority.
82 |
83 | END OF TERMS AND CONDITIONS
84 |
85 | Copyright (c) 2020 DFINITY Stiftung, ALL RIGHTS RESERVED
86 |
87 | Creative Commons Attribution-NonCommercial 4.0 International Public License; you may not use this file except in compliance with the Public License. You may obtain a copy of the Public License at:
88 |
89 | https://creativecommons.org/licenses/by-nc/4.0/legalcode
90 |
91 | Unless required by applicable law or agreed to in writing, content distributed under this Public License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the Public License for the specific language governing permissions and limitations under the Public License.
92 |
93 | END OF APPENDIX
94 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Table of Contents
2 | - [Module 1: Data Structures on the Internet Computer](module-1.md)
3 | - [Module 2: Object-Oriented Data Structure: Bloom Filters](module-2.md)
4 | - [Module 3: Pure Data Structures: Binary Search Trees](module-3.md)
5 | - [Module 4: Scaling Data Structures with `BigMap`](module-4.md)
6 |
7 | # About
8 | In this course, students will learn about the foundational elements of data structures as well as how they work on the Internet Computer. Module 1 serves as an intro to this unit, providing context for data structure design and the tradeoffs to consider when weighing which to use in certain projects. Module 2 and 3 explore the distinction between Motoko's differing styles for writing data structures: object-oriented (collection of functions that mutate state), and pure (no methods, no mutations). Module 4 builds on the data structures explored so far. Students will get hands-on experience via the IC's `BigMap` library, learning to scale their data structures beyond a single canister in the process.
9 |
10 | # Content
11 | The provided content was developed in collaboration with the following students:
12 |
13 | - [Nick Zoghb](https://www.linkedin.com/in/nickzoghb/), a graduate of University of California, Berkeley (2018) with a focus on Computer Science and Bioengineering
14 | - [Connor Solimano](https://www.linkedin.com/in/connor-solimano/), a student at Harvard College (2022) pursuing a degree in Computer Science and Economics
15 |
--------------------------------------------------------------------------------
/dfx.json:
--------------------------------------------------------------------------------
1 | {
2 | "dfx": "0.7.2",
3 | "canisters": {
4 | "BigMap": {
5 | "main": "vendor/motoko-bigmap/app/Main.mo",
6 | "type": "motoko"
7 | },
8 | "BigMapBloomFilter": {
9 | "main": "src/BigMapBloomFilter/Main.mo",
10 | "type": "motoko"
11 | },
12 | "BloomFilter": {
13 | "main": "src/BloomFilter/Main.mo",
14 | "type": "motoko"
15 | },
16 | "BST": {
17 | "main": "src/BinarySearchTree/Main.mo",
18 | "type": "motoko"
19 | }
20 | },
21 | "defaults": {
22 | "build": {
23 | "packtool": "vessel sources"
24 | }
25 | }
26 | }
--------------------------------------------------------------------------------
/downloading_vessel.md:
--------------------------------------------------------------------------------
1 |
2 | # Part A
3 | 1. Go to https://github.com/dfinity/vessel
4 | 2. Click on "Releases" on the right hand side.
5 | 3. Download the version appropriate for your operating system.
6 |
7 | # Part B (MacOS)
8 | 4. Once the file is in your directory, right click on the file and select "open". Opening it this way assures your OS that this file is safe to use.
9 | 5. Next, you want to put the file into your path.
10 | 6. To do so, open your terminal and type the following:
11 | ```
12 | echo $PATH
13 | ```
14 | 7. From there, depending on the file you downloaded and where it was saved locally, you want to move that file and rename it to `vessel`, like so:
15 | ```
16 | mv Downloads/vessel-macos /usr/local/bin/vessel
17 | ```
18 | 8. Note, if "permission is denied", execute the following command:
19 | ```
20 | sudo mv Downloads/vessel-macos /usr/local/bin/vessel
21 | ```
22 | 9. Make vessel an executable by running the following command:
23 | ```
24 | chmod +x /usr/local/bin/vessel
25 | ```
26 |
--------------------------------------------------------------------------------
/images/Binary_search_tree.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 |
139 |
162 |
164 |
165 |
166 |
167 |
169 | Adobe PDF library 5.00
170 |
171 |
172 |
173 |
174 |
175 |
177 | 2004-01-23T20:04:24-04:00
178 | 2005-12-31T21:30:20Z
179 | Adobe Illustrator 10.0
180 | 2004-01-23T20:25:01-05:00
181 |
182 |
183 | image/svg+xml
186 |
187 |
188 |
189 | 8
212 | 3
235 | 10
258 | 1
281 | 6
304 | 14
327 | 4
350 | 7
373 | 13
396 |
--------------------------------------------------------------------------------
/images/Singly-linked-list.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
215 |
--------------------------------------------------------------------------------
/images/array.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DFINITY-Education/data-structures/57c4aaf711c3b2df69259add09adcfd41ee658e2/images/array.png
--------------------------------------------------------------------------------
/images/hash-table.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
258 |
--------------------------------------------------------------------------------
/module-1.md:
--------------------------------------------------------------------------------
1 | # Module 1: Data Structures on the Internet Computer
2 |
3 | ## Outline
4 |
5 | 1. [Tradeoffs in data structure design](#tradeoffs-in-traditional-data-structures)
6 |
7 | * [Linked List vs Array](#linked-list-vs-array)
8 | * [Common Data Structures](#common-data-structures)
9 |
10 | 2. [Data structures in Motoko](#data-structures-in-motoko)
11 |
12 | * [Pure vs Object-Oriented](#pure-vs-object-oriented)
13 | * [Migration and Upgrading Canisters](#Migration-and-Upgrading-Canisters)
14 | * [Bigmap](#bigmap)
15 |
16 | ## Tradeoffs in Traditional Data Structures
17 |
18 | Data structures enable efficient data storage and come in a wide variety of forms. The optimal data structure for a given task depends on the situation and program priorities; understanding the various tradeoffs inherent in common data structures is essential to using them effectively.
19 |
20 | One common tradeoff exhibited in many data structures is that between **memory** and **lookup speed**. Often, data structures that take up more memory allow for greater lookup speed and vice versa.
21 |
22 | ### Linked List vs Array
23 |
24 | To better understand the aforementioned tradeoff between memory and lookup time, consider the characteristics of **arrays** compared to **linked lists**. An **array** is a fixed-length data structure with constant [O(1)] lookup time. Once an array of a specified size is created, you can't increase its capacity without creating an entirely new array. As a result, the tradeoff for this superb lookup time is reduced flexibility (making arrays poor for dynamically growing/shrinking databases) and increased memory required (you may need to allocate more space than needed to memory if unsure of the exact size required).
25 |
26 |
29 |
30 | Conversely, a **linked list** is a dynamically-sized data structure consisting of many nodes, each of which contains a specified data value and a pointer to the next node in the list. If you want to add a new element to the list, you just create a new node and link it into the structure with a pointer. The tradeoff for this increased flexibility, however, is that the lookup time is **O(n)**. This is because if you want to find the data value at the last node in the list, you must first traverse the entire linked list. The head (first node) of the linked list is typically the only location that's directly stored; all other node locations are stored in the preceding node.
31 |
32 | Tradeoffs may also exist in the insertion or deletion time of a given data structure. These times depend on the form of insertion/deletion that takes place. For example, inserting at the beginning of a linked list is **O(1)**, whereas inserting an element at the end of the linked list is **O(n)**.
33 |
34 |
37 |
38 | ### Common Data Structures
39 |
40 | #### Hash Table
41 |
42 | A **hash table** is a data structure consisting of an array and corresponding linked lists. Data is stored in key-value pairs, where the key corresponds to a particular index in the array. In the example shown below, consider a hash table intended to store the phone number (the value) for a given person (the key). We must use a **hash function** to find the particular index that a person's phone number is stored in. This hash function takes in the person's name and outputs an integer (from 0 to the array size minus 1) corresponding with the indices of the array.
43 |
44 | This, however, creates a problem: what happens when two distinct names result in the same index? This is bound to happen when the number of data values exceeds the size of the fixed array. Such an event, called a **collision**, is resolved by forming a linked list at that array index. In the diagram below, "Sandra Dee" hashes to the same index, 152, as "John Smith." As a result, the "John Smith" result just points to the location of "Sandra Dee" in the linked list, which can then be traversed to find the stored value for "John Smith".
45 |
46 |
47 |
Hashing names to indices in a hash table. Source: Hash Table Wiki
48 |
49 |
50 | Hash tables are especially useful because they offer both dynamically-sized data storage while also maintaining fast lookup times. In this case, the essential tradeoff is between array size (memory) and lookup speed. A smaller array requires less up-front memory to be allocated, but it results in a greater number of collisions. As a result, one must traverse each linked list for a given index to find the desired value. Larger arrays result in fewer collisions, allowing for near-constant lookup times, but require more up-front memory.
51 |
52 | #### Binary Search Tree
53 |
54 | A **binary search tree** is a data structure in which each node points to two other nodes. These values are organized such that every node to the left of a given node is a smaller value, while every node to the right is a larger value. This allows for **O(log n)** search and insertion time in a balanced tree, as one traverses each "level" of the tree. Binary search trees provide quicker search than linked lists but do not preserve the insertion order of elements.
55 |
56 |
57 |
Left-aligned minimum binary search tree. Source: BST Wiki
58 |
59 |
60 | ## Data Structures in Motoko
61 |
62 | ### Pure vs Object-Oriented
63 |
64 | Data structures in Motoko are implemented using either pure or object-oriented module styles. Pure data structures use the functional programming features of Motoko and are characterized by their lack of mutable variables. Pure data structures are particularly useful when sending messages across mutable canisters because they are pure data - there is no mutation of state associated with them.
65 |
66 | Motoko's [List](https://sdk.dfinity.org/docs/base-libraries/list) type is one such pure data structure. It contains no methods (unique to object-oriented modules) and is defined quite simply as:
67 |
68 | ```
69 | // A singly-linked list consists of zero or more _cons cells_, wherein
70 | // each cell contains a single list element (the cell's _head_), and a pointer to the
71 | // remainder of the list (the cell's _tail_).
72 | public type List<T> = ?(T, List<T>);
73 | ```
74 |
75 | Other common examples of pure data structures in Motoko include [AssocList, Trie, Result, and Option](https://sdk.dfinity.org/docs/base-libraries/stdlib-intro.html).
76 |
77 | There are also several data structures in Motoko that take advantage of object-oriented features, meaning that they associate data and code whose variables contain mutable state.
78 |
79 | [HashMap](https://sdk.dfinity.org/docs/base-libraries/hashmap) is a great example of an object-oriented data structure in Motoko, but its definition (which includes methods) cannot be written as simply as that of `List`. Other examples of object-oriented data structures in Motoko include [Buffer, TrieMap, and RBTree](https://sdk.dfinity.org/docs/base-libraries/stdlib-intro.html).
80 |
81 | ```
82 | class HashMap(initCapacity : Nat, keyEq : (K, K) -> Bool, keyHash : K -> Hash.Hash)
83 | ```
84 |
85 | ### Migration and Upgrading Canisters
86 |
87 | "Upgrading" is a term used to describe how a canister is updated with new code after being deployed on the Internet Computer. As there may be canisters that depend on another canister's specific implementation (e.g. one canister queries a separate database canister), developers on the IC must be careful about which aspects they change in each upgrade, lest they break the functionality of other dependent canisters.
88 |
89 | While data structures implemented in the object-oriented style are supported within canisters, they shouldn't necessarily be used to send data across canisters or to persist data in canisters that will be upgraded in the future. [Stable](https://sdk.dfinity.org/docs/language-guide/actors-async.html#_stable_and_flexible_variables) data stored within canisters must be first order (like an `Int` or `Text`), meaning that it cannot contain objects.
90 |
91 | ### [**BigMap**](https://www.youtube.com/watch?v=VcsIb37I2fM)
92 |
93 | Canisters on the Internet Computer have a maximum storage capacity of about 4 GB. However, large applications require significantly larger storage capabilities that necessarily need to span multiple canisters for adequate storage. That's where BigMap comes in. BigMap helps developers scale data across multiple canisters, allowing them to more easily store petabytes of data on the IC.
94 |
95 | In essence, BigMap is similar to a distributed hash table, where chunks of data are segmented across multiple canisters using key-value pairs, which are then used for data retrieval when needed. BigMap uses two main kinds of canisters: an Index Canister and a number of Data Bucket canisters.
96 |
97 | The Index Canister maps the keys for particular pieces of data to canister ids that indicate which Data Bucket canisters hold the corresponding value. BigMap stores the actual data in Data Bucket canisters, the number of which can be scaled up or down depending on the amount of storage required. These Data Buckets also use a key-value system (HashMap) whereby the key for a particular data value is hashed using the SHA256 algorithm to then locate the corresponding data stored as a value.
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
--------------------------------------------------------------------------------
/module-2.md:
--------------------------------------------------------------------------------
1 | # Module 2: Object-Oriented Data Structure: Bloom Filters
2 |
3 | In this Module, you will implement a bloom filter that allows users to determine if an item is present in a given set.
4 |
5 | ## Background
6 |
7 | A **Bloom filter** is a probabilistic data structure designed to indicate, with high efficiency and low memory, if an element is contained in a set. It's **probabilistic** because although it can tell you with certainty that an element is not in the data structure, it can only tell you that an element *may be* contained in the structure. In other words, false negative results (indicating the element doesn't exist in the set when it actually does) won't occur, but false positive results (indicating the element exists when it doesn't) are possible.
8 |
9 | Such a data structure is especially useful in instances where we care more about ensuring that an element is definitely not in a set. For instance, when registering a new username, many services aim to quickly indicate whether a given name is already taken. The cost of a false positive - indicating that a username is already taken when it is actually available - isn't high, so this tradeoff for increased efficiency is worthwhile.
10 |
11 | Bloom filters use a **bitmap** as the base data structure. A bitmap is simply an array where each index contains either a 0 or a 1. The filter takes in the value that's being entered into the data structure, hashes it to multiple indices (ranging from 0 to the length - 1 of the bitmap) using several different hash functions, and stores a 1 at that particular index. The beauty of a bloom filter - and the aspect that makes it so space-efficient - is the fact that we don't need to actually store the given element in our set. We simply hash the element, go to the location in our bitmap that it hashes to, and insert a 1 into that spot (or multiple spots if using multiple hash functions).
12 |
13 | **Example bitmap with values initialized to 0:**
14 |
20 |
21 | To test for membership in the set, the program hashes the value being searched using the same aforementioned hash functions. If the resulting values are not in the bitmap, then you know that the element is *not* in the set. If the values are in the bitmap, then all you can conclude is that the element *might be* in the set. You cannot determine if the item exists with certainty because there could be other combinations of different hashed values that overlap with the same bits. Naturally, as you enter more elements into the data structure, the bitmap fills up and the probability of producing a false positive increases. [This interactive site](https://llimllib.github.io/bloomfilter-tutorial/) provides a great visual explanation of the mechanics behind bloom filters.
22 |
23 | ## Your Task
24 |
25 | In this exercise, you will implement a Bloom filter like the one described above. This data structure, built using object-oriented principles, will enable you to add elements to the structure and check if they are contained in the filter.
26 |
27 | ### Code Understanding
28 |
29 | #### `BloomFilter.mo`
30 |
31 | Let's begin by taking a look at `BloomFilter.mo`. You'll see two classes: `AutoScalingBloomFilter` and `BloomFilter`. We mentioned previously how the false-positive rate in Bloom filters increases as you store more elements in them. To enable our Bloom filter to maintain a consistent error rate, we will store a list of Bloom filters, where a new Bloom filter is created when an old filter reaches its maximum tolerated false-positive rate. `AutoScalingBloomFilter` manages our list of Bloom filters, deploys new Bloom filters, and searches through the list of filters when a user checks for membership. `BloomFilter` is the class used to create each individual Bloom filter - this is the part that you'll be finishing the implementation for.
32 |
33 | The `AutoScalingBloomFilter` class accepts three parameters: a `capacity`, an `errorRate`, and `hashFuncs`. The `capacity` represents the number of elements you want the bitmap to store and the `errorRate` is the max false-positive rate the Bloom filter should allow. `hashFuncs` is a list of the hash functions used for all Bloom filters.
34 |
35 | The first several lines of the `AutoScalingBloomFilter` class set up the optimal number (and size of) the slots in `BloomFilter`'s `bitMap`. `filters` maintains a list of all the Bloom filters, and `hashFuncs` contains all of the hash functions that will be used (the number of which also influences the false-positive error rate).
36 |
37 | The `add` function adds an `item` to our data structure; this is where the auto-scaling occurs. If a given `BloomFilter` has reached its capacity, a new one is created with the `item` and appended to the list. Notice that we call `BloomFilter`'s own `add` function to add an `item` to that specific `BloomFilter`.
38 |
39 | `check` runs through all of the Bloom filters in `filters` and calls each of their `check` class methods.
40 |
41 | The `BloomFilter` class accepts two parameters: `bitMapSize` and `hashFuncs`. The `bitMapSize` is the size of our bitmap, as determined by the math in `AutoScalingBloomFilter`. You can see it used in creating our `bitMap`, which is implemented as an `Array` of booleans initialized to `false`. `BloomFilter` has no notion of `capacity` or `errorRate` - the `AutoScalingBloomFilter` class is responsible for managing those factors.
42 |
43 | The `BloomFilter` takes advantage of **generic types**, represented by `S`, which allows the BloomFilter implementation to remain type agnostic. More specifically, this means that the `BloomFilter` and its related methods don't care if the items entered into it are of type `Text`, `Nat`, `Int`, etc. When you instantiate a new `BloomFilter` object, you specify a type that the `BloomFilter` will handle. All subsequent items must be of this same type.
44 |
45 | #### `Main.mo`
46 |
47 | `Main.mo` just sets up a `BloomFilter` and provides two functions, `bfAdd` and `bfCheck`, that you can use to test your implementation "on the go" using the command-line interface. Notice that these methods provide a specific type, `Nat`, that this specific instantiation of the `BloomFilter` will use.
48 |
49 | ### Specification
50 |
51 | **Task:** Complete the implementation of the `add` and `check ` methods in `BloomFilter.mo`.
52 |
53 | **`add`** simply adds an element to the `bitMap`
54 |
55 | * `add` takes one argument, `item`, representing the item to be added to the Bloom Filter, and returns nothing.
56 | * Remember that there will likely be more than one hash function stored in `hashFuncs`. You must apply each function in `hashFuncs` to the `item`, updating the boolean at the corresponding index of `bitMap` accordingly.
57 |
58 | **`check`** determines if the element is in the `bitMap`
59 |
60 | * `check` also takes one argument, `item`, representing the item to be checked for presence in the Bloom Filter, and returns `true` if it is contained and `false` otherwise. Remember that a `true` result isn't definitive - there is a chance of returning a false positive.
61 | * You must again apply each function in `hashFuncs` to `item`. `check` returns `true` if and only if every one of the resulting hashes indicates the existence of `item` in the Bloom filter.
62 |
63 | *Hint:* Both `add` and `check` only require 3-4 lines of code to implement fully - most of this is practice understanding the inner workings of a Bloom filter and Motoko syntax.
64 |
65 | ### Testing
66 | Note, you may need to download the *package vessel* to your local machine. Instructions to do so can be found here: https://github.com/DFINITY-Education/data-structures/blob/master/downloading_vessel.md
67 |
68 | As you progress through your implementation of the Bloom filter, you can periodically self-test your work using the command line interface (CLI) after you've built and deployed the corresponding canisters.
69 |
70 | Inputting variant types into the CLI can be a bit unintuitive at first, so here is a quick guide to doing so. Imagine you have the following variant type:
71 |
72 | ```
73 | type Custom = {
74 | #first;
75 | #second;
76 | #third;
77 | }
78 | ```
79 |
80 | and a method:
81 |
82 | ```
83 | // canister:main
84 | actor {
85 | func Foo(arg1: Custom) {...};
86 | }
87 | ```
88 |
89 | This is how you call it via the CLI:
90 |
91 | ```
92 | dfx canister call main Foo '(variant { first })'
93 | ```
94 |
95 | Using this method, you should be able to run some basic tests on your implementation to aid in debugging.
96 |
--------------------------------------------------------------------------------
/module-3.md:
--------------------------------------------------------------------------------
1 | # Module 3: Pure Data Structures: Binary Search Trees
2 |
3 | In this module, you will implement a binary search tree, a pure data structure that allows you to efficiently store and search sortable items.
4 |
5 | ## Background
6 |
7 | We briefly discussed **Binary Search Trees** (BST) in [Module 1](/module-1.md#binary-search-tree), which should provide a helpful (and necessary) foundation for this Module. Please re-read that section before continuing.
8 |
9 | ## Your Task
10 |
11 | In this exercise, you will implement a binary search tree like the one described above. This data structure will be built entirely using pure, functional programming paradigms, unlike the object-oriented Bloom filter described in [Module 2](/module-2.md).
12 |
13 | ### Code Understanding
14 |
15 | #### `Types.mo`
16 |
17 | Let's start by looking at `Types.mo`. Here we've defined a `Tree` type, which can either be a `#node` or `#leaf`. This is the type that every element in your BST will be. Each `#node ` stores 5 values: a `key`, `value`, `l`, `r`, and `compareFunc`:
18 |
19 | * `key` is the value that the BST is sorted by. Therefore, all elements to the left of a `#node` should have a smaller key while all elements to the right of a `#node` should have a larger key.
20 | * `value` is the actual value that's stored in a given element within your BST. Whereas the `key` is used solely to position the `#node` correctly amongst other `nodes`, the `value` is the "valuable" item that you want to store.
21 | * `l` contains the `Tree` to the left of this given `node`
22 | * `r` contains the `Tree` to the right of this given `node`
23 | * `compareFunc` contains the function that will be used to compare `key`s and sort `nodes`. Every `node` in the tree will have an identical `compareFunc`. The reason for this, as opposed to simply storing the function somewhere else, is that we're using a pure programming style that doesn't contain mutable state. As a result, each node must "carry around" its `compareFunc`. An example of a comparison function is
24 |
25 | A `Tree` can also be a `#leaf`, which is essentially a `NULL` value in that it contains no useful information - it's just a placeholder. `#leaf`s will only exist at the bottom of the tree.
26 |
27 | We will get to the `Traversal` type later once we've reviewed more code, but suffice to say that it can be either a `preorder`, `postorder`, or `inorder`.
28 |
29 | As is the case in [Module 2](/module-2.md#code-understanding)'s `BloomFilter`, the `Tree` type takes advantage of **generic types**, represented by `X` and `Y`, which allows the BST implementation to remain type agnostic. More specifically, this means that the `Tree` and its related functions don't care if the items entered into it are of type `Text`, `Nat`, `Int`, etc. When you create a new `BST`, you specify the two types that the `BST` will handle. In this case, `X` represents the type for `key`s while `Y` represents the type for `value`s. All subsequent keys and values must match these two types, respectively. See `Main.mo` for an example of how one fills in these generic types with more specific ones!
30 |
31 | #### `BST.mo`
32 |
33 | In `BST.mo` we provide the actual implementation for our BST. Skip the `IterRep` type for now and turn your attention to `validate`. `validate` takes a `Tree` as an argument and returns a boolean indicating whether the given `Tree` is a valid BST - that is, it checks whether all of the left child nodes have `key`s less than their parent nodes and whether the right child nodes are greater. This function recursively checks the entire `Tree` using the helper function `validateAgainstChild`. Go through line by line until you fully understand this function - its general structure and recursive nature will help you think about how you can implement the other functions.
34 |
35 | The rest of the functions are either simple (`height` and `size`) or ones that you will implement yourself.
36 |
37 | #### `Main.mo`
38 |
39 | `Main.mo` just sets up a `BST` and provides a variety of functions that you can use to test your implementation "on the go" using the command-line interface.
40 |
41 | ### Specification
42 |
43 | **Task:** Complete the implementation of `get`, `put`, and `iter` in `BST.mo`
44 |
45 | **`get`** takes two arguments, `t` (the tree) and `key`, and returns the `value` of the `#node` in tree `t` with the given `key`
46 |
47 | * Start by thinking about the two cases that exist for a `Tree` type. Which case indicates that you've reached the end of the tree without finding the node? If this happens, you should return `null` because the tree doesn't contain a `node` with this `key`.
48 | * For a given `#node`, make sure to search the correct side of the tree depending on the output of applying `compareFunc` to `key` and the key of the current node being searched.
49 |
50 | **`put`** takes a Tree type, `t`, as well as the `key`, `val`, and `compareFunc` corresponding to a new `#node` that you want to insert into the BST and returns the new Tree with this `#node` added in the correct location.
51 |
52 | * If `t` is just a `#leaf`, then the new `#node` you add will be the only node in the Tree
53 | * If `t ` is itself a `#node`, you must compare the given `key` to the `key` of that `#node` using the provided `compareFunc`. This is where the real thinking starts - you must place the new node into the correct position within the tree!
54 | * If the given `key` is equal to the node's key, then you should replace that node with the new node (remember to keep its left and right children, however).
55 | * If the given `key` is less than or greater than the node's key, then you must check the corresponding child (left or right depending on `compareFunc`'s result). Think about how you must handle the case in which the child is a `#leaf` vs the case where the child is a `#node` (one case will require calling `put` again).
56 |
57 | **`iter`** takes two arguments, `t` (the tree) and `traversal` (a variant type indicating instructions for how to traverse the tree), and returns an `Iter` object that allows you to iterate through the tree in a specified order.
58 |
59 | BSTs, unlike some linear data structures, can be traversed in several different ways. The three main ways are:
60 |
61 | * **Inorder** traverses the tree in the following order: left child, root node, and right child
62 |
63 | * **Preorder** traverses starting with the root node, then left child, then right child
64 |
65 | * **Postorder** traverses the left child, then the right child, and then the root node
66 |
67 |
68 |
69 | Given the above tree, here's how each of these orders would traverse it:
70 |
71 | * **Inorder:** 1, 3, 4, 6, 7, 8, 10, 13, 14
72 | * Just think of the following algorithm:
73 | 1. Traverse the left subtree
74 | 2. Visit the root
75 | 3. Traverse the right subtree
76 | * **Preorder:** 8, 3, 1, 6, 4, 7, 10, 14, 13
77 | * Use the following algorithm:
78 | 1. Visit the root
79 | 2. Traverse the left subtree
80 | 3. Traverse the right subtree
81 | * **Postorder:** 1, 4, 7, 6, 3, 13, 14, 10, 8
82 | * Use the following algorithm:
83 | 1. Traverse the left subtree
84 | 2. Traverse the right subtree
85 | 3. Visit the root
86 |
87 | Feel free to read more about this topic [here](https://www.geeksforgeeks.org/tree-traversals-inorder-preorder-and-postorder/). As we previously saw in `Main.mo`, the `Traversal` type has three variants: `#preorder`, `#postorder`, `#inorder` corresponding to the three aforementioned traversal strategies.
88 |
89 | **`iter` implementation details:**
90 |
91 | * The `iter` function returns an object of type `Iter` ([Motoko SDK page](https://sdk.dfinity.org/docs/base-libraries/iter)) that can be iterated through by calling a `next` function.
92 | * This object maintains an internal state, a `treeIter` of type `IterRep`, and therefore isn't pure. `treeIter` is a variant type that can either be a `#tree` or `#kv`, representing a `Tree` and key/value pair respectively. See the `IterRep` type definition at the top of the `BST.mo` file.
93 | * Using the object returned from `iter`, you should be able to call the `next()` function (see `bstNext()` in `Main.mo` for an example of this) to iterate one step through the Tree and return the next (key, value) pair.
94 |
95 | **Hints:**
96 |
97 | * Take a look through `Main.mo` if you're still having trouble understanding how all the pieces of the BST relate. This can get a bit abstract, so it's helpful to see the concrete implementation of our BST using `Nat`s.
98 | * Get familiar with `case` and `switch ` statements, because you'll be using them (sometimes multiple times) in all the functions you implement!
99 | * Each of the three functions you're implementing, `get`, `put`, and `iter`, are independent from each other and increase in difficulty. Start with `get` and then move through `put` and `iter`. If, however, you can't complete one function, you can still implement the others and get a partially functioning BST.
100 |
101 | ### Testing
102 |
103 | As you progress through your implementation of the BST, you can periodically self-test your work using the command line interface (CLI) after you've built and deployed the corresponding canisters.
104 |
105 | Inputting variant types into the CLI can be a bit unintuitive at first, so here is a quick guide to doing so. Imagine you have the following variant type:
106 |
107 | ```
108 | type Custom = {
109 | #first;
110 | #second;
111 | #third;
112 | }
113 | ```
114 |
115 | and a method:
116 |
117 | ```
118 | // canister: main
119 | actor {
120 | func Foo(arg1: Custom) {...};
121 | }
122 | ```
123 |
124 | This is how you call it via the CLI:
125 |
126 | ```
127 | dfx canister call main Foo '(variant { first })'
128 | ```
129 |
130 | Using this method, you should be able to run some basic tests on your implementation to aid in debugging.
131 |
--------------------------------------------------------------------------------
/module-4.md:
--------------------------------------------------------------------------------
1 | # Module 4: Scaling Data Structures with `BigMap`
2 |
3 | In this module, you will use Motoko's `BigMap` to enable your implementation of the Bloom filter created in [Module 2](/module-2.md) to scale across Internet Computer canisters.
4 |
5 | ## Background
6 |
7 | We briefly discussed Motoko's `BigMap` function in [Module 1](/module-1.md), which should provide you with a brief introduction to its high-level purpose and use cases. Given that Motoko canisters can only store roughly 4 GB of data, developers need a way to easily scale their data storage across multiple canisters.
8 |
9 | We should note that Bloom filters are often used for their extreme efficiency, allowing us to store large data sets with relatively little memory. As such, the chances of creating a Bloom filter that surpasses 4 GB in the real world are slim to none. That being said, this activity gives you the toolset to understand how you can use `BigMap` to expand data structures that you have already implemented.
10 |
11 | A `BigMap` canister instance has already been deployed for this exercise (check the `dfx.json` config for a list of all canisters deployed in a project).
12 |
13 | ## Your Task
14 |
15 | ### Code Understanding
16 |
17 | #### `BloomFilter.mo`
18 |
19 | Let's start by taking a look at `BigMapBloomFilter/BigMapBloomFilter.mo`. The `BigMapBloomFilter` class maintains our `BigMap`-extension of `BloomFilter`, and you should notice that much of the code in this class parallels the code in `BloomFilter/BloomFilter.mo` that we used in [Module 2](/module-2.md). Feel free to reference that module again for a more in-depth description of the `BigMapBloomFilter` implementation.
20 |
21 | `add` and `check` are the two functions that you will implement for this module. They serve the same general purpose as the parallel functions in `BloomFilter/BloomFilter.mo`, but this time they incorporate the `BigMap` extension.
22 |
23 | #### `Utils.mo`
24 |
25 | The `Utils.mo` file contains several helper functions that you may find useful in completing the implementations of `add` and `check`.
26 |
27 | `serialize` takes in a list of booleans (corresponding to the `bitMap`) and converts it to a list of 1s and 0s of type `Nat8`. Unsurprisingly, `unserialize` performs the reverse operation. Finally, `constructWithData` creates a new `BloomFilter` with the provided `data` (the `bitMap` of a `BloomFilter`), `bitMapSize`, and `hashFuncs`. Notice that this function utilizes the `setData` function from `BloomFilter/BloomFilter.mo`. The purpose of these functions will become apparent in the specification below.
28 |
29 | #### `Main.mo`
30 |
31 | `Main.mo` instantiates a `BigMapBloomFilter` object with a few pre-entered parameters and provides two functions, `add` and `check`. These functions are the public interface that allow you to call `add` and `check` within the `BigMapBloomFilter` from the command line interface (for testing) or from other canisters. The only significant difference between this implementation and `BloomFilter/Main.mo` is that the two functions both require a `key` parameter. The `key` is what `BigMap` uses to index between canisters.
32 |
33 | ### Specification
34 |
35 | **Task:** Complete the implementation of the `add` and `check ` methods in `BigMapBloomFilter.mo`.
36 |
37 | **`add`** takes in a `key` and an `item` and adds that key, value pair to the `BigMapBloomFilter`
38 |
39 | * Start by understanding the `BigMap` API: `put` and `get`. Their function signatures are as follows:
40 | ```
41 | func get(key : [Nat8]) : async ?[Nat8]
42 | ```
43 | ```
44 | func put(key : [Nat8], value : [Nat8]) : async ()
45 | ```
46 | * Understand how the `un`/`serialize` data transformation functions from the `Utils` module can help "massage" Bloom filter data into the appropriate formats.
47 | * Think through the similarities and differences in how to approach the implementation of `add` and `check` for `BigMapBloomFilter` and `AutoScalingBloomFilter`. Now that we receive a key as input, how do we retrieve data from `BigMap` and convert it into a format we can interact with? How do we store the updated data back into `BigMap`?
48 | * Be sure to leverage other functions from the `Utils` module: `constructWithData`, etc. will come in handy!
49 | * Remember to unwrap results from a BigMap query.
50 |
51 | **`check`** takes in a `key` and an `item` and checks if that `item` is contained in any of the `BloomFilter`s
52 |
53 | * As above, be sure to leverage functions from the `Utils` module.
54 | * Similarly, remember to unwrap results from a BigMap query.
55 |
56 | ### Testing
57 |
58 | As you progress through your implementation of the `BigMapBloomFilter`, you can periodically self-test your work using the command line interface (CLI) after you've built and deployed the corresponding canisters.
59 |
60 | Inputting variant types into the CLI can be a bit unintuitive at first, so here is a quick guide to doing so. Imagine you have the following variant type:
61 |
62 | ```
63 | type Custom = {
64 | #first;
65 | #second;
66 | #third;
67 | }
68 | ```
69 |
70 | and a method:
71 |
72 | ```
73 | // canister: main
74 | actor {
75 | func Foo(arg1: Custom) {...};
76 | }
77 | ```
78 |
79 | This is how you call it via the CLI:
80 |
81 | ```
82 | dfx canister call main Foo '(variant { first })'
83 | ```
84 |
85 | Using this method, you should be able to run some basic tests on your implementation to aid in debugging.
86 |
--------------------------------------------------------------------------------
/package-set.dhall:
--------------------------------------------------------------------------------
1 | let upstream = https://github.com/dfinity/vessel-package-set/releases/download/mo-0.6.1-20210511/package-set.dhall sha256:aa5083f7cfd9dd0ddbd0210847175417a7efeaf8adcca6838fe9dd2ac460d236
2 | let Package =
3 | { name : Text, version : Text, repo : Text, dependencies : List Text }
4 |
5 | let
6 | -- This is where you can add your own packages to the package-set
7 | additions =
8 | [] : List Package
9 |
10 | let
11 | {- This is where you can override existing packages in the package-set
12 |
13 | For example, if you wanted to use version `v2.0.0` of the foo library:
14 | let overrides = [
15 | { name = "foo"
16 | , version = "v2.0.0"
17 | , repo = "https://github.com/bar/foo"
18 | , dependencies = [] : List Text
19 | }
20 | ]
21 | -}
22 | overrides =
23 | [] : List Package
24 |
25 | in upstream # additions # overrides
26 |
--------------------------------------------------------------------------------
/src/BigMapBloomFilter/BigMapBloomFilter.mo:
--------------------------------------------------------------------------------
1 | import Array "mo:base/Array";
2 | import BigMap "canister:BigMap";
3 | import BloomFilter "../BloomFilter/BloomFilter";
4 | import Float "mo:base/Float";
5 | import Hash "mo:base/Hash";
6 | import Int "mo:base/Int";
7 | import Int32 "mo:base/Int32";
8 | import Nat "mo:base/Nat";
9 | import Nat32 "mo:base/Nat32";
10 | import Utils "./Utils";
11 |
module {

  type Hash = Hash.Hash;

  /// Manages a BigMap-backed Bloom filter: each filter's bitmap is stored in
  /// the BigMap canister under a caller-supplied key, serialized via Utils.
  /// Args:
  ///   |capacity|  The maximum number of elements a BloomFilter may store.
  ///   |errorRate| The maximum false positive rate a BloomFilter may maintain.
  ///   |hashFuncs| The hash functions used to hash elements into the filter.
  /// The generic parameter S is the type of the items stored in the filter.
  public class BigMapBloomFilter<S>(capacity: Nat32, errorRate: Float, hashFuncs: [(S) -> Hash]) {

    // Derive the optimal bitmap size for |capacity| items at |errorRate|:
    // numSlices hash slices of bitsPerSlice bits each.
    let numSlices = Float.ceil(Float.log(1.0 / errorRate));
    let bitsPerSlice = Float.ceil(
      (Float.fromInt(Int32.toInt(Int32.fromNat32(capacity))) * Float.abs(Float.log(errorRate))) /
      (numSlices * (Float.log(2) ** 2)));
    let bitMapSize: Nat32 = Nat32.fromNat(Int.abs(Float.toInt(numSlices * bitsPerSlice)));

    /// Adds an element to the BloomFilter stored under |key|, creating a fresh
    /// filter if none exists, then writes the updated bitmap back to BigMap.
    /// Args:
    ///   |key| The key associated with the particular item (used with BigMap).
    ///   |item| The item to be added.
    public func add(key: Nat8, item: S) : async () {
      let filterOpt = await BigMap.get([key]);
      let filter = switch (filterOpt) {
        case (null) { BloomFilter.BloomFilter<S>(bitMapSize, hashFuncs) };
        // BUGFIX: reconstruct with bitMapSize, not capacity -
        // constructWithData's first parameter is the bitmap size, and the
        // null branch above already sizes the filter with bitMapSize.
        case (?data) { Utils.constructWithData<S>(bitMapSize, hashFuncs, Utils.unserialize(data)) };
      };
      filter.add(item);
      await BigMap.put([key], Utils.serialize(filter.getBitMap()));
    };

    /// Checks if an item is contained in the BloomFilter stored under |key|.
    /// Args:
    ///   |key| The key associated with the particular item (used with BigMap).
    ///   |item| The item to be searched for.
    /// Returns:
    ///   A boolean indicating set membership (a true result may be a false
    ///   positive; false is definitive).
    public func check(key: Nat8, item: S) : async (Bool) {
      let filterOpt = await BigMap.get([key]);
      switch (filterOpt) {
        case (null) { false };
        case (?data) {
          // BUGFIX: rebuild with bitMapSize (not capacity) to match add().
          Utils.constructWithData<S>(bitMapSize, hashFuncs, Utils.unserialize(data)).check(item)
        };
      }
    };

  };

};
65 |
--------------------------------------------------------------------------------
/src/BigMapBloomFilter/Main.mo:
--------------------------------------------------------------------------------
1 | import Hash "mo:base/Hash";
2 |
3 | import BigMapBloomFilter "./BigMapBloomFilter";
4 |
actor {

  // A BigMap-backed Bloom filter over Nat items (restores the <Nat> type
  // argument lost to markup stripping).
  // NOTE(review): capacity 0 makes the derived bitmap size degenerate -
  // confirm this placeholder value is intended before real use.
  var bloomFilter = BigMapBloomFilter.BigMapBloomFilter<Nat>(0, 0.001, [Hash.hash]);

  /// Adds |item| to the Bloom filter stored in BigMap under |key|.
  public func add(key: Nat8, item: Nat) : async () {
    await bloomFilter.add(key, item);
  };

  /// Returns true if |item| may be in the filter under |key| (possible false
  /// positive), false if it is definitely absent.
  public func check(key: Nat8, item: Nat) : async (Bool) {
    await bloomFilter.check(key, item)
  };

};
18 |
--------------------------------------------------------------------------------
/src/BigMapBloomFilter/Utils.mo:
--------------------------------------------------------------------------------
1 | import Array "mo:base/Array";
2 | import Hash "mo:base/Hash";
3 | import Nat8 "mo:base/Nat8";
4 | import Text "mo:base/Text";
5 |
6 | import BloomFilter "../BloomFilter/BloomFilter";
7 |
module {

  type Hash = Hash.Hash;

  /// Serializes a Bloom filter bitmap into a BigMap-storable byte array:
  /// true -> 1, false -> 0.
  public func serialize(filterData: [Bool]) : [Nat8] {
    Array.map<Bool, Nat8>(filterData, func (b: Bool) : Nat8 { if (b) { 1 } else { 0 } })
  };

  /// Reverses serialize: a 1 becomes true; every other byte becomes false.
  public func unserialize(serializedData: [Nat8]) : [Bool] {
    Array.map<Nat8, Bool>(serializedData, func (w: Nat8) : Bool { w == 1 })
  };

  /// Builds a BloomFilter of size |bitMapSize| using |hashFuncs| and seeds it
  /// with an existing bitmap |data| via BloomFilter.setData.
  public func constructWithData<S>(bitMapSize: Nat32, hashFuncs: [(S) -> Hash], data: [Bool]) : BloomFilter.BloomFilter<S> {
    let filter = BloomFilter.BloomFilter<S>(bitMapSize, hashFuncs);
    filter.setData(data);
    filter
  };

};
28 |
--------------------------------------------------------------------------------
/src/BinarySearchTree/BST.mo:
--------------------------------------------------------------------------------
1 | import Iter "mo:base/Iter";
2 | import List "mo:base/List";
3 | import Nat "mo:base/Nat";
4 | import Order "mo:base/Order";
5 |
6 | import Types "./Types";
7 |
8 | module {
9 |
10 | type Iter = Iter.Iter<(X, Y)>;
11 | type List = List.List;
12 | type Order = Order.Order;
13 | type Tree = Types.Tree;
14 |
15 | type IterRep = List.List<{ #tree: Tree; #kv: (X, Y); }>;
16 |
17 | /// Determines whether a given Tree is a valid BST.
18 | /// Args:
19 | /// |t| The Tree to be validated.
20 | /// Returns:
21 | /// A boolean representing whether |t| is a valid BST.
22 | public func validate(t: Tree) : Bool {
23 | func validateAgainstChild(parentKey: X, child: Tree, expected: Order) : Bool {
24 | switch (child) {
25 | case (#leaf(_)) { true };
26 | case (#node(key, _, _, _, compareFunc)) {
27 | compareFunc(key, parentKey) == expected
28 | };
29 | }
30 | };
31 |
32 | switch (t) {
33 | case (#leaf(_)) { true };
34 | case (#node(parentKey, _, leftChild, rightChild, compareFunc)) {
35 | validateAgainstChild(parentKey, leftChild, #less)
36 | and validateAgainstChild(parentKey, leftChild, #greater)
37 | and validate(leftChild)
38 | and validate(rightChild)
39 | };
40 | }
41 | };
42 |
43 | /// Determines whether a given Tree is a valid BST.
44 | /// Args:
45 | /// |t| The Tree to be searched.
46 | /// |key| The key being serched for.
47 | /// Returns:
48 | /// The value of the node in |t| that has key=|key|.
49 | /// Null if there is no such node in |t|.
50 | public func get(t: Tree, key: X) : ?Y {
51 | switch (t) {
52 | case (#leaf(_)) { null };
53 | case (#node(parentKey, parentVal, leftChild, rightChild, compareFunc)) {
54 | switch (compareFunc(key, parentKey)) {
55 | case (#equal) { ?parentVal };
56 | case (#less) { get(leftChild, key) };
57 | case (#greater) { get(rightChild, key) };
58 | }
59 | };
60 | }
61 | };
62 |
63 | /// Adds a new node to a tree (functional update: |t| itself is never mutated).
64 | /// Args:
65 | /// |t| The Tree the node is added to.
66 | /// |key| The key of the new node.
67 | /// |val| The value of the new node.
68 | /// |compareFunc| The comparison function associated with the tree.
69 | /// Returns:
70 | /// A Tree with the correct node added; if |key| already exists its value is replaced.
71 | public func put(t: Tree, key: X, val: Y, compareFunc: (X, X) -> Order) : Tree {
72 | switch (t) {
73 | case (#leaf) {
74 | #node(key, val, #leaf, #leaf, compareFunc); // empty tree: the new node becomes the root
75 | };
76 | case (#node(parentKey, parentVal, leftChild, rightChild, _)) {
77 | let newNode = #node(key, val, #leaf, #leaf, compareFunc); // candidate leaf-child for an empty slot
78 | switch (compareFunc(key, parentKey)) {
79 | case (#equal) { #node(key, val, leftChild, rightChild, compareFunc) }; // existing key: replace the value, keep both subtrees
80 | case (#less) {
81 | switch (leftChild) {
82 | case (#leaf) {
83 | #node(parentKey, parentVal, newNode, rightChild, compareFunc) // empty left slot: attach here
84 | };
85 | case (#node(k, v, l, r, _)) { // recurse left; note the child is rebuilt with |compareFunc| re-attached
86 | #node(
87 | parentKey,
88 | parentVal,
89 | put(
90 | #node(k, v, l, r, compareFunc), // normalize the stored comparator to the argument
91 | key,
92 | val,
93 | compareFunc
94 | ),
95 | rightChild,
96 | compareFunc
97 | )
98 | };
99 | };
100 | };
101 | case (#greater) {
102 | switch (rightChild) {
103 | case (#leaf) {
104 | #node(parentKey, parentVal, leftChild, newNode, compareFunc) // empty right slot: attach here
105 | };
106 | case (#node(k, v, l, r, _)) { // recurse right; mirror image of the #less branch
107 | #node(
108 | parentKey,
109 | parentVal,
110 | leftChild,
111 | put(
112 | #node(k, v, l, r, compareFunc), // normalize the stored comparator to the argument
113 | key,
114 | val,
115 | compareFunc
116 | ),
117 | compareFunc
118 | )
119 | };
120 | };
121 | };
122 | };
123 | };
124 | };
125 | };
126 |
127 | /// Allows for iteration through the Tree nodes.
128 | /// Args:
129 | ///   |t| The Tree being iterated through.
130 | ///   |traversal| The traversal type - see Types.mo.
131 | /// Returns:
132 | ///   An Iter object that outputs (key, value) pairs upon subsequent next() method calls.
133 | /// The iterator captures the (immutable) tree value |t|; replacing the caller's tree later does not affect an iterator already created.
134 | public func iter(t: Tree, traversal: Types.Traversal) : Iter {
135 | object {
136 | var treeIter : IterRep = ?(#tree(t), null); // work list: subtrees still to expand and (key, value) pairs ready to emit
137 | public func next() : ?(X, Y) {
138 | switch (traversal, treeIter) {
139 | case (_, null) { null }; // work list exhausted: iteration finished
140 | case (_, ?(#tree(#leaf(_)), rest)) {
141 | treeIter := rest; // leaves carry no data: drop and continue
142 | next()
143 | };
144 | case (_, ?(#kv(k, v), rest)) {
145 | treeIter := rest;
146 | ?(k, v) // a pair at the head is the next element to emit
147 | };
148 | case (#preorder, ?(#tree(#node(k, v, l, r, _)), rest)) {
149 | treeIter := ?(#kv(k, v), ?(#tree(l), ?(#tree(r), rest))); // node, then left, then right
150 | next()
151 | };
152 | case (#inorder, ?(#tree(#node(k, v, l, r, _)), rest)) {
153 | treeIter := ?(#tree(l), ?(#kv(k, v), ?(#tree(r), rest))); // left, then node, then right
154 | next()
155 | };
156 | case (#postorder, ?(#tree(#node(k, v, l, r, _)), rest)) {
157 | treeIter := ?(#tree(l), ?(#tree(r), ?(#kv(k, v), rest))); // left, then right, then node
158 | next()
159 | };
160 | }
161 | };
162 | }
163 | };
164 |
165 | /// Returns the height of |t|: a bare leaf has height 0, and a node is
166 | /// one taller than its taller subtree.
167 | public func height(t: Tree) : Nat {
168 |   switch t {
169 |     case (#node(_, _, left, right, _)) { 1 + Nat.max(height(left), height(right)) };
170 |     case (#leaf(_)) { 0 };
171 |   }
172 | };
173 |
174 | /// Returns the number of key-value nodes stored in |t|.
175 | /// A bare leaf contributes 0.
176 | public func size(t: Tree) : Nat {
177 |   switch t {
178 |     case (#node(_, _, left, right, _)) { 1 + size(left) + size(right) };
179 |     case (#leaf(_)) { 0 };
180 |   }
181 | };
182 |
183 | };
184 |
--------------------------------------------------------------------------------
/src/BinarySearchTree/Main.mo:
--------------------------------------------------------------------------------
1 | import Iter "mo:base/Iter";
2 | import Nat "mo:base/Nat";
3 |
4 | import BST "./BST";
5 | import Types "./Types";
6 |
7 | /// Canister interface for a persistent BST of Nat keys mapped to Nat values.
8 | actor {
9 |
10 | // Fix: this alias previously read `Iter.Iter<(X, Y)>`, referencing `X`/`Y`
11 | // which are unbound in this actor; it stores Nat keys and Nat values.
12 | type Iter = Iter.Iter<(Nat, Nat)>;
13 | type Tree = Types.Tree;
14 |
15 | var bst: Tree = #leaf; // the stored tree; starts empty
16 | var bstIterator = BST.iter(bst, #inorder); // current traversal cursor (see iterSync/next)
17 | let compareFunc = Nat.compare; // key ordering used for every put
18 |
19 | /// Returns whether the stored tree satisfies the BST ordering invariant.
20 | public func validate() : async (Bool) {
21 | BST.validate(bst)
22 | };
23 |
24 | /// Returns the value stored under |key|, or null if absent.
25 | public func get(key: Nat) : async (?Nat) {
26 | BST.get(bst, key)
27 | };
28 |
29 | /// Inserts |value| under |key|, replacing any existing value (oneway call).
30 | public func put(key: Nat, value: Nat) {
31 | bst := BST.put(bst, key, value, compareFunc);
32 | };
33 |
34 | /// Resets the iterator to a fresh traversal of the current tree.
35 | /// NOTE(review): put() does not refresh the iterator — call iterSync after mutating.
36 | public func iterSync(traversal: Types.Traversal) {
37 | bstIterator := BST.iter(bst, traversal);
38 | };
39 |
40 | /// Returns the next (key, value) pair of the current traversal, or null when exhausted.
41 | public func next() : async (?(Nat, Nat)) {
42 | bstIterator.next()
43 | };
44 |
45 | /// Returns the height of the stored tree (empty tree: 0).
46 | public func height() : async (Nat) {
47 | BST.height(bst)
48 | };
49 |
50 | /// Returns the number of nodes in the stored tree.
51 | public func size() : async (Nat) {
52 | BST.size(bst)
53 | };
54 |
55 | /// Clears the tree and resets the iterator to an (empty) inorder traversal.
56 | public func bstReset() {
57 | bst := #leaf;
58 | bstIterator := BST.iter(bst, #inorder);
59 | };
60 |
61 | };
50 |
--------------------------------------------------------------------------------
/src/BinarySearchTree/Types.mo:
--------------------------------------------------------------------------------
1 | import Order "mo:base/Order";
2 |
3 | module {
4 |
5 | type Order = Order.Order;
6 |
7 | /// A binary search tree: each #node carries its key, value, left/right
8 | /// subtrees, and the comparison function ordering its keys; #leaf is empty.
9 | /// NOTE(review): `X` and `Y` are unbound here — upstream this was presumably
10 | /// `public type Tree<X, Y>` and the `<X, Y>` parameter list was stripped by
11 | /// the export (other `<...>` spans in this dump show the same damage); confirm
12 | /// against the original source before compiling.
13 | public type Tree = {
14 | #node : (
15 | key: X,
16 | value: Y,
17 | l: Tree,
18 | r: Tree,
19 | compareFunc: (X, X) -> Order,
20 | );
21 | #leaf;
22 | };
23 |
24 | // Specifies a traversal order of the BST - used as an argument for iter() in BST.mo
25 | public type Traversal = { #preorder; #postorder; #inorder };
26 |
27 | };
22 |
--------------------------------------------------------------------------------
/src/BloomFilter/BloomFilter.mo:
--------------------------------------------------------------------------------
1 | import Array "mo:base/Array";
2 | import Float "mo:base/Float";
3 | import Hash "mo:base/Hash";
4 | import Int "mo:base/Int";
5 | import Iter "mo:base/Iter";
6 | import Nat "mo:base/Nat";
7 | import Nat32 "mo:base/Nat32";
8 |
9 | module {
10 |
11 | type Hash = Hash.Hash;
12 |
13 | /// Manages BloomFilters, deploys new BloomFilters, and checks for element membership across filters.
14 | /// Args:
15 | ///   |capacity| The maximum number of elements a BloomFilter may store (must be >= 1, see note below).
16 | ///   |errorRate| The maximum false positive rate a BloomFilter may maintain.
17 | ///   |hashFuncs| The hash functions used to hash elements into the filter.
18 | /// NOTE(review): `S` is presumably a class type parameter (`<S>`) stripped by the export — confirm against upstream.
19 | public class AutoScalingBloomFilter(capacity: Nat, errorRate: Float, hashFuncs: [(S) -> Hash]) {
20 |
21 | var filters: [BloomFilter] = []; // newest filter is last; items always go into the newest
22 |
23 | // Partitioned Bloom filter sizing: numSlices = ceil(log2(1 / errorRate)).
24 | let numSlices = Float.ceil(Float.log(1.0 / errorRate) / Float.log(2));
25 | let bitsPerSlice = Float.ceil(
26 | (Float.fromInt(capacity) * Float.abs(Float.log(errorRate))) /
27 | (numSlices * (Float.log(2) ** 2))
28 | );
29 | // NOTE(review): capacity 0 makes bitMapSize 0, and BloomFilter.add then traps
30 | // on `% bitMapSize` — callers must pass capacity >= 1.
31 | let bitMapSize: Nat32 = Nat32.fromNat(Int.abs(Float.toInt(numSlices * bitsPerSlice)));
32 |
33 | /// Adds an element to the newest BloomFilter's bitmap, deploying a new
34 | /// BloomFilter first if the newest one is already at capacity (or none exists).
35 | /// Args:
36 | ///   |item| The item to be added.
37 | public func add(item: S) {
38 | var newFilter: Bool = false;
39 | var filter: BloomFilter = do {
40 | if (filters.size() > 0) {
41 | let last_filter = filters[filters.size() - 1];
42 | if (last_filter.getNumItems() < capacity) {
43 | last_filter
44 | } else {
45 | newFilter := true;
46 | BloomFilter(bitMapSize, hashFuncs)
47 | }
48 | } else {
49 | newFilter := true;
50 | BloomFilter(bitMapSize, hashFuncs)
51 | }
52 | };
53 | filter.add(item);
54 | if (newFilter) {
55 | // Fix: this read `Array.append>(...)` — a syntax error left by a stripped type argument.
56 | filters := Array.append<BloomFilter>(filters, [filter]);
57 | };
58 | };
59 |
60 | /// Checks if an item is contained in any BloomFilters.
61 | /// Args:
62 | ///   |item| The item to be searched for.
63 | /// Returns:
64 | ///   A boolean indicating probable membership (false positives possible, no false negatives).
65 | public func check(item: S) : Bool {
66 | for (filter in Iter.fromArray(filters)) {
67 | if (filter.check(item)) { return true; };
68 | };
69 | false
70 | };
71 |
72 | };
68 |
69 | /// The specific BloomFilter implementation used in AutoScalingBloomFilter.
70 | /// Args:
71 | ///   |bitMapSize| The size of the bitmap (as determined in AutoScalingBloomFilter); must be > 0 or add/check trap on `%`.
72 | ///   |hashFuncs| The hash functions used to hash elements into the filter.
73 | public class BloomFilter(bitMapSize: Nat32, hashFuncs: [(S) -> Hash]) {
74 |
75 | var numItems = 0; // count of add() calls, used by AutoScalingBloomFilter for capacity checks
76 | let bitMap: [var Bool] = Array.init(Nat32.toNat(bitMapSize), false);
77 |
78 | /// Hashes |item| with every hash function and sets the corresponding bits.
79 | public func add(item: S) {
80 | for (f in Iter.fromArray(hashFuncs)) {
81 | let digest = f(item) % bitMapSize;
82 | bitMap[Nat32.toNat(digest)] := true;
83 | };
84 | numItems += 1;
85 | };
86 |
87 | /// Returns false as soon as any probed bit is unset (a definite miss);
88 | /// true means "probably present" (false positives possible).
89 | public func check(item: S) : Bool {
90 | for (f in Iter.fromArray(hashFuncs)) {
91 | let digest = f(item) % bitMapSize;
92 | if (bitMap[Nat32.toNat(digest)] == false) return false;
93 | };
94 | true
95 | };
96 |
97 | /// Returns how many items have been added to this filter.
98 | public func getNumItems() : Nat {
99 | numItems
100 | };
101 |
102 | /// Returns an immutable snapshot of the bitmap.
103 | public func getBitMap() : [Bool] {
104 | Array.freeze(bitMap)
105 | };
106 |
107 | /// Overwrites the bitmap with |data| (e.g. when restoring filter state).
108 | /// Fix: iterate with data.keys() so an empty bitmap no longer traps on
109 | /// `data.size() - 1` (Nat subtraction underflow) when bitMapSize == 0.
110 | public func setData(data: [Bool]) {
111 | assert data.size() == Nat32.toNat(bitMapSize);
112 | for (i in data.keys()) {
113 | bitMap[i] := data[i];
114 | };
115 | };
116 |
117 | };
110 |
111 | };
112 |
--------------------------------------------------------------------------------
/src/BloomFilter/Main.mo:
--------------------------------------------------------------------------------
1 | import Hash "mo:base/Hash";
2 |
3 | import BloomFilter "./BloomFilter";
4 |
5 | /// Canister wrapper around an auto-scaling Bloom filter of Nat items.
6 | actor {
7 |
8 | // Fix: this was constructed with capacity 0, which makes the computed bitmap
9 | // size 0 and causes every add() to trap on a modulo-by-zero inside BloomFilter.
10 | // Capacity must be >= 1; 1024 items per filter at a 0.1% error rate.
11 | var bloomFilter = BloomFilter.AutoScalingBloomFilter(1024, 0.001, [Hash.hash]);
12 |
13 | /// Inserts |item| into the filter (oneway update call).
14 | public func add(item: Nat) {
15 | bloomFilter.add(item);
16 | };
17 |
18 | /// Returns true if |item| is probably in the set (false positives possible,
19 | /// no false negatives).
20 | public func check(item: Nat) : async (Bool) {
21 | bloomFilter.check(item)
22 | };
23 |
24 | };
18 |
--------------------------------------------------------------------------------
/vendor/ATTRIBUTION.md:
--------------------------------------------------------------------------------
1 | `motoko-bigmap` was written by Matthew Hammer and is copyright DFINITY; see the accompanying `LICENSE` (Apache-2.0 with LLVM exception) and `NOTICE` files in `vendor/motoko-bigmap/`.
--------------------------------------------------------------------------------
/vendor/motoko-bigmap/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction, and
10 | distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by the
13 | copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all other
16 | entities that control, are controlled by, or are under common control with
17 | that entity. For the purposes of this definition, "control" means (i) the
18 | power, direct or indirect, to cause the direction or management of such
19 | entity, whether by contract or otherwise, or (ii) ownership of fifty percent
20 | (50%) or more of the outstanding shares, or (iii) beneficial ownership of
21 | such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity exercising
24 | permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation source, and
28 | configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical transformation
31 | or translation of a Source form, including but not limited to compiled
32 | object code, generated documentation, and conversions to other media types.
33 |
34 | "Work" shall mean the work of authorship, whether in Source or Object form,
35 | made available under the License, as indicated by a copyright notice that is
36 | included in or attached to the work (an example is provided in the Appendix
37 | below).
38 |
39 | "Derivative Works" shall mean any work, whether in Source or Object form,
40 | that is based on (or derived from) the Work and for which the editorial
41 | revisions, annotations, elaborations, or other modifications represent, as a
42 | whole, an original work of authorship. For the purposes of this License,
43 | Derivative Works shall not include works that remain separable from, or
44 | merely link (or bind by name) to the interfaces of, the Work and Derivative
45 | Works thereof.
46 |
47 | "Contribution" shall mean any work of authorship, including the original
48 | version of the Work and any modifications or additions to that Work or
49 | Derivative Works thereof, that is intentionally submitted to Licensor for
50 | inclusion in the Work by the copyright owner or by an individual or Legal
51 | Entity authorized to submit on behalf of the copyright owner. For the
52 | purposes of this definition, "submitted" means any form of electronic,
53 | verbal, or written communication sent to the Licensor or its
54 | representatives, including but not limited to communication on electronic
55 | mailing lists, source code control systems, and issue tracking systems that
56 | are managed by, or on behalf of, the Licensor for the purpose of discussing
57 | and improving the Work, but excluding communication that is conspicuously
58 | marked or otherwise designated in writing by the copyright owner as "Not a
59 | Contribution."
60 |
61 | "Contributor" shall mean Licensor and any individual or Legal Entity on
62 | behalf of whom a Contribution has been received by Licensor and subsequently
63 | incorporated within the Work.
64 |
65 | 2. Grant of Copyright License. Subject to the terms and conditions of this
66 | License, each Contributor hereby grants to You a perpetual, worldwide,
67 | non-exclusive, no-charge, royalty-free, irrevocable copyright license to
68 | reproduce, prepare Derivative Works of, publicly display, publicly perform,
69 | sublicense, and distribute the Work and such Derivative Works in Source or
70 | Object form.
71 |
72 | 3. Grant of Patent License. Subject to the terms and conditions of this
73 | License, each Contributor hereby grants to You a perpetual, worldwide,
74 | non-exclusive, no-charge, royalty-free, irrevocable (except as stated in
75 | this section) patent license to make, have made, use, offer to sell, sell,
76 | import, and otherwise transfer the Work, where such license applies only to
77 | those patent claims licensable by such Contributor that are necessarily
78 | infringed by their Contribution(s) alone or by combination of their
79 | Contribution(s) with the Work to which such Contribution(s) was submitted.
80 | If You institute patent litigation against any entity (including a
81 | cross-claim or counterclaim in a lawsuit) alleging that the Work or a
82 | Contribution incorporated within the Work constitutes direct or contributory
83 | patent infringement, then any patent licenses granted to You under this
84 | License for that Work shall terminate as of the date such litigation is
85 | filed.
86 |
87 | 4. Redistribution. You may reproduce and distribute copies of the Work or
88 | Derivative Works thereof in any medium, with or without modifications, and
89 | in Source or Object form, provided that You meet the following conditions:
90 |
91 | a. You must give any other recipients of the Work or Derivative Works a
92 | copy of this License; and
93 |
94 | b. You must cause any modified files to carry prominent notices stating
95 | that You changed the files; and
96 |
97 | c. You must retain, in the Source form of any Derivative Works that You
98 | distribute, all copyright, patent, trademark, and attribution notices
99 | from the Source form of the Work, excluding those notices that do not
100 | pertain to any part of the Derivative Works; and
101 |
102 | d. If the Work includes a "NOTICE" text file as part of its distribution,
103 | then any Derivative Works that You distribute must include a readable
104 | copy of the attribution notices contained within such NOTICE file,
105 | excluding those notices that do not pertain to any part of the Derivative
106 | Works, in at least one of the following places: within a NOTICE text file
107 | distributed as part of the Derivative Works; within the Source form or
108 | documentation, if provided along with the Derivative Works; or, within a
109 | display generated by the Derivative Works, if and wherever such
110 | third-party notices normally appear. The contents of the NOTICE file are
111 | for informational purposes only and do not modify the License. You may
112 | add Your own attribution notices within Derivative Works that You
113 | distribute, alongside or as an addendum to the NOTICE text from the Work,
114 | provided that such additional attribution notices cannot be construed as
115 | modifying the License.
116 |
117 | You may add Your own copyright statement to Your modifications and may
118 | provide additional or different license terms and conditions for use,
119 | reproduction, or distribution of Your modifications, or for any such
120 | Derivative Works as a whole, provided Your use, reproduction, and
121 | distribution of the Work otherwise complies with the conditions stated in
122 | this License.
123 |
124 | 5. Submission of Contributions. Unless You explicitly state otherwise, any
125 | Contribution intentionally submitted for inclusion in the Work by You to the
126 | Licensor shall be under the terms and conditions of this License, without
127 | any additional terms or conditions. Notwithstanding the above, nothing
128 | herein shall supersede or modify the terms of any separate license agreement
129 | you may have executed with Licensor regarding such Contributions.
130 |
131 | 6. Trademarks. This License does not grant permission to use the trade names,
132 | trademarks, service marks, or product names of the Licensor, except as
133 | required for reasonable and customary use in describing the origin of the
134 | Work and reproducing the content of the NOTICE file.
135 |
136 | 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in
137 | writing, Licensor provides the Work (and each Contributor provides its
138 | Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
139 | KIND, either express or implied, including, without limitation, any
140 | warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or
141 | FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining
142 | the appropriateness of using or redistributing the Work and assume any risks
143 | associated with Your exercise of permissions under this License.
144 |
145 | 8. Limitation of Liability. In no event and under no legal theory, whether in
146 | tort (including negligence), contract, or otherwise, unless required by
147 | applicable law (such as deliberate and grossly negligent acts) or agreed to
148 | in writing, shall any Contributor be liable to You for damages, including
149 | any direct, indirect, special, incidental, or consequential damages of any
150 | character arising as a result of this License or out of the use or inability
151 | to use the Work (including but not limited to damages for loss of goodwill,
152 | work stoppage, computer failure or malfunction, or any and all other
153 | commercial damages or losses), even if such Contributor has been advised of
154 | the possibility of such damages.
155 |
156 | 9. Accepting Warranty or Additional Liability. While redistributing the Work or
157 | Derivative Works thereof, You may choose to offer, and charge a fee for,
158 | acceptance of support, warranty, indemnity, or other liability obligations
159 | and/or rights consistent with this License. However, in accepting such
160 | obligations, You may act only on Your own behalf and on Your sole
161 | responsibility, not on behalf of any other Contributor, and only if You
162 | agree to indemnify, defend, and hold each Contributor harmless for any
163 | liability incurred by, or claims asserted against, such Contributor by
164 | reason of your accepting any such warranty or additional liability.
165 |
166 | END OF TERMS AND CONDITIONS
167 |
168 | LLVM EXCEPTION TO THE APACHE 2.0 LICENSE
169 |
170 | As an exception, if, as a result of your compiling your source code, portions
171 | of this Software are embedded into an Object form of such source code, you may
172 | redistribute such embedded portions in such Object form without complying with
173 | the conditions of Sections 4(a), 4(b) and 4(d) of the License.
174 |
175 | In addition, if you combine or link compiled forms of this Software with
176 | software that is licensed under the GPLv2 ("Combined Software") and if a court
177 | of competent jurisdiction determines that the patent provision (Section 3), the
178 | indemnity provision (Section 9) or other Section of the License conflicts with
179 | the conditions of the GPLv2, you may retroactively and prospectively choose to
180 | deem waived or otherwise exclude such Section(s) of the License, but only in
181 | their entirety and only with respect to the Combined Software.
182 |
183 | END OF LLVM EXCEPTION
184 |
185 | APPENDIX: How to apply the Apache License to your work.
186 |
187 | To apply the Apache License to your work, attach the following boilerplate
188 | notice, with the fields enclosed by brackets "[]" replaced with your own
189 | identifying information. (Don't include the brackets!) The text should be
190 | enclosed in the appropriate comment syntax for the file format. We also
191 | recommend that a file or class name and description of purpose be included on
192 | the same "printed page" as the copyright notice for easier identification
193 | within third-party archives.
194 |
195 | Copyright [yyyy] [name of copyright owner]
196 |
197 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use
198 | this file except in compliance with the License. You may obtain a copy of the
199 | License at
200 |
201 | http://www.apache.org/licenses/LICENSE-2.0
202 |
203 | Unless required by applicable law or agreed to in writing, software distributed
204 | under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
205 | CONDITIONS OF ANY KIND, either express or implied. See the License for the
206 | specific language governing permissions and limitations under the License.
207 |
208 | END OF APPENDIX
209 |
--------------------------------------------------------------------------------
/vendor/motoko-bigmap/NOTICE:
--------------------------------------------------------------------------------
1 | Copyright 2020 DFINITY
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use
4 | this file except in compliance with the License. You may obtain a copy of the
5 | License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software distributed
10 | under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
11 | CONDITIONS OF ANY KIND, either express or implied. See the License for the
12 | specific language governing permissions and limitations under the License.
13 |
--------------------------------------------------------------------------------
/vendor/motoko-bigmap/app/Main.mo:
--------------------------------------------------------------------------------
1 | import BigMap "../src/BigMap";
2 | import Segment "../src/Segment";
3 | import SegKey "../src/SegKey";
4 | import T "../src/Types";
5 | import Debug "mo:base/Debug";
6 |
7 | actor { // BigMap router: forwards put/get to the segment canister owning the key's interval
8 | private /*stable*/ var segCount = 1; // number of key-space segments; /*stable*/ hints it was meant to survive upgrades
9 | private /*flexible*/ var bm = BigMap.BigMap(segCount); // routing index; rebuilt on init()
10 |
11 | public func init(n : Nat) { // resets the index to |n| unassigned segments; does not migrate existing segment data
12 | Debug.print "BigMap init begin";
13 | Debug.print (" - Old segment count = " # (debug_show segCount));
14 | segCount := n;
15 | Debug.print (" - New segment count = " # (debug_show segCount));
16 | bm := BigMap.BigMap(segCount);
17 | Debug.print "BigMap init end";
18 | };
19 |
20 | // Hands the next unassigned segment to canister |id|; returns false when all segments are already assigned.
21 | public func initNext(id : Text) : async Bool {
22 | let c = actor (id) : actor { // bind the textual canister id to the segment-canister interface
23 | init : T.SegmentInit -> async ();
24 | put : (T.SegKey, T.Val) -> async ();
25 | get : query T.SegKey -> async ?T.Val;
26 | };
27 | switch (bm.initNext(c)) {
28 | case null { false }; // nothing left to initialize
29 | case (?f) { await f(); true }; // run the returned thunk: awaits the segment's init()
30 | };
31 | };
32 |
33 | public query func isReady() : async Bool { // true once every segment has been claimed via initNext
34 | bm.isReady()
35 | };
36 |
37 | public query func getReady() : async [T.CanisterInfo] { // assigned segment canisters and their key intervals
38 | bm.getReady()
39 | };
40 |
41 | public func get(key : [Nat8]) : async ?[Nat8] {
42 | Debug.print "BigMap get begin";
43 | if (not (bm.isReady())) {
44 | Debug.print "Error: Not ready.";
45 | assert false; loop { } // deliberately diverges: there is no value to return before init completes
46 | } else {
47 | assert bm.isReady();
48 | let k = SegKey.ofKey(key); // hash the raw key into the segment key space
49 | let c = bm.getCanister(k); // pick the segment whose interval contains k
50 | let v = await (c.get(k));
51 | Debug.print "BigMap get end";
52 | v
53 | }
54 | };
55 |
56 | public func put(key : [Nat8], value : [Nat8]) : async () {
57 | Debug.print "BigMap put begin";
58 | if (not (bm.isReady())) {
59 | Debug.print "Error: Not ready.";
60 | assert false // trap: routing not initialized yet
61 | } else {
62 | assert bm.isReady();
63 | let k = SegKey.ofKey(key); // same routing as get()
64 | let c = bm.getCanister(k);
65 | await (c.put(k, value));
66 | Debug.print "BigMap put end";
67 | }
68 | };
69 | };
70 |
--------------------------------------------------------------------------------
/vendor/motoko-bigmap/app/SegCan.mo:
--------------------------------------------------------------------------------
1 | import T "../src/Types";
2 | import Debug "mo:base/Debug";
3 | import Segment "../src/Segment";
4 |
5 | actor { // one BigMap segment canister: stores the keys hashed into its interval
6 | private flexible var seg
7 | : Segment.Segment
8 | = Segment.Segment(#singleton(0)); // placeholder segment until init() delivers the real interval
9 |
10 | public func init(init : Segment.Init) { // replaces the placeholder; any previously stored data is discarded
11 | Debug.print "BigMapSegCan init begin";
12 | Debug.print (" - id = " # (debug_show Segment.initId(init)));
13 | Debug.print (" - init = " # (debug_show init));
14 | seg := Segment.Segment(init);
15 | Debug.print "BigMapSegCan init end";
16 | };
17 |
18 | public query func get(sk : T.SegKey) : async ?T.Val { // looks up an already-hashed segment key
19 | Debug.print "BigMapSegCan get begin";
20 | Debug.print (" - id = " # (debug_show seg.id));
21 | Debug.print (" - segKey = " # (debug_show sk));
22 | let v = seg.getSegKey(sk);
23 | Debug.print "BigMapSegCan get end";
24 | v
25 | };
26 |
27 | public func put(sk : T.SegKey, v : T.Val) { // stores |v| under an already-hashed segment key (oneway)
28 | Debug.print "BigMapSegCan put begin";
29 | Debug.print (" - id = " # (debug_show seg.id));
30 | Debug.print (" - segKey = " # (debug_show sk));
31 | seg.putSegKey(sk, v);
32 | Debug.print "BigMapSegCan put end";
33 | };
34 |
35 | };
36 |
--------------------------------------------------------------------------------
/vendor/motoko-bigmap/dfx.json:
--------------------------------------------------------------------------------
1 | {
2 | "canisters": {
3 | "BigMap": {
4 | "main": "app/Main.mo"
5 | },
6 | "BigMapSegment0": {
7 | "main": "app/SegCan.mo"
8 | },
9 | "BigMapSegment1": {
10 | "main": "app/SegCan.mo"
11 | },
12 | "BigMapSegment2": {
13 | "main": "app/SegCan.mo"
14 | },
15 | "BigMapSegment3": {
16 | "main": "app/SegCan.mo"
17 | },
18 | "BigTestPutGet": {
19 | "main": "test/BigTestPutGet.mo"
20 | },
21 | "TestPutGet": {
22 | "main": "test/PutGet.mo"
23 | }
24 | },
25 | "defaults": {
26 | "build": {
27 | "packtool": "vessel sources",
28 | "output": "canisters/"
29 | },
30 | "start": {
31 | "address": "127.0.0.1",
32 | "port": 8000
33 | }
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/vendor/motoko-bigmap/src/BigMap.mo:
--------------------------------------------------------------------------------
1 | import T "Types";
2 | import SegKey "SegKey";
3 | import Seg "Segment";
4 | //import Debug "DebugOff";
5 | import Debug "mo:base/Debug";
6 | import List "mo:base/List";
7 | import P "mo:base/Prelude";
8 |
9 | module {
10 |
11 | public class BigMap( // routing index from hashed keys to segment canisters
12 | numSegments : Nat
13 | ) = Self {
14 |
15 | type Segments = Seg.Segments;
16 | type Segment = Seg.Segment;
17 |
18 | type SegmentCanister = T.SegmentCanister;
19 | type CanisterInfo = T.CanisterInfo;
20 | type Canisters = List.List; // NOTE(review): a type argument (presumably List.List<CanisterInfo>) looks stripped by the export — confirm against upstream
21 |
22 | private var segments : Segments = // segments not yet assigned to a canister
23 | Seg.uniformSegments(numSegments); // the key space split into |numSegments| uniform intervals
24 |
25 | private var canisters : Canisters = null; // (canister, interval) pairs, most recently assigned first
26 |
27 | // null means all segment canisters "fully initialized"
28 | public func initNext(sc : SegmentCanister) : ?(() -> async ()) { // assigns the next segment to |sc|; returns a thunk that awaits sc.init
29 | switch segments {
30 | case null null;
31 | case (?(segment, rest)) {
32 | segments := rest; // the segment is now assigned, regardless of the thunk being run
33 | canisters := ?((sc, segment.interval()), canisters);
34 | let init = #empty(segment.id, segment.interval());
35 | let doit = ?(func () : async () = async { await sc.init(init) });
36 | doit
37 | };
38 | }
39 | };
40 |
41 | public func getReady() : [CanisterInfo] { // snapshot of all assigned canisters and their intervals
42 | List.toArray(canisters)
43 | };
44 |
45 | public func getCanister(k : T.SegKey) : SegmentCanister {
46 | getCanisterInfo(k).0
47 | };
48 |
49 | public func getInterval(k : T.SegKey) : T.Interval {
50 | getCanisterInfo(k).1
51 | };
52 |
53 | public func getCanisterInfo(k : T.SegKey) : CanisterInfo { // requires isReady(); traps if |k| falls in no interval
54 | func getRec(s : Canisters) : CanisterInfo { // linear scan over the assigned (canister, interval) list
55 | switch s {
56 | case (?((sc, interval), s)) {
57 | if (SegKey.intervalContains(interval, k)) {
58 | (sc, interval)
59 | } else {
60 | getRec(s)
61 | }
62 | };
63 | case null { // unreachable when the intervals cover the key space
64 | Debug.print ("Invariant error: key not contained in any segment"
65 | # (debug_show k));
66 | P.unreachable()
67 | };
68 | }
69 | };
70 | assert (isReady());
71 | getRec(canisters)
72 | };
73 |
74 | public func isReady() : Bool { // true once every segment has been handed out via initNext
75 | switch segments {
76 | case null true;
77 | case _ false;
78 | }
79 | };
80 | };
81 | }
82 |
--------------------------------------------------------------------------------
/vendor/motoko-bigmap/src/BigTest/Batch.mo:
--------------------------------------------------------------------------------
1 | import Eval "Eval";
2 | import Types "Types";
3 | import Q "mo:base/Deque";
4 | import List "mo:base/List";
5 |
6 | //import Debug "../DebugOff";
7 | import Debug "mo:base/Debug";
8 |
9 | module {
10 | public class Batch() { // FIFO queue of test-expression evaluation states, driven to completion between canister calls
11 |
12 | public type State = Types.State;
13 |
14 | var states : Q.Deque = (null, null); // NOTE(review): a type argument (presumably Q.Deque<State>) looks stripped by the export — confirm against upstream
15 |
16 | public func push(e: Types.Exp) : () { // enqueue a test expression as a fresh evaluation state
17 | let st = Types.Init.empState(?e);
18 | states := Q.pushBack(states, st);
19 | };
20 |
21 | public func peek() : ?Types.DebugInfo { // debug view (stack, env, exp) of the front state, if any
22 | let s : ?State = Q.peekFront(states);
23 | switch s {
24 | case null null;
25 | case (?s) ?(s.stack, s.env, s.exp);
26 | }
27 | };
28 |
29 | public func saveResult(res: Types.Res) { // stores an awaited call's result into the suspended front state
30 | let state : State =
31 | switch (Q.peekFront(states)) {
32 | case null { assert false; loop { } }; // invariant: a state must be waiting for this result
33 | case (?s) { s };
34 | };
35 | switch (res, state.exp) {
36 | case (#ok(v), null) { // suspended state (exp cleared) resumes from the call's value
37 | state.exp := ?#value(v)
38 | };
39 | case (#err(e), _) { // call failed: deliberately diverge
40 | assert false; loop { }
41 | };
42 | case (_, ?exp) { // invariant violation: the front state was not suspended
43 | assert false; loop { }
44 | }
45 | };
46 | };
47 |
48 | public func nextCallRequest() : ?Types.CallReq { // evaluates states until one suspends on a call, or the queue empties (null)
49 | loop {
50 | let state : State =
51 | switch (Q.peekFront(states)) {
52 | case null { return null }; // end loop
53 | case (?s) { s };
54 | };
55 | Debug.print ("BigTest.Batch.nextCallRequest - state.stack = " # (debug_show state.stack));
56 | Debug.print ("BigTest.Batch.nextCallRequest - state.env = " # (debug_show state.env));
57 | Debug.print ("BigTest.Batch.nextCallRequest - state.exp = " # (debug_show state.exp));
58 | Debug.print ("BigTest.Batch.nextCallRequest - begin evaluation ...");
59 | let r = Eval.evalState(state);
60 | Debug.print ("BigTest.Batch.nextCallRequest - end evaluation.");
61 | Debug.print ("BigTest.Batch.nextCallRequest - result=" # (debug_show r));
62 | switch r {
63 | case (#ok(v)) {
64 | // test fully evaluated: drop its state and continue with the next one
65 | Debug.print ("Batch.nextCallRequest - postEval - result=" # (debug_show r));
66 | states := Q.popFront(states);
67 | };
68 | case (#err(#callRequest(stack, call))) { // suspend: save the continuation, clear exp, hand the call to the driver
69 | state.stack := stack;
70 | state.env := Types.Init.empEnv();
71 | state.exp := null;
72 | return ?call // end loop
73 | };
74 | case (#err(e)) {
75 | // to do -- report error
76 | // continue?
77 | assert false; loop { }
78 | }
79 | };
80 | };
81 | }
82 | }
83 | }
83 | }
84 |
--------------------------------------------------------------------------------
/vendor/motoko-bigmap/src/BigTest/Call.mo:
--------------------------------------------------------------------------------
1 | import BigMap "canister:BigMap";
2 | import Prim "mo:prim";
3 |
4 | //import Debug "../DebugOff";
5 | import Debug "mo:base/Debug";
6 |
7 | import Types "Types";
8 |
9 | /** isolates logic specific to calling BigMap's service interface
10 | from within the (more general) DSL expression language. */
11 | module {
12 |
13 | public func awaitt(c: Types.CallReq) : () -> async Types.Res { // "awaitt" double-t presumably avoids the `await` keyword — confirm
14 | func () : async Types.Res = async { // returns a thunk so the caller chooses when to issue the inter-canister call
15 | Debug.print ("BigTest.Call.awaitt " # (debug_show c));
16 | switch c {
17 | case (#put(k, v)) {
18 | await BigMap.put(k, v);
19 | #ok(#unit)
20 | };
21 | case (#get(k)) {
22 | let res = await BigMap.get(k);
23 | switch res {
24 | case null { #ok(#nulll) }; // "#nulll" triple-l presumably avoids the `null` literal — confirm
25 | case (?r) { #ok(#opt(fromNat8s(r))) };
26 | }
27 | };
28 | }
29 | }
30 | };
31 |
32 | // convert DSL-level arguments into Candid-level arguments
33 | public func callRequest(c: Types.CallExp) : Types.CallReq {
34 | switch c {
35 | case (#put(#value(k), #value(v))) {
36 | let wsk = intoNat8s(k);
37 | let wsv = intoNat8s(v);
38 | #put(wsk, wsv)
39 | };
40 | case (#get(#value(k))) {
41 | let wsk = intoNat8s(k);
42 | #get(wsk)
43 | };
44 | case _ { // only fully-evaluated #put/#get argument forms are supported
45 | assert false; loop { }
46 | }
47 | }
48 | };
49 |
50 | public func intoNat8s(v:Types.Val) : [Nat8] { // DSL value -> byte array; currently only single-byte #nat values
51 | switch v {
52 | case (#nat(n)) { [Prim.natToNat8(n)] }; // traps if n > 255
53 | case (_) {
54 | // todo -- handle more cases
55 | assert false; loop { }
56 | };
57 | }
58 | };
59 |
60 | public func fromNat8s(ws:[Nat8]) : Types.Val { // inverse of intoNat8s; requires exactly one byte
61 | assert (ws.size() == 1);
62 | #nat(Prim.nat8ToNat(ws[0]))
63 | };
64 |
65 | }
66 |
--------------------------------------------------------------------------------
/vendor/motoko-bigmap/src/BigTest/Eval.mo:
--------------------------------------------------------------------------------
1 | import Iter "mo:base/Iter";
2 | import Buf "mo:base/Buf";
3 | import List "mo:base/List";
4 |
5 | import Types "Types";
6 | import Call "Call";
7 |
8 | //import Debug "../DebugOff";
9 | import Debug "mo:base/Debug";
10 |
11 | import Log "mo:base/Debug";
12 |
module {

  public type State = Types.State;
  public type Store = Types.Store;
  public type Stack = Types.Stack;
  public type Env = Types.Env;
  public type Cont = Types.Cont;
  public type Frame = Types.Frame;
  public type Exp = Types.Exp;
  public type Res = Types.Res;
  public type Val = Types.Val;
  public type Decls = Types.Decls;

  // evaluate all stack frames, or until we reach a call (then suspend), or an error
  public func evalState(state: State) : Res {
    switch (state.exp) {
      case null { assert false; loop {}};
      case (?exp) {
        switch (eval(state.store, state.env, exp)) {
          case (#ok(v)) { evalStack(state.store, state.stack, v) };
          case (#err(#callRequest(stack, call))) {
            // suspended frames arrive innermost-first; reverse before
            // prepending to the saved stack so outer frames stay outermost
            let s = List.append(List.reverse(stack), state.stack);
            #err(#callRequest(s, call))
          };
          case (#err(e)) { #err(e) };
        }
      };
    }
  };

  // evaluate all stack frames, or until we reach a call (then suspend), or an error
  public func evalStack(store: Store, stack:Stack, v: Val) : Res {
    switch stack {
      case null { #ok(v) };
      case (?(frame, stack)) {
        switch frame {
          case (env, (#labell(name, desc))) {
            Log.print ("BigTest.Eval.evalStack - Successful test, returning value " # (debug_show v));
            Log.print ("BigTest.Eval.evalStack - name = " # name);
            Log.print ("BigTest.Eval.evalStack - desc = " # (debug_show desc));
            evalStack(store, stack, v)
          };
          case (env, (#block(x, decls))) {
            Debug.print ("BigTest.Eval.evalStack - " # x # " := " # (debug_show v));
            let env2 = Types.Env.update(env, x, v);
            switch (evalBlock(store, env2, decls)) {
              case (#err(#callRequest(stack2, call))) {
                #err(#callRequest(List.append(List.reverse(stack2), stack), call))
              };
              case (#err(e)) #err(e);
              case (#ok(#unit)) evalStack(store, stack, #unit);
              case _ { assert false; loop { }};
            }
          };
          case (env, (#iterate(b, i, x, body))) {
            switch (evalIterate(store, env, b, i, x, body)) {
              case (#err(#callRequest(stack2, call))) {
                #err(#callRequest(List.append(List.reverse(stack2), stack), call))
              };
              case (#err(e)) #err(e);
              case (#ok(#unit)) evalStack(store, stack, #unit);
              case _ { assert false; loop { }};
            }
          };
          case _ {
            // to do -- finish missing cases
            assert false; loop { }
          };
        }
      };
    }
  };

  // evaluate expression, or until we reach a call (then suspend), or an error
  public func eval(store: Store, env: Env, exp: Exp) : Res {
    switch exp {
      case (#value v) { #ok(v) };
      case (#opt e) {
        switch (eval(store, env, e)) {
          case (#ok(v)) { #ok(#opt(v)) };
          case (#err(e)) { #err(e) };
        }
      };
      case (#call(#put(e1, e2))) {
        let r1 = eval(store, env, e1);
        let r2 = eval(store, env, e2);
        switch (r1, r2) {
          case (#ok(v1), #ok(v2)) {
            // suspend with an empty local stack; callers prepend their frames
            #err(
              #callRequest(
                Types.Init.empStack(),
                Call.callRequest(
                  #put(#value(v1),
                       #value(v2)))))
          };
          case (#err(e1), _) #err(e1);
          case (_, #err(e2)) #err(e2);
        }
      };
      case (#call(#get(e))) {
        let r = eval(store, env, e);
        switch r {
          case (#ok(v)) {
            #err(
              #callRequest(
                Types.Init.empStack(),
                Call.callRequest(
                  #get(#value(v))
                )))
          };
          case (#err(e)) #err(e);
        }
      };
      case (#varr x) {
        switch (Types.Env.find(env, x)) {
          case null #err(#unboundVariable(env, x));
          case (?v) { #ok(v) };
        }
      };
      case (#iterate(buf, x, body)) {
        switch (eval(store, env, buf)) {
          case (#ok(#buf(b))) {
            evalIterate(store, env, b, 0, x, body)
          };
          case (#ok(v)) #err(#iterateNonBuffer(v));
          case (#err(e)) #err(e);
        };
      };
      case (#labell(name, desc, e)) {
        Log.print "BigTest.Eval.evalExp - Begin labeled test:";
        Log.print ("BigTest.Eval.evalExp - name = " # name);
        Log.print ("BigTest.Eval.evalExp - desc = " # (debug_show desc));
        switch (eval(store, env, e)) {
          case (#ok(v)) { #ok(v) };
          case (#err(#callRequest(stack, call))) {
            Log.print (
              "BigTest.Eval.evalExp - Interrupting test " # name #
              " for call request " # (debug_show call));
            // remember the label so the resumed run can report it
            let cont : Cont = #labell(name, desc);
            let frame : Frame = (env, cont);
            #err(#callRequest(?(frame, stack), call))
          };
          case (#err(e)) { #err(e) };
        }
      };
      case (#arms(es)) {
        // to do -- sort of like iterate
        assert false; loop { }
      };
      case (#buf(es)) {
        // evaluate each element, then allocate a fresh store buffer for them
        let buf = Buf.Buf<Val>(0);
        for (e in es.vals()) {
          switch (eval(store, env, e)) {
            case (#err(e)) return #err(e);
            case (#ok(v)) buf.add(v);
          }
        };
        let b = store.bufs.size();
        store.bufs.add(buf);
        #ok(#buf(b))
      };
      case (#assertt(e)) {
        switch (eval(store, env, e)) {
          case (#ok(#bool(b))) {
            // in verbose mode, print all true assertions?
            if b { #ok(#unit) } else {
              // to do -- print error in log, but continue?
              assert false; loop { }
            }
          };
          case (#ok(v)) #err(#assertNonBool(env, e, v));
          case (#err(e)) #err(e);
        }
      };
      case (#equiv(e1, e2)) {
        let r1 = eval(store, env, e1);
        let r2 = eval(store, env, e2);
        switch (r1, r2) {
          case (#ok(v1), #ok(v2)) {
            #ok(#bool(Types.Val.equiv(store, v1, v2)))
          };
          case (#err(e1), _) #err(e1);
          case (_, #err(e2)) #err(e2);
        }
      };
      case (#equal(e1, e2)) {
        let r1 = eval(store, env, e1);
        let r2 = eval(store, env, e2);
        switch (r1, r2) {
          case (#ok(v1), #ok(v2)) {
            #ok(#bool(Types.Val.equal(v1, v2)))
          };
          case (#err(e1), _) #err(e1);
          case (_, #err(e2)) #err(e2);
        }
      };
      case (#block decls) {
        evalBlock(store, env, List.fromArray(decls))
      };
      case (#range (n, m)) {
        // buffer all numbers in [n, m] (Iter.range bounds are inclusive)
        let r = Iter.range(n, m);
        let buf = Buf.Buf<Val>(0);
        for (i in r) {
          buf.add(#nat(i))
        };
        let b = store.bufs.size();
        store.bufs.add(buf);
        #ok(#buf(b))
      };
    }
  };

  // run body once per remaining buffer element, starting at index pos;
  // suspends (with a resume frame) if the body issues a call request
  public func evalIterate(store:Store, env:Env, b:Nat, pos:Nat, x:Text, body:Exp) : Res {
    let buf = store.bufs.get(b);
    Debug.print ("BigTest.Eval.evalIterate begin " # (debug_show (b, pos, x, body)));
    // guard: `buf.size() - 1` would trap on Nat underflow for an empty buffer
    if (pos < buf.size()) {
      for (i in Iter.range(pos, buf.size() - 1)) {
        let v = buf.get(i);
        let env2 = Types.Env.update(env, x, v);
        switch (eval(store, env2, body)) {
          case (#err(#callRequest(stack, call))) {
            if (i < buf.size() - 1) {
              Debug.print "BigTest.Eval.evalIterate interrupted, saving unfinished iterations";
              let cont = #iterate(b, i + 1, x, body);
              let s = ?((env2, cont), stack);
              return #err(#callRequest(s, call))
            } else {
              Debug.print "BigTest.Eval.evalIterate interrupted on last iteration";
              return #err(#callRequest(stack, call))
            }
          };
          case (#err(e)) { return #err(e) };
          case (#ok(_v)) { };
        }
      };
    };
    Debug.print "BigTest.Eval.evalIterate end";
    #ok(#unit)
  };

  // evaluate declarations left to right, threading the environment;
  // suspends (with a resume frame holding the remaining decls) on a call
  public func evalBlock(store: Store, env: Env, decls: Decls) : Res {
    var env2 = env;
    var decls2 = decls;
    loop {
      switch decls2 {
        case null { return #ok(#unit) };
        case (?((x, e), rest)) {
          decls2 := rest;
          switch (eval(store, env2, e)) {
            case (#err(#callRequest(stack, call))) {
              // save our place, for later:
              let cont = #block(x, rest);
              let frame = (env2, cont);
              return #err(#callRequest(?(frame, stack), call))
            };
            case (#err(e)) {
              return #err(e)
            };
            case (#ok(v)) {
              Debug.print ("BigTest.Eval.evalBlock - " # x # " := " # (debug_show v));
              env2 := Types.Env.update(env2, x, v)
            };
          };
        };
      }
    };
    #ok(#unit) // unreachable: the loop above only exits via `return`
  };

} // module
287 |
--------------------------------------------------------------------------------
/vendor/motoko-bigmap/src/BigTest/README.md:
--------------------------------------------------------------------------------
1 | # BigTest
2 |
3 | BigTest expresses and performs _very long-running_ batches of tests that
4 | exercise services on the Internet Computer.
5 |
6 | To use BigTest, the test author expresses each test script as a
7 | program written in a domain specific language (DSL).
8 |
9 | For Motoko programmers, BigTest provides a `Batch` class to build
10 | flexible test actors that test a specific service extensively, but
11 | without running out of gas, or out of time.
12 |
13 | BigTest implements this DSL in Motoko using standard PL implementation
14 | techniques that permit the DSL evaluation to implicitly **suspend**
15 | and **resume** around each remote Internet Computer service call.
16 |
17 | ## BigTest expression language
18 |
19 | The BigTest expression language is general enough for many kinds of tests:
20 |
21 | - **Standard PL features**: iteration, let-binding, primitive data
22 | - **Calls** to the service in question (e.g., `put` and `get` on `BigMap`)
23 | - **Buffers** and generators for test input and output
24 | - **Equivalence** checks for all test data output
25 | - **Assertions** whose failure signals a testing failure
26 | - **Labels** for human-readable reports, documentation and logs
27 |
28 | ## Why?
29 |
30 | Today, we can use shell scripts to invoke `dfx canister call` many times.
31 |
32 | **Q** _Why not use a shell script to create long-running tests?_
33 |
34 | This is certainly possible, and we do this today. Eventually,
35 | however, **programs that test canisters should _themselves_ be
36 | programmed as canisters**, not shell scripts running on traditional CI
37 | systems.
38 |
39 | To reach this goal, we need a new test-scripting language, as those
40 | shell scripts do not run on the Internet Computer, and probably will
41 | not soon. Unlike shell scripts, the BigTest language does not assume
42 | a filesystem, or any ambient UNIX system. Rather, it only
43 | assumes a Motoko runtime environment, provided by the IC itself.
44 |
45 | With BigTest, we can:
46 |
47 | - Host test logic on the Internet Computer itself,
- Ask the test canister what script it's running, what progress there is, etc
49 | - Reuse the same dead simple shell script (one single loop, with one call)
50 | - [(Eventually,) pre-check scripts for sanity, errors, etc](https://arxiv.org/pdf/1608.06012.pdf)
51 |
52 |
53 | **Q:** _Why not write testing canisters directly in Rust, or in Motoko?_
54 |
55 | This works fine for small tests that exercise the IC minimally, with a
56 | small number of service calls. Let's call these "small batch tests".
57 |
58 | But how do we relate these small batches, or systematically combine
59 | them into large ones?
60 |
61 | To ask it another way, how do we _systematically decompose a big test
62 | batch_ into many very small ones?
63 |
64 | To solve this problem, we need techniques that _stream_ the behavior
65 | of the batch test, and keep it "live" across many separate activating
66 | ingress calls. This way, a big batch can be decomposed (via
67 | streaming) into many small batches.
68 |
69 | This is precisely the problem solved by the BigTest DSL evaluator.
70 |
71 | Notably, it's also solved by languages that implement an `async`
72 | abstraction. More below.
73 |
74 |
75 | ### Aside: Static versus dynamic PL techniques
76 |
77 | Why even implement this new language if we already have Rust and Motoko?
78 |
79 | In terms of language design, Motoko programs and BigTest programs are
80 | attacking similar problems.
81 |
82 | For example, these two tests are very similar (intentionally):
83 |
84 | - Motoko-based `PutGet` test for BigMap ([`test/PutGet.mo`](https://github.com/dfinity/motoko-bigmap/blob/master/test/PutGet.mo))
85 | - `PutGet` as a BigTest test expression ([`test/BigTestPutGet.mo`](https://github.com/dfinity/motoko-bigmap/blob/master/test/BigTestPutGet.mo))
86 |
87 | In both settings, interacting with the Internet Computer interrupts
88 | ordinary control flow constructs, like simple loops, and the language
89 | uses techniques to hide this interruption from programmers, who do not
90 | wish to express it directly in their source programs. In sum, both
91 | languages express programs whose IC service calls require saving and
92 | restoring a surrounding calling context.
93 |
94 | Of course, the BigTest system is itself expressed as a Motoko program.
95 |
96 | Unlike _Motoko programs_, _BigTest programs are Motoko data_, and can
97 | be sent in a message or received as a response.
98 |
99 | Further, unlike a Rust or Motoko program, a BigTest program can be
100 | inspected and manipulated dynamically in a totally straightforward
101 | way, permitting tests to (potentially) be viewed, changed or extended
102 | while they are running.
103 |
104 | Stepping back, these benefits are merely those of dynamic PL
105 | techniques over static ones.
106 |
107 | BigTest would also benefit from additional (currently missing) static
108 | techniques, such as a type system for doing sanity checks.
109 |
110 | [Eventually, enough static checks would render BigTest more like Motoko
111 | and Rust, which is not the goal.](https://arxiv.org/pdf/1608.06012.pdf)
112 |
--------------------------------------------------------------------------------
/vendor/motoko-bigmap/src/BigTest/Types.mo:
--------------------------------------------------------------------------------
1 | import List "mo:base/List";
2 | import AssocList "mo:base/AssocList";
3 | import Result "mo:base/Result";
4 | import Buf "mo:base/Buf";
5 | import Text "mo:base/Text";
6 | import Iter "mo:base/Iter";
7 |
module {

  // calls to test: Specialized to BigMap actor (for now).
  public type CallExp = {
    #put: (Exp, Exp);
    #get: Exp;
  };

  // when well-formed, each call expression resolves to call request.
  public type CallReq = {
    #put: ([Nat8], [Nat8]);
    #get: [Nat8];
  };

  // ----- The rest of this module is generic, and not specific to BigMap's API.

  // express the "high-level" testing logic with a Motoko DSL;
  // expressions relate multiple Calls, and form tests around them.
  public type Exp = {
    #call: CallExp;
    #arms: [Exp]; // one assert(false) permitted per "arm"
    #labell: (Text, ?Text, Exp); // (label, description, body) for reports, docs, and log output
    #equal: (Exp, Exp); // considers buffer ids, and ignores content
    #equiv: (Exp, Exp); // ignores buffer ids, and considers content
    #assertt: Exp; // assert false stops execution in this "arm"
    #varr: Text; // resolve vars introduced by #block
    #buf: [Exp]; // allocate a new buffer
    #range: (Nat, Nat); // buffer all numbers in given range
    #iterate: (Exp, Text, Exp); // (buffer, var, body)
    #block: [Decl];
    #opt: Exp;
    #value: Val;
  };

  public type Decl = (Text, Exp);

  // lists are more inductive, and more useful for the stack rep
  public type Decls = List.List<Decl>;

  // In Exp, we use Motoko keywords as variant labels by adding an extra "last letter", which avoids Motoko lexing/parsing issues (e.g., #labell). Rationale: Minimal effect on readability.

  // "high-level" values in/out of the calls to test, and as helper data
  public type Val = {
    #unit;
    #nulll;
    #bool: Bool;
    #nat: Nat;
    #text: Text; // remove? (currently unused)
    #buf: Nat; // index into Store.bufs
    #opt: Val;
  };

  // Halt for call requests, assertion failures, other errors
  public type Halt = {
    #callRequest: (Stack, CallReq);
    #assertFalse: Exp;
    #unboundVariable: (Env, Text);
    #iterateNonBuffer: Val;
    #assertNonBool: (Env, Exp, Val);
  };

  public type Env = AssocList.AssocList<Text, Val>;

  public type Res = Result.Result<Val, Halt>;

  public type Store = {
    bufs: Buf.Buf<Buf.Buf<Val>>;
  };

  // remaining work of an Exp to perform later, after a Call
  public type Cont = {
    #labell: (Text, ?Text);
    #block: (Text, List.List<(Text, Exp)>);
    #iterate: (Nat, Nat, Text, Exp); // (buf, key, var, body)
    #arms: [Exp];
  };

  public type Frame = (Env, Cont);
  public type Stack = List.List<Frame>;

  // Info for inspecting expression evaluation via `peek`
  public type DebugInfo = (Stack, Env, ?Exp);

  public type State = {
    store: Store;
    var stack: Stack;
    var env: Env;
    var exp: ?Exp;
  };

  // constructors for empty evaluator state
  public module Init {
    public func empStore() : Store {
      {
        bufs = Buf.Buf<Buf.Buf<Val>>(0);
      }
    };

    public func empStack() : Stack {
      List.nil<Frame>()
    };

    public func empEnv() : Env {
      List.nil<(Text, Val)>()
    };

    public func empState(_exp: ?Exp) : State {
      {
        store = empStore();
        var stack = empStack();
        var env = empEnv();
        var exp = _exp;
      }
    };
  };

  // association-list environment operations
  public module Env {
    public func update(env: Env, x:Text, v:Val) : Env {
      // remove shadowed variable, if any --- a simple form of GC in the DSL evaluation logic
      let (env2, _) = AssocList.replace(env, x, func (x:Text, y:Text) : Bool { x == y }, null);
      ?((x, v), env2)
    };
    public func find(env: Env, x:Text) : ?Val {
      AssocList.find(env, x, func (x:Text, y:Text) : Bool { x == y })
    };
  };

  // value comparisons: `equiv` follows buffer ids into the store and
  // compares contents; `equal` compares buffer ids themselves
  public module Val {
    public func equiv(store:Store, v1: Val, v2: Val) : Bool {
      switch (v1, v2) {
        case (#bool(b1), #bool(b2)) b1 == b2;
        case (#nat(n1), #nat(n2)) n1 == n2;
        case (#text(t1), #text(t2)) t1 == t2;
        case (#opt(v1), #opt(v2)) equiv(store, v1, v2);
        case (#buf(b1), #buf(b2)) {
          if (b1 == b2) {
            true
          } else {
            let buf1 = store.bufs.get(b1);
            let buf2 = store.bufs.get(b2);
            if (buf1.size() != buf2.size())
              false
            else {
              if (buf1.size() == 0) { true } else {
                for (i in Iter.range(0, buf1.size() - 1)) {
                  let v1 = buf1.get(i);
                  let v2 = buf2.get(i);
                  if (equiv(store, v1, v2)) {
                    // continue
                  } else {
                    return false
                  }
                };
                true
              }
            }
          }
        };
        case (_, _) false;
      }
    };
    public func equal(v1: Val, v2: Val) : Bool {
      switch (v1, v2) {
        case (#bool(b1), #bool(b2)) b1 == b2;
        case (#nat(n1), #nat(n2)) n1 == n2;
        case (#text(t1), #text(t2)) t1 == t2;
        case (#buf(b1), #buf(b2)) b1 == b2;
        case (#opt(v1), #opt(v2)) equal(v1, v2);
        case (_, _) false;
      };
    };
  };
}
180 |
--------------------------------------------------------------------------------
/vendor/motoko-bigmap/src/DebugOff.mo:
--------------------------------------------------------------------------------
// include this module as Debug to switch off Debug.print
module {
  // no-op stand-in for Debug.print: accepts the text and discards it
  public func print(_text : Text) { };
}
5 |
--------------------------------------------------------------------------------
/vendor/motoko-bigmap/src/Order.mo:
--------------------------------------------------------------------------------
/// Order -- TEMP -- copied from base package.

module {

  /// A type to represent an order.
  public type Order = {
    #less;
    #equal;
    #greater;
  };

  /// Check if an order is #less.
  public func isLess(order : Order) : Bool {
    order == #less
  };

  /// Check if an order is #equal.
  public func isEqual(order : Order) : Bool {
    order == #equal
  };

  /// Check if an order is #greater.
  public func isGreater(order : Order) : Bool {
    order == #greater
  };

};
37 |
--------------------------------------------------------------------------------
/vendor/motoko-bigmap/src/SegKey.mo:
--------------------------------------------------------------------------------
1 | import T "Types";
2 | import Array "mo:base/Array";
3 | import P "mo:base/Prelude";
4 | import Order "Order"; // TEMP
5 | import SHA256 "mo:sha256/SHA256";
6 | import Prim "mo:prim";
7 | import Debug "DebugOff";
8 | //import Debug "mo:base/Debug";
9 | import Buf "mo:base/Buffer";
10 | import Iter "mo:base/Iter";
11 |
// zero vs inf: the ring is initially one segment, allocated all of keyspace: [zero, inf).
module {
  type SegKey = T.SegKey;

  // hash a plaintext key into the 32-byte segment keyspace
  public func ofKey(bytes : T.Key) : SegKey {
    let hashed = SHA256.sha256(bytes);
    ?hashed;
  };

  // the least segment key: 32 zero bytes
  public func zero() : SegKey =
    ?Array.tabulate<Nat8>(32, func (_ : Nat) : Nat8 = Prim.natToNat8(0));

  // the "infinite" key, strictly greater than every hash; ends the ring
  public func inf() : SegKey = null;

  public func equals(x : SegKey, y : SegKey ) : Bool {
    switch (compare(x, y)) {
      case (#equal) true;
      case _ false;
    }
  };

  // 32-bit hash built from the first four bytes of the segment key
  public func hash(x : SegKey) : Nat32 {
    let blowup : Nat8 -> Nat32 = func (x) { Prim.natToNat32(Prim.nat8ToNat(x)) };
    switch x {
      case null { P.unreachable() };
      case (?x) {
        // to do -- use all bits in the final hash, not just the first ones
        // parenthesized so each byte is shifted before summing, independent
        // of the relative precedence of << and +
        (blowup(x[0]) << 24) +
        (blowup(x[1]) << 16) +
        (blowup(x[2]) << 8) +
        blowup(x[3])
      }
    }
  };

  // half-open containment test: interval.0 < pt < interval.1
  public func intervalContains(interval:(SegKey, SegKey), pt:SegKey) : Bool {
    switch (compare(interval.0, pt), compare(pt, interval.1)) {
      case (#less, #less) { true };
      case _ { false };
    }
  };

  // generate an array of segment keys, spaced uniformly in keyspace
  public func uniformSegments(n:Nat) : [SegKey] {
    assert (n > 0); // spacing below divides by n
    assert (n < 255); // to do -- handle larger cases of n
    // 0000..00, 0100..00, 0200..00, ..., ff00..00
    let dist = Prim.natToNat8(255 / n);
    let keyData : [var Nat8] = Array.init<Nat8>(32, 0);
    let segKeys = Buf.Buffer<SegKey>(n);
    for (i in Iter.range(0, n - 1)) {
      segKeys.add(?Array.freeze(keyData));
      keyData[0] += dist;
    };
    segKeys.toArray()
  };

  // total order over segment keys, byte-lexicographic; null means 'infinity'
  public func compare(x : ?[Nat8], y : ?[Nat8]) : Order.Order {
    Debug.print "SegKey SegKey";
    Debug.print ("SegKey compare bytes " # (debug_show (x, y)));
    switch (x, y) {
      case (null, null) { #equal };
      case (null, _) { #greater };
      case (_, null) { #less };
      case (?x, ?y) {
        Debug.print ("SegKey compare; lens " # (debug_show (x.size(), y.size())));
        assert(x.size() == 32);
        assert(y.size() == 32);
        for (i in x.keys()) {
          if (x[i] < y[i]) return #less
          else if (x[i] > y[i]) return #greater
          else { }
        };
        return #equal
      };
    }
  };
}
90 |
--------------------------------------------------------------------------------
/vendor/motoko-bigmap/src/Segment.mo:
--------------------------------------------------------------------------------
1 | import T "Types";
2 | import HashMap "mo:base/HashMap";
3 | import SegKey "SegKey";
4 | import Debug "DebugOff";
5 | //import Debug "mo:base/Debug";
6 | import P "mo:base/Prelude";
7 | import List "mo:base/List";
8 |
module {
  public type Segments = List.List<Segment>;

  public type Interval = T.Interval;

  public type Init = T.SegmentInit;

  // project the segment id out of any initializer case
  public func initId(init:Init) : Nat {
    switch init {
      case (#singleton(id)) id;
      case (#empty(id, _)) id;
      case (#nonEmpty(id, _, _)) id;
    };
  };

  // generate a list of segments, spaced uniformly in keyspace
  public func uniformSegments(numSegments:Nat) : Segments {
    let segKeys = SegKey.uniformSegments(numSegments);
    var segments : Segments = null;
    for (i in segKeys.keys()) {
      let first = segKeys[i];
      // last segment's interval is capped by the "infinite" key
      let last =
        if (i == segKeys.size() - 1)
          SegKey.inf()
        else
          segKeys[i + 1];
      segments := ?(Segment(#empty(i, (first, last))), segments);
    };
    // consing above builds the list in reverse; restore ascending order
    List.reverse(segments)
  };

  // One shard of the BigMap: a hash map restricted to a keyspace interval.
  public class Segment(init : Init) = Self {

    type SegMap = {
      interval : T.Interval;
      map : HashMap.HashMap<T.SegKey, T.Val>;
    };

    public let id : Nat = initId(init);

    private var segMap : SegMap =
      switch init {
        case (#nonEmpty(_, interval_, data)) {
          // forked from another segment: replay its existing content
          let m = {
            interval = interval_;
            map = HashMap.HashMap<T.SegKey, T.Val>(0, SegKey.equals, SegKey.hash);
          };
          for ((_, k, v) in data.vals()) {
            m.map.put(k, v)
          };
          m
        };
        case (#empty(_, interval_)) {
          // one of many initially-empty segments, with the given first/last keys
          {interval = interval_;
           map = HashMap.HashMap<T.SegKey, T.Val>(0, SegKey.equals, SegKey.hash);}
        };
        case (#singleton _) {
          // initial segment has special first and last keys, and no content
          {interval = (SegKey.zero(), SegKey.inf());
           map = HashMap.HashMap<T.SegKey, T.Val>(0, SegKey.equals, SegKey.hash);}
        };
      };

    // segment's local size; does not aggregate other segments' sizes.
    public func size() : Nat =
      segMap.map.size();

    public func intervalContains(k:T.SegKey) : Bool =
      SegKey.intervalContains(segMap.interval, k);

    public func interval() : Interval {
      segMap.interval
    };

    // plaintext-key lookup: hash the key, then look up the segment key
    public func get(k:T.Key) : ?T.Val {
      getSegKey(SegKey.ofKey(k));
    };

    public func getSegKey(k:T.SegKey) : ?T.Val {
      assert (SegKey.intervalContains(segMap.interval, k));
      Debug.print "Segment getSegKey begin";
      Debug.print (" - id = " # (debug_show id));
      let v = segMap.map.get(k);
      Debug.print "Segment getSegKey end";
      v
    };

    // plaintext-key insert: hash the key, then store under the segment key
    public func put(k:T.Key, v:T.Val) {
      putSegKey(SegKey.ofKey(k), v);
    };

    public func putSegKey(k:T.SegKey, v:T.Val) {
      Debug.print "Segment put begin";
      Debug.print (" - id = " # (debug_show id));
      assert (SegKey.intervalContains(segMap.interval, k));
      segMap.map.put(k, v);
      Debug.print "Segment put end";
    };
  };
}
110 |
--------------------------------------------------------------------------------
/vendor/motoko-bigmap/src/Types.mo:
--------------------------------------------------------------------------------
1 | import HashMap "mo:base/HashMap";
2 |
module {
  // Plaintext key/value payloads, as raw bytes exchanged over Candid.
  public type Key = [Nat8];
  public type Val = [Nat8];

  // SegKey type:
  // - compared to the (plaintext) Key type, these are hashed via the SHA256 package.
  // - the (unique) null key means "infinite hash value", always ending the ring.
  public type SegKey = ?[Nat8];

  // SegKey intervals: [first included, last excluded)
  public type Interval = (SegKey, SegKey);

  // segment initialization cases, as data:
  public type SegmentInit = {
    // this is the first, initially-empty segment of the BigMap.
    #singleton : Nat;
    // this is an initially-empty segment of a multi-segment pool.
    #empty : (Nat, Interval);
    // this is a forked segment of some other one; it has existing content.
    #nonEmpty : (Nat, Interval, [(Key, SegKey, Val)])
  };

  // Exposed to the Main index canister (see app/Main)
  // (service interface of one segment canister: init, put, and query get)
  public type SegmentCanister = actor {
    init : SegmentInit -> async ();
    put : (SegKey, Val) -> async ();
    get : query SegKey -> async ?Val;
  };

  // config/diagnostic info
  public type CanisterInfo = (SegmentCanister, Interval);
}
35 |
--------------------------------------------------------------------------------
/vendor/motoko-bigmap/test/BigTestPutGet.mo:
--------------------------------------------------------------------------------
1 | import Prim "mo:prim";
2 | import Buf "mo:base/Buffer";
3 | import BigMap "canister:BigMap";
4 | import Iter "mo:base/Iter";
5 | import Debug "mo:base/Debug";
6 |
7 | import TestBatch "mo:bigtest/Batch";
8 | import TestTypes "mo:bigtest/Types";
9 | import TestCall "mo:bigtest/service/BigMap";
10 |
actor {

  // Test "PutGet" as a BigTest program (compare to test/PutGet.mo)
  func putGetTestExp(size : Nat) : TestTypes.Exp = {
    let sizeText = (debug_show size);
    #labell(
      "PutGet",
      ?("range of (0, " # sizeText # ") puts, followed by the same range of assert'ed gets"),
      #block(
        [
          ("buf", #range(0, size)),
          ("_", #iterate(#varr("buf"), "i",
                         #call(#put(#varr("i"), #varr("i"))))),
          ("_", #iterate(#varr("buf"), "i",
                         #block(
                           [
                             ("x", #call(#get(#varr("i")))),
                             ("_", #assertt(#equal(#opt(#varr("i")), #varr("x"))))
                           ]))),
        ]))
  };

  // ----- Boiler-plate testing code below

  // create a big batch of smaller batches
  func newBatches(sizes : [Nat]) : TestBatch.Batch {
    let batch = TestBatch.Batch();
    for (size in sizes.vals()) {
      batch.push(putGetTestExp(size));
    };
    batch
  };

  // some defaults
  var batch : TestBatch.Batch = newBatches([0, 1, 2, 4, 8, 128]);

  // replace the current batch with fresh PutGet tests of the given sizes
  public func reset(sizes : [Nat]) : async () {
    batch := newBatches(sizes)
  };

  // append more PutGet tests to the current batch
  public func extend(sizes: [Nat]) {
    for (size in sizes.vals()) {
      batch.push(putGetTestExp(size));
    };
  };

  // inspect the evaluator's current stack/env/expression, read-only
  public query func peek() : async ?TestTypes.DebugInfo {
    batch.peek()
  };

  // false => no next call, otherwise returns true
  public func doNextCall() : async Bool {
    switch (batch.nextCallRequest()) {
      case null { false };
      case (?req) {
        Debug.print "doNextCall begin";
        Debug.print ("doNextCall - call = " # (debug_show req));
        Debug.print "doNextCall - awaiting result...";
        let res = await (TestCall.awaitt(req)());
        Debug.print ("doNextCall - result = " # (debug_show res));
        Debug.print "doNextCall - saving result...";
        batch.saveResult(res);
        Debug.print "doNextCall end";
        true
      }
    }
  };

  // Bonus:
  // For testing in an open, interactive world:
  // Use this to add other tests not-yet expressed above!
  public func pushExp(e: TestTypes.Exp) {
    batch.push(e)
  };

}
89 |
--------------------------------------------------------------------------------
/vendor/motoko-bigmap/test/PutGet.mo:
--------------------------------------------------------------------------------
1 | import Prim "mo:prim";
2 | import Buf "mo:base/Buf";
3 | import BigMap "canister:BigMap";
4 | import Iter "mo:base/Iter";
5 | import Debug "mo:base/Debug";
6 |
actor {
  // Exercise BigMap with `count` puts followed by `count` checked gets.
  public func go(count : Nat) : async () {

    // Cannot use Word8 because Candid type mismatch
    let buf = Buf.Buf<[Nat8]>(count);
    // Guard the empty case: `Iter.range(0, count - 1)` traps on Nat
    // underflow when count == 0.
    // Note: Prim.natToNat8 also traps for i > 255 (single-byte keys only).
    if (count > 0) {
      for (i in Iter.range(0, count - 1)) {
        let x : [Nat8] = [Prim.natToNat8(i)];
        buf.add(x)
      };
    };

    Debug.print "Test: Doing puts...";
    for (i in buf.vals()) {
      Debug.print ("Test put key " # (debug_show i) # "...");
      await BigMap.put(i, i);
      Debug.print ("Test put key " # (debug_show i) # ": Done.")
    };

    Debug.print "Test: Doing gets...";
    for (i in buf.vals()) {
      Debug.print ("Test get key " # (debug_show i) # "...");
      let j = await BigMap.get(i);
      switch j {
        case null { assert false }; // every put key must be found
        case (?j) {
          assert (j[0] == i[0]);
        };
      };
      // fixed: this loop performs gets, not puts
      Debug.print ("Test get key " # (debug_show i) # ": Done.");
    };

    Debug.print "Test: Success."
  };
}
40 |
--------------------------------------------------------------------------------
/vessel.dhall:
--------------------------------------------------------------------------------
-- vessel package manifest:
--   base   -- the Motoko base library
--   sha256 -- SHA-256 hashing (used by SegKey to hash keys)
-- `compiler = None Text` leaves the compiler version unpinned.
{
  dependencies = [ "base", "sha256" ],
  compiler = None Text
}
5 |
--------------------------------------------------------------------------------