├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── JSON TEMPLATE
│   │   └── large-dataset-datacap-application.yaml
│   ├── scripts
│   │   └── close_inactive
│   │       ├── close_stale_issues.mjs
│   │       ├── notify_stale_issues.mjs
│   │       └── utils.mjs
│   └── workflows
│       └── close-inactive.yml
└── README.md

/.github/ISSUE_TEMPLATE/JSON TEMPLATE:
--------------------------------------------------------------------------------
1 | "applicationVersion": 1,
2 | "dataCapApplicationType": "da | ldn-v3 | e-fil",
3 | "projectId": 1,
4 | "datacapApplicant": "",
5 | "applicationInfo": {
6 | "coreInformation": {
7 | "Data Owner Name": "",
8 | "Data Owner Country/Region": "",
9 | "Data Owner Industry": "",
10 | "Website": "",
11 | "Social Media": {
12 | "handle": "",
13 | "type": "Slack | Twitter | Facebook"
14 | },
15 | "What is your role related to the dataset": "",
16 | "Total amount of DataCap being requested": {
17 | "amount": 0,
18 | "unit": "GiB | TiB | PiB"
19 | },
20 | "Expected size of single dataset (one copy)": {
21 | "amount": "",
22 | "unit": "GiB | TiB | PiB"
23 | },
24 | "Number of replicas to store (minimum 4)": 0,
25 | "Weekly allocation of DataCap requested": {
26 | "amount": "",
27 | "unit": "GiB | TiB | PiB"
28 | },
29 | "On-chain address (Note that you will not be able to change this in the future and that you should have a unique address for each LDN application)": "",
30 | "Data Type of Application": "Slingshot | Public, Open Dataset (Research/Non-Profit) | Public, Open Commercial/Enterprise | Private Commercial/Enterprise | Private Non-Profit / Social impact",
31 | "Custom multisig": ""
32 | },
33 | "projectDetails": {
34 | "Share a brief history of your project and organization": "",
35 | "Is this project associated with other projects/ecosystem stakeholders?": true,
36 | "If answered yes, what are the other projects/ecosystem stakeholders": ""
37 | },
38 | "useCaseDetails": {
39 | "Describe the data being stored onto Filecoin": "",
40 | "Where was the data currently stored in this dataset sourced from": "AWS Cloud | Google Cloud | Azure Cloud | My Own Storage Infra | other",
41 | "If you answered 'Other' in the previous question, enter the details here": "",
42 | "How do you plan to prepare the dataset": "IPFS | Lotus | Singularity | Graphsplit | other/custom tool",
43 | "If you answered 'other/custom tool' in the previous question, enter the details here": "",
44 | "Please share a sample of the data (a link to a file, an image, a table, etc., are good ways to do this.)": "",
45 | "Confirm that this is a public dataset that can be retrieved by anyone on the network (i.e., no specific permissions or access rights are required to view the data)": true,
46 | "If you chose not to confirm, what was the reason": "",
47 | "What is the expected retrieval frequency for this data": "Daily | Weekly | Monthly | Yearly | Sporadic | Never",
48 | "For how long do you plan to keep this dataset stored on Filecoin": "Less than a year | 1 to 1.5 years | 1.5 to 2 years | 2 to 3 years | More than 3 years | Permanently"
49 | }
50 | }
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/large-dataset-datacap-application.yaml:
--------------------------------------------------------------------------------
1 | name: Large Dataset Datacap Application 2 | description: Clients should use this application form to request a DataCap allocation via an LDN for a dataset 3 | title: "[DataCap Application] - " 4 | labels: 'application, Phase: Diligence' 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9
| # Large Dataset Notary Application 10 | To apply for DataCap to onboard your dataset to Filecoin, please fill out the following form 11 | - type: markdown 12 | attributes: 13 | value: | 14 | # Core Information 15 | - type: input 16 | attributes: 17 | label: Data Owner Name 18 | validations: 19 | required: true 20 | - type: dropdown 21 | attributes: 22 | label: What is your role related to the dataset 23 | options: 24 | - Dataset Owner 25 | - Data Preparer 26 | - Storage provider filling out application on behalf of the data owner 27 | - Data onramp entity that provides data onboarding services to multiple clients 28 | - Other 29 | validations: 30 | required: true 31 | - type: dropdown 32 | attributes: 33 | label: Data Owner Country/Region 34 | options: 35 | - Afghanistan 36 | - Åland Islands 37 | - Albania 38 | - Algeria 39 | - American Samoa 40 | - Andorra 41 | - Angola 42 | - Anguilla 43 | - Antarctica 44 | - Antigua and Barbuda 45 | - Argentina 46 | - Armenia 47 | - Aruba 48 | - Australia 49 | - Austria 50 | - Azerbaijan 51 | - Bahamas 52 | - Bahrain 53 | - Bangladesh 54 | - Barbados 55 | - Belarus 56 | - Belgium 57 | - Belize 58 | - Benin 59 | - Bermuda 60 | - Bhutan 61 | - Bolivia 62 | - "Bonaire, Sint Eustatius and Saba" 63 | - Bosnia and Herzegovina 64 | - Botswana 65 | - Bouvet Island 66 | - Brazil 67 | - British Indian Ocean Territory 68 | - Brunei Darussalam 69 | - Bulgaria 70 | - Burkina Faso 71 | - Burundi 72 | - Cambodia 73 | - Cameroon 74 | - Canada 75 | - Cape Verde 76 | - Cayman Islands 77 | - Central African Republic 78 | - Chad 79 | - Chile 80 | - China 81 | - Christmas Island 82 | - Cocos (Keeling) Islands 83 | - Colombia 84 | - Comoros 85 | - Congo 86 | - "Congo, the Democratic Republic of the" 87 | - Cook Islands 88 | - Costa Rica 89 | - Côte d'Ivoire 90 | - Croatia 91 | - Cuba 92 | - Curaçao 93 | - Cyprus 94 | - Czech Republic 95 | - Denmark 96 | - Djibouti 97 | - Dominica 98 | - Dominican Republic 99 | - Ecuador 100 | - Egypt 101 | - El Salvador 102 | - Equatorial Guinea 103 | - Eritrea 104 | - Estonia 105 | - Ethiopia 106 | - Falkland Islands (Malvinas) 107 | - Faroe Islands 108 | - Fiji 109 | - Finland 110 | - France 111 | - French Guiana 112 | - French Polynesia 113 | - French Southern Territories 114 | - Gabon 115 | - Gambia 116 | - Georgia 117 | - Germany 118 | - Ghana 119 | - Gibraltar 120 | - Greece 121 | - Greenland 122 | - Grenada 123 | - Guadeloupe 124 | - Guam 125 | - Guatemala 126 | - Guernsey 127 | - Guinea 128 | - Guinea-Bissau 129 | - Guyana 130 | - Haiti 131 | - Heard Island and McDonald Islands 132 | - Holy See (Vatican City State) 133 | - Honduras 134 | - Hong Kong 135 | - Hungary 136 | - Iceland 137 | - India 138 | - Indonesia 139 | - "Iran, Islamic Republic of" 140 | - Iraq 141 | - Ireland 142 | - Isle of Man 143 | - Israel 144 | - Italy 145 | - Jamaica 146 | - Japan 147 | - Jersey 148 | - Jordan 149 | - Kazakhstan 150 | - Kenya 151 | - Kiribati 152 | - "Korea, Democratic People's Republic of" 153 | - "Korea, Republic of" 154 | - Kuwait 155 | - Kyrgyzstan 156 | - Lao People's Democratic Republic 157 | - Latvia 158 | - Lebanon 159 | - Lesotho 160 | - Liberia 161 | - Libya 162 | - Liechtenstein 163 | - Lithuania 164 | - Luxembourg 165 | - Macao 166 | - "Macedonia, the Former Yugoslav Republic of" 167 | - Madagascar 168 | - Malawi 169 | - Malaysia 170 | - Maldives 171 | - Mali 172 | - Malta 173 | - Marshall Islands 174 | - Martinique 175 | - Mauritania 176 | - Mauritius 177 | - Mayotte 178 | - Mexico 179 | - "Micronesia, Federated States of" 180 | 
- "Moldova, Republic of" 181 | - Monaco 182 | - Mongolia 183 | - Montenegro 184 | - Montserrat 185 | - Morocco 186 | - Mozambique 187 | - Myanmar 188 | - Namibia 189 | - Nauru 190 | - Nepal 191 | - Netherlands 192 | - New Caledonia 193 | - New Zealand 194 | - Nicaragua 195 | - Niger 196 | - Nigeria 197 | - Niue 198 | - Norfolk Island 199 | - Northern Mariana Islands 200 | - Norway 201 | - Oman 202 | - Pakistan 203 | - Palau 204 | - "Palestine, State of" 205 | - Panama 206 | - Papua New Guinea 207 | - Paraguay 208 | - Peru 209 | - Philippines 210 | - Pitcairn 211 | - Poland 212 | - Portugal 213 | - Puerto Rico 214 | - Qatar 215 | - Réunion 216 | - Romania 217 | - Russian Federation 218 | - Rwanda 219 | - Saint Barthélemy 220 | - "Saint Helena, Ascension and Tristan da Cunha" 221 | - Saint Kitts and Nevis 222 | - Saint Lucia 223 | - Saint Martin (French part) 224 | - Saint Pierre and Miquelon 225 | - Saint Vincent and the Grenadines 226 | - Samoa 227 | - San Marino 228 | - Sao Tome and Principe 229 | - Saudi Arabia 230 | - Senegal 231 | - Serbia 232 | - Seychelles 233 | - Sierra Leone 234 | - Singapore 235 | - Sint Maarten (Dutch part) 236 | - Slovakia 237 | - Slovenia 238 | - Solomon Islands 239 | - Somalia 240 | - South Africa 241 | - South Georgia and the South Sandwich Islands 242 | - South Sudan 243 | - Spain 244 | - Sri Lanka 245 | - Sudan 246 | - Suriname 247 | - Svalbard and Jan Mayen 248 | - Swaziland 249 | - Sweden 250 | - Switzerland 251 | - Syrian Arab Republic 252 | - Taiwan 253 | - Tajikistan 254 | - "Tanzania, United Republic of" 255 | - Thailand 256 | - Timor-Leste 257 | - Togo 258 | - Tokelau 259 | - Tonga 260 | - Trinidad and Tobago 261 | - Tunisia 262 | - Turkey 263 | - Turkmenistan 264 | - Turks and Caicos Islands 265 | - Tuvalu 266 | - Uganda 267 | - Ukraine 268 | - United Arab Emirates 269 | - United Kingdom 270 | - United States 271 | - United States Minor Outlying Islands 272 | - Uruguay 273 | - Uzbekistan 274 | - Vanuatu 275 | - "Venezuela, Bolivarian Republic of" 276 | - Viet Nam 277 | - "Virgin Islands, British" 278 | - "Virgin Islands, U.S." 
279 | - Wallis and Futuna
280 | - Western Sahara
281 | - Yemen
282 | - Zambia
283 | - Zimbabwe
284 | validations:
285 | required: true
286 | - type: dropdown
287 | attributes:
288 | label: Data Owner Industry
289 | options:
290 | - Life Science / Healthcare
291 | - Arts & Recreation
292 | - Education & Training
293 | - Environment
294 | - Web3 / Crypto
295 | - IT & Technology Services
296 | - Professional Services (Legal, Consulting, Advising)
297 | - Government
298 | - Not-for-Profit
299 | - Financial Services
300 | - Utilities
301 | - Resources, Agriculture & Fisheries
302 | - Construction, Property & Real Estate
303 | - Information, Media & Telecommunications
304 | - Transport Services
305 | - Other
306 | validations:
307 | required: true
308 | - type: input
309 | attributes:
310 | label: Website
311 | validations:
312 | required: true
313 | - type: textarea
314 | attributes:
315 | label: Social Media
316 | render: text
317 | validations:
318 | required: true
319 | - type: dropdown
320 | attributes:
321 | label: Total amount of DataCap being requested
322 | options:
323 | - 100TiB
324 | - 200TiB
325 | - 300TiB
326 | - 400TiB
327 | - 500TiB
328 | - 1PiB
329 | - 2PiB
330 | - 3PiB
331 | - 4PiB
332 | - 5PiB
333 | - 6PiB
334 | - 7PiB
335 | - 8PiB
336 | - 9PiB
337 | - 10PiB
338 | - 11PiB
339 | - 12PiB
340 | - 13PiB
341 | - 14PiB
342 | - 15PiB
343 | - More Than 15PiB
344 | validations:
345 | required: true
346 | - type: input
347 | attributes:
348 | label: Expected size of single dataset (one copy)
349 | validations:
350 | required: true
351 | - type: dropdown
352 | attributes:
353 | label: Number of replicas to store
354 | options:
355 | - 1
356 | - 2
357 | - 3
358 | - 4
359 | - 5
360 | - 6
361 | - 7
362 | - 8
363 | - 9
364 | - 10
365 | validations:
366 | required: true
367 | - type: dropdown
368 | attributes:
369 | label: Weekly allocation of DataCap requested
370 | description: This amount is usually between 1 TiB and 1000 TiB
371 | options:
372 | - 10TiB
373 | - 50TiB
374 | - 100TiB
375 | - 150TiB
376 | - 200TiB
377 | - 250TiB
378 | - 300TiB
379 | - 350TiB
380 | - 400TiB
381 | - 450TiB
382 | - 500TiB
383 | - 600TiB
384 | - 700TiB
385 | - 800TiB
386 | - 900TiB
387 | - 1PiB
388 | validations:
389 | required: true
390 | - type: input
391 | attributes:
392 | label: On-chain address for first allocation
393 | description: Make sure the address exists on the chain
394 | validations:
395 | required: true
396 | - type: dropdown
397 | attributes:
398 | label: Data Type of Application
399 | options:
400 | - Slingshot
401 | - Public, Open Dataset (Research/Non-Profit)
402 | - Public, Open Commercial/Enterprise
403 | - Private Commercial/Enterprise
404 | - Private Non-Profit / Social impact
405 | validations:
406 | required: true
407 | - type: checkboxes
408 | attributes:
409 | label: Custom multisig
410 | description: Check this box only if the governance team has explicitly asked you to.
411 | options:
412 | - label: "Use Custom Multisig"
413 | - type: input
414 | attributes:
415 | label: Identifier
416 | description: If needed, the Filecoin team will fill in this field later. You can skip this field.
417 | validations: 418 | required: false 419 | - type: markdown 420 | attributes: 421 | value: | 422 | # Project Details 423 | - type: textarea 424 | attributes: 425 | label: Share a brief history of your project and organization 426 | render: text 427 | validations: 428 | required: true 429 | - type: dropdown 430 | attributes: 431 | label: Is this project associated with other projects/ecosystem stakeholders? 432 | options: 433 | - "Yes" 434 | - "No" 435 | validations: 436 | required: true 437 | - type: textarea 438 | attributes: 439 | label: If answered yes, what are the other projects/ecosystem stakeholders 440 | render: text 441 | validations: 442 | required: false 443 | - type: markdown 444 | attributes: 445 | value: | 446 | # Use-case Details 447 | - type: textarea 448 | attributes: 449 | label: Describe the data being stored onto Filecoin 450 | render: text 451 | validations: 452 | required: true 453 | - type: dropdown 454 | attributes: 455 | label: Where was the data currently stored in this dataset sourced from 456 | options: 457 | - AWS Cloud 458 | - Google Cloud 459 | - Azure Cloud 460 | - My Own Storage Infra 461 | - Other 462 | validations: 463 | required: true 464 | - type: textarea 465 | attributes: 466 | label: If you answered "Other" in the previous question, enter the details here 467 | render: text 468 | validations: 469 | required: false 470 | - type: dropdown 471 | attributes: 472 | label: If you are a data preparer. What is your location (Country/Region) 473 | options: 474 | - Afghanistan 475 | - Åland Islands 476 | - Albania 477 | - Algeria 478 | - American Samoa 479 | - Andorra 480 | - Angola 481 | - Anguilla 482 | - Antarctica 483 | - Antigua and Barbuda 484 | - Argentina 485 | - Armenia 486 | - Aruba 487 | - Australia 488 | - Austria 489 | - Azerbaijan 490 | - Bahamas 491 | - Bahrain 492 | - Bangladesh 493 | - Barbados 494 | - Belarus 495 | - Belgium 496 | - Belize 497 | - Benin 498 | - Bermuda 499 | - Bhutan 500 | - Bolivia 501 | - "Bonaire, Sint Eustatius and Saba" 502 | - Bosnia and Herzegovina 503 | - Botswana 504 | - Bouvet Island 505 | - Brazil 506 | - British Indian Ocean Territory 507 | - Brunei Darussalam 508 | - Bulgaria 509 | - Burkina Faso 510 | - Burundi 511 | - Cambodia 512 | - Cameroon 513 | - Canada 514 | - Cape Verde 515 | - Cayman Islands 516 | - Central African Republic 517 | - Chad 518 | - Chile 519 | - China 520 | - Christmas Island 521 | - Cocos (Keeling) Islands 522 | - Colombia 523 | - Comoros 524 | - Congo 525 | - "Congo, the Democratic Republic of the" 526 | - Cook Islands 527 | - Costa Rica 528 | - Côte d'Ivoire 529 | - Croatia 530 | - Cuba 531 | - Curaçao 532 | - Cyprus 533 | - Czech Republic 534 | - Denmark 535 | - Djibouti 536 | - Dominica 537 | - Dominican Republic 538 | - Ecuador 539 | - Egypt 540 | - El Salvador 541 | - Equatorial Guinea 542 | - Eritrea 543 | - Estonia 544 | - Ethiopia 545 | - Falkland Islands (Malvinas) 546 | - Faroe Islands 547 | - Fiji 548 | - Finland 549 | - France 550 | - French Guiana 551 | - French Polynesia 552 | - French Southern Territories 553 | - Gabon 554 | - Gambia 555 | - Georgia 556 | - Germany 557 | - Ghana 558 | - Gibraltar 559 | - Greece 560 | - Greenland 561 | - Grenada 562 | - Guadeloupe 563 | - Guam 564 | - Guatemala 565 | - Guernsey 566 | - Guinea 567 | - Guinea-Bissau 568 | - Guyana 569 | - Haiti 570 | - Heard Island and McDonald Islands 571 | - Holy See (Vatican City State) 572 | - Honduras 573 | - Hong Kong 574 | - Hungary 575 | - Iceland 576 | - India 577 | - Indonesia 578 | - "Iran, 
Islamic Republic of" 579 | - Iraq 580 | - Ireland 581 | - Isle of Man 582 | - Israel 583 | - Italy 584 | - Jamaica 585 | - Japan 586 | - Jersey 587 | - Jordan 588 | - Kazakhstan 589 | - Kenya 590 | - Kiribati 591 | - "Korea, Democratic People's Republic of" 592 | - "Korea, Republic of" 593 | - Kuwait 594 | - Kyrgyzstan 595 | - Lao People's Democratic Republic 596 | - Latvia 597 | - Lebanon 598 | - Lesotho 599 | - Liberia 600 | - Libya 601 | - Liechtenstein 602 | - Lithuania 603 | - Luxembourg 604 | - Macao 605 | - "Macedonia, the Former Yugoslav Republic of" 606 | - Madagascar 607 | - Malawi 608 | - Malaysia 609 | - Maldives 610 | - Mali 611 | - Malta 612 | - Marshall Islands 613 | - Martinique 614 | - Mauritania 615 | - Mauritius 616 | - Mayotte 617 | - Mexico 618 | - "Micronesia, Federated States of" 619 | - "Moldova, Republic of" 620 | - Monaco 621 | - Mongolia 622 | - Montenegro 623 | - Montserrat 624 | - Morocco 625 | - Mozambique 626 | - Myanmar 627 | - Namibia 628 | - Nauru 629 | - Nepal 630 | - Netherlands 631 | - New Caledonia 632 | - New Zealand 633 | - Nicaragua 634 | - Niger 635 | - Nigeria 636 | - Niue 637 | - Norfolk Island 638 | - Northern Mariana Islands 639 | - Norway 640 | - Oman 641 | - Pakistan 642 | - Palau 643 | - "Palestine, State of" 644 | - Panama 645 | - Papua New Guinea 646 | - Paraguay 647 | - Peru 648 | - Philippines 649 | - Pitcairn 650 | - Poland 651 | - Portugal 652 | - Puerto Rico 653 | - Qatar 654 | - Réunion 655 | - Romania 656 | - Russian Federation 657 | - Rwanda 658 | - Saint Barthélemy 659 | - "Saint Helena, Ascension and Tristan da Cunha" 660 | - Saint Kitts and Nevis 661 | - Saint Lucia 662 | - Saint Martin (French part) 663 | - Saint Pierre and Miquelon 664 | - Saint Vincent and the Grenadines 665 | - Samoa 666 | - San Marino 667 | - Sao Tome and Principe 668 | - Saudi Arabia 669 | - Senegal 670 | - Serbia 671 | - Seychelles 672 | - Sierra Leone 673 | - Singapore 674 | - Sint Maarten (Dutch part) 675 | - Slovakia 676 | - Slovenia 677 | - Solomon Islands 678 | - Somalia 679 | - South Africa 680 | - South Georgia and the South Sandwich Islands 681 | - South Sudan 682 | - Spain 683 | - Sri Lanka 684 | - Sudan 685 | - Suriname 686 | - Svalbard and Jan Mayen 687 | - Swaziland 688 | - Sweden 689 | - Switzerland 690 | - Syrian Arab Republic 691 | - Taiwan 692 | - Tajikistan 693 | - "Tanzania, United Republic of" 694 | - Thailand 695 | - Timor-Leste 696 | - Togo 697 | - Tokelau 698 | - Tonga 699 | - Trinidad and Tobago 700 | - Tunisia 701 | - Turkey 702 | - Turkmenistan 703 | - Turks and Caicos Islands 704 | - Tuvalu 705 | - Uganda 706 | - Ukraine 707 | - United Arab Emirates 708 | - United Kingdom 709 | - United States 710 | - United States Minor Outlying Islands 711 | - Uruguay 712 | - Uzbekistan 713 | - Vanuatu 714 | - "Venezuela, Bolivarian Republic of" 715 | - Viet Nam 716 | - "Virgin Islands, British" 717 | - "Virgin Islands, U.S." 718 | - Wallis and Futuna 719 | - Western Sahara 720 | - Yemen 721 | - Zambia 722 | - Zimbabwe 723 | validations: 724 | required: false 725 | - type: textarea 726 | attributes: 727 | label: If you are a data preparer, how will the data be prepared? Please include tooling used and technical details? 728 | render: text 729 | validations: 730 | required: false 731 | - type: textarea 732 | attributes: 733 | label: If you are not preparing the data, who will prepare the data? 
(Provide name and business)
734 | render: text
735 | validations:
736 | required: false
737 | - type: textarea
738 | attributes:
739 | label: Has this dataset been stored on the Filecoin network before? If so, please explain and make the case why you would like to store this dataset again on the network. Provide details on preparation and/or SP distribution.
740 | render: text
741 | validations:
742 | required: false
743 | - type: textarea
744 | attributes:
745 | label: Please share a sample of the data
746 | description: A link to a file, an image, a table, etc., are good ways to do this.
747 | render: text
748 | validations:
749 | required: true
750 | - type: checkboxes
751 | attributes:
752 | label: Confirm that this is a public dataset that can be retrieved by anyone on the network
753 | description: i.e., no specific permissions or access rights are required to view the data
754 | options:
755 | - label: I confirm
756 | validations:
757 | required: false
758 | - type: textarea
759 | attributes:
760 | label: If you chose not to confirm, what was the reason
761 | render: text
762 | validations:
763 | required: false
764 | - type: dropdown
765 | attributes:
766 | label: What is the expected retrieval frequency for this data
767 | options:
768 | - Daily
769 | - Weekly
770 | - Monthly
771 | - Yearly
772 | - Sporadic
773 | - Never
774 | validations:
775 | required: true
776 | - type: dropdown
777 | attributes:
778 | label: For how long do you plan to keep this dataset stored on Filecoin
779 | options:
780 | - Less than 1 year
781 | - 1 to 1.5 years
782 | - 1.5 to 2 years
783 | - 2 to 3 years
784 | - More than 3 years
785 | - Permanently
786 | validations:
787 | required: true
788 | - type: markdown
789 | attributes:
790 | value: |
791 | # DataCap Allocation Plan
792 | - type: dropdown
793 | attributes:
794 | label: In which geographies do you plan on making storage deals
795 | multiple: true
796 | options:
797 | - Greater China
798 | - Asia other than Greater China
799 | - Africa
800 | - North America
801 | - South America
802 | - Europe
803 | - Australia (continent)
804 | - Antarctica
805 | validations:
806 | required: true
807 | - type: dropdown
808 | attributes:
809 | label: How will you be distributing your data to storage providers
810 | multiple: true
811 | options:
812 | - Cloud storage (e.g. S3)
813 | - HTTP or FTP server
814 | - IPFS
815 | - Shipping hard drives
816 | - Lotus built-in data transfer
817 | - Venus built-in data transfer
818 | - I don't know yet
819 | - Others
820 | validations:
821 | required: true
822 | - type: dropdown
823 | attributes:
824 | label: How do you plan to choose storage providers
825 | multiple: true
826 | options:
827 | - Slack
828 | - Filmine
829 | - Big Data Exchange
830 | - Partners
831 | - I don't know yet
832 | - Others
833 | validations:
834 | required: true
835 | - type: textarea
836 | attributes:
837 | label: If you answered "Others" in the previous question, what is the tool or platform you plan to use
838 | render: text
839 | validations:
840 | required: false
841 | - type: textarea
842 | attributes:
843 | render: text
844 | label: If you already have a list of storage providers to work with, fill out their names and provider IDs below
845 | validations:
846 | required: false
847 | - type: dropdown
848 | attributes:
849 | label: How do you plan to make deals with your storage providers
850 | multiple: true
851 | options:
852 | - Boost client
853 | - Lotus client
854 | - Droplet client
855 | - Bidbot
856 | - Singularity
857 | - I don't know yet
858 | - Others/custom tool
859 | - type: textarea
860 | attributes:
861 | label: If you answered "Others/custom tool" in the previous question, enter the details here
862 | render: text
863 | validations:
864 | required: false
865 | - type: dropdown
866 | attributes:
867 | label: Can you confirm that you will follow the Fil+ guideline
868 | description: Data owner should engage at least 4 SPs and no single SP ID should receive >30% of a client's allocated DataCap
869 | options:
870 | - "Yes"
871 | - "No"
872 | validations:
873 | required: true
874 |
--------------------------------------------------------------------------------
/.github/scripts/close_inactive/close_stale_issues.mjs:
--------------------------------------------------------------------------------
1 | // .github/scripts/close_inactive/close_stale_issues.mjs
2 |
3 | import { Octokit } from "@octokit/rest";
4 | import fetch from "node-fetch";
5 | import { checkThrottling, getLastComment } from "./utils.mjs";
6 |
7 | const DAYS_TO_WAIT = 14;
8 | const owner = process.env.GITHUB_REPOSITORY.split('/')[0];
9 | const repo = process.env.GITHUB_REPOSITORY.split('/')[1];
10 |
11 | const octokit = new Octokit({
12 | auth: process.env.GITHUB_TOKEN,
13 | request: {
14 | fetch: fetch
15 | }
16 | });
17 |
18 | /**
19 | * This function closes open issues that have seen no activity in the last DAYS_TO_WAIT days, commenting on and labeling them as stale first.
20 | */
21 | async function closeStaleIssues() {
22 | await checkThrottling(octokit);
23 |
24 | let issues = await octokit.paginate(octokit.issues.listForRepo, {
25 | owner,
26 | repo,
27 | state: 'open',
28 | per_page: 100,
29 | });
30 |
31 | const dateThreshold = new Date();
32 | dateThreshold.setDate(dateThreshold.getDate() - DAYS_TO_WAIT);
33 |
34 | for (let issue of issues) {
35 | try {
36 | await checkThrottling(octokit);
37 |
38 | const lastComment = await getLastComment(octokit, owner, repo, issue.number, false);
39 | let updatedAt = lastComment
40 | ? new Date(lastComment.created_at)
41 | : new Date(issue.created_at);
42 |
43 | // Calculate how far past the threshold the last activity is.
44 | const diffTime = dateThreshold - updatedAt;
45 | const diffDays = Math.ceil(diffTime / (1000 * 60 * 60 * 24));
46 |
47 | if (diffDays > 0) {
48 | await octokit.issues.createComment({
49 | owner,
50 | repo,
51 | issue_number: issue.number,
52 | body: `This application has not seen any responses in the last 14 days, so for now it is being closed. Please feel free to contact the Fil+ Gov team to re-open the application if it is still being processed. Thank you!
53 | \n\n
54 | --
55 | Commented by Stale Bot.`
56 | });
57 | // Add stale label.
58 | await octokit.issues.addLabels({
59 | owner,
60 | repo,
61 | issue_number: issue.number,
62 | labels: ['stale']
63 | });
64 |
65 | // Close issue.
66 | await octokit.issues.update({
67 | owner,
68 | repo,
69 | issue_number: issue.number,
70 | state: 'closed'
71 | });
72 |
73 | console.log(`Closing issue ${issue.number}: last activity ${diffDays} days past the threshold`);
74 | } else {
75 | console.log(`Issue ${issue.number} will remain open. Last activity ${diffDays} days from threshold`);
76 | }
77 | } catch (error) {
78 | console.log(`Error in ${issue.number}: ${error.message}`);
79 | }
80 | }
81 | }
82 |
83 | closeStaleIssues();
84 |
85 |
--------------------------------------------------------------------------------
/.github/scripts/close_inactive/notify_stale_issues.mjs:
--------------------------------------------------------------------------------
1 | // .github/scripts/close_inactive/notify_stale_issues.mjs
2 |
3 | import { Octokit } from "@octokit/rest";
4 | import fetch from "node-fetch";
5 | import { getLastComment, checkThrottling } from "./utils.mjs";
6 |
7 | const DAYS_TO_WAIT = 10;
8 | const owner = process.env.GITHUB_REPOSITORY.split('/')[0];
9 | const repo = process.env.GITHUB_REPOSITORY.split('/')[1];
10 |
11 | const octokit = new Octokit({
12 | auth: process.env.GITHUB_TOKEN,
13 | request: {
14 | fetch: fetch
15 | }
16 | });
17 |
18 | /**
19 | * This function posts a stale warning comment on open issues that have seen no activity in the last DAYS_TO_WAIT days.
20 | */
21 | async function checkAndCommentOnIssues() {
22 |
23 | await checkThrottling(octokit);
24 |
25 | let issues = await octokit.paginate(octokit.issues.listForRepo, {
26 | owner,
27 | repo,
28 | state: 'open',
29 | per_page: 100,
30 | });
31 |
32 | const dateThreshold = new Date();
33 | dateThreshold.setDate(dateThreshold.getDate() - DAYS_TO_WAIT);
34 |
35 | for (let issue of issues) {
36 | try {
37 | await checkThrottling(octokit);
38 |
39 | const lastComment = await getLastComment(octokit, owner, repo, issue.number, true);
40 | if (lastComment &&
41 | lastComment.user.login === 'github-actions[bot]' &&
42 | lastComment.body.includes("Commented by Stale Bot.")
43 | ) continue; // Do not comment again if the bot has already commented.
44 |
45 | const updatedAt = lastComment
46 | ? new Date(lastComment.created_at)
47 | : new Date(issue.created_at);
48 |
49 | // Calculate how far past the threshold the last activity is.
50 | const diffTime = dateThreshold - updatedAt;
51 | const diffDays = Math.ceil(diffTime / (1000 * 60 * 60 * 24));
52 |
53 | if (diffDays > 0) {
54 | await octokit.issues.createComment({
55 | owner,
56 | repo,
57 | issue_number: issue.number,
58 | body: `This application has not seen any responses in the last 10 days. This issue will be marked with the Stale label and will be closed in 4 days. Comment if you want to keep this application open.
59 | \n\n 60 | -- 61 | Commented by Stale Bot.` 62 | }); 63 | console.log(`Stale advice on issue ${issue.number}. Updated ${diffDays} days from threshold`); 64 | } else { 65 | console.log(`No stale advice on issue ${issue.number}. Updated ${diffDays} days from threshold`); 66 | } 67 | } catch (error) { 68 | console.log(`Error in ${issue.number}: ${error.message}`); 69 | } 70 | } 71 | } 72 | 73 | checkAndCommentOnIssues(); 74 | 75 | -------------------------------------------------------------------------------- /.github/scripts/close_inactive/utils.mjs: -------------------------------------------------------------------------------- 1 | const RATE_REMAINING_LIMIT = 100; 2 | 3 | /** 4 | * This function returns the last comment on an issue. 5 | * 6 | * @param {Octokit} octokit 7 | * @param {string} owner 8 | * @param {string} repo 9 | * @param {number} issueNumber 10 | * @param {boolean} includeBot 11 | * @returns 12 | */ 13 | export async function getLastComment(octokit, owner, repo, issueNumber, includeBot) { 14 | const botSignature = "Commented by Stale Bot." 15 | 16 | let comments = await octokit.paginate(octokit.issues.listComments, { 17 | owner, 18 | repo, 19 | issue_number: issueNumber, 20 | per_page: 100 21 | }); 22 | 23 | if (!comments || comments.length === 0) { 24 | return null; // Issue has no comments. 25 | } 26 | 27 | if (!includeBot) { 28 | comments = comments.filter(comment => 29 | !(comment.user.login === 'github-actions[bot]' && 30 | comment.body.includes(botSignature)) 31 | ); 32 | } 33 | 34 | // Order comments by date desc. 35 | comments.sort((a, b) => new Date(b.created_at) - new Date(a.created_at)); 36 | return comments[0]; 37 | } 38 | 39 | /** 40 | * This function checks the rate limit status and sleeps if the remaining requests are below the limit. 41 | * 42 | * @param {Octokit} octokit 43 | */ 44 | export async function checkThrottling(octokit) { 45 | const rateLimitStatus = await octokit.rateLimit.get(); 46 | const remaining = rateLimitStatus.data.rate.remaining; 47 | 48 | if (remaining < RATE_REMAINING_LIMIT) { 49 | const timestampNow = new Date().getTime() 50 | const timestampReset = (rateLimitStatus.data.rate.reset + 10) * 1000 51 | const sleepTime = (timestampReset - timestampNow) 52 | 53 | const sleepTimeStringInMinutesAndSeconds = new Date(sleepTime).toISOString().substr(14, 5); 54 | console.log(`Rate limit reached. 
Throttling for ${sleepTimeStringInMinutesAndSeconds} (mm:ss)`);
55 | await new Promise(resolve => setTimeout(resolve, sleepTime));
56 | }
57 | }
--------------------------------------------------------------------------------
/.github/workflows/close-inactive.yml:
--------------------------------------------------------------------------------
1 | name: 'Close inactive issues'
2 | on:
3 | #workflow_dispatch:
4 | schedule:
5 | - cron: '0 1 * * *'
6 |
7 | jobs:
8 |
9 | notify_stale_issues:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - name: Checkout code
13 | uses: actions/checkout@v3
14 | with:
15 | sparse-checkout: |
16 | .github
17 |
18 | - name: Install dependencies
19 | run: |
20 | npm install @octokit/rest node-fetch
21 |
22 | - name: Notify on stale issues
23 | run: node .github/scripts/close_inactive/notify_stale_issues.mjs
24 | env:
25 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
26 |
27 | close_stale_issues:
28 | runs-on: ubuntu-latest
29 | steps:
30 | - name: Checkout code
31 | uses: actions/checkout@v3
32 | with:
33 | sparse-checkout: |
34 | .github
35 |
36 | - name: Install dependencies
37 | run: |
38 | npm install @octokit/rest node-fetch
39 |
40 | - name: Close stale issues
41 | run: node .github/scripts/close_inactive/close_stale_issues.mjs
42 | env:
43 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
44 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Archived July 2024
With the deprecation of the large dataset notary program, we are archiving this repo. For Fil+ Allocator Governance discussions, see the [Governance repo](https://github.com/filecoin-project/Allocator-Governance/). For the registry of active allocators, see the [Allocator Registry repo](https://github.com/filecoin-project/Allocator-Registry).

To view other updated reference links for the Fil+ program, please see:

- For information on open and pending DataCap deals, see [FIDL's Allocator.tech](https://allocator.tech/).
- For information on support issues or a listing of all current Allocators, see the [Allocator Registry repo](https://github.com/filecoin-project/Allocator-Registry).
- For getting in touch, please reach out on [Slack](https://filecoin.io/slack).
- Bi-weekly recorded Fil+ governance meetings are on [YouTube](https://www.youtube.com/playlist?list=PLp3zrT1ewY0kYN1hJpERMUxTCbFC4yZwN).

# Filecoin Plus for large datasets

Filecoin Plus is a community program that aims to grow the Filecoin network into the decentralized storage network for humanity's most important information. As the network continues to grow, we as a community need to maintain civil online engagement. To learn more about acceptable community behavior, please check the [Filecoin Community Code of Conduct](https://github.com/filecoin-project/community/blob/master/CODE_OF_CONDUCT.md).

This repo serves as the hub for client applications for DataCap at a large scale - currently defined as > 100 TiB of DataCap.
If you wish to learn more about Filecoin Plus in general, or to apply for less than 500 TiB of DataCap, check out the following resources:

- Filecoin Plus documentation: https://docs.filecoin.io/store/filecoin-plus/
- Fil+ community governance repo: https://github.com/filecoin-project/notary-governance/
- Fil+ client on-boarding and DataCap applications: https://github.com/filecoin-project/filecoin-plus-client-onboarding
- To apply for DataCap: https://filplus.storage/apply

The process outlined below for clients looking to apply for a large amount of DataCap was initially proposed in [Issue #94 - Onboarding projects with large DataCap requirements](https://github.com/filecoin-project/notary-governance/issues/94). Through an initial pilot phase and learnings/feedback collected over time, we are currently on the third iteration of the Large Dataset Notary (LDN) process. See #217, #328, #509 for additional details.

The main difference between this process and applying for DataCap directly from a notary (via filplus.storage) is that (1) this process is significantly more public, (2) DataCap is allocated from a large multisig Notary address, and (3) DataCap is allocated in tranches.

## Current Scope

Based on conversations in various issues and governance calls, here is the current scope of the Large Dataset Notary (LDN) program. You can find relevant issues, as well as links to governance call recordings, in the [Notary Governance repo](https://github.com/filecoin-project/notary-governance). Please note that this is still an evolving conversation, so the scope is subject to change. If you would like to participate in this conversation or have feedback, please let us know! You can start a discussion topic in the [Notary Governance repo](https://github.com/filecoin-project/notary-governance/discussions), in the [fil-plus](https://filecoinproject.slack.com/archives/C01DLAPKDGX) public Slack channel, or in an upcoming [Governance call](https://calendar.google.com/calendar/embed?src=c_k1gkfoom17g0j8c6bam6uf43j0%40group.calendar.google.com&ctz=America%2FLos_Angeles).

Clients can currently apply for a **Large Dataset Notary**, which can grant them between 100 TiB and 5 PiB of total DataCap per application.

In order for a client and their dataset to be eligible:

- The dataset should be public, open, and mission-aligned with Filecoin and Filecoin Plus. This also means that the data should be accessible to anyone in the network, without requiring any special permissions or access requirements. If this is not the case, consider instead going via the E-Fil+ pathway to getting DataCap. You can read more about that [here](https://efilplus.super.site/).

- If a client wants to onboard more than 5 PiB, the recommendation is to start with a few applications and earn trust from the community. Building a positive reputation by first onboarding a smaller amount of data will make onboarding massive amounts of data much faster and smoother.

- Stored data should be readily retrievable on the network, and this should be regularly verifiable (through manual or automated verification that includes retrieving data from various miners over the course of the DataCap allocation timeframe). Each project should specify what portion of the data is retrievable and provide justification.
From there, notaries can decide during the due diligence phase whether the client's application is justified and whether or not to sign it.

- There should be no open disputes in the Fil+ ecosystem against the client while the application is open for review.

- With the current tooling and workflow, the recommendation is to use a different address for every application. If you cannot, be aware that the workaround requires manual attention; we strongly recommend against it because of the delays it creates and the complications it introduces in calculating subsequent allocations. In the short term, we can support this. If you absolutely have to go down this path, please notify Simon Kim, add this to your LDN application, and share why.

- Best practice for storing large datasets is, ideally, to store the data in 3 or more regions, with 4 or more storage provider operators or owners, and to keep at least 5 replicas of the dataset. No more than one replica should be stored with any one SP ID, and if the data cannot leave a particular geographic boundary, replication is still expected across different locations (cities, datacenters, etc.). No storage provider should exceed 30% of the total DataCap allocated to the client, and each storage provider should have a published public IP address (a small self-check sketch of these guidelines follows at the end of this section). If you cannot follow these practices due to policy or other issues, you may explain your case in the application and describe to the community what approach you will take instead. These are recommendations, not strict rules that every client must follow.

- To guard against abuse of the program's incentive structure, notaries should not sign their own applications. For the program to work, each stakeholder needs to play their part in a truthful manner.

- Datasets that have been stored previously may be copied onto the chain again over time. This can provide value to the network if it involves a new team, a new datacenter, or a new geopolitical region. However, storage providers should not store more than 20% of the duplicated data. While duplicate copies of a dataset may help the network, this should not be a reason for clients to onboard the exact same dataset repeatedly; clients should explicitly justify why the repeated dataset should be onboarded.

- To help notaries complete their due diligence more efficiently, clients should justify the amount of DataCap they are applying for, explain how their dataset is useful to the network, and provide visible proof of the size of the data being onboarded.

If you are a client who is interested in applying for a large DataCap allocation via an LDN, please see the steps outlined below.
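As a quick self-check against the distribution best practices above, here is a minimal sketch. It is illustrative only: the thresholds mirror the recommendations in this section, and the function and field names are hypothetical, not part of any official Fil+ tooling.

```js
// check_distribution_plan.mjs -- illustrative sketch, not official Fil+ tooling.

const MAX_SHARE_PER_SP = 0.30; // no single SP ID should exceed 30% of the DataCap
const MIN_UNIQUE_SPS = 4;      // 4 or more storage provider operators/owners
const MIN_REPLICAS = 5;        // at least 5 replicas of the dataset

export function checkDistributionPlan(totalDataCapTiB, allocations, replicas) {
  const problems = [];
  if (allocations.length < MIN_UNIQUE_SPS) {
    problems.push(`only ${allocations.length} SPs planned; at least ${MIN_UNIQUE_SPS} recommended`);
  }
  if (replicas < MIN_REPLICAS) {
    problems.push(`only ${replicas} replicas planned; at least ${MIN_REPLICAS} recommended`);
  }
  for (const { spId, amountTiB } of allocations) {
    const share = amountTiB / totalDataCapTiB;
    if (share > MAX_SHARE_PER_SP) {
      problems.push(`${spId} would hold ${(share * 100).toFixed(1)}% of the DataCap (max 30%)`);
    }
  }
  return problems; // an empty array means the plan matches the recommendations
}

// Example: a 1000 TiB plan where one SP exceeds the 30% guideline.
console.log(checkDistributionPlan(1000, [
  { spId: 'f01000', amountTiB: 400 },
  { spId: 'f02000', amountTiB: 200 },
  { spId: 'f03000', amountTiB: 200 },
  { spId: 'f04000', amountTiB: 200 },
], 5));
```
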
## Applying for a large DataCap allocation

Application flow:

- Client submits an application by applying on https://filplus.storage/apply or creating a GitHub issue in this repo
- Automation and the Fil+ governance team ensure that the application has been fully filled out, and a request is sent to the Root Key Holders (RKH) to set up a Notary (LDN) for this client
- Notaries and community members carry out due diligence via comments on the issue and in conversation during a Notary Governance call
- In parallel, the RKH are informed of the client application request and approve the multisig LDN to allocate DataCap to this client in tranches
- If the community agrees that the dataset is in line with the values of the Filecoin Plus program and should be approved for a DataCap allocation, 2 notaries approve the request to allocate the first tranche of DataCap

When clients use up > 75% of the prior DataCap allocation, a request for additional DataCap in the form of the next tranche is automatically kicked off (via the 'subsequent allocation bot'). Notaries have access to the on-chain data required to verify that the client is operating in good faith, in accordance with the principles of the program, and in line with the allocation strategy outlined in the original application. 2 notaries need to approve the next tranche of DataCap to be allocated to the client. The same notary cannot sign off on immediately subsequent allocations of DataCap, i.e., you need at minimum 4 unique notaries to support your application on an ongoing basis to receive multiple tranches of DataCap.

## Application flow labels

The following labels indicate the statuses of LDN applications. The most recent version of these labels was released on April 15, 2023. More comprehensive release notes can be found in [this blog](https://medium.com/filecoin-plus/ldn-label-update-part-1-label-consolidation-ae2691c78702).

- **Validated**:
  - The validated label is added to an issue when the parent comment of an LDN application is fully completed and all questions have been answered.
  - When the validated label is added, a member of the governance team will review the application and post the trigger message.
  - This trigger message signals the SSA bot (subsequent allocation bot) to initiate the tranche allocation process, which will then post the request message.

- **Ready to Sign**:
  - If the SSA bot detects the trigger message posted by the governance team, or identifies that the client is low on previously granted DataCap (<25% of the DataCap balance last granted), it will post a request message.
  - Once there is a request message, the ready to sign label should be added to the LDN application.
  - The SSA bot detects the ready to sign label, and notaries can then view the LDN application on the Fil+ registry (filplus.fil.org) for next steps.

- **Start Sign Datacap**:
  - When the first of the two required notaries has completed their due diligence and signed in support of the LDN application's DataCap allocation tranche, the proposal message is posted.
  - The start sign datacap label is added to the issue when this proposal message is posted and the SSA bot detects it.
  - On the Fil+ registry, the status is updated to indicate that one notary has already supported the LDN application. The applicant now needs a second notary to sign the tranche to release DataCap.

- **Granted**:
  - When a second notary has completed due diligence and signed in support of the LDN application's DataCap tranche request, the approval message is posted.
  - The granted label is added to the issue when this approval message is posted and the SSA bot detects it.
  - If successful, DataCap is issued for the granted allocation. The LDN application is removed from the list notaries see on the Filecoin Plus registry until the next tranche is requested, and the process repeats.

- **Total DataCap Reached**:
  - The total datacap reached label is added when the client has reached the total amount of DataCap requested in the application.

- **Error**:
  - A consolidation of what used to be a set of many error labels indicating tooling errors.
  - An error message with more information about the error is posted in the issue. One example is an error message noting that the "Address" field is not specified in the "Approved Comment."

- **Governance Review Needed**:
  - Information provided by the applicant has changed, requiring a manual check from members of the governance team focused on the client UX.

- **EFil+**:
  - Applications that are part of the E-Fil+ pipeline (read more at: https://efilplus.super.site/) will have this label.

## DataCap tranche size calculations

- First allocation: lesser of 5% of total DataCap requested or 50% of weekly allocation rate
- Second allocation: lesser of 10% of total DataCap requested or 100% of weekly allocation rate
- Third allocation: lesser of 20% of total DataCap requested or 200% of weekly allocation rate
- Fourth allocation: lesser of 40% of total DataCap requested or 400% of weekly allocation rate
- Fifth allocation onwards: lesser of 80% of total DataCap requested or 800% of weekly allocation rate

For example, a client requesting 1 PiB (1024 TiB) of total DataCap at a weekly allocation rate of 100 TiB would receive min(5% × 1024 TiB, 50% × 100 TiB) = 50 TiB in the first tranche. (A code sketch of this schedule appears at the end of this section.)

### Granting DataCap to the client
The bot will post a comment with the following structure to kick off a request for DataCap allocation:

```
## DataCap Allocation requested

#### Multisig Notary address
>

#### Client address
>

#### DataCap allocation requested
> XTiB

#### Id
> Id
```

This initiates a proposal to the multisig Notary to grant the associated amount of DataCap to the client address. Other notaries will now see this in the Filecoin Plus Registry app, where they can approve or decline the request.

In order to approve the request in the [Fil+ Registry App](https://plus.fil.org/), notaries need to sign in with their Ledger. During this initial authorization, the app checks whether the Ledger address is an approved signer on the large dataset multisig notary addresses (previously, the Organization). Notaries can then review and sign multiple large requests in a row without needing to re-authenticate for each multisig.

All notaries signing onto the LDN multisig are encouraged to track the client's use of previous DataCap allocations using on-chain information, data available on chain browsers, or Fil+ specific dashboards like https://filplus.d.interplanetary.one/ or https://filplus.info/.
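To make the tranche schedule concrete, here is a minimal sketch of the size calculation. It is an illustration of the schedule documented above; the SSA bot's actual implementation may differ, and the function name is hypothetical.

```js
// tranche_size.mjs -- a sketch of the documented tranche schedule, not the SSA bot's code.

export function trancheSizeTiB(allocationNumber, totalRequestedTiB, weeklyRateTiB) {
  // Both percentages double each round and stop growing at the fifth allocation.
  const round = Math.min(allocationNumber, 5);        // 1..5
  const pctOfTotal = 0.05 * 2 ** (round - 1);         // 5%, 10%, 20%, 40%, 80%
  const multipleOfWeekly = 0.5 * 2 ** (round - 1);    // 50%, 100%, 200%, 400%, 800%
  // The tranche is the lesser of the two amounts.
  return Math.min(pctOfTotal * totalRequestedTiB, multipleOfWeekly * weeklyRateTiB);
}

// Example: 1 PiB (1024 TiB) total requested at 100 TiB/week.
console.log(trancheSizeTiB(1, 1024, 100)); // min(51.2, 50)   = 50
console.log(trancheSizeTiB(5, 1024, 100)); // min(819.2, 800) = 800
```
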
## FVM Smart Contracts
Smart contracts can acquire DataCap just like any regular client. To do so, simply enter the f410 address of the smart contract that requires DataCap as the client address when making a request.

The process outlined above is for larger amounts of DataCap (> 500 TiB). For a smart contract's first DataCap allocation, we recommend using the auto-verifier at [verify.glif.io](https://verify.glif.io) to get 32 GiB of DataCap, as specified [here](https://docs.filecoin.io/store/filecoin-plus/overview/).

It's important to note that DataCap allocations are a one-time credit for a Filecoin address and cannot be transferred between smart contracts. If you need to redeploy the smart contract, you must request additional DataCap. To improve this experience, we are developing an FRC to allow DataCap to be held across redeployments.

## Current status

New applications are being accepted at this time, though please expect that the process will likely have some issues as we continue to test and improve its functionality.

## Retrieval Guidelines for Data Clients

1. Fil+ data clients are advised to carefully choose SPs that align with their specific data retrieval requirements.
2. Fil+ open dataset clients commit to ensuring the retrievability of open datasets by storing with SPs that serve HTTP retrievals with either [booster-http](https://boost.filecoin.io/http-retrieval/serving-files-with-booster-http) or their custom tooling (see the retrieval-probe sketch after this list).
3. Fil+ clients can enhance their reputation by holding their SPs accountable for retrievability. This may streamline acquisition of additional DataCap in the future.
4. Fil+ Notaries will consider the client's track record on retrievability as part of their due diligence.
5. Data clients and SPs should be aware of the risk of network attacks, and mitigate these risks via rate-limiting tools (e.g. set a maximum requests-per-second limit).
6. Data clients and SPs should consider implementing a throttling limit that determines the maximum bandwidth a single retrieving client can consume at any given time.
7. Private data clients (E-Fil+) should store with SPs that provide a level of retrievability consistent with the retrieval needs indicated on the DataCap application.
8. In the event of large retrievals, SPs should leverage load-balancing tooling to protect themselves.
9. Multiple SPs can share a single unsealed copy of data with the same CID. This practice is deemed acceptable as it optimizes time and resource utilization.
10. If the data client has a very good (95-100%) retrievability track record via another retrieval method (GraphSync or Bitswap), then the data client may work with Notaries to get future DataCap approval on a case-by-case basis.
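As an illustration of guideline 2, here is a minimal sketch of spot-checking HTTP retrievability across SPs. The `/piece/<pieceCID>` path follows booster-http's piece-serving convention, but that path, the endpoints, and the CIDs below are assumptions and placeholders; confirm against your SPs' actual configuration. Requires Node 18+ for the global `fetch`.

```js
// probe_retrieval.mjs -- illustrative sketch of an HTTP retrievability spot-check.

const TIMEOUT_MS = 30_000;

async function probePiece(endpoint, pieceCid) {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), TIMEOUT_MS);
  try {
    // A HEAD request is enough to confirm the SP will serve the piece.
    const res = await fetch(`${endpoint}/piece/${pieceCid}`, {
      method: 'HEAD',
      signal: controller.signal,
    });
    return res.ok;
  } catch {
    return false; // network error, timeout, or refused connection
  } finally {
    clearTimeout(timer);
  }
}

// Example: hypothetical SP endpoints and piece CIDs.
const checks = [
  { endpoint: 'http://sp-one.example.com:7777', pieceCid: 'baga6ea4seaq...' },
  { endpoint: 'http://sp-two.example.com:7777', pieceCid: 'baga6ea4seaq...' },
];
for (const { endpoint, pieceCid } of checks) {
  console.log(endpoint, (await probePiece(endpoint, pieceCid)) ? 'retrievable' : 'FAILED');
}
```

--------------------------------------------------------------------------------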