├── tests
│   ├── __init__.py
│   ├── data
│   │   ├── dot.png
│   │   ├── face.jpg
│   │   ├── test.pdf
│   │   ├── test.tif
│   │   ├── test.wav
│   │   ├── english.png
│   │   ├── coutwildrnp.shp
│   │   ├── coutwildrnp.shx
│   │   ├── feather-0_3_1.feather
│   │   ├── text.txt
│   │   ├── notebook.ipynb
│   │   └── jupyter_tensorboard.ipynb
│   ├── test_vtk.py
│   ├── test_shap.py
│   ├── test_kmapper.py
│   ├── common.py
│   ├── test_earthengine.py
│   ├── test_bokeh.py
│   ├── test_wandb.py
│   ├── test_essentia.py
│   ├── test_cartopy.py
│   ├── test_pysal.py
│   ├── test_tensorflow_cloud.py
│   ├── test_sympy.py
│   ├── test_opencv.py
│   ├── test_shapely.py
│   ├── test_spacy.py
│   ├── test_holoviews.py
│   ├── test_levenshtein.py
│   ├── test_nltk.py
│   ├── test_plotly.py
│   ├── test_fasttext.py
│   ├── test_gensim.py
│   ├── test_annoy.py
│   ├── test_learntools.py
│   ├── test_fiona.py
│   ├── test_openslide.py
│   ├── test_pydub.py
│   ├── test_pyfasttext.py
│   ├── test_imports.py
│   ├── test_vowpalwabbit.py
│   ├── test_jieba.py
│   ├── test_pytorch_ignite.py
│   ├── test_ggplot.py
│   ├── test_torchaudio.py
│   ├── test_albumentations.py
│   ├── test_flask.py
│   ├── test_kaggle_kernel_credentials.py
│   ├── test_h2o.py
│   ├── test_geoviews.py
│   ├── test_hyperopt.py
│   ├── test_xvfbwrapper.py
│   ├── test_skimage.py
│   ├── test_pycuda.py
│   ├── test_pycrypto.py
│   ├── test_hep_ml.py
│   ├── test_bigquery_storage.py
│   ├── test_dlib.py
│   ├── test_pandas.py
│   ├── test_papermill.py
│   ├── test_nvidia.py
│   ├── test_seaborn.py
│   ├── test_bqplot.py
│   ├── test_optuna.py
│   ├── test_rgf.py
│   ├── test_qgrid.py
│   ├── test_wordbatch.py
│   ├── test_fbprophet.py
│   ├── test_pydegensac.py
│   ├── test_gcloud.py
│   ├── test_pytext.py
│   ├── test_matplotlib.py
│   ├── test_category_encoders.py
│   ├── test_tsfresh.py
│   ├── test_datatable.py
│   ├── test_kornia.py
│   ├── test_numpy.py
│   ├── test_sklearn.py
│   ├── test_jax.py
│   ├── test_cupy.py
│   ├── test_jupyter_nbconvert.py
│   ├── test_pytesseract.py
│   ├── test_tensorflow_bigquery.py
│   ├── test_theano.py
│   ├── test_rapids.py
│   ├── test_allennlp.py
│   ├── test_geopandas.py
│   ├── test_tensorflow_addons.py
│   ├── test_nnabla.py
│   ├── test_easyocr.py
│   ├── test_jupytertensorboard.py
│   ├── test_fastai.py
│   ├── test_xgboost.py
│   ├── test_ucaip.py
│   ├── test_bayes_opt.py
│   ├── test_transformers.py
│   ├── test_pytorch.py
│   ├── test_pdpbox.py
│   ├── test_keras_tuner.py
│   ├── test_mxnet.py
│   ├── test_plotly_express.py
│   ├── test_tensorflow_credentials.py
│   ├── test_datashader.py
│   ├── test_pytorch_lightning.py
│   ├── test_gcs.py
│   ├── test_dipy.py
│   ├── test_lightgbm.py
│   ├── test_pykalman.py
│   ├── test_vision.py
│   ├── test_natural_language.py
│   ├── test_tensorflow.py
│   ├── test_video_intelligence.py
│   ├── test_keras.py
│   ├── test_bigquery_proxy.py
│   ├── test_user_session.py
│   ├── test_catalyst.py
│   ├── test_translation.py
│   ├── test_automl.py
│   ├── test_datasets.py
│   └── test_bigquery.py
├── .gitignore
├── tensorflow-whl
│   ├── tensorflow-gcs-config
│   │   ├── third_party
│   │   │   ├── BUILD
│   │   │   ├── tensorflow
│   │   │   │   ├── BUILD
│   │   │   │   ├── BUILD.tpl
│   │   │   │   └── tf_configure.bzl
│   │   │   └── jsoncpp.BUILD
│   │   ├── MANIFEST.in
│   │   ├── .gitignore
│   │   ├── README.md
│   │   ├── setup.py
│   │   ├── tensorflow_gcs_config
│   │   │   ├── BUILD
│   │   │   ├── gcs_config_ops.cc
│   │   │   └── __init__.py
│   │   ├── WORKSPACE
│   │   ├── Dockerfile
│   │   └── build.py
│   ├── README.md
│   ├── build
│   ├── push
│   ├── Jenkinsfile
│   ├── CHANGELOG.md
│   └── Dockerfile
├── patches
│   ├── template_conf.json
│   ├── imagemagick-policy.xml
│   ├── nbconvert-extensions.tpl
│   ├── kaggle_session.py
│   ├── kaggle_datasets.py
│   ├── tensorboard
│   │   └── notebook.py
│   ├── sitecustomize.py
│   ├── kaggle_web_client.py
│   ├── log.py
│   └── kaggle_secrets.py
├── clean-layer.sh
├── dev.Dockerfile
├── tools
│   └── pip_list_versions.py
├── push
├── diff
├── README.md
├── test
├── gpu.Dockerfile
└── Jenkinsfile

/tests/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | .idea/
3 | .vscode
4 | .mypy_cache
--------------------------------------------------------------------------------
/tensorflow-whl/tensorflow-gcs-config/third_party/BUILD:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/tensorflow-whl/tensorflow-gcs-config/third_party/tensorflow/BUILD:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/tests/data/dot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/docker-python/master/tests/data/dot.png
--------------------------------------------------------------------------------
/tests/data/face.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/docker-python/master/tests/data/face.jpg
--------------------------------------------------------------------------------
/tests/data/test.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/docker-python/master/tests/data/test.pdf
--------------------------------------------------------------------------------
/tests/data/test.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/docker-python/master/tests/data/test.tif
--------------------------------------------------------------------------------
/tests/data/test.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/docker-python/master/tests/data/test.wav
--------------------------------------------------------------------------------
/tests/data/english.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/docker-python/master/tests/data/english.png
--------------------------------------------------------------------------------
/tests/data/coutwildrnp.shp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/docker-python/master/tests/data/coutwildrnp.shp
--------------------------------------------------------------------------------
/tests/data/coutwildrnp.shx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/docker-python/master/tests/data/coutwildrnp.shx
--------------------------------------------------------------------------------
/tensorflow-whl/tensorflow-gcs-config/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include tensorflow_gcs_config/*.py
2 | include tensorflow_gcs_config/*.so
3 | 
--------------------------------------------------------------------------------
/tests/data/feather-0_3_1.feather:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/adaptive/docker-python/master/tests/data/feather-0_3_1.feather
--------------------------------------------------------------------------------
/tests/test_vtk.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | 
4 | class TestVTK(unittest.TestCase):
5 |     def test_import(self):
6 |         import vtk
7 | 
--------------------------------------------------------------------------------
/tests/test_shap.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import shap
4 | 
5 | class TestShap(unittest.TestCase):
6 |     def test_init(self):
7 |         shap.initjs()
8 | 
--------------------------------------------------------------------------------
/tests/test_kmapper.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import kmapper as km
4 | 
5 | class TestKMapper(unittest.TestCase):
6 |     def test_init(self):
7 |         km.KeplerMapper()
8 | 
--------------------------------------------------------------------------------
/tests/common.py:
--------------------------------------------------------------------------------
1 | """Common testing setup"""
2 | 
3 | import os
4 | import unittest
5 | 
6 | gpu_test = unittest.skipIf(len(os.environ.get('CUDA_VERSION', '')) == 0, 'Not running GPU tests')
7 | 
--------------------------------------------------------------------------------
/tests/test_earthengine.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import ee
3 | 
4 | class TestEarthEngine(unittest.TestCase):
5 |     def test_version(self):
6 |         self.assertIsNotNone(ee.__version__)
7 | 
--------------------------------------------------------------------------------
/tests/test_bokeh.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from bokeh.plotting import figure
4 | 
5 | class TestBokeh(unittest.TestCase):
6 |     def test_figure(self):
7 |         figure(title="Hello World")
8 | 
--------------------------------------------------------------------------------
/tests/test_wandb.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import wandb
4 | 
5 | class TestWandB(unittest.TestCase):
6 | 
7 |     def test_version(self):
8 |         self.assertIsNotNone(wandb.__version__)
9 | 
--------------------------------------------------------------------------------
/tests/test_essentia.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from essentia.standard import Windowing
4 | 
5 | class TestEssentia(unittest.TestCase):
6 |     def test_windowing(self):
7 |         Windowing(type = 'hann')
8 | 
--------------------------------------------------------------------------------
/tests/test_cartopy.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import cartopy.crs as ccrs
4 | 
5 | class TestCartopy(unittest.TestCase):
6 |     def test_projection(self):
7 |         ccrs.PlateCarree()
8 |         ccrs.Mollweide()
9 | 
--------------------------------------------------------------------------------
/tests/test_pysal.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from pysal.lib.weights import lat2W
4 | 
5 | class TestPysal(unittest.TestCase):
6 |     def test_distance_band(self):
7 |         w = lat2W(4,4)
8 |         self.assertEqual(16, w.n)
--------------------------------------------------------------------------------
/tests/test_tensorflow_cloud.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import tensorflow_cloud as tfc
4 | 
5 | 
6 | class TestTensorflowCloud(unittest.TestCase):
7 |     def test_remote(self):
8 |         self.assertFalse(tfc.remote())
9 | 
--------------------------------------------------------------------------------
/tests/test_sympy.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import numpy as np
4 | import sympy
5 | 
6 | class TestSympy(unittest.TestCase):
7 |     def test_matrix(self):
8 |         self.assertEqual((2, 2), sympy.Matrix([[0, 1], [1, 0]]).shape)
--------------------------------------------------------------------------------
/tensorflow-whl/tensorflow-gcs-config/.gitignore:
--------------------------------------------------------------------------------
1 | bazel-bin
2 | bazel-genfiles
3 | bazel-out
4 | bazel-tensorflow-gcs-config
5 | bazel-out
6 | bazel-testlogs
7 | build
8 | dist
9 | __pycache__
10 | *.egg-info
11 | *.so
12 | .bazelrc
13 | 
--------------------------------------------------------------------------------
/tests/data/text.txt:
--------------------------------------------------------------------------------
1 | Kaggle is a platform for predictive modelling and analytics competitions in which statisticians and data miners compete to produce the best models for predicting and describing the datasets uploaded by companies and users.
2 | 
--------------------------------------------------------------------------------
/tests/test_opencv.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import cv2
4 | 
5 | class TestOpenCV(unittest.TestCase):
6 |     def test_imread(self):
7 |         img = cv2.imread('/input/tests/data/dot.png')
8 | 
9 |         self.assertEqual(1, img.shape[0])
10 | 
--------------------------------------------------------------------------------
/tests/test_shapely.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from shapely.geometry import Point
4 | 
5 | class TestShapely(unittest.TestCase):
6 |     def test_geometry(self):
7 |         p = Point(0.0, 0.0)
8 | 
9 |         self.assertEqual("Point", p.geom_type)
10 | 
--------------------------------------------------------------------------------
/tests/test_spacy.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import spacy
4 | 
5 | class TestSpacy(unittest.TestCase):
6 |     def test_model(self):
7 |         nlp = spacy.load('en')
8 |         doc = nlp('This is a sentence.')
9 |         self.assertEqual(5, len(doc))
10 | 
--------------------------------------------------------------------------------
/tests/test_holoviews.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import holoviews as hv
4 | 
5 | class TestHoloviews(unittest.TestCase):
6 |     def test_curve(self):
7 |         xs = range(-10,11)
8 |         ys = [100-x**2 for x in xs]
9 | 
10 |         hv.Curve((xs, ys))
11 | 
--------------------------------------------------------------------------------
/tests/test_levenshtein.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import Levenshtein
4 | 
5 | class TestLevenshtein(unittest.TestCase):
6 |     def test_distance(self):
7 |         distance = Levenshtein.distance('Levenshtein', 'Lenvinsten')
8 | 
9 |         self.assertEqual(4, distance)
--------------------------------------------------------------------------------
/tests/test_nltk.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import nltk
4 | 
5 | class TestNLTK(unittest.TestCase):
6 |     def test_tokenize(self):
7 |         tokens = nltk.word_tokenize("At eight o'clock")
8 | 
9 |         self.assertEqual(["At", "eight", "o'clock"], tokens)
10 | 
--------------------------------------------------------------------------------
/tests/test_plotly.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import plotly.graph_objs as go
4 | 
5 | class TestPlotly(unittest.TestCase):
6 |     def test_figure(self):
7 |         trace = {'x': [1, 2], 'y': [1, 3]}
8 |         data = [ trace ]
9 |         go.Figure(data=data)
10 | 
--------------------------------------------------------------------------------
/tests/test_fasttext.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import fasttext
4 | 
5 | class TestFastText(unittest.TestCase):
6 |     def test_tokenize(self):
7 |         tokens = fasttext.FastText.tokenize("Hello World")
8 | 
9 |         self.assertEqual(["Hello", "World"], tokens)
10 | 
--------------------------------------------------------------------------------
/tests/test_gensim.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from gensim import corpora
4 | 
5 | class TestGensim(unittest.TestCase):
6 |     def test_dictionary(self):
7 |         dic = corpora.Dictionary([['lorem', 'ipsum']])
8 | 
9 |         self.assertEqual(2, len(dic.token2id))
10 | 
--------------------------------------------------------------------------------
/tests/test_annoy.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from annoy import AnnoyIndex
4 | 
5 | 
6 | class TestAnnoy(unittest.TestCase):
7 |     def test_tree(self):
8 |         t = AnnoyIndex(5, 'angular')
9 |         t.add_item(1, [1,2,3,4,5])
10 | 
11 |         self.assertTrue(t.build(1))
12 | 
--------------------------------------------------------------------------------
/tests/test_learntools.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from learntools.core import binder; binder.bind(globals())
4 | from learntools.python.ex1 import *
5 | 
6 | class TestLearnTools(unittest.TestCase):
7 |     def test_check(self):
8 |         color="blue"
9 |         q0.check()
10 | 
--------------------------------------------------------------------------------
/tests/test_fiona.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import fiona
4 | import pandas as pd
5 | 
6 | class TestFiona(unittest.TestCase):
7 |     def test_read(self):
8 |         with fiona.open("/input/tests/data/coutwildrnp.shp") as source:
9 |             self.assertEqual(67, len(source))
10 | 
11 | 
--------------------------------------------------------------------------------
/tests/test_openslide.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from openslide import open_slide
4 | 
5 | class TestOpenslide(unittest.TestCase):
6 |     def test_read_tif(self):
7 |         slide = open_slide('/input/tests/data/test.tif')
8 | 
9 |         self.assertEqual(1, slide.level_count)
10 | 
--------------------------------------------------------------------------------
/tests/test_pydub.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from pydub import AudioSegment
4 | 
5 | 
6 | class TestPydub(unittest.TestCase):
7 |     def test_import(self):
8 |         sound = AudioSegment.from_file('/input/tests/data/test.wav')
9 | 
10 |         self.assertEqual(1810, len(sound))
11 | 
12 | 
--------------------------------------------------------------------------------
/tests/test_pyfasttext.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from pyfasttext import FastText
4 | 
5 | class TestPyFasttext(unittest.TestCase):
6 |     def test_vector(self):
7 |         model = FastText()
8 | 
9 |         model.supervised(input='/input/tests/data/text.txt', output='model', epoch=1, lr=0.7)
10 | 
--------------------------------------------------------------------------------
/tests/test_imports.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | class TestImport(unittest.TestCase):
4 |     # Basic import tests for packages without any dedicated tests.
5 |     def test_basic(self):
6 |         import bq_helper
7 |         import cleverhans
8 |         import tensorflow_gcs_config
9 |         import tensorflow_datasets
10 | 
--------------------------------------------------------------------------------
/tests/test_vowpalwabbit.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from vowpalwabbit import pyvw
4 | 
5 | class TestVowpalwabbit(unittest.TestCase):
6 |     def test_basic(self):
7 |         vw = pyvw.vw(quiet=True)
8 |         ex = vw.example('1 | a b c')
9 |         vw.learn(ex)
10 |         self.assertGreater(vw.predict(ex), 0)
11 | 
--------------------------------------------------------------------------------
/tests/test_jieba.py:
--------------------------------------------------------------------------------
1 | # encoding=utf-8
2 | import unittest
3 | 
4 | import jieba
5 | 
6 | 
7 | class TestJieba(unittest.TestCase):
8 |     def test_text_split(self):
9 |         sentence = "我爱北京天安门"
10 |         seg_list = jieba.cut(sentence)
11 |         seg_list = list(seg_list)
12 | 
13 |         self.assertEqual(4, len(seg_list))
--------------------------------------------------------------------------------
/tests/test_pytorch_ignite.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from ignite.engine import Engine
4 | 
5 | 
6 | class TestPytorchIgnite(unittest.TestCase):
7 |     def test_engine(self):
8 | 
9 |         def update_fn(engine, batch):
10 |             pass
11 | 
12 |         engine = Engine(update_fn)
13 |         engine.run([0, 1, 2])
14 | 
--------------------------------------------------------------------------------
/tests/test_ggplot.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import os.path
3 | 
4 | from ggplot import *
5 | 
6 | class TestGgplot(unittest.TestCase):
7 | 
8 |     def test_plot(self):
9 |         p = ggplot(aes(x='mpg'), data=mtcars) + geom_histogram()
10 |         p.save("myplot.png")
11 | 
12 |         self.assertTrue(os.path.isfile("myplot.png"))
13 | 
--------------------------------------------------------------------------------
/tests/test_torchaudio.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import torchaudio
4 | 
5 | 
6 | class TestTorchaudio(unittest.TestCase):
7 |     def test_import(self):
8 |         waveform, sample_rate = torchaudio.load('/input/tests/data/test.wav')
9 | 
10 |         self.assertEqual(2, len(waveform))
11 |         self.assertEqual(44100, sample_rate)
12 | 
--------------------------------------------------------------------------------
/tests/test_albumentations.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import cv2
4 | from albumentations import HorizontalFlip
5 | 
6 | class TestAlbumentations(unittest.TestCase):
7 |     def test_rotate(self):
8 |         image = cv2.imread('/input/tests/data/dot.png')
9 |         aug = HorizontalFlip(p=1)
10 |         image_rotated = aug(image=image)['image']
11 | 
--------------------------------------------------------------------------------
/tests/test_flask.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from flask import Flask, request
4 | 
5 | class TestFlask(unittest.TestCase):
6 |     def test_request(self):
7 |         app = Flask(__name__)
8 |         with app.test_request_context('/foo', method='POST'):
9 |             assert request.path == '/foo'
10 |             assert request.method == 'POST'
11 | 
--------------------------------------------------------------------------------
/patches/template_conf.json:
--------------------------------------------------------------------------------
1 | {
2 |   "base_template": "classic",
3 |   "mimetypes": {
4 |     "text/html": true
5 |   },
6 |   "preprocessors": {
7 |     "100-pygments": {
8 |       "type": "nbconvert.preprocessors.CSSHTMLHeaderPreprocessor",
9 |       "enabled": true,
10 |       "style": "default"
11 |     }
12 |   }
13 | }
--------------------------------------------------------------------------------
/tests/test_kaggle_kernel_credentials.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from kaggle_secrets import GcpTarget
4 | from kaggle_gcp import KaggleKernelCredentials
5 | 
6 | class TestKaggleKernelCredentials(unittest.TestCase):
7 | 
8 |     def test_default_target(self):
9 |         creds = KaggleKernelCredentials()
10 |         self.assertEqual(GcpTarget.BIGQUERY, creds.target)
11 | 
--------------------------------------------------------------------------------
/tests/test_h2o.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import h2o
4 | 
5 | class TestH2o(unittest.TestCase):
6 |     def tearDown(self):
7 |         h2o.cluster().shutdown(False)
8 | 
9 |     def test_init_read(self):
10 |         h2o.init()
11 |         train = h2o.import_file("/input/tests/data/train.csv", destination_frame="train")
12 |         self.assertEqual(100, train.nrow)
13 | 
--------------------------------------------------------------------------------
/tests/test_geoviews.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import geoviews.feature as gf
4 | 
5 | from cartopy import crs
6 | 
7 | class TestGeoviews(unittest.TestCase):
8 |     def test_viz(self):
9 |         (gf.ocean + gf.land + gf.ocean * gf.land * gf.coastline * gf.borders).options(
10 |             'Feature', projection=crs.Geostationary(), global_extent=True
11 |         ).cols(3)
12 | 
--------------------------------------------------------------------------------
/tests/test_hyperopt.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from hyperopt import fmin, tpe, hp
4 | 
5 | class TestHyperopt(unittest.TestCase):
6 |     def test_find_min(self):
7 |         best = fmin(
8 |             fn=lambda x: x ** 2,
9 |             space=hp.uniform('x', -10, 10),
10 |             algo=tpe.suggest,
11 |             max_evals=1,
12 |         )
13 |         self.assertIn('x', best)
--------------------------------------------------------------------------------
/tests/test_xvfbwrapper.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import os.path
3 | from xvfbwrapper import Xvfb
4 | 
5 | class TestXvfbwrapper(unittest.TestCase):
6 |     def test_xvfb(self):
7 |         vdisplay = Xvfb()
8 |         vdisplay.start()
9 |         display_var = ':{}'.format(vdisplay.new_display)
10 |         self.assertEqual(display_var, os.environ['DISPLAY'])
11 |         vdisplay.stop()
12 | 
--------------------------------------------------------------------------------
/tests/test_skimage.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from skimage import data, filters, io
4 | 
5 | class TestSkImage(unittest.TestCase):
6 |     def test_filter(self):
7 |         image = data.coins()
8 |         filters.sobel(image)
9 | 
10 |     def test_codecs(self):
11 |         img = io.MultiImage('/input/tests/data/test.tif')
12 | 
13 |         self.assertEqual((10, 10, 4), img[0].shape)
--------------------------------------------------------------------------------
/tests/test_pycuda.py:
--------------------------------------------------------------------------------
1 | """Tests for general GPU support"""
2 | 
3 | import unittest
4 | 
5 | from common import gpu_test
6 | 
7 | 
8 | class TestPycuda(unittest.TestCase):
9 |     @gpu_test
10 |     def test_pycuda(self):
11 |         import pycuda.driver
12 |         pycuda.driver.init()
13 |         gpu_name = pycuda.driver.Device(0).name()
14 |         self.assertNotEqual(0, len(gpu_name))
15 | 
--------------------------------------------------------------------------------
/tests/test_pycrypto.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from Crypto.Hash import SHA256
4 | 
5 | class TestPycrypto(unittest.TestCase):
6 |     def test_digest(self):
7 |         hash = SHA256.new()
8 |         hash.update('message'.encode('utf-8'))
9 |         h = hash.digest()
10 |         self.assertEqual(h, b'\xabS\n\x13\xe4Y\x14\x98+y\xf9\xb7\xe3\xfb\xa9\x94\xcf\xd1\xf3\xfb"\xf7\x1c\xea\x1a\xfb\xf0+F\x0cm\x1d')
11 | 
--------------------------------------------------------------------------------
/tests/test_hep_ml.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import numpy as np
4 | 
5 | from hep_ml.preprocessing import BinTransformer
6 | 
7 | class TestHepML(unittest.TestCase):
8 |     def test_preprocessing(self):
9 |         X = np.array([[1.1, 1.2, 1.3],[5.1, 6.4, 10.5]])
10 |         transformer = BinTransformer().fit(X)
11 |         new_X = transformer.transform(X)
12 | 
13 |         self.assertEqual((2, 3), new_X.shape)
14 | 
--------------------------------------------------------------------------------
/tests/test_bigquery_storage.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | class TestBigQueryStorage(unittest.TestCase):
4 | 
5 |     def test_ensure_bq_storage_is_not_installed(self):
6 |         # b/183041606#comment5: Ensures bigquery_storage is not installed.
7 |         # bigquery falls back on using regular BQ queries which are supported by the BQ proxy.
8 |         with self.assertRaises(ImportError):
9 |             from google.cloud import bigquery_storage
--------------------------------------------------------------------------------
/patches/imagemagick-policy.xml:
--------------------------------------------------------------------------------
1 | 
2 | 
4 | 
5 | 
6 | 
9 | ]>
10 | 
--------------------------------------------------------------------------------
/tests/test_dlib.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import cv2
4 | import dlib
5 | 
6 | 
7 | class TestDLib(unittest.TestCase):
8 |     def test_dlib_face_detector(self):
9 |         detector = dlib.get_frontal_face_detector()
10 |         image = cv2.imread('/input/tests/data/face.jpg')
11 |         image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
12 |         faces = detector(image_gray, 1)
13 | 
14 |         self.assertEqual(len(faces), 1)
15 | 
--------------------------------------------------------------------------------
/tests/test_pandas.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import pandas as pd
4 | 
5 | class TestPandas(unittest.TestCase):
6 |     def test_read_csv(self):
7 |         data = pd.read_csv("/input/tests/data/train.csv")
8 | 
9 |         self.assertEqual(100, len(data.index))
10 | 
11 |     def test_read_feather(self):
12 |         data = pd.read_feather("/input/tests/data/feather-0_3_1.feather")
13 | 
14 |         self.assertEqual(10, data.size)
15 | 
--------------------------------------------------------------------------------
/tests/test_papermill.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import subprocess
4 | 
5 | class TestPapermill(unittest.TestCase):
6 |     def test_papermill(self):
7 |         result = subprocess.run([
8 |             'papermill',
9 |             '/input/tests/data/notebook.ipynb',
10 |             '-',
11 |         ], stdout=subprocess.PIPE)
12 | 
13 |         self.assertEqual(0, result.returncode)
14 |         self.assertTrue(b'999' in result.stdout)
15 | 
--------------------------------------------------------------------------------
/tests/test_nvidia.py:
--------------------------------------------------------------------------------
1 | """Tests for general GPU support"""
2 | 
3 | import subprocess
4 | import unittest
5 | 
6 | from common import gpu_test
7 | 
8 | 
9 | class TestNvidia(unittest.TestCase):
10 |     @gpu_test
11 |     def test_system_management_interface(self):
12 |         smi = subprocess.Popen(['nvidia-smi'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
13 |         smi.communicate()
14 |         self.assertEqual(0, smi.returncode)
15 | 
--------------------------------------------------------------------------------
/tests/test_seaborn.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from distutils.version import StrictVersion
4 | 
5 | import seaborn as sns
6 | 
7 | class TestSeaborn(unittest.TestCase):
8 |     # Fails if seaborn gets downgraded by other package installations.
9 |     def test_version(self):
10 |         self.assertGreaterEqual(StrictVersion(sns.__version__), StrictVersion("0.9.0"))
11 | 
12 | 
13 |     def test_option(self):
14 |         sns.set(style="darkgrid")
15 | 
--------------------------------------------------------------------------------
/tests/test_bqplot.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import numpy as np
3 | import bqplot.pyplot as plt
4 | 
5 | class TestBqplot(unittest.TestCase):
6 |     def test_figure(self):
7 |         size = 100
8 |         scale = 100.0
9 |         np.random.seed(0)
10 |         x_data = np.arange(size)
11 |         y_data = np.cumsum(np.random.randn(size) * scale)
12 |         fig = plt.figure(title='First Example')
13 |         plt.plot(y_data)
14 |         fig.save_png()
15 | 
--------------------------------------------------------------------------------
/tests/test_optuna.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import optuna
4 | 
5 | 
6 | class TestOptuna(unittest.TestCase):
7 | 
8 |     def test_study(self):
9 | 
10 |         def objective(trial):
11 |             x = trial.suggest_uniform('x', -1., 1.)
12 |             return x ** 2
13 | 
14 |         n_trials = 20
15 |         study = optuna.create_study()
16 |         study.optimize(objective, n_trials=n_trials)
17 |         self.assertEqual(len(study.trials), n_trials)
18 | 
--------------------------------------------------------------------------------
/tests/test_rgf.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from sklearn import datasets
4 | from rgf.sklearn import RGFClassifier
5 | 
6 | class TestRGF(unittest.TestCase):
7 |     def test_classifier(self):
8 |         iris = datasets.load_iris()
9 |         X, y = iris.data, iris.target
10 | 
11 |         rgf = RGFClassifier(max_leaf=400,
12 |                             algorithm="RGF_Sib",
13 |                             test_interval=100,
14 |                             n_iter=1)
15 | 
16 |         rgf.fit(X,y)
17 | 
--------------------------------------------------------------------------------
/tests/test_qgrid.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import numpy as np
4 | import pandas as pd
5 | 
6 | from qgrid import QgridWidget
7 | 
8 | 
9 | class TestQgrid(unittest.TestCase):
10 |     def test_nans(self):
11 |         df = pd.DataFrame([(pd.Timestamp('2017-02-02'), np.nan),
12 |                            (4, 2),
13 |                            ('foo', 'bar')])
14 |         view = QgridWidget(df=df)
15 | 
16 |         self.assertIsNotNone(view.get_changed_df())
17 | 
--------------------------------------------------------------------------------
/tests/test_wordbatch.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from wordbatch.extractors import WordBag
4 | from wordbatch.pipelines import WordBatch
5 | 
6 | class TestWordBatch(unittest.TestCase):
7 |     def test_wordbatch(self):
8 |         WordBatch(extractor=(WordBag, {
9 |             "hash_ngrams":2,
10 |             "hash_ngrams_weights":[0.5, -1.0],
11 |             "hash_size":2**23,
12 |             "norm":'l2',
13 |             "tf":'log',
14 |             "idf":50.0}))
15 | 
--------------------------------------------------------------------------------
/tests/test_fbprophet.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import numpy as np
4 | import pandas as pd
5 | 
6 | from fbprophet import Prophet
7 | 
8 | class TestFbProphet(unittest.TestCase):
9 |     def test_fit(self):
10 |         train = pd.DataFrame({
11 |             'ds': np.array(['2012-05-18', '2012-05-20']),
12 |             'y': np.array([38.23, 21.25])
13 |         })
14 | 
15 |         forecaster = Prophet(mcmc_samples=1)
16 |         forecaster.fit(train, control={'adapt_engaged': False})
17 | 
--------------------------------------------------------------------------------
/patches/nbconvert-extensions.tpl:
--------------------------------------------------------------------------------
1 | {#
2 |   Jinja template to inject notebook cell metadata to enhance generated HTML output
3 |   All cell metadata starting with '_kg_' will be included with its value ({key}-{value})
4 |   as a class in the cell's DIV container
5 | #}
6 | 
7 | {% extends 'classic/index.html.j2'%}
8 | {% block any_cell %}
9 | 
10 | {{ super() }}
11 | 
12 | {% endblock any_cell %}
--------------------------------------------------------------------------------
/tests/test_pydegensac.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import pydegensac
4 | import numpy as np
5 | 
6 | 
7 | class TestPydegensac(unittest.TestCase):
8 |     def test_find_homography(self):
9 |         src_pts = np.float32([ [0,0],[0,1],[1,1],[1,0] ]).reshape(-1,2)
10 |         dst_pts = np.float32([ [0,0],[0,-1],[-1,-1],[-1,0] ]).reshape(-1,2)
11 | 
12 |         H, mask = pydegensac.findHomography(src_pts, dst_pts, 4, 1)
13 | 
14 |         self.assertEqual(3, len(H))
15 |         self.assertEqual(4, len(mask))
16 | 
17 | 
18 | 
19 | 
--------------------------------------------------------------------------------
/tensorflow-whl/tensorflow-gcs-config/README.md:
--------------------------------------------------------------------------------
1 | # Tensorflow GCS Configuration Ops
2 | 
3 | This package provides TF 2.X compatible versions of the
4 | `tf.contrib.cloud.configure_gcs()` operations.
5 | 
6 | This is a slightly modified and repackaged version of the GCS code in TensorFlow I/O,
7 | in particular the [tfio.gcs](https://www.tensorflow.org/io/api_docs/python/tfio/gcs) module.
8 | 
9 | This is a copy of the internal source released as the
10 | [tensorflow-gcs-config](https://pypi.org/project/tensorflow-gcs-config/) package.
11 | 
--------------------------------------------------------------------------------
/tests/test_gcloud.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | import unittest
4 | 
5 | class TestGcloud(unittest.TestCase):
6 |     def test_version(self):
7 |         result = subprocess.run([
8 |             'gcloud',
9 |             '--version',
10 |         ],
11 |             stdout=subprocess.PIPE,
12 |             stderr=subprocess.PIPE,
13 |         )
14 | 
15 |         print("gcloud version: ", result)
16 | 
17 |         self.assertEqual(0, result.returncode)
18 |         self.assertIn(b'Google Cloud SDK', result.stdout)
19 | 
--------------------------------------------------------------------------------
/tests/test_pytext.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from pytext.config.field_config import FeatureConfig
4 | from pytext.data.featurizer import InputRecord, SimpleFeaturizer
5 | 
6 | class TestPyText(unittest.TestCase):
7 | 
8 |     def test_tokenize(self):
9 |         featurizer = SimpleFeaturizer.from_config(
10 |             SimpleFeaturizer.Config(), FeatureConfig()
11 |         )
12 | 
13 |         tokens = featurizer.featurize(InputRecord(raw_text="At eight o'clock")).tokens
14 |         self.assertEqual(['at', 'eight', "o'clock"], tokens)
15 | 
--------------------------------------------------------------------------------
/tests/test_matplotlib.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import os.path
3 | 
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | 
7 | from mpl_toolkits.basemap import Basemap
8 | 
9 | class TestMatplotlib(unittest.TestCase):
10 |     def test_plot(self):
11 |         plt.plot(np.linspace(0,1,50), np.random.rand(50))
12 |         plt.savefig("plot1.png")
13 | 
14 |         self.assertTrue(os.path.isfile("plot1.png"))
15 | 
16 |     def test_basemap(self):
17 |         m = Basemap(width=100,height=100,projection='aeqd', lat_0=40,lon_0=-105)
18 |         self.assertEqual(0, m.xmin)
19 | 
--------------------------------------------------------------------------------
/tensorflow-whl/tensorflow-gcs-config/third_party/tensorflow/BUILD.tpl:
--------------------------------------------------------------------------------
1 | package(default_visibility = ["//visibility:public"])
2 | 
3 | cc_library(
4 |     name = "tf_header_lib",
5 |     hdrs = [":tf_header_include"],
6 |     includes = ["include"],
7 |     visibility = ["//visibility:public"],
8 | )
9 | 
10 | cc_library(
11 |     name = "libtensorflow_framework",
12 |     srcs = [":libtensorflow_framework.so"],
13 |     #data = ["lib/libtensorflow_framework.so"],
14 |     visibility = ["//visibility:public"],
15 | )
16 | 
17 | %{TF_HEADER_GENRULE}
18 | %{TF_SHARED_LIBRARY_GENRULE}
19 | 
--------------------------------------------------------------------------------
/tests/test_category_encoders.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from category_encoders import CountEncoder
4 | import pandas as pd
5 | 
6 | 
7 | ## Need to make sure we have CountEncoder available from the category_encoders library
8 | class TestCategoryEncoders(unittest.TestCase):
9 |     def test_count_encoder(self):
10 |         encoder = CountEncoder(cols="data")
11 | 
12 |         data = pd.DataFrame([1, 2, 3, 1, 4, 5, 3, 1], columns=["data"])
13 | 
14 |         encoded = encoder.fit_transform(data)
15 |         self.assertTrue((encoded.data == [3, 1, 2, 3, 1, 1, 2, 3]).all())
16 | 
17 | 
--------------------------------------------------------------------------------
/tests/test_tsfresh.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import pandas as pd
4 | import numpy as np
5 | 
6 | from tsfresh import extract_features
7 | 
8 | class TestTsFresh(unittest.TestCase):
9 |     def test_extract_feature(self):
10 |         ts = pd.DataFrame({
11 |             'id': np.array(['a', 'a', 'a', 'b', 'b', 'b']),
12 |             'time': np.array([0,1,2,0,1,2]),
13 |             'x': np.array([3,4,5,7,8,10])
14 |         })
15 |         extracted_features = extract_features(ts, column_id='id', column_sort='time')
16 |         self.assertEqual(2, len(extracted_features))
17 | 
18 | 
19 | 
--------------------------------------------------------------------------------
/tests/test_datatable.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import datatable as dt
3 | from datatable.internal import frame_integrity_check
4 | from datatable import ltype
5 | 
6 | class TestDatatable(unittest.TestCase):
7 |     def test_fread(self):
8 |         d0 = dt.fread(
9 |             "L,T,U,D\n"
10 |             "true,True,TRUE,1\n"
11 |             "false,False,FALSE,0\n"
12 |             ",,,\n"
13 |         )
14 |         frame_integrity_check(d0)
15 |         assert d0.shape == (3, 4)
16 |         assert d0.ltypes == (ltype.bool,) * 4
17 |         assert d0.to_list() == [[True, False, None]] * 4
18 | 
--------------------------------------------------------------------------------
/tests/test_kornia.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import cv2
4 | import torch
5 | import kornia
6 | 
7 | class TestKornia(unittest.TestCase):
8 |     def test_imread_opencv(self):
9 |         img = cv2.imread('/input/tests/data/dot.png')
10 |         img_t = kornia.image_to_tensor(img)
11 | 
12 |         self.assertEqual(img.shape, (1, 1, 3))
13 |         self.assertEqual(img_t.shape, (3, 1, 1))
14 | 
15 |     def test_grayscale_torch(self):
16 |         img_rgb = torch.rand(2, 3, 4, 5)
17 |         img_gray = kornia.rgb_to_grayscale(img_rgb)
18 | 
19 |         self.assertEqual(img_gray.shape, (2, 1, 4, 5))
20 | 
--------------------------------------------------------------------------------
/tests/test_numpy.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import numpy as np
4 | from numpy.distutils.system_info import get_info
5 | 
6 | class TestNumpy(unittest.TestCase):
7 |     def test_array(self):
8 |         array = np.array([1, 3])
9 | 
10 |         self.assertEqual((2,), array.shape)
11 | 
12 |     # Numpy must be linked to the MKL. (Occasionally, a third-party package will muck up the installation
13 |     # and numpy will be reinstalled with an OpenBLAS backing.)
14 |     def test_mkl(self):
15 |         # This will throw an exception if the MKL is not linked correctly.
16 |         get_info("blas_mkl")
17 | 
--------------------------------------------------------------------------------
/tensorflow-whl/tensorflow-gcs-config/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 | 
3 | 
4 | setup_kwargs = {
5 | }
6 | 
7 | from build import compile_bazel
8 | compile_bazel()
9 | 
10 | setup(
11 |     name='tensorflow-gcs-config',
12 |     version='2.1.7',
13 |     description='TensorFlow operations for configuring access to GCS (Google Cloud Storage) resources.',
14 |     long_description='TensorFlow operations for configuring access to GCS (Google Cloud Storage) resources.',
15 |     author='Google, Inc.',
16 |     author_email=None,
17 |     url=None,
18 |     packages = ['tensorflow_gcs_config'],
19 |     include_package_data=True,
20 | )
21 | 
--------------------------------------------------------------------------------
/tensorflow-whl/tensorflow-gcs-config/tensorflow_gcs_config/BUILD:
--------------------------------------------------------------------------------
1 | licenses(["notice"])  # Apache 2.0
2 | 
3 | package(default_visibility = ["//visibility:public"])
4 | 
5 | cc_binary(
6 |     name = "_gcs_config_ops.so",
7 |     srcs = [
8 |         "gcs_config_op_kernels.cc",
9 |         "gcs_config_ops.cc",
10 |     ],
11 |     copts = [
12 |         "-pthread",
13 |         "-std=c++11",
14 |         "-DNDEBUG",
15 |     ],
16 |     linkshared = 1,
17 |     deps = [
18 |         "@jsoncpp_git//:jsoncpp",
19 |         "@local_config_tf//:libtensorflow_framework",
20 |         "@local_config_tf//:tf_header_lib",
21 |     ],
22 | )
23 | 
--------------------------------------------------------------------------------
/tests/test_sklearn.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from sklearn import datasets
4 | from sklearn.ensemble import RandomForestClassifier
5 | from sklearn.linear_model import LinearRegression
6 | 
7 | class TestSklearn(unittest.TestCase):
8 |     def test_random_forest_classifier(self):
9 |         iris = datasets.load_iris()
10 |         X, y = iris.data, iris.target
11 |         rf1 = RandomForestClassifier()
12 |         rf1.fit(X,y)
13 | 
14 |     def test_linearn_classifier(self):
15 |         boston = datasets.load_boston()
16 |         X, y = boston.data, boston.target
17 |         lr1 = LinearRegression()
18 |         lr1.fit(X,y)
19 | 
--------------------------------------------------------------------------------
/tests/test_jax.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import time
4 | 
5 | from common import gpu_test
6 | 
7 | 
8 | class TestJAX(unittest.TestCase):
9 |     def tanh(self, x):
10 |         import jax.numpy as np
11 |         y = np.exp(-2.0 * x)
12 |         return (1.0 - y) / (1.0 + y)
13 | 
14 |     @gpu_test
15 |     def test_JAX(self):
16 |         # importing inside the gpu-only test because these packages can't be
17 |         # imported on the CPU image since they are not present there.
18 |         from jax import grad, jit
19 | 
20 |         grad_tanh = grad(self.tanh)
21 |         ag = grad_tanh(1.0)
22 |         self.assertEqual(0.4199743, ag)
23 | 
--------------------------------------------------------------------------------
/tests/test_cupy.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from common import gpu_test
4 | 
5 | 
6 | class TestCupy(unittest.TestCase):
7 |     @gpu_test
8 |     def test_kernel(self):
9 |         import cupy as cp
10 |         x = cp.arange(6, dtype='f').reshape(2, 3)
11 |         y = cp.arange(3, dtype='f')
12 |         kernel = cp.ElementwiseKernel(
13 |             'float32 x, float32 y', 'float32 z',
14 |             '''if (x - 2 > y) {
15 |                 z = x * y;
16 |             } else {
17 |                 z = x + y;
18 |             }''',
19 |             'my_kernel')
20 |         r = kernel(x, y)
21 | 
22 |         self.assertEqual((2, 3), r.shape)
--------------------------------------------------------------------------------
/tests/test_jupyter_nbconvert.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import subprocess
4 | 
5 | class TestJupyterNbconvert(unittest.TestCase):
6 |     def test_nbconvert(self):
7 |         result = subprocess.run([
8 |             'jupyter',
9 |             'nbconvert',
10 |             '--to',
11 |             'notebook',
12 |             '--template',
13 |             '/opt/kaggle/nbconvert-extensions.tpl',
14 |             '--execute',
15 |             '--stdout',
16 |             '/input/tests/data/notebook.ipynb',
17 |         ], stdout=subprocess.PIPE)
18 | 
19 |         self.assertEqual(0, result.returncode)
20 |         self.assertTrue(b'999' in result.stdout)
--------------------------------------------------------------------------------
/tests/test_pytesseract.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import io
3 | import pytesseract
4 | import numpy as np
5 | from PIL import Image
6 | from wand.image import Image as wandimage
7 | 
8 | class TestPytesseract(unittest.TestCase):
9 |     def test_tesseract(self):
10 |         # Open pdf with Wand
11 |         with wandimage(filename='/input/tests/data/test.pdf') as wand_image:
12 |             img_buffer = np.asarray(bytearray(wand_image.make_blob(format='png')), dtype='uint8')
13 |             bytesio = io.BytesIO(img_buffer)
14 |             test_string = pytesseract.image_to_string(Image.open(bytesio))
15 |             self.assertTrue(type(test_string) == str)
16 | 
--------------------------------------------------------------------------------
/tests/test_tensorflow_bigquery.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from google.cloud import bigquery
4 | import tensorflow as tf
5 | 
6 | 
7 | class TestTensorflowBigQuery(unittest.TestCase):
8 | 
9 |     # Some versions of bigquery crashed tensorflow, add this test to make sure that doesn't happen.
10 |     # python -c "from google.cloud import bigquery; import tensorflow". This flow is common because bigquery is imported in kaggle_gcp.py
11 |     # which is loaded at startup.
12 |     def test_matrix_addition(self):
13 |         result = tf.add([1, 2], [3, 4])
14 |         self.assertEqual([2], result.shape)
15 |         self.assertListEqual([4, 6], [x for x in result.numpy()])
--------------------------------------------------------------------------------
/tests/test_theano.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import theano
4 | from theano import tensor
5 | 
6 | class TestTheano(unittest.TestCase):
7 |     def test_addition(self):
8 |         # Declare two symbolic floating-point scalars.
9 |         a = tensor.dscalar()
10 |         b = tensor.dscalar()
11 | 
12 |         # Create a simple expression.
13 |         c = a + b
14 | 
15 |         # Convert the expression into a callable object that takes (a,b)
16 |         # values as input and computes a value for 'c'.
17 |         f = theano.function([a,b], c)
18 | 
19 |         # Bind 1.5 to 'a', 2.5 to 'b', and evaluate 'c'.
20 |         self.assertEqual(4.0, f(1.5, 2.5))
21 | 
--------------------------------------------------------------------------------
/tests/test_rapids.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from common import gpu_test
4 | 
5 | 
6 | class TestRapids(unittest.TestCase):
7 |     @gpu_test
8 |     def test_dbscan(self):
9 |         import cudf
10 |         from cuml.cluster import DBSCAN
11 | 
12 |         # Create and populate a GPU DataFrame
13 |         gdf_float = cudf.DataFrame()
14 |         gdf_float['0'] = [1.0, 2.0, 5.0]
15 |         gdf_float['1'] = [4.0, 2.0, 1.0]
16 |         gdf_float['2'] = [4.0, 2.0, 1.0]
17 | 
18 |         # Setup and fit clusters
19 |         dbscan_float = DBSCAN(eps=1.0, min_samples=1)
20 |         dbscan_float.fit(gdf_float)
21 | 
22 |         self.assertEqual(3, dbscan_float.labels_.size)
23 | 
--------------------------------------------------------------------------------
/tests/test_allennlp.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from allennlp.data.tokenizers import SpacyTokenizer
4 | 
5 | 
6 | class TestAllenNlp(unittest.TestCase):
7 |     # reference
8 |     # https://github.com/allenai/allennlp/blob/master/allennlp/tests/data/tokenizers/word_tokenizer_test.py
9 |     def test_passes_through_correctly(self):
10 |         tokenizer = SpacyTokenizer()
11 |         sentence = "this (sentence) has 'crazy' \"punctuation\"."
12 |         tokens = [t.text for t in tokenizer.tokenize(sentence)]
13 |         expected_tokens = ["this", "(", "sentence", ")", "has", "'", "crazy", "'", "\"",
14 |                            "punctuation", "\"", "."]
15 |         self.assertSequenceEqual(tokens, expected_tokens)
16 | 
--------------------------------------------------------------------------------
/tests/test_geopandas.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import geopandas
4 | 
5 | class TestGeopandas(unittest.TestCase):
6 |     def test_read(self):
7 |         df = geopandas.read_file(geopandas.datasets.get_path('nybb'))
8 |         self.assertTrue(df.size > 1)
9 | 
10 |     def test_spatial_join(self):
11 |         cities = geopandas.read_file(geopandas.datasets.get_path('naturalearth_cities'))
12 |         world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))
13 |         countries = world[['geometry', 'name']]
14 |         countries = countries.rename(columns={'name':'country'})
15 |         cities_with_country = geopandas.sjoin(cities, countries, how="inner", op='intersects')
16 |         self.assertTrue(cities_with_country.size > 1)
--------------------------------------------------------------------------------
/tensorflow-whl/README.md:
--------------------------------------------------------------------------------
1 | # Build new Tensorflow wheels
2 | 
3 | ```
4 | ./build
5 | ```
6 | 
7 | # Push the new wheels (Kaggle Engineers only)
8 | 
9 | 1. Add an entry in the [CHANGELOG](CHANGELOG.md) with an appropriate `LABEL`.
10 | 2. Push the new image using the `LABEL` you picked above.
11 | 
12 | ```
13 | ./push LABEL
14 | ```
15 | 
16 | # Use the new wheels
17 | 
18 | Update the line below in the [CPU Dockerfile](../Dockerfile) and the [GPU Dockerfile](../gpu.Dockerfile) to use the new `LABEL`.
19 | 
20 | To use wheels built locally:
21 | ```
22 | FROM kaggle/python-tensorflow-whl as tensorflow_whl
23 | ```
24 | 
25 | To use our pre-built wheels:
26 | ```
27 | FROM gcr.io/kaggle-images/python-tensorflow-whl: