├── .devcontainer ├── README.md ├── devcontainer.json └── setup.sh ├── .github ├── dependabot.yml └── workflows │ ├── actions.yml │ ├── auto-assignment.yaml │ ├── config │ ├── jax │ │ └── keras.json │ ├── numpy │ │ └── keras.json │ ├── openvino │ │ └── keras.json │ ├── tensorflow │ │ └── keras.json │ └── torch │ │ └── keras.json │ ├── labeler.yaml │ ├── nightly.yml │ ├── scorecard.yml │ ├── scripts │ ├── auto-assignment.js │ └── labeler.js │ └── stale-issue-pr.yaml ├── .gitignore ├── .kokoro ├── README.md └── github │ └── ubuntu │ └── gpu │ ├── build.sh │ ├── jax │ ├── continuous.cfg │ └── presubmit.cfg │ ├── tensorflow │ ├── continuous.cfg │ └── presubmit.cfg │ └── torch │ ├── continuous.cfg │ └── presubmit.cfg ├── .pre-commit-config.yaml ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── SECURITY.md ├── api_gen.py ├── benchmarks ├── __init__.py ├── layer_benchmark │ ├── README.md │ ├── __init__.py │ ├── activation_benchmark.py │ ├── attention_benchmark.py │ ├── base_benchmark.py │ ├── conv_benchmark.py │ ├── core_benchmark.py │ ├── merge_benchmark.py │ ├── normalization_benchmark.py │ ├── pooling_benchmark.py │ ├── regularization_benchmark.py │ ├── reshaping_benchmark.py │ └── rnn_benchmark.py ├── model_benchmark │ ├── __init__.py │ ├── benchmark_utils.py │ ├── bert_benchmark.py │ └── image_classification_benchmark.py └── torch_ctl_benchmark │ ├── README.md │ ├── __init__.py │ ├── benchmark_utils.py │ ├── conv_model_benchmark.py │ └── dense_model_benchmark.py ├── codecov.yml ├── conftest.py ├── examples ├── demo_custom_jax_workflow.py ├── demo_custom_layer_backend_agnostic.py ├── demo_custom_tf_workflow.py ├── demo_custom_torch_workflow.py ├── demo_functional.py ├── demo_jax_distributed.py ├── demo_mnist_convnet.py ├── demo_subclass.py └── demo_torch_multi_gpu.py ├── guides ├── custom_train_step_in_jax.py ├── custom_train_step_in_tensorflow.py ├── custom_train_step_in_torch.py ├── distributed_training_with_jax.py ├── distributed_training_with_tensorflow.py ├── distributed_training_with_torch.py ├── functional_api.py ├── making_new_layers_and_models_via_subclassing.py ├── sequential_model.py ├── training_with_built_in_methods.py ├── transfer_learning.py ├── understanding_masking_and_padding.py ├── writing_a_custom_training_loop_in_jax.py ├── writing_a_custom_training_loop_in_tensorflow.py ├── writing_a_custom_training_loop_in_torch.py └── writing_your_own_callbacks.py ├── integration_tests ├── basic_full_flow.py ├── dataset_tests │ ├── boston_housing_test.py │ ├── california_housing_test.py │ ├── cifar100_test.py │ ├── cifar10_test.py │ ├── fashion_mnist_test.py │ ├── imdb_test.py │ ├── mnist_test.py │ └── reuters_test.py ├── import_test.py ├── jax_custom_fit_test.py ├── model_visualization_test.py ├── numerical_test.py ├── tf_custom_fit_test.py ├── tf_distribute_training_test.py ├── torch_custom_fit_test.py └── torch_workflow_test.py ├── keras ├── __init__.py ├── api │ ├── __init__.py │ ├── _tf_keras │ │ ├── __init__.py │ │ └── keras │ │ │ ├── __init__.py │ │ │ ├── activations │ │ │ └── __init__.py │ │ │ ├── applications │ │ │ ├── __init__.py │ │ │ ├── convnext │ │ │ │ └── __init__.py │ │ │ ├── densenet │ │ │ │ └── __init__.py │ │ │ ├── efficientnet │ │ │ │ └── __init__.py │ │ │ ├── efficientnet_v2 │ │ │ │ └── __init__.py │ │ │ ├── imagenet_utils │ │ │ │ └── __init__.py │ │ │ ├── inception_resnet_v2 │ │ │ │ └── __init__.py │ │ │ ├── inception_v3 │ │ │ │ └── __init__.py │ │ │ ├── mobilenet │ │ │ │ └── __init__.py │ │ │ ├── mobilenet_v2 │ │ │ │ └── __init__.py │ │ │ ├── mobilenet_v3 │ │ │ │ 
└── __init__.py │ │ │ ├── nasnet │ │ │ │ └── __init__.py │ │ │ ├── resnet │ │ │ │ └── __init__.py │ │ │ ├── resnet50 │ │ │ │ └── __init__.py │ │ │ ├── resnet_v2 │ │ │ │ └── __init__.py │ │ │ ├── vgg16 │ │ │ │ └── __init__.py │ │ │ ├── vgg19 │ │ │ │ └── __init__.py │ │ │ └── xception │ │ │ │ └── __init__.py │ │ │ ├── backend │ │ │ └── __init__.py │ │ │ ├── callbacks │ │ │ └── __init__.py │ │ │ ├── config │ │ │ └── __init__.py │ │ │ ├── constraints │ │ │ └── __init__.py │ │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── boston_housing │ │ │ │ └── __init__.py │ │ │ ├── california_housing │ │ │ │ └── __init__.py │ │ │ ├── cifar10 │ │ │ │ └── __init__.py │ │ │ ├── cifar100 │ │ │ │ └── __init__.py │ │ │ ├── fashion_mnist │ │ │ │ └── __init__.py │ │ │ ├── imdb │ │ │ │ └── __init__.py │ │ │ ├── mnist │ │ │ │ └── __init__.py │ │ │ └── reuters │ │ │ │ └── __init__.py │ │ │ ├── distribution │ │ │ └── __init__.py │ │ │ ├── dtype_policies │ │ │ └── __init__.py │ │ │ ├── export │ │ │ └── __init__.py │ │ │ ├── initializers │ │ │ └── __init__.py │ │ │ ├── layers │ │ │ └── __init__.py │ │ │ ├── legacy │ │ │ ├── __init__.py │ │ │ └── saving │ │ │ │ └── __init__.py │ │ │ ├── losses │ │ │ └── __init__.py │ │ │ ├── metrics │ │ │ └── __init__.py │ │ │ ├── mixed_precision │ │ │ └── __init__.py │ │ │ ├── models │ │ │ └── __init__.py │ │ │ ├── ops │ │ │ ├── __init__.py │ │ │ ├── image │ │ │ │ └── __init__.py │ │ │ ├── linalg │ │ │ │ └── __init__.py │ │ │ ├── nn │ │ │ │ └── __init__.py │ │ │ └── numpy │ │ │ │ └── __init__.py │ │ │ ├── optimizers │ │ │ ├── __init__.py │ │ │ ├── legacy │ │ │ │ └── __init__.py │ │ │ └── schedules │ │ │ │ └── __init__.py │ │ │ ├── preprocessing │ │ │ ├── __init__.py │ │ │ ├── image │ │ │ │ └── __init__.py │ │ │ ├── sequence │ │ │ │ └── __init__.py │ │ │ └── text │ │ │ │ └── __init__.py │ │ │ ├── quantizers │ │ │ └── __init__.py │ │ │ ├── random │ │ │ └── __init__.py │ │ │ ├── regularizers │ │ │ └── __init__.py │ │ │ ├── saving │ │ │ └── __init__.py │ │ │ ├── tree │ │ │ └── __init__.py │ │ │ ├── utils │ │ │ ├── __init__.py │ │ │ ├── bounding_boxes │ │ │ │ └── __init__.py │ │ │ └── legacy │ │ │ │ └── __init__.py │ │ │ ├── visualization │ │ │ └── __init__.py │ │ │ └── wrappers │ │ │ └── __init__.py │ ├── activations │ │ └── __init__.py │ ├── applications │ │ ├── __init__.py │ │ ├── convnext │ │ │ └── __init__.py │ │ ├── densenet │ │ │ └── __init__.py │ │ ├── efficientnet │ │ │ └── __init__.py │ │ ├── efficientnet_v2 │ │ │ └── __init__.py │ │ ├── imagenet_utils │ │ │ └── __init__.py │ │ ├── inception_resnet_v2 │ │ │ └── __init__.py │ │ ├── inception_v3 │ │ │ └── __init__.py │ │ ├── mobilenet │ │ │ └── __init__.py │ │ ├── mobilenet_v2 │ │ │ └── __init__.py │ │ ├── mobilenet_v3 │ │ │ └── __init__.py │ │ ├── nasnet │ │ │ └── __init__.py │ │ ├── resnet │ │ │ └── __init__.py │ │ ├── resnet50 │ │ │ └── __init__.py │ │ ├── resnet_v2 │ │ │ └── __init__.py │ │ ├── vgg16 │ │ │ └── __init__.py │ │ ├── vgg19 │ │ │ └── __init__.py │ │ └── xception │ │ │ └── __init__.py │ ├── backend │ │ └── __init__.py │ ├── callbacks │ │ └── __init__.py │ ├── config │ │ └── __init__.py │ ├── constraints │ │ └── __init__.py │ ├── datasets │ │ ├── __init__.py │ │ ├── boston_housing │ │ │ └── __init__.py │ │ ├── california_housing │ │ │ └── __init__.py │ │ ├── cifar10 │ │ │ └── __init__.py │ │ ├── cifar100 │ │ │ └── __init__.py │ │ ├── fashion_mnist │ │ │ └── __init__.py │ │ ├── imdb │ │ │ └── __init__.py │ │ ├── mnist │ │ │ └── __init__.py │ │ └── reuters │ │ │ └── __init__.py │ ├── distribution │ │ └── __init__.py │ 
├── dtype_policies │ │ └── __init__.py │ ├── export │ │ └── __init__.py │ ├── initializers │ │ └── __init__.py │ ├── layers │ │ └── __init__.py │ ├── legacy │ │ ├── __init__.py │ │ └── saving │ │ │ └── __init__.py │ ├── losses │ │ └── __init__.py │ ├── metrics │ │ └── __init__.py │ ├── mixed_precision │ │ └── __init__.py │ ├── models │ │ └── __init__.py │ ├── ops │ │ ├── __init__.py │ │ ├── image │ │ │ └── __init__.py │ │ ├── linalg │ │ │ └── __init__.py │ │ ├── nn │ │ │ └── __init__.py │ │ └── numpy │ │ │ └── __init__.py │ ├── optimizers │ │ ├── __init__.py │ │ ├── legacy │ │ │ └── __init__.py │ │ └── schedules │ │ │ └── __init__.py │ ├── preprocessing │ │ ├── __init__.py │ │ ├── image │ │ │ └── __init__.py │ │ └── sequence │ │ │ └── __init__.py │ ├── quantizers │ │ └── __init__.py │ ├── random │ │ └── __init__.py │ ├── regularizers │ │ └── __init__.py │ ├── saving │ │ └── __init__.py │ ├── tree │ │ └── __init__.py │ ├── utils │ │ ├── __init__.py │ │ ├── bounding_boxes │ │ │ └── __init__.py │ │ └── legacy │ │ │ └── __init__.py │ ├── visualization │ │ └── __init__.py │ └── wrappers │ │ └── __init__.py └── src │ ├── __init__.py │ ├── activations │ ├── __init__.py │ ├── activations.py │ └── activations_test.py │ ├── api_export.py │ ├── applications │ ├── __init__.py │ ├── applications_test.py │ ├── convnext.py │ ├── densenet.py │ ├── efficientnet.py │ ├── efficientnet_v2.py │ ├── imagenet_utils.py │ ├── imagenet_utils_test.py │ ├── inception_resnet_v2.py │ ├── inception_v3.py │ ├── mobilenet.py │ ├── mobilenet_v2.py │ ├── mobilenet_v3.py │ ├── nasnet.py │ ├── resnet.py │ ├── resnet_v2.py │ ├── vgg16.py │ ├── vgg19.py │ └── xception.py │ ├── backend │ ├── __init__.py │ ├── common │ │ ├── __init__.py │ │ ├── backend_utils.py │ │ ├── backend_utils_test.py │ │ ├── compute_output_spec_test.py │ │ ├── dtypes.py │ │ ├── dtypes_test.py │ │ ├── global_state.py │ │ ├── global_state_test.py │ │ ├── keras_tensor.py │ │ ├── keras_tensor_test.py │ │ ├── masking.py │ │ ├── masking_test.py │ │ ├── name_scope.py │ │ ├── name_scope_test.py │ │ ├── remat.py │ │ ├── remat_test.py │ │ ├── stateless_scope.py │ │ ├── stateless_scope_test.py │ │ ├── symbolic_scope.py │ │ ├── symbolic_scope_test.py │ │ ├── tensor_attributes.py │ │ ├── thread_safe_test.py │ │ ├── variables.py │ │ └── variables_test.py │ ├── config.py │ ├── jax │ │ ├── __init__.py │ │ ├── core.py │ │ ├── distribution_lib.py │ │ ├── distribution_lib_test.py │ │ ├── export.py │ │ ├── image.py │ │ ├── layer.py │ │ ├── linalg.py │ │ ├── math.py │ │ ├── nn.py │ │ ├── numpy.py │ │ ├── optimizer.py │ │ ├── random.py │ │ ├── rnn.py │ │ ├── sparse.py │ │ ├── tensorboard.py │ │ └── trainer.py │ ├── numpy │ │ ├── __init__.py │ │ ├── core.py │ │ ├── export.py │ │ ├── image.py │ │ ├── layer.py │ │ ├── linalg.py │ │ ├── math.py │ │ ├── nn.py │ │ ├── numpy.py │ │ ├── random.py │ │ ├── rnn.py │ │ └── trainer.py │ ├── openvino │ │ ├── __init__.py │ │ ├── core.py │ │ ├── excluded_concrete_tests.txt │ │ ├── excluded_tests.txt │ │ ├── export.py │ │ ├── image.py │ │ ├── layer.py │ │ ├── linalg.py │ │ ├── math.py │ │ ├── nn.py │ │ ├── numpy.py │ │ ├── random.py │ │ ├── rnn.py │ │ └── trainer.py │ ├── tensorflow │ │ ├── __init__.py │ │ ├── core.py │ │ ├── distribute_test.py │ │ ├── distribution_lib.py │ │ ├── export.py │ │ ├── image.py │ │ ├── layer.py │ │ ├── linalg.py │ │ ├── math.py │ │ ├── name_scope_test.py │ │ ├── nn.py │ │ ├── numpy.py │ │ ├── optimizer.py │ │ ├── optimizer_distribute_test.py │ │ ├── random.py │ │ ├── rnn.py │ │ ├── saved_model_test.py │ │ ├── 
sparse.py │ │ ├── tensorboard.py │ │ ├── trackable.py │ │ └── trainer.py │ ├── tests │ │ ├── compute_output_spec_test.py │ │ └── device_scope_test.py │ └── torch │ │ ├── __init__.py │ │ ├── core.py │ │ ├── export.py │ │ ├── image.py │ │ ├── layer.py │ │ ├── linalg.py │ │ ├── math.py │ │ ├── nn.py │ │ ├── numpy.py │ │ ├── optimizers │ │ ├── __init__.py │ │ ├── torch_adadelta.py │ │ ├── torch_adagrad.py │ │ ├── torch_adam.py │ │ ├── torch_adamax.py │ │ ├── torch_adamw.py │ │ ├── torch_lion.py │ │ ├── torch_nadam.py │ │ ├── torch_optimizer.py │ │ ├── torch_parallel_optimizer.py │ │ ├── torch_rmsprop.py │ │ └── torch_sgd.py │ │ ├── random.py │ │ ├── rnn.py │ │ └── trainer.py │ ├── callbacks │ ├── __init__.py │ ├── backup_and_restore.py │ ├── backup_and_restore_test.py │ ├── callback.py │ ├── callback_list.py │ ├── callback_test.py │ ├── csv_logger.py │ ├── csv_logger_test.py │ ├── early_stopping.py │ ├── early_stopping_test.py │ ├── history.py │ ├── lambda_callback.py │ ├── lambda_callback_test.py │ ├── learning_rate_scheduler.py │ ├── learning_rate_scheduler_test.py │ ├── model_checkpoint.py │ ├── model_checkpoint_test.py │ ├── monitor_callback.py │ ├── monitor_callback_test.py │ ├── progbar_logger.py │ ├── reduce_lr_on_plateau.py │ ├── reduce_lr_on_plateau_test.py │ ├── remote_monitor.py │ ├── remote_monitor_test.py │ ├── swap_ema_weights.py │ ├── swap_ema_weights_test.py │ ├── tensorboard.py │ ├── tensorboard_test.py │ ├── terminate_on_nan.py │ └── terminate_on_nan_test.py │ ├── constraints │ ├── __init__.py │ ├── constraints.py │ └── constraints_test.py │ ├── datasets │ ├── __init__.py │ ├── boston_housing.py │ ├── california_housing.py │ ├── cifar.py │ ├── cifar10.py │ ├── cifar100.py │ ├── fashion_mnist.py │ ├── imdb.py │ ├── mnist.py │ └── reuters.py │ ├── distribution │ ├── __init__.py │ ├── distribution_lib.py │ └── distribution_lib_test.py │ ├── dtype_policies │ ├── __init__.py │ ├── dtype_policy.py │ ├── dtype_policy_map.py │ ├── dtype_policy_map_test.py │ └── dtype_policy_test.py │ ├── export │ ├── __init__.py │ ├── export_utils.py │ ├── onnx.py │ ├── onnx_test.py │ ├── saved_model.py │ ├── saved_model_test.py │ ├── tf2onnx_lib.py │ ├── tfsm_layer.py │ └── tfsm_layer_test.py │ ├── initializers │ ├── __init__.py │ ├── constant_initializers.py │ ├── constant_initializers_test.py │ ├── initializer.py │ ├── random_initializers.py │ └── random_initializers_test.py │ ├── layers │ ├── __init__.py │ ├── activations │ │ ├── __init__.py │ │ ├── activation.py │ │ ├── activation_test.py │ │ ├── elu.py │ │ ├── elu_test.py │ │ ├── leaky_relu.py │ │ ├── leaky_relu_test.py │ │ ├── prelu.py │ │ ├── prelu_test.py │ │ ├── relu.py │ │ ├── relu_test.py │ │ ├── softmax.py │ │ └── softmax_test.py │ ├── attention │ │ ├── __init__.py │ │ ├── additive_attention.py │ │ ├── additive_attention_test.py │ │ ├── attention.py │ │ ├── attention_test.py │ │ ├── grouped_query_attention.py │ │ ├── grouped_query_attention_test.py │ │ ├── multi_head_attention.py │ │ └── multi_head_attention_test.py │ ├── convolutional │ │ ├── __init__.py │ │ ├── base_conv.py │ │ ├── base_conv_transpose.py │ │ ├── base_depthwise_conv.py │ │ ├── base_separable_conv.py │ │ ├── conv1d.py │ │ ├── conv1d_transpose.py │ │ ├── conv2d.py │ │ ├── conv2d_transpose.py │ │ ├── conv3d.py │ │ ├── conv3d_transpose.py │ │ ├── conv_test.py │ │ ├── conv_transpose_test.py │ │ ├── depthwise_conv1d.py │ │ ├── depthwise_conv2d.py │ │ ├── depthwise_conv_test.py │ │ ├── separable_conv1d.py │ │ ├── separable_conv2d.py │ │ └── separable_conv_test.py │ ├── core │ 
│ ├── __init__.py │ │ ├── dense.py │ │ ├── dense_test.py │ │ ├── einsum_dense.py │ │ ├── einsum_dense_test.py │ │ ├── embedding.py │ │ ├── embedding_test.py │ │ ├── identity.py │ │ ├── identity_test.py │ │ ├── input_layer.py │ │ ├── input_layer_test.py │ │ ├── lambda_layer.py │ │ ├── lambda_layer_test.py │ │ ├── masking.py │ │ ├── masking_test.py │ │ ├── wrapper.py │ │ └── wrapper_test.py │ ├── input_spec.py │ ├── layer.py │ ├── layer_test.py │ ├── merging │ │ ├── __init__.py │ │ ├── add.py │ │ ├── average.py │ │ ├── base_merge.py │ │ ├── concatenate.py │ │ ├── dot.py │ │ ├── maximum.py │ │ ├── merging_test.py │ │ ├── minimum.py │ │ ├── multiply.py │ │ └── subtract.py │ ├── normalization │ │ ├── __init__.py │ │ ├── batch_normalization.py │ │ ├── batch_normalization_test.py │ │ ├── group_normalization.py │ │ ├── group_normalization_test.py │ │ ├── layer_normalization.py │ │ ├── layer_normalization_test.py │ │ ├── rms_normalization.py │ │ ├── rms_normalization_test.py │ │ ├── spectral_normalization.py │ │ ├── spectral_normalization_test.py │ │ ├── unit_normalization.py │ │ └── unit_normalization_test.py │ ├── pooling │ │ ├── __init__.py │ │ ├── average_pooling1d.py │ │ ├── average_pooling2d.py │ │ ├── average_pooling3d.py │ │ ├── average_pooling_test.py │ │ ├── base_global_pooling.py │ │ ├── base_pooling.py │ │ ├── global_average_pooling1d.py │ │ ├── global_average_pooling2d.py │ │ ├── global_average_pooling3d.py │ │ ├── global_average_pooling_test.py │ │ ├── global_max_pooling1d.py │ │ ├── global_max_pooling2d.py │ │ ├── global_max_pooling3d.py │ │ ├── global_max_pooling_test.py │ │ ├── max_pooling1d.py │ │ ├── max_pooling2d.py │ │ ├── max_pooling3d.py │ │ └── max_pooling_test.py │ ├── preprocessing │ │ ├── __init__.py │ │ ├── category_encoding.py │ │ ├── category_encoding_test.py │ │ ├── discretization.py │ │ ├── discretization_test.py │ │ ├── feature_space.py │ │ ├── feature_space_test.py │ │ ├── hashed_crossing.py │ │ ├── hashed_crossing_test.py │ │ ├── hashing.py │ │ ├── hashing_test.py │ │ ├── image_preprocessing │ │ │ ├── __init__.py │ │ │ ├── aug_mix.py │ │ │ ├── aug_mix_test.py │ │ │ ├── auto_contrast.py │ │ │ ├── auto_contrast_test.py │ │ │ ├── base_image_preprocessing_layer.py │ │ │ ├── bounding_boxes │ │ │ │ ├── __init__.py │ │ │ │ ├── bounding_box.py │ │ │ │ ├── converters.py │ │ │ │ ├── converters_test.py │ │ │ │ ├── formats.py │ │ │ │ ├── iou.py │ │ │ │ ├── iou_test.py │ │ │ │ └── validation.py │ │ │ ├── center_crop.py │ │ │ ├── center_crop_test.py │ │ │ ├── cut_mix.py │ │ │ ├── cut_mix_test.py │ │ │ ├── equalization.py │ │ │ ├── equalization_test.py │ │ │ ├── max_num_bounding_box.py │ │ │ ├── max_num_bounding_box_test.py │ │ │ ├── mix_up.py │ │ │ ├── mix_up_test.py │ │ │ ├── rand_augment.py │ │ │ ├── rand_augment_test.py │ │ │ ├── random_brightness.py │ │ │ ├── random_brightness_test.py │ │ │ ├── random_color_degeneration.py │ │ │ ├── random_color_degeneration_test.py │ │ │ ├── random_color_jitter.py │ │ │ ├── random_color_jitter_test.py │ │ │ ├── random_contrast.py │ │ │ ├── random_contrast_test.py │ │ │ ├── random_crop.py │ │ │ ├── random_crop_test.py │ │ │ ├── random_elastic_transform.py │ │ │ ├── random_elastic_transform_test.py │ │ │ ├── random_erasing.py │ │ │ ├── random_erasing_test.py │ │ │ ├── random_flip.py │ │ │ ├── random_flip_test.py │ │ │ ├── random_gaussian_blur.py │ │ │ ├── random_gaussian_blur_test.py │ │ │ ├── random_grayscale.py │ │ │ ├── random_grayscale_test.py │ │ │ ├── random_hue.py │ │ │ ├── random_hue_test.py │ │ │ ├── random_invert.py │ │ │ ├── 
random_invert_test.py │ │ │ ├── random_perspective.py │ │ │ ├── random_perspective_test.py │ │ │ ├── random_posterization.py │ │ │ ├── random_posterization_test.py │ │ │ ├── random_rotation.py │ │ │ ├── random_rotation_test.py │ │ │ ├── random_saturation.py │ │ │ ├── random_saturation_test.py │ │ │ ├── random_sharpness.py │ │ │ ├── random_sharpness_test.py │ │ │ ├── random_shear.py │ │ │ ├── random_shear_test.py │ │ │ ├── random_translation.py │ │ │ ├── random_translation_test.py │ │ │ ├── random_zoom.py │ │ │ ├── random_zoom_test.py │ │ │ ├── resizing.py │ │ │ ├── resizing_test.py │ │ │ ├── solarization.py │ │ │ └── solarization_test.py │ │ ├── index_lookup.py │ │ ├── index_lookup_test.py │ │ ├── integer_lookup.py │ │ ├── integer_lookup_test.py │ │ ├── mel_spectrogram.py │ │ ├── mel_spectrogram_test.py │ │ ├── normalization.py │ │ ├── normalization_test.py │ │ ├── pipeline.py │ │ ├── pipeline_test.py │ │ ├── rescaling.py │ │ ├── rescaling_test.py │ │ ├── stft_spectrogram.py │ │ ├── stft_spectrogram_test.py │ │ ├── string_lookup.py │ │ ├── string_lookup_test.py │ │ ├── text_vectorization.py │ │ ├── text_vectorization_test.py │ │ └── tf_data_layer.py │ ├── regularization │ │ ├── __init__.py │ │ ├── activity_regularization.py │ │ ├── activity_regularization_test.py │ │ ├── alpha_dropout.py │ │ ├── alpha_dropout_test.py │ │ ├── dropout.py │ │ ├── dropout_test.py │ │ ├── gaussian_dropout.py │ │ ├── gaussian_dropout_test.py │ │ ├── gaussian_noise.py │ │ ├── gaussian_noise_test.py │ │ ├── spatial_dropout.py │ │ └── spatial_dropout_test.py │ ├── reshaping │ │ ├── __init__.py │ │ ├── cropping1d.py │ │ ├── cropping1d_test.py │ │ ├── cropping2d.py │ │ ├── cropping2d_test.py │ │ ├── cropping3d.py │ │ ├── cropping3d_test.py │ │ ├── flatten.py │ │ ├── flatten_test.py │ │ ├── permute.py │ │ ├── permute_test.py │ │ ├── repeat_vector.py │ │ ├── repeat_vector_test.py │ │ ├── reshape.py │ │ ├── reshape_test.py │ │ ├── up_sampling1d.py │ │ ├── up_sampling1d_test.py │ │ ├── up_sampling2d.py │ │ ├── up_sampling2d_test.py │ │ ├── up_sampling3d.py │ │ ├── up_sampling3d_test.py │ │ ├── zero_padding1d.py │ │ ├── zero_padding1d_test.py │ │ ├── zero_padding2d.py │ │ ├── zero_padding2d_test.py │ │ ├── zero_padding3d.py │ │ └── zero_padding3d_test.py │ └── rnn │ │ ├── __init__.py │ │ ├── bidirectional.py │ │ ├── bidirectional_test.py │ │ ├── conv_lstm.py │ │ ├── conv_lstm1d.py │ │ ├── conv_lstm1d_test.py │ │ ├── conv_lstm2d.py │ │ ├── conv_lstm2d_test.py │ │ ├── conv_lstm3d.py │ │ ├── conv_lstm3d_test.py │ │ ├── conv_lstm_test.py │ │ ├── dropout_rnn_cell.py │ │ ├── dropout_rnn_cell_test.py │ │ ├── gru.py │ │ ├── gru_test.py │ │ ├── lstm.py │ │ ├── lstm_test.py │ │ ├── rnn.py │ │ ├── rnn_test.py │ │ ├── simple_rnn.py │ │ ├── simple_rnn_test.py │ │ ├── stacked_rnn_cells.py │ │ ├── stacked_rnn_cells_test.py │ │ ├── time_distributed.py │ │ └── time_distributed_test.py │ ├── legacy │ ├── __init__.py │ ├── backend.py │ ├── layers.py │ ├── losses.py │ ├── preprocessing │ │ ├── __init__.py │ │ ├── image.py │ │ ├── sequence.py │ │ └── text.py │ └── saving │ │ ├── __init__.py │ │ ├── json_utils.py │ │ ├── json_utils_test.py │ │ ├── legacy_h5_format.py │ │ ├── legacy_h5_format_test.py │ │ ├── saving_options.py │ │ ├── saving_utils.py │ │ └── serialization.py │ ├── losses │ ├── __init__.py │ ├── loss.py │ ├── loss_test.py │ ├── losses.py │ └── losses_test.py │ ├── metrics │ ├── __init__.py │ ├── accuracy_metrics.py │ ├── accuracy_metrics_test.py │ ├── confusion_metrics.py │ ├── confusion_metrics_test.py │ ├── 
correlation_metrics.py │ ├── correlation_metrics_test.py │ ├── f_score_metrics.py │ ├── f_score_metrics_test.py │ ├── hinge_metrics.py │ ├── hinge_metrics_test.py │ ├── iou_metrics.py │ ├── iou_metrics_test.py │ ├── metric.py │ ├── metric_test.py │ ├── metrics_utils.py │ ├── probabilistic_metrics.py │ ├── probabilistic_metrics_test.py │ ├── reduction_metrics.py │ ├── reduction_metrics_test.py │ ├── regression_metrics.py │ └── regression_metrics_test.py │ ├── models │ ├── __init__.py │ ├── cloning.py │ ├── cloning_test.py │ ├── functional.py │ ├── functional_test.py │ ├── model.py │ ├── model_test.py │ ├── sequential.py │ ├── sequential_test.py │ ├── variable_mapping.py │ └── variable_mapping_test.py │ ├── ops │ ├── __init__.py │ ├── core.py │ ├── core_test.py │ ├── einops.py │ ├── einops_test.py │ ├── function.py │ ├── function_test.py │ ├── image.py │ ├── image_test.py │ ├── linalg.py │ ├── linalg_test.py │ ├── math.py │ ├── math_test.py │ ├── nn.py │ ├── nn_test.py │ ├── node.py │ ├── node_test.py │ ├── numpy.py │ ├── numpy_test.py │ ├── operation.py │ ├── operation_test.py │ ├── operation_utils.py │ ├── operation_utils_test.py │ ├── symbolic_arguments.py │ └── symbolic_arguments_test.py │ ├── optimizers │ ├── __init__.py │ ├── adadelta.py │ ├── adadelta_test.py │ ├── adafactor.py │ ├── adafactor_test.py │ ├── adagrad.py │ ├── adagrad_test.py │ ├── adam.py │ ├── adam_test.py │ ├── adamax.py │ ├── adamax_test.py │ ├── adamw.py │ ├── adamw_test.py │ ├── base_optimizer.py │ ├── ftrl.py │ ├── ftrl_test.py │ ├── lamb.py │ ├── lamb_test.py │ ├── lion.py │ ├── lion_test.py │ ├── loss_scale_optimizer.py │ ├── loss_scale_optimizer_test.py │ ├── muon.py │ ├── muon_test.py │ ├── nadam.py │ ├── nadam_test.py │ ├── optimizer.py │ ├── optimizer_sparse_test.py │ ├── optimizer_test.py │ ├── rmsprop.py │ ├── rmsprop_test.py │ ├── schedules │ │ ├── __init__.py │ │ ├── learning_rate_schedule.py │ │ └── learning_rate_schedule_test.py │ ├── sgd.py │ └── sgd_test.py │ ├── quantizers │ ├── __init__.py │ ├── quantizers.py │ └── quantizers_test.py │ ├── random │ ├── __init__.py │ ├── random.py │ ├── random_test.py │ ├── seed_generator.py │ └── seed_generator_test.py │ ├── regularizers │ ├── __init__.py │ ├── regularizers.py │ └── regularizers_test.py │ ├── saving │ ├── __init__.py │ ├── file_editor.py │ ├── file_editor_test.py │ ├── keras_saveable.py │ ├── object_registration.py │ ├── object_registration_test.py │ ├── saving_api.py │ ├── saving_api_test.py │ ├── saving_lib.py │ ├── saving_lib_test.py │ ├── serialization_lib.py │ └── serialization_lib_test.py │ ├── testing │ ├── __init__.py │ ├── test_case.py │ ├── test_utils.py │ └── test_utils_test.py │ ├── trainers │ ├── __init__.py │ ├── compile_utils.py │ ├── compile_utils_test.py │ ├── data_adapters │ │ ├── __init__.py │ │ ├── array_data_adapter.py │ │ ├── array_data_adapter_test.py │ │ ├── array_slicing.py │ │ ├── data_adapter.py │ │ ├── data_adapter_utils.py │ │ ├── data_adapter_utils_test.py │ │ ├── generator_data_adapter.py │ │ ├── generator_data_adapter_test.py │ │ ├── py_dataset_adapter.py │ │ ├── py_dataset_adapter_test.py │ │ ├── tf_dataset_adapter.py │ │ ├── tf_dataset_adapter_test.py │ │ ├── torch_data_loader_adapter.py │ │ └── torch_data_loader_adapter_test.py │ ├── epoch_iterator.py │ ├── epoch_iterator_test.py │ ├── trainer.py │ └── trainer_test.py │ ├── tree │ ├── __init__.py │ ├── dmtree_impl.py │ ├── optree_impl.py │ ├── tree_api.py │ └── tree_test.py │ ├── utils │ ├── __init__.py │ ├── argument_validation.py │ ├── audio_dataset_utils.py │ 
├── audio_dataset_utils_test.py │ ├── backend_utils.py │ ├── backend_utils_test.py │ ├── code_stats.py │ ├── code_stats_test.py │ ├── config.py │ ├── dataset_utils.py │ ├── dataset_utils_test.py │ ├── dtype_utils.py │ ├── dtype_utils_test.py │ ├── file_utils.py │ ├── file_utils_test.py │ ├── image_dataset_utils.py │ ├── image_dataset_utils_test.py │ ├── image_utils.py │ ├── io_utils.py │ ├── io_utils_test.py │ ├── jax_layer.py │ ├── jax_layer_test.py │ ├── jax_utils.py │ ├── model_visualization.py │ ├── module_utils.py │ ├── naming.py │ ├── naming_test.py │ ├── numerical_utils.py │ ├── numerical_utils_test.py │ ├── progbar.py │ ├── python_utils.py │ ├── python_utils_test.py │ ├── rng_utils.py │ ├── rng_utils_test.py │ ├── sequence_utils.py │ ├── sequence_utils_test.py │ ├── summary_utils.py │ ├── summary_utils_test.py │ ├── text_dataset_utils.py │ ├── text_dataset_utils_test.py │ ├── tf_utils.py │ ├── timeseries_dataset_utils.py │ ├── timeseries_dataset_utils_test.py │ ├── torch_utils.py │ ├── torch_utils_test.py │ ├── traceback_utils.py │ ├── tracking.py │ └── tracking_test.py │ ├── version.py │ ├── visualization │ ├── __init__.py │ ├── draw_bounding_boxes.py │ ├── draw_segmentation_masks.py │ ├── plot_bounding_box_gallery.py │ ├── plot_image_gallery.py │ └── plot_segmentation_mask_gallery.py │ └── wrappers │ ├── __init__.py │ ├── fixes.py │ ├── sklearn_test.py │ ├── sklearn_wrapper.py │ └── utils.py ├── pip_build.py ├── pyproject.toml ├── requirements-common.txt ├── requirements-jax-cuda.txt ├── requirements-tensorflow-cuda.txt ├── requirements-torch-cuda.txt ├── requirements.txt └── shell ├── api_gen.sh └── format.sh /.devcontainer/README.md: -------------------------------------------------------------------------------- 1 | # Dev container configurations 2 | 3 | This directory contains the configuration for dev containers, which is used to 4 | initialize the development environment in **Codespaces**, **Visual Studio 5 | Code**, and **JetBrains IDEs**. The environment is installed with all the 6 | necessary dependencies for development and is ready for linting, formatting, and 7 | running tests. 8 | 9 | * **GitHub Codespaces**. Create a codespace for the repo by clicking 10 | the "Code" button on the main page of the repo, selecting the "Codespaces" 11 | tab, and clicking the "+". The configurations will automatically be used. 12 | Follow 13 | [this guide](https://docs.github.com/en/codespaces/developing-in-a-codespace/creating-a-codespace-for-a-repository) 14 | for more details. 15 | 16 | * **Visual Studio Code**. Open the root folder of the repo in VS Code. A 17 | notification will pop up to open it in a dev container with the 18 | configuration. Follow 19 | [this guide](https://code.visualstudio.com/docs/devcontainers/tutorial) 20 | for more details. 21 | 22 | * **JetBrains IDEs**. Open the `.devcontainer/devcontainer.json` in your 23 | JetBrains IDE. Click the docker icon to create a dev container. 24 | Follow 25 | [this guide](https://www.jetbrains.com/help/idea/connect-to-devcontainer.html) 26 | for more details. 
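Once the dev container has finished building (via `setup.sh` and the `postCreateCommand` in `devcontainer.json` below), a quick way to confirm the environment is usable is to import Keras and run a tiny model. The snippet below is a minimal sketch, not a repository file; the backend name is only an assumed example, and any supported backend can be substituted.

```python
# Sanity-check sketch for the dev container environment (not a repo file).
import os

# The backend must be selected before `import keras`; "tensorflow" here is
# just an assumed example value.
os.environ.setdefault("KERAS_BACKEND", "tensorflow")

import numpy as np

import keras

print("Keras version:", keras.__version__)
print("Active backend:", keras.config.backend())

# Build and call a tiny model to confirm the installation works end to end.
model = keras.Sequential(
    [keras.layers.Input(shape=(4,)), keras.layers.Dense(2)]
)
print("Output shape:", model(np.zeros((1, 4))).shape)
```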
-------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "image": "mcr.microsoft.com/vscode/devcontainers/python:3.10", 3 | "postCreateCommand": "sh ./.devcontainer/setup.sh && pip install -r requirements.txt", 4 | "customizations": { 5 | "vscode": { 6 | "settings": { 7 | "python.testing.pytestEnabled": true, 8 | "editor.formatOnSave": true, 9 | "editor.codeActionsOnSave": { 10 | "source.organizeImports": true 11 | }, 12 | "[python]": { 13 | "editor.defaultFormatter": "charliermarsh.ruff" 14 | }, 15 | "editor.rulers": [ 16 | 80 17 | ] 18 | }, 19 | "extensions": [ 20 | "charliermarsh.ruff", 21 | "ms-python.python" 22 | ] 23 | } 24 | }, 25 | "features": { 26 | "ghcr.io/devcontainers/features/github-cli:1": {} 27 | } 28 | } -------------------------------------------------------------------------------- /.devcontainer/setup.sh: -------------------------------------------------------------------------------- 1 | sudo pip install --upgrade pip 2 | sudo pip install -r requirements.txt 3 | echo "bash shell/lint.sh" > .git/hooks/pre-commit 4 | chmod a+x .git/hooks/pre-commit 5 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "github-actions" 9 | directory: "/" 10 | schedule: 11 | interval: "monthly" 12 | groups: 13 | github-actions: 14 | patterns: 15 | - "*" 16 | - package-ecosystem: "pip" 17 | directory: "/" 18 | schedule: 19 | interval: "monthly" 20 | groups: 21 | python: 22 | patterns: 23 | - "*" 24 | ignore: 25 | # TODO: ignore all updates for JAX GPU due to cuda version issue 26 | - dependency-name: "jax[cuda12_pip]" -------------------------------------------------------------------------------- /.github/workflows/auto-assignment.yaml: -------------------------------------------------------------------------------- 1 | name: auto-assignment 2 | on: 3 | issues: 4 | types: 5 | - opened 6 | 7 | permissions: 8 | contents: read 9 | issues: write 10 | pull-requests: write 11 | 12 | jobs: 13 | welcome: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v4 17 | - uses: actions/github-script@v7 18 | with: 19 | script: | 20 | const script = require('./\.github/workflows/scripts/auto-assignment.js') 21 | script({github, context}) -------------------------------------------------------------------------------- /.github/workflows/config/jax/keras.json: -------------------------------------------------------------------------------- 1 | { 2 | "floatx": "float32", 3 | "epsilon": 1e-07, 4 | "backend": "jax", 5 | "image_data_format": "channels_last" 6 | } 7 | -------------------------------------------------------------------------------- /.github/workflows/config/numpy/keras.json: -------------------------------------------------------------------------------- 1 | { 2 | "floatx": "float32", 3 | "epsilon": 1e-07, 4 | "backend": "numpy", 5 | "image_data_format": "channels_last" 6 | } 7 | 
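The per-backend `keras.json` files above (and the ones that follow) carry the same fields as the standard Keras configuration file, which is normally read from `$KERAS_HOME/keras.json` (defaulting to `~/.keras/keras.json`). The sketch below shows how those values surface at runtime through `keras.config`; it is illustrative only and assumes a default installation.

```python
# Inspect and (partially) override the settings controlled by keras.json.
import keras

print(keras.config.floatx())             # e.g. "float32"
print(keras.config.epsilon())            # e.g. 1e-07
print(keras.config.backend())            # e.g. "jax", "numpy", "tensorflow", ...
print(keras.config.image_data_format())  # "channels_last" or "channels_first"

# These can be changed for the current process:
keras.config.set_floatx("float64")
keras.config.set_epsilon(1e-6)
keras.config.set_image_data_format("channels_first")

# The backend itself is normally fixed by keras.json or the KERAS_BACKEND
# environment variable before Keras is imported.
```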
-------------------------------------------------------------------------------- /.github/workflows/config/openvino/keras.json: -------------------------------------------------------------------------------- 1 | { 2 | "floatx": "float32", 3 | "epsilon": 1e-07, 4 | "backend": "openvino", 5 | "image_data_format": "channels_last" 6 | } 7 | -------------------------------------------------------------------------------- /.github/workflows/config/tensorflow/keras.json: -------------------------------------------------------------------------------- 1 | { 2 | "floatx": "float32", 3 | "epsilon": 1e-07, 4 | "backend": "tensorflow", 5 | "image_data_format": "channels_last" 6 | } 7 | -------------------------------------------------------------------------------- /.github/workflows/config/torch/keras.json: -------------------------------------------------------------------------------- 1 | { 2 | "floatx": "float32", 3 | "epsilon": 1e-07, 4 | "backend": "torch", 5 | "image_data_format": "channels_first" 6 | } 7 | -------------------------------------------------------------------------------- /.github/workflows/labeler.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Google LLC. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | # This workflow automatically identifies issues and pull requests (PRs) and add the 17 | # appropriate label as per defined rules. 18 | # First Labeler workflow: It searches for the keyword "Gemma" (case-insensitive) in both the title 19 | # and description of the issue/PR. If a match is found, the workflow adds the label 'Gemma' to the issue/PR. 20 | 21 | name: 'Labeler' 22 | on: 23 | issues: 24 | types: [edited,opened] 25 | pull_request_target: 26 | types: [opened, edited] 27 | 28 | permissions: 29 | contents: read 30 | issues: write 31 | pull-requests: write 32 | 33 | jobs: 34 | welcome: 35 | runs-on: ubuntu-latest 36 | steps: 37 | - uses: actions/checkout@v4 38 | - uses: actions/github-script@v7 39 | with: 40 | script: | 41 | const script = require('./\.github/workflows/scripts/labeler.js') 42 | script({github, context}) -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | *.pyc 3 | .vscode-test 4 | __pycache__ 5 | **/.vscode-test/** 6 | **/.vscode test/** 7 | **/.vscode-smoke/** 8 | **/.venv*/ 9 | bin/** 10 | build/** 11 | obj/** 12 | .pytest_cache 13 | tmp/** 14 | .vs/ 15 | dist/** 16 | **/*.egg-info/* 17 | .vscode 18 | examples/**/*.jpg 19 | .python-version 20 | .coverage 21 | *coverage.xml 22 | .ruff_cache -------------------------------------------------------------------------------- /.kokoro/README.md: -------------------------------------------------------------------------------- 1 | CI to run on PR and merge to Master. 
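Note that the torch CI configuration above sets `"image_data_format": "channels_first"` while the other backends use `"channels_last"`, so code exercised under all of these configs has to build tensor shapes conditionally. A minimal sketch of that pattern follows; the layer sizes are arbitrary illustrative values, not taken from the repository.

```python
# Build an image model whose input shape follows the configured data format.
import keras
from keras import layers

if keras.config.image_data_format() == "channels_first":
    input_shape = (3, 32, 32)  # (channels, height, width)
else:
    input_shape = (32, 32, 3)  # (height, width, channels)

model = keras.Sequential(
    [
        layers.Input(shape=input_shape),
        layers.Conv2D(8, kernel_size=3, activation="relu"),
        layers.GlobalAveragePooling2D(),
        layers.Dense(10, activation="softmax"),
    ]
)
model.summary()
```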
-------------------------------------------------------------------------------- /.kokoro/github/ubuntu/gpu/jax/continuous.cfg: -------------------------------------------------------------------------------- 1 | build_file: "keras/.kokoro/github/ubuntu/gpu/build.sh" 2 | 3 | action { 4 | define_artifacts { 5 | regex: "**/sponge_log.log" 6 | regex: "**/sponge_log.xml" 7 | } 8 | } 9 | 10 | env_vars: { 11 | key: "KERAS_BACKEND" 12 | value: "jax" 13 | } 14 | 15 | # Set timeout to 60 mins from default 180 mins 16 | timeout_mins: 60 -------------------------------------------------------------------------------- /.kokoro/github/ubuntu/gpu/jax/presubmit.cfg: -------------------------------------------------------------------------------- 1 | build_file: "keras/.kokoro/github/ubuntu/gpu/build.sh" 2 | 3 | action { 4 | define_artifacts { 5 | regex: "**/sponge_log.log" 6 | regex: "**/sponge_log.xml" 7 | } 8 | } 9 | 10 | env_vars: { 11 | key: "KERAS_BACKEND" 12 | value: "jax" 13 | } 14 | 15 | # Set timeout to 60 mins from default 180 mins 16 | timeout_mins: 60 -------------------------------------------------------------------------------- /.kokoro/github/ubuntu/gpu/tensorflow/continuous.cfg: -------------------------------------------------------------------------------- 1 | build_file: "keras/.kokoro/github/ubuntu/gpu/build.sh" 2 | 3 | action { 4 | define_artifacts { 5 | regex: "**/sponge_log.log" 6 | regex: "**/sponge_log.xml" 7 | } 8 | } 9 | 10 | env_vars: { 11 | key: "KERAS_BACKEND" 12 | value: "tensorflow" 13 | } 14 | 15 | # Set timeout to 60 mins from default 180 mins 16 | timeout_mins: 60 -------------------------------------------------------------------------------- /.kokoro/github/ubuntu/gpu/tensorflow/presubmit.cfg: -------------------------------------------------------------------------------- 1 | build_file: "keras/.kokoro/github/ubuntu/gpu/build.sh" 2 | 3 | action { 4 | define_artifacts { 5 | regex: "**/sponge_log.log" 6 | regex: "**/sponge_log.xml" 7 | } 8 | } 9 | 10 | env_vars: { 11 | key: "KERAS_BACKEND" 12 | value: "tensorflow" 13 | } 14 | 15 | # Set timeout to 60 mins from default 180 mins 16 | timeout_mins: 60 -------------------------------------------------------------------------------- /.kokoro/github/ubuntu/gpu/torch/continuous.cfg: -------------------------------------------------------------------------------- 1 | build_file: "keras/.kokoro/github/ubuntu/gpu/build.sh" 2 | 3 | action { 4 | define_artifacts { 5 | regex: "**/sponge_log.log" 6 | regex: "**/sponge_log.xml" 7 | } 8 | } 9 | 10 | env_vars: { 11 | key: "KERAS_BACKEND" 12 | value: "torch" 13 | } 14 | 15 | # Set timeout to 60 mins from default 180 mins 16 | timeout_mins: 60 -------------------------------------------------------------------------------- /.kokoro/github/ubuntu/gpu/torch/presubmit.cfg: -------------------------------------------------------------------------------- 1 | build_file: "keras/.kokoro/github/ubuntu/gpu/build.sh" 2 | 3 | action { 4 | define_artifacts { 5 | regex: "**/sponge_log.log" 6 | regex: "**/sponge_log.xml" 7 | } 8 | } 9 | 10 | env_vars: { 11 | key: "KERAS_BACKEND" 12 | value: "torch" 13 | } 14 | 15 | # Set timeout to 60 mins from default 180 mins 16 | timeout_mins: 60 -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: local 3 | hooks: 4 | - id: api-gen 5 | name: api_gen 6 | entry: | 7 | bash shell/api_gen.sh 8 | git status 9 | 
clean=$(git status | grep "nothing to commit") 10 | if [ -z "$clean" ]; then 11 | echo "Please run shell/api_gen.sh to generate API." 12 | exit 1 13 | fi 14 | language: system 15 | stages: [pre-commit, manual] 16 | require_serial: true 17 | - repo: https://github.com/astral-sh/ruff-pre-commit 18 | rev: v0.9.2 19 | hooks: 20 | - id: ruff 21 | args: [--config, pyproject.toml, --fix, .] 22 | stages: [pre-commit] 23 | - id: ruff-format 24 | args: [--config, pyproject.toml, .] 25 | stages: [pre-commit] 26 | - id: ruff 27 | args: [--config, pyproject.toml, .] 28 | stages: [manual] 29 | - id: ruff-format 30 | args: ["--check", --config, pyproject.toml, .] 31 | stages: [manual] -------------------------------------------------------------------------------- /benchmarks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/benchmarks/__init__.py -------------------------------------------------------------------------------- /benchmarks/layer_benchmark/README.md: -------------------------------------------------------------------------------- 1 | # Benchmark the layer performance 2 | 3 | This directory contains benchmarks to compare the performance of 4 | `keras.layers.XXX` and `tf.keras.layers.XXX`. We compare the performance of 5 | both the forward pass and train step (forward & backward pass). 6 | 7 | To run the benchmark, use the command below and change the flags according to 8 | your target: 9 | 10 | ```shell 11 | python3 -m benchmarks.layer_benchmark.conv_benchmark \ 12 | --benchmark_name=benchmark_conv2D \ 13 | --num_samples=2048 \ 14 | --batch_size=256 \ 15 | --jit_compile=True 16 | ``` -------------------------------------------------------------------------------- /benchmarks/layer_benchmark/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/benchmarks/layer_benchmark/__init__.py -------------------------------------------------------------------------------- /benchmarks/model_benchmark/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/benchmarks/model_benchmark/__init__.py -------------------------------------------------------------------------------- /benchmarks/model_benchmark/benchmark_utils.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import keras 4 | 5 | 6 | class BenchmarkMetricsCallback(keras.callbacks.Callback): 7 | def __init__(self, start_batch=1, stop_batch=None): 8 | self.start_batch = start_batch 9 | self.stop_batch = stop_batch 10 | 11 | # Store the throughput of each epoch. 
12 | self.state = {"throughput": []} 13 | 14 | def on_train_batch_begin(self, batch, logs=None): 15 | if batch == self.start_batch: 16 | self.state["epoch_begin_time"] = time.time() 17 | 18 | def on_train_batch_end(self, batch, logs=None): 19 | if batch == self.stop_batch: 20 | epoch_end_time = time.time() 21 | throughput = (self.stop_batch - self.start_batch + 1) / ( 22 | epoch_end_time - self.state["epoch_begin_time"] 23 | ) 24 | self.state["throughput"].append(throughput) 25 | -------------------------------------------------------------------------------- /benchmarks/torch_ctl_benchmark/README.md: -------------------------------------------------------------------------------- 1 | # Benchmark the performance of the Torch custom training loop 2 | 3 | This directory contains benchmarks to compare the performance of a Keras model 4 | and an equivalent Torch model while using the same Torch custom training loop. 5 | 6 | The purpose of the benchmark is to understand the performance difference 7 | resulting from the modeling API choice (Keras or Torch). 8 | 9 | To run the benchmark, use the command below and change the module to your target: 10 | 11 | ```shell 12 | python3 -m benchmarks.torch_ctl_benchmark.conv_model_benchmark 13 | ``` -------------------------------------------------------------------------------- /benchmarks/torch_ctl_benchmark/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/benchmarks/torch_ctl_benchmark/__init__.py -------------------------------------------------------------------------------- /benchmarks/torch_ctl_benchmark/benchmark_utils.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import numpy as np 4 | import torch 5 | 6 | 7 | def train_loop(model, train_loader, num_epochs, optimizer, loss_fn, framework): 8 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 9 | model.to(device) 10 | start = None 11 | average_batch_time_per_epoch = [] 12 | for _ in range(num_epochs): 13 | running_loss = 0.0 14 | for batch_idx, (inputs, targets) in enumerate(train_loader): 15 | if batch_idx == 1: 16 | start = time.time() 17 | inputs = inputs.to(device) 18 | targets = targets.to(device) 19 | # Forward pass 20 | outputs = model(inputs) 21 | loss = loss_fn(outputs, targets) 22 | 23 | # Backward and optimize 24 | optimizer.zero_grad() 25 | loss.backward() 26 | optimizer.step() 27 | 28 | running_loss += loss.item() 29 | 30 | end = time.time() 31 | average_batch_time_per_epoch.append( 32 | (end - start) / (len(train_loader) - 1) 33 | ) 34 | average_time = np.mean(average_batch_time_per_epoch) 35 | 36 | print(f"Time per batch in {framework}: {average_time:.2f}") 37 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | status: 3 | project: 4 | default: 5 | # `auto` compares coverage with the base-commit 6 | target: auto 7 | 8 | patch: 9 | default: 10 | target: auto 11 | 12 | comment: 13 | layout: "header, reach, diff, flags, files" 14 | behavior: default 15 | require_changes: no 16 | require_base: no 17 | require_head: yes 18 | show_carryforward_flags: yes 19 | 20 | flag_management: 21 | default_rules: 22 | carryforward: false 23 | statuses: 24 | - type: project 25 | target: auto 26 | - type: patch 27 | target: auto 28 | individual_flags: 29 | - name: keras 30 
| paths: 31 | - keras 32 | - name: keras.applications 33 | paths: 34 | - keras/applications 35 | carryforward: true 36 | -------------------------------------------------------------------------------- /examples/demo_functional.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from keras import Model 4 | from keras import layers 5 | from keras import losses 6 | from keras import metrics 7 | from keras import optimizers 8 | import keras 9 | 10 | keras.config.disable_traceback_filtering() 11 | 12 | inputs = layers.Input((100,)) 13 | x = layers.Dense(512, activation="relu")(inputs) 14 | residual = x 15 | x = layers.Dense(512, activation="relu")(x) 16 | x = layers.Dense(512, activation="relu")(x) 17 | x += residual 18 | x = layers.Dense(512, activation="relu")(x) 19 | residual = x 20 | x = layers.Dense(512, activation="relu")(x) 21 | x = layers.Dense(512, activation="relu")(x) 22 | x += residual 23 | residual = x 24 | x = layers.Dense(512, activation="relu")(x) 25 | x = layers.Dense(512, activation="relu")(x) 26 | x += residual 27 | outputs = layers.Dense(16)(x) 28 | model = Model(inputs, outputs) 29 | 30 | model.summary() 31 | 32 | x = np.random.random((50000, 100)) 33 | y = np.random.random((50000, 16)) 34 | batch_size = 32 35 | epochs = 5 36 | 37 | model.compile( 38 | optimizer=optimizers.Adam(learning_rate=0.001), 39 | loss=losses.MeanSquaredError(), 40 | metrics=[ 41 | metrics.CategoricalAccuracy(name="acc"), 42 | metrics.MeanSquaredError(name="mse"), 43 | ], 44 | ) 45 | 46 | print("\nTrain model") 47 | history = model.fit( 48 | x, y, batch_size=batch_size, epochs=epochs, validation_split=0.2 49 | ) 50 | print("\nHistory:") 51 | print(history.history) 52 | 53 | print("\nEvaluate model") 54 | scores = model.evaluate(x, y, return_dict=True) 55 | print(scores) 56 | 57 | print("\nRun inference") 58 | pred = model.predict(x) 59 | print(f"Inferred output shape {pred.shape}") 60 | -------------------------------------------------------------------------------- /examples/demo_mnist_convnet.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import keras 3 | from keras import layers 4 | from keras.utils import to_categorical 5 | 6 | # Model / data parameters 7 | num_classes = 10 8 | input_shape = (28, 28, 1) 9 | 10 | # Load the data and split it between train and test sets 11 | (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() 12 | 13 | # Scale images to the [0, 1] range 14 | x_train = x_train.astype("float32") / 255 15 | x_test = x_test.astype("float32") / 255 16 | # Make sure images have shape (28, 28, 1) 17 | x_train = np.expand_dims(x_train, -1) 18 | x_test = np.expand_dims(x_test, -1) 19 | print("x_train shape:", x_train.shape) 20 | print(x_train.shape[0], "train samples") 21 | print(x_test.shape[0], "test samples") 22 | 23 | 24 | # convert class vectors to binary class matrices 25 | y_train = to_categorical(y_train, num_classes) 26 | y_test = to_categorical(y_test, num_classes) 27 | 28 | batch_size = 128 29 | epochs = 3 30 | 31 | model = keras.Sequential( 32 | [ 33 | layers.Input(shape=input_shape), 34 | layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), 35 | layers.MaxPooling2D(pool_size=(2, 2)), 36 | layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), 37 | layers.MaxPooling2D(pool_size=(2, 2)), 38 | layers.Flatten(), 39 | layers.Dropout(0.5), 40 | layers.Dense(num_classes, activation="softmax"), 41 | ] 42 | ) 43 | 44 | 
model.summary() 45 | 46 | model.compile( 47 | loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"] 48 | ) 49 | 50 | model.fit( 51 | x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1 52 | ) 53 | 54 | score = model.evaluate(x_test, y_test, verbose=0) 55 | print("Test loss:", score[0]) 56 | print("Test accuracy:", score[1]) 57 | -------------------------------------------------------------------------------- /examples/demo_subclass.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from keras import Model 4 | from keras import layers 5 | from keras import losses 6 | from keras import metrics 7 | from keras import optimizers 8 | 9 | 10 | class MyModel(Model): 11 | def __init__(self, hidden_dim, output_dim): 12 | super().__init__() 13 | self.dense1 = layers.Dense(hidden_dim, activation="relu") 14 | self.dense2 = layers.Dense(hidden_dim, activation="relu") 15 | self.dense3 = layers.Dense(output_dim) 16 | 17 | def call(self, x): 18 | x = self.dense1(x) 19 | x = self.dense2(x) 20 | return self.dense3(x) 21 | 22 | 23 | model = MyModel(hidden_dim=256, output_dim=16) 24 | 25 | x = np.random.random((50000, 128)) 26 | y = np.random.random((50000, 16)) 27 | batch_size = 32 28 | epochs = 6 29 | 30 | model.compile( 31 | optimizer=optimizers.SGD(learning_rate=0.001), 32 | loss=losses.MeanSquaredError(), 33 | metrics=[metrics.MeanSquaredError()], 34 | ) 35 | history = model.fit( 36 | x, y, batch_size=batch_size, epochs=epochs, validation_split=0.2 37 | ) 38 | 39 | print("History:") 40 | print(history.history) 41 | 42 | model.summary() 43 | -------------------------------------------------------------------------------- /integration_tests/basic_full_flow.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | import keras 5 | from keras.src import layers 6 | from keras.src import losses 7 | from keras.src import metrics 8 | from keras.src import optimizers 9 | from keras.src import testing 10 | 11 | 12 | class MyModel(keras.Model): 13 | def __init__(self, hidden_dim, output_dim, **kwargs): 14 | super().__init__(**kwargs) 15 | self.hidden_dim = hidden_dim 16 | self.output_dim = output_dim 17 | self.dense1 = layers.Dense(hidden_dim, activation="relu") 18 | self.dense2 = layers.Dense(hidden_dim, activation="relu") 19 | self.dense3 = layers.Dense(output_dim) 20 | 21 | def call(self, x): 22 | x = self.dense1(x) 23 | x = self.dense2(x) 24 | return self.dense3(x) 25 | 26 | 27 | class BasicFlowTest(testing.TestCase): 28 | @pytest.mark.requires_trainable_backend 29 | def test_basic_fit(self): 30 | model = MyModel(hidden_dim=2, output_dim=1) 31 | 32 | x = np.random.random((128, 4)) 33 | y = np.random.random((128, 4)) 34 | batch_size = 32 35 | epochs = 3 36 | 37 | model.compile( 38 | optimizer=optimizers.SGD(learning_rate=0.001), 39 | loss=losses.MeanSquaredError(), 40 | metrics=[metrics.MeanSquaredError()], 41 | ) 42 | output_before_fit = model(x) 43 | model.fit( 44 | x, y, batch_size=batch_size, epochs=epochs, validation_split=0.2 45 | ) 46 | output_after_fit = model(x) 47 | 48 | self.assertNotAllClose(output_before_fit, output_after_fit) 49 | 50 | def test_basic_fit_no_training(self): 51 | model = MyModel(hidden_dim=2, output_dim=1) 52 | x = np.random.random((128, 4)) 53 | model.predict(x) 54 | model(x) 55 | -------------------------------------------------------------------------------- 
/integration_tests/dataset_tests/boston_housing_test.py: -------------------------------------------------------------------------------- 1 | from keras.src import testing 2 | from keras.src.datasets import boston_housing 3 | 4 | 5 | class BostonHousingTest(testing.TestCase): 6 | def test_load_data(self): 7 | (x_train, y_train), (x_test, y_test) = boston_housing.load_data() 8 | self.assertEqual(x_train.shape[1], 13) 9 | self.assertEqual(x_train.shape[0] + x_test.shape[0], 506) 10 | 11 | def test_seed_reproducibility(self): 12 | seed = 123 13 | first_load = boston_housing.load_data(seed=seed) 14 | second_load = boston_housing.load_data(seed=seed) 15 | self.assertAllClose(first_load[0][0], second_load[0][0]) 16 | self.assertAllClose(first_load[1][0], second_load[1][0]) 17 | 18 | def test_invalid_test_split(self): 19 | with self.assertRaises(AssertionError): 20 | boston_housing.load_data(test_split=-0.1) 21 | with self.assertRaises(AssertionError): 22 | boston_housing.load_data(test_split=1.0) 23 | -------------------------------------------------------------------------------- /integration_tests/dataset_tests/california_housing_test.py: -------------------------------------------------------------------------------- 1 | from keras.src import testing 2 | from keras.src.datasets import california_housing 3 | 4 | 5 | class CaliforniaHousingTest(testing.TestCase): 6 | def test_load_data_large(self): 7 | (x_train, y_train), (x_test, y_test) = california_housing.load_data( 8 | version="large" 9 | ) 10 | self.assertEqual(x_train.shape[1], 8) 11 | # Ensure the dataset contains 20,640 samples as documented 12 | self.assertEqual(x_train.shape[0] + x_test.shape[0], 20640) 13 | 14 | def test_load_data_small(self): 15 | (x_train, y_train), (x_test, y_test) = california_housing.load_data( 16 | version="small" 17 | ) 18 | self.assertEqual(x_train.shape[1], 8) 19 | # Ensure the small dataset contains 600 samples as documented 20 | self.assertEqual(x_train.shape[0] + x_test.shape[0], 600) 21 | 22 | def test_invalid_version(self): 23 | with self.assertRaises(ValueError): 24 | california_housing.load_data(version="invalid_version") 25 | 26 | def test_seed_reproducibility(self): 27 | # Ensure the data is reproducible with the same seed 28 | seed = 123 29 | first_load = california_housing.load_data(version="large", seed=seed) 30 | second_load = california_housing.load_data(version="large", seed=seed) 31 | self.assertAllClose(first_load[0][0], second_load[0][0]) 32 | self.assertAllClose(first_load[1][0], second_load[1][0]) 33 | -------------------------------------------------------------------------------- /integration_tests/dataset_tests/cifar100_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from keras.src import testing 4 | from keras.src.datasets import cifar100 5 | 6 | 7 | class Cifar100LoadDataTest(testing.TestCase): 8 | def test_shapes_fine_label_mode(self): 9 | (x_train, y_train), (x_test, y_test) = cifar100.load_data( 10 | label_mode="fine" 11 | ) 12 | self.assertEqual(x_train.shape, (50000, 32, 32, 3)) 13 | self.assertEqual(y_train.shape, (50000, 1)) 14 | self.assertEqual(x_test.shape, (10000, 32, 32, 3)) 15 | self.assertEqual(y_test.shape, (10000, 1)) 16 | 17 | def test_shapes_coarse_label_mode(self): 18 | (x_train, y_train), (x_test, y_test) = cifar100.load_data( 19 | label_mode="coarse" 20 | ) 21 | self.assertEqual(x_train.shape, (50000, 32, 32, 3)) 22 | self.assertEqual(y_train.shape, (50000, 1)) 23 | 
self.assertEqual(x_test.shape, (10000, 32, 32, 3)) 24 | self.assertEqual(y_test.shape, (10000, 1)) 25 | 26 | def test_dtypes(self): 27 | (x_train, y_train), (x_test, y_test) = cifar100.load_data() 28 | self.assertEqual(x_train.dtype, np.uint8) 29 | self.assertEqual(y_train.dtype, np.int64) 30 | self.assertEqual(x_test.dtype, np.uint8) 31 | self.assertEqual(y_test.dtype, np.int64) 32 | 33 | def test_invalid_label_mode(self): 34 | with self.assertRaises(ValueError): 35 | cifar100.load_data(label_mode="invalid") 36 | -------------------------------------------------------------------------------- /integration_tests/dataset_tests/cifar10_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from keras.src import testing 4 | from keras.src.datasets import cifar10 5 | 6 | 7 | class Cifar10LoadDataTest(testing.TestCase): 8 | def test_x_train_shape(self): 9 | (x_train, _), _ = cifar10.load_data() 10 | self.assertEqual(x_train.shape, (50000, 32, 32, 3)) 11 | 12 | def test_y_train_shape(self): 13 | (_, y_train), _ = cifar10.load_data() 14 | self.assertEqual(y_train.shape, (50000, 1)) 15 | 16 | def test_x_test_shape(self): 17 | _, (x_test, _) = cifar10.load_data() 18 | self.assertEqual(x_test.shape, (10000, 32, 32, 3)) 19 | 20 | def test_y_test_shape(self): 21 | _, (_, y_test) = cifar10.load_data() 22 | self.assertEqual(y_test.shape, (10000, 1)) 23 | 24 | def test_x_train_dtype(self): 25 | (x_train, _), _ = cifar10.load_data() 26 | self.assertEqual(x_train.dtype, np.uint8) 27 | 28 | def test_y_train_dtype(self): 29 | (_, y_train), _ = cifar10.load_data() 30 | self.assertEqual(y_train.dtype, np.uint8) 31 | 32 | def test_x_test_dtype(self): 33 | _, (x_test, _) = cifar10.load_data() 34 | self.assertEqual(x_test.dtype, np.uint8) 35 | 36 | def test_y_test_dtype(self): 37 | _, (_, y_test) = cifar10.load_data() 38 | self.assertEqual(y_test.dtype, np.uint8) 39 | -------------------------------------------------------------------------------- /integration_tests/dataset_tests/fashion_mnist_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from keras.src import testing 4 | from keras.src.datasets import fashion_mnist 5 | 6 | 7 | class FashionMnistLoadDataTest(testing.TestCase): 8 | def test_x_train_shape(self): 9 | (x_train, _), _ = fashion_mnist.load_data() 10 | self.assertEqual(x_train.shape, (60000, 28, 28)) 11 | 12 | def test_y_train_shape(self): 13 | (_, y_train), _ = fashion_mnist.load_data() 14 | self.assertEqual(y_train.shape, (60000,)) 15 | 16 | def test_x_test_shape(self): 17 | _, (x_test, _) = fashion_mnist.load_data() 18 | self.assertEqual(x_test.shape, (10000, 28, 28)) 19 | 20 | def test_y_test_shape(self): 21 | _, (_, y_test) = fashion_mnist.load_data() 22 | self.assertEqual(y_test.shape, (10000,)) 23 | 24 | def test_x_train_dtype(self): 25 | (x_train, _), _ = fashion_mnist.load_data() 26 | self.assertEqual(x_train.dtype, np.uint8) 27 | 28 | def test_y_train_dtype(self): 29 | (_, y_train), _ = fashion_mnist.load_data() 30 | self.assertEqual(y_train.dtype, np.uint8) 31 | 32 | def test_x_test_dtype(self): 33 | _, (x_test, _) = fashion_mnist.load_data() 34 | self.assertEqual(x_test.dtype, np.uint8) 35 | 36 | def test_y_test_dtype(self): 37 | _, (_, y_test) = fashion_mnist.load_data() 38 | self.assertEqual(y_test.dtype, np.uint8) 39 | -------------------------------------------------------------------------------- /integration_tests/dataset_tests/mnist_test.py: 
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from keras.src import testing 4 | from keras.src.datasets import mnist 5 | 6 | 7 | class MnistLoadDataTest(testing.TestCase): 8 | def test_x_train_shape(self): 9 | (x_train, _), _ = mnist.load_data() 10 | self.assertEqual(x_train.shape, (60000, 28, 28)) 11 | 12 | def test_y_train_shape(self): 13 | (_, y_train), _ = mnist.load_data() 14 | self.assertEqual(y_train.shape, (60000,)) 15 | 16 | def test_x_test_shape(self): 17 | _, (x_test, _) = mnist.load_data() 18 | self.assertEqual(x_test.shape, (10000, 28, 28)) 19 | 20 | def test_y_test_shape(self): 21 | _, (_, y_test) = mnist.load_data() 22 | self.assertEqual(y_test.shape, (10000,)) 23 | 24 | def test_x_train_dtype(self): 25 | (x_train, _), _ = mnist.load_data() 26 | self.assertEqual(x_train.dtype, np.uint8) 27 | 28 | def test_y_train_dtype(self): 29 | (_, y_train), _ = mnist.load_data() 30 | self.assertEqual(y_train.dtype, np.uint8) 31 | 32 | def test_x_test_dtype(self): 33 | _, (x_test, _) = mnist.load_data() 34 | self.assertEqual(x_test.dtype, np.uint8) 35 | 36 | def test_y_test_dtype(self): 37 | _, (_, y_test) = mnist.load_data() 38 | self.assertEqual(y_test.dtype, np.uint8) 39 | -------------------------------------------------------------------------------- /integration_tests/tf_custom_fit_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | import keras 5 | 6 | 7 | def test_custom_fit(): 8 | class CustomModel(keras.Model): 9 | def __init__(self, *args, **kwargs): 10 | super().__init__(*args, **kwargs) 11 | self.loss_tracker = keras.metrics.Mean(name="loss") 12 | self.mae_metric = keras.metrics.MeanAbsoluteError(name="mae") 13 | self.loss_fn = keras.losses.MeanSquaredError() 14 | 15 | def train_step(self, data): 16 | x, y = data 17 | with tf.GradientTape() as tape: 18 | y_pred = self(x, training=True) 19 | loss = self.loss_fn(y, y_pred) 20 | trainable_vars = self.trainable_variables 21 | gradients = tape.gradient(loss, trainable_vars) 22 | self.optimizer.apply(gradients, trainable_vars) 23 | self.loss_tracker.update_state(loss) 24 | self.mae_metric.update_state(y, y_pred) 25 | return { 26 | "loss": self.loss_tracker.result(), 27 | "mae": self.mae_metric.result(), 28 | } 29 | 30 | @property 31 | def metrics(self): 32 | return [self.loss_tracker, self.mae_metric] 33 | 34 | inputs = keras.Input(shape=(32,)) 35 | outputs = keras.layers.Dense(1)(inputs) 36 | model = CustomModel(inputs, outputs) 37 | model.compile(optimizer="adam") 38 | x = np.random.random((64, 32)) 39 | y = np.random.random((64, 1)) 40 | history = model.fit(x, y, epochs=1) 41 | 42 | assert "loss" in history.history 43 | assert "mae" in history.history 44 | 45 | print("History:") 46 | print(history.history) 47 | 48 | 49 | if __name__ == "__main__": 50 | test_custom_fit() 51 | -------------------------------------------------------------------------------- /integration_tests/torch_custom_fit_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | import keras 5 | 6 | 7 | def test_custom_fit(): 8 | class CustomModel(keras.Model): 9 | def __init__(self, *args, **kwargs): 10 | super().__init__(*args, **kwargs) 11 | self.loss_tracker = keras.metrics.Mean(name="loss") 12 | self.mae_metric = keras.metrics.MeanAbsoluteError(name="mae") 13 | self.loss_fn = keras.losses.MeanSquaredError() 14 | 15 | 
def train_step(self, data): 16 | x, y = data 17 | self.zero_grad() 18 | y_pred = self(x, training=True) 19 | loss = self.loss_fn(y, y_pred) 20 | loss.backward() 21 | trainable_weights = [v for v in self.trainable_weights] 22 | gradients = [v.value.grad for v in trainable_weights] 23 | with torch.no_grad(): 24 | self.optimizer.apply(gradients, trainable_weights) 25 | self.loss_tracker.update_state(loss) 26 | self.mae_metric.update_state(y, y_pred) 27 | return { 28 | "loss": self.loss_tracker.result(), 29 | "mae": self.mae_metric.result(), 30 | } 31 | 32 | @property 33 | def metrics(self): 34 | return [self.loss_tracker, self.mae_metric] 35 | 36 | inputs = keras.Input(shape=(32,)) 37 | outputs = keras.layers.Dense(1)(inputs) 38 | model = CustomModel(inputs, outputs) 39 | model.compile(optimizer="adam") 40 | x = np.random.random((64, 32)) 41 | y = np.random.random((64, 1)) 42 | history = model.fit(x, y, epochs=1) 43 | 44 | assert "loss" in history.history 45 | assert "mae" in history.history 46 | 47 | print("History:") 48 | print(history.history) 49 | 50 | 51 | if __name__ == "__main__": 52 | test_custom_fit() 53 | -------------------------------------------------------------------------------- /integration_tests/torch_workflow_test.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from keras.src import layers 4 | from keras.src import testing 5 | from keras.src.backend.common import KerasVariable 6 | 7 | 8 | class Net(torch.nn.Module): 9 | def __init__(self): 10 | super().__init__() 11 | self.fc1 = layers.Dense(1) 12 | 13 | def forward(self, x): 14 | x = self.fc1(x) 15 | return x 16 | 17 | 18 | class TorchWorkflowTest(testing.TestCase): 19 | def test_keras_layer_in_nn_module(self): 20 | net = Net() 21 | 22 | # Test using Keras layer in a nn.Module. 23 | # Test forward pass 24 | self.assertAllEqual(list(net(torch.empty(100, 10)).shape), [100, 1]) 25 | # Test KerasVariables are added as nn.Parameter. 26 | self.assertLen(list(net.parameters()), 2) 27 | 28 | # Test using KerasVariable as a torch tensor for torch ops. 29 | kernel = net.fc1.kernel 30 | transposed_kernel = torch.transpose(kernel, 0, 1) 31 | self.assertIsInstance(kernel, KerasVariable) 32 | self.assertIsInstance( 33 | torch.mul(kernel, transposed_kernel), torch.Tensor 34 | ) 35 | -------------------------------------------------------------------------------- /keras/__init__.py: -------------------------------------------------------------------------------- 1 | # This file should NEVER be packaged! This is a hack to make "import keras" from 2 | # the base of the repo just import the source files. We'll keep it for compat. 3 | 4 | import os # isort: skip 5 | 6 | # Add everything in /api/ to the module search path. 7 | __path__.append(os.path.join(os.path.dirname(__file__), "api")) # noqa: F405 8 | 9 | from keras.api import * # noqa: F403, E402 10 | from keras.api import __version__ # noqa: E402 11 | 12 | # Don't pollute namespace. 13 | del os 14 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/__init__.py: -------------------------------------------------------------------------------- 1 | from keras._tf_keras import keras 2 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/applications/convnext/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. 
Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.convnext import ConvNeXtBase as ConvNeXtBase 8 | from keras.src.applications.convnext import ConvNeXtLarge as ConvNeXtLarge 9 | from keras.src.applications.convnext import ConvNeXtSmall as ConvNeXtSmall 10 | from keras.src.applications.convnext import ConvNeXtTiny as ConvNeXtTiny 11 | from keras.src.applications.convnext import ConvNeXtXLarge as ConvNeXtXLarge 12 | from keras.src.applications.convnext import ( 13 | decode_predictions as decode_predictions, 14 | ) 15 | from keras.src.applications.convnext import preprocess_input as preprocess_input 16 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/applications/densenet/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.densenet import DenseNet121 as DenseNet121 8 | from keras.src.applications.densenet import DenseNet169 as DenseNet169 9 | from keras.src.applications.densenet import DenseNet201 as DenseNet201 10 | from keras.src.applications.densenet import ( 11 | decode_predictions as decode_predictions, 12 | ) 13 | from keras.src.applications.densenet import preprocess_input as preprocess_input 14 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/applications/efficientnet/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.efficientnet import EfficientNetB0 as EfficientNetB0 8 | from keras.src.applications.efficientnet import EfficientNetB1 as EfficientNetB1 9 | from keras.src.applications.efficientnet import EfficientNetB2 as EfficientNetB2 10 | from keras.src.applications.efficientnet import EfficientNetB3 as EfficientNetB3 11 | from keras.src.applications.efficientnet import EfficientNetB4 as EfficientNetB4 12 | from keras.src.applications.efficientnet import EfficientNetB5 as EfficientNetB5 13 | from keras.src.applications.efficientnet import EfficientNetB6 as EfficientNetB6 14 | from keras.src.applications.efficientnet import EfficientNetB7 as EfficientNetB7 15 | from keras.src.applications.efficientnet import ( 16 | decode_predictions as decode_predictions, 17 | ) 18 | from keras.src.applications.efficientnet import ( 19 | preprocess_input as preprocess_input, 20 | ) 21 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/applications/efficientnet_v2/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.applications.efficientnet_v2 import ( 8 | EfficientNetV2B0 as EfficientNetV2B0, 9 | ) 10 | from keras.src.applications.efficientnet_v2 import ( 11 | EfficientNetV2B1 as EfficientNetV2B1, 12 | ) 13 | from keras.src.applications.efficientnet_v2 import ( 14 | EfficientNetV2B2 as EfficientNetV2B2, 15 | ) 16 | from keras.src.applications.efficientnet_v2 import ( 17 | EfficientNetV2B3 as EfficientNetV2B3, 18 | ) 19 | from keras.src.applications.efficientnet_v2 import ( 20 | EfficientNetV2L as EfficientNetV2L, 21 | ) 22 | from keras.src.applications.efficientnet_v2 import ( 23 | EfficientNetV2M as EfficientNetV2M, 24 | ) 25 | from keras.src.applications.efficientnet_v2 import ( 26 | EfficientNetV2S as EfficientNetV2S, 27 | ) 28 | from keras.src.applications.efficientnet_v2 import ( 29 | decode_predictions as decode_predictions, 30 | ) 31 | from keras.src.applications.efficientnet_v2 import ( 32 | preprocess_input as preprocess_input, 33 | ) 34 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/applications/imagenet_utils/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.imagenet_utils import ( 8 | decode_predictions as decode_predictions, 9 | ) 10 | from keras.src.applications.imagenet_utils import ( 11 | preprocess_input as preprocess_input, 12 | ) 13 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/applications/inception_resnet_v2/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.inception_resnet_v2 import ( 8 | InceptionResNetV2 as InceptionResNetV2, 9 | ) 10 | from keras.src.applications.inception_resnet_v2 import ( 11 | decode_predictions as decode_predictions, 12 | ) 13 | from keras.src.applications.inception_resnet_v2 import ( 14 | preprocess_input as preprocess_input, 15 | ) 16 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/applications/inception_v3/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.inception_v3 import InceptionV3 as InceptionV3 8 | from keras.src.applications.inception_v3 import ( 9 | decode_predictions as decode_predictions, 10 | ) 11 | from keras.src.applications.inception_v3 import ( 12 | preprocess_input as preprocess_input, 13 | ) 14 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/applications/mobilenet/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.applications.mobilenet import MobileNet as MobileNet 8 | from keras.src.applications.mobilenet import ( 9 | decode_predictions as decode_predictions, 10 | ) 11 | from keras.src.applications.mobilenet import ( 12 | preprocess_input as preprocess_input, 13 | ) 14 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/applications/mobilenet_v2/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.mobilenet_v2 import MobileNetV2 as MobileNetV2 8 | from keras.src.applications.mobilenet_v2 import ( 9 | decode_predictions as decode_predictions, 10 | ) 11 | from keras.src.applications.mobilenet_v2 import ( 12 | preprocess_input as preprocess_input, 13 | ) 14 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/applications/mobilenet_v3/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.mobilenet_v3 import ( 8 | decode_predictions as decode_predictions, 9 | ) 10 | from keras.src.applications.mobilenet_v3 import ( 11 | preprocess_input as preprocess_input, 12 | ) 13 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/applications/nasnet/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.nasnet import NASNetLarge as NASNetLarge 8 | from keras.src.applications.nasnet import NASNetMobile as NASNetMobile 9 | from keras.src.applications.nasnet import ( 10 | decode_predictions as decode_predictions, 11 | ) 12 | from keras.src.applications.nasnet import preprocess_input as preprocess_input 13 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/applications/resnet/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.resnet import ResNet50 as ResNet50 8 | from keras.src.applications.resnet import ResNet101 as ResNet101 9 | from keras.src.applications.resnet import ResNet152 as ResNet152 10 | from keras.src.applications.resnet import ( 11 | decode_predictions as decode_predictions, 12 | ) 13 | from keras.src.applications.resnet import preprocess_input as preprocess_input 14 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/applications/resnet50/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.applications.resnet import ResNet50 as ResNet50 8 | from keras.src.applications.resnet import ( 9 | decode_predictions as decode_predictions, 10 | ) 11 | from keras.src.applications.resnet import preprocess_input as preprocess_input 12 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/applications/resnet_v2/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.resnet_v2 import ResNet50V2 as ResNet50V2 8 | from keras.src.applications.resnet_v2 import ResNet101V2 as ResNet101V2 9 | from keras.src.applications.resnet_v2 import ResNet152V2 as ResNet152V2 10 | from keras.src.applications.resnet_v2 import ( 11 | decode_predictions as decode_predictions, 12 | ) 13 | from keras.src.applications.resnet_v2 import ( 14 | preprocess_input as preprocess_input, 15 | ) 16 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/applications/vgg16/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.vgg16 import VGG16 as VGG16 8 | from keras.src.applications.vgg16 import ( 9 | decode_predictions as decode_predictions, 10 | ) 11 | from keras.src.applications.vgg16 import preprocess_input as preprocess_input 12 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/applications/vgg19/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.vgg19 import VGG19 as VGG19 8 | from keras.src.applications.vgg19 import ( 9 | decode_predictions as decode_predictions, 10 | ) 11 | from keras.src.applications.vgg19 import preprocess_input as preprocess_input 12 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/applications/xception/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.xception import Xception as Xception 8 | from keras.src.applications.xception import ( 9 | decode_predictions as decode_predictions, 10 | ) 11 | from keras.src.applications.xception import preprocess_input as preprocess_input 12 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/callbacks/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.callbacks.backup_and_restore import ( 8 | BackupAndRestore as BackupAndRestore, 9 | ) 10 | from keras.src.callbacks.callback import Callback as Callback 11 | from keras.src.callbacks.callback_list import CallbackList as CallbackList 12 | from keras.src.callbacks.csv_logger import CSVLogger as CSVLogger 13 | from keras.src.callbacks.early_stopping import EarlyStopping as EarlyStopping 14 | from keras.src.callbacks.history import History as History 15 | from keras.src.callbacks.lambda_callback import LambdaCallback as LambdaCallback 16 | from keras.src.callbacks.learning_rate_scheduler import ( 17 | LearningRateScheduler as LearningRateScheduler, 18 | ) 19 | from keras.src.callbacks.model_checkpoint import ( 20 | ModelCheckpoint as ModelCheckpoint, 21 | ) 22 | from keras.src.callbacks.progbar_logger import ProgbarLogger as ProgbarLogger 23 | from keras.src.callbacks.reduce_lr_on_plateau import ( 24 | ReduceLROnPlateau as ReduceLROnPlateau, 25 | ) 26 | from keras.src.callbacks.remote_monitor import RemoteMonitor as RemoteMonitor 27 | from keras.src.callbacks.swap_ema_weights import ( 28 | SwapEMAWeights as SwapEMAWeights, 29 | ) 30 | from keras.src.callbacks.tensorboard import TensorBoard as TensorBoard 31 | from keras.src.callbacks.terminate_on_nan import ( 32 | TerminateOnNaN as TerminateOnNaN, 33 | ) 34 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/constraints/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.constraints import deserialize as deserialize 8 | from keras.src.constraints import get as get 9 | from keras.src.constraints import serialize as serialize 10 | from keras.src.constraints.constraints import Constraint as Constraint 11 | from keras.src.constraints.constraints import MaxNorm as MaxNorm 12 | from keras.src.constraints.constraints import MaxNorm as max_norm 13 | from keras.src.constraints.constraints import MinMaxNorm as MinMaxNorm 14 | from keras.src.constraints.constraints import MinMaxNorm as min_max_norm 15 | from keras.src.constraints.constraints import NonNeg as NonNeg 16 | from keras.src.constraints.constraints import NonNeg as non_neg 17 | from keras.src.constraints.constraints import UnitNorm as UnitNorm 18 | from keras.src.constraints.constraints import UnitNorm as unit_norm 19 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.datasets import boston_housing as boston_housing 8 | from keras.datasets import california_housing as california_housing 9 | from keras.datasets import cifar10 as cifar10 10 | from keras.datasets import cifar100 as cifar100 11 | from keras.datasets import fashion_mnist as fashion_mnist 12 | from keras.datasets import imdb as imdb 13 | from keras.datasets import mnist as mnist 14 | from keras.datasets import reuters as reuters 15 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/datasets/boston_housing/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.datasets.boston_housing import load_data as load_data 8 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/datasets/california_housing/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.datasets.california_housing import load_data as load_data 8 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/datasets/cifar10/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.datasets.cifar10 import load_data as load_data 8 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/datasets/cifar100/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.datasets.cifar100 import load_data as load_data 8 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/datasets/fashion_mnist/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.datasets.fashion_mnist import load_data as load_data 8 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/datasets/imdb/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.datasets.imdb import get_word_index as get_word_index 8 | from keras.src.datasets.imdb import load_data as load_data 9 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/datasets/mnist/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.datasets.mnist import load_data as load_data 8 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/datasets/reuters/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.datasets.reuters import get_label_names as get_label_names 8 | from keras.src.datasets.reuters import get_word_index as get_word_index 9 | from keras.src.datasets.reuters import load_data as load_data 10 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/distribution/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.distribution.distribution_lib import DataParallel as DataParallel 8 | from keras.src.distribution.distribution_lib import DeviceMesh as DeviceMesh 9 | from keras.src.distribution.distribution_lib import LayoutMap as LayoutMap 10 | from keras.src.distribution.distribution_lib import ( 11 | ModelParallel as ModelParallel, 12 | ) 13 | from keras.src.distribution.distribution_lib import TensorLayout as TensorLayout 14 | from keras.src.distribution.distribution_lib import ( 15 | distribute_tensor as distribute_tensor, 16 | ) 17 | from keras.src.distribution.distribution_lib import distribution as distribution 18 | from keras.src.distribution.distribution_lib import initialize as initialize 19 | from keras.src.distribution.distribution_lib import list_devices as list_devices 20 | from keras.src.distribution.distribution_lib import ( 21 | set_distribution as set_distribution, 22 | ) 23 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/dtype_policies/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.dtype_policies import deserialize as deserialize 8 | from keras.src.dtype_policies import get as get 9 | from keras.src.dtype_policies import serialize as serialize 10 | from keras.src.dtype_policies.dtype_policy import DTypePolicy as DTypePolicy 11 | from keras.src.dtype_policies.dtype_policy import ( 12 | FloatDTypePolicy as FloatDTypePolicy, 13 | ) 14 | from keras.src.dtype_policies.dtype_policy import ( 15 | QuantizedDTypePolicy as QuantizedDTypePolicy, 16 | ) 17 | from keras.src.dtype_policies.dtype_policy import ( 18 | QuantizedFloat8DTypePolicy as QuantizedFloat8DTypePolicy, 19 | ) 20 | from keras.src.dtype_policies.dtype_policy_map import ( 21 | DTypePolicyMap as DTypePolicyMap, 22 | ) 23 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/export/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.export.saved_model import ExportArchive as ExportArchive 8 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/legacy/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.legacy import saving as saving 8 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/legacy/saving/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.legacy.saving.serialization import ( 8 | deserialize_keras_object as deserialize_keras_object, 9 | ) 10 | from keras.src.legacy.saving.serialization import ( 11 | serialize_keras_object as serialize_keras_object, 12 | ) 13 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/mixed_precision/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.dtype_policies.dtype_policy import DTypePolicy as DTypePolicy 8 | from keras.src.dtype_policies.dtype_policy import DTypePolicy as Policy 9 | from keras.src.dtype_policies.dtype_policy import dtype_policy as dtype_policy 10 | from keras.src.dtype_policies.dtype_policy import dtype_policy as global_policy 11 | from keras.src.dtype_policies.dtype_policy import ( 12 | set_dtype_policy as set_dtype_policy, 13 | ) 14 | from keras.src.dtype_policies.dtype_policy import ( 15 | set_dtype_policy as set_global_policy, 16 | ) 17 | from keras.src.optimizers.loss_scale_optimizer import ( 18 | LossScaleOptimizer as LossScaleOptimizer, 19 | ) 20 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/models/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.models.cloning import clone_model as clone_model 8 | from keras.src.models.model import Model as Model 9 | from keras.src.models.model import model_from_json as model_from_json 10 | from keras.src.models.sequential import Sequential as Sequential 11 | from keras.src.saving.saving_api import load_model as load_model 12 | from keras.src.saving.saving_api import save_model as save_model 13 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/ops/image/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.ops.image import affine_transform as affine_transform 8 | from keras.src.ops.image import crop_images as crop_images 9 | from keras.src.ops.image import elastic_transform as elastic_transform 10 | from keras.src.ops.image import extract_patches as extract_patches 11 | from keras.src.ops.image import gaussian_blur as gaussian_blur 12 | from keras.src.ops.image import hsv_to_rgb as hsv_to_rgb 13 | from keras.src.ops.image import map_coordinates as map_coordinates 14 | from keras.src.ops.image import pad_images as pad_images 15 | from keras.src.ops.image import perspective_transform as perspective_transform 16 | from keras.src.ops.image import resize as resize 17 | from keras.src.ops.image import rgb_to_grayscale as rgb_to_grayscale 18 | from keras.src.ops.image import rgb_to_hsv as rgb_to_hsv 19 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/ops/linalg/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.ops.linalg import cholesky as cholesky 8 | from keras.src.ops.linalg import det as det 9 | from keras.src.ops.linalg import eig as eig 10 | from keras.src.ops.linalg import eigh as eigh 11 | from keras.src.ops.linalg import inv as inv 12 | from keras.src.ops.linalg import lstsq as lstsq 13 | from keras.src.ops.linalg import lu_factor as lu_factor 14 | from keras.src.ops.linalg import norm as norm 15 | from keras.src.ops.linalg import qr as qr 16 | from keras.src.ops.linalg import solve as solve 17 | from keras.src.ops.linalg import solve_triangular as solve_triangular 18 | from keras.src.ops.linalg import svd as svd 19 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.optimizers import legacy as legacy 8 | from keras.optimizers import schedules as schedules 9 | from keras.src.optimizers import deserialize as deserialize 10 | from keras.src.optimizers import get as get 11 | from keras.src.optimizers import serialize as serialize 12 | from keras.src.optimizers.adadelta import Adadelta as Adadelta 13 | from keras.src.optimizers.adafactor import Adafactor as Adafactor 14 | from keras.src.optimizers.adagrad import Adagrad as Adagrad 15 | from keras.src.optimizers.adam import Adam as Adam 16 | from keras.src.optimizers.adamax import Adamax as Adamax 17 | from keras.src.optimizers.adamw import AdamW as AdamW 18 | from keras.src.optimizers.ftrl import Ftrl as Ftrl 19 | from keras.src.optimizers.lamb import Lamb as Lamb 20 | from keras.src.optimizers.lion import Lion as Lion 21 | from keras.src.optimizers.loss_scale_optimizer import ( 22 | LossScaleOptimizer as LossScaleOptimizer, 23 | ) 24 | from keras.src.optimizers.muon import Muon as Muon 25 | from keras.src.optimizers.nadam import Nadam as Nadam 26 | from keras.src.optimizers.optimizer import Optimizer as Optimizer 27 | from keras.src.optimizers.rmsprop import RMSprop as RMSprop 28 | from keras.src.optimizers.sgd import SGD as SGD 29 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/optimizers/legacy/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.optimizers import LegacyOptimizerWarning as Adagrad 8 | from keras.src.optimizers import LegacyOptimizerWarning as Adam 9 | from keras.src.optimizers import LegacyOptimizerWarning as Ftrl 10 | from keras.src.optimizers import LegacyOptimizerWarning as Optimizer 11 | from keras.src.optimizers import LegacyOptimizerWarning as RMSprop 12 | from keras.src.optimizers import LegacyOptimizerWarning as SGD 13 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/optimizers/schedules/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 8 | CosineDecay as CosineDecay, 9 | ) 10 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 11 | CosineDecayRestarts as CosineDecayRestarts, 12 | ) 13 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 14 | ExponentialDecay as ExponentialDecay, 15 | ) 16 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 17 | InverseTimeDecay as InverseTimeDecay, 18 | ) 19 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 20 | LearningRateSchedule as LearningRateSchedule, 21 | ) 22 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 23 | PiecewiseConstantDecay as PiecewiseConstantDecay, 24 | ) 25 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 26 | PolynomialDecay as PolynomialDecay, 27 | ) 28 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 29 | deserialize as deserialize, 30 | ) 31 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 32 | serialize as serialize, 33 | ) 34 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/preprocessing/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras._tf_keras.keras.preprocessing import image as image 8 | from keras._tf_keras.keras.preprocessing import sequence as sequence 9 | from keras._tf_keras.keras.preprocessing import text as text 10 | from keras.src.utils.image_dataset_utils import ( 11 | image_dataset_from_directory as image_dataset_from_directory, 12 | ) 13 | from keras.src.utils.text_dataset_utils import ( 14 | text_dataset_from_directory as text_dataset_from_directory, 15 | ) 16 | from keras.src.utils.timeseries_dataset_utils import ( 17 | timeseries_dataset_from_array as timeseries_dataset_from_array, 18 | ) 19 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/preprocessing/image/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.legacy.preprocessing.image import ( 8 | DirectoryIterator as DirectoryIterator, 9 | ) 10 | from keras.src.legacy.preprocessing.image import ( 11 | ImageDataGenerator as ImageDataGenerator, 12 | ) 13 | from keras.src.legacy.preprocessing.image import Iterator as Iterator 14 | from keras.src.legacy.preprocessing.image import ( 15 | NumpyArrayIterator as NumpyArrayIterator, 16 | ) 17 | from keras.src.legacy.preprocessing.image import ( 18 | apply_affine_transform as apply_affine_transform, 19 | ) 20 | from keras.src.legacy.preprocessing.image import ( 21 | apply_brightness_shift as apply_brightness_shift, 22 | ) 23 | from keras.src.legacy.preprocessing.image import ( 24 | apply_channel_shift as apply_channel_shift, 25 | ) 26 | from keras.src.legacy.preprocessing.image import ( 27 | random_brightness as random_brightness, 28 | ) 29 | from keras.src.legacy.preprocessing.image import ( 30 | random_channel_shift as random_channel_shift, 31 | ) 32 | from keras.src.legacy.preprocessing.image import ( 33 | random_rotation as random_rotation, 34 | ) 35 | from keras.src.legacy.preprocessing.image import random_shear as random_shear 36 | from keras.src.legacy.preprocessing.image import random_shift as random_shift 37 | from keras.src.legacy.preprocessing.image import random_zoom as random_zoom 38 | from keras.src.utils.image_utils import array_to_img as array_to_img 39 | from keras.src.utils.image_utils import img_to_array as img_to_array 40 | from keras.src.utils.image_utils import load_img as load_img 41 | from keras.src.utils.image_utils import save_img as save_img 42 | from keras.src.utils.image_utils import smart_resize as smart_resize 43 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/preprocessing/sequence/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.legacy.preprocessing.sequence import ( 8 | TimeseriesGenerator as TimeseriesGenerator, 9 | ) 10 | from keras.src.legacy.preprocessing.sequence import ( 11 | make_sampling_table as make_sampling_table, 12 | ) 13 | from keras.src.legacy.preprocessing.sequence import skipgrams as skipgrams 14 | from keras.src.utils.sequence_utils import pad_sequences as pad_sequences 15 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/preprocessing/text/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.legacy.preprocessing.text import Tokenizer as Tokenizer 8 | from keras.src.legacy.preprocessing.text import hashing_trick as hashing_trick 9 | from keras.src.legacy.preprocessing.text import one_hot as one_hot 10 | from keras.src.legacy.preprocessing.text import ( 11 | text_to_word_sequence as text_to_word_sequence, 12 | ) 13 | from keras.src.legacy.preprocessing.text import ( 14 | tokenizer_from_json as tokenizer_from_json, 15 | ) 16 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/quantizers/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. 
Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.quantizers import deserialize as deserialize 8 | from keras.src.quantizers import get as get 9 | from keras.src.quantizers import serialize as serialize 10 | from keras.src.quantizers.quantizers import AbsMaxQuantizer as AbsMaxQuantizer 11 | from keras.src.quantizers.quantizers import Quantizer as Quantizer 12 | from keras.src.quantizers.quantizers import abs_max_quantize as abs_max_quantize 13 | from keras.src.quantizers.quantizers import ( 14 | compute_float8_amax_history as compute_float8_amax_history, 15 | ) 16 | from keras.src.quantizers.quantizers import ( 17 | compute_float8_scale as compute_float8_scale, 18 | ) 19 | from keras.src.quantizers.quantizers import ( 20 | fake_quant_with_min_max_vars as fake_quant_with_min_max_vars, 21 | ) 22 | from keras.src.quantizers.quantizers import ( 23 | quantize_and_dequantize as quantize_and_dequantize, 24 | ) 25 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/random/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.random.random import beta as beta 8 | from keras.src.random.random import binomial as binomial 9 | from keras.src.random.random import categorical as categorical 10 | from keras.src.random.random import dropout as dropout 11 | from keras.src.random.random import gamma as gamma 12 | from keras.src.random.random import normal as normal 13 | from keras.src.random.random import randint as randint 14 | from keras.src.random.random import shuffle as shuffle 15 | from keras.src.random.random import truncated_normal as truncated_normal 16 | from keras.src.random.random import uniform as uniform 17 | from keras.src.random.seed_generator import SeedGenerator as SeedGenerator 18 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/regularizers/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.regularizers import deserialize as deserialize 8 | from keras.src.regularizers import get as get 9 | from keras.src.regularizers import serialize as serialize 10 | from keras.src.regularizers.regularizers import L1 as L1 11 | from keras.src.regularizers.regularizers import L1 as l1 12 | from keras.src.regularizers.regularizers import L1L2 as L1L2 13 | from keras.src.regularizers.regularizers import L1L2 as l1_l2 14 | from keras.src.regularizers.regularizers import L2 as L2 15 | from keras.src.regularizers.regularizers import L2 as l2 16 | from keras.src.regularizers.regularizers import ( 17 | OrthogonalRegularizer as OrthogonalRegularizer, 18 | ) 19 | from keras.src.regularizers.regularizers import ( 20 | OrthogonalRegularizer as orthogonal_regularizer, 21 | ) 22 | from keras.src.regularizers.regularizers import Regularizer as Regularizer 23 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/saving/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. 
Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.saving.file_editor import KerasFileEditor as KerasFileEditor 8 | from keras.src.saving.object_registration import ( 9 | CustomObjectScope as CustomObjectScope, 10 | ) 11 | from keras.src.saving.object_registration import ( 12 | CustomObjectScope as custom_object_scope, 13 | ) 14 | from keras.src.saving.object_registration import ( 15 | get_custom_objects as get_custom_objects, 16 | ) 17 | from keras.src.saving.object_registration import ( 18 | get_registered_name as get_registered_name, 19 | ) 20 | from keras.src.saving.object_registration import ( 21 | get_registered_object as get_registered_object, 22 | ) 23 | from keras.src.saving.object_registration import ( 24 | register_keras_serializable as register_keras_serializable, 25 | ) 26 | from keras.src.saving.saving_api import load_model as load_model 27 | from keras.src.saving.saving_api import load_weights as load_weights 28 | from keras.src.saving.saving_api import save_model as save_model 29 | from keras.src.saving.saving_api import save_weights as save_weights 30 | from keras.src.saving.serialization_lib import ( 31 | deserialize_keras_object as deserialize_keras_object, 32 | ) 33 | from keras.src.saving.serialization_lib import ( 34 | serialize_keras_object as serialize_keras_object, 35 | ) 36 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/tree/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.tree.tree_api import MAP_TO_NONE as MAP_TO_NONE 8 | from keras.src.tree.tree_api import assert_same_paths as assert_same_paths 9 | from keras.src.tree.tree_api import ( 10 | assert_same_structure as assert_same_structure, 11 | ) 12 | from keras.src.tree.tree_api import flatten as flatten 13 | from keras.src.tree.tree_api import flatten_with_path as flatten_with_path 14 | from keras.src.tree.tree_api import is_nested as is_nested 15 | from keras.src.tree.tree_api import lists_to_tuples as lists_to_tuples 16 | from keras.src.tree.tree_api import map_shape_structure as map_shape_structure 17 | from keras.src.tree.tree_api import map_structure as map_structure 18 | from keras.src.tree.tree_api import map_structure_up_to as map_structure_up_to 19 | from keras.src.tree.tree_api import pack_sequence_as as pack_sequence_as 20 | from keras.src.tree.tree_api import traverse as traverse 21 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/utils/bounding_boxes/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( 8 | affine_transform as affine_transform, 9 | ) 10 | from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( 11 | clip_to_image_size as clip_to_image_size, 12 | ) 13 | from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( 14 | convert_format as convert_format, 15 | ) 16 | from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( 17 | crop as crop, 18 | ) 19 | from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( 20 | decode_deltas_to_boxes as decode_deltas_to_boxes, 21 | ) 22 | from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( 23 | encode_box_to_deltas as encode_box_to_deltas, 24 | ) 25 | from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( 26 | pad as pad, 27 | ) 28 | from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.iou import ( 29 | compute_ciou as compute_ciou, 30 | ) 31 | from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.iou import ( 32 | compute_iou as compute_iou, 33 | ) 34 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/utils/legacy/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.legacy.saving.serialization import ( 8 | deserialize_keras_object as deserialize_keras_object, 9 | ) 10 | from keras.src.legacy.saving.serialization import ( 11 | serialize_keras_object as serialize_keras_object, 12 | ) 13 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/visualization/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.visualization.draw_bounding_boxes import ( 8 | draw_bounding_boxes as draw_bounding_boxes, 9 | ) 10 | from keras.src.visualization.draw_segmentation_masks import ( 11 | draw_segmentation_masks as draw_segmentation_masks, 12 | ) 13 | from keras.src.visualization.plot_bounding_box_gallery import ( 14 | plot_bounding_box_gallery as plot_bounding_box_gallery, 15 | ) 16 | from keras.src.visualization.plot_image_gallery import ( 17 | plot_image_gallery as plot_image_gallery, 18 | ) 19 | from keras.src.visualization.plot_segmentation_mask_gallery import ( 20 | plot_segmentation_mask_gallery as plot_segmentation_mask_gallery, 21 | ) 22 | -------------------------------------------------------------------------------- /keras/api/_tf_keras/keras/wrappers/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.wrappers.sklearn_wrapper import ( 8 | SKLearnClassifier as SKLearnClassifier, 9 | ) 10 | from keras.src.wrappers.sklearn_wrapper import ( 11 | SKLearnRegressor as SKLearnRegressor, 12 | ) 13 | from keras.src.wrappers.sklearn_wrapper import ( 14 | SKLearnTransformer as SKLearnTransformer, 15 | ) 16 | -------------------------------------------------------------------------------- /keras/api/applications/convnext/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.convnext import ConvNeXtBase as ConvNeXtBase 8 | from keras.src.applications.convnext import ConvNeXtLarge as ConvNeXtLarge 9 | from keras.src.applications.convnext import ConvNeXtSmall as ConvNeXtSmall 10 | from keras.src.applications.convnext import ConvNeXtTiny as ConvNeXtTiny 11 | from keras.src.applications.convnext import ConvNeXtXLarge as ConvNeXtXLarge 12 | from keras.src.applications.convnext import ( 13 | decode_predictions as decode_predictions, 14 | ) 15 | from keras.src.applications.convnext import preprocess_input as preprocess_input 16 | -------------------------------------------------------------------------------- /keras/api/applications/densenet/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.densenet import DenseNet121 as DenseNet121 8 | from keras.src.applications.densenet import DenseNet169 as DenseNet169 9 | from keras.src.applications.densenet import DenseNet201 as DenseNet201 10 | from keras.src.applications.densenet import ( 11 | decode_predictions as decode_predictions, 12 | ) 13 | from keras.src.applications.densenet import preprocess_input as preprocess_input 14 | -------------------------------------------------------------------------------- /keras/api/applications/efficientnet/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.efficientnet import EfficientNetB0 as EfficientNetB0 8 | from keras.src.applications.efficientnet import EfficientNetB1 as EfficientNetB1 9 | from keras.src.applications.efficientnet import EfficientNetB2 as EfficientNetB2 10 | from keras.src.applications.efficientnet import EfficientNetB3 as EfficientNetB3 11 | from keras.src.applications.efficientnet import EfficientNetB4 as EfficientNetB4 12 | from keras.src.applications.efficientnet import EfficientNetB5 as EfficientNetB5 13 | from keras.src.applications.efficientnet import EfficientNetB6 as EfficientNetB6 14 | from keras.src.applications.efficientnet import EfficientNetB7 as EfficientNetB7 15 | from keras.src.applications.efficientnet import ( 16 | decode_predictions as decode_predictions, 17 | ) 18 | from keras.src.applications.efficientnet import ( 19 | preprocess_input as preprocess_input, 20 | ) 21 | -------------------------------------------------------------------------------- /keras/api/applications/efficientnet_v2/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 
2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.efficientnet_v2 import ( 8 | EfficientNetV2B0 as EfficientNetV2B0, 9 | ) 10 | from keras.src.applications.efficientnet_v2 import ( 11 | EfficientNetV2B1 as EfficientNetV2B1, 12 | ) 13 | from keras.src.applications.efficientnet_v2 import ( 14 | EfficientNetV2B2 as EfficientNetV2B2, 15 | ) 16 | from keras.src.applications.efficientnet_v2 import ( 17 | EfficientNetV2B3 as EfficientNetV2B3, 18 | ) 19 | from keras.src.applications.efficientnet_v2 import ( 20 | EfficientNetV2L as EfficientNetV2L, 21 | ) 22 | from keras.src.applications.efficientnet_v2 import ( 23 | EfficientNetV2M as EfficientNetV2M, 24 | ) 25 | from keras.src.applications.efficientnet_v2 import ( 26 | EfficientNetV2S as EfficientNetV2S, 27 | ) 28 | from keras.src.applications.efficientnet_v2 import ( 29 | decode_predictions as decode_predictions, 30 | ) 31 | from keras.src.applications.efficientnet_v2 import ( 32 | preprocess_input as preprocess_input, 33 | ) 34 | -------------------------------------------------------------------------------- /keras/api/applications/imagenet_utils/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.imagenet_utils import ( 8 | decode_predictions as decode_predictions, 9 | ) 10 | from keras.src.applications.imagenet_utils import ( 11 | preprocess_input as preprocess_input, 12 | ) 13 | -------------------------------------------------------------------------------- /keras/api/applications/inception_resnet_v2/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.inception_resnet_v2 import ( 8 | InceptionResNetV2 as InceptionResNetV2, 9 | ) 10 | from keras.src.applications.inception_resnet_v2 import ( 11 | decode_predictions as decode_predictions, 12 | ) 13 | from keras.src.applications.inception_resnet_v2 import ( 14 | preprocess_input as preprocess_input, 15 | ) 16 | -------------------------------------------------------------------------------- /keras/api/applications/inception_v3/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.inception_v3 import InceptionV3 as InceptionV3 8 | from keras.src.applications.inception_v3 import ( 9 | decode_predictions as decode_predictions, 10 | ) 11 | from keras.src.applications.inception_v3 import ( 12 | preprocess_input as preprocess_input, 13 | ) 14 | -------------------------------------------------------------------------------- /keras/api/applications/mobilenet/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.applications.mobilenet import MobileNet as MobileNet 8 | from keras.src.applications.mobilenet import ( 9 | decode_predictions as decode_predictions, 10 | ) 11 | from keras.src.applications.mobilenet import ( 12 | preprocess_input as preprocess_input, 13 | ) 14 | -------------------------------------------------------------------------------- /keras/api/applications/mobilenet_v2/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.mobilenet_v2 import MobileNetV2 as MobileNetV2 8 | from keras.src.applications.mobilenet_v2 import ( 9 | decode_predictions as decode_predictions, 10 | ) 11 | from keras.src.applications.mobilenet_v2 import ( 12 | preprocess_input as preprocess_input, 13 | ) 14 | -------------------------------------------------------------------------------- /keras/api/applications/mobilenet_v3/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.mobilenet_v3 import ( 8 | decode_predictions as decode_predictions, 9 | ) 10 | from keras.src.applications.mobilenet_v3 import ( 11 | preprocess_input as preprocess_input, 12 | ) 13 | -------------------------------------------------------------------------------- /keras/api/applications/nasnet/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.nasnet import NASNetLarge as NASNetLarge 8 | from keras.src.applications.nasnet import NASNetMobile as NASNetMobile 9 | from keras.src.applications.nasnet import ( 10 | decode_predictions as decode_predictions, 11 | ) 12 | from keras.src.applications.nasnet import preprocess_input as preprocess_input 13 | -------------------------------------------------------------------------------- /keras/api/applications/resnet/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.resnet import ResNet50 as ResNet50 8 | from keras.src.applications.resnet import ResNet101 as ResNet101 9 | from keras.src.applications.resnet import ResNet152 as ResNet152 10 | from keras.src.applications.resnet import ( 11 | decode_predictions as decode_predictions, 12 | ) 13 | from keras.src.applications.resnet import preprocess_input as preprocess_input 14 | -------------------------------------------------------------------------------- /keras/api/applications/resnet50/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.applications.resnet import ResNet50 as ResNet50 8 | from keras.src.applications.resnet import ( 9 | decode_predictions as decode_predictions, 10 | ) 11 | from keras.src.applications.resnet import preprocess_input as preprocess_input 12 | -------------------------------------------------------------------------------- /keras/api/applications/resnet_v2/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.resnet_v2 import ResNet50V2 as ResNet50V2 8 | from keras.src.applications.resnet_v2 import ResNet101V2 as ResNet101V2 9 | from keras.src.applications.resnet_v2 import ResNet152V2 as ResNet152V2 10 | from keras.src.applications.resnet_v2 import ( 11 | decode_predictions as decode_predictions, 12 | ) 13 | from keras.src.applications.resnet_v2 import ( 14 | preprocess_input as preprocess_input, 15 | ) 16 | -------------------------------------------------------------------------------- /keras/api/applications/vgg16/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.vgg16 import VGG16 as VGG16 8 | from keras.src.applications.vgg16 import ( 9 | decode_predictions as decode_predictions, 10 | ) 11 | from keras.src.applications.vgg16 import preprocess_input as preprocess_input 12 | -------------------------------------------------------------------------------- /keras/api/applications/vgg19/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.vgg19 import VGG19 as VGG19 8 | from keras.src.applications.vgg19 import ( 9 | decode_predictions as decode_predictions, 10 | ) 11 | from keras.src.applications.vgg19 import preprocess_input as preprocess_input 12 | -------------------------------------------------------------------------------- /keras/api/applications/xception/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.applications.xception import Xception as Xception 8 | from keras.src.applications.xception import ( 9 | decode_predictions as decode_predictions, 10 | ) 11 | from keras.src.applications.xception import preprocess_input as preprocess_input 12 | -------------------------------------------------------------------------------- /keras/api/backend/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.backend.common.dtypes import result_type as result_type 8 | from keras.src.backend.common.global_state import clear_session as clear_session 9 | from keras.src.backend.common.keras_tensor import ( 10 | is_keras_tensor as is_keras_tensor, 11 | ) 12 | from keras.src.backend.common.variables import is_float_dtype as is_float_dtype 13 | from keras.src.backend.common.variables import is_int_dtype as is_int_dtype 14 | from keras.src.backend.common.variables import ( 15 | standardize_dtype as standardize_dtype, 16 | ) 17 | from keras.src.backend.config import backend as backend 18 | from keras.src.backend.config import epsilon as epsilon 19 | from keras.src.backend.config import floatx as floatx 20 | from keras.src.backend.config import image_data_format as image_data_format 21 | from keras.src.backend.config import set_epsilon as set_epsilon 22 | from keras.src.backend.config import set_floatx as set_floatx 23 | from keras.src.backend.config import ( 24 | set_image_data_format as set_image_data_format, 25 | ) 26 | from keras.src.utils.naming import get_uid as get_uid 27 | -------------------------------------------------------------------------------- /keras/api/callbacks/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.callbacks.backup_and_restore import ( 8 | BackupAndRestore as BackupAndRestore, 9 | ) 10 | from keras.src.callbacks.callback import Callback as Callback 11 | from keras.src.callbacks.callback_list import CallbackList as CallbackList 12 | from keras.src.callbacks.csv_logger import CSVLogger as CSVLogger 13 | from keras.src.callbacks.early_stopping import EarlyStopping as EarlyStopping 14 | from keras.src.callbacks.history import History as History 15 | from keras.src.callbacks.lambda_callback import LambdaCallback as LambdaCallback 16 | from keras.src.callbacks.learning_rate_scheduler import ( 17 | LearningRateScheduler as LearningRateScheduler, 18 | ) 19 | from keras.src.callbacks.model_checkpoint import ( 20 | ModelCheckpoint as ModelCheckpoint, 21 | ) 22 | from keras.src.callbacks.progbar_logger import ProgbarLogger as ProgbarLogger 23 | from keras.src.callbacks.reduce_lr_on_plateau import ( 24 | ReduceLROnPlateau as ReduceLROnPlateau, 25 | ) 26 | from keras.src.callbacks.remote_monitor import RemoteMonitor as RemoteMonitor 27 | from keras.src.callbacks.swap_ema_weights import ( 28 | SwapEMAWeights as SwapEMAWeights, 29 | ) 30 | from keras.src.callbacks.tensorboard import TensorBoard as TensorBoard 31 | from keras.src.callbacks.terminate_on_nan import ( 32 | TerminateOnNaN as TerminateOnNaN, 33 | ) 34 | -------------------------------------------------------------------------------- /keras/api/constraints/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.constraints import deserialize as deserialize 8 | from keras.src.constraints import get as get 9 | from keras.src.constraints import serialize as serialize 10 | from keras.src.constraints.constraints import Constraint as Constraint 11 | from keras.src.constraints.constraints import MaxNorm as MaxNorm 12 | from keras.src.constraints.constraints import MaxNorm as max_norm 13 | from keras.src.constraints.constraints import MinMaxNorm as MinMaxNorm 14 | from keras.src.constraints.constraints import MinMaxNorm as min_max_norm 15 | from keras.src.constraints.constraints import NonNeg as NonNeg 16 | from keras.src.constraints.constraints import NonNeg as non_neg 17 | from keras.src.constraints.constraints import UnitNorm as UnitNorm 18 | from keras.src.constraints.constraints import UnitNorm as unit_norm 19 | -------------------------------------------------------------------------------- /keras/api/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.datasets import boston_housing as boston_housing 8 | from keras.datasets import california_housing as california_housing 9 | from keras.datasets import cifar10 as cifar10 10 | from keras.datasets import cifar100 as cifar100 11 | from keras.datasets import fashion_mnist as fashion_mnist 12 | from keras.datasets import imdb as imdb 13 | from keras.datasets import mnist as mnist 14 | from keras.datasets import reuters as reuters 15 | -------------------------------------------------------------------------------- /keras/api/datasets/boston_housing/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.datasets.boston_housing import load_data as load_data 8 | -------------------------------------------------------------------------------- /keras/api/datasets/california_housing/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.datasets.california_housing import load_data as load_data 8 | -------------------------------------------------------------------------------- /keras/api/datasets/cifar10/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.datasets.cifar10 import load_data as load_data 8 | -------------------------------------------------------------------------------- /keras/api/datasets/cifar100/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.datasets.cifar100 import load_data as load_data 8 | -------------------------------------------------------------------------------- /keras/api/datasets/fashion_mnist/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.datasets.fashion_mnist import load_data as load_data 8 | -------------------------------------------------------------------------------- /keras/api/datasets/imdb/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.datasets.imdb import get_word_index as get_word_index 8 | from keras.src.datasets.imdb import load_data as load_data 9 | -------------------------------------------------------------------------------- /keras/api/datasets/mnist/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.datasets.mnist import load_data as load_data 8 | -------------------------------------------------------------------------------- /keras/api/datasets/reuters/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.datasets.reuters import get_label_names as get_label_names 8 | from keras.src.datasets.reuters import get_word_index as get_word_index 9 | from keras.src.datasets.reuters import load_data as load_data 10 | -------------------------------------------------------------------------------- /keras/api/distribution/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.distribution.distribution_lib import DataParallel as DataParallel 8 | from keras.src.distribution.distribution_lib import DeviceMesh as DeviceMesh 9 | from keras.src.distribution.distribution_lib import LayoutMap as LayoutMap 10 | from keras.src.distribution.distribution_lib import ( 11 | ModelParallel as ModelParallel, 12 | ) 13 | from keras.src.distribution.distribution_lib import TensorLayout as TensorLayout 14 | from keras.src.distribution.distribution_lib import ( 15 | distribute_tensor as distribute_tensor, 16 | ) 17 | from keras.src.distribution.distribution_lib import distribution as distribution 18 | from keras.src.distribution.distribution_lib import initialize as initialize 19 | from keras.src.distribution.distribution_lib import list_devices as list_devices 20 | from keras.src.distribution.distribution_lib import ( 21 | set_distribution as set_distribution, 22 | ) 23 | -------------------------------------------------------------------------------- /keras/api/dtype_policies/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. 
Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.dtype_policies import deserialize as deserialize 8 | from keras.src.dtype_policies import get as get 9 | from keras.src.dtype_policies import serialize as serialize 10 | from keras.src.dtype_policies.dtype_policy import DTypePolicy as DTypePolicy 11 | from keras.src.dtype_policies.dtype_policy import ( 12 | FloatDTypePolicy as FloatDTypePolicy, 13 | ) 14 | from keras.src.dtype_policies.dtype_policy import ( 15 | QuantizedDTypePolicy as QuantizedDTypePolicy, 16 | ) 17 | from keras.src.dtype_policies.dtype_policy import ( 18 | QuantizedFloat8DTypePolicy as QuantizedFloat8DTypePolicy, 19 | ) 20 | from keras.src.dtype_policies.dtype_policy_map import ( 21 | DTypePolicyMap as DTypePolicyMap, 22 | ) 23 | -------------------------------------------------------------------------------- /keras/api/export/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.export.saved_model import ExportArchive as ExportArchive 8 | -------------------------------------------------------------------------------- /keras/api/legacy/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.legacy import saving as saving 8 | -------------------------------------------------------------------------------- /keras/api/legacy/saving/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.legacy.saving.serialization import ( 8 | deserialize_keras_object as deserialize_keras_object, 9 | ) 10 | from keras.src.legacy.saving.serialization import ( 11 | serialize_keras_object as serialize_keras_object, 12 | ) 13 | -------------------------------------------------------------------------------- /keras/api/mixed_precision/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.dtype_policies.dtype_policy import DTypePolicy as DTypePolicy 8 | from keras.src.dtype_policies.dtype_policy import DTypePolicy as Policy 9 | from keras.src.dtype_policies.dtype_policy import dtype_policy as dtype_policy 10 | from keras.src.dtype_policies.dtype_policy import dtype_policy as global_policy 11 | from keras.src.dtype_policies.dtype_policy import ( 12 | set_dtype_policy as set_dtype_policy, 13 | ) 14 | from keras.src.dtype_policies.dtype_policy import ( 15 | set_dtype_policy as set_global_policy, 16 | ) 17 | from keras.src.optimizers.loss_scale_optimizer import ( 18 | LossScaleOptimizer as LossScaleOptimizer, 19 | ) 20 | -------------------------------------------------------------------------------- /keras/api/models/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.models.cloning import clone_model as clone_model 8 | from keras.src.models.model import Model as Model 9 | from keras.src.models.model import model_from_json as model_from_json 10 | from keras.src.models.sequential import Sequential as Sequential 11 | from keras.src.saving.saving_api import load_model as load_model 12 | from keras.src.saving.saving_api import save_model as save_model 13 | -------------------------------------------------------------------------------- /keras/api/ops/image/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.ops.image import affine_transform as affine_transform 8 | from keras.src.ops.image import crop_images as crop_images 9 | from keras.src.ops.image import elastic_transform as elastic_transform 10 | from keras.src.ops.image import extract_patches as extract_patches 11 | from keras.src.ops.image import gaussian_blur as gaussian_blur 12 | from keras.src.ops.image import hsv_to_rgb as hsv_to_rgb 13 | from keras.src.ops.image import map_coordinates as map_coordinates 14 | from keras.src.ops.image import pad_images as pad_images 15 | from keras.src.ops.image import perspective_transform as perspective_transform 16 | from keras.src.ops.image import resize as resize 17 | from keras.src.ops.image import rgb_to_grayscale as rgb_to_grayscale 18 | from keras.src.ops.image import rgb_to_hsv as rgb_to_hsv 19 | -------------------------------------------------------------------------------- /keras/api/ops/linalg/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.ops.linalg import cholesky as cholesky 8 | from keras.src.ops.linalg import det as det 9 | from keras.src.ops.linalg import eig as eig 10 | from keras.src.ops.linalg import eigh as eigh 11 | from keras.src.ops.linalg import inv as inv 12 | from keras.src.ops.linalg import lstsq as lstsq 13 | from keras.src.ops.linalg import lu_factor as lu_factor 14 | from keras.src.ops.linalg import norm as norm 15 | from keras.src.ops.linalg import qr as qr 16 | from keras.src.ops.linalg import solve as solve 17 | from keras.src.ops.linalg import solve_triangular as solve_triangular 18 | from keras.src.ops.linalg import svd as svd 19 | -------------------------------------------------------------------------------- /keras/api/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.optimizers import legacy as legacy 8 | from keras.optimizers import schedules as schedules 9 | from keras.src.optimizers import deserialize as deserialize 10 | from keras.src.optimizers import get as get 11 | from keras.src.optimizers import serialize as serialize 12 | from keras.src.optimizers.adadelta import Adadelta as Adadelta 13 | from keras.src.optimizers.adafactor import Adafactor as Adafactor 14 | from keras.src.optimizers.adagrad import Adagrad as Adagrad 15 | from keras.src.optimizers.adam import Adam as Adam 16 | from keras.src.optimizers.adamax import Adamax as Adamax 17 | from keras.src.optimizers.adamw import AdamW as AdamW 18 | from keras.src.optimizers.ftrl import Ftrl as Ftrl 19 | from keras.src.optimizers.lamb import Lamb as Lamb 20 | from keras.src.optimizers.lion import Lion as Lion 21 | from keras.src.optimizers.loss_scale_optimizer import ( 22 | LossScaleOptimizer as LossScaleOptimizer, 23 | ) 24 | from keras.src.optimizers.muon import Muon as Muon 25 | from keras.src.optimizers.nadam import Nadam as Nadam 26 | from keras.src.optimizers.optimizer import Optimizer as Optimizer 27 | from keras.src.optimizers.rmsprop import RMSprop as RMSprop 28 | from keras.src.optimizers.sgd import SGD as SGD 29 | -------------------------------------------------------------------------------- /keras/api/optimizers/legacy/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.optimizers import LegacyOptimizerWarning as Adagrad 8 | from keras.src.optimizers import LegacyOptimizerWarning as Adam 9 | from keras.src.optimizers import LegacyOptimizerWarning as Ftrl 10 | from keras.src.optimizers import LegacyOptimizerWarning as Optimizer 11 | from keras.src.optimizers import LegacyOptimizerWarning as RMSprop 12 | from keras.src.optimizers import LegacyOptimizerWarning as SGD 13 | -------------------------------------------------------------------------------- /keras/api/optimizers/schedules/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 8 | CosineDecay as CosineDecay, 9 | ) 10 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 11 | CosineDecayRestarts as CosineDecayRestarts, 12 | ) 13 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 14 | ExponentialDecay as ExponentialDecay, 15 | ) 16 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 17 | InverseTimeDecay as InverseTimeDecay, 18 | ) 19 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 20 | LearningRateSchedule as LearningRateSchedule, 21 | ) 22 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 23 | PiecewiseConstantDecay as PiecewiseConstantDecay, 24 | ) 25 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 26 | PolynomialDecay as PolynomialDecay, 27 | ) 28 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 29 | deserialize as deserialize, 30 | ) 31 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 32 | serialize as serialize, 33 | ) 34 | -------------------------------------------------------------------------------- /keras/api/preprocessing/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.preprocessing import image as image 8 | from keras.preprocessing import sequence as sequence 9 | from keras.src.utils.image_dataset_utils import ( 10 | image_dataset_from_directory as image_dataset_from_directory, 11 | ) 12 | from keras.src.utils.text_dataset_utils import ( 13 | text_dataset_from_directory as text_dataset_from_directory, 14 | ) 15 | from keras.src.utils.timeseries_dataset_utils import ( 16 | timeseries_dataset_from_array as timeseries_dataset_from_array, 17 | ) 18 | -------------------------------------------------------------------------------- /keras/api/preprocessing/image/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.utils.image_utils import array_to_img as array_to_img 8 | from keras.src.utils.image_utils import img_to_array as img_to_array 9 | from keras.src.utils.image_utils import load_img as load_img 10 | from keras.src.utils.image_utils import save_img as save_img 11 | from keras.src.utils.image_utils import smart_resize as smart_resize 12 | -------------------------------------------------------------------------------- /keras/api/preprocessing/sequence/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.utils.sequence_utils import pad_sequences as pad_sequences 8 | -------------------------------------------------------------------------------- /keras/api/quantizers/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.quantizers import deserialize as deserialize 8 | from keras.src.quantizers import get as get 9 | from keras.src.quantizers import serialize as serialize 10 | from keras.src.quantizers.quantizers import AbsMaxQuantizer as AbsMaxQuantizer 11 | from keras.src.quantizers.quantizers import Quantizer as Quantizer 12 | from keras.src.quantizers.quantizers import abs_max_quantize as abs_max_quantize 13 | from keras.src.quantizers.quantizers import ( 14 | compute_float8_amax_history as compute_float8_amax_history, 15 | ) 16 | from keras.src.quantizers.quantizers import ( 17 | compute_float8_scale as compute_float8_scale, 18 | ) 19 | from keras.src.quantizers.quantizers import ( 20 | fake_quant_with_min_max_vars as fake_quant_with_min_max_vars, 21 | ) 22 | from keras.src.quantizers.quantizers import ( 23 | quantize_and_dequantize as quantize_and_dequantize, 24 | ) 25 | -------------------------------------------------------------------------------- /keras/api/random/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.random.random import beta as beta 8 | from keras.src.random.random import binomial as binomial 9 | from keras.src.random.random import categorical as categorical 10 | from keras.src.random.random import dropout as dropout 11 | from keras.src.random.random import gamma as gamma 12 | from keras.src.random.random import normal as normal 13 | from keras.src.random.random import randint as randint 14 | from keras.src.random.random import shuffle as shuffle 15 | from keras.src.random.random import truncated_normal as truncated_normal 16 | from keras.src.random.random import uniform as uniform 17 | from keras.src.random.seed_generator import SeedGenerator as SeedGenerator 18 | -------------------------------------------------------------------------------- /keras/api/regularizers/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.regularizers import deserialize as deserialize 8 | from keras.src.regularizers import get as get 9 | from keras.src.regularizers import serialize as serialize 10 | from keras.src.regularizers.regularizers import L1 as L1 11 | from keras.src.regularizers.regularizers import L1 as l1 12 | from keras.src.regularizers.regularizers import L1L2 as L1L2 13 | from keras.src.regularizers.regularizers import L1L2 as l1_l2 14 | from keras.src.regularizers.regularizers import L2 as L2 15 | from keras.src.regularizers.regularizers import L2 as l2 16 | from keras.src.regularizers.regularizers import ( 17 | OrthogonalRegularizer as OrthogonalRegularizer, 18 | ) 19 | from keras.src.regularizers.regularizers import ( 20 | OrthogonalRegularizer as orthogonal_regularizer, 21 | ) 22 | from keras.src.regularizers.regularizers import Regularizer as Regularizer 23 | -------------------------------------------------------------------------------- /keras/api/saving/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.saving.file_editor import KerasFileEditor as KerasFileEditor 8 | from keras.src.saving.object_registration import ( 9 | CustomObjectScope as CustomObjectScope, 10 | ) 11 | from keras.src.saving.object_registration import ( 12 | CustomObjectScope as custom_object_scope, 13 | ) 14 | from keras.src.saving.object_registration import ( 15 | get_custom_objects as get_custom_objects, 16 | ) 17 | from keras.src.saving.object_registration import ( 18 | get_registered_name as get_registered_name, 19 | ) 20 | from keras.src.saving.object_registration import ( 21 | get_registered_object as get_registered_object, 22 | ) 23 | from keras.src.saving.object_registration import ( 24 | register_keras_serializable as register_keras_serializable, 25 | ) 26 | from keras.src.saving.saving_api import load_model as load_model 27 | from keras.src.saving.saving_api import load_weights as load_weights 28 | from keras.src.saving.saving_api import save_model as save_model 29 | from keras.src.saving.saving_api import save_weights as save_weights 30 | from keras.src.saving.serialization_lib import ( 31 | deserialize_keras_object as deserialize_keras_object, 32 | ) 33 | from keras.src.saving.serialization_lib import ( 34 | serialize_keras_object as serialize_keras_object, 35 | ) 36 | -------------------------------------------------------------------------------- /keras/api/tree/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.tree.tree_api import MAP_TO_NONE as MAP_TO_NONE 8 | from keras.src.tree.tree_api import assert_same_paths as assert_same_paths 9 | from keras.src.tree.tree_api import ( 10 | assert_same_structure as assert_same_structure, 11 | ) 12 | from keras.src.tree.tree_api import flatten as flatten 13 | from keras.src.tree.tree_api import flatten_with_path as flatten_with_path 14 | from keras.src.tree.tree_api import is_nested as is_nested 15 | from keras.src.tree.tree_api import lists_to_tuples as lists_to_tuples 16 | from keras.src.tree.tree_api import map_shape_structure as map_shape_structure 17 | from keras.src.tree.tree_api import map_structure as map_structure 18 | from keras.src.tree.tree_api import map_structure_up_to as map_structure_up_to 19 | from keras.src.tree.tree_api import pack_sequence_as as pack_sequence_as 20 | from keras.src.tree.tree_api import traverse as traverse 21 | -------------------------------------------------------------------------------- /keras/api/utils/bounding_boxes/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( 8 | affine_transform as affine_transform, 9 | ) 10 | from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( 11 | clip_to_image_size as clip_to_image_size, 12 | ) 13 | from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( 14 | convert_format as convert_format, 15 | ) 16 | from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( 17 | crop as crop, 18 | ) 19 | from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( 20 | decode_deltas_to_boxes as decode_deltas_to_boxes, 21 | ) 22 | from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( 23 | encode_box_to_deltas as encode_box_to_deltas, 24 | ) 25 | from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( 26 | pad as pad, 27 | ) 28 | from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.iou import ( 29 | compute_ciou as compute_ciou, 30 | ) 31 | from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.iou import ( 32 | compute_iou as compute_iou, 33 | ) 34 | -------------------------------------------------------------------------------- /keras/api/utils/legacy/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.legacy.saving.serialization import ( 8 | deserialize_keras_object as deserialize_keras_object, 9 | ) 10 | from keras.src.legacy.saving.serialization import ( 11 | serialize_keras_object as serialize_keras_object, 12 | ) 13 | -------------------------------------------------------------------------------- /keras/api/visualization/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 5 | """ 6 | 7 | from keras.src.visualization.draw_bounding_boxes import ( 8 | draw_bounding_boxes as draw_bounding_boxes, 9 | ) 10 | from keras.src.visualization.draw_segmentation_masks import ( 11 | draw_segmentation_masks as draw_segmentation_masks, 12 | ) 13 | from keras.src.visualization.plot_bounding_box_gallery import ( 14 | plot_bounding_box_gallery as plot_bounding_box_gallery, 15 | ) 16 | from keras.src.visualization.plot_image_gallery import ( 17 | plot_image_gallery as plot_image_gallery, 18 | ) 19 | from keras.src.visualization.plot_segmentation_mask_gallery import ( 20 | plot_segmentation_mask_gallery as plot_segmentation_mask_gallery, 21 | ) 22 | -------------------------------------------------------------------------------- /keras/api/wrappers/__init__.py: -------------------------------------------------------------------------------- 1 | """DO NOT EDIT. 2 | 3 | This file was autogenerated. Do not edit it by hand, 4 | since your modifications would be overwritten. 
5 | """ 6 | 7 | from keras.src.wrappers.sklearn_wrapper import ( 8 | SKLearnClassifier as SKLearnClassifier, 9 | ) 10 | from keras.src.wrappers.sklearn_wrapper import ( 11 | SKLearnRegressor as SKLearnRegressor, 12 | ) 13 | from keras.src.wrappers.sklearn_wrapper import ( 14 | SKLearnTransformer as SKLearnTransformer, 15 | ) 16 | -------------------------------------------------------------------------------- /keras/src/__init__.py: -------------------------------------------------------------------------------- 1 | from keras.src import activations 2 | from keras.src import applications 3 | from keras.src import backend 4 | from keras.src import constraints 5 | from keras.src import datasets 6 | from keras.src import initializers 7 | from keras.src import layers 8 | from keras.src import models 9 | from keras.src import ops 10 | from keras.src import optimizers 11 | from keras.src import regularizers 12 | from keras.src import utils 13 | from keras.src import visualization 14 | from keras.src.backend import KerasTensor 15 | from keras.src.layers import Input 16 | from keras.src.layers import Layer 17 | from keras.src.models import Functional 18 | from keras.src.models import Model 19 | from keras.src.models import Sequential 20 | from keras.src.version import __version__ 21 | -------------------------------------------------------------------------------- /keras/src/api_export.py: -------------------------------------------------------------------------------- 1 | try: 2 | import namex 3 | except ImportError: 4 | namex = None 5 | 6 | 7 | # These dicts reference "canonical names" only 8 | # (i.e. the first name an object was registered with). 9 | REGISTERED_NAMES_TO_OBJS = {} 10 | REGISTERED_OBJS_TO_NAMES = {} 11 | 12 | 13 | def register_internal_serializable(path, symbol): 14 | global REGISTERED_NAMES_TO_OBJS 15 | if isinstance(path, (list, tuple)): 16 | name = path[0] 17 | else: 18 | name = path 19 | REGISTERED_NAMES_TO_OBJS[name] = symbol 20 | REGISTERED_OBJS_TO_NAMES[symbol] = name 21 | 22 | 23 | def get_symbol_from_name(name): 24 | return REGISTERED_NAMES_TO_OBJS.get(name, None) 25 | 26 | 27 | def get_name_from_symbol(symbol): 28 | return REGISTERED_OBJS_TO_NAMES.get(symbol, None) 29 | 30 | 31 | if namex: 32 | 33 | class keras_export(namex.export): 34 | def __init__(self, path): 35 | super().__init__(package="keras", path=path) 36 | 37 | def __call__(self, symbol): 38 | register_internal_serializable(self.path, symbol) 39 | return super().__call__(symbol) 40 | 41 | else: 42 | 43 | class keras_export: 44 | def __init__(self, path): 45 | self.path = path 46 | 47 | def __call__(self, symbol): 48 | register_internal_serializable(self.path, symbol) 49 | return symbol 50 | -------------------------------------------------------------------------------- /keras/src/applications/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/keras/src/applications/__init__.py -------------------------------------------------------------------------------- /keras/src/backend/common/__init__.py: -------------------------------------------------------------------------------- 1 | from keras.src.backend.common import backend_utils 2 | from keras.src.backend.common.dtypes import result_type 3 | from keras.src.backend.common.variables import AutocastScope 4 | from keras.src.backend.common.variables import Variable as KerasVariable 5 | from keras.src.backend.common.variables 
import get_autocast_scope 6 | from keras.src.backend.common.variables import is_float_dtype 7 | from keras.src.backend.common.variables import is_int_dtype 8 | from keras.src.backend.common.variables import standardize_dtype 9 | from keras.src.backend.common.variables import standardize_shape 10 | from keras.src.random import random 11 | -------------------------------------------------------------------------------- /keras/src/backend/common/global_state_test.py: -------------------------------------------------------------------------------- 1 | from keras.src.backend.common import global_state 2 | from keras.src.testing import test_case 3 | from keras.src.utils.naming import auto_name 4 | 5 | 6 | class GlobalStateTest(test_case.TestCase): 7 | def test_clear_session(self): 8 | name0 = auto_name("somename") 9 | self.assertEqual(name0, "somename") 10 | name1 = auto_name("somename") 11 | self.assertEqual(name1, "somename_1") 12 | global_state.clear_session() 13 | name0 = auto_name("somename") 14 | self.assertEqual(name0, "somename") 15 | -------------------------------------------------------------------------------- /keras/src/backend/common/masking.py: -------------------------------------------------------------------------------- 1 | from keras.src.backend.common.tensor_attributes import get_tensor_attr 2 | from keras.src.backend.common.tensor_attributes import set_tensor_attr 3 | 4 | 5 | def set_keras_mask(x, mask): 6 | """Sets the Keras mask attribute for the given tensor in-place. 7 | 8 | Args: 9 | x: Input tensor. 10 | mask: The mask tensor to be set. If `None`, the `_keras_mask` attribute 11 | will be cleared. 12 | """ 13 | set_tensor_attr(x, "_keras_mask", mask) 14 | 15 | 16 | def get_keras_mask(x): 17 | """Gets the Keras mask attribute from the given tensor. 18 | 19 | Args: 20 | x: Input tensor. 21 | 22 | Returns: 23 | The mask tensor associated with the input tensor, or `None` if no mask 24 | has been set. 
25 | """ 26 | return get_tensor_attr(x, "_keras_mask") 27 | -------------------------------------------------------------------------------- /keras/src/backend/common/masking_test.py: -------------------------------------------------------------------------------- 1 | from keras.src import backend 2 | from keras.src import ops 3 | from keras.src import testing 4 | from keras.src.backend.common.masking import get_keras_mask 5 | from keras.src.backend.common.masking import set_keras_mask 6 | 7 | 8 | class MaskingTest(testing.TestCase): 9 | def test_mask_on_eager_tensor(self): 10 | x = ops.zeros((2, 3)) 11 | self.assertIsNone(get_keras_mask(x)) 12 | 13 | set_keras_mask(x, None) 14 | self.assertIsNone(get_keras_mask(x)) 15 | 16 | mask = ops.ones((2, 3)) 17 | set_keras_mask(x, mask) 18 | self.assertIs(get_keras_mask(x), mask) 19 | 20 | set_keras_mask(x, None) 21 | self.assertIsNone(get_keras_mask(x)) 22 | 23 | set_keras_mask(x, None) 24 | self.assertIsNone(get_keras_mask(x)) 25 | 26 | def test_mask_on_tracer_tensor(self): 27 | def fn(x): 28 | self.assertIsNone(get_keras_mask(x)) 29 | 30 | set_keras_mask(x, None) 31 | self.assertIsNone(get_keras_mask(x)) 32 | 33 | mask = ops.ones((2, 3)) 34 | set_keras_mask(x, mask) 35 | self.assertIs(get_keras_mask(x), mask) 36 | 37 | set_keras_mask(x, None) 38 | self.assertIsNone(get_keras_mask(x)) 39 | 40 | set_keras_mask(x, None) # key is now deleted, should be a no-op 41 | self.assertIsNone(get_keras_mask(x)) 42 | 43 | backend.compute_output_spec(fn, backend.KerasTensor((2, 3))) 44 | -------------------------------------------------------------------------------- /keras/src/backend/common/symbolic_scope.py: -------------------------------------------------------------------------------- 1 | from keras.src.api_export import keras_export 2 | from keras.src.backend.common import global_state 3 | 4 | 5 | @keras_export("keras.SymbolicScope") 6 | class SymbolicScope: 7 | """Scope to indicate the symbolic stage.""" 8 | 9 | def __enter__(self): 10 | self.original_scope = get_symbolic_scope() 11 | global_state.set_global_attribute("symbolic_scope", self) 12 | return self 13 | 14 | def __exit__(self, *args, **kwargs): 15 | global_state.set_global_attribute("symbolic_scope", self.original_scope) 16 | 17 | 18 | def in_symbolic_scope(): 19 | return global_state.get_global_attribute("symbolic_scope") is not None 20 | 21 | 22 | def get_symbolic_scope(): 23 | return global_state.get_global_attribute("symbolic_scope") 24 | -------------------------------------------------------------------------------- /keras/src/backend/common/symbolic_scope_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from keras.src import ops 4 | from keras.src import testing 5 | from keras.src.backend.common.symbolic_scope import SymbolicScope 6 | from keras.src.backend.common.symbolic_scope import in_symbolic_scope 7 | 8 | 9 | class TestSymbolicScope(testing.TestCase): 10 | def test_basic_flow(self): 11 | # Define a function that behaves differently according to 12 | # `in_symbolic_scope`. 
13 | def compute_loss(y, y_pred): 14 | if in_symbolic_scope(): 15 | return ops.zeros_like(y) 16 | return ops.add(y, y_pred) 17 | 18 | y = ops.ones(shape=(2,)) 19 | y_pred = ops.ones(shape=(2,)) 20 | with SymbolicScope(): 21 | loss = compute_loss(y, y_pred) 22 | self.assertAllClose(loss, np.zeros((2,))) 23 | 24 | loss = compute_loss(y, y_pred) 25 | self.assertAllClose(loss, 2 * np.ones((2,))) 26 | -------------------------------------------------------------------------------- /keras/src/backend/common/tensor_attributes.py: -------------------------------------------------------------------------------- 1 | import weakref 2 | 3 | from keras.src.backend.common import global_state 4 | 5 | 6 | def _clear_tensor_attr(tensor_id, attr): 7 | attr_dict = global_state.get_global_attribute(f"{attr}_dict") 8 | if attr_dict is not None and tensor_id in attr_dict: 9 | del attr_dict[tensor_id] 10 | 11 | 12 | def set_tensor_attr(tensor, attr, value): 13 | try: 14 | setattr(tensor, attr, value) 15 | except AttributeError: 16 | attr_dict = global_state.get_global_attribute(f"{attr}_dict") 17 | if attr_dict is None: 18 | if value is None: 19 | return 20 | attr_dict = {} 21 | global_state.set_global_attribute(f"{attr}_dict", attr_dict) 22 | if value is not None: 23 | attr_dict[id(tensor)] = value 24 | weakref.finalize(tensor, _clear_tensor_attr, id(tensor), attr) 25 | elif id(tensor) in attr_dict: 26 | del attr_dict[id(tensor)] 27 | 28 | 29 | def get_tensor_attr(tensor, attr): 30 | if not hasattr(tensor, attr): 31 | attr_dict = global_state.get_global_attribute(f"{attr}_dict") 32 | if attr_dict is not None: 33 | return attr_dict.get(id(tensor), None) 34 | else: 35 | return None 36 | return getattr(tensor, attr, None) 37 | -------------------------------------------------------------------------------- /keras/src/backend/common/thread_safe_test.py: -------------------------------------------------------------------------------- 1 | import concurrent 2 | 3 | import numpy as np 4 | 5 | from keras.src import backend 6 | from keras.src import ops 7 | from keras.src import testing 8 | 9 | 10 | class TestThreadSafe(testing.TestCase): 11 | def test_is_thread_safe(self): 12 | if backend.IS_THREAD_SAFE: 13 | executor = concurrent.futures.ThreadPoolExecutor() 14 | 15 | def sum(x, axis): 16 | return ops.sum(x, axis=axis) 17 | 18 | futures = [] 19 | 20 | for i in range(10000): 21 | futures.clear() 22 | x = ops.convert_to_tensor(np.random.rand(100, 100)) 23 | futures.append(executor.submit(sum, x, 1)) 24 | x = ops.convert_to_tensor(np.random.rand(100)) 25 | futures.append(executor.submit(sum, x, 0)) 26 | concurrent.futures.wait( 27 | futures, return_when=concurrent.futures.ALL_COMPLETED 28 | ) 29 | [future.result() for future in futures] 30 | -------------------------------------------------------------------------------- /keras/src/backend/jax/__init__.py: -------------------------------------------------------------------------------- 1 | from keras.src.backend.jax import core 2 | from keras.src.backend.jax import distribution_lib 3 | from keras.src.backend.jax import image 4 | from keras.src.backend.jax import linalg 5 | from keras.src.backend.jax import math 6 | from keras.src.backend.jax import nn 7 | from keras.src.backend.jax import numpy 8 | from keras.src.backend.jax import random 9 | from keras.src.backend.jax import tensorboard 10 | from keras.src.backend.jax.core import IS_THREAD_SAFE 11 | from keras.src.backend.jax.core import SUPPORTS_RAGGED_TENSORS 12 | from keras.src.backend.jax.core import 
SUPPORTS_SPARSE_TENSORS 13 | from keras.src.backend.jax.core import Variable 14 | from keras.src.backend.jax.core import cast 15 | from keras.src.backend.jax.core import compute_output_spec 16 | from keras.src.backend.jax.core import cond 17 | from keras.src.backend.jax.core import convert_to_numpy 18 | from keras.src.backend.jax.core import convert_to_tensor 19 | from keras.src.backend.jax.core import device_scope 20 | from keras.src.backend.jax.core import is_tensor 21 | from keras.src.backend.jax.core import name_scope 22 | from keras.src.backend.jax.core import random_seed_dtype 23 | from keras.src.backend.jax.core import scatter 24 | from keras.src.backend.jax.core import shape 25 | from keras.src.backend.jax.core import stop_gradient 26 | from keras.src.backend.jax.core import vectorized_map 27 | from keras.src.backend.jax.rnn import cudnn_ok 28 | from keras.src.backend.jax.rnn import gru 29 | from keras.src.backend.jax.rnn import lstm 30 | from keras.src.backend.jax.rnn import rnn 31 | -------------------------------------------------------------------------------- /keras/src/backend/jax/layer.py: -------------------------------------------------------------------------------- 1 | class JaxLayer: 2 | pass 3 | -------------------------------------------------------------------------------- /keras/src/backend/jax/tensorboard.py: -------------------------------------------------------------------------------- 1 | from keras.src.utils.module_utils import jax 2 | 3 | 4 | def start_trace(logdir): 5 | if logdir: 6 | jax.profiler.start_trace(logdir) 7 | 8 | 9 | def stop_trace(save): 10 | if save: 11 | jax.profiler.stop_trace() 12 | 13 | 14 | def start_batch_trace(batch): 15 | batch_trace_context = jax.profiler.TraceAnnotation( 16 | f"Profiled batch {batch}" 17 | ) 18 | batch_trace_context.__enter__() 19 | return batch_trace_context 20 | 21 | 22 | def stop_batch_trace(batch_trace_context): 23 | batch_trace_context.__exit__(None, None, None) 24 | -------------------------------------------------------------------------------- /keras/src/backend/numpy/__init__.py: -------------------------------------------------------------------------------- 1 | from keras.src.backend.common.name_scope import name_scope 2 | from keras.src.backend.numpy import core 3 | from keras.src.backend.numpy import image 4 | from keras.src.backend.numpy import linalg 5 | from keras.src.backend.numpy import math 6 | from keras.src.backend.numpy import nn 7 | from keras.src.backend.numpy import numpy 8 | from keras.src.backend.numpy import random 9 | from keras.src.backend.numpy.core import IS_THREAD_SAFE 10 | from keras.src.backend.numpy.core import SUPPORTS_RAGGED_TENSORS 11 | from keras.src.backend.numpy.core import SUPPORTS_SPARSE_TENSORS 12 | from keras.src.backend.numpy.core import Variable 13 | from keras.src.backend.numpy.core import cast 14 | from keras.src.backend.numpy.core import compute_output_spec 15 | from keras.src.backend.numpy.core import cond 16 | from keras.src.backend.numpy.core import convert_to_numpy 17 | from keras.src.backend.numpy.core import convert_to_tensor 18 | from keras.src.backend.numpy.core import device_scope 19 | from keras.src.backend.numpy.core import is_tensor 20 | from keras.src.backend.numpy.core import random_seed_dtype 21 | from keras.src.backend.numpy.core import shape 22 | from keras.src.backend.numpy.core import vectorized_map 23 | from keras.src.backend.numpy.rnn import cudnn_ok 24 | from keras.src.backend.numpy.rnn import gru 25 | from keras.src.backend.numpy.rnn import lstm 
26 | from keras.src.backend.numpy.rnn import rnn 27 | -------------------------------------------------------------------------------- /keras/src/backend/numpy/export.py: -------------------------------------------------------------------------------- 1 | class NumpyExportArchive: 2 | def track(self, resource): 3 | raise NotImplementedError( 4 | "`track` is not implemented in the numpy backend." 5 | ) 6 | 7 | def add_endpoint(self, name, fn, input_signature=None, **kwargs): 8 | raise NotImplementedError( 9 | "`add_endpoint` is not implemented in the numpy backend." 10 | ) 11 | -------------------------------------------------------------------------------- /keras/src/backend/numpy/layer.py: -------------------------------------------------------------------------------- 1 | class NumpyLayer: 2 | pass 3 | -------------------------------------------------------------------------------- /keras/src/backend/openvino/__init__.py: -------------------------------------------------------------------------------- 1 | from keras.src.backend.common.name_scope import name_scope 2 | from keras.src.backend.openvino import core 3 | from keras.src.backend.openvino import image 4 | from keras.src.backend.openvino import linalg 5 | from keras.src.backend.openvino import math 6 | from keras.src.backend.openvino import nn 7 | from keras.src.backend.openvino import numpy 8 | from keras.src.backend.openvino import random 9 | from keras.src.backend.openvino.core import IS_THREAD_SAFE 10 | from keras.src.backend.openvino.core import SUPPORTS_RAGGED_TENSORS 11 | from keras.src.backend.openvino.core import SUPPORTS_SPARSE_TENSORS 12 | from keras.src.backend.openvino.core import Variable 13 | from keras.src.backend.openvino.core import cast 14 | from keras.src.backend.openvino.core import compute_output_spec 15 | from keras.src.backend.openvino.core import cond 16 | from keras.src.backend.openvino.core import convert_to_numpy 17 | from keras.src.backend.openvino.core import convert_to_tensor 18 | from keras.src.backend.openvino.core import is_tensor 19 | from keras.src.backend.openvino.core import random_seed_dtype 20 | from keras.src.backend.openvino.core import shape 21 | from keras.src.backend.openvino.core import vectorized_map 22 | from keras.src.backend.openvino.rnn import cudnn_ok 23 | from keras.src.backend.openvino.rnn import gru 24 | from keras.src.backend.openvino.rnn import lstm 25 | from keras.src.backend.openvino.rnn import rnn 26 | -------------------------------------------------------------------------------- /keras/src/backend/openvino/excluded_tests.txt: -------------------------------------------------------------------------------- 1 | keras/src/activations 2 | keras/src/backend/common/dtypes_test.py 3 | keras/src/backend/common/variables_test.py 4 | keras/src/callbacks/early_stopping_test.py 5 | keras/src/dtype_policies/dtype_policy_map_test.py 6 | keras/src/layers/attention 7 | keras/src/layers/convolutional/conv_transpose_test.py 8 | keras/src/layers/convolutional/separable_conv_test.py 9 | keras/src/layers/core/dense_test.py 10 | keras/src/layers/core/einsum_dense_test.py 11 | keras/src/layers/core/embedding_test.py 12 | keras/src/layers/normalization/spectral_normalization_test.py 13 | keras/src/layers/normalization/unit_normalization_test.py 14 | keras/src/layers/pooling/average_pooling_test.py 15 | keras/src/layers/pooling/max_pooling_test.py 16 | keras/src/layers/preprocessing 17 | keras/src/layers/regularization 18 | keras/src/layers/reshaping/reshape_test.py 19 | 
keras/src/layers/reshaping/up_sampling1d_test.py 20 | keras/src/layers/reshaping/up_sampling2d_test.py 21 | keras/src/layers/reshaping/up_sampling3d_test.py 22 | keras/src/layers/reshaping/zero_padding1d_test.py 23 | keras/src/layers/reshaping/zero_padding2d_test.py 24 | keras/src/layers/reshaping/zero_padding3d_test.py 25 | keras/src/layers/layer_test.py 26 | keras/src/layers/rnn 27 | keras/src/legacy 28 | keras/src/losses 29 | keras/src/metrics 30 | keras/src/models 31 | keras/src/ops/core_test.py 32 | keras/src/ops/image_test.py 33 | keras/src/ops/linalg_test.py 34 | keras/src/ops/math_test.py 35 | keras/src/ops/nn_test.py 36 | keras/src/optimizers 37 | keras/src/quantizers 38 | keras/src/random 39 | keras/src/regularizers 40 | keras/src/saving 41 | keras/src/trainers 42 | keras/src/utils -------------------------------------------------------------------------------- /keras/src/backend/openvino/export.py: -------------------------------------------------------------------------------- 1 | class OpenvinoExportArchive: 2 | def track(self, resource): 3 | raise NotImplementedError( 4 | "`track` is not implemented in the openvino backend." 5 | ) 6 | 7 | def add_endpoint(self, name, fn, input_signature=None, **kwargs): 8 | raise NotImplementedError( 9 | "`add_endpoint` is not implemented in the openvino backend." 10 | ) 11 | -------------------------------------------------------------------------------- /keras/src/backend/openvino/image.py: -------------------------------------------------------------------------------- 1 | def rgb_to_grayscale(image, data_format="channels_last"): 2 | raise NotImplementedError( 3 | "`rgb_to_grayscale` is not supported with openvino backend" 4 | ) 5 | 6 | 7 | def resize( 8 | image, 9 | size, 10 | interpolation="bilinear", 11 | antialias=False, 12 | crop_to_aspect_ratio=False, 13 | pad_to_aspect_ratio=False, 14 | fill_mode="constant", 15 | fill_value=0.0, 16 | data_format="channels_last", 17 | ): 18 | raise NotImplementedError("`resize` is not supported with openvino backend") 19 | 20 | 21 | def affine_transform( 22 | image, 23 | transform, 24 | interpolation="bilinear", 25 | fill_mode="constant", 26 | fill_value=0, 27 | data_format="channels_last", 28 | ): 29 | raise NotImplementedError( 30 | "`affine_transform` is not supported with openvino backend" 31 | ) 32 | 33 | 34 | def map_coordinates( 35 | input, coordinates, order, fill_mode="constant", fill_value=0.0 36 | ): 37 | raise NotImplementedError( 38 | "`map_coordinates` is not supported with openvino backend" 39 | ) 40 | -------------------------------------------------------------------------------- /keras/src/backend/openvino/layer.py: -------------------------------------------------------------------------------- 1 | class OpenvinoLayer: 2 | pass 3 | -------------------------------------------------------------------------------- /keras/src/backend/openvino/linalg.py: -------------------------------------------------------------------------------- 1 | def cholesky(a): 2 | raise NotImplementedError( 3 | "`cholesky` is not supported with openvino backend" 4 | ) 5 | 6 | 7 | def det(a): 8 | raise NotImplementedError("`det` is not supported with openvino backend") 9 | 10 | 11 | def eig(a): 12 | raise NotImplementedError("`eig` is not supported with openvino backend") 13 | 14 | 15 | def eigh(a): 16 | raise NotImplementedError("`eigh` is not supported with openvino backend") 17 | 18 | 19 | def inv(a): 20 | raise NotImplementedError("`inv` is not supported with openvino backend") 21 | 22 | 23 | def 
lu_factor(a): 24 | raise NotImplementedError( 25 | "`lu_factor` is not supported with openvino backend" 26 | ) 27 | 28 | 29 | def norm(x, ord=None, axis=None, keepdims=False): 30 | raise NotImplementedError("`norm` is not supported with openvino backend") 31 | 32 | 33 | def qr(x, mode="reduced"): 34 | raise NotImplementedError("`qr` is not supported with openvino backend") 35 | 36 | 37 | def solve(a, b): 38 | raise NotImplementedError("`solve` is not supported with openvino backend") 39 | 40 | 41 | def solve_triangular(a, b, lower=False): 42 | raise NotImplementedError( 43 | "`solve_triangular` is not supported with openvino backend" 44 | ) 45 | 46 | 47 | def svd(x, full_matrices=True, compute_uv=True): 48 | raise NotImplementedError("`svd` is not supported with openvino backend") 49 | 50 | 51 | def lstsq(a, b, rcond=None): 52 | raise NotImplementedError("`lstsq` is not supported with openvino backend") 53 | -------------------------------------------------------------------------------- /keras/src/backend/openvino/rnn.py: -------------------------------------------------------------------------------- 1 | def rnn( 2 | step_function, 3 | inputs, 4 | initial_states, 5 | go_backwards=False, 6 | mask=None, 7 | constants=None, 8 | unroll=False, 9 | input_length=None, 10 | time_major=False, 11 | zero_output_for_mask=False, 12 | return_all_outputs=True, 13 | ): 14 | raise NotImplementedError("`rnn` is not supported with openvino backend") 15 | 16 | 17 | def lstm(*args, **kwargs): 18 | raise NotImplementedError("`lstm` is not supported with openvino backend") 19 | 20 | 21 | def gru(*args, **kwargs): 22 | raise NotImplementedError("`gru` is not supported with openvino backend") 23 | 24 | 25 | def unstack(x, axis=0): 26 | raise NotImplementedError( 27 | "`unstack` is not supported with openvino backend" 28 | ) 29 | 30 | 31 | def numpy_scan(f, init, xs, reverse=False, mask=None): 32 | raise NotImplementedError( 33 | "`numpy_scan` is not supported with openvino backend" 34 | ) 35 | 36 | 37 | def cudnn_ok(*args, **kwargs): 38 | return False 39 | -------------------------------------------------------------------------------- /keras/src/backend/tensorflow/__init__.py: -------------------------------------------------------------------------------- 1 | from keras.src.backend.tensorflow import core 2 | from keras.src.backend.tensorflow import distribution_lib 3 | from keras.src.backend.tensorflow import image 4 | from keras.src.backend.tensorflow import linalg 5 | from keras.src.backend.tensorflow import math 6 | from keras.src.backend.tensorflow import nn 7 | from keras.src.backend.tensorflow import numpy 8 | from keras.src.backend.tensorflow import random 9 | from keras.src.backend.tensorflow import tensorboard 10 | from keras.src.backend.tensorflow.core import IS_THREAD_SAFE 11 | from keras.src.backend.tensorflow.core import SUPPORTS_RAGGED_TENSORS 12 | from keras.src.backend.tensorflow.core import SUPPORTS_SPARSE_TENSORS 13 | from keras.src.backend.tensorflow.core import Variable 14 | from keras.src.backend.tensorflow.core import cast 15 | from keras.src.backend.tensorflow.core import compute_output_spec 16 | from keras.src.backend.tensorflow.core import cond 17 | from keras.src.backend.tensorflow.core import convert_to_numpy 18 | from keras.src.backend.tensorflow.core import convert_to_tensor 19 | from keras.src.backend.tensorflow.core import device_scope 20 | from keras.src.backend.tensorflow.core import is_tensor 21 | from keras.src.backend.tensorflow.core import name_scope 22 | from 
keras.src.backend.tensorflow.core import random_seed_dtype 23 | from keras.src.backend.tensorflow.core import scatter 24 | from keras.src.backend.tensorflow.core import shape 25 | from keras.src.backend.tensorflow.core import stop_gradient 26 | from keras.src.backend.tensorflow.core import vectorized_map 27 | from keras.src.backend.tensorflow.rnn import cudnn_ok 28 | from keras.src.backend.tensorflow.rnn import gru 29 | from keras.src.backend.tensorflow.rnn import lstm 30 | from keras.src.backend.tensorflow.rnn import rnn 31 | -------------------------------------------------------------------------------- /keras/src/backend/tensorflow/export.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | 4 | class TFExportArchive: 5 | def _track_layer(self, layer): 6 | # Variables in the lists below are actually part of the trackables 7 | # that get saved, because the lists are created in __init__. 8 | variables = layer.variables 9 | trainable_variables = layer.trainable_variables 10 | non_trainable_variables = layer.non_trainable_variables 11 | self._tf_trackable.variables += variables 12 | self._tf_trackable.trainable_variables += trainable_variables 13 | self._tf_trackable.non_trainable_variables += non_trainable_variables 14 | 15 | def add_endpoint(self, name, fn, input_signature=None, **kwargs): 16 | decorated_fn = tf.function( 17 | fn, input_signature=input_signature, autograph=False 18 | ) 19 | return decorated_fn 20 | -------------------------------------------------------------------------------- /keras/src/backend/tensorflow/name_scope_test.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | from keras.src.backend.tensorflow.core import name_scope 4 | from keras.src.testing import TestCase 5 | 6 | 7 | class TFNameScopeTest(TestCase): 8 | def test_stacking(self): 9 | self.assertEqual(tf.Variable(0, name="x").name, "x:0") 10 | with name_scope("outer") as outer: 11 | self.assertEqual(outer.name, "outer") 12 | self.assertEqual(tf.Variable(0, name="x").name, "outer/x:0") 13 | with name_scope("middle") as middle: 14 | self.assertEqual(middle.name, "middle") 15 | self.assertEqual( 16 | tf.Variable(0, name="x").name, "outer/middle/x:0" 17 | ) 18 | with name_scope("inner") as inner: 19 | self.assertEqual(inner.name, "inner") 20 | self.assertEqual( 21 | tf.Variable(0, name="x").name, "outer/middle/inner/x:0" 22 | ) 23 | self.assertEqual( 24 | tf.Variable(0, name="x").name, "outer/middle/x:0" 25 | ) 26 | self.assertEqual(tf.Variable(0, name="x").name, "outer/x:0") 27 | self.assertEqual(tf.Variable(0, name="x").name, "x:0") 28 | 29 | def test_deduplicate(self): 30 | self.assertEqual(tf.Variable(0, name="x").name, "x:0") 31 | with name_scope("name", caller=1): 32 | with name_scope("name", caller=1): 33 | self.assertEqual(tf.Variable(0, name="x").name, "name/x:0") 34 | self.assertEqual(tf.Variable(0, name="x").name, "x:0") 35 | with name_scope("name"): 36 | with name_scope("name"): 37 | self.assertEqual(tf.Variable(0, name="x").name, "name/name/x:0") 38 | -------------------------------------------------------------------------------- /keras/src/backend/tensorflow/tensorboard.py: -------------------------------------------------------------------------------- 1 | from keras.src.utils.module_utils import tensorflow as tf 2 | 3 | 4 | def start_trace(logdir): 5 | tf.profiler.experimental.start(logdir=logdir) 6 | 7 | 8 | def stop_trace(save): 9 | 
tf.profiler.experimental.stop(save=save) 10 | 11 | 12 | def start_batch_trace(batch): 13 | batch_trace_context = tf.profiler.experimental.Trace( 14 | "Profiled batch", step_num=batch 15 | ) 16 | batch_trace_context.__enter__() 17 | return batch_trace_context 18 | 19 | 20 | def stop_batch_trace(batch_trace_context): 21 | batch_trace_context.__exit__(None, None, None) 22 | -------------------------------------------------------------------------------- /keras/src/backend/torch/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | from keras.src.backend.torch.optimizers.torch_optimizer import TorchOptimizer 2 | -------------------------------------------------------------------------------- /keras/src/backend/torch/optimizers/torch_adagrad.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from keras.src import ops 4 | from keras.src import optimizers 5 | from keras.src.backend.torch.optimizers import torch_parallel_optimizer 6 | 7 | 8 | class Adagrad( 9 | torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Adagrad 10 | ): 11 | def _parallel_update_step( 12 | self, 13 | grads, 14 | variables, 15 | learning_rate, 16 | ): 17 | keras_variables = variables 18 | variables = [v.value for v in variables] 19 | 20 | dtype = variables[0].dtype 21 | lr = ops.cast(learning_rate, dtype) 22 | 23 | accumulators = [ 24 | self._accumulators[self._get_variable_index(variable)].value 25 | for variable in keras_variables 26 | ] 27 | torch._foreach_add_(accumulators, torch._foreach_mul(grads, grads)) 28 | torch._foreach_add_( 29 | variables, 30 | torch._foreach_div( 31 | torch._foreach_mul(grads, lr), 32 | torch._foreach_sqrt( 33 | torch._foreach_add(accumulators, self.epsilon) 34 | ), 35 | ), 36 | alpha=-1, 37 | ) 38 | -------------------------------------------------------------------------------- /keras/src/backend/torch/optimizers/torch_adamax.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from keras.src import ops 4 | from keras.src import optimizers 5 | from keras.src.backend.torch.optimizers import torch_parallel_optimizer 6 | 7 | 8 | class Adamax( 9 | torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Adamax 10 | ): 11 | def _parallel_update_step( 12 | self, 13 | grads, 14 | variables, 15 | learning_rate, 16 | ): 17 | keras_variables = variables 18 | variables = [v.value for v in variables] 19 | 20 | dtype = variables[0].dtype 21 | lr = ops.cast(learning_rate, dtype) 22 | 23 | local_step = ops.cast(self.iterations + 1, dtype) 24 | 25 | beta_1_power = ops.power(ops.cast(self.beta_1, dtype), local_step) 26 | 27 | m_list = [ 28 | self._m[self._get_variable_index(variable)].value 29 | for variable in keras_variables 30 | ] 31 | u_list = [ 32 | self._u[self._get_variable_index(variable)].value 33 | for variable in keras_variables 34 | ] 35 | 36 | torch._foreach_mul_(m_list, self.beta_1) 37 | torch._foreach_add_(m_list, grads, alpha=1 - self.beta_1) 38 | 39 | torch._foreach_mul_(u_list, self.beta_2) 40 | torch._foreach_maximum_(u_list, torch._foreach_abs(grads)) 41 | 42 | torch._foreach_add_( 43 | variables, 44 | torch._foreach_div( 45 | torch._foreach_mul(m_list, lr), 46 | torch._foreach_mul( 47 | torch._foreach_add(u_list, self.epsilon), 48 | 1 - beta_1_power, 49 | ), 50 | ), 51 | alpha=-1, 52 | ) 53 | -------------------------------------------------------------------------------- 
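The torch optimizer backends in this directory (Adagrad and Adamax above, AdamW, Lion and SGD below) avoid per-variable Python loops by calling torch._foreach_* ops on whole lists of tensors. A minimal standalone sketch of that fused-update pattern (illustrative only, not repository code; it uses only the _foreach calls that already appear in these files):

import torch

# Two "variables" and their gradients, kept as flat lists of tensors,
# just like `[v.value for v in variables]` in the optimizers here.
params = [torch.ones(3), torch.ones(2)]
grads = [torch.full((3,), 0.5), torch.full((2,), 0.25)]
lr = 0.1

# A single fused call applies p <- p + alpha * g to every tensor in the
# list, mirroring the momentum-free branch of the SGD implementation:
# `torch._foreach_add_(variables, grads, alpha=-learning_rate)`.
torch._foreach_add_(params, grads, alpha=-lr)

print([p.tolist() for p in params])  # approx. 0.95 and 0.975 after one step

--------------------------------------------------------------------------------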
/keras/src/backend/torch/optimizers/torch_adamw.py: -------------------------------------------------------------------------------- 1 | from keras.src import optimizers 2 | from keras.src.backend.torch.optimizers import torch_adam 3 | 4 | 5 | class AdamW(torch_adam.Adam, optimizers.AdamW): 6 | pass 7 | -------------------------------------------------------------------------------- /keras/src/backend/torch/optimizers/torch_lion.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from keras.src import ops 4 | from keras.src import optimizers 5 | from keras.src.backend.torch.optimizers import torch_parallel_optimizer 6 | 7 | 8 | class Lion(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Lion): 9 | def _parallel_update_step( 10 | self, 11 | grads, 12 | variables, 13 | learning_rate, 14 | ): 15 | keras_variables = variables 16 | variables = [v.value for v in variables] 17 | 18 | dtype = variables[0].dtype 19 | lr = ops.cast(learning_rate, dtype) 20 | 21 | m_list = [ 22 | self._momentums[self._get_variable_index(variable)].value 23 | for variable in keras_variables 24 | ] 25 | 26 | c_t = torch._foreach_mul(m_list, self.beta_1) 27 | torch._foreach_add_(c_t, grads, alpha=1 - self.beta_1) 28 | c_t = [c.sign() for c in c_t] 29 | 30 | torch._foreach_add_( 31 | variables, 32 | torch._foreach_mul(c_t, lr), 33 | alpha=-1, 34 | ) 35 | 36 | torch._foreach_mul_(m_list, self.beta_2) 37 | torch._foreach_add_(m_list, grads, alpha=1 - self.beta_2) 38 | -------------------------------------------------------------------------------- /keras/src/backend/torch/optimizers/torch_parallel_optimizer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from keras.src.optimizers.base_optimizer import BaseOptimizer 4 | from keras.src.utils import torch_utils 5 | 6 | 7 | class TorchParallelOptimizer(BaseOptimizer): 8 | @torch_utils.no_grad 9 | def _backend_update_step(self, grads, trainable_variables, learning_rate): 10 | self._parallel_update_step( 11 | grads, 12 | trainable_variables, 13 | learning_rate, 14 | ) 15 | 16 | @torch_utils.no_grad 17 | def _backend_reset_gradient_accumulators(self): 18 | acc_list = [ 19 | v.value for v in self._accumulated_gradients if v is not None 20 | ] 21 | torch._foreach_mul_(acc_list, 0.0) 22 | 23 | @torch_utils.no_grad 24 | def _backend_increment_gradient_accumulators(self, grads, acc_grads): 25 | acc_list = [v.value for v in acc_grads] 26 | torch._foreach_add_(acc_list, grads, alpha=1.0) 27 | -------------------------------------------------------------------------------- /keras/src/backend/torch/optimizers/torch_sgd.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from keras.src import optimizers 4 | from keras.src.backend.torch.optimizers import torch_parallel_optimizer 5 | 6 | 7 | class SGD(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.SGD): 8 | def _parallel_update_step( 9 | self, 10 | grads, 11 | variables, 12 | learning_rate, 13 | ): 14 | keras_variables = variables 15 | variables = [v.value for v in variables] 16 | if self.momentum != 0: 17 | bufs = [ 18 | self.momentums[self._get_variable_index(variable)].value 19 | for variable in keras_variables 20 | ] 21 | 22 | for i in range(len(bufs)): 23 | if bufs[i] is None: 24 | bufs[i] = torch.clone(grads[i]).detach() 25 | 26 | torch._foreach_mul_(bufs, self.momentum) 27 | torch._foreach_add_(bufs, grads, alpha=-learning_rate) 28 | 29 
| if self.nesterov: 30 | torch._foreach_add_(variables, grads, alpha=-learning_rate) 31 | torch._foreach_add_(variables, bufs, alpha=self.momentum) 32 | else: 33 | torch._foreach_add_(variables, bufs) 34 | 35 | else: 36 | torch._foreach_add_(variables, grads, alpha=-learning_rate) 37 | -------------------------------------------------------------------------------- /keras/src/callbacks/__init__.py: -------------------------------------------------------------------------------- 1 | from keras.src.callbacks.backup_and_restore import BackupAndRestore 2 | from keras.src.callbacks.callback import Callback 3 | from keras.src.callbacks.callback_list import CallbackList 4 | from keras.src.callbacks.csv_logger import CSVLogger 5 | from keras.src.callbacks.early_stopping import EarlyStopping 6 | from keras.src.callbacks.history import History 7 | from keras.src.callbacks.lambda_callback import LambdaCallback 8 | from keras.src.callbacks.learning_rate_scheduler import LearningRateScheduler 9 | from keras.src.callbacks.model_checkpoint import ModelCheckpoint 10 | from keras.src.callbacks.monitor_callback import MonitorCallback 11 | from keras.src.callbacks.progbar_logger import ProgbarLogger 12 | from keras.src.callbacks.reduce_lr_on_plateau import ReduceLROnPlateau 13 | from keras.src.callbacks.remote_monitor import RemoteMonitor 14 | from keras.src.callbacks.swap_ema_weights import SwapEMAWeights 15 | from keras.src.callbacks.tensorboard import TensorBoard 16 | from keras.src.callbacks.terminate_on_nan import TerminateOnNaN 17 | -------------------------------------------------------------------------------- /keras/src/callbacks/callback_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | from keras.src import models 5 | from keras.src import testing 6 | from keras.src.callbacks.callback import Callback 7 | 8 | 9 | class CallbackTest(testing.TestCase): 10 | @pytest.mark.requires_trainable_backend 11 | def test_model_state_is_current_on_epoch_end(self): 12 | class TestModel(models.Model): 13 | def __init__(self): 14 | super().__init__() 15 | self.iterations = self.add_variable( 16 | shape=(), initializer="zeros", trainable=False 17 | ) 18 | 19 | def call(self, inputs): 20 | self.iterations.assign(self.iterations + 1) 21 | return inputs 22 | 23 | class CBK(Callback): 24 | def on_batch_end(self, batch, logs): 25 | assert np.int32(self.model.iterations) == batch + 1 26 | 27 | model = TestModel() 28 | model.compile(optimizer="sgd", loss="mse") 29 | x = np.random.random((8, 1)) 30 | y = np.random.random((8, 1)) 31 | model.fit(x, y, callbacks=[CBK()], batch_size=2) 32 | -------------------------------------------------------------------------------- /keras/src/callbacks/history.py: -------------------------------------------------------------------------------- 1 | from keras.src.api_export import keras_export 2 | from keras.src.callbacks.callback import Callback 3 | 4 | 5 | @keras_export("keras.callbacks.History") 6 | class History(Callback): 7 | """Callback that records events into a `History` object. 8 | 9 | This callback is automatically applied to 10 | every Keras model. The `History` object 11 | gets returned by the `fit()` method of models. 12 | 13 | Example: 14 | 15 | >>> model = Sequential([layers.Dense(10)]) 16 | >>> model.compile(SGD(), loss='mse') 17 | >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5), 18 | ... 
epochs=10, verbose=1) 19 | >>> print(history.params) 20 | {'verbose': 1, 'epochs': 10, 'steps': 1} 21 | >>> # check the keys of history object 22 | >>> print(history.history.keys()) 23 | dict_keys(['loss']) 24 | 25 | """ 26 | 27 | def __init__(self): 28 | super().__init__() 29 | self.history = {} 30 | 31 | def on_train_begin(self, logs=None): 32 | self.epoch = [] 33 | 34 | def on_epoch_end(self, epoch, logs=None): 35 | logs = logs or {} 36 | self.epoch.append(epoch) 37 | for k, v in logs.items(): 38 | self.history.setdefault(k, []).append(v) 39 | 40 | # Set the history attribute on the model after the epoch ends. This will 41 | # make sure that the state which is set is the latest one. 42 | self.model.history = self 43 | -------------------------------------------------------------------------------- /keras/src/callbacks/terminate_on_nan.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from keras.src.api_export import keras_export 4 | from keras.src.callbacks.callback import Callback 5 | from keras.src.utils import io_utils 6 | 7 | 8 | @keras_export("keras.callbacks.TerminateOnNaN") 9 | class TerminateOnNaN(Callback): 10 | """Callback that terminates training when a NaN loss is encountered.""" 11 | 12 | def on_batch_end(self, batch, logs=None): 13 | logs = logs or {} 14 | loss = logs.get("loss") 15 | if loss is not None: 16 | if np.isnan(loss) or np.isinf(loss): 17 | io_utils.print_msg( 18 | f"Batch {batch}: Invalid loss, terminating training" 19 | ) 20 | self.model.stop_training = True 21 | -------------------------------------------------------------------------------- /keras/src/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | """Small NumPy datasets for debugging/testing.""" 2 | 3 | from keras.src.datasets import boston_housing 4 | from keras.src.datasets import california_housing 5 | from keras.src.datasets import cifar10 6 | from keras.src.datasets import cifar100 7 | from keras.src.datasets import fashion_mnist 8 | from keras.src.datasets import imdb 9 | from keras.src.datasets import mnist 10 | from keras.src.datasets import reuters 11 | -------------------------------------------------------------------------------- /keras/src/datasets/cifar.py: -------------------------------------------------------------------------------- 1 | """Utilities common to CIFAR10 and CIFAR100 datasets.""" 2 | 3 | import _pickle as cPickle 4 | 5 | 6 | def load_batch(fpath, label_key="labels"): 7 | """Internal utility for parsing CIFAR data. 8 | 9 | Args: 10 | fpath: path to the file to parse. 11 | label_key: key for label data in the retrieved 12 | dictionary. 13 | 14 | Returns: 15 | A tuple `(data, labels)`.
16 | """ 17 | with open(fpath, "rb") as f: 18 | d = cPickle.load(f, encoding="bytes") 19 | # decode utf8 20 | d_decoded = {} 21 | for k, v in d.items(): 22 | d_decoded[k.decode("utf8")] = v 23 | d = d_decoded 24 | data = d["data"] 25 | labels = d[label_key] 26 | 27 | data = data.reshape(data.shape[0], 3, 32, 32) 28 | return data, labels 29 | -------------------------------------------------------------------------------- /keras/src/distribution/__init__.py: -------------------------------------------------------------------------------- 1 | from keras.src.distribution.distribution_lib import DataParallel 2 | from keras.src.distribution.distribution_lib import DeviceMesh 3 | from keras.src.distribution.distribution_lib import Distribution 4 | from keras.src.distribution.distribution_lib import LayoutMap 5 | from keras.src.distribution.distribution_lib import ModelParallel 6 | from keras.src.distribution.distribution_lib import TensorLayout 7 | from keras.src.distribution.distribution_lib import distribute_tensor 8 | from keras.src.distribution.distribution_lib import distribution 9 | from keras.src.distribution.distribution_lib import initialize 10 | from keras.src.distribution.distribution_lib import list_devices 11 | from keras.src.distribution.distribution_lib import set_distribution 12 | -------------------------------------------------------------------------------- /keras/src/export/__init__.py: -------------------------------------------------------------------------------- 1 | from keras.src.export.onnx import export_onnx 2 | from keras.src.export.saved_model import ExportArchive 3 | from keras.src.export.saved_model import export_saved_model 4 | from keras.src.export.tfsm_layer import TFSMLayer 5 | -------------------------------------------------------------------------------- /keras/src/layers/activations/__init__.py: -------------------------------------------------------------------------------- 1 | from keras.src.layers.activations.elu import ELU 2 | from keras.src.layers.activations.leaky_relu import LeakyReLU 3 | from keras.src.layers.activations.prelu import PReLU 4 | from keras.src.layers.activations.relu import ReLU 5 | from keras.src.layers.activations.softmax import Softmax 6 | -------------------------------------------------------------------------------- /keras/src/layers/activations/activation.py: -------------------------------------------------------------------------------- 1 | from keras.src import activations 2 | from keras.src.api_export import keras_export 3 | from keras.src.layers.layer import Layer 4 | 5 | 6 | @keras_export("keras.layers.Activation") 7 | class Activation(Layer): 8 | """Applies an activation function to an output. 9 | 10 | Args: 11 | activation: Activation function. It could be a callable, or the name of 12 | an activation from the `keras.activations` namespace. 13 | **kwargs: Base layer keyword arguments, such as `name` and `dtype`. 
14 | 15 | Example: 16 | 17 | >>> layer = keras.layers.Activation('relu') 18 | >>> layer(np.array([-3.0, -1.0, 0.0, 2.0])) 19 | [0.0, 0.0, 0.0, 2.0] 20 | >>> layer = keras.layers.Activation(keras.activations.relu) 21 | >>> layer(np.array([-3.0, -1.0, 0.0, 2.0])) 22 | [0.0, 0.0, 0.0, 2.0] 23 | """ 24 | 25 | def __init__(self, activation, **kwargs): 26 | super().__init__(**kwargs) 27 | self.supports_masking = True 28 | self.activation = activations.get(activation) 29 | 30 | self._build_at_init() 31 | 32 | def call(self, inputs): 33 | return self.activation(inputs) 34 | 35 | def compute_output_shape(self, input_shape): 36 | return input_shape 37 | 38 | def get_config(self): 39 | config = {"activation": activations.serialize(self.activation)} 40 | base_config = super().get_config() 41 | return {**base_config, **config} 42 | -------------------------------------------------------------------------------- /keras/src/layers/activations/activation_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from keras.src import activations 4 | from keras.src import layers 5 | from keras.src import testing 6 | 7 | 8 | class ActivationTest(testing.TestCase): 9 | @pytest.mark.requires_trainable_backend 10 | def test_activation_basics(self): 11 | self.run_layer_test( 12 | layers.Activation, 13 | init_kwargs={ 14 | "activation": "relu", 15 | }, 16 | input_shape=(2, 3), 17 | expected_output_shape=(2, 3), 18 | expected_num_trainable_weights=0, 19 | expected_num_non_trainable_weights=0, 20 | expected_num_seed_generators=0, 21 | expected_num_losses=0, 22 | supports_masking=True, 23 | assert_built_after_instantiation=True, 24 | ) 25 | self.run_layer_test( 26 | layers.Activation, 27 | init_kwargs={ 28 | "activation": activations.gelu, 29 | }, 30 | input_shape=(2, 2), 31 | expected_output_shape=(2, 2), 32 | expected_num_trainable_weights=0, 33 | expected_num_non_trainable_weights=0, 34 | expected_num_seed_generators=0, 35 | expected_num_losses=0, 36 | supports_masking=True, 37 | assert_built_after_instantiation=True, 38 | ) 39 | -------------------------------------------------------------------------------- /keras/src/layers/activations/elu.py: -------------------------------------------------------------------------------- 1 | from keras.src import activations 2 | from keras.src.api_export import keras_export 3 | from keras.src.layers.layer import Layer 4 | 5 | 6 | @keras_export("keras.layers.ELU") 7 | class ELU(Layer): 8 | """Applies an Exponential Linear Unit function to an output. 9 | 10 | Formula: 11 | 12 | ``` 13 | f(x) = alpha * (exp(x) - 1.) for x < 0 14 | f(x) = x for x >= 0 15 | ``` 16 | 17 | Args: 18 | alpha: float, slope of negative section. Defaults to `1.0`. 19 | **kwargs: Base layer keyword arguments, such as `name` and `dtype`. 
20 | """ 21 | 22 | def __init__(self, alpha=1.0, **kwargs): 23 | super().__init__(**kwargs) 24 | self.alpha = alpha 25 | self.supports_masking = True 26 | 27 | self._build_at_init() 28 | 29 | def call(self, inputs): 30 | return activations.elu(inputs, alpha=self.alpha) 31 | 32 | def compute_output_shape(self, input_shape): 33 | return input_shape 34 | -------------------------------------------------------------------------------- /keras/src/layers/activations/elu_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | from keras.src import testing 5 | from keras.src.layers.activations import elu 6 | 7 | 8 | class ELUTest(testing.TestCase): 9 | def test_config(self): 10 | elu_layer = elu.ELU() 11 | self.run_class_serialization_test(elu_layer) 12 | 13 | @pytest.mark.requires_trainable_backend 14 | def test_elu(self): 15 | self.run_layer_test( 16 | elu.ELU, 17 | init_kwargs={}, 18 | input_shape=(2, 3, 4), 19 | supports_masking=True, 20 | assert_built_after_instantiation=True, 21 | ) 22 | 23 | def test_correctness(self): 24 | def np_elu(x, alpha=1.0): 25 | return (x > 0) * x + (x <= 0) * alpha * (np.exp(x) - 1) 26 | 27 | x = np.random.random((2, 2, 5)) 28 | elu_layer = elu.ELU() 29 | self.assertAllClose(elu_layer(x), np_elu(x)) 30 | 31 | elu_layer = elu.ELU(alpha=0.7) 32 | self.assertAllClose(elu_layer(x), np_elu(x, alpha=0.7)) 33 | -------------------------------------------------------------------------------- /keras/src/layers/activations/leaky_relu_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | from keras.src import testing 5 | from keras.src.layers.activations import leaky_relu 6 | 7 | 8 | class LeakyReLUTest(testing.TestCase): 9 | @pytest.mark.requires_trainable_backend 10 | def test_leaky_relu(self): 11 | self.run_layer_test( 12 | leaky_relu.LeakyReLU, 13 | init_kwargs={ 14 | "negative_slope": 1, 15 | }, 16 | input_shape=(2, 3, 4), 17 | supports_masking=True, 18 | assert_built_after_instantiation=True, 19 | ) 20 | 21 | def test_leaky_relu_correctness(self): 22 | leaky_relu_layer = leaky_relu.LeakyReLU(negative_slope=0.5) 23 | input = np.array([-10, -5, 0.0, 5, 10]) 24 | expected_output = np.array([-5.0, -2.5, 0.0, 5.0, 10.0]) 25 | result = leaky_relu_layer(input) 26 | self.assertAllClose(result, expected_output) 27 | 28 | def test_invalid_usage(self): 29 | with self.assertRaisesRegex( 30 | ValueError, 31 | "The negative_slope value of a Leaky ReLU layer cannot be None", 32 | ): 33 | self.run_layer_test( 34 | leaky_relu.LeakyReLU, 35 | init_kwargs={"negative_slope": None}, 36 | input_shape=(2, 3, 4), 37 | supports_masking=True, 38 | ) 39 | -------------------------------------------------------------------------------- /keras/src/layers/activations/prelu_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | from keras.src import testing 5 | from keras.src.layers.activations import prelu 6 | 7 | 8 | class PReLUTest(testing.TestCase): 9 | @pytest.mark.requires_trainable_backend 10 | def test_prelu(self): 11 | self.run_layer_test( 12 | prelu.PReLU, 13 | init_kwargs={ 14 | "alpha_initializer": "zeros", 15 | "alpha_regularizer": "L1", 16 | "alpha_constraint": "MaxNorm", 17 | "shared_axes": 1, 18 | }, 19 | input_shape=(2, 3, 4), 20 | supports_masking=True, 21 | ) 22 | 23 | def test_prelu_correctness(self): 24 | def np_prelu(x, alpha): 25 
| return (x > 0) * x + (x <= 0) * alpha * x 26 | 27 | inputs = np.random.randn(2, 10, 5, 3) 28 | prelu_layer = prelu.PReLU( 29 | alpha_initializer="glorot_uniform", 30 | alpha_regularizer="l1", 31 | alpha_constraint="non_neg", 32 | shared_axes=(1, 2), 33 | ) 34 | prelu_layer.build(inputs.shape) 35 | 36 | weights = np.random.random((1, 1, 3)) 37 | prelu_layer.alpha.assign(weights) 38 | ref_out = np_prelu(inputs, weights) 39 | self.assertAllClose(prelu_layer(inputs), ref_out) 40 | -------------------------------------------------------------------------------- /keras/src/layers/attention/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/keras/src/layers/attention/__init__.py -------------------------------------------------------------------------------- /keras/src/layers/convolutional/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/keras/src/layers/convolutional/__init__.py -------------------------------------------------------------------------------- /keras/src/layers/core/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/keras/src/layers/core/__init__.py -------------------------------------------------------------------------------- /keras/src/layers/core/identity.py: -------------------------------------------------------------------------------- 1 | from keras.src import tree 2 | from keras.src.api_export import keras_export 3 | from keras.src.backend import KerasTensor 4 | from keras.src.layers.layer import Layer 5 | 6 | 7 | @keras_export("keras.layers.Identity") 8 | class Identity(Layer): 9 | """Identity layer. 10 | 11 | This layer should be used as a placeholder when no operation is to be 12 | performed. The layer just returns its `inputs` argument as output. 
13 | """ 14 | 15 | def __init__(self, **kwargs): 16 | super().__init__(**kwargs) 17 | self.supports_masking = True 18 | 19 | self._build_at_init() 20 | 21 | def call(self, inputs): 22 | return inputs 23 | 24 | def compute_output_shape(self, input_shape): 25 | return input_shape 26 | 27 | def compute_output_spec(self, inputs): 28 | return tree.map_structure( 29 | lambda x: KerasTensor(x.shape, dtype=x.dtype, sparse=x.sparse), 30 | inputs, 31 | ) 32 | -------------------------------------------------------------------------------- /keras/src/layers/core/identity_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from absl.testing import parameterized 3 | 4 | from keras.src import backend 5 | from keras.src import layers 6 | from keras.src import testing 7 | 8 | 9 | class IdentityTest(testing.TestCase): 10 | @parameterized.named_parameters( 11 | [ 12 | {"testcase_name": "dense", "sparse": False}, 13 | {"testcase_name": "sparse", "sparse": True}, 14 | ] 15 | ) 16 | @pytest.mark.requires_trainable_backend 17 | def test_identity_basics(self, sparse): 18 | if sparse and not backend.SUPPORTS_SPARSE_TENSORS: 19 | pytest.skip("Backend does not support sparse tensors.") 20 | self.run_layer_test( 21 | layers.Identity, 22 | init_kwargs={}, 23 | input_shape=(2, 3), 24 | input_sparse=sparse, 25 | expected_output_shape=(2, 3), 26 | expected_output_sparse=sparse, 27 | expected_num_trainable_weights=0, 28 | expected_num_non_trainable_weights=0, 29 | expected_num_seed_generators=0, 30 | expected_num_losses=0, 31 | run_training_check=not sparse, 32 | supports_masking=True, 33 | assert_built_after_instantiation=True, 34 | ) 35 | -------------------------------------------------------------------------------- /keras/src/layers/core/wrapper.py: -------------------------------------------------------------------------------- 1 | from keras.src.api_export import keras_export 2 | from keras.src.layers.layer import Layer 3 | from keras.src.saving import serialization_lib 4 | 5 | 6 | @keras_export("keras.layers.Wrapper") 7 | class Wrapper(Layer): 8 | """Abstract wrapper base class. 9 | 10 | Wrappers take another layer and augment it in various ways. 11 | Do not use this class as a layer, it is only an abstract base class. 12 | Two usable wrappers are the `TimeDistributed` and `Bidirectional` layers. 13 | 14 | Args: 15 | layer: The layer to be wrapped. 16 | """ 17 | 18 | def __init__(self, layer, **kwargs): 19 | try: 20 | assert isinstance(layer, Layer) 21 | except Exception: 22 | raise ValueError( 23 | f"Layer {layer} supplied to Wrapper isn't " 24 | "a supported layer type. Please " 25 | "ensure wrapped layer is a valid Keras layer." 
26 | ) 27 | super().__init__(**kwargs) 28 | self.layer = layer 29 | 30 | def build(self, input_shape=None): 31 | if not self.layer.built: 32 | self.layer.build(input_shape) 33 | self.layer.built = True 34 | 35 | def get_config(self): 36 | config = {"layer": serialization_lib.serialize_keras_object(self.layer)} 37 | base_config = super().get_config() 38 | return {**base_config, **config} 39 | 40 | @classmethod 41 | def from_config(cls, config, custom_objects=None): 42 | layer = serialization_lib.deserialize_keras_object( 43 | config.pop("layer"), 44 | custom_objects=custom_objects, 45 | ) 46 | return cls(layer, **config) 47 | -------------------------------------------------------------------------------- /keras/src/layers/merging/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/keras/src/layers/merging/__init__.py -------------------------------------------------------------------------------- /keras/src/layers/normalization/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/keras/src/layers/normalization/__init__.py -------------------------------------------------------------------------------- /keras/src/layers/pooling/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/keras/src/layers/pooling/__init__.py -------------------------------------------------------------------------------- /keras/src/layers/pooling/base_global_pooling.py: -------------------------------------------------------------------------------- 1 | from keras.src import backend 2 | from keras.src.layers.input_spec import InputSpec 3 | from keras.src.layers.layer import Layer 4 | 5 | 6 | class BaseGlobalPooling(Layer): 7 | """Base global pooling layer.""" 8 | 9 | def __init__( 10 | self, pool_dimensions, data_format=None, keepdims=False, **kwargs 11 | ): 12 | super().__init__(**kwargs) 13 | 14 | self.data_format = backend.standardize_data_format(data_format) 15 | self.keepdims = keepdims 16 | self.input_spec = InputSpec(ndim=pool_dimensions + 2) 17 | 18 | self._build_at_init() 19 | 20 | def call(self, inputs): 21 | raise NotImplementedError 22 | 23 | def compute_output_shape(self, input_shape): 24 | num_spatial_dims = len(input_shape) - 2 25 | if self.data_format == "channels_last": 26 | if self.keepdims: 27 | return ( 28 | (input_shape[0],) 29 | + (1,) * num_spatial_dims 30 | + (input_shape[-1],) 31 | ) 32 | else: 33 | return (input_shape[0],) + (input_shape[-1],) 34 | else: 35 | if self.keepdims: 36 | return (input_shape[0], input_shape[1]) + ( 37 | 1, 38 | ) * num_spatial_dims 39 | else: 40 | return (input_shape[0], input_shape[1]) 41 | 42 | def get_config(self): 43 | config = super().get_config() 44 | config.update( 45 | { 46 | "data_format": self.data_format, 47 | "keepdims": self.keepdims, 48 | } 49 | ) 50 | return config 51 | -------------------------------------------------------------------------------- /keras/src/layers/preprocessing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/keras/src/layers/preprocessing/__init__.py 
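The `BaseGlobalPooling` layer above only encodes the shape logic shared by the global pooling layers (`call` is left to subclasses). A short usage sketch, not repository code and assuming the public `keras` package with its `GlobalAveragePooling2D` subclass and `keepdims` argument, showing how the two branches of `compute_output_shape` behave for a `channels_last` input:

import numpy as np
import keras

x = np.zeros((2, 4, 4, 3))  # (batch, height, width, channels)

# Spatial axes are reduced away entirely by default...
print(keras.layers.GlobalAveragePooling2D()(x).shape)  # -> (2, 3)
# ...or kept as size-1 axes when keepdims=True.
print(keras.layers.GlobalAveragePooling2D(keepdims=True)(x).shape)  # -> (2, 1, 1, 3)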
-------------------------------------------------------------------------------- /keras/src/layers/preprocessing/image_preprocessing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/keras/src/layers/preprocessing/image_preprocessing/__init__.py -------------------------------------------------------------------------------- /keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/__init__.py -------------------------------------------------------------------------------- /keras/src/layers/regularization/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/keras/src/layers/regularization/__init__.py -------------------------------------------------------------------------------- /keras/src/layers/regularization/activity_regularization.py: -------------------------------------------------------------------------------- 1 | from keras.src import regularizers 2 | from keras.src.api_export import keras_export 3 | from keras.src.layers.layer import Layer 4 | 5 | 6 | @keras_export("keras.layers.ActivityRegularization") 7 | class ActivityRegularization(Layer): 8 | """Layer that applies an update to the cost function based on input activity. 9 | 10 | Args: 11 | l1: L1 regularization factor (positive float). 12 | l2: L2 regularization factor (positive float). 13 | 14 | Input shape: 15 | Arbitrary. Use the keyword argument `input_shape` 16 | (tuple of integers, does not include the samples axis) 17 | when using this layer as the first layer in a model. 18 | 19 | Output shape: 20 | Same shape as input.
21 | """ 22 | 23 | def __init__(self, l1=0.0, l2=0.0, **kwargs): 24 | super().__init__( 25 | activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs 26 | ) 27 | self.supports_masking = True 28 | self.l1 = l1 29 | self.l2 = l2 30 | 31 | self._build_at_init() 32 | 33 | def call(self, inputs): 34 | return inputs 35 | 36 | def compute_output_shape(self, input_shape): 37 | return input_shape 38 | 39 | def get_config(self): 40 | base_config = super().get_config() 41 | base_config.pop("activity_regularizer", None) 42 | config = {"l1": self.l1, "l2": self.l2} 43 | return {**base_config, **config} 44 | -------------------------------------------------------------------------------- /keras/src/layers/regularization/activity_regularization_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | from keras.src import layers 5 | from keras.src.testing import test_case 6 | 7 | 8 | class ActivityRegularizationTest(test_case.TestCase): 9 | def test_correctness(self): 10 | layer = layers.ActivityRegularization(l1=0.2, l2=0.3) 11 | layer(2 * np.ones((1,))) 12 | self.assertLen(layer.losses, 1) 13 | self.assertAllClose(layer.losses[0], 4 * 0.3 + 2 * 0.2) 14 | 15 | @pytest.mark.requires_trainable_backend 16 | def test_activity_regularization_basics(self): 17 | self.run_layer_test( 18 | layers.ActivityRegularization, 19 | {"l1": 0.1, "l2": 0.2}, 20 | input_shape=(2, 3), 21 | input_dtype="float32", 22 | expected_output_shape=(2, 3), 23 | expected_num_trainable_weights=0, 24 | expected_num_non_trainable_weights=0, 25 | expected_num_seed_generators=0, 26 | expected_num_losses=1, 27 | supports_masking=True, 28 | assert_built_after_instantiation=True, 29 | ) 30 | -------------------------------------------------------------------------------- /keras/src/layers/regularization/gaussian_dropout_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | from keras.src import backend 5 | from keras.src import layers 6 | from keras.src import testing 7 | 8 | 9 | class GaussianDropoutTest(testing.TestCase): 10 | @pytest.mark.requires_trainable_backend 11 | def test_gaussian_dropout_basics(self): 12 | self.run_layer_test( 13 | layers.GaussianDropout, 14 | init_kwargs={ 15 | "rate": 0.2, 16 | }, 17 | input_shape=(2, 3), 18 | call_kwargs={"training": True}, 19 | expected_output_shape=(2, 3), 20 | expected_num_trainable_weights=0, 21 | expected_num_non_trainable_weights=0, 22 | expected_num_seed_generators=1, 23 | expected_num_losses=0, 24 | supports_masking=True, 25 | assert_built_after_instantiation=True, 26 | ) 27 | 28 | def test_gaussian_dropout_correctness(self): 29 | inputs = np.ones((20, 500)) 30 | layer = layers.GaussianDropout(0.3, seed=1337) 31 | outputs = layer(inputs, training=True) 32 | self.assertAllClose( 33 | np.std(backend.convert_to_numpy(outputs)), 34 | np.sqrt(0.3 / (1 - 0.3)), 35 | atol=0.02, 36 | ) 37 | -------------------------------------------------------------------------------- /keras/src/layers/regularization/gaussian_noise_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | from keras.src import backend 5 | from keras.src import layers 6 | from keras.src import testing 7 | 8 | 9 | class GaussianNoiseTest(testing.TestCase): 10 | @pytest.mark.requires_trainable_backend 11 | def test_gaussian_noise_basics(self): 12 | self.run_layer_test( 13 | 
layers.GaussianNoise, 14 | init_kwargs={ 15 | "stddev": 0.2, 16 | }, 17 | input_shape=(2, 3), 18 | call_kwargs={"training": True}, 19 | expected_output_shape=(2, 3), 20 | expected_num_trainable_weights=0, 21 | expected_num_non_trainable_weights=0, 22 | expected_num_seed_generators=1, 23 | expected_num_losses=0, 24 | supports_masking=True, 25 | assert_built_after_instantiation=True, 26 | ) 27 | 28 | def test_gaussian_noise_correctness(self): 29 | inputs = np.ones((20, 500)) 30 | layer = layers.GaussianNoise(0.3, seed=1337) 31 | outputs = layer(inputs, training=True) 32 | self.assertAllClose( 33 | np.std(backend.convert_to_numpy(outputs)), 0.3, atol=0.02 34 | ) 35 | -------------------------------------------------------------------------------- /keras/src/layers/reshaping/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/keras/src/layers/reshaping/__init__.py -------------------------------------------------------------------------------- /keras/src/layers/reshaping/repeat_vector.py: -------------------------------------------------------------------------------- 1 | from keras.src import ops 2 | from keras.src.api_export import keras_export 3 | from keras.src.layers.input_spec import InputSpec 4 | from keras.src.layers.layer import Layer 5 | 6 | 7 | @keras_export("keras.layers.RepeatVector") 8 | class RepeatVector(Layer): 9 | """Repeats the input n times. 10 | 11 | Example: 12 | 13 | >>> x = keras.Input(shape=(32,)) 14 | >>> y = keras.layers.RepeatVector(3)(x) 15 | >>> y.shape 16 | (None, 3, 32) 17 | 18 | Args: 19 | n: Integer, repetition factor. 20 | 21 | Input shape: 22 | 2D tensor with shape `(batch_size, features)`. 23 | 24 | Output shape: 25 | 3D tensor with shape `(batch_size, n, features)`. 26 | """ 27 | 28 | def __init__(self, n, **kwargs): 29 | super().__init__(**kwargs) 30 | self.n = n 31 | if not isinstance(n, int): 32 | raise TypeError( 33 | f"Expected an integer value for `n`, got {type(n)}." 
34 | ) 35 | self.input_spec = InputSpec(ndim=2) 36 | 37 | def compute_output_shape(self, input_shape): 38 | return (input_shape[0], self.n, input_shape[1]) 39 | 40 | def call(self, inputs): 41 | input_shape = ops.shape(inputs) 42 | reshaped = ops.reshape(inputs, (input_shape[0], 1, input_shape[1])) 43 | return ops.repeat(reshaped, self.n, axis=1) 44 | 45 | def get_config(self): 46 | config = {"n": self.n} 47 | base_config = super().get_config() 48 | return {**base_config, **config} 49 | -------------------------------------------------------------------------------- /keras/src/layers/reshaping/repeat_vector_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | from keras.src import layers 5 | from keras.src import ops 6 | from keras.src import testing 7 | 8 | 9 | class FlattenTest(testing.TestCase): 10 | @pytest.mark.requires_trainable_backend 11 | def test_repeat_vector(self): 12 | inputs = np.random.random((2, 5)).astype("float32") 13 | expected_output = ops.convert_to_tensor( 14 | np.repeat(np.reshape(inputs, (2, 1, 5)), 3, axis=1) 15 | ) 16 | self.run_layer_test( 17 | layers.RepeatVector, 18 | init_kwargs={"n": 3}, 19 | input_data=inputs, 20 | expected_output=expected_output, 21 | ) 22 | 23 | def test_repeat_vector_with_dynamic_batch_size(self): 24 | input_layer = layers.Input(batch_shape=(None, 5)) 25 | repeated = layers.RepeatVector(n=3)(input_layer) 26 | self.assertEqual(repeated.shape, (None, 3, 5)) 27 | 28 | def test_repeat_vector_with_dynamic_dimension(self): 29 | input_layer = layers.Input(batch_shape=(2, None)) 30 | repeated = layers.RepeatVector(n=3)(input_layer) 31 | self.assertEqual(repeated.shape, (2, 3, None)) 32 | 33 | def test_repeat_vector_with_invalid_n(self): 34 | with self.assertRaisesRegex( 35 | TypeError, "Expected an integer value for `n`" 36 | ): 37 | layers.RepeatVector(n="3") 38 | 39 | with self.assertRaisesRegex( 40 | TypeError, "Expected an integer value for `n`" 41 | ): 42 | layers.RepeatVector(n=3.5) 43 | 44 | with self.assertRaisesRegex( 45 | TypeError, "Expected an integer value for `n`" 46 | ): 47 | layers.RepeatVector(n=[3]) 48 | -------------------------------------------------------------------------------- /keras/src/layers/reshaping/up_sampling1d.py: -------------------------------------------------------------------------------- 1 | from keras.src import ops 2 | from keras.src.api_export import keras_export 3 | from keras.src.layers.input_spec import InputSpec 4 | from keras.src.layers.layer import Layer 5 | 6 | 7 | @keras_export("keras.layers.UpSampling1D") 8 | class UpSampling1D(Layer): 9 | """Upsampling layer for 1D inputs. 10 | 11 | Repeats each temporal step `size` times along the time axis. 12 | 13 | Example: 14 | 15 | >>> input_shape = (2, 2, 3) 16 | >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) 17 | >>> x 18 | [[[ 0 1 2] 19 | [ 3 4 5]] 20 | [[ 6 7 8] 21 | [ 9 10 11]]] 22 | >>> y = keras.layers.UpSampling1D(size=2)(x) 23 | >>> y 24 | [[[ 0. 1. 2.] 25 | [ 0. 1. 2.] 26 | [ 3. 4. 5.] 27 | [ 3. 4. 5.]] 28 | [[ 6. 7. 8.] 29 | [ 6. 7. 8.] 30 | [ 9. 10. 11.] 31 | [ 9. 10. 11.]]] 32 | 33 | Args: 34 | size: Integer. Upsampling factor. 35 | 36 | Input shape: 37 | 3D tensor with shape: `(batch_size, steps, features)`. 38 | 39 | Output shape: 40 | 3D tensor with shape: `(batch_size, upsampled_steps, features)`. 
41 | """ 42 | 43 | def __init__(self, size=2, **kwargs): 44 | super().__init__(**kwargs) 45 | self.size = int(size) 46 | self.input_spec = InputSpec(ndim=3) 47 | 48 | def compute_output_shape(self, input_shape): 49 | size = ( 50 | self.size * input_shape[1] if input_shape[1] is not None else None 51 | ) 52 | return [input_shape[0], size, input_shape[2]] 53 | 54 | def call(self, inputs): 55 | return ops.repeat(x=inputs, repeats=self.size, axis=1) 56 | 57 | def get_config(self): 58 | config = {"size": self.size} 59 | base_config = super().get_config() 60 | return {**base_config, **config} 61 | -------------------------------------------------------------------------------- /keras/src/layers/rnn/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/keras/src/layers/rnn/__init__.py -------------------------------------------------------------------------------- /keras/src/legacy/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/keras/src/legacy/__init__.py -------------------------------------------------------------------------------- /keras/src/legacy/losses.py: -------------------------------------------------------------------------------- 1 | from keras.src.api_export import keras_export 2 | 3 | 4 | @keras_export("keras._legacy.losses.Reduction") 5 | class Reduction: 6 | AUTO = "auto" 7 | NONE = "none" 8 | SUM = "sum" 9 | SUM_OVER_BATCH_SIZE = "sum_over_batch_size" 10 | 11 | @classmethod 12 | def all(cls): 13 | return (cls.AUTO, cls.NONE, cls.SUM, cls.SUM_OVER_BATCH_SIZE) 14 | 15 | @classmethod 16 | def validate(cls, key): 17 | if key not in cls.all(): 18 | raise ValueError( 19 | f'Invalid Reduction Key: {key}. 
Expected keys are "{cls.all()}"' 20 | ) 21 | -------------------------------------------------------------------------------- /keras/src/legacy/preprocessing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/keras/src/legacy/preprocessing/__init__.py -------------------------------------------------------------------------------- /keras/src/legacy/saving/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/keras/src/legacy/saving/__init__.py -------------------------------------------------------------------------------- /keras/src/legacy/saving/saving_options.py: -------------------------------------------------------------------------------- 1 | import contextlib 2 | 3 | from keras.src.backend.common import global_state 4 | 5 | 6 | @contextlib.contextmanager 7 | def keras_option_scope(use_legacy_config=True): 8 | use_legacy_config_prev_value = global_state.get_global_attribute( 9 | "use_legacy_config", None 10 | ) 11 | global_state.set_global_attribute("use_legacy_config", use_legacy_config) 12 | try: 13 | yield 14 | finally: 15 | global_state.set_global_attribute( 16 | "use_legacy_config", use_legacy_config_prev_value 17 | ) 18 | -------------------------------------------------------------------------------- /keras/src/models/__init__.py: -------------------------------------------------------------------------------- 1 | from keras.src.models.functional import Functional 2 | from keras.src.models.model import Model 3 | from keras.src.models.sequential import Sequential 4 | -------------------------------------------------------------------------------- /keras/src/models/variable_mapping_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from keras.src import testing 4 | from keras.src.saving import saving_lib_test 5 | 6 | 7 | class VariableMappingTest(testing.TestCase): 8 | def test_basics(self): 9 | model = saving_lib_test._get_basic_functional_model() 10 | model.optimizer.build(model.trainable_variables) 11 | variable_map = model._get_variable_map() 12 | 13 | self.assertIn("first_dense/kernel", variable_map) 14 | self.assertIn("second_dense/bias", variable_map) 15 | self.assertIn("adam/learning_rate", variable_map) 16 | 17 | model = saving_lib_test._get_basic_sequential_model() 18 | model.build((None, 1)) 19 | model.optimizer.build(model.trainable_variables) 20 | variable_map = model._get_variable_map() 21 | self.assertIn("sequential/dense_1/bias", variable_map) 22 | self.assertIn("adam/learning_rate", variable_map) 23 | 24 | model = saving_lib_test._get_subclassed_model() 25 | model(np.ones((1, 1))) 26 | model.optimizer.build(model.trainable_variables) 27 | variable_map = model._get_variable_map() 28 | self.assertIn("custom_model_x/my_dense_1/dense/kernel", variable_map) 29 | self.assertIn("custom_model_x/my_dense_1/my_dict_weight", variable_map) 30 | self.assertIn( 31 | "custom_model_x/my_dense_1/my_additional_weight", variable_map 32 | ) 33 | self.assertIn("adam/learning_rate", variable_map) 34 | -------------------------------------------------------------------------------- /keras/src/ops/__init__.py: -------------------------------------------------------------------------------- 1 | # from keras.src.ops.numpy import Matmul, matmul 2 | # from 
keras.src.ops.numpy import Add, add 3 | # from keras.src.ops.numpy import Multiply, multiply 4 | 5 | from keras.src.backend import cast 6 | from keras.src.backend import cond 7 | from keras.src.backend import is_tensor 8 | from keras.src.backend import name_scope 9 | from keras.src.backend import random 10 | from keras.src.ops import image 11 | from keras.src.ops import operation_utils 12 | from keras.src.ops.core import * # noqa: F403 13 | from keras.src.ops.linalg import * # noqa: F403 14 | from keras.src.ops.math import * # noqa: F403 15 | from keras.src.ops.nn import * # noqa: F403 16 | from keras.src.ops.numpy import * # noqa: F403 17 | -------------------------------------------------------------------------------- /keras/src/ops/symbolic_arguments.py: -------------------------------------------------------------------------------- 1 | from keras.src import tree 2 | from keras.src.backend import KerasTensor 3 | 4 | 5 | class SymbolicArguments: 6 | def __init__(self, *args, **kwargs): 7 | self.args = tree.map_structure(lambda x: x, args) 8 | self.kwargs = tree.map_structure(lambda x: x, kwargs) 9 | self._flat_arguments = tree.flatten((self.args, self.kwargs)) 10 | 11 | # Used to avoid expensive `tree` operations in the most common case. 12 | if ( 13 | not self.kwargs 14 | and len(self.args) == 1 15 | and isinstance(self.args[0], KerasTensor) 16 | ): 17 | self._single_positional_tensor = self.args[0] 18 | else: 19 | self._single_positional_tensor = None 20 | 21 | self.keras_tensors = [] 22 | for arg in self._flat_arguments: 23 | if isinstance(arg, KerasTensor): 24 | self.keras_tensors.append(arg) 25 | 26 | def convert(self, conversion_fn): 27 | args = tree.map_structure(conversion_fn, self.args) 28 | kwargs = tree.map_structure(conversion_fn, self.kwargs) 29 | return args, kwargs 30 | 31 | def fill_in(self, tensor_dict): 32 | """Maps KerasTensors to computed values using `tensor_dict`. 33 | 34 | `tensor_dict` maps `KerasTensor` instances to their current values. 35 | """ 36 | if self._single_positional_tensor is not None: 37 | # Performance optimization for most common case. 38 | # Approx. 70x faster. 
39 | return (tensor_dict[id(self._single_positional_tensor)],), {} 40 | 41 | def switch_fn(x): 42 | if isinstance(x, KerasTensor): 43 | return tensor_dict.get(id(x), None) 44 | return x 45 | 46 | return self.convert(switch_fn) 47 | -------------------------------------------------------------------------------- /keras/src/optimizers/optimizer.py: -------------------------------------------------------------------------------- 1 | from keras.src import backend 2 | from keras.src.api_export import keras_export 3 | from keras.src.optimizers import base_optimizer 4 | 5 | if backend.backend() == "tensorflow": 6 | from keras.src.backend.tensorflow.optimizer import ( 7 | TFOptimizer as BackendOptimizer, 8 | ) 9 | elif backend.backend() == "torch": 10 | from keras.src.backend.torch.optimizers import ( 11 | TorchOptimizer as BackendOptimizer, 12 | ) 13 | elif backend.backend() == "jax": 14 | from keras.src.backend.jax.optimizer import JaxOptimizer as BackendOptimizer 15 | else: 16 | 17 | class BackendOptimizer(base_optimizer.BaseOptimizer): 18 | pass 19 | 20 | 21 | @keras_export(["keras.Optimizer", "keras.optimizers.Optimizer"]) 22 | class Optimizer(BackendOptimizer, base_optimizer.BaseOptimizer): 23 | pass 24 | 25 | 26 | Optimizer.__doc__ = base_optimizer.BaseOptimizer.__doc__ 27 | base_optimizer_keyword_args = base_optimizer.base_optimizer_keyword_args 28 | -------------------------------------------------------------------------------- /keras/src/optimizers/schedules/__init__.py: -------------------------------------------------------------------------------- 1 | from keras.src.optimizers.schedules.learning_rate_schedule import CosineDecay 2 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 3 | CosineDecayRestarts, 4 | ) 5 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 6 | ExponentialDecay, 7 | ) 8 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 9 | InverseTimeDecay, 10 | ) 11 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 12 | PiecewiseConstantDecay, 13 | ) 14 | from keras.src.optimizers.schedules.learning_rate_schedule import ( 15 | PolynomialDecay, 16 | ) 17 | -------------------------------------------------------------------------------- /keras/src/random/__init__.py: -------------------------------------------------------------------------------- 1 | from keras.src.random.random import categorical 2 | from keras.src.random.random import dropout 3 | from keras.src.random.random import gamma 4 | from keras.src.random.random import normal 5 | from keras.src.random.random import randint 6 | from keras.src.random.random import shuffle 7 | from keras.src.random.random import truncated_normal 8 | from keras.src.random.random import uniform 9 | from keras.src.random.seed_generator import SeedGenerator 10 | -------------------------------------------------------------------------------- /keras/src/saving/__init__.py: -------------------------------------------------------------------------------- 1 | from keras.src.saving.object_registration import CustomObjectScope 2 | from keras.src.saving.object_registration import custom_object_scope 3 | from keras.src.saving.object_registration import get_custom_objects 4 | from keras.src.saving.object_registration import get_registered_name 5 | from keras.src.saving.object_registration import get_registered_object 6 | from keras.src.saving.object_registration import register_keras_serializable 7 | from keras.src.saving.saving_api import load_model 8 | from 
keras.src.saving.serialization_lib import deserialize_keras_object 9 | from keras.src.saving.serialization_lib import serialize_keras_object 10 | -------------------------------------------------------------------------------- /keras/src/saving/keras_saveable.py: -------------------------------------------------------------------------------- 1 | import io 2 | 3 | 4 | class KerasSaveable: 5 | # Note: renaming this function will cause old pickles to be broken. 6 | # This is probably not a huge deal, as pickle should not be a recommended 7 | # saving format -- it should only be supported for use with distributed 8 | # computing frameworks. 9 | 10 | def _obj_type(self): 11 | raise NotImplementedError( 12 | "KerasSaveable subclasses must provide an " 13 | "implementation for `_obj_type()`" 14 | ) 15 | 16 | @classmethod 17 | def _unpickle_model(cls, bytesio): 18 | import keras.src.saving.saving_lib as saving_lib 19 | 20 | # pickle is not safe regardless of what you do. 21 | return saving_lib._load_model_from_fileobj( 22 | bytesio, custom_objects=None, compile=True, safe_mode=False 23 | ) 24 | 25 | def __reduce__(self): 26 | """__reduce__ is used to customize the behavior of pickling. 27 | 28 | The method returns a tuple of two elements: a function, and a list of 29 | arguments to pass to that function. In this case we just leverage the 30 | keras saving library.""" 31 | import keras.src.saving.saving_lib as saving_lib 32 | 33 | buf = io.BytesIO() 34 | saving_lib._save_model_to_fileobj(self, buf, "h5") 35 | return ( 36 | self._unpickle_model, 37 | (buf,), 38 | ) 39 | -------------------------------------------------------------------------------- /keras/src/testing/__init__.py: -------------------------------------------------------------------------------- 1 | from keras.src.testing.test_case import TestCase 2 | from keras.src.testing.test_case import jax_uses_gpu 3 | from keras.src.testing.test_case import tensorflow_uses_gpu 4 | from keras.src.testing.test_case import torch_uses_gpu 5 | from keras.src.testing.test_case import uses_gpu 6 | -------------------------------------------------------------------------------- /keras/src/trainers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/keras-team/keras/3d2db56dd5d917ee9302b37ffe9fa861417ce529/keras/src/trainers/__init__.py -------------------------------------------------------------------------------- /keras/src/tree/__init__.py: -------------------------------------------------------------------------------- 1 | from keras.src.tree.tree_api import assert_same_paths 2 | from keras.src.tree.tree_api import assert_same_structure 3 | from keras.src.tree.tree_api import flatten 4 | from keras.src.tree.tree_api import flatten_with_path 5 | from keras.src.tree.tree_api import is_nested 6 | from keras.src.tree.tree_api import lists_to_tuples 7 | from keras.src.tree.tree_api import map_shape_structure 8 | from keras.src.tree.tree_api import map_structure 9 | from keras.src.tree.tree_api import map_structure_up_to 10 | from keras.src.tree.tree_api import pack_sequence_as 11 | from keras.src.tree.tree_api import register_tree_node_class 12 | from keras.src.tree.tree_api import traverse 13 | -------------------------------------------------------------------------------- /keras/src/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from keras.src.utils.audio_dataset_utils import audio_dataset_from_directory 2 | from 
keras.src.utils.dataset_utils import split_dataset 3 | from keras.src.utils.file_utils import get_file 4 | from keras.src.utils.image_dataset_utils import image_dataset_from_directory 5 | from keras.src.utils.image_utils import array_to_img 6 | from keras.src.utils.image_utils import img_to_array 7 | from keras.src.utils.image_utils import load_img 8 | from keras.src.utils.image_utils import save_img 9 | from keras.src.utils.io_utils import disable_interactive_logging 10 | from keras.src.utils.io_utils import enable_interactive_logging 11 | from keras.src.utils.io_utils import is_interactive_logging_enabled 12 | from keras.src.utils.model_visualization import model_to_dot 13 | from keras.src.utils.model_visualization import plot_model 14 | from keras.src.utils.numerical_utils import normalize 15 | from keras.src.utils.numerical_utils import to_categorical 16 | from keras.src.utils.progbar import Progbar 17 | from keras.src.utils.python_utils import default 18 | from keras.src.utils.python_utils import is_default 19 | from keras.src.utils.python_utils import removeprefix 20 | from keras.src.utils.python_utils import removesuffix 21 | from keras.src.utils.rng_utils import set_random_seed 22 | from keras.src.utils.sequence_utils import pad_sequences 23 | from keras.src.utils.text_dataset_utils import text_dataset_from_directory 24 | from keras.src.utils.timeseries_dataset_utils import ( 25 | timeseries_dataset_from_array, 26 | ) 27 | -------------------------------------------------------------------------------- /keras/src/utils/code_stats.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | def count_loc(directory, exclude=("_test",), extensions=(".py",), verbose=0): 5 | loc = 0 6 | for root, _, fnames in os.walk(directory): 7 | skip = False 8 | for ex in exclude: 9 | if root.endswith(ex): 10 | skip = True 11 | if skip: 12 | continue 13 | 14 | for fname in fnames: 15 | skip = False 16 | for ext in extensions: 17 | if not fname.endswith(ext): 18 | skip = True 19 | break 20 | 21 | for ex in exclude: 22 | if fname.endswith(ex + ext): 23 | skip = True 24 | break 25 | if skip: 26 | continue 27 | 28 | fname = os.path.join(root, fname) 29 | if verbose: 30 | print(f"Count LoCs in {fname}") 31 | 32 | with open(fname) as f: 33 | lines = f.read().split("\n") 34 | 35 | string_open = False 36 | for line in lines: 37 | line = line.strip() 38 | if not line or line.startswith("#"): 39 | continue 40 | if not string_open: 41 | if not line.startswith('"""'): 42 | loc += 1 43 | else: 44 | if not line.endswith('"""'): 45 | string_open = True 46 | else: 47 | if line.startswith('"""'): 48 | string_open = False 49 | return loc 50 | -------------------------------------------------------------------------------- /keras/src/utils/dtype_utils.py: -------------------------------------------------------------------------------- 1 | from keras.src import backend 2 | from keras.src import ops 3 | 4 | DTYPE_TO_SIZE = { 5 | **{f"float{i}": i for i in (16, 32, 64)}, 6 | **{f"int{i}": i for i in (8, 16, 32, 64)}, 7 | **{f"uint{i}": i for i in (8, 16, 32, 64)}, 8 | "bfloat16": 16, 9 | "bool": 1, 10 | } 11 | 12 | 13 | def dtype_size(dtype): 14 | size = DTYPE_TO_SIZE.get(dtype, None) 15 | if size is None: 16 | raise ValueError(f"Invalid dtype: {dtype}") 17 | return size 18 | 19 | 20 | def is_float(dtype): 21 | return "float" in dtype 22 | 23 | 24 | def cast_to_common_dtype(tensors): 25 | """Cast a list of tensors to a common dtype. 
26 | 27 | If any tensor is floating-point, they will all be cast to the most-precise 28 | floating-point dtype. Otherwise the tensors are not cast. 29 | 30 | Args: 31 | tensors: A list of tensors. 32 | 33 | Returns: 34 | Same list, cast to a common dtype. 35 | """ 36 | highest_float = None 37 | highest_float_size = ( 38 | -1 39 | ) # Initially set to an impossible value for comparison 40 | for x in tensors: 41 | dtype = backend.standardize_dtype(x.dtype) 42 | if is_float(dtype): 43 | if highest_float is None or dtype_size(dtype) > highest_float_size: 44 | highest_float = dtype 45 | highest_float_size = dtype_size(dtype) 46 | elif dtype == "float16" and highest_float == "bfloat16": 47 | highest_float = "float32" 48 | highest_float_size = dtype_size(highest_float) 49 | if highest_float: 50 | tensors = [ops.cast(x, highest_float) for x in tensors] 51 | return tensors 52 | -------------------------------------------------------------------------------- /keras/src/utils/jax_utils.py: -------------------------------------------------------------------------------- 1 | from keras.src import backend 2 | 3 | 4 | def is_in_jax_tracing_scope(x=None): 5 | if backend.backend() == "jax": 6 | if x is None: 7 | x = backend.numpy.ones(()) 8 | for c in x.__class__.__mro__: 9 | if c.__name__ == "Tracer" and c.__module__.startswith("jax"): 10 | return True 11 | return False 12 | -------------------------------------------------------------------------------- /keras/src/utils/rng_utils_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | import tensorflow as tf 4 | 5 | import keras 6 | from keras.src import backend 7 | from keras.src.testing import test_case 8 | from keras.src.utils import rng_utils 9 | 10 | 11 | class TestRandomSeedSetting(test_case.TestCase): 12 | @pytest.mark.skipif( 13 | backend.backend() == "numpy", 14 | reason="Numpy backend does not support random seed setting.", 15 | ) 16 | def test_set_random_seed(self): 17 | def get_model_output(): 18 | model = keras.Sequential( 19 | [ 20 | keras.layers.Dense(10), 21 | keras.layers.Dropout(0.5), 22 | keras.layers.Dense(10), 23 | ] 24 | ) 25 | x = np.random.random((32, 10)).astype("float32") 26 | ds = tf.data.Dataset.from_tensor_slices(x).shuffle(32).batch(16) 27 | return model.predict(ds) 28 | 29 | rng_utils.set_random_seed(42) 30 | y1 = get_model_output() 31 | rng_utils.set_random_seed(42) 32 | y2 = get_model_output() 33 | self.assertAllClose(y1, y2) 34 | -------------------------------------------------------------------------------- /keras/src/version.py: -------------------------------------------------------------------------------- 1 | from keras.src.api_export import keras_export 2 | 3 | # Unique source of truth for the version number. 
4 | __version__ = "3.10.0" 5 | 6 | 7 | @keras_export("keras.version") 8 | def version(): 9 | return __version__ 10 | -------------------------------------------------------------------------------- /keras/src/visualization/__init__.py: -------------------------------------------------------------------------------- 1 | from keras.src.visualization import draw_bounding_boxes 2 | from keras.src.visualization import plot_image_gallery 3 | -------------------------------------------------------------------------------- /keras/src/wrappers/__init__.py: -------------------------------------------------------------------------------- 1 | from keras.src.wrappers.sklearn_wrapper import SKLearnClassifier 2 | from keras.src.wrappers.sklearn_wrapper import SKLearnRegressor 3 | from keras.src.wrappers.sklearn_wrapper import SKLearnTransformer 4 | 5 | __all__ = ["SKLearnClassifier", "SKLearnRegressor", "SKLearnTransformer"] 6 | -------------------------------------------------------------------------------- /requirements-common.txt: -------------------------------------------------------------------------------- 1 | pre-commit 2 | namex>=0.0.8 3 | ruff 4 | pytest 5 | numpy 6 | scipy 7 | scikit-learn 8 | pillow 9 | pandas 10 | absl-py 11 | requests 12 | h5py 13 | ml-dtypes 14 | protobuf 15 | tensorboard-plugin-profile 16 | rich 17 | build 18 | optree 19 | pytest-cov 20 | packaging 21 | # for tree_test.py 22 | dm_tree 23 | coverage!=7.6.5 # 7.6.5 breaks CI 24 | # for onnx_test.py 25 | onnxruntime 26 | openvino 27 | -------------------------------------------------------------------------------- /requirements-jax-cuda.txt: -------------------------------------------------------------------------------- 1 | # Tensorflow cpu-only version (needed for testing). 2 | tensorflow-cpu~=2.18.1 3 | tf2onnx 4 | 5 | # Torch cpu-only version (needed for testing). 6 | --extra-index-url https://download.pytorch.org/whl/cpu 7 | torch==2.6.0 8 | 9 | # Jax with cuda support. 10 | --find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html 11 | jax[cuda12]==0.6.0 12 | flax 13 | 14 | -r requirements-common.txt 15 | -------------------------------------------------------------------------------- /requirements-tensorflow-cuda.txt: -------------------------------------------------------------------------------- 1 | # Tensorflow with cuda support. 2 | tensorflow[and-cuda]~=2.18.1 3 | tf2onnx 4 | 5 | # Torch cpu-only version (needed for testing). 6 | --extra-index-url https://download.pytorch.org/whl/cpu 7 | torch==2.6.0 8 | 9 | # Jax cpu-only version (needed for testing). 10 | jax[cpu] 11 | 12 | -r requirements-common.txt 13 | -------------------------------------------------------------------------------- /requirements-torch-cuda.txt: -------------------------------------------------------------------------------- 1 | # Tensorflow cpu-only version (needed for testing). 2 | tensorflow-cpu~=2.18.1 3 | tf2onnx 4 | 5 | # Torch with cuda support. 6 | # - torch is pinned to a version that is compatible with torch-xla 7 | # - torch-xla is pinned to a version that supports GPU (2.6 doesn't) 8 | --extra-index-url https://download.pytorch.org/whl/cu121 9 | torch==2.6.0 10 | torch-xla==2.6.0;sys_platform != 'darwin' 11 | 12 | # Jax cpu-only version (needed for testing). 13 | jax[cpu] 14 | 15 | -r requirements-common.txt 16 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # Tensorflow. 
2 | tensorflow-cpu~=2.18.1;sys_platform != 'darwin' 3 | tensorflow~=2.18.1;sys_platform == 'darwin' 4 | tf_keras 5 | tf2onnx 6 | 7 | # Torch. 8 | --extra-index-url https://download.pytorch.org/whl/cpu 9 | torch==2.6.0;sys_platform != 'darwin' 10 | torch==2.6.0;sys_platform == 'darwin' 11 | torch-xla==2.6.0;sys_platform != 'darwin' 12 | 13 | # Jax. 14 | # Pinned to 0.5.0 on CPU. JAX 0.5.1 requires Tensorflow 2.19 for saved_model_test. 15 | # Note that we test against the latest JAX on GPU. 16 | jax[cpu]==0.5.0 17 | flax 18 | 19 | # Common deps. 20 | -r requirements-common.txt 21 | -------------------------------------------------------------------------------- /shell/api_gen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -Eeuo pipefail 3 | 4 | base_dir=$(dirname $(dirname $0)) 5 | 6 | echo "Generating api directory with public APIs..." 7 | # Generate API Files 8 | python3 "${base_dir}"/api_gen.py 9 | 10 | # Format code because `api_gen.py` might order 11 | # imports differently. 12 | echo "Formatting api directory..." 13 | (SKIP=api-gen pre-commit run --files $(find "${base_dir}"/keras/api -type f) --hook-stage pre-commit || true) > /dev/null 14 | -------------------------------------------------------------------------------- /shell/format.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -Eeuo pipefail 3 | 4 | if ! command -v pre-commit 2>&1 >/dev/null 5 | then 6 | echo 'Please `pip install pre-commit` to run format.sh.' 7 | exit 1 8 | fi 9 | 10 | base_dir=$(dirname $(dirname $0)) 11 | 12 | echo "Formatting all files..." 13 | SKIP=api-gen pre-commit run --all-files 14 | --------------------------------------------------------------------------------
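A minimal usage sketch for the two reshaping layers whose sources appear above (RepeatVector and UpSampling1D). This is illustrative only and not a file from the repository snapshot; it assumes a standard Keras 3 installation and uses only the public `keras.layers` API shown in the sources.

import numpy as np
import keras

# RepeatVector: (batch, features) -> (batch, n, features), per compute_output_shape above.
x = np.random.random((2, 5)).astype("float32")
repeated = keras.layers.RepeatVector(n=3)(x)
assert tuple(repeated.shape) == (2, 3, 5)

# UpSampling1D: repeats each timestep `size` times along axis 1.
y = np.arange(12, dtype="float32").reshape((2, 2, 3))
upsampled = keras.layers.UpSampling1D(size=2)(y)
assert tuple(upsampled.shape) == (2, 4, 3)

# Both layers round-trip through get_config()/from_config(), mirroring the
# get_config() implementations shown above.
layer = keras.layers.RepeatVector(3)
clone = keras.layers.RepeatVector.from_config(layer.get_config())
assert clone.n == 3

The output shapes match the `compute_output_shape` contracts given in the two layer definitions above.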