├── .gitmodules
├── helloWorld.py
├── scripts
│   ├── launch.job
│   ├── skylake.err
│   └── skylake.out
├── CMakeLists.txt
├── mpi_lib.cpp
└── README.md

/.gitmodules:
--------------------------------------------------------------------------------
[submodule "pybind11"]
	path = pybind11
	url = https://github.com/pybind/pybind11.git

--------------------------------------------------------------------------------
/helloWorld.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

import mpi4py
mpi4py.rc.threaded = True
mpi4py.rc.thread_level = "funneled"
from mpi4py import MPI
from mpi_lib import Distributed

# Main program that delegates some work to a C++/MPI routine: say_hi
def main():
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    name = MPI.Get_processor_name()
    print("[Python] Hello from machine " + name + ", MPI rank " + str(rank) + " out of " + str(size))
    distrib = Distributed()
    distrib.say_hi()

if __name__ == "__main__":
    main()

--------------------------------------------------------------------------------
/scripts/launch.job:
--------------------------------------------------------------------------------
#!/bin/sh
#SBATCH -J mpi4py_pybind11
#SBATCH -p skylake
#SBATCH -N 4
#SBATCH --ntasks-per-node=2
#SBATCH -c 16
#SBATCH -A b171
#SBATCH -t 0:05:00
#SBATCH -o ./%N.%j.%a.out
#SBATCH -e ./%N.%j.%a.err
#SBATCH --exclusive

# Load the required modules
module purge
module load userspace/all
module load gcc/7.2.0
spack load -r py-mpi4py
spack load -r py-numpy
module load openmpi/gcc72/ofed/3.1.3

echo "Running on: $SLURM_NODELIST"
cd /home/glatu/test_pybind_mpi/build
ulimit -a

export OMP_PLACES=cores
export OMP_PROC_BIND=true
export OMP_DISPLAY_ENV=true
export OMP_SCHEDULE=static
mpirun -report-bindings --map-by socket:PE=1 -bind-to core python3 -m mpi4py helloWorld.py

--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
cmake_minimum_required(VERSION 3.9) # 3.9 to rely on modern OpenMP support
project(test_pybind_mpi)

find_package(PythonLibs)
include_directories(${PYTHON_INCLUDE_DIRS})

find_package(OpenMP)
find_package(MPI REQUIRED)

set(CMAKE_CXX_COMPILE_FLAGS ${CMAKE_CXX_COMPILE_FLAGS} ${MPI_COMPILE_FLAGS})
set(CMAKE_CXX_LINK_FLAGS ${CMAKE_CXX_LINK_FLAGS} ${MPI_LINK_FLAGS})

include_directories(${MPI_INCLUDE_PATH})

add_subdirectory(pybind11)
pybind11_add_module(mpi_lib mpi_lib.cpp)
target_link_libraries(mpi_lib PRIVATE mpi)
if(OpenMP_CXX_FOUND)
  target_link_libraries(mpi_lib PUBLIC OpenMP::OpenMP_CXX)
else()
  message(FATAL_ERROR "Your compiler does not support OpenMP")
endif()

file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/helloWorld.py
     DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)

--------------------------------------------------------------------------------
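
Note: the `file(COPY ...)` step above places `helloWorld.py` next to the freshly built `mpi_lib` module, so the build can be smoke-tested interactively before submitting `scripts/launch.job`. A minimal sketch, assuming it is run from the `build` directory (the file name `smoke_test.py` and the assertion are illustrative, not part of the repository):

```python
# smoke_test.py -- illustrative only; run from the build directory, e.g.:
#   mpirun -n 2 python3 smoke_test.py
import mpi4py
mpi4py.rc.thread_level = "funneled"   # same request as helloWorld.py
from mpi4py import MPI

from mpi_lib import Distributed       # the pybind11 module built above

# MPI thread levels are ordered, so a simple comparison checks that the
# library granted at least the requested level.
assert MPI.Query_thread() >= MPI.THREAD_FUNNELED

Distributed().say_hi()
if MPI.COMM_WORLD.Get_rank() == 0:
    print("smoke test passed on", MPI.COMM_WORLD.Get_size(), "ranks")
```
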
/scripts/skylake.err:
--------------------------------------------------------------------------------
[skylake109.cluster:307562] MCW rank 0 bound to socket 0[core 0[hwt 0]]: [B/././././././././././././././.][./././././././././././././././.]
[skylake109.cluster:307562] MCW rank 1 bound to socket 1[core 16[hwt 0]]: [./././././././././././././././.][B/././././././././././././././.]
[skylake112.cluster:189721] MCW rank 6 bound to socket 0[core 0[hwt 0]]: [B/././././././././././././././.][./././././././././././././././.]
[skylake110.cluster:145399] MCW rank 2 bound to socket 0[core 0[hwt 0]]: [B/././././././././././././././.][./././././././././././././././.]
[skylake112.cluster:189721] MCW rank 7 bound to socket 1[core 16[hwt 0]]: [./././././././././././././././.][B/././././././././././././././.]
[skylake110.cluster:145399] MCW rank 3 bound to socket 1[core 16[hwt 0]]: [./././././././././././././././.][B/././././././././././././././.]
[skylake111.cluster:12455] MCW rank 4 bound to socket 0[core 0[hwt 0]]: [B/././././././././././././././.][./././././././././././././././.]
[skylake111.cluster:12455] MCW rank 5 bound to socket 1[core 16[hwt 0]]: [./././././././././././././././.][B/././././././././././././././.]

--------------------------------------------------------------------------------
/scripts/skylake.out:
--------------------------------------------------------------------------------
“Running on: skylake[109-112]”
core file size (blocks, -c) 0
data seg size (kbytes, -d) unlimited
scheduling priority (-e) 0
file size (blocks, -f) unlimited
pending signals (-i) 767198
max locked memory (kbytes, -l) unlimited
max memory size (kbytes, -m) 189071360
open files (-n) 65536
pipe size (512 bytes, -p) 8
POSIX message queues (bytes, -q) 819200
real-time priority (-r) 0
stack size (kbytes, -s) unlimited
cpu time (seconds, -t) unlimited
max user processes (-u) 4096
virtual memory (kbytes, -v) unlimited
file locks (-x) unlimited
[Python] Hello from machine skylake112.cluster, MPI rank 6 out of 8
[Python] Hello from machine skylake109.cluster, MPI rank 1 out of 8
[Python] Hello from machine skylake110.cluster, MPI rank 2 out of 8
[C++] Hello from machine skylake110.cluster, MPI rank 2 out of 8
[Python] Hello from machine skylake110.cluster, MPI rank 3 out of 8
[C++] Hello from machine skylake110.cluster, MPI rank 3 out of 8
[C++] Hello from machine skylake112.cluster, MPI rank 6 out of 8
[Python] Hello from machine skylake112.cluster, MPI rank 7 out of 8
[C++] Hello from machine skylake112.cluster, MPI rank 7 out of 8
[Python] Hello from machine skylake109.cluster, MPI rank 0 out of 8
[C++] Hello from machine skylake109.cluster, MPI rank 0 out of 8
[C++] Hello from machine skylake109.cluster, MPI rank 1 out of 8
[Python] Hello from machine skylake111.cluster, MPI rank 4 out of 8
[C++] Hello from machine skylake111.cluster, MPI rank 4 out of 8
[Python] Hello from machine skylake111.cluster, MPI rank 5 out of 8
[C++] Hello from machine skylake111.cluster, MPI rank 5 out of 8

--------------------------------------------------------------------------------
/mpi_lib.cpp:
--------------------------------------------------------------------------------
#ifdef _OPENMP
#include <omp.h>
#endif
#include <mpi.h>
#include <cstdio>
#include <pybind11/pybind11.h>

namespace py = pybind11;
using pymod = pybind11::module;

class Distributed
{
public:
    Distributed() : comm_global(MPI_COMM_WORLD) {}

    ~Distributed() {}

    // Every rank prints a greeting; rank 0 additionally reports the MPI thread
    // level and runs a small OpenMP parallel region.
    void say_hi() {
        int world_size;
        MPI_Comm_size(comm_global, &world_size);
        int world_rank;
        MPI_Comm_rank(comm_global, &world_rank);
        char processor_name[MPI_MAX_PROCESSOR_NAME] = "localhost";
        int name_len;
        MPI_Get_processor_name(processor_name, &name_len);
        printf("[C++] Hello from machine %s, MPI rank %d out of %d\n",
               processor_name,
               world_rank,
               world_size);
        if (world_rank == 0) {
            // Report the thread support level actually provided by the MPI library.
            int thread_level;
            MPI_Query_thread(&thread_level);
            switch (thread_level) {
            case MPI_THREAD_SINGLE:
                printf("Detected thread level MPI_THREAD_SINGLE\n");
                fflush(stdout);
                break;
            case MPI_THREAD_FUNNELED:
                printf("Detected thread level MPI_THREAD_FUNNELED\n");
                fflush(stdout);
                break;
            case MPI_THREAD_SERIALIZED:
                printf("Detected thread level MPI_THREAD_SERIALIZED\n");
                fflush(stdout);
                break;
            case MPI_THREAD_MULTIPLE:
                printf("Detected thread level MPI_THREAD_MULTIPLE\n");
                fflush(stdout);
                break;
            }
            int nthreads, tid;
#pragma omp parallel private(nthreads, tid)
            {
                /* Obtain thread number */
                tid = omp_get_thread_num();
                printf("Hello World from thread = %d\n", tid);

                /* Only master thread does this */
                if (tid == 0)
                {
                    nthreads = omp_get_num_threads();
                    printf("Number of threads = %d\n", nthreads);
                }
            }
        }
    }

private:
    MPI_Comm comm_global;
};


PYBIND11_MODULE(mpi_lib, mmod)
{
    constexpr auto MODULE_DESCRIPTION = "Just testing out mpi with python.";
    mmod.doc() = MODULE_DESCRIPTION;

    // Expose the Distributed class and its say_hi() method to Python.
    py::class_<Distributed>(mmod, "Distributed")
        // .def(py::init())
        .def(py::init<>())
        .def("say_hi", &Distributed::say_hi, "Each process will say hi");
}

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Example of using a C++ library with pybind11, mpi4py & MPI

## Introduction

This repository demonstrates how to call a C++ class from Python using pybind11 together with MPI.

To fetch this repository, use the following command:
```
git clone --recursive https://codev-tuleap.cea.fr/plugins/git/helix/test_pybind_mpi.git
```
or
```
git clone https://codev-tuleap.cea.fr/plugins/git/helix/test_pybind_mpi.git
git submodule init
git submodule update --init
```

Derived from the following URLs:

- https://coderefinery.github.io/mma/03-pybind11
- https://stackoverflow.com/questions/49259704/pybind11-possible-to-use-mpi4py/50022979
- https://lipn.univ-paris13.fr/~coti/doc/tutopympi.pdf
- https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html

## Contents

Main files:

- `mpi_lib.cpp`: C++/MPI library code
- `helloWorld.py`: Python code that calls the C++/MPI library through pybind11 and mpi4py (see the note just below this list)

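One detail worth spelling out about how these two files cooperate: importing `mpi4py` initializes MPI on the Python side (honouring the `mpi4py.rc` settings made beforehand), which is why `mpi_lib.cpp` can use `MPI_COMM_WORLD` directly without ever calling `MPI_Init`. A minimal illustration of that assumption (not part of the repository):

```python
from mpi4py import MPI            # MPI_Init_thread() happens here, driven by mpi4py.rc
from mpi_lib import Distributed   # the C++ side reuses the already-initialized MPI

assert MPI.Is_initialized()       # already true right after the import
Distributed().say_hi()
```
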
## How to configure your system

An easy way to set up the required packages is to use the 'spack' machinery:
```
git clone https://github.com/spack/spack
source spack/share/spack/setup-env.sh # you may also want to add this line to your .bashrc
spack install openmpi
spack install python@3.7.4
spack install py-mpi4py
spack install py-numpy
spack install cmake@3.12.4
```

Each time you want to use the installed packages, run:
```
spack load -r openmpi
spack load python@3.7.4
spack load -r py-mpi4py
spack load -r py-numpy
spack load -r cmake@3.12.4
```

A quicker alternative, without spack (but requiring root access), is:
```
apt install openmpi-bin libopenmpi-dev
apt install python3
apt install cmake
pip3 install numpy
pip3 install https://bitbucket.org/mpi4py/mpi4py/get/master.tar.gz
```


## How to build this demo

```
cd test_pybind_mpi
mkdir build
cd build
export CC=<your C compiler>
export CXX=<your C++ compiler>
cmake ..
make
```

## Example test run

```
mpirun -n 8 python3 helloWorld.py
[Python] Hello from machine skylake095.cluster, MPI rank 3 out of 8
[C++] Hello from machine skylake095.cluster, MPI rank 3 out of 8
[Python] Hello from machine skylake095.cluster, MPI rank 2 out of 8
[C++] Hello from machine skylake095.cluster, MPI rank 2 out of 8
[Python] Hello from machine skylake090.cluster, MPI rank 1 out of 8
[C++] Hello from machine skylake090.cluster, MPI rank 1 out of 8
[Python] Hello from machine skylake099.cluster, MPI rank 5 out of 8
[C++] Hello from machine skylake099.cluster, MPI rank 5 out of 8
[Python] Hello from machine skylake099.cluster, MPI rank 4 out of 8
[C++] Hello from machine skylake099.cluster, MPI rank 4 out of 8
[Python] Hello from machine skylake100.cluster, MPI rank 6 out of 8
[C++] Hello from machine skylake100.cluster, MPI rank 6 out of 8
[Python] Hello from machine skylake100.cluster, MPI rank 7 out of 8
[C++] Hello from machine skylake100.cluster, MPI rank 7 out of 8
[Python] Hello from machine skylake090.cluster, MPI rank 0 out of 8
[C++] Hello from machine skylake090.cluster, MPI rank 0 out of 8
Detected thread level MPI_THREAD_FUNNELED
Hello World from thread = 0
Hello World from thread = 5
Hello World from thread = 4
Hello World from thread = 10
Hello World from thread = 1
Hello World from thread = 2
Hello World from thread = 13
Hello World from thread = 3
Hello World from thread = 11
Hello World from thread = 8
Hello World from thread = 9
Hello World from thread = 6
Hello World from thread = 14
Hello World from thread = 7
Hello World from thread = 12
Hello World from thread = 15
Number of threads = 16
```
--------------------------------------------------------------------------------
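
Closing note: the interactive run above starts the program with a plain `python3 helloWorld.py`, whereas `scripts/launch.job` uses `python3 -m mpi4py helloWorld.py` (the `mpi4py.run` mechanism listed in the README references). According to the mpi4py documentation, running under `-m mpi4py` turns an unhandled Python exception on any rank into an `MPI_Abort`, so a failing rank stops the whole job instead of possibly leaving the other ranks blocked. A purely illustrative script (not part of the repository) showing the difference:

```python
# fail_demo.py -- illustrative only.  Compare:
#   mpirun -n 2 python3 fail_demo.py            # surviving ranks may sit in bcast
#   mpirun -n 2 python3 -m mpi4py fail_demo.py  # MPI_Abort terminates the whole job
from mpi4py import MPI

comm = MPI.COMM_WORLD
if comm.Get_rank() == 0:
    raise RuntimeError("simulated failure on rank 0")
comm.bcast(None, root=0)   # ranks other than 0 wait here for a broadcast from rank 0
```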