// An example to perform parallel writes to an HDF5 file using hyperslabs.
// Command line inputs: filename, dimX, dimY, procX, procY
// For this example, the number of processes in dimX (procX) should evenly divide dimX.
// Similarly, the number of processes in dimY (procY) should evenly divide dimY.
// Each rank writes its own chunk (hyperslab) of the dataset to the file.

#include "hdf5.h"
#include "stdlib.h"
#include <stdio.h>
#include <mpi.h>

#define DIMS 2

int main (int argc, char **argv)
{
    hid_t fileId, datasetId1;
    char *v1 = "v1";
    hid_t mspace, fspace;
    hsize_t dimsf[DIMS];
    hsize_t dimsm[DIMS];
    int *data, i, npx, npy;
    hid_t plistId;
    herr_t status;
    int size, rank, newRank;

    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;

    // Initialize the MPI environment and get size and rank
    MPI_Init(&argc, &argv);
    MPI_Comm_size(comm, &size);
    MPI_Comm_rank(comm, &rank);

    // Setup Cartesian info
    int cprocs[2], cpers[2], crnk[2];
    MPI_Comm commCart;

    if (argc != 6) {
        printf("Usage error: please use mpirun -np <nprocs> ./<executable> <filename> <dimX> <dimY> <procX> <procY>\n");
        MPI_Finalize();
        return -1;
    }

    dimsf[0] = atoi(argv[2]);
    dimsf[1] = atoi(argv[3]);
    npx = atoi(argv[4]);
    npy = atoi(argv[5]);

    if ((dimsf[0] % npx) || (dimsf[1] % npy)) {
        printf("The number of processes in each dimension should evenly divide that dimension\n");
        MPI_Finalize();
        return -1;
    }
    if (npx*npy != size) {
        printf("nprocX * nprocY must equal the number of processes\n");
        MPI_Finalize();
        return -1;
    }

    hsize_t count[DIMS], start[DIMS], blk[DIMS], stride[DIMS];

    cprocs[0] = npx; cprocs[1] = npy;   // grid distribution of the processes
    cpers[0] = 0;    cpers[1] = 0;      // no periodicity
    MPI_Cart_create(MPI_COMM_WORLD, 2, cprocs, cpers, 1, &commCart);
    MPI_Comm_rank(commCart, &newRank);
    MPI_Cart_coords(commCart, newRank, 2, crnk);

    count[0] = dimsf[0]/npx;  // evenly divide the blocks in dimX (rows) among the processes
    count[1] = dimsf[1]/npy;  // same as above for dimY
    dimsm[0] = count[0];
    dimsm[1] = count[1];

    // Initialize the offsets
    start[0] = crnk[0] * count[0];
    start[1] = crnk[1] * count[1];

    // Initialize the number of blocks to be selected in each dimension
    blk[0] = blk[1] = 1;        // This is also the default value (same as using NULL within H5Sselect_hyperslab)

    // Initialize the stride for each dimension
    stride[0] = stride[1] = 1;  // This is also the default value (same as using NULL within H5Sselect_hyperslab)

    // Initialize the data to be written
    data = (int *) malloc(sizeof(int)*dimsm[0]*dimsm[1]);
    for (i = 0; i < dimsm[0]*dimsm[1]; i++) {
        data[i] = rank + 10;
    }

    // Create a file access property list to perform parallel I/O
    plistId = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(plistId, comm, info);

    // Create a new file. Note that this is a collective operation
    fileId = H5Fcreate(argv[1], H5F_ACC_TRUNC, H5P_DEFAULT, plistId);
    H5Pclose(plistId);

    // Create the memory and file dataspaces for the dataset
    mspace = H5Screate_simple(DIMS, dimsm, NULL);
    fspace = H5Screate_simple(DIMS, dimsf, NULL);

    // Create the dataset with default properties
    datasetId1 = H5Dcreate(fileId, v1, H5T_NATIVE_INT, fspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    // Create a property list for collective data transfer
    plistId = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plistId, H5FD_MPIO_COLLECTIVE);

    // Select the hyperslab within the file; close the creation-time dataspace
    // first and retrieve the dataset's file dataspace to avoid leaking the handle
    H5Sclose(fspace);
    fspace = H5Dget_space(datasetId1);
    H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, stride, count, blk);
    // Same as the above
    // H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, NULL, count, NULL);

    // Each MPI rank writes its own partial, contiguous dataset
    status = H5Dwrite(datasetId1, H5T_NATIVE_INT, mspace, fspace, plistId, data);

    // De-allocate data
    free(data);

    // Close all the HDF5 resources
    H5Dclose(datasetId1);
    H5Sclose(fspace);
    H5Sclose(mspace);
    H5Fclose(fileId);
    H5Pclose(plistId);

    MPI_Finalize();
    return 0;
}
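
// ---------------------------------------------------------------------------
// Example build and run (a sketch, not part of the program above). The source
// file name, executable name, output file name, and grid sizes are assumptions
// chosen for illustration; adjust them for your installation. h5pcc is the
// HDF5 compiler wrapper for parallel builds.
//
//   h5pcc -o parallel_write parallel_write.c
//   mpirun -np 4 ./parallel_write out.h5 8 8 2 2
//
// With these values, the 8x8 dataset is split over a 2x2 process grid, so each
// of the 4 ranks writes a 4x4 hyperslab filled with the value rank + 10.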