// An example to perform parallel writes to an HDF5 file using hyperslabs.
// Command line inputs: filename, dimX, dimY (2D dataset dimensions)
// For this example, the number of processes should evenly divide dimX.
// Each rank writes a partial, contiguous dataset into the file.

#include "hdf5.h"
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

#define DIMS 2

int main(int argc, char **argv)
{
    hid_t    fileId, datasetId1;
    const char *v1 = "v1";
    hid_t    mspace, fspace;
    hsize_t  dimsf[DIMS];
    hsize_t  dimsm[DIMS];
    int     *data;
    hsize_t  i;
    hid_t    plistId;
    herr_t   status;
    int      size, rank;
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;

    // Initialize the MPI environment and get size and rank
    MPI_Init(&argc, &argv);
    MPI_Comm_size(comm, &size);
    MPI_Comm_rank(comm, &rank);

    if (argc != 4) {
        printf("Usage: mpirun -np <nprocs> <executable> <filename> <dimX> <dimY>\n");
        MPI_Finalize();
        return -1;
    }

    dimsf[0] = atoi(argv[2]);
    dimsf[1] = atoi(argv[3]);

    if (dimsf[0] % size) {
        printf("Number of processes should evenly divide dimX\n");
        MPI_Finalize();
        return -1;
    }

    hsize_t count[DIMS], start[DIMS], blk[DIMS], stride[DIMS];
    count[0] = dimsf[0] / size; // evenly divide the number of blocks in dimX (rows) among the processes
    count[1] = dimsf[1];        // select all the blocks along dimY (columns)

    dimsm[0] = count[0];
    dimsm[1] = count[1];

    // Initialize the offsets
    start[0] = rank * count[0];
    start[1] = 0;

    // Initialize the number of blocks to be selected in each dimension
    blk[0] = blk[1] = 1; // This is also the default value (same as passing NULL to H5Sselect_hyperslab)

    // Initialize the stride for each dimension
    stride[0] = stride[1] = 1; // This is also the default value (same as passing NULL to H5Sselect_hyperslab)

    // Initialize the data to be written
    data = (int *) malloc(sizeof(int) * dimsm[0] * dimsm[1]);
    for (i = 0; i < dimsm[0] * dimsm[1]; i++) {
        data[i] = rank + 10;
    }

    // Create a file access property list to perform parallel I/O
    plistId = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(plistId, comm, info);

    // Create a new file. Note that this is a collective operation
    fileId = H5Fcreate(argv[1], H5F_ACC_TRUNC, H5P_DEFAULT, plistId);
    H5Pclose(plistId);

    // Create the dataspaces: mspace describes each rank's block in memory,
    // fspace describes the full dataset in the file
    mspace = H5Screate_simple(DIMS, dimsm, NULL);
    fspace = H5Screate_simple(DIMS, dimsf, NULL);

    // Create the dataset with default properties
    datasetId1 = H5Dcreate(fileId, v1, H5T_NATIVE_INT, fspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    // Create a property list for collective data transfer
    plistId = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plistId, H5FD_MPIO_COLLECTIVE);

    // Select this rank's hyperslab within the file dataspace
    H5Sclose(fspace); // release the creation dataspace before re-acquiring it from the dataset
    fspace = H5Dget_space(datasetId1);
    H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, stride, count, blk);
    // Same as the above:
    // H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, NULL, count, NULL);

    // Each MPI rank writes its own partial, contiguous dataset
    status = H5Dwrite(datasetId1, H5T_NATIVE_INT, mspace, fspace, plistId, data);

    // De-allocate data
    free(data);

    // Close all the HDF5 resources
    H5Dclose(datasetId1);
    H5Sclose(fspace);
    H5Sclose(mspace);
    H5Fclose(fileId);
    H5Pclose(plistId);

    MPI_Finalize();
    return 0;
}
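
/*
 * A minimal build-and-run sketch for this example. It assumes a parallel
 * HDF5 installation that provides the h5pcc compiler wrapper; the source
 * file name, executable name, output file name, process count, and dataset
 * dimensions below are illustrative, not part of the example itself:
 *
 *   h5pcc -o hyperslab_write hyperslab_write.c
 *   mpirun -np 4 ./hyperslab_write output.h5 8 6
 *
 * With 4 ranks and dimX = 8, each rank writes a 2 x 6 block of rows filled
 * with the value (rank + 10), so the 8 x 6 dataset "v1" ends up with rows
 * 0-1 = 10, rows 2-3 = 11, rows 4-5 = 12, and rows 6-7 = 13.
 */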