/*! \file io.cxx
 *  \brief this file contains routines for io
 */

//-- IO

//standard headers used by the io routines below (the original include list did not survive in this copy)
#include <sys/stat.h>
#include <algorithm>
#include <cassert>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <numeric>
#include <sstream>
#include <string>
#include <vector>

#include "stf.h"

#include "gadgetitems.h"
#include "tipsy_structs.h"
#include "endianutils.h"
#ifdef USEHDF
#include "hdfitems.h"
#endif
#include "ramsesitems.h"
#ifdef USEXDR
#include "nchiladaitems.h"
#endif
#ifdef USEADIOS
#include "adios.h"
#endif
#include "io.h"
#include "ioutils.h"
#include "logging.h"
#include "timer.h"

///write the information stored in a unit struct as meta data into a HDF5 file
#ifdef USEHDF
inline void WriteHeaderUnitEntry(Options & opt, H5OutputFile & hfile, string datasetname, HeaderUnitInfo &u)
{
    hfile.write_attribute(datasetname, "Dimension_Mass", u.massdim);
    hfile.write_attribute(datasetname, "Dimension_Length", u.lengthdim);
    hfile.write_attribute(datasetname, "Dimension_Velocity", u.velocitydim);
    hfile.write_attribute(datasetname, "Dimension_Time", u.timedim);
    if (u.extrainfo.size()>0){
        hfile.write_attribute(datasetname, "Dimension_Extra_Info", u.extrainfo);
    }
}
#endif

///Checks if a file exists by attempting to get its file attributes.
///On success the file obviously exists.
///Failure may mean that we don't have permission to access the folder which contains the file, or that the file doesn't exist.
///If that level of checking is needed, look up the return values of stat, which give more detail on why the call failed.
bool FileExists(const char *fname) {
    struct stat stFileInfo;
    return stat(fname, &stFileInfo) == 0;
}

///\name Read particle data files
//@{

///Reads the header structure if it's a tipsy file or get_nbodies if gadget
Int_t ReadHeader(Options &opt){
    InitEndian();
    if (opt.inputtype==IOTIPSY) {
        struct tipsy_dump tipsyheader;
        fstream Ftip(opt.fname, ios::in | ios::binary);
        if (!Ftip) {cerr<<"ERROR: Unable to open " <<opt.fname<<endl;}
        //the remainder of ReadHeader did not survive in this copy of the file
    }
}

///Reads particle data, calling the reader appropriate to opt.inputtype
///(declaration reconstructed; the original was truncated along with the end of ReadHeader)
void ReadData(Options &opt, vector<Particle> &Part, const Int_t nbodies, Particle *&Pbaryons, Int_t nbaryons)
{
    InitEndian();
#ifndef USEMPI
    int ThisTask = 0, NProcs = 1;
    Int_t Nlocal = nbodies;
#endif
    if (ThisTask==0) {
        LOG(info) << "Reading input ...";
#ifdef USEMPI
        LOG(info) << "Each MPI read thread, of which there are " << opt.nsnapread
            << ", will allocate " << vr::memory_amount(opt.mpiparticlebufsize * NProcs * sizeof(Particle))
            << " to store particle data";
        LOG(info) << "Sending information to non-read threads in chunks of " << opt.mpiparticlebufsize << " particles";
        LOG(info) << "This requires approximately " << (int)(Nlocal/(double)opt.mpiparticlebufsize) << " receive operations";
#endif
    }

    if(opt.inputtype==IOTIPSY) ReadTipsy(opt,Part,nbodies, Pbaryons, nbaryons);
    else if (opt.inputtype==IOGADGET) ReadGadget(opt,Part,nbodies, Pbaryons, nbaryons);
    else if (opt.inputtype==IORAMSES) ReadRamses(opt,Part,nbodies, Pbaryons, nbaryons);
#ifdef USEHDF
    else if (opt.inputtype==IOHDF) ReadHDF(opt,Part,nbodies, Pbaryons, nbaryons);
#endif
#ifdef USEXDR
    else if (opt.inputtype==IONCHILADA) ReadNchilada(opt,Part,nbodies, Pbaryons, nbaryons);
#endif

#ifdef NOMASS
    NOMASSCheck(opt);
#endif

    AdjustHydroQuantities(opt,Part,nbodies);
    AdjustStarQuantities(opt,Part,nbodies);
    AdjustBHQuantities(opt,Part,nbodies);

#ifdef USEMPI
    MPIAdjustDomain(opt);
#endif

    LOG_RANK0(info) << "Done loading input data";
    MEMORY_USAGE_REPORT(debug);
}

//Adjust particle data to appropriate units
void AdjustHydroQuantities(Options &opt, vector<Particle> &Part, const Int_t nbodies) {
#ifdef GASON
    for (auto &p:Part) {
        if (p.GetType()!=GASTYPE) continue;
        p.SetU(p.GetU()*opt.internalenergyinputconversion);
    }
#ifdef STARON
    if (opt.metallicityinputconversion!=1.0) {
        for (auto &p:Part) {
            if (p.GetType()!=GASTYPE) continue;
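            //scale the gas metallicity by the input conversion factor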
p.SetZmet(p.GetZmet()*opt.metallicityinputconversion); } } if (opt.isfrisssfr==1) { for (auto &p:Part) { if (p.GetType()!=GASTYPE) continue; p.SetSFR(p.GetSFR()*p.GetMass()); } } if (opt.SFRinputconversion!=1.0) { for (auto &p:Part) { if (p.GetType()!=GASTYPE) continue; p.SetSFR(p.GetSFR()*opt.SFRinputconversion); } } #endif #endif } void AdjustStarQuantities(Options &opt, vector &Part, const Int_t nbodies) { #ifdef STARON if (opt.metallicityinputconversion!=1.0) { for (auto &p:Part) { if (p.GetType()!=STARTYPE) continue; p.SetZmet(p.GetZmet()*opt.metallicityinputconversion); } } if (opt.istellaragescalefactor!=0 || opt.stellarageinputconversion!=1.0) { double tage; for (auto &p:Part) { if (p.GetType()!=STARTYPE) continue; //if stellar age is initially stored as scale factor of formation if (opt.istellaragescalefactor == 1) { //make sure units are consisten with those internal to the input tables as CalcCosmicTime returns time in yrs. tage = CalcCosmicTime(opt,p.GetTage(),opt.a) / opt.stellaragetoyrs; } //if stellar age is initially stored as redshift of formation else if (opt.istellaragescalefactor == 2) { tage = CalcCosmicTime(opt,1.0/(p.GetTage()+1),opt.a) / opt.stellaragetoyrs; } //if stellar age is initially stored as time of formation else if (opt.istellaragescalefactor == 3) { tage = opt.a-p.GetTage(); } //if stellar age is initially stored as an age else tage = p.GetTage(); tage*=opt.stellarageinputconversion; p.SetTage(tage); } } #endif } void AdjustBHQuantities(Options &opt, vector &Part, const Int_t nbodies) { #ifdef BHON if (opt.metallicityinputconversion!=1.0) { for (auto &p:Part) { if (p.GetType()!=BHTYPE) continue; p.SetZmet(p.GetZmet()*opt.metallicityinputconversion); } } #endif } //@} ///\name Read STF data files //@{ ///Read local velocity density void ReadLocalVelocityDensity(Options &opt, const Int_t nbodies, vector &Part){ Int_t tempi; Double_t tempd; fstream Fin; char fname[1000]; //set filename appropriate to mpi thread if necessary #ifdef USEMPI sprintf(fname,"%s.%d",opt.smname,ThisTask); #else sprintf(fname,"%s",opt.smname); #endif LOG(info) << "Reading smooth density data from " << fname; if (opt.ibinaryout==OUTBINARY) { Fin.open(fname,ios::in| ios::binary); Fin.read((char*)&tempi,sizeof(Int_t)); if (tempi!=nbodies) { LOG(error) << "File " << fname << " contains incorrect number of particles. Exiting"; exit(9); } for(Int_t i=0;i>tempi; if (tempi!=nbodies) { LOG(error) << "File " << fname << " contains incorrect number of particles. 
Exiting"; exit(9); } for(Int_t i=0;i>tempd;Part[i].SetDensity(tempd);} } LOG(info) << "Done"; Fin.close(); } //@} /// \name Write STF data files for intermediate steps //@{ ///Writes local velocity density of each particle to a file void WriteLocalVelocityDensity(Options &opt, const Int_t nbodies, vector &Part){ fstream Fout; char fname[1000]; #ifdef USEMPI if(opt.smname==NULL) sprintf(fname,"%s.smdata.%d",opt.outname,ThisTask); else sprintf(fname,"%s.%d",opt.smname,ThisTask); #else if(opt.smname==NULL) sprintf(fname,"%s.smdata",opt.outname); else sprintf(fname,"%s",opt.smname); #endif if (opt.ibinaryout==OUTBINARY) { Fout.open(fname,ios::out|ios::binary); Fout.write((char*)&nbodies,sizeof(Int_t)); Double_t tempd; for(Int_t i=0;i &Part, Int_t nadditional){ fstream Fout,Fout2,Fout3; string fname, fname2, fname3; ostringstream os; unsigned long long noffset=0,ngtot=0,nids=0,nidstot=0,nuids=0,nuidstot=0, ng=0, nwritecommtot=0, nuwritecommtot=0; vector groupdata; vector offset; vector partdata; #ifdef USEMPI MPIBuildWriteComm(opt); #endif #ifdef USEHDF H5OutputFile Fhdf, Fhdf3; int itemp=0; int ival; #if defined(USEMPI) && defined(USEPARALLELHDF) vector mpi_ngoffset(NProcs); Int_t ngoffset; #endif #endif #ifdef USEADIOS int adios_err; uint64_t adios_groupsize , adios_totalsize ; int64_t adios_file_handle,adios_file_handle3; int64_t adios_grp_handle, adios_grp_handle3; int64_t adios_var_handle; int64_t adios_attr_handle; #endif #if defined(USEHDF)||defined(USEADIOS) DataGroupNames datagroupnames; #endif vr::Timer write_timer; #ifndef USEMPI int ThisTask=0,NProcs=1; #endif os << opt.outname << ".catalog_groups"; #ifdef USEMPI if (opt.ibinaryout==OUTHDF) { #ifdef USEPARALLELHDF os<<"."<1){ //if parallel then open file in serial so task 0 writes header Fhdf.create(string(fname),H5F_ACC_TRUNC, 0, false); } else{ Fhdf.create(string(fname),H5F_ACC_TRUNC, ThisWriteComm, false); } } #else //create file else if (opt.ibinaryout==OUTHDF) { Fhdf.create(string(fname)); } #endif #endif #ifdef USEADIOS else if (opt.ibinaryout==OUTADIOS) { //open an adios file adios_err=adios_open(&adios_file_handle, "VELOCIraptor_catalog_groups", fname, "w", MPI_COMM_WORLD); } #endif else Fout.open(fname,ios::out); ng=ngroups; #ifdef USEMPI if (NProcs > 1) { for (int j=0;j1){ MPI_Allreduce(&ng, &nwritecommtot, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, mpi_comm_write); if (ThisWriteTask==0) { Fhdf.write_dataset(opt, datagroupnames.group[itemp++], 1, &ThisWriteComm, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.group[itemp++], 1, &NWriteComms, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.group[itemp++], 1, &nwritecommtot, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.group[itemp++], 1, &ngtot, -1, -1, false); } else { itemp=4; } Fhdf.close(); MPI_Barrier(MPI_COMM_WORLD); //reopen for parallel write Fhdf.append(string(fname)); } else{ itemp=0; Fhdf.write_dataset(opt, datagroupnames.group[itemp], 1, &ThisTask); itemp++; Fhdf.write_dataset(opt, datagroupnames.group[itemp], 1, &NProcs); itemp++; Fhdf.write_dataset(opt, datagroupnames.group[itemp], 1, &ng); itemp++; Fhdf.write_dataset(opt, datagroupnames.group[itemp], 1, &ngtot); itemp++; } #else itemp=0; Fhdf.write_dataset(opt, datagroupnames.group[itemp], 1, &ThisTask); itemp++; Fhdf.write_dataset(opt, datagroupnames.group[itemp], 1, &NProcs); itemp++; Fhdf.write_dataset(opt, datagroupnames.group[itemp], 1, &ng); itemp++; Fhdf.write_dataset(opt, datagroupnames.group[itemp], 1, &ngtot); itemp++; #endif } #endif #ifdef USEADIOS else if 
(opt.ibinaryout==OUTADIOS) { //declare the attributes in a header group, assiging the group handle, setting the name, no time step indicator, and a flag saying yes to all statistics adios_err=adios_declare_group(&adios_grp_handle,"Header", "" , adios_stat_full); //select simple mpi method adios_select_method (adios_grp_handle, "MPI", "", ""); //define some attributes adios_err=adios_define_attribute(adios_grp_handle,datagroupnames.group[itemp].c_str(),"",datagroupnames.adiosgroupdatatype[itemp],to_string(ThisTask).c_str(),""); itemp++; adios_err=adios_define_attribute(adios_grp_handle,datagroupnames.group[itemp].c_str(),"",datagroupnames.adiosgroupdatatype[itemp],to_string(NProcs).c_str(),""); itemp++; adios_err=adios_define_attribute(adios_grp_handle,datagroupnames.group[itemp].c_str(),"",datagroupnames.adiosgroupdatatype[itemp],to_string(ng).c_str(),""); itemp++; adios_err=adios_define_attribute(adios_grp_handle,datagroupnames.group[itemp].c_str(),"",datagroupnames.adiosgroupdatatype[itemp],to_string(ngtot).c_str(),""); itemp++; ///\todo don't actually know if I should use adios attribute or var to store simple single values } #endif else{ Fout<1) for (Int_t i=2;i<=ng;i++) offset[i]=offset[i-1]+pglist[i-1][numingroup[i-1]]; if (opt.ibinaryout==OUTBINARY) Fout.write((char*)&(offset.data())[1],sizeof(Int_t)*ngroups); #ifdef USEHDF else if (opt.ibinaryout==OUTHDF) { for (auto &x:groupdata) x=0; if (ng > 1) for (Int_t i=2;i<=ng;i++) groupdata[i-1]=offset[i]; #ifdef USEPARALLELHDF if(opt.mpinprocswritesize>1){ nids = 0; for (Int_t i=1; i<=ng; i++) nids+=pglist[i][numingroup[i]]; MPI_Allgather(&nids, 1, MPI_Int_t, mpi_ngoffset.data(), 1, MPI_Int_t, mpi_comm_write); ngoffset = 0; if (ThisWriteTask > 0) { for (auto itask = 0; itask < ThisWriteTask; itask++) ngoffset += mpi_ngoffset[itask]; if (ng > 0) for (Int_t i=1; i<=ng; i++) groupdata[i-1] += ngoffset; } } #endif Fhdf.write_dataset(opt, datagroupnames.group[itemp], ng, groupdata.data()); itemp++; } #endif #ifdef USEADIOS else if (opt.ibinaryout==OUTADIOS) { //don't delcare new group, just add data adios_err=adios_define_var(adios_grp_handle,datagroupnames.group[itemp].c_str(),"",datagroupnames.adiosgroupdatatype[itemp],"ng","ngtot","ngmpioffset"); for (Int_t i=1;i<=ng;i++) groupdata[i-1]=offset[i]; adios_err=adios_write(adios_file_handle,datagroupnames.group[itemp].c_str(),groupdata.data()); itemp++; } #endif else for (Int_t i=1;i<=ng;i++) Fout<1) for (Int_t i=2;i<=ng;i++) offset[i]=offset[i-1]+numingroup[i-1]-pglist[i-1][numingroup[i-1]]; if (opt.ibinaryout==OUTBINARY) Fout.write((char*)&(offset.data())[1],sizeof(Int_t)*ngroups); #ifdef USEHDF else if (opt.ibinaryout==OUTHDF) { for (auto &x:groupdata) x=0; if (ng > 1) for (Int_t i=2;i<=ng;i++) groupdata[i-1]=offset[i]; #ifdef USEPARALLELHDF if(opt.mpinprocswritesize>1){ nuids = 0; for (Int_t i=1; i<=ng; i++) nuids+=numingroup[i]-pglist[i][numingroup[i]]; MPI_Allgather(&nuids, 1, MPI_Int_t, mpi_ngoffset.data(), 1, MPI_Int_t, mpi_comm_write); ngoffset = 0; if (ThisWriteTask > 0) { for (auto itask = 0; itask < ThisWriteTask; itask++) ngoffset += mpi_ngoffset[itask]; if (ng > 0) for (Int_t i=1; i<=ng; i++) groupdata[i-1] += ngoffset; } } #endif Fhdf.write_dataset(opt, datagroupnames.group[itemp], ng, groupdata.data()); itemp++; } #endif #ifdef USEADIOS else if (opt.ibinaryout==OUTADIOS) { //don't delcare new group, just add data adios_err=adios_define_var(adios_grp_handle,datagroupnames.group[itemp].c_str(),"",datagroupnames.adiosgroupdatatype[itemp],"ng","ngtot","ngmpioffset"); 
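//copy the unbound-particle offsets into the contiguous groupdata buffer that adios_write expects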
groupdata.resize(ng+1,0); for (Int_t i=1;i<=ng;i++) groupdata[i-1]=offset[i]; adios_err=adios_write(adios_file_handle,datagroupnames.group[itemp].c_str(),groupdata.data()); itemp++; } #endif else for (Int_t i=1;i<=ng;i++) Fout<1){ //if parallel then open file in serial so task 0 writes header Fhdf.create(string(fname),H5F_ACC_TRUNC, 0, false); Fhdf3.create(string(fname3),H5F_ACC_TRUNC, 0, false); } else{ Fhdf.create(string(fname),H5F_ACC_TRUNC, ThisWriteComm, false); Fhdf3.create(string(fname3),H5F_ACC_TRUNC, ThisWriteComm, false); } #else Fhdf.create(string(fname)); Fhdf3.create(string(fname3)); #endif } #endif #ifdef USEADIOS else if (opt.ibinaryout==OUTADIOS) { adios_err=adios_open(&adios_file_handle, "VELOCIraptor_catalog_particles", fname, "w", MPI_COMM_WORLD); adios_err=adios_open(&adios_file_handle3, "VELOCIraptor_catalog_particles.unbound", fname3, "w", MPI_COMM_WORLD); } #endif else { Fout.open(fname,ios::out); Fout3.open(fname3,ios::out); } //see above regarding unbound particle //for (Int_t i=1;i<=ngroups;i++) nids+=numingroup[i]; nids = nuids = 0; for (Int_t i=1;i<=ngroups;i++) {nids+=pglist[i][numingroup[i]];nuids+=numingroup[i]-pglist[i][numingroup[i]];} #ifdef USEMPI if (NProcs > 1) { MPI_Allreduce(&nids, &nidstot, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(&nuids, &nuidstot, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, MPI_COMM_WORLD); } else { nidstot=nids; nuidstot=nuids; } #else nidstot=nids; nuidstot=nuids; #endif //write header if (opt.ibinaryout==OUTBINARY) { Fout.write((char*)&ThisTask,sizeof(int)); Fout.write((char*)&NProcs,sizeof(int)); Fout.write((char*)&nids,sizeof(unsigned long)); Fout.write((char*)&nidstot,sizeof(unsigned long)); Fout3.write((char*)&ThisTask,sizeof(int)); Fout3.write((char*)&NProcs,sizeof(int)); Fout3.write((char*)&nuids,sizeof(unsigned long)); Fout3.write((char*)&nuidstot,sizeof(unsigned long)); } #ifdef USEHDF else if (opt.ibinaryout==OUTHDF) { #ifdef USEPARALLELHDF if(opt.mpinprocswritesize>1){ MPI_Allreduce(&nids, &nwritecommtot, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, mpi_comm_write); MPI_Allreduce(&nuids, &nuwritecommtot, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, mpi_comm_write); if (ThisWriteTask == 0) { itemp=0; Fhdf.write_dataset(opt, datagroupnames.part[itemp], 1, &ThisWriteComm, -1, -1, false); Fhdf3.write_dataset(opt, datagroupnames.part[itemp], 1, &ThisWriteComm, -1, -1, false); itemp++; Fhdf.write_dataset(opt, datagroupnames.part[itemp], 1, &NWriteComms, -1, -1, false); Fhdf3.write_dataset(opt, datagroupnames.part[itemp], 1, &NWriteComms, -1, -1, false); itemp++; Fhdf.write_dataset(opt, datagroupnames.part[itemp], 1, &nwritecommtot, -1, -1, false); Fhdf3.write_dataset(opt, datagroupnames.part[itemp], 1, &nuwritecommtot, -1, -1, false); itemp++; Fhdf.write_dataset(opt, datagroupnames.part[itemp], 1, &nidstot, -1, -1, false); Fhdf3.write_dataset(opt, datagroupnames.part[itemp], 1, &nuidstot, -1, -1, false); itemp++; } else { itemp=4; } Fhdf.close(); Fhdf3.close(); MPI_Barrier(MPI_COMM_WORLD); //reopen for parallel write Fhdf.append(string(fname)); Fhdf3.append(string(fname3)); } else{ itemp=0; Fhdf.write_dataset(opt, datagroupnames.part[itemp], 1, &ThisTask); Fhdf3.write_dataset(opt, datagroupnames.part[itemp], 1, &ThisTask); itemp++; Fhdf.write_dataset(opt, datagroupnames.part[itemp], 1, &NProcs); Fhdf3.write_dataset(opt, datagroupnames.part[itemp], 1, &NProcs); itemp++; Fhdf.write_dataset(opt, datagroupnames.part[itemp], 1, &nids); Fhdf3.write_dataset(opt, datagroupnames.part[itemp], 1, &nuids); itemp++; 
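//final header entries: total number of bound and unbound particle ids summed over all tasks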
Fhdf.write_dataset(opt, datagroupnames.part[itemp], 1, &nidstot); Fhdf3.write_dataset(opt, datagroupnames.part[itemp], 1, &nuidstot); itemp++; } #else itemp=0; Fhdf.write_dataset(opt, datagroupnames.part[itemp], 1, &ThisTask); Fhdf3.write_dataset(opt, datagroupnames.part[itemp], 1, &ThisTask); itemp++; Fhdf.write_dataset(opt, datagroupnames.part[itemp], 1, &NProcs); Fhdf3.write_dataset(opt, datagroupnames.part[itemp], 1, &NProcs); itemp++; Fhdf.write_dataset(opt, datagroupnames.part[itemp], 1, &nids); Fhdf3.write_dataset(opt, datagroupnames.part[itemp], 1, &nuids); itemp++; Fhdf.write_dataset(opt, datagroupnames.part[itemp], 1, &nidstot); Fhdf3.write_dataset(opt, datagroupnames.part[itemp], 1, &nuidstot); itemp++; #endif } #endif #ifdef USEADIOS else if (opt.ibinaryout==OUTADIOS) { //declare the attributes in a header group, assiging the group handle, setting the name, no time step indicator, and a flag saying yes to all statistics adios_err=adios_declare_group(&adios_grp_handle,"Header", "" , adios_stat_full); //select simple mpi method adios_select_method (adios_grp_handle, "MPI", "", ""); //define some attributes adios_err=adios_define_attribute(adios_grp_handle,datagroupnames.group[itemp].c_str(),"",datagroupnames.adiosgroupdatatype[itemp],to_string(ThisTask).c_str(),""); adios_err=adios_define_attribute(adios_grp_handle3,datagroupnames.group[itemp].c_str(),"",datagroupnames.adiosgroupdatatype[itemp],to_string(ThisTask).c_str(),""); itemp++; adios_err=adios_define_attribute(adios_grp_handle,datagroupnames.group[itemp].c_str(),"",datagroupnames.adiosgroupdatatype[itemp],to_string(NProcs).c_str(),""); adios_err=adios_define_attribute(adios_grp_handle3,datagroupnames.group[itemp].c_str(),"",datagroupnames.adiosgroupdatatype[itemp],to_string(NProcs).c_str(),""); itemp++; adios_err=adios_define_attribute(adios_grp_handle,datagroupnames.group[itemp].c_str(),"",datagroupnames.adiosgroupdatatype[itemp],to_string(nids).c_str(),""); adios_err=adios_define_attribute(adios_grp_handle3,datagroupnames.group[itemp].c_str(),"",datagroupnames.adiosgroupdatatype[itemp],to_string(nuids).c_str(),""); itemp++; adios_err=adios_define_attribute(adios_grp_handle,datagroupnames.group[itemp].c_str(),"",datagroupnames.adiosgroupdatatype[itemp],to_string(nidstot).c_str(),""); adios_err=adios_define_attribute(adios_grp_handle3,datagroupnames.group[itemp].c_str(),"",datagroupnames.adiosgroupdatatype[itemp],to_string(nuidstot).c_str(),""); itemp++; ///\todo don't actually know if I should use adios attribute or var to store simple single values } #endif else { Fout< 0) { partdata.resize(nids+1); for (Int_t i=0;i 0) { partdata.resize(nuids+1); for (Int_t i=0;i &Part){ fstream Fout,Fout2; string fname, fname2; ostringstream os, os2; unsigned long long noffset=0,ngtot=0,nids=0,nidstot,nuids=0,nuidstot=0, nwritecommtot=0, nuwritecommtot=0; Int_t *offset; int *typeval; #ifdef USEMPI MPIBuildWriteComm(opt); #endif #ifdef USEHDF H5OutputFile Fhdf,Fhdf2; int itemp; int ival; #endif #if defined(USEHDF)||defined(USEADIOS) DataGroupNames datagroupnames; #endif #ifndef USEMPI int ThisTask=0,NProcs=1; #endif vr::Timer write_timer; os << opt.outname << ".catalog_parttypes"; os2 << opt.outname << ".catalog_parttypes.unbound"; #ifdef USEMPI if (opt.ibinaryout==OUTHDF) { #ifdef USEPARALLELHDF os<<"."<1){ //if parallel then open file in serial so task 0 writes header Fhdf.create(string(fname),H5F_ACC_TRUNC, 0, false); Fhdf2.create(string(fname2),H5F_ACC_TRUNC, 0, false); } else{ Fhdf.create(string(fname),H5F_ACC_TRUNC, ThisWriteComm, 
false); Fhdf2.create(string(fname2),H5F_ACC_TRUNC, ThisWriteComm, false); } #else //create file Fhdf.create(string(fname)); Fhdf2.create(string(fname2)); #endif } #endif else { Fout.open(fname,ios::out); Fout2.open(fname2,ios::out); } for (Int_t i=1;i<=ngroups;i++) {nids+=pglist[i][numingroup[i]];nuids+=numingroup[i]-pglist[i][numingroup[i]];} #ifdef USEMPI if(NProcs > 1) { MPI_Allreduce(&nids, &nidstot, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(&nuids, &nuidstot, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, MPI_COMM_WORLD); } else { nidstot=nids; nuidstot=nuids; } #else nidstot=nids; nuidstot=nuids; #endif //write header if (opt.ibinaryout==OUTBINARY) { Fout.write((char*)&ThisTask,sizeof(int)); Fout.write((char*)&NProcs,sizeof(int)); Fout.write((char*)&nids,sizeof(Int_t)); Fout.write((char*)&nidstot,sizeof(Int_t)); Fout2.write((char*)&ThisTask,sizeof(int)); Fout2.write((char*)&NProcs,sizeof(int)); Fout2.write((char*)&nuids,sizeof(Int_t)); Fout2.write((char*)&nuidstot,sizeof(Int_t)); } #ifdef USEHDF else if (opt.ibinaryout==OUTHDF) { #ifdef USEPARALLELHDF if(opt.mpinprocswritesize>1){ MPI_Allreduce(&nids, &nwritecommtot, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, mpi_comm_write); MPI_Allreduce(&nuids, &nuwritecommtot, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, mpi_comm_write); if (ThisWriteTask == 0) { itemp=0; Fhdf.write_dataset(opt, datagroupnames.types[itemp], 1, &ThisWriteComm, -1, -1, false); Fhdf2.write_dataset(opt, datagroupnames.types[itemp], 1, &ThisWriteComm, -1, -1, false); itemp++; Fhdf.write_dataset(opt, datagroupnames.types[itemp], 1, &NWriteComms, -1, -1, false); Fhdf2.write_dataset(opt, datagroupnames.types[itemp], 1, &NWriteComms, -1, -1, false); itemp++; Fhdf.write_dataset(opt, datagroupnames.types[itemp], 1, &nwritecommtot, -1, -1, false); Fhdf2.write_dataset(opt, datagroupnames.types[itemp], 1, &nuwritecommtot, -1, -1, false); itemp++; Fhdf.write_dataset(opt, datagroupnames.types[itemp], 1, &nidstot, -1, -1, false); Fhdf2.write_dataset(opt, datagroupnames.types[itemp], 1, &nuidstot, -1, -1, false); itemp++; } else { itemp=4; } Fhdf.close(); Fhdf2.close(); MPI_Barrier(MPI_COMM_WORLD); //reopen for parallel write Fhdf.append(string(fname)); Fhdf2.append(string(fname2)); } else{ itemp=0; Fhdf.write_dataset(opt, datagroupnames.types[itemp], 1, &ThisTask); Fhdf2.write_dataset(opt, datagroupnames.types[itemp], 1, &ThisTask); itemp++; Fhdf.write_dataset(opt, datagroupnames.types[itemp], 1, &NProcs); Fhdf2.write_dataset(opt, datagroupnames.types[itemp], 1, &NProcs); itemp++; Fhdf.write_dataset(opt, datagroupnames.types[itemp], 1, &nids); Fhdf2.write_dataset(opt, datagroupnames.types[itemp], 1, &nuids); itemp++; Fhdf.write_dataset(opt, datagroupnames.types[itemp], 1, &nidstot); Fhdf2.write_dataset(opt, datagroupnames.types[itemp], 1, &nuidstot); itemp++; } #else itemp=0; Fhdf.write_dataset(opt, datagroupnames.types[itemp], 1, &ThisTask); Fhdf2.write_dataset(opt, datagroupnames.types[itemp], 1, &ThisTask); itemp++; Fhdf.write_dataset(opt, datagroupnames.types[itemp], 1, &NProcs); Fhdf2.write_dataset(opt, datagroupnames.types[itemp], 1, &NProcs); itemp++; Fhdf.write_dataset(opt, datagroupnames.types[itemp], 1, &nids); Fhdf2.write_dataset(opt, datagroupnames.types[itemp], 1, &nuids); itemp++; Fhdf.write_dataset(opt, datagroupnames.types[itemp], 1, &nidstot); Fhdf2.write_dataset(opt, datagroupnames.types[itemp], 1, &nuidstot); itemp++; #endif } #endif else { Fout< std::vector get_sizes(const std::vector> &vov) { std::vector sizes(vov.size()); std::transform(vov.begin(), vov.end(), 
sizes.begin(), [](const std::vector &v) { return v.size(); }); return sizes; } template std::size_t get_total_size(const std::vector> &vov) { return std::accumulate(vov.begin(), vov.end(), std::size_t{0}, [](std::size_t s, const std::vector &v) { return s + v.size(); } ); } template std::vector flatten(std::vector> &vov, std::size_t expected_total) { #ifndef NDEBUG assert(expected_total == get_total_size(vov)); #endif std::vector all_values; all_values.reserve(expected_total); for (const auto &v: vov) { all_values.insert(all_values.end(), v.begin(), v.end()); } vov.clear(); vov.shrink_to_fit(); return all_values; }; ///Write the particles in each SO region ///Note that this particle list will not be exclusive ///\todo optimisation memory wise can be implemented by not creating an array ///to store all ids and then copying info from the array of vectors into it. void WriteSOCatalog(Options &opt, const Int_t ngroups, std::vector> &SOpids, std::vector> &SOtypes) { assert(SOpids.size() == ngroups); #if defined(GASON) || defined(STARON) || defined(BHON) assert(SOtypes.size() == ngroups); #else assert(SOtypes.size() == 0); #endif fstream Fout; string fname; ostringstream os; #ifdef USEMPI MPIBuildWriteComm(opt); #endif #ifdef USEHDF H5OutputFile Fhdf; int itemp=0; int ival; #endif #ifdef USEADIOS int adios_err; uint64_t adios_groupsize , adios_totalsize ; int64_t adios_file_handle; int64_t adios_grp_handle; int64_t adios_var_handle; int64_t adios_attr_handle; #endif #if defined(USEHDF)||defined(USEADIOS) DataGroupNames datagroupnames; #endif vr::Timer write_timer; #ifndef USEMPI int ThisTask=0,NProcs=1; #endif auto mpi_total = [&](unsigned long val) { #ifdef USEMPI MPI_Allreduce(MPI_IN_PLACE, &val, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); #endif // USEMPI return val; }; unsigned long ng = ngroups; unsigned long ngtot = mpi_total(ng); unsigned long nSOids = get_total_size(SOpids); unsigned long nSOidstot = mpi_total(nSOids); os << opt.outname <<".catalog_SOlist"; #ifdef USEMPI if (opt.ibinaryout==OUTHDF) { #ifdef USEPARALLELHDF os<<"."<1){ //if parallel then open file in serial so task 0 writes header Fhdf.create(string(fname),H5F_ACC_TRUNC, 0, false); } else{ Fhdf.create(string(fname),H5F_ACC_TRUNC, ThisWriteComm, false); } #else Fhdf.create(string(fname)); #endif } #endif #ifdef USEADIOS else if (opt.ibinaryout==OUTADIOS) { //open an adios file adios_err=adios_open(&adios_file_handle, "VELOCIraptor_SOlist", fname, "w", MPI_COMM_WORLD); } #endif else Fout.open(fname,ios::out); //write header if (opt.ibinaryout==OUTBINARY) { Fout.write((char*)&ThisTask,sizeof(int)); Fout.write((char*)&NProcs,sizeof(int)); Fout.write((char*)&ng,sizeof(unsigned long)); Fout.write((char*)&ngtot,sizeof(unsigned long)); Fout.write((char*)&nSOids,sizeof(unsigned long)); Fout.write((char*)&nSOidstot,sizeof(unsigned long)); } #ifdef USEHDF else if (opt.ibinaryout==OUTHDF) { #ifdef USEPARALLELHDF if(opt.mpinprocswritesize>1){ unsigned long nwritecommtot = 0, nSOwritecommtot = 0; MPI_Allreduce(&ng, &nwritecommtot, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, mpi_comm_write); MPI_Allreduce(&nSOids, &nSOwritecommtot, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, mpi_comm_write); if (ThisWriteTask == 0) { itemp=0; Fhdf.write_dataset(opt, datagroupnames.SO[itemp++], 1, &ThisWriteComm, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.SO[itemp++], 1, &NWriteComms, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.SO[itemp++], 1, &nwritecommtot, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.SO[itemp++], 1, &ngtot, -1, -1, 
false); Fhdf.write_dataset(opt, datagroupnames.SO[itemp++], 1, &nSOwritecommtot, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.SO[itemp++], 1, &nSOidstot, -1, -1, false); } else { itemp=6; } Fhdf.close(); MPI_Barrier(MPI_COMM_WORLD); //reopen for parallel write Fhdf.append(string(fname)); } else #endif // USEPARALLELHDF { itemp=0; Fhdf.write_dataset(opt, datagroupnames.SO[itemp++], 1, &ThisTask); Fhdf.write_dataset(opt, datagroupnames.SO[itemp++], 1, &NProcs); Fhdf.write_dataset(opt, datagroupnames.SO[itemp++], 1, &ng); Fhdf.write_dataset(opt, datagroupnames.SO[itemp++], 1, &ngtot); Fhdf.write_dataset(opt, datagroupnames.SO[itemp++], 1, &nSOids); Fhdf.write_dataset(opt, datagroupnames.SO[itemp++], 1, &nSOidstot); } } #endif #ifdef USEADIOS else if (opt.ibinaryout==OUTADIOS) { //declare the attributes in a header group, assiging the group handle, setting the name, no time step indicator, and a flag saying yes to all statistics adios_err=adios_declare_group(&adios_grp_handle,"Header", "" , adios_stat_full); //select simple mpi method adios_select_method (adios_grp_handle, "MPI", "", ""); //define some attributes adios_err=adios_define_attribute(adios_grp_handle,datagroupnames.SO[itemp].c_str(),"",datagroupnames.adiosSOdatatype[itemp],to_string(ThisTask).c_str(),""); itemp++; adios_err=adios_define_attribute(adios_grp_handle,datagroupnames.SO[itemp].c_str(),"",datagroupnames.adiosSOdatatype[itemp],to_string(NProcs).c_str(),""); itemp++; adios_err=adios_define_attribute(adios_grp_handle,datagroupnames.SO[itemp].c_str(),"",datagroupnames.adiosSOdatatype[itemp],to_string(ng).c_str(),""); itemp++; adios_err=adios_define_attribute(adios_grp_handle,datagroupnames.SO[itemp].c_str(),"",datagroupnames.adiosSOdatatype[itemp],to_string(ngtot).c_str(),""); itemp++; adios_err=adios_define_attribute(adios_grp_handle,datagroupnames.SO[itemp].c_str(),"",datagroupnames.adiosSOdatatype[itemp],to_string(nSOids).c_str(),""); itemp++; adios_err=adios_define_attribute(adios_grp_handle,datagroupnames.SO[itemp].c_str(),"",datagroupnames.adiosSOdatatype[itemp],to_string(nSOidstot).c_str(),""); itemp++; ///\todo don't actually know if I should use adios attribute or var to store simple single values } #endif else { Fout<(SOpids); if (ngroups > 0 ) Fout.write((char*)numingroup.data(),sizeof(Int_t)*ngroups); numingroup.clear(); } #ifdef USEHDF else if (opt.ibinaryout==OUTHDF) { auto data = get_sizes(SOpids); Fhdf.write_dataset(opt, datagroupnames.SO[itemp], ng, data.data()); itemp++; } #endif #ifdef USEADIOS else if (opt.ibinaryout==OUTADIOS) { //declare a new group //declare the attributes in a header group, assiging the group handle, setting the name, no time step indicator, and a flag saying yes to all statistics adios_err=adios_declare_group(&adios_grp_handle,"Catalog_Data", "" , adios_stat_full); //select simple mpi method adios_select_method (adios_grp_handle, "MPI", "", ""); //now stage variables (data) //if want to define dimensions can either create a variable that stores the dimensions or store the value as a string. //store local dim adios_err=adios_define_var(adios_grp_handle,"ng","", adios_unsigned_long,0,0,0); //store global dim adios_err=adios_define_var(adios_grp_handle,"ngtot","", adios_unsigned_long,0,0,0); //store mpi offset adios_err=adios_define_var(adios_grp_handle,"ngmpioffset","", adios_unsigned_long,0,0,0); //then define the group actually storing the data. 
Might be useful to define an offset variable as well for quick access when reading //offset would be the last field in the code below adios_err=adios_define_var(adios_grp_handle,datagroupnames.SO[itemp].c_str(),"",datagroupnames.adiosSOdatatype[itemp],"ng","ngtot","ngmpioffset"); adios_err=adios_write(adios_file_handle,"ng",&ng); adios_err=adios_write(adios_file_handle,"ngtot",&ngtot); Int_t mpioffset=0; for (Int_t itask=0;itask(SOpids); adios_err=adios_write(adios_file_handle,datagroupnames.SO[itemp].c_str(),data); itemp++; } #endif else { for (const auto &pids : SOpids) Fout << pids.size() << endl; } //Write offsets vector offsets(ngroups); if (ngroups > 0) { auto sizes = get_sizes(SOpids); std::partial_sum(sizes.begin(), std::next(sizes.end(), -1), std::next(offsets.begin())); } if (opt.ibinaryout==OUTBINARY) Fout.write((char*)offsets.data(),sizeof(Int_t)*ngroups); #ifdef USEHDF else if (opt.ibinaryout==OUTHDF) { #ifdef USEPARALLELHDF if(opt.mpinprocswritesize>1) { vector mpi_offset(NProcs); MPI_Allgather(&nSOids, 1, MPI_Int_t, mpi_offset.data(), 1, MPI_Int_t, mpi_comm_write); auto task_offset = std::accumulate(mpi_offset.begin(), std::next(mpi_offset.begin(), ThisWriteTask), Int_t{0}); for (auto &offset: offsets) { offset += task_offset; } } #endif Fhdf.write_dataset(opt, datagroupnames.SO[itemp], ng, offsets.data()); itemp++; } #endif #ifdef USEADIOS else if (opt.ibinaryout==OUTADIOS) { //don't delcare new group, just add data adios_err=adios_define_var(adios_grp_handle,datagroupnames.SO[itemp].c_str(),"",datagroupnames.adiosSOdatatype[itemp],"ng","ngtot","ngmpioffset"); adios_err=adios_write(adios_file_handle,datagroupnames.SO[itemp].c_str(), offsets.data()); delete[] data; itemp++; } #endif else { for (const auto offset: offsets) Fout << offset << endl; } offsets.clear(); if (opt.ibinaryout==OUTBINARY) { if (nSOids>0) { auto ids = flatten(SOpids, nSOids); Fout.write((char*)ids.data(), sizeof(long long) * nSOids); } #if defined(GASON) || defined(STARON) || defined(BHON) if (nSOids>0) { auto types = flatten(SOtypes, nSOids); Fout.write((char*)types.data(), sizeof(int) * nSOids); } #endif } #ifdef USEHDF else if (opt.ibinaryout==OUTHDF) { { auto ids = flatten(SOpids, nSOids); Fhdf.write_dataset(opt, datagroupnames.SO[itemp], nSOids, ids.data()); } #if defined(GASON) || defined(STARON) || defined(BHON) { itemp++; auto types = flatten(SOtypes, nSOids); Fhdf.write_dataset(opt, datagroupnames.SO[itemp], nSOids, types.data()); } #endif } #endif #ifdef USEADIOS else if (opt.ibinaryout==OUTADIOS) { adios_err=adios_declare_group(&adios_grp_handle,"Particle_Data", "" , adios_stat_full); adios_select_method (adios_grp_handle, "MPI", "", ""); //store local dim adios_err=adios_define_var(adios_grp_handle,"nSOids","", adios_unsigned_long,0,0,0); //store global dim adios_err=adios_define_var(adios_grp_handle,"nSOidstot","", adios_unsigned_long,0,0,0); //store mpi offset adios_err=adios_define_var(adios_grp_handle,"nSOidsmpioffset","", adios_unsigned_long,0,0,0); adios_err=adios_define_var(adios_grp_handle,datagroupnames.SO[itemp].c_str(),"",datagroupnames.adiosSOdatatype[itemp],"nSOids","nSOidstot","nSOidsmpioffset"); adios_err=adios_write(adios_file_handle,"nSOids",&nSOids); adios_err=adios_write(adios_file_handle,"nSOidstot",&nSOidstot); Int_t mpioffset=0; adios_err=adios_write(adios_file_handle,"nSOidsmpioffset",&mpioffset); adios_err=adios_write(adios_file_handle,datagroupnames.SO[itemp].c_str(),idval); #if defined(GASON) || defined(STARON) || defined(BHON) itemp++; 
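//with gas/star/BH particles compiled in, also write the particle types alongside the SO particle ids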
adios_err=adios_write(adios_file_handle,datagroupnames.SO[itemp].c_str(),typeval); #endif } #endif else { { auto ids = flatten(SOpids, nSOids); for (const auto id: ids) { Fout << id << endl; } } #if defined(GASON) || defined(STARON) || defined(BHON) { auto types = flatten(SOtypes, nSOids); for (const auto type: types) { Fout << type << endl; } } #endif } if (opt.ibinaryout==OUTASCII || opt.ibinaryout==OUTBINARY) Fout.close(); #ifdef USEHDF if (opt.ibinaryout==OUTHDF) Fhdf.close(); #endif #ifdef USEADIOS else if (opt.ibinaryout==OUTADIOS) adios_err=adios_close(adios_file_handle); #endif #ifdef USEMPI MPIFreeWriteComm(); #endif LOG(info) << "Wrote " << fname << " in " << write_timer; } //@} ///\name Final outputs such as properties and output that can be used to construct merger trees and substructure hierarchy //@{ ///Writes the bulk properties of the substructures ///\todo need to add in 500crit mass and radial output in here and in \ref allvars.h void WriteProperties(Options &opt, const Int_t ngroups, PropData *pdata){ fstream Fout; string fname; ostringstream os; char buf[40]; long unsigned ngtot=0, noffset=0, ng=ngroups, nwritecommtot=0; vr::Timer write_timer; //if need to convert from physical back to comoving if (opt.icomoveunit) { opt.p*=opt.h/opt.a; for (Int_t i=1;i<=ngroups;i++) pdata[i].ConverttoComove(opt); } #ifdef USEMPI MPIBuildWriteComm(opt); #endif #ifdef USEHDF H5OutputFile Fhdf; int itemp=0; int ival; #endif #if defined(USEHDF)||defined(USEADIOS) DataGroupNames datagroupnames; #endif PropDataHeader head(opt); os << opt.outname <<".properties"; #ifdef USEMPI if (opt.ibinaryout==OUTHDF) { #ifdef USEPARALLELHDF os<<"."< 1) { for (int j=0;j1){ //if parallel then open file in serial so task 0 writes header Fhdf.create(string(fname),H5F_ACC_TRUNC, 0, false); } else{ Fhdf.create(string(fname),H5F_ACC_TRUNC, ThisWriteComm, false); } #else Fhdf.create(string(fname)); #endif itemp=0; #ifdef USEPARALLELHDF if(opt.mpinprocswritesize>1){ MPI_Allreduce(&ng, &nwritecommtot, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, mpi_comm_write); //if parallel HDF then only if (ThisWriteTask==0) { Fhdf.write_dataset(opt, datagroupnames.prop[itemp++], 1, &ThisWriteComm, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.prop[itemp++], 1, &NWriteComms, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.prop[itemp++], 1, &nwritecommtot, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.prop[itemp++], 1, &ngtot, -1, -1, false); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.icosmologicalin); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.icomoveunit); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.p); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.a); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.lengthtokpc); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.velocitytokms); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.masstosolarmass); #if defined(GASON) || defined(STARON) || defined(BHON) Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.metallicitytosolar); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.SFRtosolarmassperyear); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.stellaragetoyrs); #endif WriteVELOCIraptorConfigToHDF(opt,Fhdf); WriteSimulationInfoToHDF(opt,Fhdf); WriteUnitInfoToHDF(opt,Fhdf); } Fhdf.close(); MPI_Barrier(MPI_COMM_WORLD); //reopen for parallel write 
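//the header was written serially by the first task of the write communicator; every task now reopens the file to append its datasets in parallel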
Fhdf.append(string(fname)); } else{ Fhdf.write_dataset(opt, datagroupnames.prop[itemp++], 1, &ThisTask); Fhdf.write_dataset(opt, datagroupnames.prop[itemp++], 1, &NProcs); Fhdf.write_dataset(opt, datagroupnames.prop[itemp++], 1, &ng); Fhdf.write_dataset(opt, datagroupnames.prop[itemp++], 1, &ngtot); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.icosmologicalin); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.icomoveunit); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.p); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.a); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.lengthtokpc); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.velocitytokms); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.masstosolarmass); #if defined(GASON) || defined(STARON) || defined(BHON) Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.metallicitytosolar); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.SFRtosolarmassperyear); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.stellaragetoyrs); #endif WriteVELOCIraptorConfigToHDF(opt,Fhdf); WriteSimulationInfoToHDF(opt,Fhdf); WriteUnitInfoToHDF(opt,Fhdf); } #else Fhdf.write_dataset(opt, datagroupnames.prop[itemp++], 1, &ThisTask); Fhdf.write_dataset(opt, datagroupnames.prop[itemp++], 1, &NProcs); Fhdf.write_dataset(opt, datagroupnames.prop[itemp++], 1, &ng); Fhdf.write_dataset(opt, datagroupnames.prop[itemp++], 1, &ngtot); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.icosmologicalin); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.icomoveunit); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.p); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.a); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.lengthtokpc); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.velocitytokms); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.masstosolarmass); #if defined(GASON) || defined(STARON) || defined(BHON) Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.metallicitytosolar); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.SFRtosolarmassperyear); Fhdf.write_attribute(string("/"), datagroupnames.prop[itemp++], opt.stellaragetoyrs); #endif WriteVELOCIraptorConfigToHDF(opt,Fhdf); WriteSimulationInfoToHDF(opt,Fhdf); WriteUnitInfoToHDF(opt,Fhdf); #endif } #endif else { Fout.open(fname,ios::out); Fout<0) { for (Int_t i=0;i0) { for (Int_t i=0;i0) { for (Int_t i=0;i0) { for (Int_t i=0;i0) { for (Int_t i=0;i0) { for (Int_t i=0;i0){ for (auto j=0;j0) { for (auto &extrafield:opt.gas_internalprop_output_names) { for (Int_t i=0;i0) { for (auto &extrafield:opt.star_internalprop_output_names) { for (Int_t i=0;i0) { for (auto &extrafield:opt.bh_internalprop_output_names) { for (Int_t i=0;i0) { for (auto &extrafield:opt.extra_dm_internalprop_output_names) { for (Int_t i=0;i0){ for (auto j=0;j0){ for (auto k=0;k<3;k++) { for (auto j=0;j0) { for (auto j=0;j0 && opt.iextrahalooutput) { for (auto j=0;j indices(ngroups), haloindices; vr::Timer write_timer; //void pointer to hold data void *data; int itemp=0, nbinsedges = opt.profilenbins+1; int ival; //if need to convert from physical back to comoving if (opt.icomoveunit) { opt.p*=opt.h/opt.a; for (Int_t i=1;i<=ngroups;i++) { if (pdata[i].gNFOF >= opt.profileminFOFsize && 
pdata[i].num >= opt.profileminsize) { pdata[i].ConvertProfilestoComove(opt); } } } if (opt.iInclusiveHalo>0) { nhalos = 0; haloindices.resize(ngroups); for (auto i=1;i<=ngroups;i++){ if (pdata[i].gNFOF >= opt.profileminFOFsize && pdata[i].num >= opt.profileminsize && pdata[i].hostid == -1) { haloindices[nhalos++] = i; } } #ifdef USEMPI MPI_Allgather(&nhalos, 1, MPI_Int_t, mpi_nhalos, 1, MPI_Int_t, MPI_COMM_WORLD); #endif } #ifdef USEMPI MPIBuildWriteComm(opt); #endif #ifdef USEHDF H5OutputFile Fhdf; vector dims; #endif #if defined(USEHDF)||defined(USEADIOS) DataGroupNames datagroupnames; #endif ProfileDataHeader head(opt); //since profiles can be called for a subset of objects, get the total number to be written if (opt.profileminsize > 0 || opt.profileminFOFsize > 0) { ng = 0; for (auto i=1;i<=ngroups;i++) if (pdata[i].gNFOF >= opt.profileminFOFsize && pdata[i].num >= opt.profileminsize) { indices[ng++] = i; } #ifdef USEMPI MPI_Allgather(&ng, 1, MPI_Int_t, mpi_ngroups, 1, MPI_Int_t, MPI_COMM_WORLD); #endif } else { for (auto i=1;i<=ngroups;i++) indices[i-1] = i; } os << opt.outname << ".profiles"; #ifdef USEMPI if (opt.ibinaryout==OUTHDF) { #ifdef USEPARALLELHDF os<<"."< 1) { for (int j=0;j1){ //if parallel then open file in serial so first task in each i/o communicator writes header Fhdf.create(string(fname),H5F_ACC_TRUNC, 0, false); MPI_Allreduce(&ng, &nwritecommtot, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, mpi_comm_write); MPI_Allreduce(&nhalos, &nhalowritecommtot, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, mpi_comm_write); if (ThisWriteTask == 0) { itemp=0; Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &ThisWriteComm, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &NWriteComms, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &nwritecommtot, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &ngtot, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &nhalowritecommtot, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &nhalostot, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, opt.profileradnormstring, false); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &opt.iInclusiveHalo, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &nbinsedges, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], nbinsedges, data, H5T_NATIVE_DOUBLE, -1, false); } else { itemp=10; } Fhdf.close(); MPI_Barrier(MPI_COMM_WORLD); //reopen for parallel write Fhdf.append(string(fname)); } else{ Fhdf.create(string(fname),H5F_ACC_TRUNC, ThisWriteComm, false); itemp=0; Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &ThisTask); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &NProcs); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &ng); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &ngtot); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &nhalos); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &nhalostot); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, opt.profileradnormstring); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &opt.iInclusiveHalo); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &nbinsedges); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], nbinsedges, data, H5T_NATIVE_DOUBLE); } #else Fhdf.create(string(fname)); itemp=0; Fhdf.write_dataset(opt, 
datagroupnames.profile[itemp++], 1, &ThisTask); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &NProcs); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &ng); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &ngtot); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &nhalos); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &nhalostot); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, opt.profileradnormstring); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &opt.iInclusiveHalo); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], 1, &nbinsedges); Fhdf.write_dataset(opt, datagroupnames.profile[itemp++], nbinsedges, data, H5T_NATIVE_DOUBLE); #endif } #endif else { Fout.open(fname,ios::out); Fout<0){ if (pdata[indices[i]].hostid == -1) ((Double_t*)data)[i]=pdata[indices[i]].gR200c_excl; else ((Double_t*)data)[i]=pdata[indices[i]].gR200c; } else ((Double_t*)data)[i]=pdata[indices[i]].gR200c; } Fhdf.write_dataset(opt, head.headerdatainfo[itemp], ng, data, head.hdfpredtypeinfo[itemp]);itemp++; } //otherwise no normalisation and don't need to write data block ::operator delete(data); //now move onto 2d arrays; data= ::operator new(sizeof(int)*(ng)*(opt.profilenbins)); dims.resize(2);dims[0]=ng;dims[1]=opt.profilenbins; //write all the npart arrays for (Int_t i=0;i0) { data= ::operator new(sizeof(int)*(nhalos)*(opt.profilenbins)); dims.resize(2);dims[0]=nhalos;dims[1]=opt.profilenbins; for (Int_t i=0;i1){ Fhdf.append(string(fname),H5F_ACC_RDWR); } else{ Fhdf.append(string(fname),H5F_ACC_RDWR,ThisWriteComm,false); } #else Fhdf.append(string(fname),H5F_ACC_RDWR); #endif } #endif else Fout.open(fname,ios::out|ios::app); //since the hierarchy file is appended to the catalog_groups files, no header written #ifdef USEMPI if (NProcs > 1) { for (int j=0;j1){ //if parallel then open file in serial so task 0 writes header Fhdf.create(string(fname),H5F_ACC_TRUNC, 0, false); MPI_Allreduce(&ng, &nwritecommtot, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, mpi_comm_write); if (ThisWriteTask == 0) { itemp=0; Fhdf.write_dataset(opt, datagroupnames.hierarchy[itemp++], 1, &ThisWriteComm, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.hierarchy[itemp++], 1, &NWriteComms, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.hierarchy[itemp++], 1, &nwritecommtot, -1, -1, false); Fhdf.write_dataset(opt, datagroupnames.hierarchy[itemp++], 1, &ngtot, -1, -1, false); } else { itemp = 4; } Fhdf.close(); MPI_Barrier(MPI_COMM_WORLD); //reopen for parallel write Fhdf.append(string(fname)); } else{ Fhdf.create(string(fname),H5F_ACC_TRUNC, ThisWriteComm, false); itemp=0; Fhdf.write_dataset(opt, datagroupnames.hierarchy[itemp++], 1, &ThisTask); Fhdf.write_dataset(opt, datagroupnames.hierarchy[itemp++], 1, &NProcs); Fhdf.write_dataset(opt, datagroupnames.hierarchy[itemp++], 1, &ng); Fhdf.write_dataset(opt, datagroupnames.hierarchy[itemp++], 1, &ngtot); } #else Fhdf.create(string(fname)); itemp=0; Fhdf.write_dataset(opt, datagroupnames.hierarchy[itemp++], 1, &ThisTask); Fhdf.write_dataset(opt, datagroupnames.hierarchy[itemp++], 1, &NProcs); Fhdf.write_dataset(opt, datagroupnames.hierarchy[itemp++], 1, &ng); Fhdf.write_dataset(opt, datagroupnames.hierarchy[itemp++], 1, &ngtot); #endif } #endif else { Fout<>nbodies; //nbodies=opt.numpart[DARKTYPE]; //for (Int_t i=0;i>temp; for (Int_t i=0;i>pfof[i];if (pfof[i]>ngroup) ngroup=pfof[i];} Fin.close(); LOG(info) << "Done"; return ngroup; } //load binary group fof catalogue Int_t 
ReadFOFGroupBinary(Options &opt, Int_t nbodies, Int_t *pfof, Int_t *idtoindex, Int_t minid, Particle *p) {//old groupcat format char buf[1024]; int TotNgroups,NFiles,dummy,Ngroups,Nids; int *pids; int *numingroup; fstream Fin; //group tab contains bulk info of groups sprintf(buf, "%s/group_tab_%03d", opt.gname, opt.snum); LOG(info) << "Reading group binary data from " << buf; Fin.open(buf,ios::in|ios::binary); //read group header info (number of groups, number of particles in all groups, total number of groups (in case split across several files) //and number of files Fin.read((char*)&Ngroups,sizeof(int)); Fin.read((char*)&Nids,sizeof(int)); Fin.read((char*)&TotNgroups,sizeof(int)); Fin.read((char*)&NFiles,sizeof(int)); LOG(info) << Ngroups << " fof groups in files"; LOG(info) << Nids << " particles in groups"; numingroup=new int[Ngroups]; //offsets are sum of lengths starting at 0 //ids of particles order according to group with first particle beloing to group 0, read n1 Fin.read((char*)numingroup,sizeof(int)*Ngroups); Fin.close(); //group ids contains actual particle ids in the group sprintf(buf, "%s/group_ids_%03d", opt.gname, opt.snum); Fin.open(buf,ios::in|ios::binary); //reread header info Fin.read((char*)&dummy,sizeof(int)); Fin.read((char*)&dummy,sizeof(int)); Fin.read((char*)&dummy,sizeof(int)); Fin.read((char*)&dummy,sizeof(int)); //read array of particle ids belong to groups pids=new int[Nids]; Fin.read((char*)pids,sizeof(int)*Nids); int offset=0; for (Int_t i=0;i &Part) { return; } #endif #ifdef EXTENDEDHALOOUTPUT /// \name Routines that can be used to output information of a halo subvolume decomposition //@{ ///Writes cell quantites void WriteExtendedOutput (Options &opt, Int_t numgroups, Int_t nbodies, PropData *pdata, Particle *p, Int_t * pfof) { fstream Fout; char fname[1000]; int ngtot = 0; int noffset = 0; #ifdef USEMPI for (int j = 0; j < NProcs; j++) ngtot += mpi_ngroups[j]; for (int j = 0; j < ThisTask; j++) noffset += mpi_ngroups[j]; #else int ThisTask = 0; int NProcs = 1; ngtot = numgroups; #endif LOG(info) << "numgroups " << numgroups; numgroups++; Int_t * nfilespergroup = new Int_t [numgroups]; Int_t ** filesofgroup = new Int_t * [numgroups]; Int_t * ntaskspergroup = new Int_t [numgroups]; Int_t ** tasksofgroup = new Int_t * [numgroups]; // First send to Task 0, Groups and number of files over which the group is distributed // from original reading // Groups id have already the offset Int_t ** npartsofgroupinfile = new Int_t * [numgroups]; Int_t ** npartsofgroupintask = new Int_t * [numgroups]; #ifdef USEMPI Int_t ntosendtotask [NProcs]; Int_t ntorecievefromtask [NProcs]; for (Int_t i = 0; i < NProcs; i++) { ntosendtotask[i] = 0; ntorecievefromtask[i] = 0; } #endif // Initialize arrays for (Int_t i = 0; i < numgroups; i++) { npartsofgroupinfile[i] = new Int_t [opt.num_files]; #ifdef USEMPI npartsofgroupintask[i] = new Int_t [NProcs]; ntaskspergroup[i] = 0; for(Int_t j = 0; j < NProcs; j++) npartsofgroupintask[i][j] = 0; #endif nfilespergroup[i] = 0; for(Int_t j = 0; j < opt.num_files; j++) npartsofgroupinfile[i][j] = 0; } // Set IdStruct IdTopHost and fill arrays for (Int_t i = 0; i < nbodies; i++) { if (pfof[i] == 0) { p[i].SetIdTopHost(0); p[i].SetIdHost(0); p[i].SetIdStruct(0); } else { p[i].SetIdStruct (pdata[pfof[i]].haloid); if (pdata[pfof[i]].hostfofid == 0) p[i].SetIdTopHost (pfof[i] + noffset); else p[i].SetIdTopHost (pdata[pfof[i]].hostfofid); if (pdata[pfof[i]].hostid < 0) p[i].SetIdHost (pfof[i] + noffset); else p[i].SetIdHost (pdata[pfof[i]].hostid); } 
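//tally how many particles of this group reside in each input file (and, with MPI, on each task)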
npartsofgroupinfile[pfof[i]][p[i].GetOFile()]++; #ifdef USEMPI npartsofgroupintask[pfof[i]][p[i].GetOTask()]++; #endif } for (Int_t i = 0; i < numgroups; i++) { #ifdef USEMPI for(Int_t j = 0; j < NProcs; j++) if(npartsofgroupintask[i][j] > 0) ntaskspergroup[i]++; #endif for(Int_t j = 0; j < opt.num_files; j++) if(npartsofgroupinfile[i][j] > 0) nfilespergroup[i]++; } // Shorten arrays for (Int_t i = 0; i < numgroups; i++) { Int_t k = 0; #ifdef USEMPI tasksofgroup[i] = new Int_t [ntaskspergroup[i]]; for(Int_t j = 0; j < NProcs; j++) { if(npartsofgroupintask[i][j] > 0) { tasksofgroup[i][k] = j; ntosendtotask[j] += npartsofgroupintask[i][j]; k++; } } k = 0; #endif filesofgroup[i] = new Int_t [nfilespergroup[i]]; for(Int_t j = 0; j < opt.num_files; j++) { if(npartsofgroupinfile[i][j] > 0) { filesofgroup[i][k] = j; k++; } } delete [] npartsofgroupinfile[i]; #ifdef USEMPI delete [] npartsofgroupintask[i]; #endif } delete [] npartsofgroupinfile; #ifdef USEMPI delete [] npartsofgroupintask; #endif // Now nfilespergroup has the number of files over which the group is distributed and // filesofgroup has the id of the files // // Write FilesOfGroup File int myturn = 0; #ifdef USEMPI MPI_Status status; MPI_Request rqst; if (ThisTask == 0) { #endif myturn = 1; char fog [1000]; sprintf (fog, "%s.filesofgroup", opt.outname); Fout.open (fog, ios::out); for (Int_t i = 1; i < numgroups; i++) { Fout << pdata[i].haloid << " " << nfilespergroup[i] << endl; for (Int_t j = 0; j < nfilespergroup[i]; j++) Fout << filesofgroup[i][j] << " "; Fout << endl; } Fout.close(); #ifdef USEMPI if (NProcs > 1) MPI_Isend (&myturn, 1, MPI_INT, ThisTask+1, ThisTask, MPI_COMM_WORLD, &rqst); } else { MPI_Recv (&myturn, 1, MPI_INT, ThisTask-1, ThisTask-1, MPI_COMM_WORLD, &status); ofstream fout; char fog[1000]; sprintf (fog, "%s.filesofgroup", opt.outname); fout.open (fog, ios::app); for (Int_t i = 1; i < numgroups; i++) { fout << pdata[i].haloid << " " << nfilespergroup[i] << endl; for (Int_t j = 0; j < nfilespergroup[i]; j++)fout << filesofgroup[i][j] << " "; fout << endl; } fout.close(); if (ThisTask != NProcs-1) MPI_Isend (&myturn, 1, MPI_INT, ThisTask+1, ThisTask, MPI_COMM_WORLD, &rqst); } delete [] filesofgroup; delete [] tasksofgroup; delete [] ntaskspergroup; delete [] nfilespergroup; LOG(debug) << "filesofgroup written"; // Send and Particles before writing Extended Files // Communicate to all other processors how many particles are going to be sent ntosendtotask[ThisTask] = 0; for (Int_t i = 1; i < NProcs; i++) { int src = (ThisTask + NProcs - i) % NProcs; int dst = (ThisTask + i) % NProcs; MPI_Isend (&ntosendtotask[dst], 1, MPI_INT, dst, ThisTask, MPI_COMM_WORLD, &rqst); MPI_Recv (&ntorecievefromtask[src], 1, MPI_INT, src, src, MPI_COMM_WORLD, &status); } // Declare and allocate Particle arrays for sending and receiving Particle ** PartsToSend = new Particle * [NProcs]; Particle ** PartsToRecv = new Particle * [NProcs]; int * count = new int [NProcs]; for (Int_t i = 0; i < NProcs; i++) { count[i] = 0; PartsToSend[i] = new Particle [ntosendtotask[i]+1]; PartsToRecv[i] = new Particle [ntorecievefromtask[i]+1]; } // Copy Particles to send for (Int_t i = 0; i < nbodies; i++) if (p[i].GetOTask() != ThisTask) PartsToSend[p[i].GetOTask()][count[p[i].GetOTask()]++] = Particle(p[i]); //determine if number of particles can fit into a single send int bufferFlag = 1; long int maxNumPart = LOCAL_MAX_MSGSIZE / (long int) sizeof(Particle); int localMax = 0; int globalMax = 0; //find max local send and global send for (Int_t i = 0; i < 
NProcs; i++) if (ntosendtotask[i] > localMax) localMax = ntosendtotask[i]; MPI_Allreduce (&localMax, &globalMax, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); if (globalMax >= maxNumPart) bufferFlag = 1; // Send and Receive Particles //if splitting sends into chunks if (bufferFlag) { int numBuffersToSend [NProcs]; int numBuffersToRecv [NProcs]; int numPartInBuffer = maxNumPart; for (int jj = 0; jj < NProcs; jj++) { numBuffersToSend[jj] = 0; numBuffersToRecv[jj] = 0; if (ntosendtotask[jj] > 0) numBuffersToSend[jj] = (ntosendtotask[jj]/numPartInBuffer) + 1; } //broadcast numbers sent for (int i = 1; i < NProcs; i++) { int src = (ThisTask + NProcs - i) % NProcs; int dst = (ThisTask + i) % NProcs; MPI_Isend (&numBuffersToSend[dst], 1, MPI_INT, dst, 0, MPI_COMM_WORLD, &rqst); MPI_Recv (&numBuffersToRecv[src], 1, MPI_INT, src, 0, MPI_COMM_WORLD, &status); } MPI_Barrier (MPI_COMM_WORLD); //for each mpi thread send info as necessary for (int i = 1; i < NProcs; i++) { int src = (ThisTask + NProcs - i) % NProcs; int dst = (ThisTask + i) % NProcs; Int_t size = numPartInBuffer; int buffOffset = 0; //send buffers for (int jj = 0; jj < numBuffersToSend[dst]-1; jj++) { MPI_Isend (&size, 1, MPI_Int_t, dst, (int)(jj+1), MPI_COMM_WORLD, &rqst); MPI_Isend (&PartsToSend[dst][buffOffset], sizeof(Particle)*size, MPI_BYTE, dst, (int)(10000+jj+1), MPI_COMM_WORLD, &rqst); } //and if anything is remaining size = ntosendtotask[dst] % numPartInBuffer; if (size > 0 && numBuffersToSend[dst] > 0) { MPI_Isend (&size, 1, MPI_Int_t, dst, (int)(numBuffersToSend[dst]), MPI_COMM_WORLD, &rqst); MPI_Isend (&PartsToSend[dst][buffOffset], sizeof(Particle)*size, MPI_BYTE, dst, (int)(10000+numBuffersToSend[dst]), MPI_COMM_WORLD, &rqst); } // Receive Buffers buffOffset = 0; for (int jj = 0; jj < numBuffersToRecv[src]; jj++) { Int_t numInBuffer = 0; MPI_Recv (&numInBuffer, 1, MPI_Int_t, src, (int)(jj+1), MPI_COMM_WORLD, &status); MPI_Recv (&PartsToRecv[src][buffOffset], sizeof(Particle)*numInBuffer, MPI_BYTE, src, (int)(10000+jj+1), MPI_COMM_WORLD, &status); buffOffset += numInBuffer; } } } else { for (Int_t i = 1; i < NProcs; i++) { int src = (ThisTask + NProcs - i) % NProcs; int dst = (ThisTask + i) % NProcs; MPI_Isend (PartsToSend[dst], ntosendtotask[dst]*sizeof(Particle), MPI_BYTE, dst, ThisTask, MPI_COMM_WORLD, &rqst); MPI_Recv (PartsToRecv[src], ntorecievefromtask[src]*sizeof(Particle), MPI_BYTE, src, src, MPI_COMM_WORLD, &status); } } #endif // Organize particles in files for the ExtendedOutput // First determine which files ThisTask should write int npartthistask = 0; int npartperfile[opt.num_files]; for (Int_t i = 0; i < opt.num_files; i++) npartperfile[i] = 0; // Loop over particles to get how many go to each file for (Int_t i = 0; i < nbodies; i++) if (p[i].GetOTask() == ThisTask) { npartthistask++; npartperfile[p[i].GetOFile()]++; } #ifdef USEMPI for (Int_t i = 0; i < NProcs; i++) for (Int_t j = 0; j < ntorecievefromtask[i]; j++) if (PartsToRecv[i][j].GetOTask() == ThisTask) { npartthistask++; npartperfile[PartsToRecv[i][j].GetOFile()]++; } #endif // Allocate and fill arrays of particle, halo, host, and igm ids // Here I am assuming that we have n particles with indexes rangin from 0 to n-1 int ** Id, ** IdStruct, ** IdTopHost, ** IdHost; Id = new int * [opt.num_files]; IdStruct = new int * [opt.num_files]; IdTopHost = new int * [opt.num_files]; IdHost = new int * [opt.num_files]; for (Int_t i = 0; i < opt.num_files; i++) if (npartperfile[i] > 0) { Id[i] = new int [npartperfile[i]]; IdStruct[i] = new int [npartperfile[i]]; 
IdTopHost[i] = new int [npartperfile[i]]; IdHost[i] = new int [npartperfile[i]]; } for (Int_t i = 0; i < nbodies; i++) { if (p[i].GetOTask() == ThisTask) { Id [p[i].GetOFile()][p[i].GetOIndex()] = p[i].GetPID(); IdStruct [p[i].GetOFile()][p[i].GetOIndex()] = p[i].GetIdStruct(); IdHost [p[i].GetOFile()][p[i].GetOIndex()] = p[i].GetIdHost(); IdTopHost [p[i].GetOFile()][p[i].GetOIndex()] = p[i].GetIdTopHost(); } } #ifdef USEMPI for (Int_t i = 0; i < NProcs; i++) for (Int_t j = 0; j < ntorecievefromtask[i]; j++) if (PartsToRecv[i][j].GetOTask() == ThisTask) { Id [PartsToRecv[i][j].GetOFile()][PartsToRecv[i][j].GetOIndex()] = PartsToRecv[i][j].GetPID(); IdStruct [PartsToRecv[i][j].GetOFile()][PartsToRecv[i][j].GetOIndex()] = PartsToRecv[i][j].GetIdStruct(); IdHost [PartsToRecv[i][j].GetOFile()][PartsToRecv[i][j].GetOIndex()] = PartsToRecv[i][j].GetIdHost(); IdTopHost [PartsToRecv[i][j].GetOFile()][PartsToRecv[i][j].GetOIndex()] = PartsToRecv[i][j].GetIdTopHost(); } #endif // Write ExtendedFiles for (Int_t i = 0; i < opt.num_files; i++) if (npartperfile[i] > 0) { sprintf (fname,"%s.extended.%d",opt.outname,i); Fout.open (fname,ios::out); for (Int_t j = 0; j < npartperfile[i]; j++) { Fout << setw(12) << Id[i][j] << " "; Fout << setw(7) << IdStruct[i][j] << " "; Fout << setw(7) << IdHost[i][j] << " "; Fout << setw(7) << IdTopHost[i][j] << " "; Fout << endl; } Fout.close(); } #ifdef USEMPI MPI_Barrier (MPI_COMM_WORLD); #endif LOG(info) << "Finished writing extended output"; } //@} #endif
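
//The snippet below is not part of VELOCIraptor itself: it is a minimal, illustrative sketch of
//how the vector-of-vectors helpers defined earlier in this file (get_sizes, get_total_size,
//flatten) fit together. The function name example_flatten_usage is hypothetical and is
//introduced here only for illustration.
static void example_flatten_usage()
{
    std::vector<std::vector<long long>> vov{{1, 2, 3}, {}, {4, 5}};
    auto sizes = get_sizes(vov);           //per-group sizes: {3, 0, 2}
    auto total = get_total_size(vov);      //5 values in total
    auto flat = flatten(vov, total);       //{1, 2, 3, 4, 5}; vov is emptied and shrunk
    LOG(info) << "flattened " << sizes.size() << " groups into " << flat.size()
        << " values (" << total << " expected)";
}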