/*! \file ramsesio.cxx
 *  \brief this file contains routines for ramses snapshot file io
 *
 *  \todo need to check whether the amr file quantity ngrid_current is actually the number of cells in the file, as
 *  an example fortran code I have been given seems to also use the ngridlevel array, which stores the number of cells
 *  at a given resolution level.
 *  \todo need to add the ability for multiple read threads and sends between read threads
 *
 *  Edited by:   Rodrigo Ca\~nas
 *               rodrigo.canas@icrar.org
 *
 *  Last edited: 7 - Jun - 2017
 */

//-- RAMSES SPECIFIC IO

#include "stf.h"
#include "io.h"
#include "ramsesitems.h"
#include "endianutils.h"

///read a Fortran unformatted record containing a single integer
int RAMSES_fortran_read(fstream &F, int &i){
    int dummy,byteoffset=0;
    F.read((char*)&dummy, sizeof(dummy)); byteoffset+=sizeof(int);
    F.read((char*)&i,sizeof(int)); byteoffset+=dummy;
    F.read((char*)&dummy, sizeof(dummy)); byteoffset+=sizeof(int);
    return byteoffset;
}
///read a Fortran unformatted record containing a single RAMSESFLOAT
int RAMSES_fortran_read(fstream &F, RAMSESFLOAT &f){
    int dummy,byteoffset=0;
    F.read((char*)&dummy, sizeof(dummy)); byteoffset+=sizeof(int);
    F.read((char*)&f,sizeof(RAMSESFLOAT)); byteoffset+=dummy;
    F.read((char*)&dummy, sizeof(dummy)); byteoffset+=sizeof(int);
    return byteoffset;
}
///read a Fortran unformatted record into an integer array (the record length sets how many bytes are read)
int RAMSES_fortran_read(fstream &F, int *i){
    int dummy,byteoffset=0;
    F.read((char*)&dummy, sizeof(dummy)); byteoffset+=sizeof(int);
    F.read((char*)i,dummy); byteoffset+=dummy;
    F.read((char*)&dummy, sizeof(dummy)); byteoffset+=sizeof(int);
    return byteoffset;
}
int RAMSES_fortran_read(fstream &F, unsigned int *i){
    int dummy,byteoffset=0;
    F.read((char*)&dummy, sizeof(dummy)); byteoffset+=sizeof(int);
    F.read((char*)i,dummy); byteoffset+=dummy;
    F.read((char*)&dummy, sizeof(dummy)); byteoffset+=sizeof(int);
    return byteoffset;
}
int RAMSES_fortran_read(fstream &F, long long *i){
    int dummy,byteoffset=0;
    F.read((char*)&dummy, sizeof(dummy)); byteoffset+=sizeof(int);
    F.read((char*)i,dummy); byteoffset+=dummy;
    F.read((char*)&dummy, sizeof(dummy)); byteoffset+=sizeof(int);
    return byteoffset;
}
int RAMSES_fortran_read(fstream &F, RAMSESFLOAT *f){
    int dummy,byteoffset=0;
    F.read((char*)&dummy, sizeof(dummy)); byteoffset+=sizeof(int);
    F.read((char*)f,dummy); byteoffset+=dummy;
    F.read((char*)&dummy, sizeof(dummy)); byteoffset+=sizeof(int);
    return byteoffset;
}
///skip nskips Fortran unformatted records by reading the leading record length and seeking past the payload
int RAMSES_fortran_skip(fstream &F, int nskips){
    int dummy,byteoffset=0;
    for (int i=0;i<nskips;i++) {
        F.read((char*)&dummy, sizeof(dummy)); byteoffset+=sizeof(int);
        F.seekg(dummy,ios::cur); byteoffset+=dummy;
        F.read((char*)&dummy, sizeof(dummy)); byteoffset+=sizeof(int);
    }
    return byteoffset;
}
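/* Illustrative sketch (not part of the original reader and not called below): every helper above relies
   on the same Fortran unformatted record layout, namely a 4-byte byte count, the payload, and the same
   4-byte byte count repeated. The generic variant here shows that framing in one place; the template and
   its name are ours, and it assumes, as the helpers above do, that record lengths fit in a 32-bit int. */
template <class T>
inline int RAMSES_fortran_read_record(fstream &F, T *buf, size_t nmax)
{
    int nbytes=0, nbytescheck=0, byteoffset=0;
    //leading byte count of the record
    F.read((char*)&nbytes, sizeof(nbytes)); byteoffset+=sizeof(int);
    //read at most nmax elements of the payload and seek past any remainder
    size_t toread = ((size_t)nbytes < nmax*sizeof(T)) ? (size_t)nbytes : nmax*sizeof(T);
    F.read((char*)buf, toread);
    if ((size_t)nbytes > toread) F.seekg(nbytes-toread, ios::cur);
    byteoffset+=nbytes;
    //trailing byte count, which should match the leading one
    F.read((char*)&nbytescheck, sizeof(nbytescheck)); byteoffset+=sizeof(int);
    return byteoffset;
}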
    Finfo>>stringbuf>>stringbuf>>opt.num_files;
    getline(Finfo,stringbuf);//ndim
    getline(Finfo,stringbuf);//lmin
    getline(Finfo,stringbuf);//lmax
    getline(Finfo,stringbuf);//ngridmax
    getline(Finfo,stringbuf);//nstep
    getline(Finfo,stringbuf);//blank
    getline(Finfo,stringbuf);//box
    getline(Finfo,stringbuf);//time
    getline(Finfo,stringbuf);//a
    getline(Finfo,stringbuf);//hubble
    Finfo>>stringbuf>>stringbuf>>OmegaM;
    getline(Finfo,stringbuf);
    getline(Finfo,stringbuf);
    getline(Finfo,stringbuf);
    Finfo>>stringbuf>>stringbuf>>OmegaB;
    Finfo.close();
    dmp_mass = 1.0 / (opt.Neff*opt.Neff*opt.Neff) * (OmegaM - OmegaB) / OmegaM;
    //now particle info
    for (i=0;i

void ReadRamses(Options &opt, vector<Particle> &Part, const Int_t nbodies, Particle *&Pbaryons, Int_t nbaryons)
{
    char buf[2000],buf1[2000],buf2[2000];
    string stringbuf,orderingstring;
    fstream Finfo;
    fstream *Famr;
    fstream *Fhydro;
    fstream *Fpart, *Fpartvel, *Fpartid, *Fpartmass, *Fpartlevel, *Fpartage, *Fpartmet;
    RAMSES_Header *header;
    int intbuff[NRAMSESTYPE];
    long long longbuff[NRAMSESTYPE];
    int i,j,k,n,idim,ivar,igrid,ireaderror=0;
    Int_t count2,bcount2;
    //IntType inttype;
    int dummy,byteoffset;
    Double_t MP_DM=MAXVALUE,LN,N_DM,MP_B=0;
    double z,aadjust,Hubble,Hubbleflow;
    Double_t mscale,lscale,lvscale,rhoscale;
    Double_t mtemp,utemp,rhotemp,Ztemp,Ttemp;
    Coordinate xpos,vpos;
    RAMSESFLOAT xtemp[3],vtemp[3];
    RAMSESIDTYPE idval;
    int typeval;
    RAMSESFLOAT ageval,metval;
    int *ngridlevel,*ngridbound,*ngridfile;
    int lmin=1000000,lmax=0;
    double dmp_mass;
    int ninputoffset = 0;
    int ifirstfile=0,ibuf=0;
    std::vector<int> ireadfile;
    Int_t ibufindex;
    int *ireadtask,*readtaskID;
#ifndef USEMPI
    int ThisTask=0,NProcs=1;
    ireadfile = std::vector<int>(opt.num_files, 1);
#else
    MPI_Bcast(&(opt.num_files), sizeof(opt.num_files), MPI_BYTE, 0, MPI_COMM_WORLD);
    MPI_Barrier(MPI_COMM_WORLD);
#endif
    ///\todo because of the fortran record format, it is easier if chunksize is BIG so that
    ///the number of particles local to a file is smaller
    Int_t chunksize=RAMSESCHUNKSIZE,nchunk;
    RAMSESFLOAT *xtempchunk, *vtempchunk, *mtempchunk, *sphtempchunk, *agetempchunk, *mettempchunk, *hydrotempchunk;
    RAMSESIDTYPE *idvalchunk, *levelchunk;
    int *icellchunk;

    Famr       = new fstream[opt.num_files];
    Fhydro     = new fstream[opt.num_files];
    Fpart      = new fstream[opt.num_files];
    Fpartvel   = new fstream[opt.num_files];
    Fpartmass  = new fstream[opt.num_files];
    Fpartid    = new fstream[opt.num_files];
    Fpartlevel = new fstream[opt.num_files];
    Fpartage   = new fstream[opt.num_files];
    Fpartmet   = new fstream[opt.num_files];
    header     = new RAMSES_Header[opt.num_files];
    Particle *Pbuf;
#ifdef USEMPI
    MPI_Status status;
    MPI_Comm mpi_comm_read;
    vector<Particle> *Preadbuf; //for parallel io
    Int_t BufSize=opt.mpiparticlebufsize;
    Int_t Nlocalbuf,*Nbuf, *Nreadbuf,*nreadoffset;
    Int_t *Nlocalthreadbuf,Nlocaltotalbuf;
    int *irecv, sendTask,recvTask,irecvflag, *mpi_irecvflag;
    MPI_Request *mpi_request;
    Int_t inreadsend,totreadsend;
    Int_t *mpi_nsend_baryon;
    Int_t *mpi_nsend_readthread;
    Int_t *mpi_nsend_readthread_baryon;
    if (opt.iBaryonSearch) mpi_nsend_baryon=new Int_t[NProcs*NProcs];
    if (opt.nsnapread>1) {
        mpi_nsend_readthread=new Int_t[opt.nsnapread*opt.nsnapread];
        if (opt.iBaryonSearch) mpi_nsend_readthread_baryon=new Int_t[opt.nsnapread*opt.nsnapread];
    }
    Nbuf=new Int_t[NProcs];
    nreadoffset=new Int_t[opt.nsnapread];
    ireadtask=new int[NProcs];
    readtaskID=new int[opt.nsnapread];
    MPIDistributeReadTasks(opt,ireadtask,readtaskID);
    MPI_Comm_split(MPI_COMM_WORLD, (ireadtask[ThisTask]>=0), ThisTask, &mpi_comm_read);
    if (ireadtask[ThisTask]>=0) {
        //to temporarily store data
        Pbuf=new Particle[BufSize*NProcs];
        Nreadbuf=new Int_t[opt.nsnapread];
        for (int j=0;j<NProcs;j++) Nbuf[j]=0;
        for (int j=0;j<opt.nsnapread;j++) Nreadbuf[j]=0;
        if (opt.nsnapread>1){
            Preadbuf=new vector<Particle>[opt.nsnapread];
            for (int j=0;j<opt.nsnapread;j++) Preadbuf[j].reserve(BufSize);
        }
        //to determine which files this thread should read
        ireadfile=std::vector<int>(opt.num_files);
        ifirstfile=MPISetFilesRead(opt,ireadfile,ireadtask);
        inreadsend=0;
        for (int j=0;j<opt.num_files;j++) inreadsend+=ireadfile[j];
    }
#endif

#ifdef USEMPI
    if (ireadtask[ThisTask]>=0) {
#endif
    //first read cosmological information
    sprintf(buf1,"%s/info_%s.txt",opt.fname,opt.ramsessnapname);
    Finfo.open(buf1, ios::in);
    getline(Finfo,stringbuf);//nfiles
    getline(Finfo,stringbuf);//ndim
    Finfo>>stringbuf>>stringbuf>>header[ifirstfile].levelmin;
    getline(Finfo,stringbuf);//lmax
    getline(Finfo,stringbuf);//ngridmax
    getline(Finfo,stringbuf);//nstep
    getline(Finfo,stringbuf);//blank
    Finfo>>stringbuf>>stringbuf>>header[ifirstfile].BoxSize;
    Finfo>>stringbuf>>stringbuf>>header[ifirstfile].time;
    Finfo>>stringbuf>>stringbuf>>header[ifirstfile].aexp;
    Finfo>>stringbuf>>stringbuf>>header[ifirstfile].HubbleParam;
    Finfo>>stringbuf>>stringbuf>>header[ifirstfile].Omegam;
    Finfo>>stringbuf>>stringbuf>>header[ifirstfile].OmegaLambda;
    Finfo>>stringbuf>>stringbuf>>header[ifirstfile].Omegak;
    Finfo>>stringbuf>>stringbuf>>header[ifirstfile].Omegab;
    Finfo>>stringbuf>>stringbuf>>header[ifirstfile].scale_l;
    Finfo>>stringbuf>>stringbuf>>header[ifirstfile].scale_d;
    Finfo>>stringbuf>>stringbuf>>header[ifirstfile].scale_t;
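    /* The block above assumes the usual layout of the RAMSES info_XXXXX.txt file, where each quantity
       sits on its own line as "name = value" (for example a line of the form
       "boxlen      =  0.100000000000000E+01"), so every Finfo>>stringbuf>>stringbuf>>X extraction
       consumes the name and the "=" before the value, and getline() is used to discard lines that are
       not needed. The example line shown here illustrates the format and is not taken from a specific
       snapshot. */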
    //convert boxsize to comoving kpc/h
    header[ifirstfile].BoxSize*=header[ifirstfile].scale_l/3.086e21/header[ifirstfile].aexp*header[ifirstfile].HubbleParam/100.0;
    getline(Finfo,stringbuf);
    Finfo>>stringbuf>>orderingstring;
    getline(Finfo,stringbuf);
    Finfo.close();

    opt.p            = header[ifirstfile].BoxSize;
    opt.a            = header[ifirstfile].aexp;
    opt.Omega_m      = header[ifirstfile].Omegam;
    opt.Omega_Lambda = header[ifirstfile].OmegaLambda;
    opt.Omega_b      = header[ifirstfile].Omegab;
    opt.h            = header[ifirstfile].HubbleParam/100.0;
    opt.Omega_cdm    = opt.Omega_m-opt.Omega_b;
    //set hubble unit to km/s/kpc
    opt.H = 0.1;
    //set Gravity to value for kpc (km/s)^2 / solar mass
    opt.G = 4.30211349e-6;
    //and for now fix the units
    opt.lengthtokpc=opt.velocitytokms=opt.masstosolarmass=1.0;
    //Hubble flow
    if (opt.comove) aadjust=1.0;
    else aadjust=opt.a;
    CalcOmegak(opt);
    Hubble=GetHubble(opt, aadjust);
    CalcCriticalDensity(opt, aadjust);
    CalcBackgroundDensity(opt, aadjust);
    CalcVirBN98(opt,aadjust);
    //if opt.virlevel<0, use the virial overdensity given by the Bryan & Norman (1998) virialization level
    if (opt.virlevel<0) opt.virlevel=opt.virBN98;
    PrintCosmology(opt);

    //adjust length scale so that we convert from 0 to 1 (box units) to kpc comoving
    //to scale mpi domains correctly we need to store in opt.lengthinputconversion the box size in comoving little-h units
    //opt.lengthinputconversion = opt.p*opt.h/opt.a;
    opt.lengthinputconversion = header[ifirstfile].BoxSize;
    //adjust velocity scale so that ramses is converted to km/s, from which one can convert again
    opt.velocityinputconversion = header[ifirstfile].scale_l/header[ifirstfile].scale_t*1e-5;
    //convert mass from code units to Solar Masses
    mscale = header[ifirstfile].scale_d * (header[ifirstfile].scale_l * header[ifirstfile].scale_l * header[ifirstfile].scale_l) / 1.988e+33;
    //convert length from code units to kpc (this lscale includes the physical box size)
    lscale = header[ifirstfile].scale_l/3.086e21;
    //convert velocity from code units to km/s
    lvscale = header[ifirstfile].scale_l/header[ifirstfile].scale_t*1e-5;
    //convert density to Msun/kpc^3
    rhoscale = mscale/(lscale*lscale*lscale);
    //ignore hubble flow
    Hubbleflow=0.;
    //for (int j=0;j
#ifdef USEMPI
    if (ireadtask[ThisTask]>=0) {
#endif
    dmp_mass = 1.0 / (opt.Neff*opt.Neff*opt.Neff) * opt.Omega_cdm / opt.Omega_m;
#ifdef USEMPI
    }
    MPI_Bcast(&dmp_mass, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
#endif

    //if not only gas is being searched, open the particle data
    count2=bcount2=0;
    if (opt.partsearchtype!=PSTGAS) {
#ifdef USEMPI
    if (ireadtask[ThisTask]>=0) {
    inreadsend=0;
#endif
    //read particle files, which consist of positions, velocities, mass, id and level (along with ages and metallicities if some flags are set)
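    /* A note on the classification used below (explanation only, no new logic): dmp_mass is the expected
       mass of a single dark matter particle in code units, (Omega_cdm/Omega_m)/Neff^3, since RAMSES
       code-unit masses are fractions of the total box mass. For example, with hypothetical values
       Neff=256 and Omega_cdm/Omega_m ~ 0.84 this gives ~5e-8. Particles whose mass matches dmp_mass to a
       relative tolerance of 1e-5 are flagged as dark matter and everything else as stars, while particles
       with a non-matching mass and zero age are skipped as "ghost" particles; the tolerance comes from the
       checks below, and the reading of the zero-age case is ours. */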
    for (i=0;i
    if (fabs((mtempchunk[nn]-dmp_mass)/dmp_mass) > 1e-5 && (agetempchunk[nn] == 0.0)) {
        // GHOST PARTICLE: the mass does not match a dark matter particle but the age is zero, so skip it
    }
    else {
        xtemp[0] = xtempchunk[nn]; xtemp[1] = xtempchunk[nn+nchunk]; xtemp[2] = xtempchunk[nn+2*nchunk];
        vtemp[0] = vtempchunk[nn]; vtemp[1] = vtempchunk[nn+nchunk]; vtemp[2] = vtempchunk[nn+2*nchunk];
        idval = idvalchunk[nn];
        ///Need to check this for correct 'endianness'
        //for (int kk=0;kk<3;kk++) {xtemp[kk]=LittleRAMSESFLOAT(xtemp[kk]);vtemp[kk]=LittleRAMSESFLOAT(vtemp[kk]);}
#ifndef NOMASS
        mtemp=mtempchunk[nn];
#else
        mtemp=1.0;
#endif
        ageval = agetempchunk[nn];
        //a particle whose mass matches the dark matter particle mass is dark matter, otherwise it is a star
        if (fabs((mtemp-dmp_mass)/dmp_mass) < 1e-5) typeval = DARKTYPE;
        else typeval = STARTYPE;
        /*
        if (ageval==0 && idval>0) typeval=DARKTYPE;
        else if (idval>0) typeval=STARTYPE;
        else typeval=BHTYPE;
        */
#ifdef USEMPI
        //determine the processor this particle belongs on based on its spatial position
        ibuf=MPIGetParticlesProcessor(opt, xtemp[0],xtemp[1],xtemp[2]);
        ibufindex=ibuf*BufSize+Nbuf[ibuf];
#endif
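        /* Layout note (explanatory only): Pbuf is sized BufSize*NProcs, so the slots
           [ibuf*BufSize, (ibuf+1)*BufSize) act as a staging buffer for the MPI task ibuf selected from
           the particle position, with Nbuf[ibuf] counting how many slots are in use; that a full buffer
           is shipped off and the counter reset inside MPIAddParticletoAppropriateBuffer is our reading
           of that helper's name and of how Nbuf is used below, not something defined in this file. */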
        //reset hydro quantities of the buffer particle
#ifdef USEMPI
#ifdef GASON
        Pbuf[ibufindex].SetU(0);
#ifdef STARON
        Pbuf[ibufindex].SetSFR(0);
        Pbuf[ibufindex].SetZmet(0);
#endif
#endif
#ifdef STARON
        Pbuf[ibufindex].SetZmet(0);
        Pbuf[ibufindex].SetTage(0);
#endif
#ifdef BHON
#endif
#endif

        if (opt.partsearchtype==PSTALL) {
#ifdef USEMPI
            Pbuf[ibufindex]=Particle(mtemp*mscale,
                xtemp[0]*lscale,xtemp[1]*lscale,xtemp[2]*lscale,
                vtemp[0]*opt.velocityinputconversion+Hubbleflow*xtemp[0],
                vtemp[1]*opt.velocityinputconversion+Hubbleflow*xtemp[1],
                vtemp[2]*opt.velocityinputconversion+Hubbleflow*xtemp[2],
                count2,typeval);
            Pbuf[ibufindex].SetPID(idval);
#ifdef EXTRAINPUTINFO
            if (opt.iextendedoutput) {
                Pbuf[ibufindex].SetInputFileID(i);
                Pbuf[ibufindex].SetInputIndexInFile(nn+ninputoffset);
            }
#endif
            Nbuf[ibuf]++;
            MPIAddParticletoAppropriateBuffer(opt, ibuf, ibufindex, ireadtask, BufSize, Nbuf, Pbuf, Nlocal, Part.data(), Nreadbuf, Preadbuf);
#else
            Part[count2]=Particle(mtemp*mscale,
                xtemp[0]*lscale,xtemp[1]*lscale,xtemp[2]*lscale,
                vtemp[0]*opt.velocityinputconversion+Hubbleflow*xtemp[0],
                vtemp[1]*opt.velocityinputconversion+Hubbleflow*xtemp[1],
                vtemp[2]*opt.velocityinputconversion+Hubbleflow*xtemp[2],
                count2,typeval);
            Part[count2].SetPID(idval);
#ifdef EXTRAINPUTINFO
            if (opt.iextendedoutput) {
                Part[count2].SetInputFileID(i);
                Part[count2].SetInputIndexInFile(nn+ninputoffset);
            }
#endif
#endif
            count2++;
        }
        else if (opt.partsearchtype==PSTDARK) {
            if (!(typeval==STARTYPE||typeval==BHTYPE)) {
#ifdef USEMPI
                Pbuf[ibufindex]=Particle(mtemp*mscale,
                    xtemp[0]*lscale,xtemp[1]*lscale,xtemp[2]*lscale,
                    vtemp[0]*opt.velocityinputconversion+Hubbleflow*xtemp[0],
                    vtemp[1]*opt.velocityinputconversion+Hubbleflow*xtemp[1],
                    vtemp[2]*opt.velocityinputconversion+Hubbleflow*xtemp[2],
                    count2,DARKTYPE);
                Pbuf[ibufindex].SetPID(idval);
#ifdef EXTRAINPUTINFO
                if (opt.iextendedoutput) {
                    Pbuf[ibufindex].SetInputFileID(i);
                    Pbuf[ibufindex].SetInputIndexInFile(nn+ninputoffset);
                }
#endif
                //ensure that we store the number of particles to be sent to the other reading threads
                Nbuf[ibuf]++;
                MPIAddParticletoAppropriateBuffer(opt, ibuf, ibufindex, ireadtask, BufSize, Nbuf, Pbuf, Nlocal, Part.data(), Nreadbuf, Preadbuf);
#else
                Part[count2]=Particle(mtemp*mscale,
                    xtemp[0]*lscale,xtemp[1]*lscale,xtemp[2]*lscale,
                    vtemp[0]*opt.velocityinputconversion+Hubbleflow*xtemp[0],
                    vtemp[1]*opt.velocityinputconversion+Hubbleflow*xtemp[1],
                    vtemp[2]*opt.velocityinputconversion+Hubbleflow*xtemp[2],
                    count2,typeval);
                Part[count2].SetPID(idval);
#ifdef EXTRAINPUTINFO
                if (opt.iextendedoutput) {
                    Part[count2].SetInputFileID(i);
                    Part[count2].SetInputIndexInFile(nn+ninputoffset);
                }
#endif
#endif
                count2++;
            }
            else if (opt.iBaryonSearch) {
#ifdef USEMPI
                Pbuf[ibufindex]=Particle(mtemp*mscale,
                    xtemp[0]*lscale,xtemp[1]*lscale,xtemp[2]*lscale,
                    vtemp[0]*opt.velocityinputconversion+Hubbleflow*xtemp[0],
                    vtemp[1]*opt.velocityinputconversion+Hubbleflow*xtemp[1],
                    vtemp[2]*opt.velocityinputconversion+Hubbleflow*xtemp[2],
                    count2);
                Pbuf[ibufindex].SetPID(idval);
#ifdef EXTRAINPUTINFO
                if (opt.iextendedoutput) {
                    Pbuf[ibufindex].SetInputFileID(i);
                    Pbuf[ibufindex].SetInputIndexInFile(nn+ninputoffset);
                }
#endif
                if (typeval==STARTYPE) Pbuf[ibufindex].SetType(STARTYPE);
                else if (typeval==BHTYPE) Pbuf[ibufindex].SetType(BHTYPE);
                //ensure that we store the number of particles to be sent to the reading threads
                Nbuf[ibuf]++;
                if (ibuf==ThisTask) {
                    if (k==RAMSESSTARTYPE) Nlocalbaryon[2]++;
                    else if (k==RAMSESSINKTYPE) Nlocalbaryon[3]++;
                }
                MPIAddParticletoAppropriateBuffer(opt, ibuf, ibufindex, ireadtask, BufSize, Nbuf, Pbuf, Nlocalbaryon[0], Pbaryons, Nreadbuf, Preadbuf);
#else
                Pbaryons[bcount2]=Particle(mtemp*mscale,
                    xtemp[0]*lscale,xtemp[1]*lscale,xtemp[2]*lscale,
                    vtemp[0]*opt.velocityinputconversion+Hubbleflow*xtemp[0],
                    vtemp[1]*opt.velocityinputconversion+Hubbleflow*xtemp[1],
                    vtemp[2]*opt.velocityinputconversion+Hubbleflow*xtemp[2],
                    count2,typeval);
                Pbaryons[bcount2].SetPID(idval);
#ifdef EXTRAINPUTINFO
                if (opt.iextendedoutput) {
                    Pbaryons[bcount2].SetInputFileID(i);
                    Pbaryons[bcount2].SetInputIndexInFile(nn+ninputoffset);
                }
#endif
#endif
                bcount2++;
            }
        }
        else if (opt.partsearchtype==PSTSTAR) {
            if (typeval==STARTYPE) {
#ifdef USEMPI
                //if using MPI, determine the processor and place in ibuf, store the particle in the particle
                //buffer and if the buffer is full broadcast the data, unless ibuf is this task, in which case just store locally
                Pbuf[ibufindex]=Particle(mtemp*mscale,
                    xtemp[0]*lscale,xtemp[1]*lscale,xtemp[2]*lscale,
                    vtemp[0]*opt.velocityinputconversion+Hubbleflow*xtemp[0],
                    vtemp[1]*opt.velocityinputconversion+Hubbleflow*xtemp[1],
                    vtemp[2]*opt.velocityinputconversion+Hubbleflow*xtemp[2],
                    count2,STARTYPE);
                Pbuf[ibufindex].SetPID(idval);
#ifdef EXTRAINPUTINFO
                if (opt.iextendedoutput) {
                    Pbuf[ibufindex].SetInputFileID(i);
                    Pbuf[ibufindex].SetInputIndexInFile(nn+ninputoffset);
                }
#endif
                //ensure that we store the number of particles to be sent to the reading threads
                Nbuf[ibuf]++;
                MPIAddParticletoAppropriateBuffer(opt, ibuf, ibufindex, ireadtask, BufSize, Nbuf, Pbuf, Nlocal, Part.data(), Nreadbuf, Preadbuf);
#else
                Part[count2]=Particle(mtemp*mscale,
                    xtemp[0]*lscale,xtemp[1]*lscale,xtemp[2]*lscale,
                    vtemp[0]*opt.velocityinputconversion+Hubbleflow*xtemp[0],
                    vtemp[1]*opt.velocityinputconversion+Hubbleflow*xtemp[1],
                    vtemp[2]*opt.velocityinputconversion+Hubbleflow*xtemp[2],
                    count2,typeval);
                Part[count2].SetPID(idval);
#ifdef EXTRAINPUTINFO
                if (opt.iextendedoutput) {
                    Part[count2].SetInputFileID(i);
                    Part[count2].SetInputIndexInFile(nn+ninputoffset);
                }
#endif
#endif
                count2++;
            }
        }
    }//end of ghost particle check
    }//end of loop over chunk
    delete[] xtempchunk;
    delete[] vtempchunk;
    delete[] mtempchunk;
    delete[] idvalchunk;
    delete[] agetempchunk;
    delete[] levelchunk;
    delete[] mettempchunk;
    Fpart[i].close();
    Fpartvel[i].close();
    Fpartmass[i].close();
    Fpartid[i].close();
    Fpartlevel[i].close();
    Fpartage[i].close();
    Fpartmet[i].close();
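    /* Communication note (explanatory only): after a file has been processed, any particles still sitting
       in Pbuf are flushed with MPI_Ssend to their destination tasks, and a follow-up send of Nbuf[ibuf]=0
       acts as an end-of-stream marker so the receiving side knows no more particles will arrive from this
       reader; that is the pattern visible in the sends below and in the matching
       MPIReceiveParticlesFromReadThreads call, rather than behaviour defined in this file. */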
#ifdef USEMPI
    //send information between read threads
    if (opt.nsnapread>1&&inreadsend<totreadsend) {
    if (Nbuf[ibuf]>0) {
        MPI_Ssend(&Pbuf[ibuf*BufSize], sizeof(Particle)*Nbuf[ibuf], MPI_BYTE, ibuf, ibuf, MPI_COMM_WORLD);
        Nbuf[ibuf]=0;
        //last broadcast with Nbuf[ibuf]=0 so that the receiver knows no more particles are to be broadcast
        MPI_Ssend(&Nbuf[ibuf],1,MPI_Int_t,ibuf,ibuf+NProcs,MPI_COMM_WORLD);
    }
    }
    if (opt.nsnapread>1){
        MPI_Allgather(Nreadbuf, opt.nsnapread, MPI_Int_t, mpi_nsend_readthread, opt.nsnapread, MPI_Int_t, mpi_comm_read);
        MPISendParticlesBetweenReadThreads(opt, Preadbuf, Part.data(), ireadtask, readtaskID, Pbaryons, mpi_comm_read, mpi_nsend_readthread, mpi_nsend_readthread_baryon);
    }
    }//end of ireadtask[ibuf]>0
#endif
#ifdef USEMPI
    else {
        MPIReceiveParticlesFromReadThreads(opt,Pbuf,Part.data(),readtaskID, irecv, mpi_irecvflag, Nlocalthreadbuf, mpi_request,Pbaryons);
    }
#endif
    }

    //if gas is searched in some fashion then load the amr/hydro data
    if (opt.partsearchtype==PSTGAS||opt.partsearchtype==PSTALL||(opt.partsearchtype==PSTDARK&&opt.iBaryonSearch)) {
#ifdef USEMPI
    if (ireadtask[ThisTask]>=0) {
    inreadsend=0;
#endif
    for (i=0;i
    if (lmin>header[i].nlevelmax) lmin=header[i].nlevelmax;
    if (lmax<header[i].nlevelmax) lmax=header[i].nlevelmax;
    //if nboundary>0 then need to skip twice then read ngridbound
    if(header[i].nboundary>0) {
        ngridbound=new int[header[i].nboundary*header[i].nlevelmax];
        RAMSES_fortran_skip(Famr[i]);
        RAMSES_fortran_skip(Famr[i]);
        //ngridbound is an array of some sort but I don't see what it is used for
        RAMSES_fortran_read(Famr[i],ngridbound);
        for (j=0;j0) {
            xtempchunk=new RAMSESFLOAT[3*chunksize];
            //store son value in icell
            icellchunk=new int[header[i].twotondim*chunksize];
            //skip grid index, next index and prev index
            RAMSES_fortran_skip(Famr[i],3);
            //now read grid centre
            for (idim=0;idim0) {
                hydrotempchunk=new RAMSESFLOAT[chunksize*header[i].twotondim*header[i].nvarh];
                //first read velocities (for 2 cells per number of dimensions (ie: cell corners?))
                for (idim=0;idim0) {
                    delete[] xtempchunk;
                    delete[] hydrotempchunk;
                }
            }
        }
    Famr[i].close();
#ifdef USEMPI
    //send information between read threads
    if (opt.nsnapread>1&&inreadsend<totreadsend) {
    if (Nbuf[ibuf]>0) {
        MPI_Ssend(&Pbuf[ibuf*BufSize], sizeof(Particle)*Nbuf[ibuf], MPI_BYTE, ibuf, ibuf, MPI_COMM_WORLD);
        MPISendHydroInfoFromReadThreads(opt, Nbuf[ibuf], &Pbuf[ibuf*BufSize], ibuf);
        MPISendStarInfoFromReadThreads(opt, Nbuf[ibuf], &Pbuf[ibuf*BufSize], ibuf);
        MPISendBHInfoFromReadThreads(opt, Nbuf[ibuf], &Pbuf[ibuf*BufSize], ibuf);
        Nbuf[ibuf]=0;
        //last broadcast with Nbuf[ibuf]=0 so that the receiver knows no more particles are to be broadcast
        MPI_Ssend(&Nbuf[ibuf],1,MPI_Int_t,ibuf,ibuf+NProcs,MPI_COMM_WORLD);
    }
    }
    if (opt.nsnapread>1){
        MPI_Allgather(Nreadbuf, opt.nsnapread, MPI_Int_t, mpi_nsend_readthread, opt.nsnapread, MPI_Int_t, mpi_comm_read);
        MPISendParticlesBetweenReadThreads(opt, Preadbuf, Part.data(), ireadtask, readtaskID, Pbaryons, mpi_comm_read, mpi_nsend_readthread, mpi_nsend_readthread_baryon);
    }
    }//end of reading task
#endif
#ifdef USEMPI
    //if not reading information then wait to receive information
    else {
        MPIReceiveParticlesFromReadThreads(opt,Pbuf,Part.data(),readtaskID, irecv, mpi_irecvflag, Nlocalthreadbuf, mpi_request,Pbaryons);
    }
#endif
    }//end of check if gas loaded

    //update info
    opt.p*=opt.a/opt.h;
#ifdef HIGHRES
    opt.zoomlowmassdm=MP_DM*mscale;
#endif
#ifdef USEMPI
    MPI_Barrier(MPI_COMM_WORLD);
    //update cosmological data and boundary in code units
    MPI_Bcast(&(opt.p),sizeof(opt.p),MPI_BYTE,0,MPI_COMM_WORLD);
    MPI_Bcast(&(opt.a),sizeof(opt.a),MPI_BYTE,0,MPI_COMM_WORLD);
    MPI_Bcast(&(opt.Omega_cdm),sizeof(opt.Omega_cdm),MPI_BYTE,0,MPI_COMM_WORLD);
    MPI_Bcast(&(opt.Omega_b),sizeof(opt.Omega_b),MPI_BYTE,0,MPI_COMM_WORLD);
    MPI_Bcast(&(opt.Omega_m),sizeof(opt.Omega_m),MPI_BYTE,0,MPI_COMM_WORLD);
    MPI_Bcast(&(opt.Omega_Lambda),sizeof(opt.Omega_Lambda),MPI_BYTE,0,MPI_COMM_WORLD);
    MPI_Bcast(&(opt.h),sizeof(opt.h),MPI_BYTE,0,MPI_COMM_WORLD);
    MPI_Bcast(&(opt.rhocrit),sizeof(opt.rhocrit),MPI_BYTE,0,MPI_COMM_WORLD);
    MPI_Bcast(&(opt.rhobg),sizeof(opt.rhobg),MPI_BYTE,0,MPI_COMM_WORLD);
    MPI_Bcast(&(opt.virlevel),sizeof(opt.virlevel),MPI_BYTE,0,MPI_COMM_WORLD);
    MPI_Bcast(&(opt.virBN98),sizeof(opt.virBN98),MPI_BYTE,0,MPI_COMM_WORLD);
    MPI_Bcast(&(opt.ellxscale),sizeof(opt.ellxscale),MPI_BYTE,0,MPI_COMM_WORLD);
    MPI_Bcast(&(opt.lengthinputconversion),sizeof(opt.lengthinputconversion),MPI_BYTE,0,MPI_COMM_WORLD);
    MPI_Bcast(&(opt.velocityinputconversion),sizeof(opt.velocityinputconversion),MPI_BYTE,0,MPI_COMM_WORLD);
    MPI_Bcast(&(opt.massinputconversion),sizeof(opt.massinputconversion),MPI_BYTE,0,MPI_COMM_WORLD);
    MPI_Bcast(&(opt.G),sizeof(opt.G),MPI_BYTE,0,MPI_COMM_WORLD);
#ifdef NOMASS
    MPI_Bcast(&(opt.MassValue),sizeof(opt.MassValue),MPI_BYTE,0,MPI_COMM_WORLD);
#endif
    MPI_Bcast(&(Ntotal),sizeof(Ntotal),MPI_BYTE,0,MPI_COMM_WORLD);
    MPI_Bcast(&opt.zoomlowmassdm,sizeof(opt.zoomlowmassdm),MPI_BYTE,0,MPI_COMM_WORLD);
#endif
    //store how to convert input internal energies to physical output internal energies
    //as we already convert ramses units to sensible output units, there is nothing to do
    opt.internalenergyinputconversion = 1.0;

    //a bit of clean up
#ifdef USEMPI
    MPI_Comm_free(&mpi_comm_read);
    if (opt.iBaryonSearch) delete[] mpi_nsend_baryon;
    if (opt.nsnapread>1) {
        delete[] mpi_nsend_readthread;
        if (opt.iBaryonSearch) delete[] mpi_nsend_readthread_baryon;
        if (ireadtask[ThisTask]>=0) delete[] Preadbuf;
    }
    delete[] Nbuf;
    if (ireadtask[ThisTask]>=0) {
        delete[] Nreadbuf;
        delete[] Pbuf;
    }
    delete[] ireadtask;
    delete[] readtaskID;
#endif
}
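/* Illustrative sketch (not called anywhere in this file): the amr/hydro section above reads, for each oct
   at a given refinement level, the grid centre and the "son" indices of its 2^ndim cells, and only leaf
   cells become gas particles. The helper below shows the usual RAMSES geometry for that step, namely a
   cell size of boxlen*0.5^ilevel and a cell centre offset of +/- dx/2 from the grid centre along each
   dimension, with the gas mass following as density*dx^3. The function name, the bit-per-dimension
   encoding of icell and the use of son==0 as the leaf test are our assumptions about the standard RAMSES
   layout, since the corresponding code above is truncated. */
inline void RAMSES_cell_geometry_sketch(const double gridcentre[3], int ilevel, int icell,
    double boxlen, double rho, double cellpos[3], double &dx, double &cellmass)
{
    //cell size at this refinement level, in the same units as boxlen
    dx = boxlen;
    for (int l=0; l<ilevel; l++) dx *= 0.5;
    //icell in [0, 2^ndim) selects which corner of the parent oct this cell occupies
    for (int idim=0; idim<3; idim++) {
        int bit = (icell >> idim) & 1;
        cellpos[idim] = gridcentre[idim] + (bit - 0.5) * dx;
    }
    //a leaf cell (son index of zero in the amr file) would then carry a gas mass of rho*dx^3
    cellmass = rho * dx * dx * dx;
}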