I apologize for the length of the title.
In the code that I'm modifying I have created a class that contains a derived type (crom), which in turn contains an allocatable vector of reals (gene) plus a real (idon) and an integer (pos). Since I have a derived data type, I would really like to communicate between processors using MPI_TYPE_CREATE_STRUCT, but the surprising point is that, even if I put the sequence statement inside type crom, the addresses of the elements of the crom type are not sequential. Here is the module, followed by the addresses I observe at run time:
Code:
module class_crom
   use random_N
   type crom
      sequence
      real, allocatable, dimension(:) :: gene
      real    :: idon
      integer :: pos
   end type crom
   real, allocatable, dimension(:) :: gene0, D_gene0
contains
   subroutine popolaz_new(this, i, N_crom, N_geni, mype, chiamate)
      implicit none
      type(crom), intent(out) :: this
      real, allocatable, dimension(:), save :: rnd
      integer, intent(in)    :: i, mype, N_geni, N_crom
      integer, intent(inout) :: chiamate
      integer :: k
      allocate (this%gene(1:N_geni))
      allocate (rnd(1:N_geni), gene0(1:N_geni), D_gene0(1:N_geni))
      gene0 = (/0.,115.,-70.,-65.,-60.,-55.,-50.,-45.,-40.,-35.,-30.,&
              &18.5,18.5,18.5,20.5,20.5,20.5,20.5,20.5,20.5,0.270,0.03,0.03,0.03,0.03/)
      D_gene0 = (/7.,10.,30.,45.,45.,45.,45.,45.,45.,45.,45.,12.,12.,12.,10.,10.,&
              &10.,10.,10.,10.,0.035,0.03,0.03,0.03,0.03/)
      call RANU3(ix+mype, rnd, N_geni, chiamate, ivw, icon)   ! creates a vector of random numbers
      if (chiamate==0) chiamate = 1
      rnd = (rnd*2.0) - 1.0                                   ! rescale to [-1,1]
      do k = 1, N_geni
         this%gene(k) = gene0(k) + (rnd(k)*D_gene0(k))
      end do
      this%pos = i
      print *, 'i=', i, 'mype=', mype, 'rnd_test=', rnd
      deallocate (rnd, gene0, D_gene0)
   end subroutine
end module
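For completeness: RANU3, ix, ivw and icon come from the random_N module used above, which I have not posted. For anyone who wants to compile the module on its own, a minimal stand-in along these lines should be enough; the names, defaults and argument kinds here are my placeholders, not the real generator:
Code:
module random_N
   implicit none
   ! ix, ivw, icon are placeholders for what the real module exports (seed/flags)
   integer :: ix = 12345, ivw = 0, icon = 0
contains
   ! Stand-in for the real RANU3: just fills rnd with uniform numbers in [0,1)
   subroutine RANU3(iseed, rnd, n, chiamate, ivw_loc, icon_loc)
      integer, intent(in)    :: iseed, n
      real,    intent(out)   :: rnd(n)
      integer, intent(inout) :: chiamate, ivw_loc, icon_loc
      call random_number(rnd)   ! the real generator uses iseed/ivw/icon; this one ignores them
   end subroutine RANU3
end module random_N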
For example, running the code I find these addresses:
[data]
cromF(1)%gene(1)  233935744
cromF(1)%gene(2)  233935748   (which is OK: 4 bytes after gene(1))
cromF(1)%idon     233933496   (which lies 2248 bytes BEFORE gene(1)!)
cromF(1)%pos      233933500   (which could be OK: 4 bytes after idon)
[data]
Anyway, I thought I could overcome the problem with a trick: build the offsets (the displacements) directly from the address gaps. But in the end the code does not work. The only way that makes the code work is to forget MPI_TYPE_CREATE_STRUCT and send each item declared in the derived type separately, starting from its first value (as in the plain mpi_send/mpi_recv calls below). My question is: why am I not able to create a correct data mapping with MPI_TYPE_CREATE_STRUCT?
This is the part of the code under scrutiny:
Code:
use class_crom
.
.
type (crom), allocatable, dimension(:) :: cromF, cromF2
.
.
allocate (cromF(1:N_popo), cromF2(1:N_popo))
do i = 1, N_popo
   allocate (cromF(i)%gene(1:N_geni));  cromF(i)%gene(:)  = 0.0
   allocate (cromF2(i)%gene(1:N_geni)); cromF2(i)%gene(:) = 0.0
end do

do i = croXpe(mype,1), croXpe(mype,NcroXpe)
   call popolaz_new(cromF(i), i, N_popo, N_geni, mype, chiamate)
   pop(i,:) = cromF(i)%gene(:)
end do

!pop_T=transpose(pop)
call mpi_barrier(mpi_comm_world, ierr)
!call mpi_allgather(pop_T(1,croXpe(mype,1)),NcroXpe*N_geni,mpi_real,ric_T,NcroXpe*N_geni,mpi_real,mpi_comm_world,ierr)
!call mpi_allgather(cromF(croXpe(mype,1)),2,cromtype,cromF2(croXpe(mype,1)),2,cromtype,mpi_comm_world,ierr)

! addresses of the three components of cromF(1)
!if (mype==0) then
call mpi_get_address(cromF(1)%gene(1), disp(0), ierr)
call mpi_get_address(cromF(1)%idon,    disp(1), ierr)
call mpi_get_address(cromF(1)%pos,     disp(2), ierr)
print *, 'add_g1 ', 'add_idon ', 'add_pos = ', disp(:)
print *, 'address gaps = ', disp(1)-disp(0), disp(2)-disp(1)
!end if

! displacements relative to the first component
base = disp(0)
offsets(0) = disp(0) - base;  offsets(1) = disp(1) - base;  offsets(2) = disp(2) - base
print *, 'offsets= ', offsets(:)
blockcounts(0) = N_geni;  blockcounts(1) = 1;  blockcounts(2) = 1
oldtypes(0) = MPI_REAL;   oldtypes(1) = MPI_REAL;  oldtypes(2) = MPI_INTEGER

!---mpi derived datatype---
call MPI_TYPE_CREATE_STRUCT(3, blockcounts, offsets, oldtypes, cromtype, ierr)
print *, 'struct_ierr= ', ierr
call MPI_TYPE_COMMIT(cromtype, ierr)
!--------------------------

if (mype==0) then
   cromF(1)%idon = 0.5600
   ! sending with the struct type does not work:
   !call mpi_send(cromF(1)%gene(1),1,cromtype,1,1,mpi_comm_world,ierr)
   ! sending component by component works:
   call mpi_send(cromF(1)%gene(1), N_geni, MPI_REAL,    1, 1, mpi_comm_world, ierr)
   call mpi_send(cromF(1)%idon,    1,      MPI_REAL,    1, 1, mpi_comm_world, ierr)
   call mpi_send(cromF(1)%pos,     1,      MPI_INTEGER, 1, 1, mpi_comm_world, ierr)
end if

call mpi_barrier(mpi_comm_world, ierr)

if (mype==1) then
   !call mpi_recv(cromF2(1)%gene(1),1,cromtype,0,1,mpi_comm_world,stat,ierr)
   call mpi_recv(cromF2(1)%gene(1), N_geni, MPI_REAL,    0, 1, mpi_comm_world, stat, ierr)
   call mpi_recv(cromF2(1)%idon,    1,      MPI_REAL,    0, 1, mpi_comm_world, stat, ierr)
   call mpi_recv(cromF2(1)%pos,     1,      MPI_INTEGER, 0, 1, mpi_comm_world, stat, ierr)
end if
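In case it helps, the code above relies on declarations along these lines (I am sketching them here since they are not shown; the exact kinds in my code may differ, but MPI_GET_ADDRESS and MPI_TYPE_CREATE_STRUCT expect address-sized integers, hence MPI_ADDRESS_KIND for disp, base and offsets):
Code:
use mpi
integer :: ierr, mype, cromtype
integer :: stat(MPI_STATUS_SIZE)
integer :: blockcounts(0:2), oldtypes(0:2)
! MPI_GET_ADDRESS / MPI_TYPE_CREATE_STRUCT take address-sized integers
integer(kind=MPI_ADDRESS_KIND) :: disp(0:2), base, offsets(0:2)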