/* ROUTINE TO DO ENTIRELY FOURTH ORDER 2-D ELASTIC FINITE DIFFERENCE */
/* WAVE PROPAGATION BY METHOD OF VIDALE AND STEAD */
/* revised by Richard Stead, 4-18-86 */
/* by John Vidale, 3-30-86, all rights reserved */

/*
 * Coefficient-table access.  pt[] holds, for each grid point, an offset
 * into the flat coefficient table na[]; inside inner4(), pna points at
 * the current point's run of coefficients.
 */
#define ta(x) na[pt[x]+0]
#define tb(x) na[pt[x]+1]

/* Homogeneous-medium coefficients (selected when tc0 = pna[2] < 0). */
#define ta0 pna[0]
#define tb0 pna[1]
#define tc0 pna[2]
#define tcc pna[3]
#define tb16 pna[4]
#define tbb pna[5]
#define ta16 pna[6]
#define taa pna[7]
#define tcd pna[8]
#define tcd10 pna[9]

/*
 * Heterogeneous-medium coefficients.  NOTE: these deliberately alias
 * pna[3..9] with the homogeneous set above — a given grid point uses
 * one scheme or the other, never both, so the two coefficient layouts
 * share storage.  Naming: t{l,m}{u,d,0}{row}{m,p}{col} — l = u-equation,
 * m = w-equation; u/d = rows above/below; m/p = columns left/right.
 */
#define tlu2m2 pna[3]
#define tlu2m1 pna[4]
#define tlu2 pna[5]
#define tlu2p1 pna[6]
#define tlu2p2 pna[7]
#define tlu1m2 pna[8]
#define tlu1m1 pna[9]
#define tlu1 pna[10]
#define tlu1p1 pna[11]
#define tlu1p2 pna[12]
#define tl00m2 pna[13]
#define tl00m1 pna[14]
#define tl00 pna[15]
#define tl00p1 pna[16]
#define tl00p2 pna[17]
#define tld1m2 pna[18]
#define tld1m1 pna[19]
#define tld1 pna[20]
#define tld1p1 pna[21]
#define tld1p2 pna[22]
#define tld2m2 pna[23]
#define tld2m1 pna[24]
#define tld2 pna[25]
#define tld2p1 pna[26]
#define tld2p2 pna[27]
#define tmu2m2 pna[28]
#define tmu2m1 pna[29]
#define tmu2 pna[30]
#define tmu2p1 pna[31]
#define tmu2p2 pna[32]
#define tmu1m2 pna[33]
#define tmu1m1 pna[34]
#define tmu1 pna[35]
#define tmu1p1 pna[36]
#define tmu1p2 pna[37]
#define tm00m2 pna[38]
#define tm00m1 pna[39]
#define tm00 pna[40]
#define tm00p1 pna[41]
#define tm00p2 pna[42]
#define tmd1m2 pna[43]
#define tmd1m1 pna[44]
#define tmd1 pna[45]
#define tmd1p1 pna[46]
#define tmd1p2 pna[47]
#define tmd2m2 pna[48]
#define tmd2m1 pna[49]
#define tmd2 pna[50]
#define tmd2p1 pna[51]
#define tmd2p2 pna[52]

/* One grid point's displacement; u and w are the two components
 * (presumably horizontal and vertical — TODO confirm against caller). */
struct disp {
	float u;
	float w;
};

/*
 * inner4 - advance cnt consecutive interior grid points one time step.
 *
 * p1  - displacement field at the previous time level on entry;
 *       overwritten in place with the new time level (leapfrog update:
 *       new = 2*current - previous + spatial stencil).
 * p2  - displacement field at the current time level (read only).
 * pt  - per-point offsets into the coefficient table na[].
 * na  - finite-difference coefficient table.
 * cnt - number of consecutive points to update.
 * nx  - row stride of the 2-D grid (points per row).
 *
 * Each point must have two full rows of neighbors above and below it
 * (indices p2 +/- 2*nx +/- 2 are dereferenced).  When tc0 < 0 the cheap
 * fourth-order homogeneous-medium stencil is used; otherwise the full
 * 25-point heterogeneous stencil with per-point coefficients applies.
 *
 * Changes from the original: ANSI prototype with explicit void return
 * (implicit int is invalid since C99), and the pre-loop p1--/p2--/pt--
 * (which formed pointers before the start of the arrays — undefined
 * behavior) replaced by a for loop with the identical visit order.
 */
void
inner4(struct disp *p1, struct disp *p2, int *pt, float *na, int cnt, int nx)
{
	int i;

	for (i = 0; i < cnt; i++, p1++, p2++, pt++) {
		float *pna = &na[*pt];	/* this point's coefficients */
		struct disp *p2p1 = p2 + nx;	/* one row down */
		struct disp *p2p2 = p2p1 + nx;	/* two rows down */
		struct disp *p2m1 = p2 - nx;	/* one row up */
		struct disp *p2m2 = p2m1 - nx;	/* two rows up */
		float piece0;

		if (tc0 < 0.0) {
			/* fourth order homogeneous case */
			p1->u = tcc*p2[0].u - p1[0].u
			    + tb16*( p2[1].u + p2[-1].u)
			    -  tbb*( p2[2].u + p2[-2].u)
			    + ta16*( p2p1[0].u + p2m1[0].u)
			    -  taa*( p2p2[0].u + p2m2[0].u)
			    + tcd10*( p2p1[ 1].w + p2m1[-1].w
			             -p2p1[-1].w - p2m1[ 1].w)
			    + tcd*(-p2p2[ 1].w - p2p1[ 2].w
			           -p2m2[-1].w - p2m1[-2].w
			           +p2p1[-2].w + p2p2[-1].w
			           +p2m1[ 2].w + p2m2[ 1].w);
			p1->w = tcc*p2[0].w - p1[0].w
			    + tb16*( p2p1[0].w + p2m1[0].w)
			    -  tbb*( p2p2[0].w + p2m2[0].w)
			    + ta16*( p2[1].w + p2[-1].w)
			    -  taa*( p2[2].w + p2[-2].w)
			    + tcd10*( p2p1[ 1].u + p2m1[-1].u
			             -p2p1[-1].u - p2m1[ 1].u)
			    + tcd*(-p2p2[ 1].u - p2p1[ 2].u
			           -p2m2[-1].u - p2m1[-2].u
			           +p2p1[-2].u + p2p2[-1].u
			           +p2m1[ 2].u + p2m2[ 1].u);
		} else {
			/* heterogeneous case: full 25-point stencil.
			 * piece0 gathers the off-row cross terms first. */
			piece0 = tlu2m2*p2m2[-2].w + tlu2m1*p2m2[-1].w
			       + tlu2p2*p2m2[ 2].w + tlu2p1*p2m2[ 1].w
			       + tlu1m2*p2m1[-2].w + tlu1m1*p2m1[-1].w
			       + tlu1p2*p2m1[ 2].w + tlu1p1*p2m1[ 1].w
			       + tld1m2*p2p1[-2].w + tld1m1*p2p1[-1].w
			       + tld1p2*p2p1[ 2].w + tld1p1*p2p1[ 1].w
			       + tld2m2*p2p2[-2].w + tld2m1*p2p2[-1].w
			       + tld2p2*p2p2[ 2].w + tld2p1*p2p2[ 1].w;
			p1->u = 2.*p2[0].u - p1[0].u + piece0
			    + tlu2*p2m2[0].u + tlu1*p2m1[0].u
			    + tld2*p2p2[0].u + tld1*p2p1[0].u
			    + tl00m2*p2[-2].u + tl00m1*p2[-1].u
			    + tl00p2*p2[ 2].u + tl00p1*p2[ 1].u
			    + tl00*p2[0].u;
			piece0 = tmu2m2*p2m2[-2].u + tmu2m1*p2m2[-1].u
			       + tmu2p2*p2m2[ 2].u + tmu2p1*p2m2[ 1].u
			       + tmu1m2*p2m1[-2].u + tmu1m1*p2m1[-1].u
			       + tmu1p2*p2m1[ 2].u + tmu1p1*p2m1[ 1].u
			       + tmd1m2*p2p1[-2].u + tmd1m1*p2p1[-1].u
			       + tmd1p2*p2p1[ 2].u + tmd1p1*p2p1[ 1].u
			       + tmd2m2*p2p2[-2].u + tmd2m1*p2p2[-1].u
			       + tmd2p2*p2p2[ 2].u + tmd2p1*p2p2[ 1].u;
			p1->w = 2.*p2[0].w - p1[0].w + piece0
			    + tmu2*p2m2[0].w + tmu1*p2m1[0].w
			    + tmd2*p2p2[0].w + tmd1*p2p1[0].w
			    + tm00m2*p2[-2].w + tm00m1*p2[-1].w
			    + tm00p2*p2[ 2].w + tm00p1*p2[ 1].w
			    + tm00*p2[0].w;
		}
	}
}