diff --git a/bld/build-namelist b/bld/build-namelist
index 7e5056b58a..283562264a 100755
--- a/bld/build-namelist
+++ b/bld/build-namelist
@@ -2754,20 +2754,15 @@ if ($waccmx) {
if (($ionos eq 'none') and ($wmx_opt =~ /ionosphere/) ) {
die "CAM Namelist ERROR: WACCMX must be configured with an active ionosphere wmx_opt is set to ionosphere\n";
}
- if (($ionos ne 'none') and ($wmx_opt =~ /neutral/) ) {
+ if (($ionos eq 'wxie') and ($wmx_opt =~ /neutral/) ) {
die "CAM Namelist ERROR: WACCMX cannot be configured with an active ionosphere wmx_opt is set to neutral\n";
}
- if (($ionos eq 'wxie') and ($wmx_opt =~ /ionosphere/) and ($dyn eq 'fv')) {
+ if (($ionos eq 'wxie') and ($wmx_opt =~ /ionosphere/)) {
# turn on electro-dynamo generated ion drift velocities
add_default($nl, 'ionos_xport_active', 'val'=>'.true.');
add_default($nl, 'ionos_edyn_active', 'val'=>'.true.');
add_default($nl, 'empirical_ion_velocities', 'val'=>'.false.');
- } elsif (($ionos eq 'wxi') and ($wmx_opt =~ /ionosphere/) and ($dyn eq 'fv')) {
- # turn off electro-dynamo generated ion drift velocities
- add_default($nl, 'ionos_xport_active', 'val'=>'.true.');
- add_default($nl, 'ionos_edyn_active', 'val'=>'.false.');
- add_default($nl, 'empirical_ion_velocities', 'val'=>'.true.');
} elsif (($ionos eq 'none') and ($wmx_opt =~ /neutral/)) {
add_default($nl, 'ionos_xport_active', 'val'=>'.false.');
add_default($nl, 'ionos_edyn_active', 'val'=>'.false.');
@@ -2778,6 +2773,11 @@ if ($waccmx) {
if ($dyn eq 'fv') {
add_default($nl, 'fv_high_altitude', 'val'=>'.true.');
}
+ if ($ionos ne 'none') {
+ add_default($nl, 'cam_physics_mesh');
+ add_default($nl, 'oplus_grid');
+ add_default($nl, 'edyn_grid');
+ }
add_default($nl,'dadadj_niter');
add_default($nl,'ionos_epotential_model');
if ($nl->get_value('ionos_epotential_model') =~ 'weimer') {
diff --git a/bld/config_files/definition.xml b/bld/config_files/definition.xml
index 6b5e881992..4dc7d970fc 100644
--- a/bld/config_files/definition.xml
+++ b/bld/config_files/definition.xml
@@ -49,7 +49,7 @@ Switch to turn on analytic initial conditions for the dynamics state:
Option to turn on waccmx thermosphere/ionosphere extension: 0 => no, 1 => yes
-
+
Ionosphere model used in WACCMX.
diff --git a/bld/configure b/bld/configure
index d377857549..b695c60135 100755
--- a/bld/configure
+++ b/bld/configure
@@ -84,7 +84,7 @@ OPTIONS
dlatxdlon for fv grids (dlat and dlon are the grid cell size
in degrees for latitude and longitude respectively); nexnp for
se grids.
- -ionosphere Ionophere module used in WACCMX [ none | wxi | wxie ].
+ -ionosphere Ionophere module used in WACCMX [ none | wxie ].
-macrophys Specify the macrophysics option [rk | park | clubb_sgs].
-max_n_rad_cnst Maximum number of constituents that are either radiatively
active, or in any single diagnostic list for the radiation.
@@ -1273,15 +1273,10 @@ my $nlev = 0;
# Defaults
if ($waccmx) {
- if ($ionos =~ /wxi/) {
- if ($phys_pkg eq 'cam6') {
+ if ($phys_pkg eq 'cam6') {
$nlev = 130;
- } else {
+ } else {
$nlev = 126;
- }
- }
- else {
- $nlev = 81;
}
}
elsif ($chem_pkg =~ /waccm_/) {
@@ -1881,15 +1876,6 @@ if ($cfg_ref->get('analytic_ic')) {
#WACCM-X extended thermosphere/ionosphere model
if ($waccmx) {
$cfg_cppdefs .= ' -DWACCMX_PHYS';
- if (($dyn_pkg ne 'fv') and ($ionos ne 'none')) {
- die "ERROR: Ionosphere is only available for FV dycore \n";
- }
- if ($ionos =~ /wxi/) {
- $cfg_cppdefs .= ' -DWACCMX_IONOS';
- }
- if ($ionos =~ /wxie/) {
- $cfg_cppdefs .= ' -DWACCMX_EDYN_ESMF';
- }
}
# PIO
@@ -2136,7 +2122,7 @@ sub write_filepath
if ($waccmx) {
print $fh "$camsrcdir/src/physics/waccmx\n";
- if ($ionos =~ /wxi/) {
+ if ($ionos =~ /wxie/) {
print $fh "$camsrcdir/src/ionosphere/waccmx\n";
}
}
diff --git a/bld/namelist_files/namelist_defaults_cam.xml b/bld/namelist_files/namelist_defaults_cam.xml
index bb392d15d0..39e3db5905 100644
--- a/bld/namelist_files/namelist_defaults_cam.xml
+++ b/bld/namelist_files/namelist_defaults_cam.xml
@@ -23,6 +23,7 @@
450
1800
+300
1800
1800
1800
@@ -131,12 +132,20 @@
atm/waccm/ic/WAX3548T08CO_2003top_f2000.waccm_0017bottom_4x5_L81_c160630.nc
atm/waccm/ic/WAX3548T08CO_2003top_f2000.waccm_0017bottom_10x15_L81_c141027.nc
atm/waccm/ic/waccmx_aqua_4x5_L126_c170705.nc
+atm/waccm/ic/f.c54137.FX2000climo.f19_f19.ZGTest.001.cam.i.0002-01-01-00000_c170817.nc
atm/waccm/ic/fx2000_0.9x1.25_126lev_0002-01-01-00000_c181221.nc
atm/waccm/ic/wcmx-cam6-phys_1.9x2.5_130lev_2000_c181115.nc
atm/waccm/ic/wcmx-cam6-phys_0.9x1.25_130lev_2000_c190122.nc
atm/waccm/ic/FC6X2000_f05_spinup01.cam.i.0002-01-01-00000_c190711.nc
atm/waccm/ic/waccmx_mam4_aqua_4x5_L130_c180803.nc
atm/waccm/ic/waccmx_mam4_aqua_1.9x2.5_L130_c180803.nc
+atm/waccm/ic/waccmx_aqua_ne5np4_126L_c210304.nc
+atm/waccm/ic/waccmx_ne16np4_126L_c200108.nc
+atm/waccm/ic/fx2000_phys-ionos-cpl_ne16_spinup03.cam.i.0002-01-01-00000_c201005.nc
+atm/waccm/ic/waccmx_aqua_ne16np4_126L_c191108.nc
+atm/waccm/ic/waccmx4_neutral_aquap_ne16np4_126lev_c200827.nc
+atm/waccm/ic/fx2000_phys-ionos-cpl_ne30_spinup01.cam.i.0002-01-01-00000_c201014.nc
+atm/waccm/ic/fx2000_phys-ionos-cpl_ne30pg3_spinup01.cam.i.0002-01-01-00000_c201014.nc
atm/cam/inic/fv3/aqua_0006-01-01_C24_L32_c200625.nc
atm/cam/inic/fv3/aqua_0006-01-01_C48_L32_c200625.nc
@@ -702,10 +711,31 @@
atm/waccm/efld/wei05sc_c080415.nc
5
30
+30
90
-.false.
-.true.
-.true.
+.true.
+
+
+ 144,96
+ 144,96
+ 288,192
+ 288,192
+ 576,384
+
+80x97
+160x193
+160x193
+320x385
+
+
+atm/cam/coords/fv0.47x0.63_esmf_c210305.nc
+atm/cam/coords/fv0.9x1.25_esmf_c210305.nc
+atm/cam/coords/fv1.9x2.5_esmf_200428.nc
+atm/cam/coords/fv4x5_esmf_c210305.nc
+atm/cam/coords/ne5np4_esmf_20191204.nc
+atm/cam/coords/ne30np4_esmf_c210305.nc
+atm/cam/coords/ne16np4_esmf_c210305.nc
+atm/cam/coords/ne30pg3_esmf_20200428.nc
1.00D0
@@ -2589,6 +2619,7 @@
1
1
+ 2
1
4
4
@@ -2606,15 +2637,18 @@
-1
1.0e13
+5.e15
-1
1.5625e13
+ 10.e15
-1
1.5625e13
5.0e5
2.0e5
+ 1.0e6
0.0
@@ -2627,7 +2661,7 @@
2
0.0
-100.0
+100.0
1
@@ -2641,6 +2675,7 @@
1
3
+ 5
3
5
4
diff --git a/bld/namelist_files/namelist_definition.xml b/bld/namelist_files/namelist_definition.xml
index 5118bb4417..8556aa8945 100644
--- a/bld/namelist_files/namelist_definition.xml
+++ b/bld/namelist_files/namelist_definition.xml
@@ -4332,6 +4332,15 @@ cache file to be consistent with how CAM was built.
Default: set by build-namelist
+
+Full pathname to CAM physics grid ESMF mesh file.
+N.B. this variable may not be set by the user.
+It is set by build-namelist via information in the configure
+cache file to be consistent with how CAM was built.
+Default: set by build-namelist
+
+
Runtime options of upper thermosphere WACCM-X. 'ionosphere' for
@@ -4366,6 +4375,25 @@ Switch to apply ring polar filter within in ionosphere O+ transport.
Default: FALSE
+
+Oplus transport grid size, entered as num_longitudes, num_latitudes.
+Default: set by build-namelist
+
+
+
+Magnetic grid resolution (nlon x nlat).
+Default: set by build-namelist
+
+
+
+Number of MPI processes on which to run the WACCM-X ionosphere
+electro-dynamo and O+ ion transport modules.
+Default: 0 (use all atmosphere tasks)
+
+
Switch to to turn on/off O+ transport in ionosphere.
@@ -4392,12 +4420,6 @@ Electric potential model used in the waccmx ionosphere.
Default: set by build-namelist
-
-Give the user the ability to input prescribed high-latitude electric potential.
-Default: FALSE
-
-
Co-latitudes (degrees) of the critical angles where the ionosphere
@@ -4413,15 +4435,33 @@ high latitude electric potential model.
Default: set by build-namelist.
-
+Give the user the ability to input prescribed high-latitude electric potential.
+Default: FALSE
+
+
+
-Full pathname of AMIE inputs for northern hemisphere.
+List of full pathnames of AMIE electic potential inputs for northern hemisphere.
Default: NONE.
-
+List of full pathnames of AMIE electic potential inputs for southern hemisphere.
+Default: NONE.
+
+
+
+Give the user the ability to input LTR high-latitude electric potential.
+Default: FALSE
+
+
+
-Full pathname of AMIE inputs for southern hemisphere.
+List of full pathnames of LTR electic potential inputs for both hemispheres.
Default: NONE.
diff --git a/bld/namelist_files/use_cases/sd_waccmx_ma_cam6.xml b/bld/namelist_files/use_cases/sd_waccmx_ma_cam6.xml
index d0ce64aac9..c62865924d 100644
--- a/bld/namelist_files/use_cases/sd_waccmx_ma_cam6.xml
+++ b/bld/namelist_files/use_cases/sd_waccmx_ma_cam6.xml
@@ -15,7 +15,7 @@
'epp_ion_rates'
-atm/waccm/ic/f.c61016.FC6XSD.f09_f09_mg17.test01.cam.i.2000-01-21-00000_c190325.nc
+atm/waccm/ic/f.c62004.FC6XSD.f09_f09_mg17.cam.i.1980-01-01-00000_c191211.nc
atm/waccm/ic/FC6XSD_f19_f19_mg17_L145_1981-01-01-00000_c190617.nc
50.
diff --git a/bld/namelist_files/use_cases/waccmx_ma_2000_cam4.xml b/bld/namelist_files/use_cases/waccmx_ma_2000_cam4.xml
index 1e4b78283f..5658d9cb1d 100644
--- a/bld/namelist_files/use_cases/waccmx_ma_2000_cam4.xml
+++ b/bld/namelist_files/use_cases/waccmx_ma_2000_cam4.xml
@@ -28,7 +28,7 @@
0.90D0
-atm/waccm/ic/f_2000_waccmx_cesm1_1_beta08.cam.i.2019-01-01-00000_c140827.nc
+atm/waccm/ic/f_2000_waccmx_cesm1_1_beta08.cam.i.2019-01-01-00000_c140827.nc
atm/cam/topo/USGS-gtopo30_1.9x2.5_remap_c050602.nc
@@ -47,12 +47,12 @@
CYCLICAL
2000
-
+
.true.
atm/waccm/qbo/qbocyclic28months.nc'
-.true.
+.true.
@@ -72,55 +72,55 @@
0, -3, -24
'A', 'I', 'I'
-
+
- 'AOA1', 'AOA2', 'BR', 'BRCL', 'BRO', 'BRONO2', 'CCL4', 'CF2CLBR', 'CF3BR',
- 'CFC11', 'CFC113', 'CFC12', 'CH2O', 'CH3BR', 'CH3CCL3', 'CH3CL', 'CH3O2', 'CH3OOH',
- 'CH4', 'CL', 'CL2', 'CL2O2', 'CLDHGH', 'CLDLOW', 'CLDMED', 'CLDTOT', 'CLO', 'CLONO2',
- 'CLOUD', 'CO', 'CO2', 'DTCOND', 'DTV', 'DUV', 'DVV', 'EKGW', 'FLNS', 'FLNSC', 'FLNT',
+ 'AOA1', 'AOA2', 'BR', 'BRCL', 'BRO', 'BRONO2', 'CCL4', 'CF2CLBR', 'CF3BR',
+ 'CFC11', 'CFC113', 'CFC12', 'CH2O', 'CH3BR', 'CH3CCL3', 'CH3CL', 'CH3O2', 'CH3OOH',
+ 'CH4', 'CL', 'CL2', 'CL2O2', 'CLDHGH', 'CLDLOW', 'CLDMED', 'CLDTOT', 'CLO', 'CLONO2',
+ 'CLOUD', 'CO', 'CO2', 'DTCOND', 'DTV', 'DUV', 'DVV', 'EKGW', 'FLNS', 'FLNSC', 'FLNT',
'FLNTC', 'FSDS', 'FSNS', 'FSNSC', 'FSNT', 'FSNTC', 'H', 'H2', 'H2O', 'H2O2', 'HBR',
'HCFC22', 'HCL', 'HNO3', 'HO2', 'HO2NO2', 'HOBR', 'HOCL', 'HORZ', 'LANDFRAC', 'LHFLX',
'N', 'N2O', 'N2O5', 'NO', 'NO2', 'NO3', 'O', 'O1D', 'O2', 'O3',
'OCLO', 'OCNFRAC', 'OH', 'OMEGA', 'PHIS', 'PRECC', 'PRECL', 'PS', 'Q', 'QFLX',
'QPERT', 'QRL', 'QRLNLTE', 'QRS', 'RELHUM', 'SHFLX', 'SOLIN', 'SWCF', 'QCP',
- 'QTHERMAL', 'QRL_TOT', 'QRS_TOT', 'QJOULE', 'PSL', 'HNO3_STS', 'HNO3_NAT',
- 'HNO3_GAS', 'NO_Aircraft', 'NO_Lightning', 'QNO', 'QRS_AUR', 'QRS_CO2NIR', 'QRS_EUV',
- 'SAD_ICE', 'SAD_LNAT', 'SAD_SULFC', 'T', 'TREFHT',
- 'TTGW', 'U', 'UTGWORO', 'UTGWSPEC', 'V', 'VERT', 'VTGWORO', 'VTGWSPEC', 'Z3', 'O2_1S',
- 'O2_1D', 'NOX', 'NOY', 'CLOX', 'CLOY', 'BROX', 'BROY', 'TCLY', 'TOTH', 'QJOULE','UI','VI','WI',
+ 'QTHERMAL', 'QRL_TOT', 'QRS_TOT', 'QJOULE', 'PSL', 'HNO3_STS', 'HNO3_NAT',
+ 'HNO3_GAS', 'NO_Aircraft', 'NO_Lightning', 'QNO', 'QRS_AUR', 'QRS_CO2NIR', 'QRS_EUV',
+ 'SAD_ICE', 'SAD_LNAT', 'SAD_SULFC', 'T', 'TREFHT',
+ 'TTGW', 'U', 'UTGWORO', 'UTGWSPEC', 'V', 'VERT', 'VTGWORO', 'VTGWSPEC', 'Z3', 'O2_1S',
+ 'O2_1D', 'NOX', 'NOY', 'CLOX', 'CLOY', 'BROX', 'BROY', 'TCLY', 'TOTH', 'QJOULE','UI','VI','WI',
'UIONTEND', 'VIONTEND', 'DTCORE', 'T_24_COS', 'T_24_SIN', 'T_12_COS', 'T_12_SIN',
'OMEGA_24_COS', 'OMEGA_24_SIN', 'OMEGA_12_COS', 'OMEGA_12_SIN',
- 'U_24_COS', 'U_24_SIN', 'U_12_COS', 'U_12_SIN', 'V_24_COS', 'V_24_SIN', 'V_12_COS',
+ 'U_24_COS', 'U_24_SIN', 'U_12_COS', 'U_12_SIN', 'V_24_COS', 'V_24_SIN', 'V_12_COS',
'V_12_SIN', 'PS_24_COS', 'PS_24_SIN', 'PS_12_COS', 'PS_12_SIN', 'CLDLIQ','CLDICE','CONCLD',
'FRONTGF:I', 'BUTGWSPEC', 'BTAUE', 'BTAUW', 'BTAUN', 'BTAUS','TAUE','TAUW','TAUN','TAUS',
'TAUGWX', 'TAUGWY', 'TAUX','TAUY','SNOWHLND','SNOWHICE','ICEFRAC','FSDSC','SFNO', 'SFCO',
'SFCH2O','CFC11STAR','TROPP_FD', 'Op', 'O2p', 'Np', 'NOp', 'N2p', 'e'
-
+
- 'PS', 'Z3', 'T', 'U', 'V', 'FLNT','PSL',
+ 'PS', 'Z3', 'T', 'U', 'V', 'FLNT','PSL',
'OMEGA','FSDS','FSDSC','CLOUD','CONCLD','SNOWHLND','SNOWHICE',
- 'CH3CL', 'CFC11', 'CFC12', 'CFC113', 'HCFC22', 'CCL4', 'CH3CCL3',
+ 'CH3CL', 'CFC11', 'CFC12', 'CFC113', 'HCFC22', 'CCL4', 'CH3CCL3',
'CH3BR', 'CF3BR', 'CF2CLBR',
'CO', 'CO2', 'CH2O', 'CH3OOH', 'CH4',
'O3', 'O', 'O1D',
'N', 'NO', 'NO2', 'NO3', 'N2O5', 'HNO3', 'HO2NO2', 'NOX', 'NOY', 'N2O',
'H', 'H2', 'OH', 'HO2', 'H2O2', 'H2O',
- 'CL','CL2', 'CLO', 'OCLO', 'CL2O2', 'CLONO2', 'HOCL', 'HCL', 'CLOX', 'CLOY',
+ 'CL','CL2', 'CLO', 'OCLO', 'CL2O2', 'CLONO2', 'HOCL', 'HCL', 'CLOX', 'CLOY',
'BR', 'BRO', 'HOBR', 'HBR', 'BRCL', 'BRONO2', 'BROX', 'BROY', 'TCLY',
'jo2_a', 'jo2_b', 'jo3_a', 'jo3_b', 'jhocl', 'jno3_b', 'jcl2o2',
'SAD_SULFC', 'SAD_LNAT', 'SAD_ICE','AOA1','AOA2',
'O2', 'CLDLIQ', 'CLDICE', 'ASDIR',
'VTHzm', 'WTHzm', 'UVzm', 'UWzm', 'TH', 'MSKtem',
'O2_1S', 'O2_1D',
- 'Op', 'O2p', 'Np', 'NOp', 'N2p', 'e',
- 'UIONTEND', 'VIONTEND', 'UTGWSPEC', 'UTGWORO', 'VTGWSPEC', 'VTGWORO', 'TTGW',
+ 'Op', 'O2p', 'Np', 'NOp', 'N2p', 'e',
+ 'UIONTEND', 'VIONTEND', 'UTGWSPEC', 'UTGWORO', 'VTGWSPEC', 'VTGWORO', 'TTGW',
'DTV', 'DUV', 'DVV', 'EKGW',
'QJOULE', 'QCP', 'QRL_TOT', 'QRS_TOT', 'UI', 'VI', 'WI'
-
+
'AOA1', 'AOA2', 'BR', 'BRCL', 'BRO', 'BRONO2', 'CCL4', 'CF2CLBR', 'CF3BR', 'CFC11', 'CFC113',
'CFC12', 'CH2O', 'CH3BR', 'CH3CCL3', 'CH3CL', 'CH3O2', 'CH3OOH', 'CH4', 'CL', 'CL2', 'CL2O2',
@@ -149,4 +149,3 @@
'neutral'
-
diff --git a/bld/namelist_files/use_cases/waccmxie_ma_2000_cam4.xml b/bld/namelist_files/use_cases/waccmxie_ma_2000_cam4.xml
index 64b77cb7b5..72c1fa7185 100644
--- a/bld/namelist_files/use_cases/waccmxie_ma_2000_cam4.xml
+++ b/bld/namelist_files/use_cases/waccmxie_ma_2000_cam4.xml
@@ -30,7 +30,7 @@
2000
-
+
.true.
atm/waccm/qbo/qbocyclic28months.nc
@@ -53,7 +53,7 @@
2000
-.true.
+.true.
atm/cam/chem/trop_mozart_aero/aero
@@ -94,11 +94,11 @@
0, -1, -24, -24, -120, -24
1, 24, 7, 7, 10, 365
-
-
- 'Z3', 'T', 'TIon', 'TElec', 'e', 'U', 'V', 'OMEGA', 'UI', 'VI', 'WI', 'ElecColDens', 'PHIM2D', 'PS',
- 'EDYN_ZIGM11_PED', 'EDYN_ZIGM2_HAL', 'ED1', 'ED2' 'O', 'O2', 'H', 'NO', 'CO2', 'N', 'O1D', 'Op2P', 'Op2D', 'Op',
- 'Np', 'N2p', 'O2p', 'NOp', 'QJOULE', 'SIGMAHAL', 'SIGMAPED', 'SolIonRate_Tot', 'QRS_TOT', 'QO3', 'QCO2',
+
+
+ 'Z3', 'T', 'TIon', 'TElec', 'e', 'U', 'V', 'OMEGA', 'UI', 'VI', 'WI', 'ElecColDens', 'PHIM2D', 'PS',
+ 'EDYN_ZIGM11_PED', 'EDYN_ZIGM2_HAL', 'ED1', 'ED2' 'O', 'O2', 'H', 'NO', 'CO2', 'N', 'O1D', 'Op2P', 'Op2D', 'Op',
+ 'Np', 'N2p', 'O2p', 'NOp', 'QJOULE', 'SIGMAHAL', 'SIGMAPED', 'SolIonRate_Tot', 'QRS_TOT', 'QO3', 'QCO2',
'QNO', 'QO3P', 'QHC2S', 'QJOULE', 'EKGW', 'TTGW', 'UTGW_TOTAL',
'T_24_COS', 'T_24_SIN', 'T_12_COS', 'T_12_SIN', 'T_08_COS', 'T_08_SIN',
'U_24_COS', 'U_24_SIN', 'U_12_COS', 'U_12_SIN', 'U_08_COS', 'U_08_SIN',
@@ -107,50 +107,50 @@
'ALATM', 'ALONM', 'AOA1', 'AOA2', 'BR', 'BRCL', 'BRO', 'BRONO2', 'CCL4', 'CF2CLBR', 'CF3BR', 'CFC11',
'CFC113', 'CFC12', 'CH2O', 'CH3BR', 'CH3CCL3', 'CH3CL', 'CH3O2', 'CH3OOH', 'CH4', 'CL',
'CL2', 'CL2O2', 'CLDHGH', 'CLDLOW', 'CLDMED', 'CLDTOT', 'CLO', 'CLONO2', 'CLOUD', 'CO',
- 'DTCOND', 'DTV', 'DUV', 'DVV', 'FLNS', 'FLNSC', 'FLNT', 'FLNTC','FSDS', 'FSNS', 'FSNSC', 'FSNT', 'FSNTC',
- 'H2', 'H2O', 'H2O2', 'HBR', 'HCFC22', 'HCL', 'HNO3', 'HO2', 'HO2NO2', 'HOBR', 'HOCL', 'HORZ',
- 'LANDFRAC', 'LHFLX', 'N2O', 'N2O5', 'NO2', 'NO3', 'O3', 'OCLO', 'OCNFRAC', 'OH', 'PHIS',
+ 'DTCOND', 'DTV', 'DUV', 'DVV', 'FLNS', 'FLNSC', 'FLNT', 'FLNTC','FSDS', 'FSNS', 'FSNSC', 'FSNT', 'FSNTC',
+ 'H2', 'H2O', 'H2O2', 'HBR', 'HCFC22', 'HCL', 'HNO3', 'HO2', 'HO2NO2', 'HOBR', 'HOCL', 'HORZ',
+ 'LANDFRAC', 'LHFLX', 'N2O', 'N2O5', 'NO2', 'NO3', 'O3', 'OCLO', 'OCNFRAC', 'OH', 'PHIS',
'PRECC', 'PRECL', 'Q', 'QFLX', 'QPERT', 'QRL', 'QRS', 'RELHUM', 'SHFLX', 'SOLIN', 'SWCF', 'QCP', 'QTHERMAL',
'QRL_TOT', 'PSL', 'HNO3_STS', 'HNO3_NAT', 'HNO3_GAS', 'NO_Aircraft', 'NO_Lightning',
'QRS_AUR', 'QRS_CO2NIR', 'QRS_EUV', 'SAD_ICE', 'SAD_LNAT', 'SAD_SULFC', 'TREFHT',
- 'VERT', 'VTGWORO', 'VTGWSPEC', 'O2_1S', 'O2_1D', 'NOX', 'NOY', 'CLOX', 'CLOY', 'BROX', 'BROY',
- 'TCLY', 'TOTH', 'UIONTEND', 'VIONTEND', 'DTCORE', 'CLDLIQ', 'CLDICE', 'CONCLD', 'FRONTGF:I',
- 'BTAUE', 'BTAUW', 'BTAUN', 'BTAUS', 'TAUE', 'TAUW', 'TAUN', 'TAUS', 'TAUGWX', 'TAUGWY', 'TAUX', 'TAUY',
+ 'VERT', 'VTGWORO', 'VTGWSPEC', 'O2_1S', 'O2_1D', 'NOX', 'NOY', 'CLOX', 'CLOY', 'BROX', 'BROY',
+ 'TCLY', 'TOTH', 'UIONTEND', 'VIONTEND', 'DTCORE', 'CLDLIQ', 'CLDICE', 'CONCLD', 'FRONTGF:I',
+ 'BTAUE', 'BTAUW', 'BTAUN', 'BTAUS', 'TAUE', 'TAUW', 'TAUN', 'TAUS', 'TAUGWX', 'TAUGWY', 'TAUX', 'TAUY',
'SNOWHLND', 'SNOWHICE', 'ICEFRAC', 'FSDSC', 'SFNO', 'SFCO', 'SFCH2O', 'CFC11STAR', 'TROPP_FD',
'KVH', 'KVM', 'KVT', 'Qbkgndtot', 'Z3GM', 'OpDens', 'EDens'
-
-
- 'Z3', 'T', 'TIon', 'TElec', 'e', 'U', 'V', 'OMEGA', 'UI', 'VI', 'WI', 'ElecColDens', 'PHIM2D', 'PS',
+
+
+ 'Z3', 'T', 'TIon', 'TElec', 'e', 'U', 'V', 'OMEGA', 'UI', 'VI', 'WI', 'ElecColDens', 'PHIM2D', 'PS',
'EDYN_ZIGM11_PED', 'EDYN_ZIGM2_HAL', 'ED1', 'ED2', 'O', 'O2', 'H'
-
-
- 'Z3', 'T', 'TIon', 'TElec', 'e', 'U', 'V', 'OMEGA', 'UI', 'VI', 'WI', 'ElecColDens', 'PHIM2D', 'PS',
- 'EDYN_ZIGM11_PED', 'EDYN_ZIGM2_HAL', 'ED1', 'ED2', 'O', 'O2', 'H', 'NO', 'CO2', 'N', 'O1D',
+
+
+ 'Z3', 'T', 'TIon', 'TElec', 'e', 'U', 'V', 'OMEGA', 'UI', 'VI', 'WI', 'ElecColDens', 'PHIM2D', 'PS',
+ 'EDYN_ZIGM11_PED', 'EDYN_ZIGM2_HAL', 'ED1', 'ED2', 'O', 'O2', 'H', 'NO', 'CO2', 'N', 'O1D',
'Op2P', 'Op2D', 'Op', 'Np', 'N2p', 'O2p', 'NOp', 'QJOULE', 'SIGMAHAL', 'SIGMAPED', 'SolIonRate_Tot',
'Z3GM', 'OpDens', 'EDens'
-
-
- 'Z3', 'T', 'TIon', 'TElec', 'e', 'U', 'V', 'OMEGA', 'UI', 'VI', 'WI', 'ElecColDens', 'PHIM2D', 'PS',
- 'EDYN_ZIGM11_PED', 'EDYN_ZIGM2_HAL', 'ED1', 'ED2', 'O', 'O2', 'H', 'NO', 'CO2', 'N', 'QRS_TOT',
- 'QO3', 'QCO2', 'QNO', 'QO3P', 'QHC2S', 'QJOULE', 'EKGW', 'TTGW', 'UTGW_TOTAL',
+
+
+ 'Z3', 'T', 'TIon', 'TElec', 'e', 'U', 'V', 'OMEGA', 'UI', 'VI', 'WI', 'ElecColDens', 'PHIM2D', 'PS',
+ 'EDYN_ZIGM11_PED', 'EDYN_ZIGM2_HAL', 'ED1', 'ED2', 'O', 'O2', 'H', 'NO', 'CO2', 'N', 'QRS_TOT',
+ 'QO3', 'QCO2', 'QNO', 'QO3P', 'QHC2S', 'QJOULE', 'EKGW', 'TTGW', 'UTGW_TOTAL',
'SolIonRate_Tot', 'Z3GM', 'OpDens', 'EDens'
-
-
+
+
'T_24_COS', 'T_24_SIN', 'T_12_COS', 'T_12_SIN', 'T_08_COS', 'T_08_SIN',
'U_24_COS', 'U_24_SIN', 'U_12_COS', 'U_12_SIN', 'U_08_COS', 'U_08_SIN',
'V_24_COS', 'V_24_SIN', 'V_12_COS', 'V_12_SIN', 'V_08_COS', 'V_08_SIN',
'OMEGA_24_COS', 'OMEGA_24_SIN', 'OMEGA_12_COS', 'OMEGA_12_SIN', 'OMEGA_08_COS', 'OMEGA_08_SIN'
-
-
- 'MSKtem', 'PS', 'PSL', 'VTHzm', 'UVzm', 'UWzm', 'Uzm', 'Vzm', 'THzm','Wzm', 'PHIS'
+
+
+ 'MSKtem', 'PS', 'PSL', 'VTHzm', 'UVzm', 'UWzm', 'Uzm', 'Vzm', 'THzm','Wzm', 'PHIS'
-42
+42
'SolIonRate_Tot = jeuv_1 + jeuv_2 + jeuv_3 + jeuv_4 + jeuv_5 + jeuv_6 + jeuv_7 + jeuv_8 + jeuv_9 + jeuv_10 + jeuv_11 + jeuv_14 + jeuv_15 + jeuv_16 +',
@@ -171,6 +171,6 @@
atm/cam/dst/dst_source2x2tuned-cam4-06132012.nc
atm/cam/topo/USGS-gtopo30_1.9x2.5_remap_c050602.nc
1.200D0
-.false.
+.false.
diff --git a/cime_config/config_component.xml b/cime_config/config_component.xml
index 8ac578b80d..a0a9ede3aa 100644
--- a/cime_config/config_component.xml
+++ b/cime_config/config_component.xml
@@ -146,6 +146,7 @@
-chem waccm_tsmlt_mam4
-chem waccm_mad
-waccmx -ionosphere wxie
+ -waccmx -ionosphere none -chem waccm_ma
-chem waccm_ma
-chem waccm_mad
-chem waccm_ma_mam4
@@ -198,6 +199,7 @@
2000_cam4_trop_chem
waccmxie_ma_2000_cam4
+ waccmx_ma_2000_cam4
2000_cam6
waccm_tsmlt_2000_cam6
diff --git a/cime_config/config_pes.xml b/cime_config/config_pes.xml
index 3c8a1feff4..6b95ee6b64 100644
--- a/cime_config/config_pes.xml
+++ b/cime_config/config_pes.xml
@@ -7,67 +7,67 @@
none
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
none
- -1
- -1
- -1
- -1
- -1
- -1
- -1
- -1
+ -1
+ -1
+ -1
+ -1
+ -1
+ -1
+ -1
+ -1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -114,34 +114,34 @@
none
- -4
- -4
- -4
- -4
- -4
- -4
- -4
- -4
+ -4
+ -4
+ -4
+ -4
+ -4
+ -4
+ -4
+ -4
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -151,34 +151,34 @@
none
- -4
- -4
- -4
- -4
- -4
- -4
- -4
- -4
+ -4
+ -4
+ -4
+ -4
+ -4
+ -4
+ -4
+ -4
- 2
- 2
- 2
- 2
- 2
- 2
- 2
- 2
+ 2
+ 2
+ 2
+ 2
+ 2
+ 2
+ 2
+ 2
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -188,34 +188,34 @@
none
- -40
- -40
- -40
- -40
- -40
- -40
- -40
- -40
+ -40
+ -40
+ -40
+ -40
+ -40
+ -40
+ -40
+ -40
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -225,34 +225,108 @@
none
- 2048
- 2048
- 2048
- 2048
- 2048
- 2048
- 2048
- 2048
+ 2048
+ 2048
+ 2048
+ 2048
+ 2048
+ 2048
+ 2048
+ 2048
+
+
+ 8
+ 8
+ 8
+ 8
+ 8
+ 8
+ 8
+ 8
+
+
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+
+
+
+
+
+
+
+ none
+
+ 720
+ 720
+ 720
+ 720
+ 720
+ 720
+ 720
+ 720
+
+
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+
+
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+
+
+
+
+
+
+
+ none
+
+ 864
+ 864
+ 864
+ 864
+ 864
+ 864
+ 864
+ 864
- 8
- 8
- 8
- 8
- 8
- 8
- 8
- 8
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -297,36 +371,36 @@
- none
+ none
- -16
- -16
- -16
- -16
- -16
- -16
- -16
- -16
+ -16
+ -16
+ -16
+ -16
+ -16
+ -16
+ -16
+ -16
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -336,34 +410,34 @@
none
- 4800
- 4800
- 4800
- 4800
- 4800
- 4800
- 4800
- 4800
+ 4800
+ 4800
+ 4800
+ 4800
+ 4800
+ 4800
+ 4800
+ 4800
- 4
- 4
- 4
- 4
- 4
- 4
- 4
- 4
+ 4
+ 4
+ 4
+ 4
+ 4
+ 4
+ 4
+ 4
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -373,34 +447,34 @@
none
- 16384
- 16384
- 16384
- 16384
- 16384
- 16384
- 16384
- 16384
+ 16384
+ 16384
+ 16384
+ 16384
+ 16384
+ 16384
+ 16384
+ 16384
- 8
- 8
- 8
- 8
- 8
- 8
- 8
- 8
+ 8
+ 8
+ 8
+ 8
+ 8
+ 8
+ 8
+ 8
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -410,34 +484,34 @@
none
- 8192
- 8192
- 8192
- 8192
- 8192
- 8192
- 8192
- 8192
+ 8192
+ 8192
+ 8192
+ 8192
+ 8192
+ 8192
+ 8192
+ 8192
- 2
- 2
- 2
- 2
- 2
- 2
- 2
- 2
+ 2
+ 2
+ 2
+ 2
+ 2
+ 2
+ 2
+ 2
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -447,34 +521,34 @@
none
- -32
- -32
- -32
- -32
- -32
- -32
- -32
- -32
+ -32
+ -32
+ -32
+ -32
+ -32
+ -32
+ -32
+ -32
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -484,34 +558,34 @@
none
- 64
- 64
- 64
- 64
- 64
- 64
- 64
- 64
+ 64
+ 64
+ 64
+ 64
+ 64
+ 64
+ 64
+ 64
1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -521,34 +595,34 @@
none
- -2
- -2
- -2
- -2
- -2
- -2
- -2
- -2
+ -2
+ -2
+ -2
+ -2
+ -2
+ -2
+ -2
+ -2
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -558,34 +632,34 @@
none
- 224
- 224
- 224
- 224
- 224
- 224
- 224
- 224
+ 224
+ 224
+ 224
+ 224
+ 224
+ 224
+ 224
+ 224
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -595,34 +669,34 @@
none
- 192
- 192
- 192
- 192
- 192
- 192
- 192
- 192
+ 192
+ 192
+ 192
+ 192
+ 192
+ 192
+ 192
+ 192
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -632,34 +706,34 @@
none
- 192
- 192
- 192
- 192
- 192
- 192
- 192
- 192
+ 192
+ 192
+ 192
+ 192
+ 192
+ 192
+ 192
+ 192
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -669,34 +743,34 @@
none
- 240
- 240
- 240
- 240
- 240
- 240
- 240
- 240
+ 240
+ 240
+ 240
+ 240
+ 240
+ 240
+ 240
+ 240
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -711,10 +785,10 @@
360
360
360
- 360
- 360
- 360
- 360
+ 360
+ 360
+ 360
+ 360
3
@@ -744,14 +818,14 @@
none
- 144
- 144
- 144
- 144
- 144
- 144
- 144
- 144
+ 384
+ 384
+ 384
+ 384
+ 384
+ 384
+ 384
+ 384
3
@@ -812,36 +886,36 @@
none
- 192
- 192
- 192
- 192
- 192
- 192
- 192
- 192
+ 192
+ 192
+ 192
+ 192
+ 192
+ 192
+ 192
+ 192
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
-
+
@@ -849,14 +923,14 @@
none
- 288
- 288
- 288
- 288
- 288
- 288
- 288
- 288
+ 576
+ 576
+ 576
+ 576
+ 576
+ 576
+ 576
+ 576
3
@@ -887,34 +961,34 @@
none
- -4
- -4
- -4
- -4
- -4
- -4
- -4
- -4
+ -4
+ -4
+ -4
+ -4
+ -4
+ -4
+ -4
+ -4
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -924,34 +998,34 @@
none
- -208
- -208
- -208
- -208
- -208
- -208
- -208
- -208
+ -208
+ -208
+ -208
+ -208
+ -208
+ -208
+ -208
+ -208
- 8
- 8
- 8
- 8
- 8
- 8
- 8
- 8
+ 8
+ 8
+ 8
+ 8
+ 8
+ 8
+ 8
+ 8
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -961,34 +1035,34 @@
none
- 384
- 384
- 384
- 384
- 384
- 384
- 384
- 384
+ 384
+ 384
+ 384
+ 384
+ 384
+ 384
+ 384
+ 384
- 3
- 3
- 3
- 3
- 3
- 3
- 3
- 3
+ 3
+ 3
+ 3
+ 3
+ 3
+ 3
+ 3
+ 3
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -998,34 +1072,34 @@
none
- -8
- -8
- -8
- -8
- -8
- -8
- -8
- -8
+ -8
+ -8
+ -8
+ -8
+ -8
+ -8
+ -8
+ -8
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -1033,34 +1107,34 @@
none
- 768
- 768
- 768
- 768
- 768
- 768
- 768
- 768
+ 768
+ 768
+ 768
+ 768
+ 768
+ 768
+ 768
+ 768
- 3
- 3
- 3
- 3
- 3
- 3
- 3
- 3
+ 3
+ 3
+ 3
+ 3
+ 3
+ 3
+ 3
+ 3
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -1103,34 +1177,34 @@
none
- 480
- 480
- 480
- 480
- 480
- 480
- 480
- 480
+ 480
+ 480
+ 480
+ 480
+ 480
+ 480
+ 480
+ 480
1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -1140,34 +1214,34 @@
none
- -16
- -16
- -16
- -16
- -16
- -16
- -16
- -16
+ -16
+ -16
+ -16
+ -16
+ -16
+ -16
+ -16
+ -16
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -1177,34 +1251,34 @@
none
- 960
- 960
- 960
- 960
- 960
- 960
- 960
- 960
+ 960
+ 960
+ 960
+ 960
+ 960
+ 960
+ 960
+ 960
1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -1214,14 +1288,14 @@
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
@@ -1233,34 +1307,34 @@
none
- -1
- -1
- -1
- -1
- -1
- -1
- -1
- -1
+ -1
+ -1
+ -1
+ -1
+ -1
+ -1
+ -1
+ -1
1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -1271,34 +1345,34 @@
none
- -4
- -4
- -4
- -4
- -4
- -4
- -4
- -4
+ -4
+ -4
+ -4
+ -4
+ -4
+ -4
+ -4
+ -4
1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -1309,34 +1383,34 @@
none
- -16
- -16
- -16
- -16
- -16
- -16
- -16
- -16
+ -16
+ -16
+ -16
+ -16
+ -16
+ -16
+ -16
+ -16
1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -1347,34 +1421,34 @@
none
- -32
- -32
- -32
- -32
- -32
- -32
- -32
- -32
+ -32
+ -32
+ -32
+ -32
+ -32
+ -32
+ -32
+ -32
1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -1385,34 +1459,34 @@
none
- -32
- -32
- -32
- -32
- -32
- -32
- -32
- -32
+ -32
+ -32
+ -32
+ -32
+ -32
+ -32
+ -32
+ -32
1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -1425,34 +1499,34 @@
none
- -91
- -91
- -91
- -91
- -91
- -91
- -91
- -91
+ -91
+ -91
+ -91
+ -91
+ -91
+ -91
+ -91
+ -91
1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -1463,34 +1537,34 @@
none
- -118
- -118
- -118
- -118
- -118
- -118
- -118
- -118
+ -118
+ -118
+ -118
+ -118
+ -118
+ -118
+ -118
+ -118
1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
@@ -1501,34 +1575,34 @@
none
- -135
- -135
- -135
- -135
- -135
- -135
- -135
- -135
+ -135
+ -135
+ -135
+ -135
+ -135
+ -135
+ -135
+ -135
1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
+ 1
- 0
- 0
- 0
- 0
- 0
- 0
- 0
- 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
diff --git a/cime_config/testdefs/testlist_cam.xml b/cime_config/testdefs/testlist_cam.xml
index 7b1e5a1b5e..c61136515d 100644
--- a/cime_config/testdefs/testlist_cam.xml
+++ b/cime_config/testdefs/testlist_cam.xml
@@ -562,13 +562,11 @@
-
-
+
-
@@ -891,23 +889,23 @@
-
-
+
+
+
-
+
-
-
+
+
-
@@ -1620,6 +1618,15 @@
+
+
+
+
+
+
+
+
+
@@ -1643,6 +1650,23 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -1667,7 +1691,7 @@
-
+
@@ -1736,7 +1760,7 @@
-
+
@@ -1936,6 +1960,9 @@
+
+
+
diff --git a/cime_config/testdefs/testmods_dirs/cam/outfrq1d_amie/user_nl_cam b/cime_config/testdefs/testmods_dirs/cam/outfrq1d_amie/user_nl_cam
index 23fbd163a2..114edf9b30 100644
--- a/cime_config/testdefs/testmods_dirs/cam/outfrq1d_amie/user_nl_cam
+++ b/cime_config/testdefs/testmods_dirs/cam/outfrq1d_amie/user_nl_cam
@@ -1,8 +1,11 @@
-mfilt=1,1,1,1,1,1
-ndens=1,1,1,1,1,1
-nhtfrq=-24,-24,-24,-24,-24,-24
+mfilt=1,1,1,1,1,1,1,1,1
+ndens=1,1,1,1,1,1,1,1,1
+nhtfrq=-24,-24,-24,-24,-24,-24,-24,-24,-24
+avgflag_pertape = 'A', 'I', 'I', 'A', 'A', 'I', 'A'
steady_state_ion_elec_temp=.false.
ionos_epotential_amie=.true.
-amienh_file = '$DIN_LOC_ROOT/atm/waccm/amie_data/oct27_31_2003_nh.nc'
-amiesh_file = '$DIN_LOC_ROOT/atm/waccm/amie_data/oct27_31_2003_sh.nc'
-fincl6 = 'amie_phihm','amie_efxm','amie_kevm','amie_efxg','amie_kevg','amie_efx_phys','amie_kev_phys'
+amienh_files = '$DIN_LOC_ROOT/atm/waccm/amie_data/oct27_31_2003_nh.nc'
+amiesh_files = '$DIN_LOC_ROOT/atm/waccm/amie_data/oct27_31_2003_sh.nc'
+fincl6 = 'prescr_phihm','prescr_efxm','prescr_kevm','prescr_efxp','prescr_kevp','amie_efx_phys','amie_kev_phys'
+oplus_grid = 144,96
+ionos_xport_nsplit = 8
diff --git a/cime_config/testdefs/testmods_dirs/cam/outfrq1d_ltr/shell_commands b/cime_config/testdefs/testmods_dirs/cam/outfrq1d_ltr/shell_commands
new file mode 100644
index 0000000000..67c212d596
--- /dev/null
+++ b/cime_config/testdefs/testmods_dirs/cam/outfrq1d_ltr/shell_commands
@@ -0,0 +1,5 @@
+./xmlchange ROF_NCPL=\$ATM_NCPL
+./xmlchange GLC_NCPL=\$ATM_NCPL
+./xmlchange RUN_STARTDATE=2010-08-03
+./xmlchange START_TOD=0
+./xmlchange CLM_FORCE_COLDSTART=on
diff --git a/cime_config/testdefs/testmods_dirs/cam/outfrq1d_ltr/user_nl_cam b/cime_config/testdefs/testmods_dirs/cam/outfrq1d_ltr/user_nl_cam
new file mode 100644
index 0000000000..2148d7e709
--- /dev/null
+++ b/cime_config/testdefs/testmods_dirs/cam/outfrq1d_ltr/user_nl_cam
@@ -0,0 +1,8 @@
+mfilt=1,1,1,1,1,1,1,1,1
+ndens=1,1,1,1,1,1,1,1,1
+nhtfrq=-24,-24,-24,-24,-24,-24,-24,-24,-24
+avgflag_pertape = 'A', 'I', 'I', 'A', 'A', 'I', 'A'
+inithist='ENDOFRUN'
+ionos_epotential_ltr=.true.
+ltr_files = '$DIN_LOC_ROOT/atm/waccm/amie_data/REMIX_3-4_Aug_2010.nc'
+fincl6 = 'prescr_phihm','prescr_efxm','prescr_kevm','prescr_efxp','prescr_kevp','ltr_efx_phys','ltr_kev_phys'
diff --git a/cime_config/testdefs/testmods_dirs/cam/outfrq3s_WX81/shell_commands b/cime_config/testdefs/testmods_dirs/cam/outfrq3s_WX81/shell_commands
deleted file mode 100644
index 0554b09004..0000000000
--- a/cime_config/testdefs/testmods_dirs/cam/outfrq3s_WX81/shell_commands
+++ /dev/null
@@ -1,5 +0,0 @@
-./xmlchange ROF_NCPL=\$ATM_NCPL
-./xmlchange GLC_NCPL=\$ATM_NCPL
-./xmlchange --append CAM_CONFIG_OPTS="-ionosphere wxi -nlev 81"
-./xmlchange RUN_STARTDATE=1995-01-01
-./xmlchange CAM_NML_USE_CASE=UNSET
diff --git a/cime_config/testdefs/testmods_dirs/cam/outfrq3s_WX81/user_nl_cam b/cime_config/testdefs/testmods_dirs/cam/outfrq3s_WX81/user_nl_cam
deleted file mode 100644
index 55667f49bc..0000000000
--- a/cime_config/testdefs/testmods_dirs/cam/outfrq3s_WX81/user_nl_cam
+++ /dev/null
@@ -1,6 +0,0 @@
-mfilt=1,1,1,1,1,1
-ndens=1,1,1,1,1,1
-nhtfrq=3,3,3,3,3,3
-inithist='ENDOFRUN'
-ncdata='$DIN_LOC_ROOT/atm/waccm/ic/WAX3548T08CO_2003top_f2000.waccm_0017bottom_L81_c110906.nc'
-pbuf_global_allocate=.false.
diff --git a/cime_config/testdefs/testmods_dirs/cam/outfrq3s_WX81/user_nl_clm b/cime_config/testdefs/testmods_dirs/cam/outfrq3s_WX81/user_nl_clm
deleted file mode 100644
index f3ac27f1e6..0000000000
--- a/cime_config/testdefs/testmods_dirs/cam/outfrq3s_WX81/user_nl_clm
+++ /dev/null
@@ -1,27 +0,0 @@
-!----------------------------------------------------------------------------------
-! Users should add all user specific namelist changes below in the form of
-! namelist_var = new_namelist_value
-!
-! Include namelist variables for drv_flds_in ONLY if -megan and/or -drydep options
-! are set in the CLM_NAMELIST_OPTS env variable.
-!
-! EXCEPTIONS:
-! Set use_cndv by the compset you use and the CLM_BLDNML_OPTS -dynamic_vegetation setting
-! Set use_vichydro by the compset you use and the CLM_BLDNML_OPTS -vichydro setting
-! Set use_cn by the compset you use and CLM_BLDNML_OPTS -bgc setting
-! Set use_crop by the compset you use and CLM_BLDNML_OPTS -crop setting
-! Set spinup_state by the CLM_BLDNML_OPTS -bgc_spinup setting
-! Set irrigate by the CLM_BLDNML_OPTS -irrig setting
-! Set dtime with L_NCPL option
-! Set fatmlndfrc with LND_DOMAIN_PATH/LND_DOMAIN_FILE options
-! Set finidat with RUN_REFCASE/RUN_REFDATE/RUN_REFTOD options for hybrid or branch cases
-! (includes $inst_string for multi-ensemble cases)
-! Set glc_grid with CISM_GRID option
-! Set glc_smb with GLC_SMB option
-! Set maxpatch_glcmec with GLC_NEC option
-! Set glc_do_dynglacier with GLC_TWO_WAY_COUPLING env variable
-!----------------------------------------------------------------------------------
-hist_nhtfrq = 3
-hist_mfilt = 1
-hist_ndens = 1
-
diff --git a/cime_config/testdefs/testmods_dirs/cam/outfrq3s_waccm_ma/shell_commands b/cime_config/testdefs/testmods_dirs/cam/outfrq3s_waccm_ma/shell_commands
deleted file mode 100644
index 7b5dacf365..0000000000
--- a/cime_config/testdefs/testmods_dirs/cam/outfrq3s_waccm_ma/shell_commands
+++ /dev/null
@@ -1,5 +0,0 @@
-./xmlchange ROF_NCPL=\$ATM_NCPL
-./xmlchange GLC_NCPL=\$ATM_NCPL
-./xmlchange CAM_CONFIG_OPTS="-phys cam4 -aquaplanet -chem waccm_ma -waccmx -ionosphere none"
-./xmlchange CAM_NML_USE_CASE="waccmx_ma_2000_cam4"
-./xmlchange RUN_STARTDATE="19950101"
diff --git a/cime_config/testdefs/testmods_dirs/cam/outfrq3s_waccm_ma/user_nl_cam b/cime_config/testdefs/testmods_dirs/cam/outfrq3s_waccm_ma/user_nl_cam
deleted file mode 100644
index b8f943cd5f..0000000000
--- a/cime_config/testdefs/testmods_dirs/cam/outfrq3s_waccm_ma/user_nl_cam
+++ /dev/null
@@ -1,5 +0,0 @@
-mfilt=1,1,1,1,1,1
-ndens=1,1,1,1,1,1
-nhtfrq=3,3,3,3,3,3
-inithist='ENDOFRUN'
-pbuf_global_allocate=.false.
diff --git a/cime_config/testdefs/testmods_dirs/cam/outfrq9s_576tsks/user_nl_clm b/cime_config/testdefs/testmods_dirs/cam/outfrq9s_576tsks/user_nl_clm
index 0d83b5367b..f79939fc1e 100644
--- a/cime_config/testdefs/testmods_dirs/cam/outfrq9s_576tsks/user_nl_clm
+++ b/cime_config/testdefs/testmods_dirs/cam/outfrq9s_576tsks/user_nl_clm
@@ -1,11 +1,11 @@
!----------------------------------------------------------------------------------
-! Users should add all user specific namelist changes below in the form of
-! namelist_var = new_namelist_value
+! Users should add all user specific namelist changes below in the form of
+! namelist_var = new_namelist_value
!
! Include namelist variables for drv_flds_in ONLY if -megan and/or -drydep options
! are set in the CLM_NAMELIST_OPTS env variable.
!
-! EXCEPTIONS:
+! EXCEPTIONS:
! Set use_cndv by the compset you use and the CLM_BLDNML_OPTS -dynamic_vegetation setting
! Set use_vichydro by the compset you use and the CLM_BLDNML_OPTS -vichydro setting
! Set use_cn by the compset you use and CLM_BLDNML_OPTS -bgc setting
@@ -25,3 +25,4 @@ hist_nhtfrq = 9
hist_mfilt = 1
hist_ndens = 1
+fsurdat = '$DIN_LOC_ROOT/lnd/clm2/surfdata_map/surfdata_0.47x0.63_78pfts_CMIP6_simyr2000_c180508.nc'
diff --git a/cime_config/testdefs/testmods_dirs/cam/outfrq9s_amie/user_nl_cam b/cime_config/testdefs/testmods_dirs/cam/outfrq9s_amie/user_nl_cam
index 51665098f2..1d87194cf8 100644
--- a/cime_config/testdefs/testmods_dirs/cam/outfrq9s_amie/user_nl_cam
+++ b/cime_config/testdefs/testmods_dirs/cam/outfrq9s_amie/user_nl_cam
@@ -1,8 +1,11 @@
-mfilt=1,1,1,1,1,1
-ndens=1,1,1,1,1,1
-nhtfrq=9,9,9,9,9,9
+mfilt=1,1,1,1,1,1,1,1,1
+ndens=1,1,1,1,1,1,1,1,1
+nhtfrq=9,9,9,9,9,9,9,9,9
+avgflag_pertape = 'A', 'I', 'I', 'A', 'A', 'I', 'A'
inithist='ENDOFRUN'
ionos_epotential_amie=.true.
-amienh_file = '$DIN_LOC_ROOT/atm/waccm/amie_data/oct27_31_2003_nh.nc'
-amiesh_file = '$DIN_LOC_ROOT/atm/waccm/amie_data/oct27_31_2003_sh.nc'
-fincl6 = 'amie_phihm','amie_efxm','amie_kevm','amie_efxg','amie_kevg','amie_efx_phys','amie_kev_phys'
+amienh_files = '$DIN_LOC_ROOT/atm/waccm/amie_data/oct27_31_2003_nh.nc'
+amiesh_files = '$DIN_LOC_ROOT/atm/waccm/amie_data/oct27_31_2003_sh.nc'
+fincl6 = 'prescr_phihm','prescr_efxm','prescr_kevm','prescr_efxp','prescr_kevp','amie_efx_phys','amie_kev_phys'
+oplus_grid = 144,96
+ionos_npes = 72
diff --git a/cime_config/testdefs/testmods_dirs/cam/outfrq9s_ltr/shell_commands b/cime_config/testdefs/testmods_dirs/cam/outfrq9s_ltr/shell_commands
new file mode 100644
index 0000000000..e993be5146
--- /dev/null
+++ b/cime_config/testdefs/testmods_dirs/cam/outfrq9s_ltr/shell_commands
@@ -0,0 +1,5 @@
+./xmlchange ROF_NCPL=\$ATM_NCPL
+./xmlchange GLC_NCPL=\$ATM_NCPL
+./xmlchange RUN_STARTDATE=2010-08-03
+./xmlchange START_TOD=43200
+./xmlchange CLM_FORCE_COLDSTART=on
diff --git a/cime_config/testdefs/testmods_dirs/cam/outfrq9s_ltr/user_nl_cam b/cime_config/testdefs/testmods_dirs/cam/outfrq9s_ltr/user_nl_cam
new file mode 100644
index 0000000000..c32d6e3a35
--- /dev/null
+++ b/cime_config/testdefs/testmods_dirs/cam/outfrq9s_ltr/user_nl_cam
@@ -0,0 +1,9 @@
+mfilt=1,1,1,1,1,1,1,1,1
+ndens=1,1,1,1,1,1,1,1,1
+nhtfrq=9,9,9,9,9,9,9,9,9
+avgflag_pertape = 'A', 'I', 'I', 'A', 'A', 'I', 'A'
+inithist='ENDOFRUN'
+ionos_epotential_ltr=.true.
+ltr_files = '$DIN_LOC_ROOT/atm/waccm/amie_data/REMIX_3-4_Aug_2010_c210302.nc'
+fincl6 = 'prescr_phihm','prescr_efxm','prescr_kevm','prescr_efxp','prescr_kevp','ltr_efx_phys','ltr_kev_phys'
+ionos_npes = 72
diff --git a/cime_config/testdefs/testmods_dirs/cam/outfrq9s_multi/shell_commands b/cime_config/testdefs/testmods_dirs/cam/outfrq9s_multi/shell_commands
new file mode 100644
index 0000000000..64272e3a91
--- /dev/null
+++ b/cime_config/testdefs/testmods_dirs/cam/outfrq9s_multi/shell_commands
@@ -0,0 +1,3 @@
+./xmlchange ROF_NCPL=\$ATM_NCPL
+./xmlchange GLC_NCPL=\$ATM_NCPL
+./xmlchange NINST=2
diff --git a/cime_config/testdefs/testmods_dirs/cam/outfrq9s_multi/user_nl_cam b/cime_config/testdefs/testmods_dirs/cam/outfrq9s_multi/user_nl_cam
new file mode 100644
index 0000000000..5c50ec7f2e
--- /dev/null
+++ b/cime_config/testdefs/testmods_dirs/cam/outfrq9s_multi/user_nl_cam
@@ -0,0 +1,4 @@
+mfilt=1,1,1,1,1,1,1,1,1,1
+ndens=1,1,1,1,1,1,1,1,1,1
+nhtfrq=9,9,9,9,9,9,9,9,9,9
+inithist='ENDOFRUN'
diff --git a/cime_config/testdefs/testmods_dirs/cam/outfrq3s_waccm_ma/user_nl_clm b/cime_config/testdefs/testmods_dirs/cam/outfrq9s_multi/user_nl_clm
similarity index 99%
rename from cime_config/testdefs/testmods_dirs/cam/outfrq3s_waccm_ma/user_nl_clm
rename to cime_config/testdefs/testmods_dirs/cam/outfrq9s_multi/user_nl_clm
index f3ac27f1e6..0d83b5367b 100644
--- a/cime_config/testdefs/testmods_dirs/cam/outfrq3s_waccm_ma/user_nl_clm
+++ b/cime_config/testdefs/testmods_dirs/cam/outfrq9s_multi/user_nl_clm
@@ -21,7 +21,7 @@
! Set maxpatch_glcmec with GLC_NEC option
! Set glc_do_dynglacier with GLC_TWO_WAY_COUPLING env variable
!----------------------------------------------------------------------------------
-hist_nhtfrq = 3
+hist_nhtfrq = 9
hist_mfilt = 1
hist_ndens = 1
diff --git a/cime_config/testdefs/testmods_dirs/cam/waccmx_weimer/user_nl_cam b/cime_config/testdefs/testmods_dirs/cam/waccmx_weimer/user_nl_cam
index d64f9de803..f71e8a9623 100644
--- a/cime_config/testdefs/testmods_dirs/cam/waccmx_weimer/user_nl_cam
+++ b/cime_config/testdefs/testmods_dirs/cam/waccmx_weimer/user_nl_cam
@@ -4,4 +4,5 @@
nhtfrq = -24,-24,-24,-24,-24,-24,-24,-24,-24
fincl7='UI','VI','WI','PHIM2D','POTEN','QIONSUM','ELECDEN','QJOULE',
'UT_LUNAR','VT_LUNAR'
+ fincl8='op_dt','amb_diff','dfield','dwind'
apply_lunar_tides=.true.
diff --git a/doc/ChangeLog b/doc/ChangeLog
index 3179a148ac..39f9d17205 100644
--- a/doc/ChangeLog
+++ b/doc/ChangeLog
@@ -1,5 +1,275 @@
===============================================================
+Tag name: cam6_3_016
+Originator(s): fvitt, goldy, jedwards
+Date: 31 Mar 2021
+One-line Summary: Generalize coupling of WACCMX ionosphere to CAM physics; CMEPS/NUOPC threading
+Github PR URL:
+ https://github.com/ESCOMP/CAM/pull/264
+ https://github.com/ESCOMP/CAM/pull/348
+
+Purpose of changes (include the issue number and title text for each relevant GitHub issue):
+
+ ESMF regridding utilities are used to regrid fields between independent ion transport
+ geographic and electro-dynamo geomagnetic grids and a generalized cam physics grid mesh.
+ The use of an ESMF gridded component to contain the ion transport and electro-dynamo
+ ionosphere components allows the ionosphere to be executed on a subset of CAM's MPI
+ tasks and for multi-instance WACCMX configurations (a requirement for WACCMX-DART).
+
+ Github issues:
+ WACCMX ionosphere to CAM physics coupling #84
+ WACCMX missing efield diagnostics #223
+ WACCMX dies in initialization if you double the out-of-the-box processor count #151
+ Add threading for CMEPS driver #349 (jedwards4b)
+
+Describe any changes made to build system:
+
+Describe any changes made to the namelist:
+
+ - new namelist vars:
+ . cam_physics_mesh
+ . oplus_grid
+ . edyn_grid
+ . ionos_npes
+ - prescribed high-latitude potential settings (for multiple files):
+ . ionos_epotential_amie
+ . amienh_files
+ . amiesh_files
+ . ionos_epotential_ltr
+ . ltr_files
+
+List any changes to the defaults for the boundary datasets:
+
+Describe any substantial timing or memory changes:
+
+Code reviewed by: CAM review team
+
+List all files eliminated:
+
+D cime_config/testdefs/testmods_dirs/cam/outfrq3s_WX81/shell_commands
+D cime_config/testdefs/testmods_dirs/cam/outfrq3s_WX81/user_nl_cam
+D cime_config/testdefs/testmods_dirs/cam/outfrq3s_WX81/user_nl_clm
+D cime_config/testdefs/testmods_dirs/cam/outfrq3s_waccm_ma/shell_commands
+D cime_config/testdefs/testmods_dirs/cam/outfrq3s_waccm_ma/user_nl_cam
+D cime_config/testdefs/testmods_dirs/cam/outfrq3s_waccm_ma/user_nl_clm
+ - obsolete -- removed
+
+D src/ionosphere/waccmx/amie.F90
+ - replaced with amie_module.F90
+
+List all files added and what they do:
+
+A src/ionosphere/waccmx/edyn_grid_comp.F90
+ - ESMF gridded component for edynamo and oplus transport to
+ allow for running ionosphere on a subset of CAM's MPI tasks
+ and allows for multi-instance simulations
+
+A src/ionosphere/waccmx/regridder.F90
+ - encapsulates 2D and 3D field mappings between physics, magnetic, and oplus grids
+
+A src/ionosphere/waccmx/adotv_mod.F90
+ - calculate dot products on the oplus grid -- extracted from edynamo.F90
+
+A src/ionosphere/waccmx/amie_module.F90
+A src/ionosphere/waccmx/ltr_module.F90
+ - prescribed high-latitude potential
+
+A src/ionosphere/waccmx/utils_mod.F90
+ - code shared between ltr_module and amie_module
+
+A src/ionosphere/waccmx/edyn_solver_coefs.F90
+ - added to workaround circular dependency issue
+
+A cime_config/testdefs/testmods_dirs/cam/outfrq1d_ltr/shell_commands
+A cime_config/testdefs/testmods_dirs/cam/outfrq1d_ltr/user_nl_cam
+A cime_config/testdefs/testmods_dirs/cam/outfrq9s_ltr/shell_commands
+A cime_config/testdefs/testmods_dirs/cam/outfrq9s_ltr/user_nl_cam
+ - new tests for magnetosphere model high-latitude inputs
+
+A cime_config/testdefs/testmods_dirs/cam/outfrq9s_multi/shell_commands
+A cime_config/testdefs/testmods_dirs/cam/outfrq9s_multi/user_nl_cam
+A cime_config/testdefs/testmods_dirs/cam/outfrq9s_multi/user_nl_clm
+ - multi-instance test
+
+List all existing files that have been modified, and describe the changes:
+
+M bld/build-namelist
+ - remove wxi ionosphere option
+ - allow waccmx with any dycore
+ - set defaults for cam physics mesh file, oplus and edyn grid resolutions
+
+M bld/config_files/definition.xml
+ - remove wxi ionosphere option
+
+M bld/configure
+ - remove wxi ionosphere option
+ - remove 81 level configuration
+ - allow any dycore
+ - remove WACCMX_EDYN_ESMF and WACCMX_IONOS cpp variables
+
+M bld/namelist_files/namelist_defaults_cam.xml
+ - add defaults for SE WACCMX
+ - add defaults for cam physics mesh file, oplus and edyn grid resolutions
+ - update IC files
+
+M bld/namelist_files/namelist_definition.xml
+ - new namelist vars:
+ . cam_physics_mesh
+ . oplus_grid
+ . edyn_grid
+ . ionos_npes
+ - prescribed high-latitude potential settings (for multiple files):
+ . ionos_epotential_amie
+ . amienh_files
+ . amiesh_files
+ . ionos_epotential_ltr
+ . ltr_files
+
+M bld/namelist_files/use_cases/sd_waccmx_ma_cam6.xml
+ - update IC file
+
+M bld/namelist_files/use_cases/waccmx_ma_2000_cam4.xml
+ - IC for 81 levels
+
+M bld/namelist_files/use_cases/waccmxie_ma_2000_cam4.xml
+ - fincl7 only for FV dycore
+ - dev24del2flag only for FV dycore
+
+M cime_config/config_component.xml
+M cime_config/config_pes.xml
+ - set default PE layouts for WACCMX SE
+ - change cheyenne default PE layouts for WACCMX FV
+
+M cime_config/testdefs/testlist_cam.xml
+ - add tests for WACCMX SE
+ - add multi-instance test
+ - misc adjustments to WACCMX FV tests
+
+M cime_config/testdefs/testmods_dirs/cam/outfrq1d_amie/user_nl_cam
+M cime_config/testdefs/testmods_dirs/cam/outfrq9s_576tsks/shell_commands
+M cime_config/testdefs/testmods_dirs/cam/outfrq9s_576tsks/user_nl_clm
+M cime_config/testdefs/testmods_dirs/cam/outfrq9s_amie/user_nl_cam
+M cime_config/testdefs/testmods_dirs/cam/waccmx_weimer/user_nl_cam
+
+
+M src/chemistry/mozart/short_lived_species.F90
+ - replace WACCMX_IONOS with WACCMX_PHYS
+ - clean up
+
+M src/control/cam_comp.F90
+ - remove dyn_in from ionosphere_run2 arguments
+
+M src/dynamics/se/dycore/prim_advance_mod.F90
+ - allow SE dycore with species dependent thermodynamics in waccmx
+
+M src/dynamics/se/dycore/prim_advection_mod.F90
+ - expand the write format for small pressures near the top of WACCMX
+
+M src/ionosphere/ionosphere_interface.F90
+ - remove dyn_in from ionosphere_run2 arguments
+
+M src/ionosphere/waccmx/ionosphere_interface.F90
+M src/ionosphere/waccmx/dpie_coupling.F90
+M src/ionosphere/waccmx/edyn_esmf.F90
+M src/ionosphere/waccmx/edyn_init.F90
+M src/ionosphere/waccmx/edyn_maggrid.F90
+M src/ionosphere/waccmx/edyn_geogrid.F90
+M src/ionosphere/waccmx/edyn_mpi.F90
+M src/ionosphere/waccmx/edynamo.F90
+ - infrastructure changes to be independent of CAM's dycore
+ -- couple ionosphere to physics via ESMF regridding tools
+ to map between physics mesh and oplus grid and
+ magnetic grid
+
+M src/ionosphere/waccmx/edyn_mud.F90
+M src/ionosphere/waccmx/edyn_mudcom.F90
+M src/ionosphere/waccmx/edyn_mudmod.F90
+M src/ionosphere/waccmx/edyn_muh2cr.F90
+M src/ionosphere/waccmx/edyn_solve.F90
+ - runtime configurable magnetic grid resolution
+ - put mud solver routines inside modules
+
+M src/ionosphere/waccmx/edyn_params.F90
+M src/ionosphere/waccmx/getapex.F90
+M src/ionosphere/waccmx/heelis.F90
+M src/ionosphere/waccmx/wei05sc.F90
+ - misc clean up
+
+M src/ionosphere/waccmx/oplus.F90
+ - include term analysis diagnostics
+ - misc clean up
+
+M src/ionosphere/waccmx/savefield_waccm.F90
+ - removed deprecated re-arranger
+
+M src/physics/cam/cam_diagnostics.F90
+ - add CPAIRV and RAIRV diagnostics
+
+M src/physics/cam/phys_control.F90
+ - add cam_physics_mesh
+
+M src/physics/cam/rk_stratiform.F90
+ - set only ncol columns in dlat (dlat(:ncol)=...)
+
+M src/physics/waccm/aurora_params.F90
+ - amie_period --> prescribed_period
+
+M src/physics/waccm/mo_aurora.F90
+ - amie_period --> prescribed_period (for AMIE and LTR inputs)
+ - initialize AurIPRateSum pbuf field to zero
+
+M src/utils/physconst.F90
+ - bug fix in get_mbarv (factor indexing)
+
+M src/cpl/nuopc/atm_comp_nuopc.F90
+M src/physics/cam/physics_types.F90
+ - fixes for threading in CMEPS/NUOPC component coupling (jedwards4b)
+
+If there were any failures reported from running test_driver.sh on any test
+platform, and checkin with these failures has been OK'd by the gatekeeper,
+then copy the lines from the td.*.status files for the failed tests to the
+appropriate machine below. All failed tests must be justified.
+
+cheyenne/intel/aux_cam:
+ ERC_D_Ln9.f19_f19_mg17.QPX2000.cheyenne_intel.cam-outfrq3s (Overall: DIFF) details:
+ FAIL ERC_D_Ln9.f19_f19_mg17.QPX2000.cheyenne_intel.cam-outfrq3s NLCOMP
+ FAIL ERC_D_Ln9.f19_f19_mg17.QPX2000.cheyenne_intel.cam-outfrq3s BASELINE /glade/p/cesm/amwg/cesm_baselines/cam6_3_015: ERROR BFAIL baseline directory '/glade/p/cesm/amwg/cesm_baselines/cam6_3_015/ERC_D_Ln9.f19_f19_mg17.QPX2000.cheyenne_intel.cam-outfrq3s' does not exist
+ ERS_Ln9.f09_f09_mg17.FX2000.cheyenne_intel.cam-outfrq9s (Overall: DIFF) details:
+ FAIL ERS_Ln9.f09_f09_mg17.FX2000.cheyenne_intel.cam-outfrq9s NLCOMP
+ FAIL ERS_Ln9.f09_f09_mg17.FX2000.cheyenne_intel.cam-outfrq9s BASELINE /glade/p/cesm/amwg/cesm_baselines/cam6_3_015: DIFF
+ ERS_Ln9.f19_f19_mg17.FXSD.cheyenne_intel.cam-outfrq9s (Overall: DIFF) details:
+ FAIL ERS_Ln9.f19_f19_mg17.FXSD.cheyenne_intel.cam-outfrq9s NLCOMP
+ FAIL ERS_Ln9.f19_f19_mg17.FXSD.cheyenne_intel.cam-outfrq9s BASELINE /glade/p/cesm/amwg/cesm_baselines/cam6_3_015: DIFF
+ SMS_D_Ln9.f19_f19_mg17.FXHIST.cheyenne_intel.cam-outfrq9s_amie (Overall: DIFF) details:
+ FAIL SMS_D_Ln9.f19_f19_mg17.FXHIST.cheyenne_intel.cam-outfrq9s_amie NLCOMP
+ FAIL SMS_D_Ln9.f19_f19_mg17.FXHIST.cheyenne_intel.cam-outfrq9s_amie BASELINE /glade/p/cesm/amwg/cesm_baselines/cam6_3_015: DIFF
+ SMS_D_Ln9.ne16_ne16_mg17.FX2000.cheyenne_intel.cam-outfrq9s (Overall: DIFF) details:
+ FAIL SMS_D_Ln9.ne16_ne16_mg17.FX2000.cheyenne_intel.cam-outfrq9s NLCOMP
+ FAIL SMS_D_Ln9.ne16_ne16_mg17.FX2000.cheyenne_intel.cam-outfrq9s BASELINE /glade/p/cesm/amwg/cesm_baselines/cam6_3_015: ERROR BFAIL baseline directory '/glade/p/cesm/amwg/cesm_baselines/cam6_3_015/SMS_D_Ln9.ne16_ne16_mg17.FX2000.cheyenne_intel.cam-outfrq9s' does not exist
+ - expected baseline failures for WACCMX
+
+ SMS_D_Ln9.ne16_ne16_mg17.QPX2000.cheyenne_intel.cam-outfrq9s (Overall: DIFF) details:
+ FAIL SMS_D_Ln9.ne16_ne16_mg17.QPX2000.cheyenne_intel.cam-outfrq9s NLCOMP
+ FAIL SMS_D_Ln9.ne16_ne16_mg17.QPX2000.cheyenne_intel.cam-outfrq9s BASELINE /glade/p/cesm/amwg/cesm_baselines/cam6_3_015: ERROR BFAIL baseline directory '/glade/p/cesm/amwg/cesm_baselines/cam6_3_015/SMS_D_Ln9.ne16_ne16_mg17.QPX2000.cheyenne_intel.cam-outfrq9s' does not exist
+ - new WACCMX test
+
+izumi/nag/aux_cam:
+ DAE.f45_f45_mg37.FHS94.izumi_nag.cam-dae (Overall: FAIL) details:
+ FAIL DAE.f45_f45_mg37.FHS94.izumi_nag.cam-dae COMPARE_base_da
+ - pre-existing failure
+
+ SMS_D_Ln9.ne5_ne5_mg37.QPC4X.izumi_nag.cam-outfrq9s (Overall: DIFF) details:
+ FAIL SMS_D_Ln9.ne5_ne5_mg37.QPC4X.izumi_nag.cam-outfrq9s NLCOMP
+ FAIL SMS_D_Ln9.ne5_ne5_mg37.QPC4X.izumi_nag.cam-outfrq9s BASELINE /fs/cgd/csm/models/atm/cam/pretag_bl/cam6_3_015_nag: ERROR BFAIL baseline directory '/fs/cgd/csm/models/atm/cam/pretag_bl/cam6_3_015_nag/SMS_D_Ln9.ne5_ne5_mg37.QPC4X.izumi_nag.cam-outfrq9s' does not exist
+ - expected failure -- new test for WACCMX
+
+izumi/pgi/aux_cam: all PASS
+
+Summarize any changes to answers: bit-for-bit unchanged except for WACCMX configurations
+
+===============================================================
+===============================================================
+
Tag name: cam6_3_015
Originator(s): mvertens, goldy
Date: 2021-03-24
diff --git a/src/chemistry/mozart/short_lived_species.F90 b/src/chemistry/mozart/short_lived_species.F90
index 16fa03da9b..76fb30b20e 100644
--- a/src/chemistry/mozart/short_lived_species.F90
+++ b/src/chemistry/mozart/short_lived_species.F90
@@ -11,7 +11,7 @@ module short_lived_species
use cam_logfile, only : iulog
use ppgrid, only : pcols, pver, begchunk, endchunk
use spmd_utils, only : masterproc
-
+
implicit none
@@ -30,7 +30,7 @@ module short_lived_species
integer :: pbf_idx
integer :: map(nslvd)
- character(len=16), parameter :: pbufname = 'ShortLivedSpecies'
+ character(len=*), parameter :: pbufname = 'ShortLivedSpecies'
contains
@@ -39,10 +39,6 @@ module short_lived_species
subroutine register_short_lived_species
use physics_buffer, only : pbuf_add_field, dtype_r8
- implicit none
-
- integer :: m
-
if ( nslvd < 1 ) return
call pbuf_add_field(pbufname,'global',dtype_r8,(/pcols,pver,nslvd/),pbf_idx)
@@ -52,7 +48,7 @@ end subroutine register_short_lived_species
!---------------------------------------------------------------------
!---------------------------------------------------------------------
subroutine short_lived_species_initic
-#ifdef WACCMX_IONOS
+#ifdef WACCMX_PHYS
use cam_history, only : addfld, add_default
integer :: m
@@ -74,11 +70,11 @@ subroutine short_lived_species_writeic( lchnk, pbuf )
integer , intent(in) :: lchnk ! chunk identifier
type(physics_buffer_desc), pointer :: pbuf(:)
-#ifdef WACCMX_IONOS
+#ifdef WACCMX_PHYS
real(r8),pointer :: tmpptr(:,:)
integer :: m
character(len=24) :: varname
-
+
if ( write_inithist() ) then
do m=1,nslvd
varname = trim(slvd_lst(m))//'&IC'
@@ -98,20 +94,19 @@ subroutine initialize_short_lived_species(ncid_ini, pbuf2d)
use mo_tracname, only : solsym
use ncdio_atm, only : infld
use pio, only : file_desc_t
- use physics_buffer, only : physics_buffer_desc, pbuf_set_field, pbuf_get_chunk, pbuf_get_field
+ use physics_buffer, only : physics_buffer_desc, pbuf_set_field
implicit none
type(file_desc_t), intent(inout) :: ncid_ini
type(physics_buffer_desc), pointer :: pbuf2d(:,:)
- integer :: m,n,lchnk
+ integer :: m,n
integer :: grid_id
character(len=8) :: fieldname
character(len=4) :: dim1name, dim2name
logical :: found
real(r8),pointer :: tmpptr(:,:,:) ! temporary pointer
- real(r8),pointer :: tmpptr2(:,:,:) ! temporary pointer
character(len=*), parameter :: subname='INITIALIZE_SHORT_LIVED_SPECIES'
if ( nslvd < 1 ) return
@@ -139,9 +134,9 @@ subroutine initialize_short_lived_species(ncid_ini, pbuf2d)
endif
call pbuf_set_field(pbuf2d, pbf_idx, tmpptr, start=(/1,1,m/),kount=(/pcols,pver,1/))
-
+
if (masterproc) write(iulog,*) fieldname, ' is set to short-lived'
-
+
enddo
deallocate(tmpptr)
@@ -154,7 +149,7 @@ subroutine set_short_lived_species( q, lchnk, ncol, pbuf )
use physics_buffer, only : physics_buffer_desc, pbuf_set_field
- implicit none
+ implicit none
real(r8), intent(in) :: q(pcols,pver,gas_pcnst)
integer, intent(in) :: lchnk, ncol
@@ -176,7 +171,7 @@ end subroutine set_short_lived_species
subroutine get_short_lived_species( q, lchnk, ncol, pbuf )
use physics_buffer, only : physics_buffer_desc, pbuf_get_field
- implicit none
+ implicit none
real(r8), intent(inout) :: q(pcols,pver,gas_pcnst)
integer, intent(in) :: lchnk, ncol
@@ -184,7 +179,7 @@ subroutine get_short_lived_species( q, lchnk, ncol, pbuf )
real(r8),pointer :: tmpptr(:,:)
- integer :: m,n
+ integer :: m,n
if ( nslvd < 1 ) return
@@ -213,7 +208,7 @@ function slvd_index( name )
do m=1,nslvd
if ( name == slvd_lst(m) ) then
slvd_index = m
- return
+ return
endif
enddo
diff --git a/src/control/cam_comp.F90 b/src/control/cam_comp.F90
index 29eff9a6d7..f44faa6262 100644
--- a/src/control/cam_comp.F90
+++ b/src/control/cam_comp.F90
@@ -307,7 +307,7 @@ subroutine cam_run2( cam_out, cam_in )
! Ion transport
!
call t_startf('ionosphere_run2')
- call ionosphere_run2( phys_state, dyn_in, pbuf2d )
+ call ionosphere_run2( phys_state, pbuf2d )
call t_stopf ('ionosphere_run2')
end subroutine cam_run2
diff --git a/src/cpl/nuopc/atm_comp_nuopc.F90 b/src/cpl/nuopc/atm_comp_nuopc.F90
index 8230276253..cea91225bd 100644
--- a/src/cpl/nuopc/atm_comp_nuopc.F90
+++ b/src/cpl/nuopc/atm_comp_nuopc.F90
@@ -84,7 +84,7 @@ module atm_comp_nuopc
integer :: flds_scalar_index_nx = 0
integer :: flds_scalar_index_ny = 0
integer :: flds_scalar_index_nextsw_cday = 0
-
+ integer :: nthrds
integer , parameter :: dbug_flag = 0
type(cam_in_t) , pointer :: cam_in(:)
type(cam_out_t) , pointer :: cam_out(:)
@@ -351,7 +351,7 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc)
character(len=cl) :: model_doi_url ! DOI for CESM model run
logical :: aqua_planet ! Flag to run model in "aqua planet" mode
logical :: brnch_retain_casename ! true => branch run has same caseid as run being branched from
- logical :: single_column
+ logical :: single_column = .false.
character(len=cl) :: single_column_lnd_domainfile
real(r8) :: scol_lon
real(r8) :: scol_lat
@@ -403,8 +403,16 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc)
call ESMF_VMGet(vm, pet=localPet, peCount=localPeCount, rc=rc)
if (chkerr(rc,__LINE__,u_FILE_u)) return
-!$ call omp_set_num_threads(localPeCount)
- print *,__FILE__,__LINE__,localPeCount
+ if(localPeCount == 1) then
+ call NUOPC_CompAttributeGet(gcomp, "nthreads", value=cvalue, rc=rc)
+ if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=u_FILE_u)) return
+ read(cvalue,*) nthrds
+ else
+ nthrds = localPeCount
+ endif
+
+!$ call omp_set_num_threads(nthrds)
+ print *,__FILE__,__LINE__,nthrds
!----------------------------------------------------------------------------
! determine instance information
@@ -1008,12 +1016,7 @@ subroutine ModelAdvance(gcomp, rc)
rc = ESMF_SUCCESS
- call ESMF_GridCompGet(gcomp, vm=vm, localPet=localPet, rc=rc)
- if (chkerr(rc,__LINE__,u_FILE_u)) return
- call ESMF_VMGet(vm, pet=localPet, peCount=localPeCount, rc=rc)
- if (chkerr(rc,__LINE__,u_FILE_u)) return
-
-!$ call omp_set_num_threads(localPeCount)
+!$ call omp_set_num_threads(nthrds)
call shr_file_getLogUnit (shrlogunit)
call shr_file_setLogUnit (iulog)
diff --git a/src/dynamics/se/dycore/prim_advance_mod.F90 b/src/dynamics/se/dycore/prim_advance_mod.F90
index ca9c125395..2f06fe7119 100644
--- a/src/dynamics/se/dycore/prim_advance_mod.F90
+++ b/src/dynamics/se/dycore/prim_advance_mod.F90
@@ -107,9 +107,6 @@ subroutine prim_advance_exp(elem, fvm, deriv, hvcoord, hybrid,dt, tl, nets, net
! (K&G 2nd order method has CFL=4. tiny CFL improvement not worth 2nd order)
!
- if (dry_air_species_num > 0) &
- call endrun('ERROR: SE dycore not ready for species dependent thermodynamics - ABORT')
-
call omp_set_nested(.true.)
! default weights for computing mean dynamics fluxes
@@ -1293,7 +1290,7 @@ subroutine compute_and_apply_rhs(np1,nm1,n0,dt2,elem,hvcoord,hybrid,&
call gradient_sphere(Ephi(:,:),deriv,elem(ie)%Dinv,vtemp)
density_inv(:,:) = R_dry(:,:,k)*T_v(:,:,k)/p_full(:,:,k)
- if (dry_air_species_num==0) then
+ if (dry_air_species_num==0) then
exner(:,:)=(p_full(:,:,k)/hvcoord%ps0)**kappa(:,:,k,ie)
theta_v(:,:)=T_v(:,:,k)/exner(:,:)
call gradient_sphere(exner(:,:),deriv,elem(ie)%Dinv,grad_exner)
diff --git a/src/dynamics/se/dycore/prim_advection_mod.F90 b/src/dynamics/se/dycore/prim_advection_mod.F90
index 41e15744df..b00019b974 100644
--- a/src/dynamics/se/dycore/prim_advection_mod.F90
+++ b/src/dynamics/se/dycore/prim_advection_mod.F90
@@ -1027,7 +1027,7 @@ subroutine vertical_remap(hybrid,elem,fvm,hvcoord,np1,np1_qdp,nets,nete)
elem(ie)%spherep(i,j)%lon*rad2deg,elem(ie)%spherep(i,j)%lat*rad2deg
write(iulog,*) " "
do k=1,nlev
- write(iulog,'(A21,I5,A1,f12.8,3f8.2)') "k,dp_star_moist,u,v,T: ",k," ",dp_star_moist(i,j,k)/100.0_r8,&
+ write(iulog,'(A21,I5,A1,f16.12,3f10.2)') "k,dp_star_moist,u,v,T: ",k," ",dp_star_moist(i,j,k)/100.0_r8,&
elem(ie)%state%v(i,j,1,k,np1),elem(ie)%state%v(i,j,2,k,np1),elem(ie)%state%T(i,j,k,np1)
end do
end if
diff --git a/src/ionosphere/ionosphere_interface.F90 b/src/ionosphere/ionosphere_interface.F90
index c42199514d..4b7802010c 100644
--- a/src/ionosphere/ionosphere_interface.F90
+++ b/src/ionosphere/ionosphere_interface.F90
@@ -42,17 +42,13 @@ end subroutine ionosphere_run1
!--------------------------------------------------------------------------------
!--------------------------------------------------------------------------------
- subroutine ionosphere_run2( phys_state, dyn_in, pbuf2d )
-
+ subroutine ionosphere_run2( phys_state, pbuf2d )
+ use ppgrid, only: begchunk, endchunk
use physics_types, only: physics_state
use physics_buffer, only: physics_buffer_desc
- use ppgrid, only: begchunk, endchunk
- use dyn_comp, only: dyn_import_t
! args
type(physics_state), intent(in) :: phys_state(begchunk:endchunk)
- type(dyn_import_t), intent(in) :: dyn_in ! dynamics import
-
type(physics_buffer_desc), pointer :: pbuf2d(:,:)
end subroutine ionosphere_run2
diff --git a/src/ionosphere/waccmx/adotv_mod.F90 b/src/ionosphere/waccmx/adotv_mod.F90
new file mode 100644
index 0000000000..255543f2af
--- /dev/null
+++ b/src/ionosphere/waccmx/adotv_mod.F90
@@ -0,0 +1,109 @@
+module adotv_mod
+ use shr_kind_mod,only: r8 => shr_kind_r8 ! 8-byte reals
+
+ implicit none
+
+contains
+
+ subroutine calc_adotv(z, un, vn, wn, adotv1, adotv2, adota1, adota2, &
+ a1dta2, be3, sini, lev0, lev1, lon0, lon1, lat0, lat1)
+ !
+ ! Calculate adotv1,2, adota1,2, a1dta2 and be3.
+ ! All fields should be on O+ grid
+ !
+ use edyn_params, only: r0,h0
+ use edyn_geogrid, only: jspole, jnpole
+ use getapex, only: &
+ zb, & ! downward component of magnetic field
+ bmod, & ! magnitude of magnetic field (gauss)
+ dvec, & ! (nlonp1,nlat,3,2)
+ dddarr, & ! (nlonp1,nlat)
+ be3arr, & ! (nlonp1,nlat)
+ alatm ! (nlonp1,0:nlatp1)
+ !
+ ! Args:
+ integer,intent(in) :: lev0, lev1, lon0, lon1, lat0, lat1
+ real(r8), dimension(lev0:lev1,lon0:lon1,lat0:lat1), intent(in) :: &
+ z, & ! geopotential height (cm)
+ un, & ! neutral zonal velocity (cm/s)
+ vn ! neutral meridional velocity (cm/s)
+ real(r8), dimension(lev0:lev1,lon0:lon1,lat0:lat1), intent(in) :: &
+ wn ! vertical velocity (cm/s)
+
+ real(r8), dimension(lon0:lon1,lat0:lat1,lev0:lev1), intent(out) :: &
+ adotv1, adotv2
+ real(r8), dimension(lon0:lon1,lat0:lat1), intent(out) :: &
+ adota1, adota2, a1dta2, be3, sini
+ !
+ ! Local:
+ integer :: k, i, j
+ real(r8) :: r0or, rat, sinalat
+ real(r8) :: clm2(lon0:lon1,lat0:lat1)
+ !
+ adotv1 = 0.0_r8
+ adotv2 = 0.0_r8
+ adota1 = 0.0_r8
+ adota2 = 0.0_r8
+ a1dta2 = 0.0_r8
+ be3 = 0.0_r8
+ sini = 0.0_r8
+
+ do j = lat0, lat1
+ do i = lon0, lon1
+ sinalat = sin(alatm(i,j)) ! sin(lam)
+ clm2(i,j) = 1._r8 - (sinalat * sinalat) ! cos^2(lam)
+ be3(i,j) = 1.e-9_r8*be3arr(i,j) ! be3 is in T (be3arr in nT)
+ sini(i,j) = zb(i,j)/bmod(i,j) ! sin(I_m)
+
+ do k=lev0,lev1-1
+ !
+ ! d_1 = (R_0/R)^1.5
+ r0or = r0/(r0 + 0.5_r8* (z(k,i,j) + z(k+1,i,j)) - h0)
+ rat = 1.e-2_r8*r0or**1.5_r8 ! 1/100 conversion in cm
+ !
+ ! A_1 dot V = fac( d_1(1) u + d_1(2) v + d_1(3) w
+ adotv1(i,j,k) = rat*( &
+ dvec(i,j,1,1) * un(k,i,j) + &
+ dvec(i,j,2,1) * vn(k,i,j) + &
+ dvec(i,j,3,1) * wn(k,i,j))
+
+ !
+ ! Note: clm2 is being used here to represent the squared cosine
+ ! of the quasi-dipole latitude, not of the M(90) latitude,
+ ! since the wind values are aligned vertically,
+ ! not along the field line.
+ !
+ rat = rat * sqrt((4._r8 - (3._r8 * clm2(i,j))) / &
+ (4._r8 - (3._r8 * r0or * clm2(i,j))))
+ !
+ ! A_2 dot V = fac( d_2(1) u + d_2(2) v + d_2(3) w
+ adotv2(i,j,k) = rat * ( &
+ dvec(i,j,1,2) * un(k,i,j) + &
+ dvec(i,j,2,2) * vn(k,i,j) + &
+ dvec(i,j,3,2) * wn(k,i,j))
+ end do ! k=lev0,lev1-1
+ end do
+ end do
+
+ do j = lat0, lat1
+ if (j==jspole .or. j==jnpole) cycle
+ do i = lon0, lon1
+ !
+ ! Calculation of adota(n) = d(n)**2/D
+ ! a1dta2 = (d(1) dot d(2)) /D
+ !
+ adota1(i,j) = (dvec(i,j,1,1)**2 + dvec(i,j,2,1)**2 + &
+ dvec(i,j,3,1)**2) / dddarr(i,j)
+ adota2(i,j) = (dvec(i,j,1,2)**2 + dvec(i,j,2,2)**2 + &
+ dvec(i,j,3,2)**2) / dddarr(i,j)
+ a1dta2(i,j) = (dvec(i,j,1,1) * dvec(i,j,1,2) + &
+ dvec(i,j,2,1) * dvec(i,j,2,2) + &
+ dvec(i,j,3,1) * dvec(i,j,3,2)) / dddarr(i,j)
+ end do ! i=lon0,lon1
+
+ end do ! j=lat0,lat1
+
+ end subroutine calc_adotv
+
+
+end module adotv_mod
diff --git a/src/ionosphere/waccmx/amie.F90 b/src/ionosphere/waccmx/amie.F90
deleted file mode 100644
index 07d6a102ac..0000000000
--- a/src/ionosphere/waccmx/amie.F90
+++ /dev/null
@@ -1,1088 +0,0 @@
-module amie_module
- !
- ! Module used to read data from the AMIE outputs (POT,mean energy,
- ! and energy flux).
- !
-
- use shr_kind_mod ,only: r8 => shr_kind_r8
- use cam_logfile ,only: iulog
- use spmd_utils ,only: masterproc
- use cam_abortutils ,only: endrun
- use edyn_maggrid, only: nmlat,nmlonp1
- use edyn_mpi ,only: mlon0,mlon1,mlat0,mlat1, &
- lon0,lon1,lat0,lat1
-#ifdef WACCMX_EDYN_ESMF
- use edyn_params ,only: finit
- use edyn_maggrid ,only: &
- ylonm, & ! magnetic latitudes (nmlat) (radians)
- ylatm ! magnetic longtitudes (nmlonp1) (radians)
- use edyn_esmf ,only: mag_efx,mag_kev,geo_efx,geo_kev
- use esmf ,only: ESMF_FIELD ! ESMF library module
- use cam_pio_utils, only: cam_pio_openfile, cam_pio_closefile
- use pio, only: pio_inq_dimid, pio_inquire_dimension, pio_inquire, pio_inq_varid
- use pio, only: file_desc_t, pio_noerr, pio_nowrite, pio_get_var
-#endif
- implicit none
-
- private
- public :: init_amie, getamie
-#ifdef WACCMX_EDYN_ESMF
-
- ! Define parameters for AMIE input data file:
- integer, parameter :: &
- mxgdays = 10, & ! maximum number of days of AMIE data
- mxtimes = 5881, & ! maximum number of times of AMIE data per day
- ithtrns = 30, & ! corresponding to trans lat 40-deg
- ithmx = 55, & ! maximum number of latitudes of AMIE data
- jmxm = 2*ithmx-1, & ! maximum number of global latitudes
- lonmx = 36 ! maximum number of longitudes of AMIE data
- integer :: lonp1,latp1
- ! integer,dimension(mxtimes) :: year,month,day,jday
- ! Define AMIE output fields
- real(r8) :: &
- tiepot(nmlonp1,nmlat),tieekv(nmlonp1,nmlat), &
- tieefx(nmlonp1,nmlat)
- ! defined output AMIE fields in TGCM geographic grid
- ! real,dimension(nlonp4,nlat) ::
- ! | potg_sech, ekvg_sech, efxg_sech
- ! real,dimension(nmlonp1,-2:nlevp1) :: tiepot_sech
- !
- ! Define fields for AMIE input data file:
- ! electric potential in Volt
- ! mean energy in KeV
- ! energy flux in W/m^2
- ! amie_cusplat_nh(sh) and amie_cuspmlt_nh(sh) are
- ! AMIE cusp latitude and MLT in NH and SH
- ! amie_hpi_nh(sh) are AMIE hemi-integrated power
- ! amie_pcp_nh(sh) are AMIE polar-cap potential drop
- ! Saved AMIE outputs with suffix _amie
- !
- real(r8),allocatable,dimension(:,:,:),save :: & ! (lonp1,latp1,ntimes)
- amie_pot_nh, amie_pot_sh, amie_ekv_nh, amie_ekv_sh, &
- amie_efx_nh, amie_efx_sh
- real(r8),allocatable,dimension(:,:),save :: & ! (lonp1,latp1)
- pot_nh_amie,pot_sh_amie, ekv_nh_amie,ekv_sh_amie, &
- efx_nh_amie,efx_sh_amie
- integer, allocatable,dimension(:),save :: & ! (ntimes)
- year,month,day,jday
- real(r8), allocatable,dimension(:),save :: & ! (ntimes)
- amie_cusplat_nh, amie_cuspmlt_nh, amie_hpi_nh, &
- amie_pcp_nh, amie_nh_ut, &
- amie_cusplat_sh, amie_cuspmlt_sh, amie_hpi_sh, &
- amie_pcp_sh, amie_sh_ut
- real(r8) :: &
- cusplat_nh_amie, cuspmlt_nh_amie, cusplat_sh_amie, &
- cuspmlt_sh_amie, hpi_sh_amie, hpi_nh_amie, pcp_sh_amie, &
- pcp_nh_amie
- !
-#endif
-
-contains
- !-----------------------------------------------------------------------
- subroutine init_amie(amienh,amiesh)
- !
- ! Called from tgcm.F
- ! (this is not in init.F to avoid circular dependencies)
- !
- character(len=*),intent(in) :: amienh, amiesh
-
-#ifdef WACCMX_EDYN_ESMF
- ! read north hemisphere file:
- if (len_trim(amienh) > 0) then
- if (masterproc) write(iulog,"('Reading AMIENH file ',a)") trim(amienh)
- call rdamie_nh(amienh)
- end if
- !
- ! Read south hemisphere file:
- if (len_trim(amiesh) > 0) then
- if (masterproc) write(iulog,"('Reading AMIESH file ',a)") trim(amiesh)
- call rdamie_sh(amiesh)
- end if
-#else
- call endrun('Cannot use AMIE without electro-dynamo active.')
-#endif
- end subroutine init_amie
-#ifdef WACCMX_EDYN_ESMF
- !-----------------------------------------------------------------------
- subroutine rdamie_nh(amienh)
- !
- ! Read AMIE data for the northern hemisphere from amienh
- !
- ! Local:
-
- character(len=*),intent(in) :: amienh
- integer :: istat,ntimes,ndims,nvars,ngatts,idunlim,ier
- integer :: id_lon,id_lat,id_time, &
- idv_year,idv_mon,idv_day,idv_jday, &
- idv_ut,idv_pot,idv_ekv, &
- idv_efx,idv_cusplat,idv_cuspmlt,idv_hpi,idv_pcp
- type(file_desc_t) :: ncid
- !
- if (masterproc) write(iulog,"(/,72('-'))")
- if (masterproc) write(iulog,"('RDAMIE_NH: read AMIE data for northern hemisphere:')")
- !
- ! Open netcdf file:
- call cam_pio_openfile(ncid, amienh, pio_nowrite)
- !
- ! Get AMIE grid dimension:
- istat = pio_inq_dimid(ncid,'lon',id_lon)
- istat = pio_inquire_dimension(ncid,id_lon,len=lonp1)
- if (istat /= pio_noerr) call rpt_ncerr(istat, 'rdamie_nh: Error getting AMIE longitude dimension')
-
- istat = pio_inq_dimid(ncid,'lat',id_lat)
- istat = pio_inquire_dimension(ncid,id_lat,len=latp1)
- if (istat /= pio_noerr) call rpt_ncerr(istat, 'rdamie_nh: Error getting AMIE latitude dimension')
- ! write(iulog,"('lonp1=',i3,' latp1=',i3)") lonp1,latp1
- !
- ! Get time dimension:
- istat = pio_inquire(ncid,unlimiteddimid=id_time)
- istat = pio_inquire_dimension(ncid,id_time,len=ntimes)
- !
- ! Search for requested AMIE output fields
- istat = pio_inquire(ncid,ndims,nvars,ngatts,idunlim)
- !
- ! Get 1-D AMIE fields (ntimes)
- if (.not. allocated(year)) allocate(year(ntimes),stat=ier)
- istat = pio_inq_varid(ncid,'year',idv_year)
- istat = pio_get_var(ncid,idv_year,year)
- ! write(iulog,*)'rdamie_nh: year=', year(1:10)
- if (.not. allocated(month)) allocate(month(ntimes),stat=ier)
- istat = pio_inq_varid(ncid,'month',idv_mon)
- istat = pio_get_var(ncid,idv_mon,month)
- if (.not. allocated(day)) allocate(day(ntimes),stat=ier)
- istat = pio_inq_varid(ncid,'day',idv_day)
- istat = pio_get_var(ncid,idv_day,day)
- ! write(iulog,*)'rdamie_nh: day=', day(1:10)
- if (.not. allocated(jday)) allocate(jday(ntimes),stat=ier)
- istat = pio_inq_varid(ncid,'jday',idv_jday)
- istat = pio_get_var(ncid,idv_jday,jday)
- !
- ! Allocate 1-d fields:
- if (.not. allocated(amie_nh_ut)) &
- allocate(amie_nh_ut(ntimes),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_nh: error allocating', &
- ' amie_nh_ut: ntimes=',i3)")ntimes
- if (.not. allocated(amie_cusplat_nh)) &
- allocate(amie_cusplat_nh(ntimes),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_nh: error allocating', &
- ' amie_cusplat_nh: ntimes=',i3)")ntimes
- if (.not. allocated(amie_cuspmlt_nh)) &
- allocate(amie_cuspmlt_nh(ntimes),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_nh: error allocating', &
- ' amie_cuspmlt_nh: ntimes=',i3)")ntimes
- if (.not. allocated(amie_hpi_nh)) &
- allocate(amie_hpi_nh(ntimes),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_nh: error allocating', &
- ' amie_hpi_nh: ntimes=',i3)")ntimes
- if (.not. allocated(amie_pcp_nh)) &
- allocate(amie_pcp_nh(ntimes),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_nh: error allocating', &
- ' amie_pcp_nh: ntimes=',i3)")ntimes
- !
- ! Get ut
- istat = pio_inq_varid(ncid,'ut',idv_ut)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_nh: Error getting NH AMIE UT id')
- istat = pio_get_var(ncid,idv_ut,amie_nh_ut)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_nh: Error getting NH AMIE variable ut')
- !
- ! Get HPI
- istat = pio_inq_varid(ncid,'hpi',idv_hpi)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_nh: Error getting NH AMIE hpi id')
- istat = pio_get_var(ncid,idv_hpi,amie_hpi_nh)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_nh: Error getting NH AMIE variable hpi')
- !
- ! Get PCP
- istat = pio_inq_varid(ncid,'pcp',idv_pcp)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_nh: Error getting NH AMIE pcp id')
- istat = pio_get_var(ncid,idv_pcp,amie_pcp_nh)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_nh: Error getting NH AMIE variable pcp')
- !
- ! Get cusplat
- istat = pio_inq_varid(ncid,'cusplat',idv_cusplat)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_nh: Error getting NH AMIE cusplat id')
- istat = pio_get_var(ncid,idv_cusplat,amie_cusplat_nh)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_nh: Error getting NH AMIE variable cusplat')
- !
- ! Get cuspmlt
- istat = pio_inq_varid(ncid,'cuspmlt',idv_cuspmlt)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_nh: Error getting NH AMIE cusplat id')
- istat = pio_get_var(ncid,idv_cuspmlt,amie_cuspmlt_nh)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_nh: Error getting NH AMIE variable cuspmlt')
- !
- ! Allocate 2-d fields:
- if (.not. allocated(pot_nh_amie)) &
- allocate(pot_nh_amie(lonp1,latp1),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_nh: error allocating', &
- ' pot_nh_amie: lonp1=',i3,' latp1=',i3)")lonp1,latp1
- if (.not. allocated(ekv_nh_amie)) &
- allocate(ekv_nh_amie(lonp1,latp1),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_nh: error allocating', &
- ' ekv_nh_amie: lonp1=',i3,' latp1=',i3)")lonp1,latp1
- if (.not. allocated(efx_nh_amie)) &
- allocate(efx_nh_amie(lonp1,latp1),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_nh: error allocating', &
- ' efx_nh_amie: lonp1=',i3,' latp1=',i3)")lonp1,latp1
- !
- ! Allocate 3-d fields:
- if (.not. allocated(amie_pot_nh)) &
- allocate(amie_pot_nh(lonp1,latp1,ntimes),stat=ier)
- if (ier /= 0) WRITE(iulog,"('>>> rdamie_nh: error allocating', &
- ' amie_pot_nh: lonp1=',i3,' latp1=',i3,' ntimes=',i3)") &
- lonp1,latp1,ntimes
- if (.not. allocated(amie_ekv_nh)) &
- allocate(amie_ekv_nh(lonp1,latp1,ntimes),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_nh: error allocating', &
- ' amie_ekv_nh: lonp1=',i3,' latp1=',i3,' ntimes=',i3)") &
- lonp1,latp1,ntimes
- if (.not. allocated(amie_efx_nh)) &
- allocate(amie_efx_nh(lonp1,latp1,ntimes),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_nh: error allocating', &
- ' amie_efx_nh: lonp1=',i3,' latp1=',i3,' ntimes=',i3)") &
- lonp1,latp1,ntimes
- !
- ! Get 3-D AMIE fields (lon,lat,ntimes)
- !
- ! AMIE electric potential
- istat = pio_inq_varid(ncid,'pot',idv_pot)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_nh: Error getting NH AMIE electric potential id')
- istat = pio_get_var(ncid,idv_pot,amie_pot_nh)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_nh: Error getting NH AMIE variable pot')
- !
- ! AMIE mean energy
- istat = pio_inq_varid(ncid,'ekv',idv_ekv)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_nh: Error getting NH AMIE mean energy id')
- istat = pio_get_var(ncid,idv_ekv,amie_ekv_nh)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_nh: Error getting NH AMIE variable ekv')
- !
- ! AMIE energy flux
- istat = pio_inq_varid(ncid,'efx',idv_efx)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_nh: Error getting NH AMIE energy flux id')
- istat = pio_get_var(ncid,idv_efx,amie_efx_nh)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_nh: Error getting NH AMIE variable efx')
- !
- ! Close the file:
- call cam_pio_closefile(ncid)
- if (masterproc) &
- write(iulog,"('Completed read from NH AMIE data file ',a)") trim(amienh)
- if (masterproc) write(iulog,"(72('-'),/)")
- end subroutine rdamie_nh
- !-----------------------------------------------------------------------
- subroutine rdamie_sh(amiesh)
- !
- ! Read AMIE data for the northern hemisphere from amiesh
- !
- ! Local:
-
- character(len=*),intent(in) :: amiesh
- integer :: istat,ntimes,ndims,nvars,ngatts,idunlim,ier
- integer :: id_lon,id_lat,id_time, &
- idv_year,idv_mon,idv_day,idv_jday, &
- idv_ut,idv_pot,idv_ekv, &
- idv_efx,idv_cusplat,idv_cuspmlt,idv_hpi,idv_pcp
- type(file_desc_t) :: ncid
- !
- if (masterproc) write(iulog,"(/,72('-'))")
- if (masterproc) write(iulog,"('RDAMIE_SH: read AMIE data for northern hemisphere:')")
- !
- ! Open netcdf file:
- call cam_pio_openfile(ncid, amiesh, pio_nowrite)
- !
- ! Get AMIE grid dimension:
- istat = pio_inq_dimid(ncid,'lon',id_lon)
- istat = pio_inquire_dimension(ncid,id_lon,len=lonp1)
- if (istat /= pio_noerr) call rpt_ncerr(istat, 'rdamie_sh: Error getting AMIE longitude dimension')
-
- istat = pio_inq_dimid(ncid,'lat',id_lat)
- istat = pio_inquire_dimension(ncid,id_lat,len=latp1)
- if (istat /= pio_noerr) call rpt_ncerr(istat, 'rdamie_sh: Error getting AMIE latitude dimension')
- ! write(iulog,"('lonp1=',i3,' latp1=',i3)") lonp1,latp1
- !
- ! Get time dimension:
- istat = pio_inquire(ncid,unlimiteddimid=id_time)
- istat = pio_inquire_dimension(ncid,id_time,len=ntimes)
- !
- ! Search for requested AMIE output fields
- istat = pio_inquire(ncid,ndims,nvars,ngatts,idunlim)
- !
- ! Get 1-D AMIE fields (ntimes)
- if (.not. allocated(year)) allocate(year(ntimes),stat=ier)
- istat = pio_inq_varid(ncid,'year',idv_year)
- istat = pio_get_var(ncid,idv_year,year)
- ! write(iulog,*)'rdamie_sh: year=', year(1:10)
- if (.not. allocated(month)) allocate(month(ntimes),stat=ier)
- istat = pio_inq_varid(ncid,'month',idv_mon)
- istat = pio_get_var(ncid,idv_mon,month)
- if (.not. allocated(day)) allocate(day(ntimes),stat=ier)
- istat = pio_inq_varid(ncid,'day',idv_day)
- istat = pio_get_var(ncid,idv_day,day)
- ! write(iulog,*)'rdamie_sh: day=', day(1:10)
- if (.not. allocated(jday)) allocate(jday(ntimes),stat=ier)
- istat = pio_inq_varid(ncid,'jday',idv_jday)
- istat = pio_get_var(ncid,idv_jday,jday)
- !
- ! Allocate 1-d fields:
- if (.not. allocated(amie_sh_ut)) &
- allocate(amie_sh_ut(ntimes),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_sh: error allocating', &
- ' amie_sh_ut: ntimes=',i3)")ntimes
- if (.not. allocated(amie_cusplat_sh)) &
- allocate(amie_cusplat_sh(ntimes),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_sh: error allocating', &
- ' amie_cusplat_sh: ntimes=',i3)")ntimes
- if (.not. allocated(amie_cuspmlt_sh)) &
- allocate(amie_cuspmlt_sh(ntimes),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_sh: error allocating', &
- ' amie_cuspmlt_sh: ntimes=',i3)")ntimes
- if (.not. allocated(amie_hpi_sh)) &
- allocate(amie_hpi_sh(ntimes),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_sh: error allocating', &
- ' amie_hpi_sh: ntimes=',i3)")ntimes
- if (.not. allocated(amie_pcp_sh)) &
- allocate(amie_pcp_sh(ntimes),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_sh: error allocating', &
- ' amie_pcp_sh: ntimes=',i3)")ntimes
- !
- ! Get ut
- istat = pio_inq_varid(ncid,'ut',idv_ut)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_sh: Error getting SH AMIE UT id')
- istat = pio_get_var(ncid,idv_ut,amie_sh_ut)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_sh: Error getting SH AMIE variable ut')
- !
- ! Get HPI
- istat = pio_inq_varid(ncid,'hpi',idv_hpi)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_sh: Error getting SH AMIE hpi id')
- istat = pio_get_var(ncid,idv_hpi,amie_hpi_sh)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_sh: Error getting SH AMIE variable hpi')
- !
- ! Get PCP
- istat = pio_inq_varid(ncid,'pcp',idv_pcp)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_sh: Error getting SH AMIE pcp id')
- istat = pio_get_var(ncid,idv_pcp,amie_pcp_sh)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_sh: Error getting SH AMIE variable pcp')
- !
- ! Get cusplat
- istat = pio_inq_varid(ncid,'cusplat',idv_cusplat)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_sh: Error getting SH AMIE cusplat id')
- istat = pio_get_var(ncid,idv_cusplat,amie_cusplat_sh)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_sh: Error getting SH AMIE variable cusplat')
- !
- ! Get cuspmlt
- istat = pio_inq_varid(ncid,'cuspmlt',idv_cuspmlt)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_sh: Error getting SH AMIE cusplat id')
- istat = pio_get_var(ncid,idv_cuspmlt,amie_cuspmlt_sh)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_sh: Error getting SH AMIE variable cuspmlt')
- !
- ! Allocate 2-d fields:
- if (.not. allocated(pot_sh_amie)) &
- allocate(pot_sh_amie(lonp1,latp1),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_sh: error allocating', &
- ' pot_sh_amie: lonp1=',i3,' latp1=',i3)")lonp1,latp1
- if (.not. allocated(ekv_sh_amie)) &
- allocate(ekv_sh_amie(lonp1,latp1),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_sh: error allocating', &
- ' ekv_sh_amie: lonp1=',i3,' latp1=',i3)")lonp1,latp1
- if (.not. allocated(efx_sh_amie)) &
- allocate(efx_sh_amie(lonp1,latp1),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_sh: error allocating', &
- ' efx_sh_amie: lonp1=',i3,' latp1=',i3)")lonp1,latp1
- !
- ! Allocate 3-d fields:
- if (.not. allocated(amie_pot_sh)) &
- allocate(amie_pot_sh(lonp1,latp1,ntimes),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_sh: error allocating', &
- ' amie_pot_sh: lonp1=',i3,' latp1=',i3,' ntimes=',i3)") &
- lonp1,latp1,ntimes
- if (.not. allocated(amie_ekv_sh)) &
- allocate(amie_ekv_sh(lonp1,latp1,ntimes),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_sh: error allocating', &
- ' amie_ekv_sh: lonp1=',i3,' latp1=',i3,' ntimes=',i3)") &
- lonp1,latp1,ntimes
- if (.not. allocated(amie_efx_sh)) &
- allocate(amie_efx_sh(lonp1,latp1,ntimes),stat=ier)
- if (ier /= 0) write(iulog,"('>>> rdamie_sh: error allocating', &
- ' amie_efx_sh: lonp1=',i3,' latp1=',i3,' ntimes=',i3)") &
- lonp1,latp1,ntimes
- !
- ! Get 3-D AMIE fields (lon,lat,ntimes)
- !
- ! AMIE electric potential
- istat = pio_inq_varid(ncid,'pot',idv_pot)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_sh: Error getting SH AMIE electric potential id')
- istat = pio_get_var(ncid,idv_pot,amie_pot_sh)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_sh: Error getting SH AMIE variable pot')
- !
- ! AMIE mean energy
- istat = pio_inq_varid(ncid,'ekv',idv_ekv)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_sh: Error getting SH AMIE mean energy id')
- istat = pio_get_var(ncid,idv_ekv,amie_ekv_sh)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_sh: Error getting SH AMIE variable ekv')
- !
- ! AMIE energy flux
- istat = pio_inq_varid(ncid,'efx',idv_efx)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_sh: Error getting SH AMIE energy flux id')
- istat = pio_get_var(ncid,idv_efx,amie_efx_sh)
- if (istat /= pio_noerr) call rpt_ncerr(istat, &
- 'rdamie_sh: Error getting SH AMIE variable efx')
- !
- ! Close the file:
- call cam_pio_closefile(ncid)
- if (masterproc) &
- write(iulog,"('Completed read from SH AMIE data file ',a)") trim(amiesh)
- if (masterproc) write(iulog,"(72('-'),/)")
- end subroutine rdamie_sh
-#endif
- !-----------------------------------------------------------------------
- subroutine getamie(iyear,imo,iday,iutsec,sunlon,amie_ibkg,iprint, &
- iamie,phihm,amie_efxm,amie_kevm,crad, efxg,kevg)
- use cam_history_support, only: fillvalue
- use rgrd_mod, only: rgrd2
- !
- ! Read AMIE outputs from amie_ncfile file, returning electric potential,
- ! auroral mean energy and energy flux at current date and time,
- ! and the data is linearly interpolated to the model time
- ! gl - 12/07/2002
- !
- !
- ! Args:
-
- integer, intent(in) :: iyear
- integer, intent(in) :: imo
- integer, intent(in) :: iday
- real(r8), intent(in) :: sunlon
- integer, intent(in) :: iutsec
- integer, intent(in) :: amie_ibkg
- integer, intent(in) :: iprint
- integer, intent(inout) :: iamie
- real(r8), intent(out) :: phihm(nmlonp1,nmlat)
- real(r8), intent(out) :: amie_efxm(nmlonp1,nmlat) ! on geomag grid
- real(r8), intent(out) :: amie_kevm(nmlonp1,nmlat) ! on geomag grid
- real(r8), intent(out) :: crad(2)
- real(r8), intent(out) :: efxg(lon0:lon1,lat0:lat1) ! on geographic grid
- real(r8), intent(out) :: kevg(lon0:lon1,lat0:lat1) ! on geographic grid
-#ifdef WACCMX_EDYN_ESMF
-
- !
- ! Local:
- real(r8) :: potm(lonp1,jmxm),efxm(lonp1,jmxm),ekvm(lonp1,jmxm), &
- alat(jmxm),alon(lonp1),alatm(jmxm),alonm(lonp1)
- integer :: ier,lw,liw,intpol(2)
- integer, allocatable :: iw(:)
- real(r8),allocatable :: w(:)
- integer :: i,j
- integer :: nn, iset, iset1, m, mp1, n
- integer :: iboxcar
- real(r8) :: model_ut, denoma, f1, f2
- real(r8) :: del,xmlt,dmlat,dlatm,dlonm,dmltm,rot,dtr,rtd
- integer :: idate,bdate,edate
- real(r8) :: pi
-
- pi = 4._r8*atan(1._r8)
- dtr = pi/180._r8 ! degrees to radians
- rtd = 180._r8/pi
- !
-
- phihm = fillvalue
- amie_efxm = fillvalue
- amie_kevm = fillvalue
- efxg = fillvalue
- kevg = fillvalue
- crad = fillvalue
-
- !
- if (iprint > 0 .and. masterproc) then
- write(iulog,"(/,72('-'))")
- write(iulog,"('GETAMIE:')")
- write(iulog,"('Initial requested iyear=',i4, &
- ' iday=',i3,' iutsec=', i10)") iyear,iday,iutsec
- end if
-
- !
- ! Check times:
- !
- nn = size(amie_sh_ut)
- bdate = year(1)*10000+month(1)*100+day(1)
- edate = year(nn)*10000+month(nn)*100+day(nn)
- idate = iyear*10000+imo*100+iday
-
-    if (idate<bdate .or. idate>edate) then
- if (masterproc) write(iulog, "('getamie: Model date beyond the AMIE last Data:',3I5)") &
- year(nn),month(nn),day(nn)
- iamie = 0
- return
- endif
-
- if (iamie/=1) return
-
- model_ut = dble(iutsec)/3600._r8
-
- !
- ! interpolate AMIE data to modeltime iutsec
- ! amie_ibkg = 0 use real UT AMIE data
- ! = 1 use the first AMIE volumne as the background
- ! = 2 use the 24-hr average AMIE volumne as the background
- pot_sh_amie(:,:) = 0._r8
- ekv_sh_amie(:,:) = 0._r8
- efx_sh_amie(:,:) = 0._r8
- cusplat_sh_amie = 0._r8
- cuspmlt_sh_amie = 0._r8
- hpi_sh_amie = 0._r8
- pcp_sh_amie = 0._r8
- !
-
- iboxcar = 0
-
- if (amie_ibkg == 0) then
-
- iset = nn
- iset1 = nn
- do i=1,nn
- ! if (amie_sh_ut(i) < model_ut) iset = i
- if (amie_sh_ut(i) < model_ut+(iday-day(i))*24._r8) iset = i
- end do
- ! write(iulog,"('getamie: AMIE SH Data nn,iset,day1,day2=',4i5)")
- ! | nn,iset,jday(1),jday(nn)
- iset1 = iset + 1
- if (iset == nn) iset1 = iset
-
- denoma = amie_sh_ut(iset1) - amie_sh_ut(iset)
- if (denoma > 1._r8) then
- write(iulog, "('getamie: Finding a gap in the AMIE Data set:', &
- 'modelday, amieday =',2I5)") iday,day(n)
- iamie = 2
- return
- end if
- if (denoma == 0._r8) then
- f1 = 1._r8
- f2 = 0._r8
- else
- ! f1 = (amie_sh_ut(iset1) - model_ut)/denoma
- ! f2 = (model_ut - amie_sh_ut(iset))/denoma
- f1 = (amie_sh_ut(iset1) - (model_ut+(iday- &
- day(iset1))*24._r8))/denoma
- f2 = (model_ut+(iday-day(iset1))*24._r8 - &
- amie_sh_ut(iset))/denoma
- end if
- ! write(iulog,"('getamie: AMIE SH Data n,iset,modeltime,f1,f2 =',
- ! | 4i5,2f5.2)")n,iset,iday,day(iset1),f1,f2
- ! write(iulog,"('getamie: AMIE SH Data model_day,model_ut,amie_day,',
- ! | 'amie_ut,f1,f2,iset,iset1 =',i4,f7.1,i4,f7.1,2f5.2,2i3)")
- ! | iday,model_ut,day(iset),amie_sh_ut(iset),f1,f2,
- ! | iset,iset1
- cusplat_sh_amie = (f1*amie_cusplat_sh(iset1) + &
- f2*amie_cusplat_sh(iset))
- cuspmlt_sh_amie = (f1*amie_cuspmlt_sh(iset1) + &
- f2*amie_cuspmlt_sh(iset))
- hpi_sh_amie = (f1*amie_hpi_sh(iset1) + f2*amie_hpi_sh(iset))
- pcp_sh_amie = (f1*amie_pcp_sh(iset1) + f2*amie_pcp_sh(iset))
- if (iboxcar == 0) then
- pot_sh_amie(:,:) = (f1*amie_pot_sh(:,:,iset1) + &
- f2*amie_pot_sh(:,:,iset))
- ekv_sh_amie(:,:) = (f1*amie_ekv_sh(:,:,iset1) + &
- f2*amie_ekv_sh(:,:,iset))
- efx_sh_amie(:,:) = (f1*amie_efx_sh(:,:,iset1) + &
- f2*amie_efx_sh(:,:,iset))
- else
- call boxcar_ave(amie_pot_sh,pot_sh_amie,lonp1,latp1, &
- nn,iset,iboxcar)
- call boxcar_ave(amie_efx_sh,efx_sh_amie,lonp1,latp1, &
- nn,iset,iboxcar)
- call boxcar_ave(amie_ekv_sh,ekv_sh_amie,lonp1,latp1, &
- nn,iset,iboxcar)
- end if
- else
- if (amie_ibkg == 1) then
- pot_sh_amie(:,:) = amie_pot_sh(:,:,1)
- ekv_sh_amie(:,:) = amie_ekv_sh(:,:,1)
- efx_sh_amie(:,:) = amie_efx_sh(:,:,1)
- cusplat_sh_amie = amie_cusplat_sh(1)
- cuspmlt_sh_amie = amie_cuspmlt_sh(1)
- hpi_sh_amie = amie_hpi_sh(1)
- pcp_sh_amie = amie_pcp_sh(1)
- else if (amie_ibkg == 3) then
- pot_sh_amie(:,:) = amie_pot_sh(:,:,241)
- ekv_sh_amie(:,:) = amie_ekv_sh(:,:,241)
- efx_sh_amie(:,:) = amie_efx_sh(:,:,241)
- cusplat_sh_amie = amie_cusplat_sh(241)
- cuspmlt_sh_amie = amie_cuspmlt_sh(241)
- hpi_sh_amie = amie_hpi_sh(241)
- pcp_sh_amie = amie_pcp_sh(241)
- else
- do i=1,nn
- pot_sh_amie(:,:) = pot_sh_amie(:,:) + amie_pot_sh(:,:,1)
- ekv_sh_amie(:,:) = ekv_sh_amie(:,:) + amie_ekv_sh(:,:,1)
- efx_sh_amie(:,:) = efx_sh_amie(:,:) + amie_efx_sh(:,:,1)
- cusplat_sh_amie = cusplat_sh_amie + amie_cusplat_sh(1)
- cuspmlt_sh_amie = cuspmlt_sh_amie + amie_cuspmlt_sh(1)
- hpi_sh_amie = hpi_sh_amie + amie_hpi_sh(1)
- pcp_sh_amie = pcp_sh_amie + amie_pcp_sh(1)
- end do
- pot_sh_amie(:,:) = pot_sh_amie(:,:)/nn
- ekv_sh_amie(:,:) = ekv_sh_amie(:,:)/nn
- efx_sh_amie(:,:) = efx_sh_amie(:,:)/nn
- cusplat_sh_amie = cusplat_sh_amie/nn
- cuspmlt_sh_amie = cuspmlt_sh_amie/nn
- hpi_sh_amie = hpi_sh_amie/nn
- pcp_sh_amie = pcp_sh_amie/nn
- end if
- end if
-
- !
- ! get NH AMIE data
- pot_nh_amie(:,:) = 0._r8
- ekv_nh_amie(:,:) = 0._r8
- efx_nh_amie(:,:) = 0._r8
- cusplat_nh_amie = 0._r8
- cuspmlt_nh_amie = 0._r8
- hpi_nh_amie = 0._r8
- pcp_nh_amie = 0._r8
-
- iboxcar = 0
- ! write(iulog,"('getamie: Interpolate AMIE NH Data nn=',i3)")nn
- if (amie_ibkg == 0) then
- iset = 0
- iset1 = nn
- do i=1,nn
- if (amie_nh_ut(i) < model_ut+(iday-day(i))*24._r8) iset = i
- end do
- iset1 = iset + 1
- if (iset == 0) iset = 1
- if (iset == nn) iset1 = iset
-
- denoma = amie_nh_ut(iset1) - amie_nh_ut(iset)
- if (denoma > 1._r8) then
- write(iulog, "('getamie: Finding a gap in the AMIE Data set:', &
- 'modelday, amieday =',2I5)") iday,day(n)
- iamie = 2
- return
- end if
- if (denoma == 0._r8) then
- f1 = 1._r8
- f2 = 0._r8
- else
- ! f1 = (amie_nh_ut(iset1) - model_ut)/denoma
- ! f2 = (model_ut - amie_nh_ut(iset))/denoma
- f1 = (amie_nh_ut(iset1) - (model_ut+(iday- &
- day(iset1))*24._r8))/denoma
- f2 = (model_ut+(iday-day(iset1))*24._r8 - &
- amie_nh_ut(iset))/denoma
- end if
- ! write(iulog,"('getamie: AMIE NH Data model_day,model_ut,amie_day,',
- ! | 'amie_ut,f1,f2,iset,iset1 =',i4,f7.1,i4,f7.1,2f5.2,2i3)")
- ! | iday,model_ut,day(iset),amie_nh_ut(iset),f1,f2,
- ! | iset,iset1
- !
- cusplat_nh_amie = (f1*amie_cusplat_nh(iset1) + &
- f2*amie_cusplat_nh(iset))
- cuspmlt_nh_amie = (f1*amie_cuspmlt_nh(iset1) + &
- f2*amie_cuspmlt_nh(iset))
- hpi_nh_amie = (f1*amie_hpi_nh(iset1) + f2*amie_hpi_nh(iset))
- pcp_nh_amie = (f1*amie_pcp_nh(iset1) + f2*amie_pcp_nh(iset))
- if (iboxcar == 0) then
- pot_nh_amie(:,:) = (f1*amie_pot_nh(:,:,iset1) + &
- f2*amie_pot_nh(:,:,iset))
- ekv_nh_amie(:,:) = (f1*amie_ekv_nh(:,:,iset1) + &
- f2*amie_ekv_nh(:,:,iset))
- efx_nh_amie(:,:) = (f1*amie_efx_nh(:,:,iset1) + &
- f2*amie_efx_nh(:,:,iset))
- ! write(iulog,"('ekv_nh_amie min, max = ',2e12.4)")
- ! | minval(ekv_nh_amie),maxval(ekv_nh_amie)
- else
- call boxcar_ave(amie_pot_nh,pot_nh_amie,lonp1,latp1, &
- nn,iset,iboxcar)
- ! call fminmax(amie_pot_nh(:,:,iset),lonp1*latp1,fmin,fmax)
- ! write(iulog,"('AMIE pot max,min = ',2f8.0)")fmax,fmin
- ! call fminmax(pot_nh_amie(:,:),lonp1*latp1,fmin,fmax)
- ! write(iulog,"('boxcar_ave AMIE pot max,min= ',2f8.0)")fmax,fmin
- call boxcar_ave(amie_efx_nh,efx_nh_amie,lonp1,latp1, &
- nn,iset,iboxcar)
- ! call fminmax(amie_efx_nh(:,:,iset),lonp1*latp1,fmin,fmax)
- ! write(iulog,"('AMIE efx max,min = ',2f8.0)")fmax,fmin
- ! call fminmax(efx_nh_amie(:,:),lonp1*latp1,fmin,fmax)
- ! write(iulog,"('boxcar_ave AMIE efx max,min= ',2f8.0)")fmax,fmin
- call boxcar_ave(amie_ekv_nh,ekv_nh_amie,lonp1,latp1, &
- nn,iset,iboxcar)
- ! call fminmax(amie_ekv_nh(:,:,iset),lonp1*latp1,fmin,fmax)
- ! write(iulog,"('AMIE ekv max,min = ',2f8.0)")fmax,fmin
- ! call fminmax(ekv_nh_amie(:,:),lonp1*latp1,fmin,fmax)
- ! write(iulog,"('boxcar_ave AMIE ekv max,min= ',2f8.0)")fmax,fmin
- end if
- else
- if (amie_ibkg == 1) then
- pot_nh_amie(:,:) = amie_pot_nh(:,:,1)
- ekv_nh_amie(:,:) = amie_ekv_nh(:,:,1)
- efx_nh_amie(:,:) = amie_efx_nh(:,:,1)
- cusplat_nh_amie = amie_cusplat_nh(1)
- cuspmlt_nh_amie = amie_cuspmlt_nh(1)
- hpi_nh_amie = amie_hpi_nh(1)
- pcp_nh_amie = amie_pcp_nh(1)
- else if (amie_ibkg == 3) then
- pot_nh_amie(:,:) = amie_pot_nh(:,:,241)
- ekv_nh_amie(:,:) = amie_ekv_nh(:,:,241)
- efx_nh_amie(:,:) = amie_efx_nh(:,:,241)
- cusplat_nh_amie = amie_cusplat_nh(241)
- cuspmlt_nh_amie = amie_cuspmlt_nh(241)
- hpi_nh_amie = amie_hpi_nh(241)
- pcp_nh_amie = amie_pcp_nh(241)
- else
- do i=1,nn
- pot_nh_amie(:,:) = pot_nh_amie(:,:) + amie_pot_nh(:,:,1)
- ekv_nh_amie(:,:) = ekv_nh_amie(:,:) + amie_ekv_nh(:,:,1)
- efx_nh_amie(:,:) = efx_nh_amie(:,:) + amie_efx_nh(:,:,1)
- cusplat_nh_amie = cusplat_nh_amie + amie_cusplat_nh(1)
- cuspmlt_nh_amie = cuspmlt_nh_amie + amie_cuspmlt_nh(1)
- hpi_nh_amie = hpi_nh_amie + amie_hpi_nh(1)
- pcp_nh_amie = pcp_nh_amie + amie_pcp_nh(1)
- end do
- pot_nh_amie(:,:) = pot_nh_amie(:,:)/nn
- ekv_nh_amie(:,:) = ekv_nh_amie(:,:)/nn
- efx_nh_amie(:,:) = efx_nh_amie(:,:)/nn
- cusplat_nh_amie = cusplat_nh_amie/nn
- cuspmlt_nh_amie = cuspmlt_nh_amie/nn
- hpi_nh_amie = hpi_nh_amie/nn
- pcp_nh_amie = pcp_nh_amie/nn
- end if
- end if
- !
- ! The OLTMAX latitude also defines the co-latitude theta0, which in
- ! turn determines crit1(+2.5deg) and crit2(-12.5deg) which are used
- ! in TIE-GCM as the boundaries of the polar cap and the region of
- ! influence of the high-lat potential versus the low-lat dynamo potential
- ! Define this latitude to be between 70 and 77.5 degrees
- !
- ! if (cusplat_sh_amie > 65.0) then
- ! cusplat_sh_amie = 65.0
- ! cuspmlt_sh_amie = 11.
- ! endif
- if (cusplat_sh_amie > 75.0_r8) then
- cusplat_sh_amie = 75.0_r8
- cuspmlt_sh_amie = 11._r8
- end if
- if (cusplat_sh_amie < 60.0_r8) then
- cusplat_sh_amie = 60.0_r8
- cuspmlt_sh_amie = 11._r8
- end if
- if (cusplat_nh_amie > 75.0_r8) then
- cusplat_nh_amie = 75.0_r8
- cuspmlt_nh_amie = 11._r8
- end if
- if (cusplat_nh_amie < 60.0_r8) then
- cusplat_nh_amie = 60.0_r8
- cuspmlt_nh_amie = 11._r8
- end if
- ! cusplat_nh_amie = amin1(65.0,cusplat_nh_amie)
- if (cuspmlt_sh_amie > 12.5_r8) cuspmlt_sh_amie = 12.5_r8
- if (cuspmlt_sh_amie < 11.0_r8) cuspmlt_sh_amie = 11.0_r8
- if (cuspmlt_nh_amie > 12.5_r8) cuspmlt_nh_amie = 12.5_r8
- if (cuspmlt_nh_amie < 11.0_r8) cuspmlt_nh_amie = 11.0_r8
- crad(1) = (90._r8-cusplat_sh_amie)*pi/180._r8
- crad(2) = (90._r8-cusplat_nh_amie)*pi/180._r8
-
- ! mlongitude starts from 180 degree
- rot = sunlon*rtd
- if(rot.lt.0) rot = rot + 360._r8 ! 0 to 360 degrees
- rot = rot/15._r8 ! convert from degree to hrs
-
- dmltm = 24._r8/dble(lonmx)
- do i=1,lonp1
- xmlt = dble(i-1)*dmltm - rot + 24._r8
- xmlt = MOD(xmlt,24._r8)
- m = int(xmlt/dmltm + 1.01_r8)
- mp1 = m + 1
- if (mp1 > lonp1) mp1 = 2
- del = xmlt - (m-1)*dmltm
- ! Initialize arrays around equator
- do j=latp1+1,ithmx
- potm(i,j) = 0._r8
- potm(i,jmxm+1-j) = 0._r8
- ekvm(i,j) = (1._r8-del)*ekv_sh_amie(m,latp1) + &
- del*ekv_sh_amie(mp1,latp1)
- ekvm(i,jmxm+1-j) = (1._r8-del)*ekv_nh_amie(m,latp1) + &
- del*ekv_nh_amie(mp1,latp1)
- efxm(i,j) = 0._r8
- efxm(i,jmxm+1-j) = 0._r8
- end do
- ! Put in AMIE arrays from pole to latp1
- do j=1,latp1
- potm(i,j) = (1._r8-del)*pot_sh_amie(m,j) + &
- del*pot_sh_amie(mp1,j)
- potm(i,jmxm+1-j) = (1._r8-del)*pot_nh_amie(m,j) + &
- del*pot_nh_amie(mp1,j)
- ekvm(i,j) = (1._r8-del)*ekv_sh_amie(m,j) + &
- del*ekv_sh_amie(mp1,j)
- ekvm(i,jmxm+1-j) = (1._r8-del)*ekv_nh_amie(m,j) + &
- del*ekv_nh_amie(mp1,j)
- efxm(i,j) = (1._r8-del)*efx_sh_amie(m,j) + &
- del*efx_sh_amie(mp1,j)
- efxm(i,jmxm+1-j) = (1._r8-del)*efx_nh_amie(m,j) + &
- del*efx_nh_amie(mp1,j)
- end do
-
- end do
-
- ! Set up coeffs to go between EPOTM(IMXMP,JMNH) and TIEPOT(IMAXM,JMAXMH)
-
- ! **** SET GRID SPACING DLATM, DLONG, DLONM
- ! DMLAT=lat spacing in degrees of AMIE apex grid
- dtr = pi/180._r8
- dmlat = 180._r8 / dble(jmxm-1)
- dlatm = dmlat*dtr
- dlonm = 2._r8*pi/dble(lonmx)
- dmltm = 24._r8/dble(lonmx)
- ! ****
- ! **** SET ARRAY YLATM (LATITUDE VALUES FOR GEOMAGNETIC GRID
- ! ****
- alatm(1) = -pi/2._r8
- alat(1) = -90._r8
- alatm(jmxm) = pi/2._r8
- alat(jmxm) = 90._r8
- do i = 2,ithmx
- alat(i) = alat(i-1)+dlatm/dtr
- alat(jmxm+1-i) = alat(jmxm+2-i)-dlatm/dtr
- alatm(i) = alatm(i-1)+dlatm
- alatm(jmxm+1-i) = alatm(jmxm+2-i)-dlatm
- end do
- alon(1) = -pi/dtr
- alonm(1) = -pi
- do i=2,lonp1
- alon(i) = alon(i-1) + dlonm/dtr
- alonm(i) = alonm(i-1) + dlonm
- end do
-
- ! ylatm and ylonm are arrays of latitudes and longitudes of the
- ! distored magnetic grids in radian - from consdyn.h
- ! Convert from apex magnetic grid to distorted magnetic grid
- !
- ! Allocate workspace for regrid routine rgrd2.f:
- lw = nmlonp1+nmlat+2*nmlonp1
- if (.not. allocated(w)) allocate(w(lw),stat=ier)
- IF (ier /= 0) WRITE(iulog,"('>>> horizontal_interp: error allocating', &
- ' w(lw): lw=',i6,' ier=',i4)") lw,ier
- liw = nmlonp1 + nmlat
- if (.not. allocated(iw)) allocate(iw(liw),stat=ier)
- if (ier /= 0) write(iulog,"('>>> horzontal_interp: error allocating', &
- ' iw(liw): liw=',i6,' ier=',i4)") liw,ier
- intpol(:) = 1 ! linear (not cubic) interp in both dimensions
- if (alatm(1) > ylatm(1)) alatm(1) = ylatm(1)
- if (alatm(jmxm) < ylatm(nmlat)) alatm(jmxm) = ylatm(nmlat)
- if (alonm(1) > ylonm(1)) alonm(1) = ylonm(1)
- if (alonm(lonp1) < ylonm(nmlonp1)) alonm(lonp1) = ylonm(nmlonp1)
- ! write(iulog,"(' AMIE: ylatm =',/,(6e12.4))") ylatm
- ! write(iulog,"(' AMIE: ylonm =',/,(6e12.4))") ylonm
- ! write(iulog,"(' AMIE: potm(1,:) =',/,(6e12.4))") potm(1,:)
- ! ylatm from -pi/2 to pi/2, and ylonm from -pi to pi
- call rgrd2(lonp1,jmxm,alonm,alatm,potm,nmlonp1,nmlat, &
- ylonm,ylatm,tiepot,intpol,w,lw,iw,liw,ier)
- call rgrd2(lonp1,jmxm,alonm,alatm,ekvm,nmlonp1,nmlat, &
- ylonm,ylatm,tieekv,intpol,w,lw,iw,liw,ier)
- call rgrd2(lonp1,jmxm,alonm,alatm,efxm,nmlonp1,nmlat, &
- ylonm,ylatm,tieefx,intpol,w,lw,iw,liw,ier)
- ! write(iulog,"(' AMIE: tiepot(1,:) =',/,(6e12.4))") tiepot(1,:)
- phihm(:,:) = tiepot(:,:)
- amie_efxm(:,:) = tieefx(:,:)
- amie_kevm(:,:) = tieekv(:,:)
-
- ! Convert from WACCM-X distorted magnetic grid to geographic one
- ! call mag2geo(tiepot(1,1),potg(1,0),im(1,0),jm(1,0),
- ! | dim(1,0),djm(1,0),nlonp1,nmlonp1,nlon,nlat+2,nmlon,nmlat)
- ! call mag2geo(tieekv(1,1),ekvg(1,0),im(1,0),jm(1,0),
- ! | dim(1,0),djm(1,0),nlonp1,nmlonp1,nlon,nlat+2,nmlon,nmlat)
- ! call mag2geo(tieefx(1,1),efxg(1,0),im(1,0),jm(1,0),
- ! | dim(1,0),djm(1,0),nlonp1,nmlonp1,nlon,nlat+2,nmlon,nmlat)
-
- call mag2geo_2d(amie_efxm(mlon0:mlon1,mlat0:mlat1), &
- efxg, mag_efx,geo_efx,'MEFXAMIE')
- call mag2geo_2d(amie_kevm(mlon0:mlon1,mlat0:mlat1), &
- kevg, mag_kev,geo_kev,'MKEVAMIE')
-
- ! call mag2geo_2d(amie_kevm,amie_kevg,mag_kev,geo_kev,'KEVM')
- if (iprint > 0 .and. masterproc) write(iulog,*) 'Max,min amie_efxm = ', &
- maxval(amie_efxm),minval(amie_efxm)
- if (iprint > 0 .and. masterproc) write(iulog,*) &
- 'Max,min efxg = ',maxval(efxg),minval(efxg)
- ! ****
- ! **** INSERT PERIODIC POINTS
- ! ****
- ! DO j = 1,nlat
- ! ekvg(nlonp1,j) = ekvg(1,j)
- ! efxg(nlonp1,j) = efxg(1,j)
- ! potg(nlonp1,j) = potg(1,j)
- ! ENDDO
- !
- if (iprint > 0 .and. masterproc) then
- write(iulog, "('getamie: AMIE data interpolated to date and time')")
- write(iulog,"('getamie: iyear,imo,iday,iutsec = ',3i6,i10)") &
- iyear,imo,iday,iutsec
- write(iulog,"('getamie: AMIE iset f1,f2,year,mon,day,ut = ', &
- i6,2F9.5,3I6,f10.4)") &
- iset,f1,f2,year(iset),month(iset),day(iset),amie_nh_ut(iset)
- write(iulog,*)'getamie: max,min phihm= ', maxval(phihm),minval(phihm)
- ! write(iulog,*)'getamie: max,min phihm,amie_efx,amie_kev = ',
- ! | maxval(phihm),minval(tiepot),maxval(amie_efx),
- ! | minval(amie_efx),maxval(amie_kev),minval(amie_kev)
- end if
-#else
- call endrun('Cannot use AMIE without electro-dynamo active.')
-#endif
- end subroutine getamie
-#ifdef WACCMX_EDYN_ESMF
- !-------------------------------------------------------------------
- subroutine boxcar_ave(x,y,lon,lat,mtime,itime,ibox)
- !
- ! perform boxcar average
- !
- ! Args:
- integer, intent(in) :: lon
- integer, intent(in) :: lat
- integer, intent(in) :: mtime
- integer, intent(in) :: itime
- integer, intent(in) :: ibox
- real(r8), intent(in) :: x(lon,lat,mtime)
- real(r8), intent(out) :: y(lon,lat)
-
- ! Local:
- integer :: i, iset, iset1
- !
- iset = itime - ibox/2
- if (iset < 1) iset = 1
- iset1 = iset + ibox
- if (iset1 > mtime) then
- iset1 = mtime
- iset = iset1 - ibox
- end if
- ! write(iulog,"('boxcar_ave: mtime,itime,ibox',3i5)")
- ! | mtime,itime,ibox
- !
- y(:,:) = 0._r8
- do i=iset,iset1
- y(:,:) = y(:,:) + x(:,:,i)
- end do
- if (ibox > 0) y(:,:) = y(:,:)/ibox
- !
- end subroutine boxcar_ave
- !-----------------------------------------------------------------------
- subroutine mag2geo(am,ag,im,jm,dim,djm,lg,lm,nlong,nlatg)
- !
- ! Args:
- integer, intent(in) :: lg
- integer, intent(in) :: lm
- real(r8), intent(in) :: am(lm,*)
- real(r8), intent(out) :: ag(lg,*)
- integer, intent(in) :: im(lg,*)
- integer, intent(in) :: jm(lg,*)
- real(r8), intent(in) :: dim(lg,*)
- real(r8), intent(in) :: djm(lg,*)
- integer, intent(in) :: nlong
- integer, intent(in) :: nlatg
- !
- ! Local:
- integer :: ig,jg
- !
- do jg=1,nlatg
- do ig=1,nlong
- ag(ig,jg) = &
- am(im(ig,jg) ,jm(ig,jg)) *(1._r8-dim(ig,jg))*(1._r8-djm(ig,jg))+ &
- am(im(ig,jg)+1,jm(ig,jg)) * dim(ig,jg) *(1._r8-djm(ig,jg))+ &
- am(im(ig,jg) ,jm(ig,jg)+1)*(1._r8-dim(ig,jg))*djm(ig,jg)+ &
- am(im(ig,jg)+1,jm(ig,jg)+1)* dim(ig,jg) *djm(ig,jg)
- end do ! ig=1,nlong
- end do ! jg=1,nlatg
- end subroutine mag2geo
- !-----------------------------------------------------------------------
- subroutine mag2geo_2d(fmag,fgeo,ESMF_mag,ESMF_geo,fname)
- !
- ! Convert field on geomagnetic grid fmag to geographic grid in fgeo.
- !
- use edyn_esmf,only: edyn_esmf_set2d_mag,edyn_esmf_regrid, &
- edyn_esmf_get_2dfield
- !
- ! Args:
- real(r8), intent(in) :: fmag(mlon0:mlon1,mlat0:mlat1)
- real(r8), intent(out) :: fgeo(lon0:lon1,lat0:lat1)
- type(ESMF_Field), intent(inout) :: ESMF_mag, ESMF_geo
- character(len=*), intent(in) :: fname
- !
- ! Local:
- integer :: j
- character (len=8) :: fnames(1)
- type(ESMF_Field) :: magfields(1)
- real(r8),pointer,dimension(:,:) :: fptr
-
- fgeo = finit
- fnames(1) = fname
- magfields(1) = ESMF_mag
- !
- ! Put fmag into ESMF mag field on mag source grid:
- call edyn_esmf_set2d_mag(magfields,fnames,fmag,1, &
- mlon0,mlon1,mlat0,mlat1)
- !
- ! Regrid to geographic destination grid, defining ESMF_geo:
- call edyn_esmf_regrid(ESMF_mag,ESMF_geo,'mag2geo',2)
- !
- ! Put regridded geo field into pointer:
- call edyn_esmf_get_2dfield(ESMF_geo,fptr,fname)
- ! write(iulog,*) 'mag2geo: Max,min fptr = ',maxval(fptr),minval(fptr)
- !
- ! Transfer from pointer to output arg:
- do j=lat0,lat1
- fgeo(:,j) = fptr(:,j)
- end do
- ! write(iulog,*) 'mag2geo: max,min fmag = ',maxval(fmag),minval(fmag)
- ! write(iulog,*) 'mag2geo: max,min fgeo = ',maxval(fgeo),minval(fgeo)
- end subroutine mag2geo_2d
- !-----------------------------------------------------------------------
- subroutine rpt_ncerr(istat,msg)
- !
- ! Handle a netcdf lib error:
- !
- integer, intent(in) :: istat
- character(len=*),intent(in) :: msg
- !
- write(iulog,"(/72('-'))")
- write(iulog,"('>>> Error from netcdf library:')")
- write(iulog,"(a)") trim(msg)
-
- write(iulog,"('istat=',i5)") istat
- write(iulog,"(72('-')/)")
- return
- end subroutine rpt_ncerr
-
-#endif
-
-end module amie_module
diff --git a/src/ionosphere/waccmx/amie_module.F90 b/src/ionosphere/waccmx/amie_module.F90
new file mode 100644
index 0000000000..6a906cbd5b
--- /dev/null
+++ b/src/ionosphere/waccmx/amie_module.F90
@@ -0,0 +1,867 @@
+module amie_module
+ !
+ ! Module used to read data from the AMIE outputs (POT,mean energy,
+ ! and energy flux).
+ !
+
+ use shr_kind_mod, only: r8 => shr_kind_r8, cl => shr_kind_cl
+ use cam_logfile, only: iulog
+ use spmd_utils, only: masterproc
+ use edyn_maggrid, only: nmlat, nmlonp1
+ use edyn_maggrid, only: ylonm ! magnetic longitudes (nmlonp1) (radians)
+ use edyn_maggrid, only: ylatm ! magnetic latitudes (nmlat) (radians)
+ use cam_pio_utils, only: cam_pio_openfile, cam_pio_closefile
+ use pio, only: pio_inq_dimid, pio_inquire_dimension
+ use pio, only: pio_inquire, pio_inq_varid
+ use pio, only: file_desc_t, pio_noerr, pio_nowrite, pio_get_var
+ use utils_mod, only: check_ncerr, check_alloc, boxcar_ave
+ use edyn_mpi, only: ntask, mytid
+ use edyn_params, only: pi, dtr, rtd
+
+ implicit none
+
+ private
+ public :: init_amie
+ public :: getamie
+
+ ! Define parameters for AMIE input data file:
+ integer, parameter :: &
+ ithmx = 55, & ! maximum number of latitudes of AMIE data
+ jmxm = 2*ithmx-1, & ! maximum number of global latitudes
+ lonmx = 36 ! maximum number of longitudes of AMIE data
+ integer :: lonp1,latp1
+ !
+ ! Define fields for AMIE input data file:
+ ! electric potential in Volt
+ ! mean energy in KeV
+ ! energy flux in W/m^2
+ ! cusplat_nh_input(sh) and cuspmlt_nh_input(sh) are
+ ! AMIE cusp latitude and MLT in NH and SH
+ ! hpi_nh(sh) are AMIE hemi-integrated power
+ ! pcp_nh(sh) are AMIE polar-cap potential drop
+ ! Time interpolated AMIE outputs with suffix _amie
+ !
+ real(r8), allocatable, dimension(:,:,:), save :: & ! (lonp1,latp1,ntimes)
+ pot_nh_input, pot_sh_input, &
+ ekv_nh_input, ekv_sh_input, &
+ efx_nh_input, efx_sh_input
+ real(r8), allocatable, dimension(:,:), save :: & ! (lonp1,latp1)
+ pot_nh_amie, pot_sh_amie, ekv_nh_amie, ekv_sh_amie, &
+ efx_nh_amie, efx_sh_amie
+ integer, allocatable, dimension(:), save :: & ! (ntimes)
+ year, month, day, jday
+ real(r8), allocatable, dimension(:), save :: & ! (ntimes)
+ cusplat_nh_input, cuspmlt_nh_input, hpi_nh_input, &
+ pcp_nh_input, amie_nh_ut, &
+ cusplat_sh_input, cuspmlt_sh_input, hpi_sh_input, &
+ pcp_sh_input, amie_sh_ut
+ real(r8) :: &
+ cusplat_nh_amie, cuspmlt_nh_amie, cusplat_sh_amie, &
+ cuspmlt_sh_amie, hpi_sh_amie, hpi_nh_amie, pcp_sh_amie, &
+ pcp_nh_amie
+ !
+ type(file_desc_t) :: ncid_nh
+ type(file_desc_t) :: ncid_sh
+
+ character(len=cl), allocatable :: amienh_files(:)
+ character(len=cl), allocatable :: amiesh_files(:)
+ integer :: num_files, file_ndx
+
+contains
+
+ !-----------------------------------------------------------------------
+ subroutine init_amie(amienh_list,amiesh_list)
+
+ character(len=*),intent(in) :: amienh_list(:)
+ character(len=*),intent(in) :: amiesh_list(:)
+
+ integer :: n, nfiles
+
+ nfiles = min( size(amienh_list), size(amiesh_list) )
+ num_files = 0
+
+ count_files: do n = 1,nfiles
+ if (len_trim(amienh_list(n))<1 .or. len_trim(amiesh_list(n))<1 .or. &
+ trim(amienh_list(n))=='NONE' .or. trim(amiesh_list(n))=='NONE') then
+ exit count_files
+ else
+ num_files = num_files + 1
+ end if
+ end do count_files
+
+ allocate(amienh_files(num_files), amiesh_files(num_files))
+ amienh_files(:num_files) = amienh_list(:num_files)
+ amiesh_files(:num_files) = amiesh_list(:num_files)
+ file_ndx = 1
+ call open_files()
+
+ end subroutine init_amie
+
+ !-----------------------------------------------------------------------
+ subroutine rdamie_nh(amienh)
+ !
+ ! Read AMIE data for the northern hemisphere from amienh
+ !
+
+ ! Dummy argument
+ character(len=*), intent(in) :: amienh
+ ! Local variables:
+ integer :: istat, ntimes, ndims, nvars, ngatts
+ integer :: idunlim, ier
+ integer :: id_lon, id_lat, id_time
+ integer :: idv_year, idv_mon, idv_day, idv_jday
+ integer :: idv_ut, idv_cusplat, idv_cuspmlt
+ integer :: idv_hpi, idv_pcp
+ character(len=*), parameter :: subname = 'rdamie_nh'
+ !
+ if (masterproc) then
+ write(iulog, "(/,72('-'))")
+ write(iulog, "(a,': read AMIE data for northern hemisphere:')") subname
+ end if
+ !
+ ! Open netcdf file:
+ call cam_pio_openfile(ncid_nh, amienh, pio_nowrite)
+ !
+ ! Get AMIE grid dimension:
+ istat = pio_inq_dimid(ncid_nh, 'lon', id_lon)
+ istat = pio_inquire_dimension(ncid_nh, id_lon, len=lonp1)
+ call check_ncerr(istat, subname, 'AMIE longitude dimension')
+
+ istat = pio_inq_dimid(ncid_nh, 'lat', id_lat)
+ istat = pio_inquire_dimension(ncid_nh, id_lat, len=latp1)
+ call check_ncerr(istat, subname, 'AMIE latitude dimension')
+ !
+ ! Get time dimension:
+ istat = pio_inquire(ncid_nh, unlimiteddimid=id_time)
+ istat = pio_inquire_dimension(ncid_nh, id_time, len=ntimes)
+ call check_ncerr(istat, subname, 'AMIE time dimension')
+ !
+ ! Search for requested AMIE output fields
+ istat = pio_inquire(ncid_nh, ndims, nvars, ngatts, idunlim)
+ !
+ ! Get 1-D AMIE fields (ntimes)
+ if (.not. allocated(year)) then
+ allocate(year(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'year', ntimes=ntimes)
+ end if
+ istat = pio_inq_varid(ncid_nh, 'year', idv_year)
+ call check_ncerr(istat, subname, 'AMIE year id')
+ istat = pio_get_var(ncid_nh, idv_year, year)
+ call check_ncerr(istat, subname, 'AMIE year')
+
+ if (.not. allocated(month)) then
+ allocate(month(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'month', ntimes=ntimes)
+ end if
+ istat = pio_inq_varid(ncid_nh, 'month', idv_mon)
+ call check_ncerr(istat, subname, 'AMIE month id')
+ istat = pio_get_var(ncid_nh, idv_mon, month)
+ call check_ncerr(istat, subname, 'AMIE month')
+ if (.not. allocated(day)) then
+ allocate(day(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'day', ntimes=ntimes)
+ end if
+ istat = pio_inq_varid(ncid_nh, 'day', idv_day)
+ call check_ncerr(istat, subname, 'AMIE day id')
+ istat = pio_get_var(ncid_nh, idv_day, day)
+ call check_ncerr(istat, subname, 'AMIE day')
+
+ if (.not. allocated(jday)) then
+ allocate(jday(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'jday', ntimes=ntimes)
+ end if
+ istat = pio_inq_varid(ncid_nh, 'jday', idv_jday)
+ call check_ncerr(istat, subname, 'AMIE jday id')
+ istat = pio_get_var(ncid_nh, idv_jday, jday)
+ call check_ncerr(istat, subname, 'AMIE jday')
+ !
+ ! Allocate 1-d fields:
+ if (.not. allocated(amie_nh_ut)) then
+ allocate(amie_nh_ut(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'amie_nh_ut', ntimes=ntimes)
+ end if
+ if (.not. allocated(cusplat_nh_input)) then
+ allocate(cusplat_nh_input(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'cusplat_nh_input', ntimes=ntimes)
+ end if
+ if (.not. allocated(cuspmlt_nh_input)) then
+ allocate(cuspmlt_nh_input(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'cuspmlt_nh_input', ntimes=ntimes)
+ end if
+ if (.not. allocated(hpi_nh_input)) then
+ allocate(hpi_nh_input(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'hpi_nh_input', ntimes=ntimes)
+ end if
+ if (.not. allocated(pcp_nh_input)) then
+ allocate(pcp_nh_input(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'pcp_nh_input', ntimes=ntimes)
+ end if
+ !
+ ! Get ut
+ istat = pio_inq_varid(ncid_nh, 'ut', idv_ut)
+ call check_ncerr(istat, subname, 'AMIE ut id')
+ istat = pio_get_var(ncid_nh, idv_ut, amie_nh_ut)
+ call check_ncerr(istat, subname, 'AMIE ut')
+ !
+ ! Get HPI
+ istat = pio_inq_varid(ncid_nh, 'hpi', idv_hpi)
+ call check_ncerr(istat, subname, 'AMIE hpi id')
+ istat = pio_get_var(ncid_nh, idv_hpi, hpi_nh_input)
+ call check_ncerr(istat, subname, 'AMIE hpi')
+ !
+ ! Get PCP
+ istat = pio_inq_varid(ncid_nh, 'pcp', idv_pcp)
+ call check_ncerr(istat, subname, 'AMIE pcp id')
+ istat = pio_get_var(ncid_nh, idv_pcp, pcp_nh_input)
+ call check_ncerr(istat, subname, 'AMIE pcp')
+ !
+ ! Get cusplat
+ istat = pio_inq_varid(ncid_nh, 'cusplat', idv_cusplat)
+ call check_ncerr(istat, subname, 'AMIE cusplat id')
+ istat = pio_get_var(ncid_nh, idv_cusplat, cusplat_nh_input)
+ call check_ncerr(istat, subname, 'AMIE cusplat')
+ !
+ ! Get cuspmlt
+ istat = pio_inq_varid(ncid_nh, 'cuspmlt', idv_cuspmlt)
+ call check_ncerr(istat, subname, 'AMIE cuspmlt id')
+ istat = pio_get_var(ncid_nh, idv_cuspmlt, cuspmlt_nh_input)
+ call check_ncerr(istat, subname, 'AMIE cuspmlt')
+ !
+ ! Allocate 2-d fields:
+ if (.not. allocated(pot_nh_amie)) then
+ allocate(pot_nh_amie(lonp1, latp1), stat=ier)
+ call check_alloc(ier, subname, 'pot_nh_amie', lonp1=lonp1, latp1=latp1)
+ end if
+ if (.not. allocated(ekv_nh_amie)) then
+ allocate(ekv_nh_amie(lonp1, latp1), stat=ier)
+ call check_alloc(ier, subname, 'ekv_nh_amie', lonp1=lonp1, latp1=latp1)
+ end if
+ if (.not. allocated(efx_nh_amie)) then
+ allocate(efx_nh_amie(lonp1, latp1), stat=ier)
+ call check_alloc(ier, subname, 'efx_nh_amie', lonp1=lonp1, latp1=latp1)
+ end if
+ !
+ ! Allocate 3-d fields:
+ if (.not. allocated(pot_nh_input)) then
+ allocate(pot_nh_input(lonp1, latp1, ntimes), stat=ier)
+ call check_alloc(ier, subname, 'pot_nh_input', &
+ lonp1=lonp1, latp1=latp1, ntimes=ntimes)
+ end if
+ if (.not. allocated(ekv_nh_input)) then
+ allocate(ekv_nh_input(lonp1, latp1, ntimes), stat=ier)
+ call check_alloc(ier, subname, 'ekv_nh_input', &
+ lonp1=lonp1, latp1=latp1, ntimes=ntimes)
+ end if
+ if (.not. allocated(efx_nh_input)) then
+ allocate(efx_nh_input(lonp1, latp1, ntimes), stat=ier)
+ call check_alloc(ier, subname, 'efx_nh_input', &
+ lonp1=lonp1, latp1=latp1, ntimes=ntimes)
+ end if
+ end subroutine rdamie_nh
+
+ !-----------------------------------------------------------------------
+ subroutine rdamie_sh(amiesh)
+ !
+ ! Read AMIE data for the southern hemisphere from amiesh
+ !
+
+ ! Dummy argument
+ character(len=*), intent(in) :: amiesh
+ ! Local variables:
+ integer :: istat, ntimes, ndims, nvars, ngatts, ier
+ integer :: idunlim
+ integer :: id_lon, id_lat, id_time
+ integer :: idv_year, idv_mon, idv_day, idv_jday
+ integer :: idv_ut
+ integer :: idv_cusplat, idv_cuspmlt, idv_hpi, idv_pcp
+ character(len=*), parameter :: subname = 'rdamie_sh'
+ !
+ if (masterproc) then
+ write(iulog, "(/, 72('-'))")
+ write(iulog, "(a, ': read AMIE data for southern hemisphere:')") subname
+ end if
+ !
+ ! Open netcdf file:
+ call cam_pio_openfile(ncid_sh, amiesh, pio_nowrite)
+ !
+ ! Get AMIE grid dimension:
+ istat = pio_inq_dimid(ncid_sh, 'lon', id_lon)
+ istat = pio_inquire_dimension(ncid_sh, id_lon, len=lonp1)
+ call check_ncerr(istat, subname, 'AMIE longitude dimension')
+
+ istat = pio_inq_dimid(ncid_sh, 'lat', id_lat)
+ istat = pio_inquire_dimension(ncid_sh, id_lat, len=latp1)
+ call check_ncerr(istat, subname, 'AMIE latitude dimension')
+ !
+ ! Get time dimension:
+ istat = pio_inquire(ncid_sh, unlimiteddimid=id_time)
+ istat = pio_inquire_dimension(ncid_sh, id_time, len=ntimes)
+ call check_ncerr(istat, subname, 'AMIE time dimension')
+ !
+ ! Search for requested AMIE output fields
+ istat = pio_inquire(ncid_sh, ndims, nvars, ngatts, idunlim)
+ !
+ ! Get 1-D AMIE fields (ntimes)
+ if (.not. allocated(year)) then
+ allocate(year(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'year', ntimes=ntimes)
+ end if
+ istat = pio_inq_varid(ncid_sh, 'year', idv_year)
+ call check_ncerr(istat, subname, 'AMIE year id')
+ istat = pio_get_var(ncid_sh, idv_year, year)
+ call check_ncerr(istat, subname, 'AMIE year')
+
+ if (.not. allocated(month)) then
+ allocate(month(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'month', ntimes=ntimes)
+ end if
+ istat = pio_inq_varid(ncid_sh, 'month', idv_mon)
+ call check_ncerr(istat, subname, 'AMIE month id')
+ istat = pio_get_var(ncid_sh, idv_mon, month)
+ call check_ncerr(istat, subname, 'AMIE month')
+ if (.not. allocated(day)) then
+ allocate(day(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'day', ntimes=ntimes)
+ end if
+ istat = pio_inq_varid(ncid_sh, 'day', idv_day)
+ call check_ncerr(istat, subname, 'AMIE day id')
+ istat = pio_get_var(ncid_sh, idv_day, day)
+ call check_ncerr(istat, subname, 'AMIE day')
+
+ if (.not. allocated(jday)) then
+ allocate(jday(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'jday', ntimes=ntimes)
+ end if
+ istat = pio_inq_varid(ncid_sh, 'jday', idv_jday)
+ call check_ncerr(istat, subname, 'AMIE jday id')
+ istat = pio_get_var(ncid_sh, idv_jday, jday)
+ call check_ncerr(istat, subname, 'AMIE jday')
+ !
+ ! Allocate 1-d fields:
+ if (.not. allocated(amie_sh_ut)) then
+ allocate(amie_sh_ut(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'amie_sh_ut', ntimes=ntimes)
+ end if
+ if (.not. allocated(cusplat_sh_input)) then
+ allocate(cusplat_sh_input(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'cusplat_sh_input', ntimes=ntimes)
+ end if
+ if (.not. allocated(cuspmlt_sh_input)) then
+ allocate(cuspmlt_sh_input(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'cuspmlt_sh_input', ntimes=ntimes)
+ end if
+ if (.not. allocated(hpi_sh_input)) then
+ allocate(hpi_sh_input(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'hpi_sh_input', ntimes=ntimes)
+ end if
+ if (.not. allocated(pcp_sh_input)) then
+ allocate(pcp_sh_input(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'pcp_sh_input', ntimes=ntimes)
+ end if
+ !
+ ! Get ut
+ istat = pio_inq_varid(ncid_sh, 'ut', idv_ut)
+ call check_ncerr(istat, subname, 'AMIE ut id')
+ istat = pio_get_var(ncid_sh, idv_ut, amie_sh_ut)
+ call check_ncerr(istat, subname, 'AMIE ut')
+ !
+ ! Get HPI
+ istat = pio_inq_varid(ncid_sh, 'hpi', idv_hpi)
+ call check_ncerr(istat, subname, 'AMIE hpi id')
+ istat = pio_get_var(ncid_sh, idv_hpi, hpi_sh_input)
+ call check_ncerr(istat, subname, 'AMIE hpi')
+ !
+ ! Get PCP
+ istat = pio_inq_varid(ncid_sh, 'pcp', idv_pcp)
+ call check_ncerr(istat, subname, 'AMIE pcp id')
+ istat = pio_get_var(ncid_sh, idv_pcp, pcp_sh_input)
+ call check_ncerr(istat, subname, 'AMIE pcp')
+ !
+ ! Get cusplat
+ istat = pio_inq_varid(ncid_sh, 'cusplat', idv_cusplat)
+ call check_ncerr(istat, subname, 'AMIE cusplat id')
+ istat = pio_get_var(ncid_sh, idv_cusplat, cusplat_sh_input)
+ call check_ncerr(istat, subname, 'AMIE cusplat')
+ !
+ ! Get cuspmlt
+ istat = pio_inq_varid(ncid_sh, 'cuspmlt', idv_cuspmlt)
+ call check_ncerr(istat, subname, 'AMIE cuspmlt id')
+ istat = pio_get_var(ncid_sh, idv_cuspmlt, cuspmlt_sh_input)
+ call check_ncerr(istat, subname, 'AMIE cuspmlt')
+ !
+ ! Allocate 2-d fields:
+ if (.not. allocated(pot_sh_amie)) then
+ allocate(pot_sh_amie(lonp1, latp1), stat=ier)
+ call check_alloc(ier, subname, 'pot_sh_amie', lonp1=lonp1, latp1=latp1)
+ end if
+ if (.not. allocated(ekv_sh_amie)) then
+ allocate(ekv_sh_amie(lonp1, latp1), stat=ier)
+ call check_alloc(ier, subname, 'ekv_sh_amie', lonp1=lonp1, latp1=latp1)
+ end if
+ if (.not. allocated(efx_sh_amie)) then
+ allocate(efx_sh_amie(lonp1, latp1), stat=ier)
+ call check_alloc(ier, subname, 'efx_sh_amie', lonp1=lonp1, latp1=latp1)
+ end if
+ !
+ ! Allocate 3-d fields:
+ if (.not. allocated(pot_sh_input)) then
+ allocate(pot_sh_input(lonp1, latp1, ntimes), stat=ier)
+ call check_alloc(ier, subname, 'pot_sh_input', &
+ lonp1=lonp1, latp1=latp1, ntimes=ntimes)
+ end if
+ if (.not. allocated(ekv_sh_input)) then
+ allocate(ekv_sh_input(lonp1, latp1, ntimes), stat=ier)
+ call check_alloc(ier, subname, 'ekv_sh_input', &
+ lonp1=lonp1, latp1=latp1, ntimes=ntimes)
+ end if
+ if (.not. allocated(efx_sh_input)) then
+ allocate(efx_sh_input(lonp1, latp1, ntimes), stat=ier)
+ call check_alloc(ier, subname, 'efx_sh_input', &
+ lonp1=lonp1, latp1=latp1, ntimes=ntimes)
+ end if
+ end subroutine rdamie_sh
+
+ !-----------------------------------------------------------------------
+ subroutine update_3d_fields( ncid, offset, kount, pot_3d,ekv_3d,efx_3d )
+
+ type(file_desc_t), intent(in) :: ncid
+ integer, intent(in) :: offset(:)
+ integer, intent(in) :: kount(:)
+ real(r8),intent(out) :: pot_3d(:,:,:)
+ real(r8),intent(out) :: ekv_3d(:,:,:)
+ real(r8),intent(out) :: efx_3d(:,:,:)
+
+
+ integer :: istat
+ integer :: idv_pot, idv_ekv, idv_efx
+ character(len=*), parameter :: subname = 'update_3d_fields'
+
+ !
+ ! Get 3-D fields (lon,lat,ntimes)
+ !
+ ! electric potential
+ istat = pio_inq_varid(ncid, 'pot', idv_pot)
+ call check_ncerr(istat, subname, 'AMIE pot id')
+ istat = pio_get_var(ncid, idv_pot, offset, kount, pot_3d)
+ call check_ncerr(istat, subname, 'AMIE pot')
+ !
+ ! mean energy
+ istat = pio_inq_varid(ncid, 'ekv', idv_ekv)
+ call check_ncerr(istat, subname, 'AMIE ekv id')
+ istat = pio_get_var(ncid, idv_ekv, offset, kount, ekv_3d)
+ call check_ncerr(istat, subname, 'AMIE ekv')
+ !
+ ! energy flux
+ istat = pio_inq_varid(ncid, 'efx', idv_efx)
+ call check_ncerr(istat, subname, 'AMIE efx id')
+ istat = pio_get_var(ncid, idv_efx, offset, kount, efx_3d)
+ call check_ncerr(istat, subname, 'AMIE efx')
+
+ end subroutine update_3d_fields
+
+ !-----------------------------------------------------------------------
+ subroutine getamie(iyear, imo, iday, iutsec, sunlon, iprint, &
+ iamie, phihm, amie_efxm, amie_kevm, crad)
+ use cam_history_support, only: fillvalue
+ use rgrd_mod, only: rgrd2
+
+ !
+ ! Read AMIE outputs from amie_ncfile file, returning electric potential,
+ ! auroral mean energy and energy flux at current date and time,
+ ! and the data is linearly interpolated to the model time
+ ! gl - 12/07/2002
+ !
+ !
+ ! Args:
+
+ integer, intent(in) :: iyear
+ integer, intent(in) :: imo
+ integer, intent(in) :: iday
+ real(r8), intent(in) :: sunlon
+ integer, intent(in) :: iutsec
+ integer, intent(in) :: iprint
+ integer, intent(out) :: iamie
+ real(r8), intent(out) :: phihm(nmlonp1,nmlat)
+ real(r8), intent(out) :: amie_efxm(nmlonp1,nmlat) ! on geomag grid
+ real(r8), intent(out) :: amie_kevm(nmlonp1,nmlat) ! on geomag grid
+ real(r8), intent(out) :: crad(2)
+ !
+ !
+ ! Local:
+ real(r8) :: potm(lonp1,jmxm)
+ real(r8) :: efxm(lonp1,jmxm), ekvm(lonp1,jmxm)
+ real(r8) :: alat(jmxm), alon(lonp1)
+ real(r8) :: alatm(jmxm), alonm(lonp1)
+ integer :: ier, lw, liw, intpol(2)
+ integer, allocatable :: iw(:)
+ real(r8), allocatable :: w(:)
+ integer :: i, j
+ integer :: nn, iset, iset1, m, mp1, n
+ integer :: iboxcar
+ integer :: idate, bdate, edate
+ real(r8) :: model_ut, denoma, f1, f2
+ real(r8) :: del, xmlt, dmlat, dlatm, dlonm, dmltm, rot
+ integer :: offset(3), kount(3)
+ character(len=*), parameter :: subname = 'getamie'
+
+ phihm = fillvalue
+ amie_efxm = fillvalue
+ amie_kevm = fillvalue
+ crad = fillvalue
+
+ if (iprint > 0 .and. masterproc) then
+ write(iulog,"(/,72('-'))")
+ write(iulog,"(a,':')") subname
+ write(iulog,"(a,i4,', iday = ',i3,', iutsec = ',i10)") &
+ 'Initial requested iyear= ', iyear, iday, iutsec
+ end if
+
+ nn = size(amie_sh_ut)
+ bdate = year(1)*10000+month(1)*100+day(1)
+ edate = year(nn)*10000+month(nn)*100+day(nn)
+ idate = iyear*10000+imo*100+iday
+ !
+ ! Check times:
+ !
+ iamie=-1
+ check_loop: do while( iamie/=1 )
+ if (masterproc) write(iulog,*) 'file_ndx = ',file_ndx
+
+ iamie = 1
+
+ if (idate<bdate .or. idate>edate) then
+ if (masterproc) then
+ write(iulog, "(a,': Model date beyond the AMIE last Data:',3I5)") &
+ subname, year(nn), month(nn), day(nn)
+ end if
+ iamie = 0
+
+ if (file_ndx < num_files) then
+ ! NOTE(review): this span was reconstructed — the original text between
+ ! 'file_ndx <' and 'denoma >' was lost to stripping of '<...>' spans.
+ ! Verify against the upstream amie_module.F90 before applying.
+ ! Advance to the next file in the AMIE series and retry the date check.
+ file_ndx = file_ndx + 1
+ call close_files()
+ call open_files()
+ nn = size(amie_sh_ut)
+ bdate = year(1)*10000+month(1)*100+day(1)
+ edate = year(nn)*10000+month(nn)*100+day(nn)
+ iamie = -1
+ else
+ return
+ end if
+ end if
+ end do check_loop
+
+ model_ut = real(iutsec, kind=r8)/3600._r8
+ !
+ ! get SH AMIE data: bracket the model time between sets iset and iset1
+ iboxcar = 0
+ iset = 0
+ iset1 = nn
+ do i=1,nn
+ if (amie_sh_ut(i) < model_ut+(iday-day(i))*24._r8) iset = i
+ end do
+ if (iset == 0) iset = 1
+ if (iset == nn) iset = nn-1
+ iset1 = iset + 1
+
+ denoma = amie_sh_ut(iset1) - amie_sh_ut(iset)
+ if (denoma > 1._r8) then
+ write(iulog, "(a,2(a,i0))") &
+ subname, ': Finding a gap in the AMIE Data set: modelday = ', &
+ iday, ', amieday = ', day(n)
+ iamie = 2
+ return
+ end if
+ if (denoma == 0._r8) then
+ f1 = 1._r8
+ f2 = 0._r8
+ else
+ f1 = (amie_sh_ut(iset1) - (model_ut+(iday- &
+ day(iset1))*24._r8))/denoma
+ f2 = (model_ut+(iday-day(iset1))*24._r8 - &
+ amie_sh_ut(iset))/denoma
+ end if
+ cusplat_sh_amie = (f1*cusplat_sh_input(iset1) + &
+ f2*cusplat_sh_input(iset))
+ cuspmlt_sh_amie = (f1*cuspmlt_sh_input(iset1) + &
+ f2*cuspmlt_sh_input(iset))
+ hpi_sh_amie = (f1*hpi_sh_input(iset1) + f2*hpi_sh_input(iset))
+ pcp_sh_amie = (f1*pcp_sh_input(iset1) + f2*pcp_sh_input(iset))
+
+ offset = (/1,1,iset/)
+ kount = (/lonp1,latp1,2/)
+
+ call update_3d_fields( ncid_sh, offset, kount, pot_sh_input,ekv_sh_input,efx_sh_input )
+ if (iboxcar == 0) then
+ pot_sh_amie(:,:) = (f1*pot_sh_input(:,:,2) + &
+ f2*pot_sh_input(:,:,1))
+ ekv_sh_amie(:,:) = (f1*ekv_sh_input(:,:,2) + &
+ f2*ekv_sh_input(:,:,1))
+ efx_sh_amie(:,:) = (f1*efx_sh_input(:,:,2) + &
+ f2*efx_sh_input(:,:,1))
+ else
+ call boxcar_ave(pot_sh_input,pot_sh_amie,lonp1,latp1, &
+ nn,iset,iboxcar)
+ call boxcar_ave(efx_sh_input,efx_sh_amie,lonp1,latp1, &
+ nn,iset,iboxcar)
+ call boxcar_ave(ekv_sh_input,ekv_sh_amie,lonp1,latp1, &
+ nn,iset,iboxcar)
+ end if
+!
+! get NH AMIE data
+ pot_nh_amie(:,:) = 0._r8
+ ekv_nh_amie(:,:) = 0._r8
+ efx_nh_amie(:,:) = 0._r8
+ cusplat_nh_amie = 0._r8
+ cuspmlt_nh_amie = 0._r8
+ hpi_nh_amie = 0._r8
+ pcp_nh_amie = 0._r8
+
+ iboxcar = 0
+ iset = 0
+ iset1 = nn
+
+ do i=1,nn
+ if (amie_nh_ut(i) < model_ut+(iday-day(i))*24._r8) iset = i
+ end do
+ if (iset == 0) iset = 1
+ if (iset == nn) iset = nn-1
+ iset1 = iset + 1
+
+ denoma = amie_nh_ut(iset1) - amie_nh_ut(iset)
+ if (denoma > 1._r8) then
+ write(iulog, "('getamie: Finding a gap in the AMIE Data set:', &
+ 'modelday, amieday =',2I5)") iday,day(n)
+ iamie = 2
+ return
+ end if
+ if (denoma == 0._r8) then
+ f1 = 1._r8
+ f2 = 0._r8
+ else
+ f1 = (amie_nh_ut(iset1) - (model_ut+(iday- &
+ day(iset1))*24._r8))/denoma
+ f2 = (model_ut+(iday-day(iset1))*24._r8 - &
+ amie_nh_ut(iset))/denoma
+ end if
+ cusplat_nh_amie = (f1*cusplat_nh_input(iset1) + &
+ f2*cusplat_nh_input(iset))
+ cuspmlt_nh_amie = (f1*cuspmlt_nh_input(iset1) + &
+ f2*cuspmlt_nh_input(iset))
+ hpi_nh_amie = (f1*hpi_nh_input(iset1) + f2*hpi_nh_input(iset))
+ pcp_nh_amie = (f1*pcp_nh_input(iset1) + f2*pcp_nh_input(iset))
+
+ offset = (/1,1,iset/)
+ kount = (/lonp1,latp1,2/)
+
+ call update_3d_fields( ncid_nh, offset, kount, pot_nh_input,ekv_nh_input,efx_nh_input )
+
+ if (iboxcar == 0) then
+ pot_nh_amie(:,:) = (f1*pot_nh_input(:,:,2) + &
+ f2*pot_nh_input(:,:,1))
+ ekv_nh_amie(:,:) = (f1*ekv_nh_input(:,:,2) + &
+ f2*ekv_nh_input(:,:,1))
+ efx_nh_amie(:,:) = (f1*efx_nh_input(:,:,2) + &
+ f2*efx_nh_input(:,:,1))
+ else
+ call boxcar_ave(pot_nh_input,pot_nh_amie,lonp1,latp1, &
+ nn,iset,iboxcar)
+ call boxcar_ave(efx_nh_input,efx_nh_amie,lonp1,latp1, &
+ nn,iset,iboxcar)
+ call boxcar_ave(ekv_nh_input,ekv_nh_amie,lonp1,latp1, &
+ nn,iset,iboxcar)
+ end if
+ !
+ ! The OLTMAX latitude also defines the co-latitude theta0, which in
+ ! turn determines crit1(+2.5deg) and crit2(-12.5deg) which are used
+ ! in TIE-GCM as the boundaries of the polar cap and the region of
+ ! influence of the high-lat potential versus the low-lat dynamo potential
+ ! Define this latitude to be between 70 and 77.5 degrees
+ !
+ if (cusplat_sh_amie > 75.0_r8) then
+ cusplat_sh_amie = 75.0_r8
+ cuspmlt_sh_amie = 11._r8
+ end if
+ if (cusplat_sh_amie < 60.0_r8) then
+ cusplat_sh_amie = 60.0_r8
+ cuspmlt_sh_amie = 11._r8
+ end if
+ if (cusplat_nh_amie > 75.0_r8) then
+ cusplat_nh_amie = 75.0_r8
+ cuspmlt_nh_amie = 11._r8
+ end if
+ if (cusplat_nh_amie < 60.0_r8) then
+ cusplat_nh_amie = 60.0_r8
+ cuspmlt_nh_amie = 11._r8
+ end if
+ ! cusplat_nh_amie = amin1(65.0,cusplat_nh_amie)
+ if (cuspmlt_sh_amie > 12.5_r8) cuspmlt_sh_amie = 12.5_r8
+ if (cuspmlt_sh_amie < 11.0_r8) cuspmlt_sh_amie = 11.0_r8
+ if (cuspmlt_nh_amie > 12.5_r8) cuspmlt_nh_amie = 12.5_r8
+ if (cuspmlt_nh_amie < 11.0_r8) cuspmlt_nh_amie = 11.0_r8
+ crad(1) = (90._r8-cusplat_sh_amie)*pi/180._r8
+ crad(2) = (90._r8-cusplat_nh_amie)*pi/180._r8
+
+ active_task: if ( mytid<ntask ) then
+ ! NOTE(review): this span was reconstructed — the original text between
+ ! 'mytid <' and 'mp1 >' was lost to stripping of '<...>' spans.
+ ! Verify against the upstream amie_module.F90 before applying.
+ !
+ ! Convert from MLT to magnetic longitude: rot is the rotation in hours
+ ! derived from sunlon (solar longitude in radians).
+ dmltm = 24._r8/real(lonmx, kind=r8)
+ rot = sunlon*rtd/15._r8
+ do i=1,lonp1
+ xmlt = real(i-1, kind=r8)*dmltm - rot + 24._r8
+ xmlt = MOD(xmlt,24._r8)
+ m = int(xmlt/dmltm + 1.01_r8)
+ mp1 = m + 1
+ if (mp1 > lonp1) mp1 = 2
+ del = xmlt - (m-1)*dmltm
+ ! Initialize arrays around equator
+ do j = latp1+1, ithmx
+ potm(i,j) = 0._r8
+ potm(i,jmxm+1-j) = 0._r8
+ ekvm(i,j) = (1._r8-del)*ekv_sh_amie(m,latp1) + &
+ del*ekv_sh_amie(mp1,latp1)
+ ekvm(i,jmxm+1-j) = (1._r8-del)*ekv_nh_amie(m,latp1) + &
+ del*ekv_nh_amie(mp1,latp1)
+ efxm(i,j) = 0._r8
+ efxm(i,jmxm+1-j) = 0._r8
+ end do
+ ! Put in AMIE arrays from pole to latp1
+ do j = 1, latp1
+ potm(i,j) = (1._r8-del)*pot_sh_amie(m,j) + &
+ del*pot_sh_amie(mp1,j)
+ potm(i,jmxm+1-j) = (1._r8-del)*pot_nh_amie(m,j) + &
+ del*pot_nh_amie(mp1,j)
+ ekvm(i,j) = (1._r8-del)*ekv_sh_amie(m,j) + &
+ del*ekv_sh_amie(mp1,j)
+ ekvm(i,jmxm+1-j) = (1._r8-del)*ekv_nh_amie(m,j) + &
+ del*ekv_nh_amie(mp1,j)
+ efxm(i,j) = (1._r8-del)*efx_sh_amie(m,j) + &
+ del*efx_sh_amie(mp1,j)
+ efxm(i,jmxm+1-j) = (1._r8-del)*efx_nh_amie(m,j) + &
+ del*efx_nh_amie(mp1,j)
+ end do
+
+ end do
+
+ ! Set up coeffs to go between EPOTM(IMXMP,JMNH) and TIEPOT(IMAXM,JMAXMH)
+
+ ! **** SET GRID SPACING DLATM, DLONM
+ ! DMLAT=lat spacing in degrees of AMIE apex grid
+ dmlat = 180._r8 / real(jmxm-1, kind=r8)
+ dlatm = dmlat * dtr
+ dlonm = 2._r8 * pi / real(lonmx, kind=r8)
+ dmltm = 24._r8 / real(lonmx, kind=r8)
+ ! ****
+ ! **** SET ARRAY YLATM (LATITUDE VALUES FOR GEOMAGNETIC GRID
+ ! ****
+ alatm(1) = -pi / 2._r8
+ alat(1) = -90._r8
+ alatm(jmxm) = pi / 2._r8
+ alat(jmxm) = 90._r8
+ do i = 2, ithmx
+ alat(i) = alat(i-1)+dlatm*rtd
+ alat(jmxm+1-i) = alat(jmxm+2-i)-dlatm*rtd
+ alatm(i) = alatm(i-1)+dlatm
+ alatm(jmxm+1-i) = alatm(jmxm+2-i)-dlatm
+ end do
+ alon(1) = -pi*rtd
+ alonm(1) = -pi
+ do i = 2, lonp1
+ alon(i) = alon(i-1) + dlonm*rtd
+ alonm(i) = alonm(i-1) + dlonm
+ end do
+
+ ! ylatm and ylonm are arrays of latitudes and longitudes of the
+ ! distorted magnetic grids in radian - from consdyn.h
+ ! Convert from apex magnetic grid to distorted magnetic grid
+ !
+ ! Allocate workspace for regrid routine rgrd_mod:
+ lw = nmlonp1+nmlat+2*nmlonp1
+ if (.not. allocated(w)) then
+ allocate(w(lw), stat=ier)
+ call check_alloc(ier, 'getamie', 'w', lw=lw)
+ end if
+ liw = nmlonp1 + nmlat
+ if (.not. allocated(iw)) then
+ allocate(iw(liw), stat=ier)
+ call check_alloc(ier, 'getamie', 'iw', lw=liw)
+ end if
+ intpol(:) = 1 ! linear (not cubic) interp in both dimensions
+ if (alatm(1) > ylatm(1)) then
+ alatm(1) = ylatm(1)
+ end if
+ if (alatm(jmxm) < ylatm(nmlat)) then
+ alatm(jmxm) = ylatm(nmlat)
+ end if
+ if (alonm(1) > ylonm(1)) then
+ alonm(1) = ylonm(1)
+ end if
+ if (alonm(lonp1) < ylonm(nmlonp1)) then
+ alonm(lonp1) = ylonm(nmlonp1)
+ end if
+
+ ! ylatm from -pi/2 to pi/2, and ylonm from -pi to pi
+ call rgrd2(lonp1, jmxm, alonm, alatm, potm, nmlonp1, nmlat, &
+ ylonm, ylatm, phihm, intpol, w, lw, iw, liw, ier)
+ call rgrd2(lonp1, jmxm, alonm, alatm, ekvm, nmlonp1, nmlat, &
+ ylonm, ylatm, amie_kevm, intpol, w, lw, iw, liw, ier)
+ call rgrd2(lonp1, jmxm, alonm, alatm, efxm, nmlonp1, nmlat, &
+ ylonm, ylatm, amie_efxm, intpol, w, lw, iw, liw, ier)
+
+ if (iprint > 0 .and. masterproc) then
+ write(iulog, *) subname, ': Max, min amie_efxm = ', &
+ maxval(amie_efxm), minval(amie_efxm)
+ write(iulog, "(a,': AMIE data interpolated to date and time')") subname
+ write(iulog,"(a,': iyear,imo,iday,iutsec = ',3i6,i10)") subname, &
+ iyear, imo, iday, iutsec
+ write(iulog,"(2a,i6,2F9.5,3I6,f10.4)") &
+ subname, ': AMIE iset f1,f2,year,mon,day,ut = ', iset, &
+ f1, f2, year(iset), month(iset), day(iset), amie_nh_ut(iset)
+ write(iulog,*) subname, ': max,min phihm= ', maxval(phihm), minval(phihm)
+ end if
+ end if active_task
+
+ end subroutine getamie
+
+ !-----------------------------------------------------------------------
+ subroutine close_files
+
+ deallocate( year,month,day )
+ deallocate( cusplat_nh_input, cuspmlt_nh_input, hpi_nh_input, &
+ pcp_nh_input, amie_nh_ut, &
+ cusplat_sh_input, cuspmlt_sh_input, hpi_sh_input, &
+ pcp_sh_input, amie_sh_ut )
+
+ call cam_pio_closefile(ncid_nh)
+ call cam_pio_closefile(ncid_sh)
+
+
+ end subroutine close_files
+ !-----------------------------------------------------------------------
+ subroutine open_files()
+
+ call rdamie_nh(amienh_files(file_ndx))
+ call rdamie_sh(amiesh_files(file_ndx))
+
+ end subroutine open_files
+
+end module amie_module
diff --git a/src/ionosphere/waccmx/dpie_coupling.F90 b/src/ionosphere/waccmx/dpie_coupling.F90
index b7f6b2b1c4..d9539040b0 100644
--- a/src/ionosphere/waccmx/dpie_coupling.F90
+++ b/src/ionosphere/waccmx/dpie_coupling.F90
@@ -1,24 +1,22 @@
module dpie_coupling
-!
-! Dynamics/Physics Ionosphere/Electrodynamics coupler.
-! B. Foster, 2015.
-!
- use shr_kind_mod ,only: r8 => shr_kind_r8
- use cam_logfile ,only: iulog
- use cam_history ,only: outfld
- use cam_history ,only: addfld, horiz_only
+ !
+ ! Dynamics/Physics Ionosphere/Electrodynamics coupler.
+ !
+ use shr_kind_mod, only: r8 => shr_kind_r8
+ use cam_logfile, only: iulog
+ use cam_history, only: hist_fld_active, outfld
+ use cam_history, only: addfld, horiz_only
use cam_history_support, only: fillvalue
- use cam_abortutils ,only: endrun
- use spmd_utils ,only: masterproc
- use savefield_waccm ,only: savefld_waccm
- use edyn_mpi ,only: array_ptr_type
- use perf_mod ,only: t_startf, t_stopf
- use amie_module ,only: getamie
- use edyn_solve ,only: phihm
- use edyn_params ,only: dtr, rtd
- use edyn_mpi, only: switch_model_format
- use aurora_params, only: amie_period ! turns on overwrite of energy fields in aurora phys
-
+ use cam_abortutils, only: endrun
+ use spmd_utils, only: masterproc, mpi_logical, mpicom, masterprocid
+ use edyn_mpi, only: array_ptr_type
+ use perf_mod, only: t_startf, t_stopf
+ use amie_module, only: getamie
+ use ltr_module, only: getltr
+ use edyn_solve, only: phihm
+ use edyn_params, only: dtr, rtd
+ use aurora_params, only: prescribed_period ! turns on overwrite of energy fields in aurora phys
+
implicit none
private
@@ -26,17 +24,18 @@ module dpie_coupling
public :: d_pie_epotent ! sets electric potential
public :: d_pie_coupling ! handles coupling with edynamo and ion transport
- logical :: ionos_edyn_active, ionos_oplus_xport ! if true, call oplus_xport for O+ transport
- integer :: nspltop ! nsplit for oplus_xport
+ logical :: ionos_edyn_active ! if true, electro-dynamo ion drift velocities are active
+ logical :: ionos_oplus_xport ! if true, call oplus_xport for O+ transport
+ integer :: nspltop ! nsplit for oplus_xport
- logical :: debug = .false.
+ logical :: debug = .false.
real(r8) :: crad(2), crit(2)
- logical :: crit_user_set = .false.
+ logical :: crit_user_set = .false.
real(r8), parameter :: amie_default_crit(2) = (/ 35._r8, 40._r8 /)
-
+
contains
-!----------------------------------------------------------------------
+ !----------------------------------------------------------------------
subroutine d_pie_init( edyn_active_in, oplus_xport_in, oplus_nsplit_in, crit_colats_deg )
logical, intent(in) :: edyn_active_in, oplus_xport_in
@@ -50,833 +49,765 @@ subroutine d_pie_init( edyn_active_in, oplus_xport_in, oplus_nsplit_in, crit_col
crit_user_set = all( crit_colats_deg(:) > 0._r8 )
if (crit_user_set) then
crit(:) = crit_colats_deg(:)*dtr
- endif
-
- ! Dynamo inputs (called from dpie_coupling. Fields are in waccm format, in CGS units):
- call addfld ('DPIE_OMEGA',(/ 'lev' /), 'I', 'Pa/s ','OMEGA input to DPIE coupling', gridname='fv_centers')
- call addfld ('DPIE_MBAR' ,(/ 'lev' /), 'I', ' ','MBAR Mean Mass from dpie_coupling', gridname='fv_centers')
- call addfld ('DPIE_TN ',(/ 'lev' /), 'I', 'deg K ','DPIE_TN' , gridname='fv_centers')
- call addfld ('DPIE_UN ',(/ 'lev' /), 'I', 'cm/s ','DPIE_UN' , gridname='fv_centers')
- call addfld ('DPIE_VN ',(/ 'lev' /), 'I', 'cm/s ','DPIE_VN' , gridname='fv_centers')
- call addfld ('DPIE_WN ',(/ 'lev' /), 'I', 'cm/s ','DPIE_WN' , gridname='fv_centers')
- call addfld ('DPIE_OM ',(/ 'lev' /), 'I', 's-1 ','DPIE_OM' , gridname='fv_centers')
- call addfld ('DPIE_ZHT ',(/ 'lev' /), 'I', 'cm ','DPIE_ZHT (geometric height,simple)', gridname='fv_centers')
- call addfld ('DPIE_ZGI ',(/ 'lev' /), 'I', 'cm ','DPIE_ZGI (geopotential height on interfaces)', gridname='fv_centers')
- call addfld ('DPIE_BARM ',(/ 'lev' /), 'I', ' ','DPIE_BARM' , gridname='fv_centers')
- call addfld ('DPIE_O2 ',(/ 'lev' /), 'I', 'mmr ','DPIE_O2' , gridname='fv_centers')
- call addfld ('DPIE_O ',(/ 'lev' /), 'I', 'mmr ','DPIE_O' , gridname='fv_centers')
- call addfld ('DPIE_N2 ',(/ 'lev' /), 'I', 'mmr ','DPIE_N2' , gridname='fv_centers')
- call addfld ('DPIE_TE ',(/ 'lev' /), 'I', 'deg K ','DPIE_TE' , gridname='fv_centers')
- call addfld ('DPIE_TI ',(/ 'lev' /), 'I', 'deg K ','DPIE_TI' , gridname='fv_centers')
-
- call addfld ('DPIE_OPMMR' ,(/ 'lev' /), 'I', 'mmr' ,'DPIE_OPMMR' , gridname='fv_centers')
- call addfld ('DPIE_O2P',(/ 'lev' /), 'I', 'm^3','DPIE_O2P(dpie input)', gridname='fv_centers')
- call addfld ('DPIE_NOP',(/ 'lev' /), 'I', 'm^3','DPIE_NOP(dpie input)', gridname='fv_centers')
- call addfld ('DPIE_N2P',(/ 'lev' /), 'I', 'm^3','DPIE_N2P(dpie input)', gridname='fv_centers')
-
- call addfld ('OPLUS', (/ 'lev' /), 'I', 'cm^3','O+ (oplus_xport output)', gridname='fv_centers')
- call addfld ('WACCM_UI' ,(/ 'lev' /), 'I', 'm/s' ,'WACCM_UI (dpie output)', gridname='fv_centers')
- call addfld ('WACCM_VI' ,(/ 'lev' /), 'I', 'm/s' ,'WACCM_VI (dpie output)', gridname='fv_centers')
- call addfld ('WACCM_WI' ,(/ 'lev' /), 'I', 'm/s' ,'WACCM_WI (dpie output)', gridname='fv_centers')
-
- call addfld ('HMF2' , horiz_only , 'I', 'km' ,'Height of the F2 Layer' , gridname='fv_centers')
- call addfld ('NMF2' , horiz_only , 'I', 'cm-3','Peak Density of the F2 Layer', gridname='fv_centers')
-
- call addfld ('Z3GM' ,(/ 'lev' /), 'I', 'm' ,'Geometric height' , gridname='fv_centers')
- call addfld ('Z3GMI ',(/ 'lev' /), 'I', 'm' ,'Geometric height (Interfaces)', gridname='fv_centers')
- call addfld ('OpDens' ,(/ 'lev' /), 'I', 'cm^3','O+ Number Density' , gridname='fv_centers')
- call addfld ('EDens' ,(/ 'lev' /), 'I', 'cm^3','e Number Density (sum of O2+,NO+,N2+,O+)', gridname='fv_centers')
+ end if
+ ! Dynamo inputs (called from dpie_coupling. Fields are in waccm format, in CGS units):
+ call addfld ('DPIE_OMEGA',(/ 'lev' /), 'I', 'Pa/s ','OMEGA input to DPIE coupling', gridname='physgrid')
+ call addfld ('DPIE_MBAR' ,(/ 'lev' /), 'I', 'kg/kmole','MBAR Mean Mass from dpie_coupling', gridname='physgrid')
+ call addfld ('DPIE_TN ',(/ 'lev' /), 'I', 'deg K ','DPIE_TN' , gridname='physgrid')
+ call addfld ('DPIE_UN ',(/ 'lev' /), 'I', 'cm/s ','DPIE_UN' , gridname='physgrid')
+ call addfld ('DPIE_VN ',(/ 'lev' /), 'I', 'cm/s ','DPIE_VN' , gridname='physgrid')
+ call addfld ('DPIE_WN ',(/ 'lev' /), 'I', 'cm/s ','DPIE_WN' , gridname='physgrid')
+ call addfld ('DPIE_OM ',(/ 'lev' /), 'I', 's-1 ','DPIE_OM' , gridname='physgrid')
+ call addfld ('DPIE_ZHT ',(/ 'lev' /), 'I', 'cm ','DPIE_ZHT (geometric height,simple)', gridname='physgrid')
+ call addfld ('DPIE_ZGI ',(/ 'lev' /), 'I', 'cm ','DPIE_ZGI (geopotential height on interfaces)', gridname='physgrid')
+ call addfld ('DPIE_O2 ',(/ 'lev' /), 'I', 'mmr ','DPIE_O2' , gridname='physgrid')
+ call addfld ('DPIE_O ',(/ 'lev' /), 'I', 'mmr ','DPIE_O' , gridname='physgrid')
+ call addfld ('DPIE_N2 ',(/ 'lev' /), 'I', 'mmr ','DPIE_N2' , gridname='physgrid')
+ call addfld ('DPIE_TE ',(/ 'lev' /), 'I', 'deg K ','DPIE_TE' , gridname='physgrid')
+ call addfld ('DPIE_TI ',(/ 'lev' /), 'I', 'deg K ','DPIE_TI' , gridname='physgrid')
+
+
+ call addfld ('DPIE_OPMMR' ,(/ 'lev' /), 'I', 'mmr' ,'DPIE_OPMMR' , gridname='physgrid')
+ call addfld ('DPIE_O2P',(/ 'lev' /), 'I', 'm^-3','DPIE_O2P(dpie input)', gridname='physgrid')
+ call addfld ('DPIE_NOP',(/ 'lev' /), 'I', 'm^-3','DPIE_NOP(dpie input)', gridname='physgrid')
+ call addfld ('DPIE_N2P',(/ 'lev' /), 'I', 'm^-3','DPIE_N2P(dpie input)', gridname='physgrid')
+
+ call addfld ('HMF2' , horiz_only , 'I', 'km' ,'Height of the F2 Layer' , gridname='physgrid')
+ call addfld ('NMF2' , horiz_only , 'I', 'cm-3','Peak Density of the F2 Layer', gridname='physgrid')
+
+ call addfld ('OpDens' ,(/ 'lev' /), 'I', 'cm^-3','O+ Number Density' , gridname='physgrid')
+ call addfld ('EDens' ,(/ 'lev' /), 'I', 'cm^-3','e Number Density (sum of O2+,NO+,N2+,O+)', gridname='physgrid')
+
+ call addfld ('prescr_efxp' , horiz_only, 'I','mW/m2','Prescribed energy flux on geo grid' ,gridname='physgrid')
+ call addfld ('prescr_kevp' , horiz_only, 'I','keV ','Prescribed mean energy on geo grid' ,gridname='physgrid')
+
+ call addfld ('WACCM_UI' ,(/ 'lev' /), 'I', 'm/s' ,'WACCM_UI (dpie output)', gridname='physgrid')
+ call addfld ('WACCM_VI' ,(/ 'lev' /), 'I', 'm/s' ,'WACCM_VI (dpie output)', gridname='physgrid')
+ call addfld ('WACCM_WI' ,(/ 'lev' /), 'I', 'm/s' ,'WACCM_WI (dpie output)', gridname='physgrid')
+ call addfld ('WACCM_OP' ,(/ 'lev' /), 'I', 'kg/kg' ,'WACCM_OP (dpie output)', gridname='physgrid')
end subroutine d_pie_init
-!-----------------------------------------------------------------------
- subroutine d_pie_epotent( highlat_potential_model, crit_out, i0,i1,j0,j1, efxg, kevg )
- use edyn_solve, only: pfrac ! NH fraction of potential (nmlonp1,nmlat0)
- use edyn_geogrid,only: nglblat=>nlat
- use time_manager,only: get_curr_date
- use heelis, only: heelis_model
- use wei05sc, only: weimer05 ! driver for weimer high-lat convection model
- use edyn_esmf, only: edyn_esmf_update
- use solar_parms_data,only: solar_parms_advance
- use solar_wind_data, only: solar_wind_advance
- use solar_wind_data, only: bzimf=>solar_wind_bzimf, byimf=>solar_wind_byimf
- use solar_wind_data, only: swvel=>solar_wind_swvel, swden=>solar_wind_swden
- use edyn_mpi, only: mlat0,mlat1,mlon0,omlon1
- use edyn_maggrid ,only: nmlonp1,nmlat
-! Args:
-!
- character(len=*),intent(in) :: highlat_potential_model
- real(r8), intent(out):: crit_out(2) ! critical colatitudes (degrees)
- integer,optional,intent(in) :: &
- i0, & ! grid%ifirstxy
- i1, & ! grid%ilastxy
- j0, & ! grid%jfirstxy
- j1 ! grid%jlastxy
- real(r8),optional,intent(out) :: efxg(:,:) ! energy flux from AMIE
- real(r8),optional,intent(out) :: kevg(:,:) ! characteristic mean energy from AMIE
+ !-----------------------------------------------------------------------
+ subroutine d_pie_epotent( highlat_potential_model, crit_out, cols, cole, efx_phys, kev_phys, amie_in, ltr_in )
+ use edyn_solve, only: pfrac ! NH fraction of potential (nmlonp1,nmlat0)
+ use time_manager, only: get_curr_date
+ use heelis, only: heelis_model
+ use wei05sc, only: weimer05 ! driver for weimer high-lat convection model
+ use edyn_esmf, only: edyn_esmf_update
+ use solar_parms_data, only: solar_parms_advance
+ use solar_wind_data, only: solar_wind_advance
+ use solar_wind_data, only: bzimf=>solar_wind_bzimf
+ use solar_wind_data, only: byimf=>solar_wind_byimf
+ use solar_wind_data, only: swvel=>solar_wind_swvel
+ use solar_wind_data, only: swden=>solar_wind_swden
+ use edyn_mpi, only: mlat0, mlat1, mlon0, mlon1, omlon1, ntask, mytid
+ use edyn_maggrid, only: nmlonp1, nmlat
+ use regridder, only: regrid_mag2phys_2d
+
+ ! Args:
+ !
+ character(len=*), intent(in) :: highlat_potential_model
+ real(r8), intent(out) :: crit_out(2) ! critical colatitudes (degrees)
+ integer, optional, intent(in) :: cols, cole
+ logical, optional,intent(in) :: amie_in
+ logical, optional,intent(in) :: ltr_in
+
+ ! Prescribed energy flux
+ real(r8), optional, intent(out) :: efx_phys(:)
+ ! Prescribed characteristic mean energy
+ real(r8), optional, intent(out) :: kev_phys(:)
+
!
! local vars
!
+ logical :: amie_inputs, ltr_inputs
- real(r8) :: secs ! time of day in seconds
- integer :: iyear,imo,iday,tod ! tod is time-of-day in seconds
- real(r8) :: sunlons(nglblat)
+ real(r8) :: secs ! time of day in seconds
+ integer :: iyear,imo,iday,tod ! tod is time-of-day in seconds
+ real(r8) :: sunlon
- integer :: iprint,amie_ibkg
- integer :: i, j, iamie
- type(array_ptr_type) :: ptrs(2)
+ integer :: iprint
+ integer :: j, iamie, iltr, ierr
!
! AMIE fields (extra dimension added for longitude switch)
!
- real(r8) :: amie_efxm(nmlonp1,nmlat), amie_kevm(nmlonp1,nmlat) ! auroral energy flux and
- real(r8) :: amie_phihm(nmlonp1,nmlat)
- real(r8),allocatable,target :: amie_efxg (:,:,:) ! AMIE energy flux
- real(r8),allocatable,target :: amie_kevg (:,:,:) ! AMIE characteristic mean energy
-
+ real(r8) :: prescr_efxm(nmlonp1,nmlat), prescr_kevm(nmlonp1,nmlat)
+ real(r8) :: prescr_phihm(nmlonp1,nmlat)
+
call edyn_esmf_update()
- call get_curr_date(iyear,imo,iday,tod) ! tod is integer time-of-day in seconds
- secs = tod ! should promote from int to real(r8)
+ call get_curr_date(iyear, imo,iday, tod)
+ ! tod is integer time-of-day in seconds
+ secs = real(tod, r8)
! update solar wind data (IMF, etc.)
call solar_wind_advance()
! update kp -- phys timestep init happens later ...
call solar_parms_advance()
-
-
- !
- ! Get sun's longitude at latitudes (geographic):
- !
- call sunloc(iday,secs,sunlons) ! sunlons(nglblat) is returned
- !
- ! Get high-latitude convection from empirical model (heelis or weimer).
- ! High-latitude potential phihm (edyn_solve) is defined for edynamo.
- !
- if (trim(highlat_potential_model) == 'heelis') then
- call heelis_model(sunlons) ! heelis.F90
- elseif (trim(highlat_potential_model) == 'weimer') then
+ if ( mytid>> iamie=',i2)") iamie
-
- call getamie(iyear,imo,iday,tod,sunlons(1),amie_ibkg,iprint,iamie, &
- amie_phihm,amie_efxm,amie_kevm,crad,efxg,kevg)
-
- if (masterproc) write(iulog,"('After Calling getamie >>> iamie=',i2)") iamie
- amie_period = iamie == 1
-
- do j=mlat0,mlat1
- call outfld('amie_phihm',amie_phihm(mlon0:omlon1,j),omlon1-mlon0+1,j)
- call outfld('amie_efxm',amie_efxm(mlon0:omlon1,j),omlon1-mlon0+1,j)
- call outfld('amie_kevm',amie_kevm(mlon0:omlon1,j),omlon1-mlon0+1,j)
- enddo
+ amie_inputs=.false.
+ ltr_inputs=.false.
+ if (present(amie_in)) amie_inputs=amie_in
+ if (present(ltr_in)) ltr_inputs= ltr_in
- if (amie_period) then
+ prescribed_inputs: if (amie_inputs .or. ltr_inputs) then
- phihm = amie_phihm
+ if (.not. (present(kev_phys).and.present(efx_phys)) ) then
+ call endrun('d_pie_epotent: kev_phys and efx_phys must be present')
+ end if
- ! Load AMIE fields into pointers for TIE-GCM to WACCM longitude swap
- !
- allocate(amie_efxg(1,i0:i1,j0:j1))
- allocate(amie_kevg(1,i0:i1,j0:j1))
+ iprint = 1
+ if (amie_inputs) then
+ if (masterproc) then
+ write(iulog,*) 'Calling getamie >>> '
+ end if
- do i=i0,i1
- do j=j0,j1
- amie_efxg(1,i,j) = efxg(i-i0+1,j-j0+1)
- amie_kevg(1,i,j) = kevg(i-i0+1,j-j0+1)
- enddo
- enddo
+ call getamie(iyear, imo, iday, tod, sunlon, iprint, iamie, &
+ prescr_phihm, prescr_efxm, prescr_kevm, crad)
- ptrs(1)%ptr => amie_efxg
- ptrs(2)%ptr => amie_kevg
- call switch_model_format(ptrs,1,1,i0,i1,j0,j1, 2)
+ if (masterproc) then
+ write(iulog,"('After Calling getamie >>> iamie = ', i2)") iamie
+ end if
+ prescribed_period = iamie == 1
+ else
+ if (masterproc) then
+ write(iulog,*) 'Calling getltr >>> '
+ end if
- do i=i0,i1
- do j=j0,j1
- efxg(i-i0+1,j-j0+1) = amie_efxg(1,i,j)
- kevg(i-i0+1,j-j0+1) = amie_kevg(1,i,j)
- enddo
- enddo
+ call getltr(iyear, imo, iday, tod,sunlon, iprint, iltr, &
+ prescr_phihm, prescr_efxm, prescr_kevm )
- deallocate(amie_efxg)
- deallocate(amie_kevg)
+ if (masterproc) then
+ write(iulog,"('After Calling getltr >>> iltr = ', i2)") iltr
+ end if
+ prescribed_period = iltr == 1
+ end if
- endif
+ do j = mlat0, mlat1
+ call outfld('prescr_phihm',prescr_phihm(mlon0:omlon1,j),omlon1-mlon0+1,j)
+ call outfld('prescr_efxm', prescr_efxm(mlon0:omlon1,j), omlon1-mlon0+1,j)
+ call outfld('prescr_kevm', prescr_kevm(mlon0:omlon1,j), omlon1-mlon0+1,j)
+ end do
- call savefld_waccm(efxg,'amie_efxg',1,i0,i1,j0,j1)
- call savefld_waccm(kevg,'amie_kevg',1,i0,i1,j0,j1)
+ if (prescribed_period) then
+ phihm = prescr_phihm
+ end if
- endif
-
- call calc_pfrac(sunlons(1),pfrac) ! returns pfrac for dynamo (edyn_solve)
+ call mpi_bcast(prescribed_period, 1, mpi_logical, masterprocid, mpicom, ierr)
- crit_out(:) = crit(:)*rtd ! degrees
- end subroutine d_pie_epotent
+ call regrid_mag2phys_2d(prescr_kevm(mlon0:mlon1,mlat0:mlat1), kev_phys, cols, cole)
+ call regrid_mag2phys_2d(prescr_efxm(mlon0:mlon1,mlat0:mlat1), efx_phys, cols, cole)
-!-----------------------------------------------------------------------
- subroutine d_pie_coupling(omega,pe,zgi,zgpmid,u,v,tn, &
- sigma_ped,sigma_hall,te,ti,o2mmr,o1mmr,h1mmr,o2pmmr, &
- nopmmr,n2pmmr,opmmr,opmmrtm1,ui,vi,wi, &
- rmassO2,rmassO1,rmassH,rmassN2,rmassO2p, rmassNOp,rmassN2p,rmassOp, &
- i0,i1,j0,j1)
-!
-! Call dynamo to calculate electric potential, electric field, and ion drifts.
-! Then call oplus_xport to transport O+, which is passed back to physics.
-!
-! This routine is called from p_d_coupling (dynamics/fv/dp_coupling.F90) when
-! nstep > 0.
-!
- use edyn_geogrid, only: nlev, nilev
- use shr_const_mod,only: &
- grav => shr_const_g, & ! gravitational constant (m/s^2)
- kboltz => shr_const_boltz ! Boltzmann constant (J/K/molecule)
- use time_manager, only: get_nstep
- use time_manager, only: get_curr_date
- use edynamo, only: dynamo
- use edyn_mpi, only: mp_geo_halos,mp_pole_halos
- use oplus, only: oplus_xport
- use ref_pres, only: pref_mid
-!
-! Args:
-!
- integer,intent(in) :: &
- i0, & ! grid%ifirstxy
- i1, & ! grid%ilastxy
- j0, & ! grid%jfirstxy
- j1 ! grid%jlastxy
-
- real(r8),intent(in) :: omega (i0:i1,j0:j1,nlev) ! pressure velocity on midpoints (Pa/s) (i,k,j)
- real(r8),intent(in) :: pe (i0:i1,nilev,j0:j1) ! interface pressure (Pa) (note i,k,j dims)
- real(r8),intent(in) :: zgi (i0:i1,j0:j1,nlev) ! geopotential height (on interfaces) (m)
- real(r8),intent(in) :: zgpmid (i0:i1,j0:j1,nlev) ! geopotential height (on midpoints) (m)
- real(r8),intent(in) :: u (i0:i1,j0:j1,nlev) ! U-wind (m/s)
- real(r8),intent(in) :: v (i0:i1,j0:j1,nlev) ! V-wind (m/s)
- real(r8),intent(in) :: tn (i0:i1,j0:j1,nlev) ! neutral temperature (K)
- real(r8),intent(in) :: sigma_ped (i0:i1,j0:j1,nlev) ! Pedersen conductivity
- real(r8),intent(in) :: sigma_hall(i0:i1,j0:j1,nlev) ! Hall conductivity
- real(r8),intent(in) :: te(i0:i1,j0:j1,nlev) ! electron temperature
- real(r8),intent(in) :: ti(i0:i1,j0:j1,nlev) ! ion temperature
- real(r8),intent(in) :: o2mmr(i0:i1,j0:j1,nlev) ! O2 mass mixing ratio (for oplus)
- real(r8),intent(in) :: o1mmr(i0:i1,j0:j1,nlev) ! O mass mixing ratio (for oplus)
- real(r8),intent(in) :: h1mmr(i0:i1,j0:j1,nlev) ! H mass mixing ratio (for oplus)
- real(r8),intent(in) :: o2pmmr(i0:i1,j0:j1,nlev) ! O2+ mass mixing ratio (for oplus)
- real(r8),intent(in) :: nopmmr(i0:i1,j0:j1,nlev) ! NO+ mass mixing ratio (for oplus)
- real(r8),intent(in) :: n2pmmr(i0:i1,j0:j1,nlev) ! N2+ mass mixing ratio (for oplus)
- real(r8),intent(inout) :: opmmr(i0:i1,j0:j1,nlev) ! O+ mass mixing ratio (oplus_xport output)
- real(r8),intent(inout) :: opmmrtm1(i0:i1,j0:j1,nlev) ! O+ previous time step (oplus_xport output)
- real(r8),intent(inout) :: ui(i0:i1,j0:j1,nlev) ! zonal ion drift (edynamo or empirical)
- real(r8),intent(inout) :: vi(i0:i1,j0:j1,nlev) ! meridional ion drift (edynamo or empirical)
- real(r8),intent(inout) :: wi(i0:i1,j0:j1,nlev) ! vertical ion drift (edynamo or empirical)
- real(r8),intent(in) :: rmassO2 ! O2 molecular weight kg/kmol
- real(r8),intent(in) :: rmassO1 ! O atomic weight kg/kmol
- real(r8),intent(in) :: rmassH ! H atomic weight kg/kmol
- real(r8),intent(in) :: rmassN2 ! N2 molecular weight kg/kmol
- real(r8),intent(in) :: rmassO2p ! O2+ molecular weight kg/kmol
- real(r8),intent(in) :: rmassNOp ! NO+ molecular weight kg/kmol
- real(r8),intent(in) :: rmassN2p ! N2+ molecular weight kg/kmol
- real(r8),intent(in) :: rmassOp ! O+ molecular weight kg/kmol
-!
-! Local:
-!
- integer :: i,j,k
- integer :: kx ! Vertical index at peak of F2 layer electron density
- integer :: nstep
- integer :: nfields ! Number of fields for multi-field calls
- integer :: iyear,imo,iday,tod ! tod is time-of-day in seconds
- integer :: isplit ! loop index
-
- real(r8) :: secs ! time of day in seconds
-
- real(r8), parameter :: n2min = 1.e-6_r8 ! lower limit of N2 mixing ratios
- real(r8), parameter :: small = 1.e-25_r8 ! for fields not currently available
- real(r8) :: zht (i0:i1,j0:j1,nlev) ! geometric height (m) (Simple method - interfaces)
- real(r8) :: zhtmid(i0:i1,j0:j1,nlev)! geometric height (m) (Simple method - midpoints)
- real(r8) :: wn (i0:i1,j0:j1,nlev) ! vertical velocity (from omega)
- real(r8) :: mbar (i0:i1,j0:j1,nlev) ! mean molecular weight
- real(r8) :: n2mmr(i0:i1,j0:j1,nlev) ! N2 mass mixing ratio (for oplus)
- real(r8) :: pmid_inv(nlev) ! inverted reference pressure at midpoints (Pa)
- real(r8) :: pmid(i0:i1,nlev,j0:j1) ! pressure at midpoints (Pa) (global i,j)
- real(r8) :: re = 6.370e6_r8 ! earth radius (m)
-
- real(r8),dimension(i0:i1,j0:j1,nlev) :: & ! ion number densities (m^3)
- o2p,nop,n2p,op,ne, optm1
-
- real(r8),dimension(nlev,i0:i1,j0:j1) :: opmmr_kij
-!
-! Args for dynamo:
- real(r8),target :: edyn_tn (nlev,i0:i1,j0:j1)
- real(r8),target :: edyn_un (nlev,i0:i1,j0:j1)
- real(r8),target :: edyn_vn (nlev,i0:i1,j0:j1)
- real(r8),target :: edyn_wn (nlev,i0:i1,j0:j1) ! vertical wind (cm/s)
- real(r8),target :: edyn_zht (nlev,i0:i1,j0:j1) ! geometric height (cm)
- real(r8),target :: edyn_mbar (nlev,i0:i1,j0:j1)
- real(r8),target :: edyn_ped (nlev,i0:i1,j0:j1)
- real(r8),target :: edyn_hall (nlev,i0:i1,j0:j1)
- real(r8),target :: edyn_ui (nlev,i0:i1,j0:j1)
- real(r8),target :: edyn_vi (nlev,i0:i1,j0:j1)
- real(r8),target :: edyn_wi (nlev,i0:i1,j0:j1)
-!
-! Additional fields needed by oplus_xport:
- real(r8),target :: edyn_te (nlev,i0:i1,j0:j1)
- real(r8),target :: edyn_ti (nlev,i0:i1,j0:j1)
- real(r8),target :: edyn_o2 (nlev,i0:i1,j0:j1)
- real(r8),target :: edyn_o1 (nlev,i0:i1,j0:j1)
- real(r8),target :: edyn_n2 (nlev,i0:i1,j0:j1)
- real(r8),target :: edyn_op (nlev,i0:i1,j0:j1)
- real(r8),target :: edyn_optm1(nlev,i0:i1,j0:j1)
- real(r8),target :: edyn_om (nlev,i0:i1,j0:j1) ! omega vertical motion (1/s)
- real(r8),target :: edyn_zgi (nlev,i0:i1,j0:j1) ! geopotential height (cm) (interfaces)
- real(r8),target :: op_out (nlev,i0:i1,j0:j1) ! oplus_xport output
- real(r8),target :: opnm_out (nlev,i0:i1,j0:j1) ! oplus_xport output at time n-1
- real(r8),target :: edyn_ne (nlev,i0:i1,j0:j1) ! electron density diagnostic
-
- real(r8),target :: halo_tn (nlev,i0-2:i1+2,j0-2:j1+2) ! neutral temperature (deg K)
- real(r8),target :: halo_te (nlev,i0-2:i1+2,j0-2:j1+2) ! electron temperature (deg K)
- real(r8),target :: halo_ti (nlev,i0-2:i1+2,j0-2:j1+2) ! ion temperature (deg K)
- real(r8),target :: halo_un (nlev,i0-2:i1+2,j0-2:j1+2) ! neutral zonal wind (cm/s)
- real(r8),target :: halo_vn (nlev,i0-2:i1+2,j0-2:j1+2) ! neutral meridional wind (cm/s)
- real(r8),target :: halo_om (nlev,i0-2:i1+2,j0-2:j1+2) ! omega (1/s)
- real(r8),target :: halo_o2 (nlev,i0-2:i1+2,j0-2:j1+2) ! o2 (mmr)
- real(r8),target :: halo_o1 (nlev,i0-2:i1+2,j0-2:j1+2) ! o (mmr)
- real(r8),target :: halo_n2 (nlev,i0-2:i1+2,j0-2:j1+2) ! n2 (mmr)
- real(r8),target :: halo_mbar(nlev,i0-2:i1+2,j0-2:j1+2) ! mean molecular weight
- real(r8), allocatable :: polesign(:)
-!
- real(r8) :: nmf2 (i0:i1,j0:j1) ! Electron number density at F2 peak (m-3 converted to cm-3)
- real(r8) :: hmf2 (i0:i1,j0:j1) ! Height of electron number density F2 peak (m converted to km)
- real(r8) :: &
- height(3), & ! Surrounding heights when locating electron density F2 peak
- nde(3) ! Surround densities when locating electron density F2 peak
- real(r8) h12,h22,h32,deltx,atx,ax,btx,bx,ctx,cx ! Variables used for weighting when locating F2 peak
-!
- logical :: do_integrals
-!
-! Pointers for multiple-field calls:
- type(array_ptr_type),allocatable :: ptrs(:)
-
- call t_startf('d_pie_coupling')
+ call outfld_phys1d( 'prescr_efxp', efx_phys )
+ call outfld_phys1d( 'prescr_kevp', kev_phys )
- if (debug.and.masterproc) then
+ end if prescribed_inputs
- nstep = get_nstep()
- call get_curr_date(iyear,imo,iday,tod) ! tod is integer time-of-day in seconds
- secs = tod ! integer to float
+ if ( mytid= ne(i,j,k-1) .and. ne(i,j,k) >= ne(i,j,k+1)) then
- kx = k
- exit kloop
- endif
- enddo kloop
-
- if (kx==0) then
- hmf2(i,j) = fillvalue
- nmf2(i,j) = fillvalue
- exit iloop
- endif
-
- height = (/zht(i,j,kx+1),zht(i,j,kx),zht(i,j,kx-1)/)
- nde = (/ne(i,j,kx+1),ne(i,j,kx),ne(i,j,kx-1)/)
-
- h12 = height(1)*height(1)
- h22 = height(2)*height(2)
- h32 = height(3)*height(3)
-
- deltx=h12*height(2)+h22*height(3)+h32*height(1)-h32*height(2)-h12*height(3)-h22*height(1)
- atx=nde(1)*height(2)+nde(2)*height(3)+nde(3)*height(1)-height(2)*nde(3)-height(3)*nde(1)-height(1)*nde(2)
- ax=atx/deltx
-
- btx=h12*nde(2)+h22*nde(3)+h32*nde(1)-h32*nde(2)-h12*nde(3)-h22*nde(1)
- bx=btx/deltx
- ctx=h12*height(2)*nde(3)+h22*height(3)*nde(1)+h32*height(1)*nde(2)-h32*height(2)*nde(1)- &
- h12*height(3)*nde(2)-h22*height(1)*nde(3)
- cx=ctx/deltx
-
- hmf2(i,j)=-(bx/(2._r8*ax)) * 1.E-03_r8
- nmf2(i,j)=-((bx*bx-4._r8*ax*cx)/(4._r8*ax)) * 1.E-06_r8
-
- enddo iloop ! i=i0,i1
-
- call outfld('HMF2',hmf2(i0:i1,j),i1-i0+1,j)
- call outfld('NMF2',nmf2(i0:i1,j),i1-i0+1,j)
-
- enddo jloop
-!
-! Save fields to waccm history:
-! (must be transformed from (i,j,k) to (k,i,j))
-!
- do j=j0,j1
- do i=i0,i1
- opmmr_kij(:,i,j) = opmmr(i,j,:)
- enddo
- enddo
- call savefld_waccm(opmmr_kij,'DPIE_OPMMR',nlev,i0,i1,j0,j1) ! mmr
-!
-! Prepare inputs to edynamo and oplus_xport:
-!
- do k = 1,nlev
- edyn_tn (k,i0:i1,j0:j1) = tn (i0:i1,j0:j1,k)
- edyn_un (k,i0:i1,j0:j1) = u (i0:i1,j0:j1,k) * 100._r8 ! m/s -> cm/s
- edyn_vn (k,i0:i1,j0:j1) = v (i0:i1,j0:j1,k) * 100._r8 ! m/s -> cm/s
- edyn_wn (k,i0:i1,j0:j1) = wn (i0:i1,j0:j1,k) * 100._r8 ! m/s -> cm/s
- edyn_zgi (k,i0:i1,j0:j1) = zgi (i0:i1,j0:j1,k) * 100._r8 ! m -> cm
- edyn_zht (k,i0:i1,j0:j1) = zht (i0:i1,j0:j1,k) * 100._r8 ! m -> cm
- edyn_mbar (k,i0:i1,j0:j1) = mbar (i0:i1,j0:j1,k)
- edyn_ped (k,i0:i1,j0:j1) = sigma_ped (i0:i1,j0:j1,k)
- edyn_hall (k,i0:i1,j0:j1) = sigma_hall(i0:i1,j0:j1,k)
- edyn_ui (k,i0:i1,j0:j1) = ui (i0:i1,j0:j1,k) * 100._r8 ! zonal ion drift (m/s -> cm/s)
- edyn_vi (k,i0:i1,j0:j1) = vi (i0:i1,j0:j1,k) * 100._r8 ! meridional ion drift (m/s -> cm/s)
- edyn_wi (k,i0:i1,j0:j1) = wi (i0:i1,j0:j1,k) * 100._r8 ! vertical ion drift (m/s -> cm/s)
-!
-! Additional fields for oplus:
-!
- edyn_te (k,i0:i1,j0:j1) = te (i0:i1,j0:j1,k)
- edyn_ti (k,i0:i1,j0:j1) = ti (i0:i1,j0:j1,k)
- edyn_o2 (k,i0:i1,j0:j1) = o2mmr (i0:i1,j0:j1,k)
- edyn_o1 (k,i0:i1,j0:j1) = o1mmr (i0:i1,j0:j1,k)
- edyn_n2 (k,i0:i1,j0:j1) = n2mmr (i0:i1,j0:j1,k)
- edyn_om (k,i0:i1,j0:j1) = -(omega(i0:i1,j0:j1,k) / pmid(i0:i1,k,j0:j1)) ! Pa/s -> 1/s
- edyn_op (k,i0:i1,j0:j1) = op (i0:i1,j0:j1,k) / 1.e6_r8 ! m^3 -> cm^3
- edyn_optm1(k,i0:i1,j0:j1) = optm1 (i0:i1,j0:j1,k) / 1.e6_r8 ! m^3 -> cm^3
- enddo
-!
-! At first timestep, allocate optm1 module data, and initialize local
-! edyn_optm1 to op from physics. This will be input to oplus_xport.
-! After oplus_xport, optm1 will be updated from local oplus_xport output.
-! After first timestep, simply update edyn_optm1 from optm1.
-! optm1 is m^3 for waccm, whereas edyn_optm1 is cm^3 for oplus_xport.
-!
-! At this point, everything is in waccm format. The locals edyn_op and
-! edyn_optm1 will be converted to tiegcm format for the call to oplus_xport,
-! then oplus_xport output (opnm_out) will be converted back to waccm format
-! before using it to update optm1 module data.
-!
-! if (nstep==1) then
-! optm1 = 0._r8
-! do k=1,nlev
-! edyn_optm1(k,i0:i1,j0:j1) = op(i0:i1,j0:j1,k) / 1.e6_r8 ! m^3 -> cm^3
-! enddo
-!
-! After the first step, edyn_optm1 input is updated from the module data
-! (note edyn_optm1 will be converted to TIEGCM format before being
-! passed in to oplus_xport)
-!
-! else ! nstep > 1
-! do k=1,nlev
-! edyn_optm1(k,i0:i1,j0:j1) = optm1(i0:i1,j0:j1,k) / 1.e6_r8 ! m^3 -> cm^3
-! enddo
-! endif
-!
-! These are in WACCM format, and most are in CGS units (see above):
-! (units are specified in addfld calls, edyn_init.F90)
-!
- call savefld_waccm(edyn_tn ,'DPIE_TN' ,nlev,i0,i1,j0,j1) ! deg K
- call savefld_waccm(edyn_un ,'DPIE_UN' ,nlev,i0,i1,j0,j1) ! cm/s
- call savefld_waccm(edyn_vn ,'DPIE_VN' ,nlev,i0,i1,j0,j1) ! cm/s
- call savefld_waccm(edyn_wn ,'DPIE_WN' ,nlev,i0,i1,j0,j1) ! cm/s
- call savefld_waccm(edyn_om ,'DPIE_OM' ,nlev,i0,i1,j0,j1) ! omega on midpoints (1/s)
- call savefld_waccm(edyn_zht ,'DPIE_ZHT' ,nlev,i0,i1,j0,j1) ! geometric height (cm)
- call savefld_waccm(edyn_zgi ,'DPIE_ZGI' ,nlev,i0,i1,j0,j1) ! geopotential height on interfaces (cm)
- call savefld_waccm(edyn_mbar ,'DPIE_BARM',nlev,i0,i1,j0,j1) ! mean mass
- call savefld_waccm(edyn_o2 ,'DPIE_O2' ,nlev,i0,i1,j0,j1) ! cm^3
- call savefld_waccm(edyn_o1 ,'DPIE_O' ,nlev,i0,i1,j0,j1) ! cm^3
- call savefld_waccm(edyn_n2 ,'DPIE_N2' ,nlev,i0,i1,j0,j1) ! cm^3
- call savefld_waccm(edyn_te ,'DPIE_TE' ,nlev,i0,i1,j0,j1)
- call savefld_waccm(edyn_ti ,'DPIE_TI' ,nlev,i0,i1,j0,j1)
-!
-! Save electron density to TIEGCM-format file (edynamo.nc):
-! (ne(i,j,k) was calculated in m^3 above, save here in cm^3)
-!
- do j=j0,j1
- do i=i0,i1
- do k=1,nlev
- edyn_ne(k,i,j) = ne(i,j,k)*1.e-6_r8 ! m^3 -> cm^3
- enddo
- enddo
- enddo
-!
-! Convert input fields from "WACCM format" to "TIEGCM format"
-! (phase shift longitude data and invert the vertical dimension).
-!
- if (ionos_edyn_active) then
- nfields = 21
- allocate(ptrs(nfields))
- !
- ! Fields needed for edynamo:
- ptrs(1)%ptr => edyn_tn ; ptrs(2)%ptr => edyn_un ; ptrs(3)%ptr => edyn_vn
- ptrs(4)%ptr => edyn_wn ; ptrs(5)%ptr => edyn_zht ; ptrs(6)%ptr => edyn_zgi
- ptrs(7)%ptr => edyn_mbar ; ptrs(8)%ptr => edyn_ped ; ptrs(9)%ptr => edyn_hall
- !
- ! Additional fields needed for oplus (and Ne for diag):
- ptrs(10)%ptr => edyn_te ; ptrs(11)%ptr => edyn_ti ; ptrs(12)%ptr => edyn_o2
- ptrs(13)%ptr => edyn_o1 ; ptrs(14)%ptr => edyn_n2 ; ptrs(15)%ptr => edyn_om
- ptrs(16)%ptr => edyn_op ; ptrs(17)%ptr => edyn_optm1 ; ptrs(18)%ptr => edyn_ne
- ptrs(19)%ptr => edyn_ui ; ptrs(20)%ptr => edyn_vi ; ptrs(21)%ptr => edyn_wi
- !
- ! Convert from WACCM to TIEGCM format:
- call switch_model_format(ptrs,1,nlev,i0,i1,j0,j1,nfields)
- deallocate(ptrs)
- endif
-!
-! Call electrodynamo (edynamo.F90)
-! If using time3d conductances, tell dynamo to *not* do fieldline
-! integrations (i.e., do_integrals == false). In this case, edynamo
-! conductances zigmxx,rim1,2 from time3d will be set by subroutine
-! transform_glbin in time3d module.
-!
+! Pointers for multiple-field calls:
+ type(array_ptr_type),allocatable :: ptrs(:)
+
+ character(len=*), parameter :: subname = 'd_pie_coupling'
+
+ real(r8), dimension(mlon0:mlon1,mlat0:mlat1,mlev0:mlev1) :: & ! 3d fields on mag grid
+ ped_mag, & ! pedersen conductivity on magnetic grid
+ hal_mag, & ! hall conductivity on magnetic grid
+ zpot_mag ! geopotential on magnetic grid
+
+ real(r8), dimension(mlon0:mlon1,mlat0:mlat1,mlev0:mlev1) :: & ! 3d fields on mag grid
+ ped_mag_in, & ! pedersen conductivity on magnetic grid
+ hal_mag_in, & ! hall conductivity on magnetic grid
+ zpot_mag_in ! geopotential on magnetic grid
+
+ real(r8), dimension(lon0:lon1,lat0:lat1,lev0:lev1) :: & ! 3d fields on geo grid
+ zpot_geo, & ! geopotential on geo grid
+ tn_geo, &
+ te_geo, &
+ ti_geo, &
+ un_geo, &
+ vn_geo, &
+ wn_geo, &
+ ui_geo, &
+ vi_geo, &
+ wi_geo, &
+ omega_geo, &
+ o2_geo, &
+ o_geo, &
+ n2_geo, &
+ op_geo, &
+ optm1_geo, &
+ pmid_geo, &
+ mbar_geo
+
+ real(r8), dimension(lon0:lon1,lat0:lat1,lev0:lev1) :: &
+ adotv1_in, adotv2_in
+ real(r8), dimension(lon0:lon1,lat0:lat1) :: &
+ adota1_in, adota2_in, a1dta2_in, be3_in, sini_in
+
+ real(r8), dimension(mlon0:mlon1,mlat0:mlat1,mlev0:mlev1) :: &
+ adotv1_mag, adotv2_mag
+ real(r8), dimension(mlon0:mlon1,mlat0:mlat1) :: &
+ adota1_mag, adota2_mag, a1dta2_mag, be3_mag, sini_mag
+
+ call t_startf(subname)
+
+ if (debug .and. masterproc) then
+
+ nstep = get_nstep()
+ call get_curr_date(iyear, imo, iday, tod)
+ secs = real(tod, r8)
+
+ write(iulog,"(3a,i8,a,3i5,a,f6.2)") &
+ 'Enter ',subname,': nstep = ',nstep, ', iyear,imo,iday = ', &
+ iyear, imo, iday, ' ut (hrs) = ', secs/3600._r8
+
+ write(iulog,"(a,': nspltop = ',i3)") subname, nspltop
+ end if
+
+ !---------------------------------------------------------------
+ ! Calculate vertical neutral wind velocity wn(i,j,k).
+ ! (omega (Pa/s), tn (K), and mbar (kg/kmole) are inputs, grav is m/s^2)
+ !---------------------------------------------------------------
+ call calc_wn(tn, omega, pmid, mbar, grav, wn, cols, cole, nlev)
+
+ !---------------------------------------------------------------
+ ! Convert from mmr to number densities (m^3):
+ !---------------------------------------------------------------
+ do i = cols, cole
+ do k = 1, nlev
+ ! O2+, NO+, N2+, O+:
+ o2p(k, i) = o2pmmr(k, i) * mbar(k, i) / rmassO2p * &
+ pmid(k,i) / (kboltz * tn(k, i))
+ nop(k, i) = nopmmr(k, i) * mbar(k, i) / rmassNOp * &
+ pmid(k,i) / (kboltz * tn(k, i))
+ n2p(k, i) = n2pmmr(k, i) * mbar(k, i) / rmassN2p * &
+ pmid(k,i) / (kboltz * tn(k, i))
+ op(k, i) = opmmr(k, i) * mbar(k, i) / rmassOp * &
+ pmid(k,i) / (kboltz * tn(k, i))
+ optm1(k, i) = opmmrtm1(k, i) * mbar(k, i) / rmassOp * &
+ pmid(k,i) / (kboltz * tn(k, i))
+ ne(k, i) = o2p(k,i)+nop(k,i)+n2p(k,i)+op(k,i)
+ end do
+ end do ! k=1,nlev
+
+ call outfld_phys('DPIE_TN',tn)
+ call outfld_phys('DPIE_UN',u* 100._r8)
+ call outfld_phys('DPIE_VN',v* 100._r8)
+ call outfld_phys('DPIE_WN',wn* 100._r8)
+ call outfld_phys('DPIE_ZHT',zht* 100._r8)
+ call outfld_phys('DPIE_ZGI',zgi* 100._r8)
+ call outfld_phys('DPIE_MBAR',mbar)
+
+ call outfld_phys('DPIE_N2',n2mmr)
+ call outfld_phys('DPIE_O2',o2mmr)
+ call outfld_phys('DPIE_O',o1mmr)
+
+ call outfld_phys('DPIE_OMEGA',omega)
+ call outfld_phys('DPIE_OM',-omega/pmid)
+
+ call outfld_phys('DPIE_TE',te)
+ call outfld_phys('DPIE_TI',ti)
+
+ call outfld_phys('DPIE_O2P',o2p)
+ call outfld_phys('DPIE_NOP',nop)
+ call outfld_phys('DPIE_N2P',n2p)
+ call outfld_phys('EDens',ne/1.E6_r8)
+ call outfld_phys('OpDens',op/1.E6_r8)
+
+ !-------------------------------------------------------------------------
+ ! Derive diagnostics nmF2 and hmF2 for output based on TIE-GCM algorithm
+ !-------------------------------------------------------------------------
+ if (hist_fld_active('HMF2') .or. hist_fld_active('NMF2')) then
+ iloop: do i = cols, cole
+ kx = 0
+ kloop: do k= 2, nlev
+ if (ne(k,i) >= ne(k-1,i) .and. ne(k,i) >= ne(k+1,i)) then
+ kx = k
+ exit kloop
+ end if
+ end do kloop
+
+ if (kx==0) then
+ hmf2(i) = fillvalue
+ nmf2(i) = fillvalue
+ exit iloop
+ end if
+
+ height = (/zht(kx+1,i),zht(kx,i),zht(kx-1,i)/)
+ nde = (/ne(kx+1,i),ne(kx,i),ne(kx-1,i)/)
+
+ h12 = height(1)*height(1)
+ h22 = height(2)*height(2)
+ h32 = height(3)*height(3)
+
+ deltx=h12*height(2)+h22*height(3)+h32*height(1)-h32*height(2)-h12*height(3)-h22*height(1)
+
+ atx=nde(1)*height(2)+nde(2)*height(3)+nde(3)*height(1)-height(2)*nde(3)-height(3)*nde(1)-height(1)*nde(2)
+ ax=atx/deltx
+
+ btx=h12*nde(2)+h22*nde(3)+h32*nde(1)-h32*nde(2)-h12*nde(3)-h22*nde(1)
+ bx=btx/deltx
+ ctx=h12*height(2)*nde(3)+h22*height(3)*nde(1)+h32*height(1)*nde(2)-h32*height(2)*nde(1)- &
+ h12*height(3)*nde(2)-h22*height(1)*nde(3)
+ cx=ctx/deltx
+
+ hmf2(i)=-(bx/(2._r8*ax)) * 1.E-03_r8
+ nmf2(i)=-((bx*bx-4._r8*ax*cx)/(4._r8*ax)) * 1.E-06_r8
+
+ end do iloop ! i=cols, cole
+
+ call outfld_phys1d('HMF2',hmf2)
+ call outfld_phys1d('NMF2',nmf2)
+ end if
+
+ call outfld_phys('DPIE_OPMMR', opmmr)
+ call outfld_phys('PED_phys', sigma_ped )
+ call outfld_phys('HAL_phys', sigma_hall )
+
+ if (ionos_edyn_active .or. ionos_oplus_xport) then
+
+ call regrid_phys2geo_3d( zgi,zpot_geo, plev, cols, cole )
+ call regrid_phys2geo_3d( u, un_geo, plev, cols, cole )
+ call regrid_phys2geo_3d( v, vn_geo, plev, cols, cole )
+ call regrid_phys2geo_3d( wn,wn_geo, plev, cols, cole )
+ call regrid_phys2geo_3d( ui, ui_geo, plev, cols, cole )
+ call regrid_phys2geo_3d( vi, vi_geo, plev, cols, cole )
+ call regrid_phys2geo_3d( wi, wi_geo, plev, cols, cole )
+
+ do k = 1, nlev
+ kk = nlev-k+1
+ do j = lat0, lat1
+ do i = lon0, lon1
+ zpot_in(kk,i,j) = zpot_geo(i,j,k) * 100._r8 ! m -> cm
+ halo_un(kk,i,j) = un_geo(i,j,k) * 100._r8 ! m/s -> cm/s
+ halo_vn(kk,i,j) = vn_geo(i,j,k) * 100._r8 ! m/s -> cm/s
+ wn_in(kk,i,j) = wn_geo(i,j,k) * 100._r8 ! m/s -> cm/s
+ ui_in(kk,i,j) = ui_geo(i,j,k) * 100._r8 ! zonal ion drift (m/s -> cm/s)
+ vi_in(kk,i,j) = vi_geo(i,j,k) * 100._r8 ! meridional ion drift (m/s -> cm/s)
+ wi_in(kk,i,j) = wi_geo(i,j,k) * 100._r8 ! vertical ion drift (m/s -> cm/s)
+ end do
+ end do
+ end do
+
+ end if
+
+ !
+ !
+ ! Call electrodynamo (edynamo.F90)
+ ! If using time3d conductances, tell dynamo to *not* do fieldline
+ ! integrations (i.e., do_integrals == false). In this case, edynamo
+ ! conductances zigmxx,rim1,2 from time3d will be set by subroutine
+ ! transform_glbin in time3d module.
+ !
do_integrals = .true.
-!
-! If ionos_edyn_active=false, then empirical ion drifts were passed in from physics,
-! otherwise dynamo calculates them here, and they will be passed to physics.
-!
+ !
+ ! If ionos_edyn_active=false, then empirical ion drifts were passed in from physics,
+ ! otherwise dynamo calculates them here, and they will be passed to physics.
+ !
if (ionos_edyn_active) then
- if (debug.and.masterproc) then
- write(iulog,"('dpie_coupling call dynamo... nstep=',i8)") nstep
- write(iulog,*) 'dpie_coupling: before dynamo MIN/MAX edyn_tn ', &
- MINVAL(edyn_tn(:,i0:i1,j0:j1)), MAXVAL(edyn_tn(:,i0:i1,j0:j1))
- write(iulog,*) 'dpie_coupling: before dynamo MIN/MAX edyn_un ', &
- MINVAL(edyn_un(:,i0:i1,j0:j1)), MAXVAL(edyn_un(:,i0:i1,j0:j1))
- write(iulog,*) 'dpie_coupling: before dynamo MIN/MAX edyn_vn ', &
- MINVAL(edyn_un(:,i0:i1,j0:j1)), MAXVAL(edyn_vn(:,i0:i1,j0:j1))
- write(iulog,*) 'dpie_coupling: before dynamo MIN/MAX edyn_wn ', &
- MINVAL(edyn_wn(:,i0:i1,j0:j1)), MAXVAL(edyn_wn(:,i0:i1,j0:j1))
- write(iulog,*) 'dpie_coupling: before dynamo MIN/MAX edynz_zgi ', &
- MINVAL(edyn_zgi(:,i0:i1,j0:j1)), MAXVAL(edyn_zgi(:,i0:i1,j0:j1))
- write(iulog,*) 'dpie_coupling: before dynamo MIN/MAX edynz_ped ', &
- MINVAL(edyn_ped(:,i0:i1,j0:j1)), MAXVAL(edyn_ped(:,i0:i1,j0:j1))
- write(iulog,*) 'dpie_coupling: before dynamo MIN/MAX edynz_hall ', &
- MINVAL(edyn_hall(:,i0:i1,j0:j1)), MAXVAL(edyn_hall(:,i0:i1,j0:j1))
- write(iulog,*) 'dpie_coupling: before dynamo MIN/MAX edynz_op ', &
- MINVAL(edyn_op(:,i0:i1,j0:j1)), MAXVAL(edyn_op(:,i0:i1,j0:j1))
- write(iulog,*) 'dpie_coupling: before dynamo MIN/MAX edynz_optm1 ', &
- MINVAL(edyn_optm1(:,i0:i1,j0:j1)), MAXVAL(edyn_optm1(:,i0:i1,j0:j1))
+ call t_startf('dpie_ionos_dynamo')
+
+ call calc_adotv( zpot_in(lev0:lev1,lon0:lon1,lat0:lat1), &
+ halo_un(lev0:lev1,lon0:lon1,lat0:lat1), &
+ halo_vn(lev0:lev1,lon0:lon1,lat0:lat1), &
+ wn_in(lev0:lev1,lon0:lon1,lat0:lat1), &
+ adotv1_in, adotv2_in, adota1_in, adota2_in, &
+ a1dta2_in, be3_in, sini_in, lev0, lev1, lon0, lon1, lat0, lat1)
+
+ call regrid_geo2mag_3d( adotv1_in, adotv1_mag )
+ call regrid_geo2mag_3d( adotv2_in, adotv2_mag )
+
+ call outfld_geo('EDYN_ADOTV1', adotv1_in(:,:,lev1:lev0:-1) )
+ call outfld_geo('EDYN_ADOTV2', adotv2_in(:,:,lev1:lev0:-1) )
+
+ call outfld_geo2d( 'EDYN_ADOTA1', adota1_in )
+ call outfld_geo2d( 'EDYN_ADOTA2', adota2_in )
+ call outfld_geo2d( 'EDYN_A1DTA2', a1dta2_in )
+ call outfld_geo2d( 'EDYN_BE3' , be3_in )
+ call outfld_geo2d( 'EDYN_SINI', sini_in )
+
+ call regrid_geo2mag_2d( adota1_in, adota1_mag )
+ call regrid_geo2mag_2d( adota2_in, adota2_mag )
+ call regrid_geo2mag_2d( a1dta2_in, a1dta2_mag )
+ call regrid_geo2mag_2d( be3_in, be3_mag )
+ call regrid_geo2mag_2d( sini_in, sini_mag )
+
+ call outfld_mag2d('ADOTA1_MAG', adota1_mag )
+ call outfld_mag2d('SINI_MAG', sini_mag )
+
+ call regrid_phys2mag_3d( sigma_ped, ped_mag, plev, cols, cole )
+ call regrid_phys2mag_3d( sigma_hall, hal_mag, plev, cols, cole )
+ call regrid_phys2mag_3d( zgi, zpot_mag, plev, cols, cole )
+
+      if (mytid==0) write(iulog,"('dpie_coupling: call dynamo, nstep=',i8)") nstep ! NOTE(review): this line was garbled in the patch; reconstructed -- confirm against original
+      zpot_mag_in(:,:,mlev0:mlev1) = zpot_mag(:,:,mlev1:mlev0:-1) * 100._r8 ! m -> cm
+ ped_mag_in(:,:,mlev0:mlev1) = ped_mag(:,:,mlev1:mlev0:-1)
+ hal_mag_in(:,:,mlev0:mlev1) = hal_mag(:,:,mlev1:mlev0:-1)
+
+ call dynamo( zpot_mag_in, ped_mag_in, hal_mag_in, adotv1_mag, adotv2_mag, adota1_mag, &
+ adota2_mag, a1dta2_mag, be3_mag, sini_mag, &
+ zpot_in, ui_in, vi_in, wi_in, &
+ lon0,lon1, lat0,lat1, lev0,lev1, do_integrals )
endif
- call t_startf('dpie_ionos_dynamo')
- call dynamo(edyn_tn, edyn_un, edyn_vn, edyn_wn, edyn_zgi, &
- edyn_ped, edyn_hall, edyn_ui, edyn_vi, edyn_wi, &
- 1,nlev,i0,i1,j0,j1,do_integrals)
call t_stopf ('dpie_ionos_dynamo')
- if (debug.and.masterproc) then
- write(iulog,"('dpie_coupling after dynamo: nstep=',i8)") nstep
- write(iulog,"(' ui min,max (cm/s)=',2es12.4)") minval(edyn_ui),maxval(edyn_ui)
- write(iulog,"(' vi min,max (cm/s)=',2es12.4)") minval(edyn_vi),maxval(edyn_vi)
- write(iulog,"(' wi min,max (cm/s)=',2es12.4)") minval(edyn_wi),maxval(edyn_wi)
- endif
else
- if (debug.and.masterproc) then
+ if (debug .and. masterproc) then
write(iulog,"('dpie_coupling (dynamo NOT called): nstep=',i8)") nstep
- write(iulog,"(' empirical ExB ui min,max (cm/s)=',2es12.4)") minval(ui),maxval(ui)
- write(iulog,"(' empirical ExB vi min,max (cm/s)=',2es12.4)") minval(vi),maxval(vi)
- write(iulog,"(' empirical ExB wi min,max (cm/s)=',2es12.4)") minval(wi),maxval(wi)
- endif
- endif
-!
-! Call O+ transport routine. Now all inputs to oplus_xport should be in
-! tiegcm-format wrt longitude (-180->180), vertical (bot2top), and units (CGS).
-! (Composition is mmr, ne is cm^3, winds are cm/s)
-! Output op_out and opnm_out will be in cm^3, converted to mmr below.
-!
+ write(iulog,"(' empirical ExB ui min,max (cm/s)=',2es12.4)") &
+ minval(ui),maxval(ui)
+ write(iulog,"(' empirical ExB vi min,max (cm/s)=',2es12.4)") &
+ minval(vi),maxval(vi)
+ write(iulog,"(' empirical ExB wi min,max (cm/s)=',2es12.4)") &
+ minval(wi),maxval(wi)
+ end if
+ end if
+
+ !
+ ! Call O+ transport routine. Now all inputs to oplus_xport should be in
+ ! tiegcm-format wrt longitude (-180->180), vertical (bot2top), and units (CGS).
+ ! (Composition is mmr, ne is cm^3, winds are cm/s)
+ ! Output op_out and opnm_out will be in cm^3, converted to mmr below.
+ !
if (ionos_oplus_xport) then
- pmid_inv(1:nlev) = pref_mid(nlev:1:-1) ! invert ref pressure (Pa) as in tiegcm
+ pmid_inv(1:nlev) = pref_mid(nlev:1:-1) ! invert ref pressure (Pa) as in tiegcm
-!
-! Transport O+ (all args in 'TIEGCM format')
-! Subcycle oplus_xport nspltop times.
-!
- if (debug.and.masterproc) &
- write(iulog,"('dpie_coupling before subcycling oplus_xport: nstep=',i8,' nspltop=',i3)") nstep,nspltop
-
- call t_startf('dpie_halo')
-!$omp parallel do private(i, j, k)
- do k=1,nlev
- do j=j0,j1
- do i=i0,i1
- halo_tn(k,i,j) = edyn_tn(k,i,j)
- halo_te(k,i,j) = edyn_te(k,i,j)
- halo_ti(k,i,j) = edyn_ti(k,i,j)
- halo_un(k,i,j) = edyn_un(k,i,j)
- halo_vn(k,i,j) = edyn_vn(k,i,j)
- halo_om(k,i,j) = edyn_om(k,i,j)
- halo_o2(k,i,j) = edyn_o2(k,i,j)
- halo_o1(k,i,j) = edyn_o1(k,i,j)
- halo_n2(k,i,j) = edyn_n2(k,i,j)
- halo_mbar(k,i,j) = edyn_mbar(k,i,j)
- enddo
- enddo
- enddo
- !
- ! Define halo points on inputs:
- ! WACCM has global longitude values at the poles (j=1,j=nlev)
- ! (they are constant for most, except the winds.)
- !
- ! Set two halo points in lat,lon:
- !
- nfields=10
- allocate(ptrs(nfields),polesign(nfields))
- ptrs(1)%ptr => halo_tn ; ptrs(2)%ptr => halo_te ; ptrs(3)%ptr => halo_ti
- ptrs(4)%ptr => halo_un ; ptrs(5)%ptr => halo_vn ; ptrs(6)%ptr => halo_om
- ptrs(7)%ptr => halo_o2 ; ptrs(8)%ptr => halo_o1 ; ptrs(9)%ptr => halo_n2
- ptrs(10)%ptr => halo_mbar
-
- polesign = 1._r8
- polesign(4:5) = -1._r8 ! un,vn
-
- call mp_geo_halos(ptrs,1,nlev,i0,i1,j0,j1,nfields)
- !
- ! Set latitude halo points over the poles (this does not change the poles).
- ! (the 2nd halo over the poles will not actually be used (assuming lat loops
- ! are lat=2,plat-1), because jp1,jm1 will be the pole itself, and jp2,jm2
- ! will be the first halo over the pole)
- !
- call mp_pole_halos(ptrs,1,nlev,i0,i1,j0,j1,nfields,polesign)
- deallocate(ptrs,polesign)
- call t_stopf('dpie_halo')
-
- call t_startf('dpie_oplus_xport')
- do isplit=1,nspltop
-
- if (isplit > 1) then
- edyn_op = op_out
- edyn_optm1 = opnm_out
- endif
-
- call oplus_xport(halo_tn,halo_te,halo_ti,halo_un,halo_vn,halo_om, &
- edyn_zgi,halo_o2,halo_o1,halo_n2,edyn_op,edyn_optm1, &
- halo_mbar,edyn_ui,edyn_vi,edyn_wi,pmid_inv, &
- op_out,opnm_out, &
- i0,i1,j0,j1,nspltop,isplit)
-
- enddo ! isplit=1,nspltop
- call t_stopf ('dpie_oplus_xport')
-
- if (debug.and.masterproc) then
- write(iulog,"('dpie_coupling after subcycling oplus_xport: nstep=',i8,' nspltop=',i3)") &
- nstep,nspltop
- write(iulog,"(' op_out min,max (cm^3)=',2es12.4)") minval(op_out) ,maxval(op_out)
- write(iulog,"(' opnm_out min,max (cm^3)=',2es12.4)") minval(opnm_out),maxval(opnm_out)
- endif
-
- endif ! ionos_oplus_xport
-!
-! Convert ion drifts and O+ output from TIEGCM to WACCM format:
-!
- if (ionos_edyn_active) then
- nfields = 5 ! ui,vi,wi,op,opnm
- allocate(ptrs(nfields))
- ptrs(1)%ptr => edyn_ui ; ptrs(2)%ptr => edyn_vi ; ptrs(3)%ptr => edyn_wi
- ptrs(4)%ptr => op_out ; ptrs(5)%ptr => opnm_out
- call switch_model_format(ptrs,1,nlev,i0,i1,j0,j1,nfields)
- deallocate(ptrs)
- endif
-!
- if (ionos_oplus_xport) then
- call savefld_waccm(op_out,'OPLUS',nlev,i0,i1,j0,j1) ! cm^3
-!
-! Pass new O+ for current and previous time step back to physics (convert from cm^3 to m^3 and back to mmr).
-!
- do k=1,nlev
- do j=j0,j1
- do i=i0,i1
- opmmr(i,j,k) = op_out(k,i,j)*1.e6_r8 * rmassOp / mbar(i,j,k) * &
- (kboltz * tn(i,j,k)) / pmid(i,k,j)
- op_out(k,i,j) = opmmr(i,j,k) ! for save to waccm hist in mmr
- opmmrtm1(i,j,k) = opnm_out(k,i,j)*1.e6_r8 * rmassOp / mbar(i,j,k) * &
- (kboltz * tn(i,j,k)) / pmid(i,k,j)
- enddo
- enddo
- enddo
+ !
+ ! Transport O+ (all args in 'TIEGCM format')
+ ! Subcycle oplus_xport nspltop times.
+ !
+ if (debug .and. masterproc) then
+ write(iulog,"(a,i8,a,i3)") &
+ 'dpie_coupling before subcycling oplus_xport: nstep = ', &
+ nstep, ' nspltop = ', nspltop
+ end if
+
+ call regrid_phys2geo_3d( tn, tn_geo, plev, cols, cole )
+ call regrid_phys2geo_3d( te, te_geo, plev, cols, cole )
+ call regrid_phys2geo_3d( ti, ti_geo, plev, cols, cole )
+ call regrid_phys2geo_3d( omega, omega_geo, plev, cols, cole )
+ call regrid_phys2geo_3d( o2mmr, o2_geo, plev, cols, cole )
+ call regrid_phys2geo_3d( n2mmr, n2_geo, plev, cols, cole )
+ call regrid_phys2geo_3d( o1mmr, o_geo, plev, cols, cole )
+ call regrid_phys2geo_3d( op, op_geo, plev, cols, cole )
+ call regrid_phys2geo_3d( optm1, optm1_geo, plev, cols, cole )
+ call regrid_phys2geo_3d( pmid, pmid_geo, plev, cols, cole )
+ call regrid_phys2geo_3d( mbar, mbar_geo, plev, cols, cole )
+
+ call t_startf('dpie_halo')
+      if (mytid==0) write(iulog,*) 'dpie_coupling: fill geo halo arrays' ! NOTE(review): this span was garbled in the patch; loop header and tn/te/ti/om fills reconstructed from the surviving "end do"s -- confirm against original
+      do k = 1, nlev
+         kk = nlev-k+1
+         do j = lat0, lat1
+            do i = lon0, lon1
+               halo_tn(kk,i,j) = tn_geo(i,j,k)
+               halo_te(kk,i,j) = te_geo(i,j,k)
+               halo_ti(kk,i,j) = ti_geo(i,j,k)
+               halo_om(kk,i,j) = -omega_geo(i,j,k)/pmid_geo(i,j,k) ! Pa/s -> 1/s (sign convention per DPIE_OM diagnostic; verify)
+ halo_o2(kk,i,j) = o2_geo(i,j,k)
+ halo_o1(kk,i,j) = o_geo(i,j,k)
+ halo_n2(kk,i,j) = n2_geo(i,j,k)
+ halo_mbar(kk,i,j) = mbar_geo(i,j,k)
+ op_in(kk,i,j) = op_geo(i,j,k) / 1.e6_r8 ! m^3 -> cm^3
+ optm1_in(kk,i,j) = optm1_geo(i,j,k) / 1.e6_r8 ! m^3 -> cm^3
+ end do
+ end do
+ end do
+ !
+ ! Define halo points on inputs:
+      ! WACCM has global longitude values at the poles (first and last latitude index)
+ ! (they are constant for most, except the winds.)
+ !
+ ! Set two halo points in lat,lon:
+ !
+ nfields = 10
+ allocate(ptrs(nfields), polesign(nfields))
+ ptrs(1)%ptr => halo_tn ; ptrs(2)%ptr => halo_te ; ptrs(3)%ptr => halo_ti
+ ptrs(4)%ptr => halo_un ; ptrs(5)%ptr => halo_vn ; ptrs(6)%ptr => halo_om
+ ptrs(7)%ptr => halo_o2 ; ptrs(8)%ptr => halo_o1 ; ptrs(9)%ptr => halo_n2
+ ptrs(10)%ptr => halo_mbar
- endif ! ionos_oplus_xport
-!
-! Convert ion drifts from cm/s to m/s for WACCM physics and history files.
-! real(r8),intent(inout) :: ui(i0:i1,j0:j1,nlev) ! zonal ion drift (edynamo or empirical)
-!
- do k=1,nlev
- do j=j0,j1
- do i=i0,i1
- ui(i,j,k) = edyn_ui(k,i,j)/100._r8
- vi(i,j,k) = edyn_vi(k,i,j)/100._r8
- wi(i,j,k) = edyn_wi(k,i,j)/100._r8
- enddo
- enddo
- enddo
- call savefld_waccm(edyn_ui/100._r8,'WACCM_UI',nlev,i0,i1,j0,j1)
- call savefld_waccm(edyn_vi/100._r8,'WACCM_VI',nlev,i0,i1,j0,j1)
- call savefld_waccm(edyn_wi/100._r8,'WACCM_WI',nlev,i0,i1,j0,j1)
+ polesign = 1._r8
+ polesign(4:5) = -1._r8 ! un,vn
+
+ call mp_geo_halos(ptrs,1,nlev,lon0,lon1,lat0,lat1,nfields)
+ !
+ ! Set latitude halo points over the poles (this does not change the poles).
+ ! (the 2nd halo over the poles will not actually be used (assuming lat loops
+ ! are lat=2,plat-1), because jp1,jm1 will be the pole itself, and jp2,jm2
+ ! will be the first halo over the pole)
+ !
+ call mp_pole_halos(ptrs,1,nlev,lon0,lon1,lat0,lat1,nfields,polesign)
+ deallocate(ptrs,polesign)
+ call t_stopf('dpie_halo')
+
+ call outfld_geokij( 'OPtm1i',optm1_in, lev0,lev1, lon0,lon1, lat0,lat1 )
+
+ call t_startf('dpie_oplus_xport')
+ do isplit = 1, nspltop
+
+ if (isplit > 1) then
+ op_in = op_out
+ optm1_in = optm1_out
+ end if
+
+ call oplus_xport(halo_tn, halo_te, halo_ti, halo_un, halo_vn, halo_om, &
+ zpot_in, halo_o2, halo_o1, halo_n2, op_in, optm1_in, &
+ halo_mbar, ui_in, vi_in, wi_in, pmid_inv, &
+ op_out, optm1_out, &
+ lon0, lon1, lat0, lat1, nspltop, isplit)
+
+ end do ! isplit=1,nspltop
+ call t_stopf ('dpie_oplus_xport')
+ if (debug.and.masterproc) then
+ write(iulog,"('dpie_coupling after subcycling oplus_xport: nstep=',i8,' nspltop=',i3)") &
+ nstep,nspltop
+ write(iulog,"(' op_out min,max (cm^3)=',2es12.4)") minval(op_out) ,maxval(op_out)
+ write(iulog,"(' optm1_out min,max (cm^3)=',2es12.4)") minval(optm1_out),maxval(optm1_out)
+ end if
+
+ call outfld_geokij( 'OPLUS', op_out, lev0,lev1, lon0,lon1, lat0,lat1 )
+ call outfld_geokij( 'OPtm1o',optm1_out, lev0,lev1, lon0,lon1, lat0,lat1 )
+ !
+ ! Pass new O+ for current and previous time step back to physics (convert from cm^3 to m^3 and back to mmr).
+ !
+ do k=1,nlev
+ kk = nlev-k+1
+ do j = lat0,lat1
+ do i = lon0,lon1
+ op_geo(i,j,k) = op_out(kk,i,j)*1.e6_r8 * rmassOp / mbar_geo(i,j,k) * &
+ (kboltz * tn_geo(i,j,k)) / pmid_geo(i,j,k)
+ optm1_geo(i,j,k) = optm1_out(kk,i,j)*1.e6_r8 * rmassOp / mbar_geo(i,j,k) * &
+ (kboltz * tn_geo(i,j,k)) / pmid_geo(i,j,k)
+ ui_geo(i,j,k) = ui_in(kk,i,j)/100._r8 ! cm/s -> m/s
+ vi_geo(i,j,k) = vi_in(kk,i,j)/100._r8 ! cm/s -> m/s
+ wi_geo(i,j,k) = wi_in(kk,i,j)/100._r8 ! cm/s -> m/s
+ end do
+ end do
+ end do
+
+ endif
+
+ call regrid_geo2phys_3d( op_geo, opmmr, plev, cols, cole )
+ call regrid_geo2phys_3d( optm1_geo, opmmrtm1, plev, cols, cole )
+ call regrid_geo2phys_3d( ui_geo, ui, plev, cols, cole )
+ call regrid_geo2phys_3d( vi_geo, vi, plev, cols, cole )
+ call regrid_geo2phys_3d( wi_geo, wi, plev, cols, cole )
+
+ end if ! ionos_oplus_xport
+
+ call outfld_phys('WACCM_UI',ui)
+ call outfld_phys('WACCM_VI',vi)
+ call outfld_phys('WACCM_WI',wi)
+ call outfld_phys('WACCM_OP',opmmr)
call t_stopf('d_pie_coupling')
end subroutine d_pie_coupling
-!-----------------------------------------------------------------------
- subroutine calc_wn(tn,omega,pmid,mbar,grav,wn,i0,i1,j0,j1,nlev)
- use shr_const_mod,only : shr_const_rgas ! Universal gas constant
-!
-! Calculate neutral vertical wind on midpoints (m/s)
-!
-! Inputs:
- integer,intent(in) :: i0,i1,j0,j1,nlev
- real(r8),dimension(i0:i1,j0:j1,nlev),intent(in) :: &
- tn, & ! neutral temperature (deg K)
- omega,& ! pressure velocity (Pa/s)
- mbar ! mean molecular weight
- real(r8),dimension(i0:i1,nlev,j0:j1),intent(in) :: &
- pmid ! pressure at midpoints (Pa)
- real(r8),intent(in) :: grav ! m/s^2
-!
-! Output:
- real(r8),intent(out) :: wn(i0:i1,j0:j1,nlev) ! vertical velocity output (m/s)
-!
-! Local:
- integer :: i,j,k
- real(r8) :: scheight(i0:i1,j0:j1,nlev) ! dimensioned for vectorization
-
- do k=1,nlev
- do j=j0,j1
- do i=i0,i1
- scheight(i,j,k) = shr_const_rgas*tn(i,j,k)/(mbar(i,j,k)*grav)
- wn(i,j,k) = -omega(i,j,k)*scheight(i,j,k)/pmid(i,k,j)
- enddo
- enddo
- enddo
+ !-----------------------------------------------------------------------
+ subroutine calc_wn(tn,omega,pmid,mbar,grav,wn,cols,cole,nlev)
+ use shr_const_mod,only : shr_const_rgas ! Universal gas constant
+ !
+ ! Calculate neutral vertical wind on midpoints (m/s)
+ !
+ ! Inputs:
+ integer,intent(in) :: cols, cole, nlev
+ real(r8),dimension(nlev, cols:cole),intent(in) :: &
+ tn, & ! neutral temperature (deg K)
+ omega,& ! pressure velocity (Pa/s)
+ mbar ! mean molecular weight
+ real(r8),dimension(nlev,cols:cole),intent(in) :: &
+ pmid ! pressure at midpoints (Pa)
+ real(r8),intent(in) :: grav ! m/s^2
+ !
+ ! Output:
+ real(r8),intent(out) :: wn(nlev, cols:cole) ! vertical velocity output (m/s)
+ !
+ ! Local:
+ integer :: i,k
+ real(r8) :: scheight(nlev, cols:cole) ! dimensioned for vectorization
+
+ do i = cols, cole
+ do k = 1, nlev
+ scheight(k,i) = shr_const_rgas*tn(k,i)/(mbar(k,i)*grav)
+ wn(k,i) = -omega(k,i)*scheight(k,i)/pmid(k,i)
+ end do
+ end do
end subroutine calc_wn
-!-----------------------------------------------------------------------
+ !-----------------------------------------------------------------------
subroutine calc_pfrac(sunlon,pfrac)
-!
-! Calculate pfrac fractional presence of dynamo equation using critical
-! convection colatitudes crit(2).
-!
+ !
+ ! Calculate pfrac fractional presence of dynamo equation using critical
+ ! convection colatitudes crit(2).
+ !
use edyn_maggrid ,only: nmlonp1,ylonm,ylatm
use edyn_solve ,only: nmlat0
use aurora_params ,only: offc, dskofc, theta0, aurora_params_set
implicit none
-!
-! Args:
+ !
+ ! Args:
real(r8),intent(in) :: sunlon ! Sun's longitude in dipole coordinates
-!
-! Output: fractional presence of dynamo equation using critical colatitudes
-!
+ !
+ ! Output: fractional presence of dynamo equation using critical colatitudes
+ !
real(r8),intent(out) :: pfrac(nmlonp1,nmlat0) ! NH fraction of potential
-!
-! Local:
+ !
+ ! Local:
integer :: j,i
real(r8),dimension(nmlonp1,nmlat0) :: colatc
real(r8) :: sinlat,coslat,aslonc,ofdc,cosofc,sinofc,crit1deg
if (.not. crit_user_set) then
- if (amie_period) then
+ if (prescribed_period) then
crit(:) = amie_default_crit(:)*dtr
else
crit1deg = max(15._r8,0.5_r8*(theta0(1)+theta0(2))*rtd + 5._r8)
@@ -885,108 +816,228 @@ subroutine calc_pfrac(sunlon,pfrac)
! Critical colatitudes:
crit(1) = crit1deg*dtr
crit(2) = crit(1) + 15._r8*dtr
- endif
- endif
+ end if
+ end if
if (.not.aurora_params_set) then
offc(:) = 1._r8*dtr
dskofc(:) = 0._r8
- endif
+ end if
-!
-! offc(2), dskofc(2) are for northern hemisphere aurora
-!
- ofdc = sqrt(offc(2)**2+dskofc(2)**2)
+ !
+ ! offc(2), dskofc(2) are for northern hemisphere aurora
+ !
+ ofdc = sqrt(offc(2)**2 + dskofc(2)**2)
cosofc = cos(ofdc)
sinofc = sin(ofdc)
aslonc = asin(dskofc(2)/ofdc)
-!
-! Define colatc with northern convection circle coordinates
-!
+ !
+ ! Define colatc with northern convection circle coordinates
+ !
do j=1,nmlat0
- sinlat = sin(abs(ylatm(j+nmlat0-1)))
- coslat = cos( ylatm(j+nmlat0-1))
- do i=1,nmlonp1
- colatc(i,j) = cos(ylonm(i)-sunlon+aslonc)
- colatc(i,j) = acos(cosofc*sinlat-sinofc*coslat*colatc(i,j))
- enddo ! i=1,nmlonp1
-!
-! Calculate fractional presence of dynamo equation at each northern
-! hemisphere geomagnetic grid point. Output in pfrac(nmlonp1,nmlat0)
-!
- do i=1,nmlonp1
- pfrac(i,j) = (colatc(i,j)-crit(1))/(crit(2)-crit(1))
- if (pfrac(i,j) < 0._r8) pfrac(i,j) = 0._r8
- if (pfrac(i,j) >= 1._r8) pfrac(i,j) = 1._r8
- enddo ! i=1,nmlonp1
- enddo ! j=1,nmlat0
-!
+ sinlat = sin(abs(ylatm(j+nmlat0-1)))
+ coslat = cos( ylatm(j+nmlat0-1))
+ do i=1,nmlonp1
+ colatc(i,j) = cos(ylonm(i)-sunlon+aslonc)
+ colatc(i,j) = acos(cosofc*sinlat-sinofc*coslat*colatc(i,j))
+ end do ! i=1,nmlonp1
+ !
+ ! Calculate fractional presence of dynamo equation at each northern
+ ! hemisphere geomagnetic grid point. Output in pfrac(nmlonp1,nmlat0)
+ !
+ do i = 1 , nmlonp1
+ pfrac(i,j) = (colatc(i,j)-crit(1)) / (crit(2)-crit(1))
+ if (pfrac(i,j) < 0._r8) then
+ pfrac(i,j) = 0._r8
+ end if
+ if (pfrac(i,j) >= 1._r8) then
+ pfrac(i,j) = 1._r8
+ end if
+ end do ! i=1,nmlonp1
+ end do ! j=1,nmlat0
+ !
end subroutine calc_pfrac
-!-----------------------------------------------------------------------
- subroutine sunloc(iday,secs,sunlons)
-!
-! Given day of year and ut, return sun's longitudes in dipole coordinates
-! in sunlons(nlat)
-!
- use getapex ,only: alonm ! (nlonp1,0:nlatp1)
- use edyn_geogrid ,only: nlon,nlat
- use edyn_params ,only: pi
-!
-! Args:
+ !-----------------------------------------------------------------------
+ subroutine sunloc(iday, secs, sunlon)
+ !
+ ! Given day of year and ut, return sun's longitude in dipole coordinates
+ ! in sunlon
+ !
+ use getapex, only: alonm ! (nlonp1,0:nlatp1)
+ use edyn_geogrid, only: nlon, nlat, dphi, dlamda
+ use edyn_params, only: pi
+ !
+ ! Args:
integer,intent(in) :: iday ! day of year
real(r8),intent(in) :: secs ! ut in seconds
- real(r8),intent(out) :: sunlons(nlat) ! output
-!
-! Local:
- integer :: j,i,ii,isun,jsun
- real(r8) :: glats,glons,pisun,pjsun,sndlons,csdlons
- real(r8) :: dphi,dlamda
- real(r8) :: rlonm(nlon+4,nlat) ! (nlon+4,nlat)
- real(r8) :: r8_nlat, r8_nlon
+ real(r8),intent(out) :: sunlon ! output
+ !
+ ! Local:
+ integer :: j, i, ii, isun, jsun
+ real(r8) :: glats, glons, pisun, pjsun, sndlons, csdlons
+ real(r8) :: rlonm(nlon+4, nlat) ! (nlon+4,nlat)
real(r8) :: r8_isun, r8_jsun
-!
-! Sun's geographic coordinates:
- r8_nlat = dble(nlat)
- r8_nlon = dble(nlon)
- glats = asin(.398749_r8*sin(2._r8*pi*(iday-80)/365._r8))
- glons = pi*(1._r8-2._r8*secs/86400._r8)
- dphi = pi/r8_nlat
- dlamda = 2._r8*pi/r8_nlon
-
- do j=1,nlat
- do i=1,nlon
- ii = i+2
- rlonm(ii,j) = alonm(i,j)
- enddo
- do i=1,2
- rlonm(i,j) = rlonm(i+nlon,j)
- rlonm(i+nlon+2,j) = rlonm(i+2,j)
- enddo
- enddo
-
- pisun = (glons+pi)/dlamda+1._r8
- pjsun = (glats+.5_r8*(pi-dphi))/dphi+1._r8
+ !
+ ! Sun's geographic coordinates:
+ glats = asin(.398749_r8*sin(2._r8 * pi * real(iday-80, r8) / 365._r8))
+ glons = pi * (1._r8 - (2._r8 * secs / 86400._r8))
+
+ do j = 1, nlat
+ do i = 1, nlon
+ ii = i + 2
+ rlonm(ii, j) = alonm(i, j)
+ end do
+ do i = 1, 2
+ rlonm(i, j) = rlonm(i+nlon, j)
+ rlonm(i+nlon+2, j) = rlonm(i+2, j)
+ end do
+ end do
+
+ pisun = ((glons + pi) / dlamda) + 1._r8
+ pjsun = ((glats + (.5_r8 * (pi - dphi))) / dphi) + 1._r8
isun = int(pisun)
jsun = int(pjsun)
- r8_isun = dble(isun)
- r8_jsun = dble(jsun)
- pisun = pisun-r8_isun
- pjsun = pjsun-r8_jsun
-
- sndlons = (1._r8-pisun)*(1._r8-pjsun)*sin(rlonm(isun+2,jsun))+ &
- pisun*(1._r8-pjsun) *sin(rlonm(isun+3,jsun))+ &
- pisun*pjsun *sin(rlonm(isun+3,jsun+1))+ &
- (1._r8-pisun)*pjsun *sin(rlonm(isun+2,jsun+1))
- csdlons = (1._r8-pisun)*(1._r8-pjsun)*cos(rlonm(isun+2,jsun))+ &
- pisun*(1._r8-pjsun) *cos(rlonm(isun+3,jsun))+ &
- pisun*pjsun *cos(rlonm(isun+3,jsun+1))+ &
- (1._r8-pisun)*pjsun *cos(rlonm(isun+2,jsun+1))
- sunlons(1) = atan2(sndlons,csdlons)
- do j = 2,nlat
- sunlons(j) = sunlons(1)
- enddo
+ r8_isun = real(isun, r8)
+ r8_jsun = real(jsun, r8)
+ pisun = pisun - r8_isun
+ pjsun = pjsun - r8_jsun
+
+ sndlons = (1._r8-pisun) * (1._r8-pjsun) * sin(rlonm(isun+2, jsun)) + &
+ pisun*(1._r8-pjsun) * sin(rlonm(isun+3,jsun)) + &
+ pisun*pjsun * sin(rlonm(isun+3,jsun+1)) + &
+ (1._r8-pisun)*pjsun * sin(rlonm(isun+2,jsun+1))
+ csdlons = (1._r8-pisun) * (1._r8-pjsun) * cos(rlonm(isun+2,jsun)) + &
+ pisun*(1._r8-pjsun) * cos(rlonm(isun+3,jsun))+ &
+ pisun*pjsun * cos(rlonm(isun+3,jsun+1))+ &
+ (1._r8-pisun)*pjsun * cos(rlonm(isun+2,jsun+1))
+ sunlon = atan2(sndlons, csdlons)
end subroutine sunloc
-!-----------------------------------------------------------------------
+
+ !-----------------------------------------------------------------------
+ !-----------------------------------------------------------------------
+ subroutine outfld_phys1d( fldname, array )
+ use ppgrid, only: pcols, begchunk, endchunk
+ use phys_grid,only: get_ncols_p
+
+ character(len=*), intent(in) :: fldname
+ real(r8), intent(in) :: array(:)
+
+ integer :: i,j, lchnk,ncol
+ real(r8) :: tmparr(pcols)
+
+ if (hist_fld_active(fldname)) then
+ j = 0
+ do lchnk = begchunk, endchunk
+ ncol = get_ncols_p(lchnk)
+ do i = 1, ncol
+ j = j + 1
+ tmparr(i) = array(j)
+ enddo
+ call outfld(fldname,tmparr(:ncol),ncol,lchnk)
+ enddo
+ end if
+
+ end subroutine outfld_phys1d
+ !-----------------------------------------------------------------------
+ subroutine outfld_phys( fldname, array )
+ use ppgrid, only: pcols, pver, begchunk, endchunk
+ use phys_grid,only: get_ncols_p
+
+ character(len=*), intent(in) :: fldname
+ real(r8), intent(in) :: array(:,:)
+
+ integer :: i,j,k, lchnk,ncol
+ real(r8) :: tmparr(pcols, pver)
+
+ if (hist_fld_active(fldname)) then
+ j = 0
+ do lchnk = begchunk, endchunk
+ ncol = get_ncols_p(lchnk)
+ do i = 1, ncol
+ j = j + 1
+ do k = 1, pver
+ tmparr(i,k) = array(k,j)
+ enddo
+ enddo
+ call outfld(fldname,tmparr(:ncol,:),ncol,lchnk)
+ enddo
+ end if
+
+ end subroutine outfld_phys
+ !-----------------------------------------------------------------------
+ subroutine outfld_geokij( name, array, ilev0,ilev1, ilon0,ilon1, ilat0,ilat1 )
+
+ character(len=*), intent(in) :: name
+ integer, intent(in) :: ilev0,ilev1, ilon0,ilon1, ilat0,ilat1
+ real(r8), intent(in) :: array(ilev0:ilev1, ilon0:ilon1, ilat0:ilat1)
+
+ integer :: j,k
+ real(r8) :: tmpout(ilon0:ilon1,ilev0:ilev1)
+
+ do j = ilat0,ilat1
+ do k = ilev0,ilev1
+ tmpout(ilon0:ilon1,k) = array(ilev1-k+1,ilon0:ilon1,j)
+ end do
+ call outfld( name, tmpout, ilon1-ilon0+1, j )
+ end do
+ end subroutine outfld_geokij
+ !-----------------------------------------------------------------------
+ subroutine outfld_geo( fldname, array )
+ use edyn_mpi, only: lon0, lon1, lat0, lat1, lev0, lev1
+
+ character(len=*), intent(in) :: fldname
+ real(r8), intent(in) :: array(lon0:lon1,lat0:lat1,lev0:lev1)
+
+ integer :: j
+
+ do j = lat0,lat1
+ call outfld( fldname, array(lon0:lon1,j,lev0:lev1), lon1-lon0+1, j )
+ end do
+
+ end subroutine outfld_geo
+ !-----------------------------------------------------------------------
+ subroutine outfld_geo2d( fldname, array )
+ use edyn_mpi, only: lon0, lon1, lat0, lat1
+
+ character(len=*), intent(in) :: fldname
+ real(r8), intent(in) :: array(lon0:lon1,lat0:lat1)
+
+ integer :: j
+
+ do j = lat0,lat1
+ call outfld( fldname, array(lon0:lon1,j), lon1-lon0+1, j )
+ end do
+
+ end subroutine outfld_geo2d
+ !-----------------------------------------------------------------------
+ subroutine outfld_mag( fldname, array )
+ use edyn_mpi, only: omlon1, mlon0, mlon1, mlat0, mlat1, mlev0, mlev1
+
+ character(len=*), intent(in) :: fldname
+ real(r8), intent(in) :: array(mlon0:mlon1,mlat0:mlat1,mlev0:mlev1)
+
+ integer :: j
+
+ do j = mlat0,mlat1
+ call outfld( fldname, array(mlon0:omlon1,j,mlev0:mlev1),omlon1-mlon0+1,j)
+ end do
+
+ end subroutine outfld_mag
+ !-----------------------------------------------------------------------
+ subroutine outfld_mag2d( fldname, array )
+ use edyn_mpi, only: mlon0, mlon1, mlat0, mlat1
+
+ character(len=*), intent(in) :: fldname
+ real(r8), intent(in) :: array(mlon0:mlon1,mlat0:mlat1)
+
+ integer :: j
+
+ do j = mlat0,mlat1
+ call outfld( fldname, array(mlon0:mlon1,j), mlon1-mlon0+1, j )
+ end do
+
+ end subroutine outfld_mag2d
+
end module dpie_coupling
diff --git a/src/ionosphere/waccmx/edyn_esmf.F90 b/src/ionosphere/waccmx/edyn_esmf.F90
index ae0f35d784..dfcde6a3bf 100644
--- a/src/ionosphere/waccmx/edyn_esmf.F90
+++ b/src/ionosphere/waccmx/edyn_esmf.F90
@@ -1,1140 +1,1288 @@
module edyn_esmf
-#ifdef WACCMX_EDYN_ESMF
-
- use esmf ,only: ESMF_Grid, ESMF_Field, ESMF_RouteHandle, & ! ESMF library module
- ESMF_SUCCESS, ESMF_KIND_R8, ESMF_KIND_I4, &
- ESMF_FieldGet, ESMF_STAGGERLOC_CENTER, ESMF_FieldRegridStore, &
- ESMF_REGRIDMETHOD_BILINEAR, ESMF_POLEMETHOD_ALLAVG, ESMF_FieldSMMStore, &
- ESMF_GridCreate1PeriDim, ESMF_INDEX_GLOBAL, ESMF_GridAddCoord, ESMF_GridGetCoord, &
- ESMF_TYPEKIND_R8, ESMF_FieldCreate, ESMF_Array, ESMF_ArraySpec, ESMF_DistGrid, &
- ESMF_GridGet, ESMF_ArraySpecSet, ESMF_ArrayCreate, ESMF_FieldGet, ESMF_FieldSMM, &
- ESMF_TERMORDER_SRCSEQ
- use shr_kind_mod ,only: r8 => shr_kind_r8
- use cam_logfile ,only: iulog
- use cam_abortutils ,only: endrun
- use edyn_mpi ,only: ntask,ntaski,ntaskj,tasks,lon0,lon1,lat0,lat1,&
- nmagtaski,nmagtaskj,mlon0,mlon1,mlat0,mlat1
- use getapex ,only: gdlatdeg,gdlondeg
- use edyn_geogrid ,only: nlon,nlat,nlev,glon,glat,jspole,jnpole ! dynamically allocated geo grid
- use edyn_maggrid ,only: nmlev,gmlat,gmlon
-
-#endif
-
- implicit none
- save
- private
-
- public :: edyn_esmf_update
-
-#ifdef WACCMX_EDYN_ESMF
-
- public :: nf_3dgeo,f_3dgeo
- public :: edyn_esmf_update_flag
- public :: edyn_esmf_init, edyn_esmf_final, edyn_esmf_update_step, edyn_esmf_regrid
- public :: edyn_esmf_get_2dfield, edyn_esmf_set2d_geo, edyn_esmf_get_3dfield, edyn_esmf_set3d_mag, edyn_esmf_set3d_geo
- public :: edyn_esmf_set2d_mag
-
- public :: mag_be3, mag_adota1,mag_adota2,mag_a1dta2,mag_sini,mag_adotv2,mag_adotv1,mag_scht
- public :: mag_efx, mag_kev
- public :: mag_zpot,mag_hal,mag_ped, mag_phi3d
- public :: geo_be3,geo_adotv2,geo_a1dta2,geo_adota2,geo_adota1,geo_adotv1,geo_sini,geo_scht,geo_zpot
- public :: geo_efx, geo_kev
- public :: geo_hal, geo_ped, mag_des_grid, geo_src_grid, geo_phi3d, geo_emz3d, geo_elam3d, geo_ephi3d
- public :: mag_emz3d, mag_elam3d, mag_ephi3d
-
- type(ESMF_Grid) :: &
- geo_src_grid, mag_src_grid, & ! source grids (will not have periodic pts)
- geo_des_grid, mag_des_grid ! destination grids (will have periodic pts)
-!
-! 3d (i,j,k) ESMF Fields on geographic subdomains:
-!
- type(ESMF_Field) :: & ! 3d ESMF fields on geographic grid
- geo_ped, & ! pedersen conductivity
- geo_hal, & ! hall conductivity
- geo_zpot, & ! geopotential height (cm)
- geo_scht, & ! scale height (cm)
- geo_adotv1, & ! ue1 (m/s)
- geo_adotv2 ! ue2 (m/s)
- integer,parameter :: nf_3dgeo=6 ! number of 3d fields on geographic grid
- type(ESMF_Field) :: f_3dgeo(nf_3dgeo) ! fields on 3d geo grid (could be bundled?)
-!
-! 2d (i,j) ESMF fields on geographic subdomains:
-!
- type(ESMF_Field) :: & ! 2d ESMF fields on geographic grid
- geo_sini, & ! sin(I_m)
- geo_adota1, & ! d(1)**2/D
- geo_adota2, & ! d(2)**2/D
- geo_a1dta2, & ! (d(1) dot d(2)) /D
- geo_be3, & ! mag field strength (T)
- geo_efx, geo_kev ! amie fields
-!
-! 3d (i,j,k) ESMF fields regridded to magnetic subdomains:
-!
- type(ESMF_Field) :: & ! 3d ESMF fields on geomagnetic grid
- mag_ped, & ! pedersen conductivity
- mag_hal, & ! hall conductivity
- mag_zpot, & ! geopotential height (cm)
- mag_scht, & ! scale height (cm)
- mag_adotv1, & ! ue1 (m/s)
- mag_adotv2 ! ue2 (m/s)
-!
-! 2d (i,j) ESMF fields on magnetic subdomains:
-!
- type(ESMF_Field) :: & ! 2d fields on geomagnetic grid
- mag_sini, & ! sin(I_m)
- mag_adota1, & ! d(1)**2/D
- mag_adota2, & ! d(2)**2/D
- mag_a1dta2, & ! (d(1) dot d(2)) /D
- mag_be3, & ! mag field strength (T)
- mag_efx, mag_kev ! amie fields
-!
-! 3d electric potential and electric field for mag to geo regridding:
-!
- type(ESMF_Field) :: mag_phi3d,mag_ephi3d,mag_elam3d,mag_emz3d
- type(ESMF_Field) :: geo_phi3d,geo_ephi3d,geo_elam3d,geo_emz3d
-
- type(ESMF_RouteHandle) :: & ! ESMF route handles for regridding
- routehandle_geo2mag, & ! for geo to mag regrid
- routehandle_mag2geo, & ! for mag to geo regrid
- routehandle_geo2mag_2d, & ! for 2d geo to mag
- routehandle_mag2geo_2d ! for 2d mag to geo for AMIE fields
-!
- real(r8),allocatable :: unitv(:)
-!
- private routehandle_geo2mag, routehandle_mag2geo,&
- routehandle_geo2mag_2d
-
- logical, protected :: edyn_esmf_update_step = .true.
- logical :: debug=.false. ! set true for prints to stdout at each call
-#endif
-
- contains
-#ifdef WACCMX_EDYN_ESMF
-!-----------------------------------------------------------------------
- subroutine edyn_esmf_init( mpi_comm )
-
- integer, intent(in) :: mpi_comm
-
- end subroutine edyn_esmf_init
-
-!-----------------------------------------------------------------------
-!-----------------------------------------------------------------------
- subroutine edyn_esmf_final
-
- end subroutine edyn_esmf_final
-
-#endif
-
-!-----------------------------------------------------------------------
-!-----------------------------------------------------------------------
- subroutine edyn_esmf_update
- use getapex, only: get_apex,magfield, alonm
- use mo_apex, only: geomag_year_updated
-
-#ifdef WACCMX_EDYN_ESMF
-! Create ESMF grids for geographic and magnetic, and create ESMF fields
-! as necessary on both grids. Define the 2d coordinates for each grid,
-! and save an ESMF routehandles for geo2mag and mag2geo regridding.
-!
-! Local:
- integer :: rc ! return code for ESMF calls
- real(ESMF_KIND_R8),pointer :: fptr(:,:,:)
- integer :: lbnd_destgeo(3),ubnd_destgeo(3) ! 3d bounds of destination geo grid
- integer :: lbnd_destmag(3),ubnd_destmag(3) ! 3d bounds of destination mag grid
- integer :: lbnd_srcgeo(3),ubnd_srcgeo(3) ! 3d bounds of source geo grid
- integer :: lbnd_srcmag(3),ubnd_srcmag(3) ! 3d bounds of source mag grid
- integer(ESMF_KIND_I4),pointer :: factorIndexList(:,:)
- real(ESMF_KIND_R8),pointer :: factorList(:)
- integer :: smm_srctermproc, smm_pipelinedep
-#endif
-
- if (.not.geomag_year_updated .and. allocated(alonm)) return
-!
-! Get apex coordinates.
-!
- call get_apex( ) ! get apex coordinates
- call magfield ! calculate magnetic field parameters
-
-#ifdef WACCMX_EDYN_ESMF
-
- smm_srctermproc = 0
- smm_pipelinedep = 16
-!
-! Set unit vector (this routine called once per run unless crossing year boundary):
-! Handle year boundary by checking if field is allocated
-!
- if (.not.allocated(unitv)) allocate(unitv(nlon))
- unitv(:) = 1._r8
-!
-! Make magnetic and geographic grids for geo2mag regridding:
-!
- call create_geo_grid(geo_src_grid,'src') ! geo source grid
- call create_mag_grid(mag_des_grid,'des') ! mag destination grid
-!
-! Make grids for mag2geo regridding:
-!
- call create_mag_grid(mag_src_grid,'src')
- call create_geo_grid(geo_des_grid,'des')
-!
-! Create empty fields on geographic grid that will be transformed to
-! the magnetic grid and passed as input to the dynamo. This does not
-! assign any values.
-!
-! 3d fields on source geo grid (these exclude periodic points):
-!
- call edyn_esmf_create_geofield(geo_ped,geo_src_grid, 'PED ',nlev)
- call edyn_esmf_create_geofield(geo_hal ,geo_src_grid, 'HAL ',nlev)
- call edyn_esmf_create_geofield(geo_zpot,geo_src_grid, 'ZPOT ',nlev)
- call edyn_esmf_create_geofield(geo_scht,geo_src_grid, 'SCHT ',nlev)
- call edyn_esmf_create_geofield(geo_adotv1,geo_src_grid,'ADOTV1 ',nlev)
- call edyn_esmf_create_geofield(geo_adotv2,geo_src_grid,'ADOTV2 ',nlev)
-!
-! Get 3d bounds of source geo field:
-!
- call ESMF_FieldGet(geo_ped,localDe=0,farrayPtr=fptr, &
- computationalLBound=lbnd_srcgeo, &
- computationalUBound=ubnd_srcgeo,rc=rc)
-
- if (debug) then
- write(iulog,"('Bounds of source geo field: lbnd_srcgeo=',3i4,' ubnd_srcgeo=',3i4,' glon=',2f9.3)") &
- lbnd_srcgeo,ubnd_srcgeo
- endif
-!
-! 2d fields on source geo grid (these exclude periodic points):
-!
- call edyn_esmf_create_geofield(geo_sini ,geo_src_grid,'SINI ',0)
- call edyn_esmf_create_geofield(geo_adota1,geo_src_grid,'ADOTA1 ',0)
- call edyn_esmf_create_geofield(geo_adota2,geo_src_grid,'ADOTA2 ',0)
- call edyn_esmf_create_geofield(geo_a1dta2,geo_src_grid,'A1DTA2 ',0)
- call edyn_esmf_create_geofield(geo_be3 ,geo_src_grid,'BE3 ',0)
-!
-! 3d fields on destination mag grid (will include periodic point):
-!
- call edyn_esmf_create_magfield(mag_ped ,mag_des_grid, 'PED ',nmlev)
- call edyn_esmf_create_magfield(mag_hal ,mag_des_grid, 'HAL ',nmlev)
- call edyn_esmf_create_magfield(mag_zpot,mag_des_grid, 'ZPOT ',nmlev)
- call edyn_esmf_create_magfield(mag_scht,mag_des_grid, 'SCHT ',nmlev)
- call edyn_esmf_create_magfield(mag_adotv1,mag_des_grid,'ADOTV1 ',nmlev)
- call edyn_esmf_create_magfield(mag_adotv2,mag_des_grid,'ADOTV2 ',nmlev)
-!
-! Get 3d bounds of destination mag field:
-!
- call ESMF_FieldGet(mag_ped,localDe=0,farrayPtr=fptr, &
- computationalLBound=lbnd_destmag, &
- computationalUBound=ubnd_destmag,rc=rc)
-
- if (debug) then
- write(iulog,"('Bounds of destination mag field: lbnd_destmag=',3i4,' ubnd_destmag=',3i4,' gmlon=',2f9.3)") &
- lbnd_destmag,ubnd_destmag
- write(iulog,"('esmf_init: lon bnd_destmag =',2i4,' gmlon=',2f9.3)") &
- lbnd_destmag(1),ubnd_destmag(1),gmlon(lbnd_destmag(1)),gmlon(ubnd_destmag(1))
- write(iulog,"('esmf_init: lat bnd_destmag =',2i4,' gmlat=',2f9.3)") &
- lbnd_destmag(2),ubnd_destmag(2),gmlat(lbnd_destmag(2)),gmlat(ubnd_destmag(2))
- endif
-!
-! 2d fields on destination mag grid (will include periodic point):
-!
- call edyn_esmf_create_magfield(mag_sini ,mag_des_grid,'SINI ',0)
- call edyn_esmf_create_magfield(mag_adota1,mag_des_grid,'ADOTA1 ',0)
- call edyn_esmf_create_magfield(mag_adota2,mag_des_grid,'ADOTA2 ',0)
- call edyn_esmf_create_magfield(mag_a1dta2,mag_des_grid,'A1DTA2 ',0)
- call edyn_esmf_create_magfield(mag_be3 ,mag_des_grid,'BE3 ',0)
-!
-! 3d fields on source mag grid for mag2geo:
-!
- call edyn_esmf_create_magfield(mag_phi3d ,mag_src_grid,'PHIM3D ',nmlev)
- call edyn_esmf_create_magfield(mag_ephi3d,mag_src_grid,'EPHI3D ',nmlev)
- call edyn_esmf_create_magfield(mag_elam3d,mag_src_grid,'ELAM3D ',nmlev)
- call edyn_esmf_create_magfield(mag_emz3d ,mag_src_grid,'EMZ3D ',nmlev)
- call edyn_esmf_create_magfield(mag_efx ,mag_src_grid,'MEFXAMIE',0)
- call edyn_esmf_create_magfield(mag_kev ,mag_src_grid,'MKEVAMIE',0)
-!
-! 3d fields on destination geo grid for mag2geo:
-!
- call edyn_esmf_create_geofield(geo_phi3d ,geo_des_grid,'PHIG3D ',nlev)
- call edyn_esmf_create_geofield(geo_ephi3d,geo_des_grid,'EPHI3D ',nlev)
- call edyn_esmf_create_geofield(geo_elam3d,geo_des_grid,'ELAM3D ',nlev)
- call edyn_esmf_create_geofield(geo_emz3d ,geo_des_grid,'EMZ3D ',nlev)
- call edyn_esmf_create_geofield(geo_efx ,geo_des_grid,'GEFXAMIE',0)
- call edyn_esmf_create_geofield(geo_kev ,geo_des_grid,'GKEVAMIE',0)
-!
-! Get 3d bounds of source mag field:
- call ESMF_FieldGet(mag_phi3d,localDe=0,farrayPtr=fptr,&
- computationalLBound=lbnd_srcmag, &
- computationalUBound=ubnd_srcmag,rc=rc)
-
- if (debug) then
- write(iulog,"('esmf_init: lon bnd_srcmag =',2i4,' gmlon=',2f9.3)") &
- lbnd_srcmag(1),ubnd_srcmag(1)
- write(iulog,"('esmf_init: lat bnd_srcmag =',2i4,' gmlat=',2f9.3)") &
- lbnd_srcmag(2),ubnd_srcmag(2)
- endif
-!
-! Get 3d bounds of destination geo field:
-!
- call ESMF_FieldGet(geo_phi3d,localDe=0,farrayPtr=fptr,&
- computationalLBound=lbnd_destgeo, &
- computationalUBound=ubnd_destgeo,rc=rc)
-
- if (debug) then
- write(iulog,"('esmf_init: lon bnd_destgeo=',2i4,' glon=',2f9.3)") &
- lbnd_destgeo(1),ubnd_destgeo(1)
- write(iulog,"('esmf_init: lat bnd_destgeo=',2i4,' glat=',2f9.3)") &
- lbnd_destgeo(2),ubnd_destgeo(2)
- endif
-!
-! Save route handles for grid transformations in both directions
-! geo2mag and mag2geo. FieldRegridStore needs to be called only
-! once for each transformation before the timestep loop (src and
-! dest fields are still required, so just use ped here). Once inside
-! the timestep loop, the same routehandle can be used for all fields
-! that are regridded in the given direction.
-!
-! These calls will leave *.vtk info files in execdir:
-! call ESMF_GridWriteVTK(geo_src_grid, &
-! staggerloc=ESMF_STAGGERLOC_CENTER, filename="geoGrid",rc=rc)
-! call ESMF_GridWriteVTK(mag_des_grid, &
-! staggerloc=ESMF_STAGGERLOC_CENTER, filename="magGrid",rc=rc)
-!
-! Save route handle and get esmf indices and weights for geo2mag:
-!
- call ESMF_FieldRegridStore(srcField=geo_ped,dstField=mag_ped, &
- regridMethod=ESMF_REGRIDMETHOD_BILINEAR, &
- polemethod=ESMF_POLEMETHOD_ALLAVG, &
- routeHandle=routehandle_geo2mag,factorIndexList=factorIndexList, &
- factorList=factorList,srcTermProcessing=smm_srctermproc,pipelineDepth=smm_pipelinedep,rc=rc)
-
- if (rc /= ESMF_SUCCESS) then
- write(iulog,"(a,a,i4)") '>>> edyn_esmf_update: error return from ', &
- 'ESMF_FieldRegridStore for 3d geo2mag: rc=',rc
- call endrun('edyn_esmf_update: ESMF_FieldRegridStore ped')
- endif
-!
-! Store route handle for geo2mag 3d fields.
-!
- call ESMF_FieldSMMStore(geo_ped,mag_ped,routehandle_geo2mag, &
- factorList,factorIndexList,srcTermProcessing=smm_srctermproc,pipelineDepth=smm_pipelinedep,rc=rc)
-
- if (rc /= ESMF_SUCCESS) then
- write(iulog,"(2a,i4)") '>>> edyn_esmf_update: error return from ESMF_FieldSMMStore for ',&
- '3d geo2mag: rc=',rc
- call endrun('edyn_esmf_update: ESMF_FieldSMMStore for 3d geo2mag ped')
- endif
-!
-! Store route handle geo2mag 2d fields:
-!
- call ESMF_FieldSMMStore(geo_sini,mag_sini,routehandle_geo2mag_2d, &
- factorList,factorIndexList,srcTermProcessing=smm_srctermproc,pipelineDepth=smm_pipelinedep,rc=rc)
-
- if (rc /= ESMF_SUCCESS) then
- write(iulog,"(2a,i4)") '>>> edyn_esmf_update: error return from ESMF_FieldSMMStore',&
- ' for 2d geo2mag: rc=',rc
- call endrun('edyn_esmf_update: ESMF_FieldSMMStore for 2d geo2mag sini')
- endif
-!
-! Save route handle and get esmf indices and weights for mag2geo:
-! (this overwrites factorIndexList and factorList from geo2mag call above)
-!
-! These calls will leave *.vtk info files in execdir:
-! call ESMF_GridWriteVTK(mag_src_grid, &
-! staggerloc=ESMF_STAGGERLOC_CENTER, filename="magSrcGrid",rc=rc)
-! call ESMF_GridWriteVTK(geo_des_grid, &
-! staggerloc=ESMF_STAGGERLOC_CENTER, filename="geoDesGrid",rc=rc)
-
-! Save route handle and get esmf indices and weights for mag2geo:
-!
- call ESMF_FieldRegridStore(srcField=mag_phi3d,dstField=geo_phi3d, &
- regridMethod=ESMF_REGRIDMETHOD_BILINEAR, &
- polemethod=ESMF_POLEMETHOD_ALLAVG, &
- routeHandle=routehandle_mag2geo,factorIndexList=factorIndexList,&
- factorList=factorList,srcTermProcessing=smm_srctermproc,pipelineDepth=smm_pipelinedep,rc=rc)
-
- if (rc /= ESMF_SUCCESS) then
- write(iulog,"(2a,i4)") '>>> edyn_esmf_update: error return from ',&
- 'ESMF_FieldRegridStore for 3d mag2geo: rc=',rc
- call endrun('edyn_esmf_update: ESMF_FieldRegridStore for 3d mag2geo phi3d')
- endif
-!
-! mag2geo 3d fields:
-!
- call ESMF_FieldSMMStore(mag_phi3d,geo_phi3d,routehandle_mag2geo,&
- factorList,factorIndexList,srcTermProcessing=smm_srctermproc,pipelineDepth=smm_pipelinedep,rc=rc)
-
- if (rc /= ESMF_SUCCESS) then
- write(iulog,"(2a,i4)") '>>> edyn_esmf_update: error return from ESMF_FieldSMMStore ',&
- 'for 3d mag2geo: rc=',rc
- call endrun('edyn_esmf_update: ESMF_FieldSMMStore for 3d geo2mag phi3d')
- endif
+ use shr_kind_mod, only: r8 => shr_kind_r8
+ use cam_logfile, only: iulog
+ use cam_abortutils, only: endrun
+ use infnan, only: nan, assignment(=)
+
+ use ESMF, only: ESMF_Grid, ESMF_Mesh, ESMF_Field, ESMF_RouteHandle
+ use ESMF, only: ESMF_SUCCESS
+ use ESMF, only: ESMF_KIND_R8, ESMF_KIND_I4
+ use ESMF, only: ESMF_FieldGet
+ use ESMF, only: ESMF_STAGGERLOC_CENTER, ESMF_FieldRegridStore, ESMF_FieldRegrid
+ use ESMF, only: ESMF_StaggerLoc
+ use ESMF, only: ESMF_REGRIDMETHOD_BILINEAR, ESMF_POLEMETHOD_ALLAVG
+ use ESMF, only: ESMF_GridCreate1PeriDim, ESMF_INDEX_GLOBAL
+ use ESMF, only: ESMF_GridAddCoord, ESMF_GridGetCoord
+ use ESMF, only: ESMF_TYPEKIND_R8, ESMF_FieldCreate, ESMF_Array
+ use ESMF, only: ESMF_ArraySpec, ESMF_DistGrid, ESMF_DELayout
+ use ESMF, only: ESMF_GridGet, ESMF_ArraySpecSet
+ use ESMF, only: ESMF_ArrayCreate
+ use ESMF, only: ESMF_GridComp, ESMF_TERMORDER_SRCSEQ
+ use ESMF, only: ESMF_EXTRAPMETHOD_NEAREST_IDAVG
+ use ESMF, only: ESMF_UNMAPPEDACTION_IGNORE
+ use ESMF, only: ESMF_GridDestroy, ESMF_FieldDestroy, ESMF_RouteHandleDestroy
+ use ESMF, only: ESMF_Mesh, ESMF_MeshIsCreated, ESMF_MeshDestroy
+ use ESMF, only: ESMF_MESHLOC_ELEMENT
+ use edyn_mpi, only: mytid, ntask, ntaski, ntaskj, tasks, lon0, lon1, lat0
+ use edyn_mpi, only: lat1, nmagtaski, nmagtaskj, mlon0, mlon1
+ use edyn_mpi, only: mlat0,mlat1
+ use getapex, only: gdlatdeg, gdlondeg
+ ! dynamically allocated geo grid for Oplus transport model
+ use edyn_geogrid, only: nlon, nlev, glon, glat
+ use edyn_maggrid, only: gmlat, gmlon
+ use spmd_utils, only: masterproc
+
+ implicit none
+ save
+ private
+
+ public :: edyn_esmf_update
+ public :: edyn_esmf_final ! Clean up any edyn usage of ESMF
+
+ public :: edyn_esmf_regrid_phys2geo
+ public :: edyn_esmf_regrid_geo2phys
+ public :: edyn_esmf_regrid_phys2mag
+ public :: edyn_esmf_regrid_mag2phys
+ public :: edyn_esmf_regrid_geo2mag
+ public :: edyn_esmf_regrid_mag2geo
+
+ public :: edyn_esmf_get_1dfield
+ public :: edyn_esmf_get_2dfield ! Retrieve a pointer to 2D ESMF field data
+ public :: edyn_esmf_get_3dfield ! Retrieve a pointer to 3D ESMF field data
+ public :: edyn_esmf_get_2dphysfield
+ public :: edyn_esmf_set3d_geo ! Set ESMF field with 3D geo data
+ public :: edyn_esmf_set2d_geo ! Set ESMF field with 2D geo data
+ public :: edyn_esmf_set3d_mag ! Set ESMF field with 3D mag field data
+ public :: edyn_esmf_set2d_mag ! Set ESMF field with 2D mag field data
+ public :: edyn_esmf_set3d_phys ! Set ESMF field with 3D physics field data
+ public :: edyn_esmf_set2d_phys ! Set ESMF field with 2D physics field data
+ public :: edyn_esmf_update_phys_mesh
+
+ public :: phys_3dfld, phys_2dfld
+ public :: geo_3dfld, geo_2dfld
+ public :: mag_des_3dfld, mag_des_2dfld
+ public :: mag_src_3dfld, mag_src_2dfld
+
+ public :: edyn_esmf_chkerr
+
+ type(ESMF_Grid) :: &
+ mag_src_grid, & ! source grid (will not have periodic pts)
+ mag_des_grid, & ! destination grid (will have periodic pts)
+ geo_grid ! geographic grid for Oplus transport
+
+ ! phys_mesh: Mesh representation of physics decomposition
+ type(ESMF_Mesh), public, protected :: phys_mesh
+
+ ! ESMF fields used for mapping between physics, oplus geographic, and geomagnetic grids
+ type(ESMF_Field) :: phys_3dfld, phys_2dfld
+ type(ESMF_Field) :: geo_3dfld, geo_2dfld
+ type(ESMF_Field) :: mag_des_3dfld, mag_des_2dfld
+ type(ESMF_Field) :: mag_src_3dfld, mag_src_2dfld
+
+
+ type(ESMF_RouteHandle) :: & ! ESMF route handles for regridding
+ routehandle_phys2geo, & ! for physics to geo 3-D regrid
+ routehandle_geo2phys, & ! for geo to physics 3-D regrid
+ routehandle_phys2mag, & ! for physics to mag 3-D regrid
+ routehandle_geo2mag, & ! for geo to mag 3-D regrid
+ routehandle_mag2geo, & ! for geo to mag 3-D regrid
+ routehandle_phys2mag_2d, & ! for 2d geo to phys
+ routehandle_mag2phys_2d, & ! for 2d phys to geo for AMIE fields
+ routehandle_geo2mag_2d ! for 2d geo to mag
+
+ logical, parameter :: debug = .false.
+
+ integer, allocatable :: petmap(:,:,:)
+ logical :: initialized=.false.
+
+contains
+
+ !-----------------------------------------------------------------------
+ !-----------------------------------------------------------------------
+ subroutine edyn_esmf_chkerr(subname, routine, rc)
+ use shr_kind_mod, only: shr_kind_cl
+
+ character(len=*), intent(in) :: subname
+ character(len=*), intent(in) :: routine
+ integer, intent(in) :: rc
+
+ character(len=shr_kind_cl) :: errmsg
-! amie fields
- call ESMF_FieldRegridStore(srcField=mag_efx,dstField=geo_efx, &
- regridMethod=ESMF_REGRIDMETHOD_BILINEAR, &
- polemethod=ESMF_POLEMETHOD_ALLAVG, &
- routeHandle=routehandle_mag2geo_2d,factorIndexList=factorIndexList,&
- factorList=factorList,srcTermProcessing=smm_srctermproc,pipelineDepth=smm_pipelinedep,rc=rc)
-
- if (rc /= ESMF_SUCCESS) then
- write(6,"(2a,i4)") '>>> esmf_init: error return from ',&
- 'ESMF_FieldRegridStore for 2d mag2geo_2d: rc=',rc
- call endrun
- endif
-!
-! mag2geo 2d fields:
-!
- call ESMF_FieldSMMStore(mag_efx,geo_efx,routehandle_mag2geo_2d,&
- factorList,factorIndexList,srcTermProcessing=smm_srctermproc,pipelineDepth=smm_pipelinedep,rc=rc)
-
-
- edyn_esmf_update_step = .true.
-#endif
- end subroutine edyn_esmf_update
-
-#ifdef WACCMX_EDYN_ESMF
-!-----------------------------------------------------------------------
- real(r8) function select_wt_mag2geo(n,dimx,djmy)
- integer,intent(in) :: n
- real(r8),intent(in) :: dimx,djmy
-
- select_wt_mag2geo = 0._r8
- select case (n)
- case(1)
- select_wt_mag2geo = (1._r8-dimx)*(1._r8-djmy)
- case(2)
- select_wt_mag2geo = dimx*(1._r8-djmy)
- case(3)
- select_wt_mag2geo = dimx*djmy
- case(4)
- select_wt_mag2geo = (1._r8-dimx)*djmy
- end select
- end function select_wt_mag2geo
-!-----------------------------------------------------------------------
- subroutine create_mag_grid(grid_out,srcdes)
-!
-! Create ESMF geomagnetic grid, w/ lon,lat coordinates.
-! This is called from esmf_init during model initialization.
-!
-! Args:
- type(ESMF_Grid),intent(out) :: grid_out
- character(len=*),intent(in) :: srcdes
-!
-! Local:
- integer :: i,j,n,rc
- real(ESMF_KIND_R8),pointer :: coordX(:,:),coordY(:,:)
- integer :: lbnd(2),ubnd(2)
- integer :: nmlons_task(ntaski) ! number of lons per task
- integer :: nmlats_task(ntaskj) ! number of lats per task
-!
-! We are creating either a source grid or a destination grid:
-!
- if (srcdes /= 'src' .and. srcdes /= 'des') then
- write(iulog,"(a)") '>>> create_mag_grid: srcdes = ''',srcdes, &
- ''' but must be either ''src'' or ''des'''
- call endrun('create_mag_grid: srcdes')
- endif
-!
-! nmlons_task(nmagtaski) = number of mag lons per task in lon dim
-!
- do i=1,nmagtaski
- loop: do n=0,ntask-1
- if (tasks(n)%magtidi==i-1) then
- nmlons_task(i) = tasks(n)%nmaglons
- exit loop
- endif
- enddo loop
- enddo
-!
-! Exclude periodic points (1 point fewer for mpi tasks at east end)
-! for source grids (this overwrites above for eastern-most tasks):
-!
- if (srcdes == 'src') then
- do n=0,ntask-1
- if (tasks(n)%magtidi==nmagtaski-1) then ! east edge of proc matrix
- nmlons_task(tasks(n)%magtidi+1) = tasks(n)%nmaglons-1
- endif
- enddo
- endif
-!
-! nmlats_task(nmagtaskj) = number of mag lats per task in lat dim
-!
- do j=1,nmagtaskj
- loop1: do n=0,ntask-1
- if (tasks(n)%magtidj==j-1) then
- nmlats_task(j) = tasks(n)%nmaglats
- exit loop1
- endif
- enddo loop1
- enddo
-!
-! Create curvilinear magnetic grid (both coords depend
-! on both dimensions, i.e., lon(i,j),lat(i,j)):
-!
- grid_out = ESMF_GridCreate1PeriDim( &
- countsPerDEDim1=nmlons_task, coordDep1=(/1,2/), &
- countsPerDEDim2=nmlats_task, coordDep2=(/1,2/), &
- indexflag=ESMF_INDEX_GLOBAL,rc=rc)
-
- if (rc /= ESMF_SUCCESS) then
- write(iulog,"(2a,i4)") '>>> create_mag_grid: error return from ',&
- 'ESMF_GridCreateShapeTile: rc=',rc
- call endrun('create_mag_grid: ESMF_GridCreate1PeriDim')
- endif
-!
-! Allocate coordinates:
-!
- call ESMF_GridAddCoord(grid_out,staggerloc=ESMF_STAGGERLOC_CENTER,rc=rc)
-
- if (rc /=ESMF_SUCCESS) then
- write(iulog,"(2a,i4)") '>>> create_mag_grid: error return from ',&
- 'ESMF_GridAddCoord: rc=',rc
- call endrun('create_mag_grid: ESMF_GridAddCoord mag_grid')
- endif
-!
-! Get pointer and set mag grid longitude coordinates:
-!
- call ESMF_GridGetCoord(grid_out, coordDim=1, localDE=0, &
- computationalLBound=lbnd, computationalUBound=ubnd, &
- farrayPtr=coordX, staggerloc=ESMF_STAGGERLOC_CENTER, rc=rc)
-
- if (rc /= ESMF_SUCCESS) then
- write(iulog,"(i4)") '>>> create_mag_grid: error return from ', &
- 'ESMF_GridGetCoord for longitude coords: rc=',rc
- call endrun('create_mag_grid: ESMF_GridGetCoord mag grid longitude')
- endif
+ if (rc /= ESMF_SUCCESS) then
+ write(errmsg, '(4a,i0)') trim(subname), ': Error return from ', trim(routine), ', rc = ', rc
+ if (masterproc) then
+ write(iulog, '(2a)') 'ERROR: ', trim(errmsg)
+ end if
+ call endrun(trim(errmsg))
+ end if
+ end subroutine edyn_esmf_chkerr
+
+ !-----------------------------------------------------------------------
+ !-----------------------------------------------------------------------
+ subroutine edyn_esmf_final
+
+ call edyn_esmf_destroy_mag_objs()
+ call edyn_esmf_destroy_nonmag_objs()
+
+ end subroutine edyn_esmf_final
+
+ !-----------------------------------------------------------------------
+ !-----------------------------------------------------------------------
+ subroutine edyn_esmf_destroy_mag_objs
+
+ integer :: rc ! return code for ESMF calls
+ character(len=*), parameter :: subname = 'edyn_esmf_destroy_mag_objs'
+
+ call ESMF_RouteHandleDestroy(routehandle_phys2mag, rc=rc )
+ call edyn_esmf_chkerr(subname, 'ESMF_RouteHandleDestroy routehandle_phys2mag', rc)
+ call ESMF_RouteHandleDestroy(routehandle_geo2mag, rc=rc )
+ call edyn_esmf_chkerr(subname, 'ESMF_RouteHandleDestroy routehandle_geo2mag', rc)
+ call ESMF_RouteHandleDestroy(routehandle_mag2geo, rc=rc )
+ call edyn_esmf_chkerr(subname, 'ESMF_RouteHandleDestroy routehandle_mag2geo', rc)
+ call ESMF_RouteHandleDestroy(routehandle_phys2mag_2d, rc=rc )
+ call edyn_esmf_chkerr(subname, 'ESMF_RouteHandleDestroy routehandle_phys2mag_2d', rc)
+ call ESMF_RouteHandleDestroy(routehandle_mag2phys_2d, rc=rc )
+ call edyn_esmf_chkerr(subname, 'ESMF_RouteHandleDestroy routehandle_mag2phys_2d', rc)
+ call ESMF_RouteHandleDestroy(routehandle_geo2mag_2d, rc=rc )
+ call edyn_esmf_chkerr(subname, 'ESMF_RouteHandleDestroy routehandle_geo2mag_2d', rc)
+
+ call ESMF_FieldDestroy(mag_des_3dfld, rc=rc )
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldDestroy mag_des_3dfld', rc)
+ call ESMF_FieldDestroy(mag_des_2dfld, rc=rc )
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldDestroy mag_des_2dfld', rc)
+ call ESMF_FieldDestroy(mag_src_3dfld, rc=rc )
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldDestroy mag_src_3dfld', rc)
+ call ESMF_FieldDestroy(mag_src_2dfld, rc=rc )
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldDestroy mag_src_2dfld', rc)
+
+ call ESMF_GridDestroy(mag_src_grid, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_GridDestroy mag_src_grid', rc)
+ call ESMF_GridDestroy(mag_des_grid, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_GridDestroy mag_des_grid', rc)
+
+ end subroutine edyn_esmf_destroy_mag_objs
+
+ !-----------------------------------------------------------------------
+ !-----------------------------------------------------------------------
+ subroutine edyn_esmf_destroy_nonmag_objs
+
+ integer :: rc ! return code for ESMF calls
+ character(len=*), parameter :: subname = 'edyn_esmf_destroy_nonmag_objs'
+
+ call ESMF_RouteHandleDestroy(routehandle_phys2geo, rc=rc )
+ call edyn_esmf_chkerr(subname, 'ESMF_RouteHandleDestroy routehandle_phys2geo', rc)
+ call ESMF_RouteHandleDestroy(routehandle_geo2phys, rc=rc )
+ call edyn_esmf_chkerr(subname, 'ESMF_RouteHandleDestroy routehandle_geo2phys', rc)
+
+ call ESMF_FieldDestroy(phys_3dfld, rc=rc )
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldDestroy phys_3dfld', rc)
+ call ESMF_FieldDestroy(phys_2dfld, rc=rc )
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldDestroy phys_2dfld', rc)
+ call ESMF_FieldDestroy(geo_3dfld, rc=rc )
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldDestroy geo_3dfld', rc)
+ call ESMF_FieldDestroy(geo_2dfld, rc=rc )
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldDestroy geo_2dfld', rc)
+
+ call ESMF_GridDestroy(geo_grid, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_GridDestroy geo_grid', rc)
+ call ESMF_MeshDestroy(phys_mesh, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_MeshDestroy phys_mesh', rc)
+
+ end subroutine edyn_esmf_destroy_nonmag_objs
+
+ !-----------------------------------------------------------------------
+ !-----------------------------------------------------------------------
+ subroutine edyn_esmf_update
+ use getapex, only: get_apex, magfield, alonm
+ use mo_apex, only: geomag_year_updated
+
+ ! Create ESMF grids for physics, geographic (ion transport), and
+ ! magnetic grids, and create ESMF fields as necessary on each grid.
+ ! Define the 2d coordinates for each grid, and save an ESMF
+ ! routehandles for regridding.
+ !
+ ! Local:
+ integer :: rc ! return code for ESMF calls
+ integer :: lbnd_destgeo(3), ubnd_destgeo(3) ! 3d bounds of dest geo grid
+ integer :: lbnd_destmag(3), ubnd_destmag(3) ! 3d bounds of dest mag grid
+ integer :: lbnd_srcgeo(3), ubnd_srcgeo(3) ! 3d bounds of src geo grid
+ integer :: lbnd_srcmag(3), ubnd_srcmag(3) ! 3d bounds of src mag grid
+ real(ESMF_KIND_R8), pointer :: fptr(:,:,:)
+ integer(ESMF_KIND_I4), pointer :: factorIndexList(:,:)
+ real(ESMF_KIND_R8), pointer :: factorList(:)
+ integer :: smm_srctermproc, smm_pipelinedep
+
+ character(len=*), parameter :: subname = 'edyn_esmf_update'
+
+ if (.not.geomag_year_updated .and. initialized) then
+ return
+ end if
+
+ if (mytid>> create_mag_grid: error return from ',&
- 'ESMF_GridGetCoord for latitude coords: rc=',rc
- call endrun('create_mag_grid: ESMF_GridGetCoord latitude')
- endif
+ endif
- do j=lbnd(2),ubnd(2)
- do i=lbnd(1),ubnd(1)
- coordY(i,j) = gdlatdeg(i,j)
- enddo
- enddo
+ smm_srctermproc = 0
+ smm_pipelinedep = 16
- if (debug) then
- write(iulog,"(4a,2i4,a,2i4,a,2i4,a,2i4)") 'Created ESMF ',srcdes,' mag grid: ', &
- ' lbnd,ubnd_lon=',lbnd(1),ubnd(1),' mlon0,1=',mlon0,mlon1, &
- ' lbnd,ubnd_lat=',lbnd(2),ubnd(2),' mlat0,1=',mlat0,mlat1
- endif
+ if (initialized) then
+ call edyn_esmf_destroy_mag_objs()
+ endif
+ if (.not.initialized) then
+ !
+ ! Make geographic grid for phys2geo and geo2phys regridding:
+ !
+ call create_geo_grid(geo_grid) ! geo (Oplus) grid
+ endif
+ !
+ ! Make magnetic grid for phys2mag regridding:
+ !
+ call create_mag_grid(mag_des_grid, 'des') ! mag destination grid
+ !
+ ! Make grid for mag2phys regridding:
+ !
+ call create_mag_grid(mag_src_grid, 'src')
+ !
+ ! Create empty fields on geographic grid or phyiscs mesh that
+ ! will be transformed to the magnetic grid and passed as input
+ ! to the dynamo. This does not assign any values.
+ !
+ ! 3d fields (inputs to edynamo) on physics mesh for phys2mag:
+ !
+ if (.not.initialized) then
+ call edyn_esmf_create_physfield(phys_2dfld, phys_mesh, 'PHYS_2DFLD', 0)
+ call edyn_esmf_create_physfield(phys_3dfld, phys_mesh, 'PHYS_3DFLD', nlev)
+
+ call edyn_esmf_create_geofield(geo_2dfld, geo_grid, 'GEO_2DFLD', 0)
+ call edyn_esmf_create_geofield(geo_3dfld, geo_grid, 'GEO_3DFLD', nlev)
+ endif
- end subroutine create_mag_grid
-!-----------------------------------------------------------------------
- subroutine create_geo_grid(grid_out,srcdes)
-!
-! Args:
- type(ESMF_Grid),intent(out) :: grid_out
- character(len=*),intent(in) :: srcdes
-!
-! Local:
- integer :: i,j,n,rc
- integer :: lbnd_lat,ubnd_lat,lbnd_lon,ubnd_lon,lbnd(1),ubnd(1)
- real(ESMF_KIND_R8),pointer :: coordX(:),coordY(:)
- integer :: nlons_task(ntaski) ! number of lons per task
- integer :: nlats_task(ntaskj) ! number of lats per task
- logical :: has_poles
-!
-! We are creating either a source grid or a destination grid:
-!
- if (srcdes /= 'src' .and. srcdes /= 'des') then
- write(iulog,"(a)") '>>> create_geo_grid: srcdes = ''',srcdes, &
- ''' but must be either ''src'' or ''des'''
- call endrun('create_geo_grid: srcdes')
- endif
-!
-! nlons_task(ntaski) = number of geo lons per task.
-!
- do i=1,ntaski
- loop: do n=0,ntask-1
- if (tasks(n)%mytidi==i-1) then
- nlons_task(i) = tasks(n)%nlons
- exit loop
- endif
- enddo loop
- enddo
-!
-! Exclude periodic points (2 points fewer for procs at each end)
-! for source grids only (east and west edges of task table).
-! (TIMEGCM only)
-!
-! if (srcdes == 'src'.and.trim(model_name)=='TIMEGCM') then
-! do n=0,ntask-1
-! east or west edge of task table:
-! if (tasks(n)%mytidi==ntaski-1.or.tasks(n)%mytidi==0) &
-! nlons_task(tasks(n)%mytidi+1) = tasks(n)%nlons-2
-! enddo
-! endif
-!
-! nlats_task(ntaskj) = number of geo lats per task.
-!
- do j=1,ntaskj
- loop1: do n=0,ntask-1
- if (tasks(n)%mytidj==j-1) then
- nlats_task(j) = tasks(n)%nlats
- exit loop1
- endif
- enddo loop1
- enddo
-!
-! Check to see if global glat(nlat) has poles (WACCM does, TIMEGCM does not):
- has_poles = .false.
- do j=1,nlat
- if (abs(glat(j))==90._r8) has_poles = .true.
- enddo
-
- if (debug) write(iulog,"('create_geo_grid: srcdes=',a,' has_poles=',l1)") srcdes,has_poles
-!
-! If making destination grid and glat does not have poles, add extra points
-! at north and south edges of task table:
-!
- if (.not.has_poles.and.srcdes=='des') then ! probably TIMEGCM
- do n=0,ntask-1
-! north or south edge of task table: add 1 lat for pole
- if (tasks(n)%mytidj==ntaskj-1.or.tasks(n)%mytidj==0) &
- nlats_task(tasks(n)%mytidj+1) = tasks(n)%nlats+1
- enddo
-!
-! Create 2d geographic destination grid (minimum lat index is 0 to include poles):
- grid_out = ESMF_GridCreate1PeriDim( &
- countsPerDEDim1=nlons_task, coordDep1=(/1/), &
- countsPerDEDim2=nlats_task, coordDep2=(/2/), &
- indexflag=ESMF_INDEX_GLOBAL,minIndex=(/1,0/),rc=rc)
-
- elseif (has_poles) then ! geo source grid does not have poles
-!
-! Create 2d geographic source grid (without poles)
- grid_out = ESMF_GridCreate1PeriDim( &
- countsPerDEDim1=nlons_task, coordDep1=(/1/), &
- countsPerDEDim2=nlats_task, coordDep2=(/2/), &
- indexflag=ESMF_INDEX_GLOBAL,minIndex=(/1,1/),rc=rc)
- else
- write(iulog,*) 'No capability for ESMF to handle source grid without poles'
- call endrun('create_geo_grid: No ESMF capability for source grid without poles')
- endif
+ call edyn_esmf_create_magfield(mag_des_2dfld, mag_des_grid, 'MAG_DES_2DFLD', 0)
+ call edyn_esmf_create_magfield(mag_des_3dfld, mag_des_grid, 'MAG_DES_3DFLD', nlev)
+
+ call edyn_esmf_create_magfield(mag_src_2dfld, mag_src_grid, 'MAG_SRC_2DFLD', 0)
+ call edyn_esmf_create_magfield(mag_src_3dfld, mag_src_grid, 'MAG_SRC_3DFLD', nlev)
+
+ if (debug .and. masterproc) then
+ !
+ ! Get 3d bounds of source geo field:
+ !
+ call ESMF_FieldGet(geo_3dfld, localDe=0, farrayPtr=fptr, &
+ computationalLBound=lbnd_srcgeo, &
+ computationalUBound=ubnd_srcgeo, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldGet, geo_3dfld', rc)
+
+ write(iulog,"(2a,i4,2(', ',i4),a,i4,2(', ',i4),a)") subname, &
+ ': Bounds of source geo field: lbnd_srcgeo = (', &
+ lbnd_srcgeo, '), ubnd_srcgeo = (', ubnd_srcgeo,')'
+ end if
+
+ if (debug .and. masterproc) then
+ !
+ ! Get 3d bounds of destination mag field:
+ !
+ call ESMF_FieldGet(mag_des_3dfld, localDe=0, farrayPtr=fptr, &
+ computationalLBound=lbnd_destmag, &
+ computationalUBound=ubnd_destmag, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldGet, mag_des_3dfld', rc)
+
+ write(iulog,"(2a,3i4,a,3i4,' gmlon=',2f9.3)") subname, &
+ ': Bounds of destination mag field: lbnd_destmag = ', &
+ lbnd_destmag, ' ubnd_destmag = ', ubnd_destmag
+ write(iulog,"(a,': lon bnd_destmag =',2i4,' gmlon = ',2f9.3)") &
+ subname, lbnd_destmag(1), ubnd_destmag(1), &
+ gmlon(lbnd_destmag(1)), gmlon(ubnd_destmag(1))
+ write(iulog,"(a,': lat bnd_destmag = ',2i4,' gmlat = ',2f9.3)") &
+ subname, lbnd_destmag(2), ubnd_destmag(2), &
+ gmlat(lbnd_destmag(2)), gmlat(ubnd_destmag(2))
+ !
+ ! Get 3d bounds of source mag field:
+ !
+ call ESMF_FieldGet(mag_src_3dfld, localDe=0, farrayPtr=fptr, &
+ computationalLBound=lbnd_srcmag, &
+ computationalUBound=ubnd_srcmag, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldGet, mag_src_3dfld', rc)
+
+ write(iulog,"(a,2(a,i4),a)") subname, ': lon srcmag bounds = (', &
+ lbnd_srcmag(1), ', ', ubnd_srcmag(1), ')'
+ write(iulog,"(a,2(a,i4),a)") subname, ': lat srcmag bounds = (', &
+ lbnd_srcmag(2), ', ', ubnd_srcmag(2), ')'
+ !
+ ! Get 3d bounds of destination geo field:
+ !
+ call ESMF_FieldGet(geo_3dfld, localDe=0, farrayPtr=fptr, &
+ computationalLBound=lbnd_destgeo, &
+ computationalUBound=ubnd_destgeo, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldGet, geo_3dfld', rc)
+
+ write(iulog,"(a,': lon bnd_destgeo=',2i4)") subname, &
+ lbnd_destgeo(1),ubnd_destgeo(1)
+ write(iulog,"(a,': lat bnd_destgeo=',2i4)") subname, &
+ lbnd_destgeo(2),ubnd_destgeo(2)
+ end if
+
+ !
+ ! Save route handles for grid transformations in both directions
+ ! phys2mag and mag2phys. FieldRegridStore needs to be called only
+ ! once for each transformation before the timestep loop (src and
+ ! dest fields are still required, so just use ped here). Once inside
+ ! the timestep loop, the same routehandle can be used for all fields
+ ! that are regridded in the given direction.
+ !
+
+ !
+ ! Compute and store route handle for phys2mag 2d fields:
+ !
+ call ESMF_FieldRegridStore(srcField=phys_2dfld, dstField=mag_des_2dfld, &
+ regridMethod=ESMF_REGRIDMETHOD_BILINEAR, &
+ polemethod=ESMF_POLEMETHOD_ALLAVG, &
+ extrapMethod=ESMF_EXTRAPMETHOD_NEAREST_IDAVG, &
+ routeHandle=routehandle_phys2mag_2d, &
+ factorIndexList=factorIndexList, &
+ factorList=factorList, srcTermProcessing=smm_srctermproc, &
+ pipelineDepth=smm_pipelinedep, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldRegridStore for 2D phys2mag', rc)
+
+ !
+ ! Compute and store route handle for phys2mag 3d fields:
+ !
+ call ESMF_FieldRegridStore(srcField=phys_3dfld, dstField=mag_des_3dfld, &
+ regridMethod=ESMF_REGRIDMETHOD_BILINEAR, &
+ polemethod=ESMF_POLEMETHOD_ALLAVG, &
+ extrapMethod=ESMF_EXTRAPMETHOD_NEAREST_IDAVG, &
+ routeHandle=routehandle_phys2mag, factorIndexList=factorIndexList, &
+ factorList=factorList, srcTermProcessing=smm_srctermproc, &
+ pipelineDepth=smm_pipelinedep, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldRegridStore for 3D phys2mag', rc)
+ !
+ ! Compute and store route handle for mag2phys 2d (amie) fields:
+ !
+ call ESMF_FieldRegridStore(srcField=mag_src_2dfld, dstField=phys_2dfld,&
+ regridMethod=ESMF_REGRIDMETHOD_BILINEAR, &
+ polemethod=ESMF_POLEMETHOD_ALLAVG, &
+ extrapMethod=ESMF_EXTRAPMETHOD_NEAREST_IDAVG, &
+ routeHandle=routehandle_mag2phys_2d, &
+ factorIndexList=factorIndexList, &
+ factorList=factorList, srcTermProcessing=smm_srctermproc, &
+ pipelineDepth=smm_pipelinedep, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldRegridStore for 2D mag2phys', rc)
+ if (.not.initialized) then
+ !
+ ! Compute and store route handle for phys2geo 3d fields:
+ !
+ call ESMF_FieldRegridStore(srcField=phys_3dfld, dstField=geo_3dfld, &
+ regridMethod=ESMF_REGRIDMETHOD_BILINEAR, &
+ polemethod=ESMF_POLEMETHOD_ALLAVG, &
+ extrapMethod=ESMF_EXTRAPMETHOD_NEAREST_IDAVG, &
+ routeHandle=routehandle_phys2geo, factorIndexList=factorIndexList, &
+ factorList=factorList, srcTermProcessing=smm_srctermproc, &
+ pipelineDepth=smm_pipelinedep, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldRegridStore for 3D phys2geo', rc)
+
+ !
+ ! Compute and store route handle for geo2phys 3d fields:
+ !
+ call ESMF_FieldRegridStore(srcField=geo_3dfld, dstField=phys_3dfld,&
+ regridMethod=ESMF_REGRIDMETHOD_BILINEAR, &
+ polemethod=ESMF_POLEMETHOD_ALLAVG, &
+ extrapMethod=ESMF_EXTRAPMETHOD_NEAREST_IDAVG, &
+ routeHandle=routehandle_geo2phys, factorIndexList=factorIndexList, &
+ factorList=factorList, srcTermProcessing=smm_srctermproc, &
+ pipelineDepth=smm_pipelinedep, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldRegridStore for 3D geo2phys', rc)
+ endif
+ !
+ ! Compute and store route handle for geo2mag 3d fields:
+ !
+ call ESMF_FieldRegridStore(srcField=geo_3dfld, dstField=mag_des_3dfld, &
+ regridMethod=ESMF_REGRIDMETHOD_BILINEAR, &
+ polemethod=ESMF_POLEMETHOD_ALLAVG, &
+ routeHandle=routehandle_geo2mag, factorIndexList=factorIndexList, &
+ factorList=factorList, srcTermProcessing=smm_srctermproc, &
+ pipelineDepth=smm_pipelinedep, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldRegridStore for 3D geo2mag', rc)
+ !
+ ! Compute and store route handle for geo2mag 2d fields:
+ !
+ call ESMF_FieldRegridStore(srcField=geo_2dfld, dstField=mag_des_2dfld, &
+ regridMethod=ESMF_REGRIDMETHOD_BILINEAR, &
+ polemethod=ESMF_POLEMETHOD_ALLAVG, &
+ routeHandle=routehandle_geo2mag_2d, &
+ factorIndexList=factorIndexList, &
+ factorList=factorList, srcTermProcessing=smm_srctermproc, &
+ pipelineDepth=smm_pipelinedep, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldRegridStore for 2D geo2mag', rc)
+
+ !
+ ! Compute and store route handle for mag2geo 3d fields:
+ !
+ call ESMF_FieldRegridStore(srcField=mag_src_3dfld, dstField=geo_3dfld, &
+ regridMethod=ESMF_REGRIDMETHOD_BILINEAR, &
+ polemethod=ESMF_POLEMETHOD_ALLAVG, &
+ routeHandle=routehandle_mag2geo, factorIndexList=factorIndexList, &
+ factorList=factorList, srcTermProcessing=smm_srctermproc, &
+ pipelineDepth=smm_pipelinedep, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldRegridStore for 3D mag2geo', rc)
+
+ initialized=.true.
+
+ end subroutine edyn_esmf_update
+
+ !-----------------------------------------------------------------------
+ subroutine create_mag_grid(grid_out, srcdes)
+ !
+ ! Create ESMF geomagnetic grid, w/ lon,lat coordinates.
+ !
+ ! Args:
+ type(ESMF_Grid), intent(out) :: grid_out
+ character(len=*), intent(in) :: srcdes
+ !
+ ! Local:
+ integer :: i,j,n,rc
+ real(ESMF_KIND_R8), pointer :: coordX(:,:),coordY(:,:)
+ integer :: lbnd(2),ubnd(2)
+ integer :: nmlons_task(ntaski) ! number of lons per task
+ integer :: nmlats_task(ntaskj) ! number of lats per task
+ character(len=*), parameter :: subname = 'create_mag_grid'
+
+ !
+ ! We are creating either a source grid or a destination grid:
+ !
+ if (srcdes /= 'src' .and. srcdes /= 'des') then
+ write(iulog,"(a)") '>>> create_mag_grid: srcdes = ''',srcdes, &
+ ''' but must be either ''src'' or ''des'''
+ call endrun('create_mag_grid: srcdes')
+ end if
+ !
+ ! nmlons_task(nmagtaski) = number of mag lons per task in lon dim
+ !
+ do i = 1, nmagtaski
+ loop: do n = 0, ntask - 1
+ if (tasks(n)%magtidi == i-1) then
+ nmlons_task(i) = tasks(n)%nmaglons
+ exit loop
+ end if
+ end do loop
+ end do
+ !
+ ! Exclude periodic points (1 point fewer for mpi tasks at east end)
+ ! for source grids (this overwrites above for eastern-most tasks):
+ !
+ if (srcdes == 'src') then
+ do n = 0, ntask-1
+ if (tasks(n)%magtidi == nmagtaski-1) then ! east edge of proc matrix
+ nmlons_task(tasks(n)%magtidi+1) = tasks(n)%nmaglons-1
+ end if
+ end do
+ end if
+ !
+ ! nmlats_task(nmagtaskj) = number of mag lats per task in lat dim
+ !
+ do j = 1, nmagtaskj
+ loop1: do n = 0, ntask-1
+ if (tasks(n)%magtidj == j-1) then
+ nmlats_task(j) = tasks(n)%nmaglats
+ exit loop1
+ end if
+ end do loop1
+ end do
+ !
+ ! Create curvilinear magnetic grid (both coords depend
+ ! on both dimensions, i.e., lon(i,j),lat(i,j)):
+ !
+ grid_out = ESMF_GridCreate1PeriDim( &
+ countsPerDEDim1=nmlons_task, coordDep1=(/1,2/), &
+ countsPerDEDim2=nmlats_task, coordDep2=(/1,2/), petmap=petmap, &
+ indexflag=ESMF_INDEX_GLOBAL,rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_GridCreate1PeriDim', rc)
+ !
+ ! Allocate coordinates:
+ !
+ call ESMF_GridAddCoord(grid_out,staggerloc=ESMF_STAGGERLOC_CENTER,rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_GridAddCoord', rc)
+ if (mytid 72
+ end do
+ !
+ ! Get pointer and set geo grid latitude coordinates, including poles:
+ !
+ call ESMF_GridGetCoord(grid_out, coordDim=2, localDE=0, &
+ computationalLBound=lbnd, computationalUBound=ubnd, &
+ farrayPtr=coordY, staggerloc=ESMF_STAGGERLOC_CENTER, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_GridGetCoord for latitude coords', rc)
+
+ lbnd_lat = lbnd(1)
+ ubnd_lat = ubnd(1)
+ do i = lbnd_lat, ubnd_lat
+ coordY(i) = glat(i)
+ end do
+
+ if (debug .and. masterproc) then
+ write(iulog,"(2a,2i4,a,2i4,a,2i4,a,2i4)") 'Created ESMF geo_grid:', &
+ ' lbnd,ubnd_lon=', lbnd_lon, ubnd_lon, ' lon0,1=', lon0, lon1, &
+ ' lbnd,ubnd_lat=', lbnd_lat, ubnd_lat, ' lat0,1=', lat0, lat1
+ write(iulog,"('coordX for geo grid = ',/,(8f10.4))") coordX
+ write(iulog,"('coordY for geo grid = ',/,(8f10.4))") coordY
+ end if
+ endif
- if (rc /=ESMF_SUCCESS) then
- write(iulog,"(/,2a,i4)") '>>> create_geo_grid: error return from ',&
- 'ESMF_GridCreate1PeriDim: rc=',rc
- call endrun('create_geo_grid: ESMF_GridCreate1PeriDim')
- endif
-!
-! Allocate coordinates:
-!
- call ESMF_GridAddCoord(grid_out,staggerloc=ESMF_STAGGERLOC_CENTER,rc=rc)
-
- if (rc /=ESMF_SUCCESS) then
- write(iulog,"(/,a)") '>>> create_geo_grid: error return from ESMF_GridAddCoord'
- call endrun('create_geo_grid: ESMF_GridAddCoord')
- endif
-!
-! Get pointer and set geo grid longitude coordinates:
-!
- call ESMF_GridGetCoord(grid_out, coordDim=1, localDE=0, &
- computationalLBound=lbnd, computationalUBound=ubnd, &
- farrayPtr=coordX, staggerloc=ESMF_STAGGERLOC_CENTER, rc=rc)
-
- if (rc /=ESMF_SUCCESS) then
- write(iulog,"(/,2a)") '>>> create_geo_grid: error return from ',&
- 'ESMF_GridGetCoord for longitude coords'
- call endrun('create_geo_grid: ESMF_GridGetCoord longitude')
- endif
-!
-! Note glon was shifted to +/-180 by sub set_geogrid (edyn_init.F90)
-!
- lbnd_lon = lbnd(1) ; ubnd_lon = ubnd(1)
- do i=lbnd_lon,ubnd_lon
- coordX(i) = glon(i) ! 1 -> 72
- enddo
-!
-! Get pointer and set geo grid latitude coordinates, including poles:
-!
- call ESMF_GridGetCoord(grid_out, coordDim=2, localDE=0, &
- computationalLBound=lbnd, computationalUBound=ubnd, &
- farrayPtr=coordY, staggerloc=ESMF_STAGGERLOC_CENTER, rc=rc)
-
- if (rc /=ESMF_SUCCESS) then
- write(iulog,"(/,2a)") '>>> create_geo_grid: error return from ',&
- 'ESMF_GridGetCoord for latitude coords'
- call endrun('create_geo_grid: ESMF_GridGetCoord latitude')
- endif
+ end subroutine create_geo_grid
+
+ !-----------------------------------------------------------------------
+ subroutine edyn_esmf_create_physfield(field, mesh, name, nlev)
+ !
+ ! Create ESMF field (2d or 3d) on physics mesh
+ ! If nlev == 0, field is 2d (i,j), otherwise field is 3d,
+ ! and 3rd dimension is ungridded
+ !
+ ! Args:
+ integer, intent(in) :: nlev ! if nlev == 0, field is 2d (i,j)
+ type(ESMF_Mesh), intent(in) :: mesh
+ character(len=*), intent(in) :: name
+ type(ESMF_Field), intent(out) :: field
+ !
+ ! Local:
+ integer :: rc
+ type(ESMF_ArraySpec) :: arrayspec
+ character(len=*), parameter :: subname = 'edyn_esmf_create_physfield'
+
+
+ ! Create 3d field (i,j,k), with non-distributed vertical dimension:
+ if (nlev > 0) then
+ call ESMF_ArraySpecSet(arrayspec, 2, ESMF_TYPEKIND_R8, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_ArraySpecSet 2D', rc)
+ field = ESMF_FieldCreate(mesh, arrayspec, &
+ gridToFieldMap=(/2/), meshloc=ESMF_MESHLOC_ELEMENT, &
+ ungriddedLBound=(/1/), ungriddedUBound=(/nlev/), rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldCreate 2D field', rc)
+ !
+ ! Create 2d field (i,j):
+ else ! create 2d field
+ call ESMF_ArraySpecSet(arrayspec, 1, ESMF_TYPEKIND_R8, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_ArraySpecSet 2D', rc)
+ field = ESMF_FieldCreate(mesh, arrayspec, meshloc=ESMF_MESHLOC_ELEMENT, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldCreate 2D field', rc)
+ end if
+
+ end subroutine edyn_esmf_create_physfield
+ !-----------------------------------------------------------------------
+ subroutine edyn_esmf_create_geofield(field, grid, name, nlev)
+ !
+ ! Create ESMF field (2d or 3d) on geo grid (will exclude periodic points)
+ ! If nlev == 0, field is 2d (i,j), otherwise field is 3d,
+ ! and 3rd dimension is ungridded
+ !
+ ! Args:
+ integer, intent(in) :: nlev ! if nlev == 0, field is 2d (i,j)
+ type(ESMF_Grid), intent(in) :: grid
+ character(len=*), intent(in) :: name
+ type(ESMF_Field), intent(out) :: field
+ !
+ ! Local:
+ integer :: rc
+ type(ESMF_ArraySpec) :: arrayspec
+ character(len=*), parameter :: subname = 'edyn_esmf_create_geofield'
+ !
+ ! Create 3d field (i,j,k), with non-distributed vertical dimension:
+ if (nlev > 0) then
+ call ESMF_ArraySpecSet(arrayspec,3,ESMF_TYPEKIND_R8,rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_ArraySpecSet 3D', rc)
+ field = ESMF_FieldCreate(grid, arrayspec,ungriddedLBound=(/1/), &
+ ungriddedUBound=(/nlev/),staggerloc=ESMF_STAGGERLOC_CENTER, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldCreate 3D field', rc)
+ !
+ ! Create 2d field (i,j):
+ else ! create 2d field
+ call ESMF_ArraySpecSet(arrayspec, 2, ESMF_TYPEKIND_R8, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_ArraySpecSet 2D', rc)
+ field = ESMF_FieldCreate(grid, arrayspec,&
+ staggerloc=ESMF_STAGGERLOC_CENTER,rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldCreate 2D field', rc)
+ end if
+ end subroutine edyn_esmf_create_geofield
+ !-----------------------------------------------------------------------
+ subroutine edyn_esmf_create_magfield(field, grid, name, nlev)
+ !
+ ! Create ESMF field (2d or 3d) on mag grid. This will include the
+ ! mag periodic point, which will be zero after regridding.
+ ! If nlev == 0, field is 2d (i,j), otherwise field is 3d,
+ ! and 3rd dimension is ungridded
+ !
+ ! Args:
+ integer, intent(in) :: nlev ! if nlev == 0, field is 2d (i,j)
+ type(ESMF_Grid), intent(in) :: grid
+ character(len=*), intent(in) :: name
+ type(ESMF_Field), intent(out) :: field
+ !
+ ! Local:
+ integer :: rc
+ type(ESMF_ArraySpec) :: arrayspec
+ type(ESMF_Array) :: array3d,array2d
+ type(ESMF_DistGrid) :: distgrid
+ character(len=*), parameter :: subname = 'edyn_esmf_create_magfield'
+ !
+ ! Get necessary information from the mag grid:
+ call ESMF_GridGet(grid, staggerloc=ESMF_STAGGERLOC_CENTER, &
+ distgrid=distgrid,rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_GridGet', rc)
+ !
+ ! Create 3d mag field (i,j,k), with non-distributed vertical dimension:
+ ! (add periodic point in longitude with computationalEdgeUWidth)
+ !
+ if (nlev > 0) then
+ call ESMF_ArraySpecSet(arrayspec,3,ESMF_TYPEKIND_R8,rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_ArraySpecSet 3D field', rc)
+
+ array3d = ESMF_ArrayCreate(arrayspec=arrayspec, &
+ distgrid=distgrid,computationalEdgeUWidth=(/1,0/), &
+ undistLBound=(/1/),undistUBound=(/nlev/), &
+ indexflag=ESMF_INDEX_GLOBAL,rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_ArrayCreate 3D field', rc)
+
+ field = ESMF_FieldCreate(grid, array3d, &
+ ungriddedLBound=(/1/), ungriddedUBound=(/nlev/), &
+ staggerloc=ESMF_STAGGERLOC_CENTER, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldCreate 3D field', rc)
+ !
+ ! Create 2d mag field (i,j):
+ ! (add periodic point in longitude with computationalEdgeUWidth)
+ !
+ else ! create 2d field
+ call ESMF_ArraySpecSet(arrayspec,2,ESMF_TYPEKIND_R8,rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_ArraySpecSet 2D field', rc)
+
+ array2d = ESMF_ArrayCreate(arrayspec=arrayspec, &
+ distgrid=distgrid,computationalEdgeUWidth=(/1,0/), &
+ indexflag=ESMF_INDEX_GLOBAL,rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_ArrayCreate 2D field', rc)
+ field = ESMF_FieldCreate(grid, array2d, &
+ staggerloc=ESMF_STAGGERLOC_CENTER, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldCreate 2D field', rc)
+ end if
+ end subroutine edyn_esmf_create_magfield
+
+ !-----------------------------------------------------------------------
+ subroutine edyn_esmf_set3d_geo(field, fdata, ilon0, ilon1, ilat0, ilat1, ilev0, ilev1 )
+ !
+ ! Set values of a 3d ESMF field on geographic source grid, prior to
+ ! geographic to physics grid transformation.
+ ! Periodic points are excluded, geographic poles are at
+ ! j==jspole and jnpole
+   ! Note fdata is dimensioned (ilon,ilat,ilev), matching the ESMF field layout (i,j,k).
+ !
+ ! Args:
+ type(ESMF_Field), intent(in) :: field ! esmf fields on geo grid
+ !
+ ! field is input data on model subdomains (including periodic points)
+ ! (note esmf source field excludes periodic points)
+ !
+ integer, intent(in) :: ilev0, ilev1, ilon0, ilon1, ilat0, ilat1
+ real(r8), intent(in) :: fdata(ilon0:ilon1,ilat0:ilat1,ilev0:ilev1)
+ !
+ ! Local:
+ integer :: i, j, k, rc
+ integer :: lbnd(3), ubnd(3) ! 3d field bounds
+ !
+ ! fptr is esmf pointer (i,j,k) to 3d field, set by this subroutine
+ real(ESMF_KIND_R8), pointer :: fptr(:,:,:)
+ character(len=*), parameter :: subname = 'edyn_esmf_set3d_geo'
+ if (mytid 0) then
- call ESMF_ArraySpecSet(arrayspec,3,ESMF_TYPEKIND_R8,rc=rc)
- if (rc /= ESMF_SUCCESS) call endrun('edyn_esmf_create_geofield: ESMF_ArraySpecSet 3d field')
- field = ESMF_FieldCreate(grid, arrayspec,ungriddedLBound=(/1/), &
- ungriddedUBound=(/nlev/),staggerloc=ESMF_STAGGERLOC_CENTER, rc=rc)
- if (rc /= ESMF_SUCCESS) call endrun('edyn_esmf_create_geofield: ESMF_FieldCreate 3d field')
-!
-! Create 2d field (i,j):
- else ! create 2d field
- call ESMF_ArraySpecSet(arrayspec,2,ESMF_TYPEKIND_R8,rc=rc)
- if (rc /= ESMF_SUCCESS) call endrun('edyn_esmf_create_geofield: ESMF_ArraySpecSet 2d field')
- field = ESMF_FieldCreate(grid, arrayspec,&
- staggerloc=ESMF_STAGGERLOC_CENTER,rc=rc)
- if (rc /= ESMF_SUCCESS) call endrun('edyn_esmf_create_geofield: ESMF_FieldCreate 2d field')
- endif
- end subroutine edyn_esmf_create_geofield
-!-----------------------------------------------------------------------
- subroutine edyn_esmf_create_magfield(field,grid,name,nlev)
-!
-! Create ESMF field (2d or 3d) on mag grid. This will include the
-! mag periodic point, which will be zero after regridding.
-! If nlev == 0, field is 2d (i,j), otherwise field is 3d,
-! and 3rd dimension is ungridded
-!
-! Args:
- integer,intent(in) :: nlev ! if nlev == 0, field is 2d (i,j)
- type(ESMF_Grid),intent(in) :: grid
- character(len=*),intent(in) :: name
- type(ESMF_Field),intent(out) :: field
-!
-! Local:
- integer :: rc
- type(ESMF_ArraySpec) :: arrayspec
- type(ESMF_Array) :: array3d,array2d
- type(ESMF_DistGrid) :: distgrid
-!
-! Get necessary information from the mag grid:
- call ESMF_GridGet(grid, staggerloc=ESMF_STAGGERLOC_CENTER,&
- distgrid=distgrid,rc=rc)
- if (rc /= ESMF_SUCCESS) call endrun('edyn_esmf_create_magfield: ESMF_GridGet')
-!
-! Create 3d mag field (i,j,k), with non-distributed vertical dimension:
-! (add periodic point in longitude with computationalEdgeUWidth)
-!
- if (nlev > 0) then
- call ESMF_ArraySpecSet(arrayspec,3,ESMF_TYPEKIND_R8,rc=rc)
- if (rc /= ESMF_SUCCESS)call endrun('edyn_esmf_create_magfield: ESMF_ArraySpecSet 3d field')
-
- array3d = ESMF_ArrayCreate(arrayspec=arrayspec, &
- distgrid=distgrid,computationalEdgeUWidth=(/1,0/), &
- undistLBound=(/1/),undistUBound=(/nlev/), &
- indexflag=ESMF_INDEX_GLOBAL,rc=rc)
- if (rc /= ESMF_SUCCESS) call endrun('edyn_esmf_create_magfield: ESMF_ArrayCreate 3d field')
-
- field = ESMF_FieldCreate(grid, array3d, &
- ungriddedLBound=(/1/), ungriddedUBound=(/nlev/), &
- staggerloc=ESMF_STAGGERLOC_CENTER, rc=rc)
- if (rc /= ESMF_SUCCESS) call endrun('edyn_esmf_create_magfield: ESMF_FieldCreate 3d field')
-!
-! Create 2d mag field (i,j):
-! (add periodic point in longitude with computationalEdgeUWidth)
-!
- else ! create 2d field
- call ESMF_ArraySpecSet(arrayspec,2,ESMF_TYPEKIND_R8,rc=rc)
- if (rc /= ESMF_SUCCESS)call endrun('edyn_esmf_create_magfield: ESMF_ArraySpecSet 2d field')
-
- array2d = ESMF_ArrayCreate(arrayspec=arrayspec, &
- distgrid=distgrid,computationalEdgeUWidth=(/1,0/), &
- indexflag=ESMF_INDEX_GLOBAL,rc=rc)
- if (rc /= ESMF_SUCCESS) call endrun('edyn_esmf_create_magfield: ESMF_ArrayCreate 2d field')
- field = ESMF_FieldCreate(grid, array2d, &
- staggerloc=ESMF_STAGGERLOC_CENTER, rc=rc)
- if (rc /= ESMF_SUCCESS) call endrun('edyn_esmf_create_magfield: ESMF_FieldCreate 2d field')
+ data(:,:,:) = fptr(:,:,:)
endif
- end subroutine edyn_esmf_create_magfield
-
-!-----------------------------------------------------------------------
- subroutine edyn_esmf_set3d_geo(fields,fnames,f,nf,ilev0,ilev1,&
- ilon0,ilon1,ilat0,ilat1)
-!
-! Set values of a 3d ESMF field on geographic source grid, prior to
-! geographic to magnetic grid transformation.
-! Periodic points are excluded, geographic poles are at j==jspole and jnpole
-! Note dimension order changes from input (k,i,j) to output (i,j,k).
-!
-! Args:
- integer,intent(in) :: nf
- type(ESMF_Field) ,intent(in) :: fields(nf) ! esmf fields on geo grid
- character(len=*) ,intent(in) :: fnames(nf) ! field names
-!
-! f is input data on model subdomains (including periodic points)
-! (note esmf source field excludes periodic points)
-!
- integer,intent(in) :: ilev0,ilev1,ilon0,ilon1,ilat0,ilat1
- real(r8),intent(in) :: f(ilev0:ilev1,ilon0:ilon1,ilat0:ilat1,nf)
-!
-! Local:
- integer :: i,ii,j,k,rc,n,istat
- integer,parameter :: mxf=8 ! for call by dynamo_inputs
- integer :: lbnd(3),ubnd(3) ! lower,upper bounds of 3d field
-!
-! fptr is esmf pointer (i,j,k) to 3d field, set by this subroutine
- real(ESMF_KIND_R8),pointer :: fptr(:,:,:)
- real(r8),allocatable :: ftmp(:,:,:,:) ! esmf bounds, plus nf
-
- if (nf > mxf) then
- write(iulog,"('>>> esmf_set3d_geo: nf cannot be greater than mxf: nf=',i4,' mxf=',i4)") &
- nf,mxf
- call endrun('edyn_esmf_set3d_geo: nf > mxf')
+
+ end subroutine edyn_esmf_get_3dfield
+ !-----------------------------------------------------------------------
+ subroutine edyn_esmf_get_2dfield(field, data, i0,i1,j0,j1 )
+ !
+ ! Get pointer to 2d esmf field (i,j):
+ !
+ ! Args:
+ integer, intent(in) :: i0,i1,j0,j1
+ type(ESMF_field), intent(in) :: field
+ real(r8), intent(out) :: data(i0:i1,j0:j1)
+ !
+ ! Local:
+ real(r8), pointer :: fptr(:,:)
+ integer :: rc, lbnd(2), ubnd(2)
+ character(len=*), parameter :: subname = 'edyn_esmf_get_2dfield'
+
+    call ESMF_FieldGet(field, localDe=0, farrayPtr=fptr, &
+         computationalLBound=lbnd, computationalUBound=ubnd, rc=rc)
+    call edyn_esmf_chkerr(subname, 'ESMF_FieldGet', rc)
+
- call endrun('edyn_esmf_set3d_geo: ESMF_FieldGet field 1')
+ if (i0/=lbnd(1).or.i1/=ubnd(1).or.j0/=lbnd(2).or.j1/=ubnd(2)) then
+ call endrun(subname//' array bnds do not match')
endif
-!
-! Do the allocation:
- allocate(ftmp(lbnd(1):ubnd(1),lbnd(2):ubnd(2),lbnd(3):ubnd(3),mxf),stat=istat)
- if (istat /= 0) then
- write(iulog,"('>>> esmf_set3d_geo: error allocating ftmp')")
- call endrun('edyn_esmf_set3d_geo: allocating ftmp')
+
+ data(:,:) = fptr(:,:)
+
+  end subroutine edyn_esmf_get_2dfield
+ !-----------------------------------------------------------------------
+ subroutine edyn_esmf_get_1dfield(field, data, i0,i1 )
+ !
+    ! Get pointer to 1d esmf field (i):
+ !
+ ! Args:
+ integer, intent(in) :: i0,i1
+ type(ESMF_field), intent(in) :: field
+ real(r8), intent(out) :: data(i0:i1)
+ !
+ ! Local:
+ real(r8), pointer :: fptr(:)
+ integer :: rc, lbnd(1), ubnd(1)
+ character(len=*), parameter :: subname = 'edyn_esmf_get_1dfield'
+
+ call ESMF_FieldGet(field, localDe=0, farrayPtr=fptr, &
+ computationalLBound=lbnd, computationalUBound=ubnd, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldGet', rc)
+
+ if (i0/=lbnd(1).or.i1/=ubnd(1)) then
+ call endrun(subname//' array bnds do not match')
endif
-!
-! Fields loop:
- do n=1,nf
- ftmp(:,:,:,n) = 0._r8
-!
-! Set interior latitudes (ftmp(i,j,k,n) <- f(k,i,j,n))
-! ftmp excludes periodic points.
-!
- do j=lbnd(2),ubnd(2) ! lat
- if (j /= jspole .and. j /= jnpole) then ! interior latitudes (not poles)
- do i=lbnd(1),ubnd(1) ! lon
- ii = i
- do k=lbnd(3),ubnd(3) ! lev
- ftmp(i,j,k,n) = f(k,ii,j,n)
- enddo ! lev
- enddo ! lon
- endif ! poles or interior
- enddo ! lat
- enddo ! n=1,nf
-!
-! Get and set pointer to the field:
- do n=1,nf
- call ESMF_FieldGet(fields(n),localDe=0,farrayPtr=fptr, &
- computationalLBound=lbnd,computationalUBound=ubnd,rc=rc)
- if (rc /= ESMF_SUCCESS) then
- write(iulog,"(a,i4)") '>>> esmf_set3d_geo: error from ESMF_FieldGet: rc=',rc
- call endrun('edyn_esmf_set3d_geo: ESMF_FieldGet field')
+
+ data(:) = fptr(:)
+
+ end subroutine edyn_esmf_get_1dfield
+ !-----------------------------------------------------------------------
+ subroutine edyn_esmf_regrid_phys2mag(srcfield, dstfield, ndim)
+ !
+ ! Args:
+ integer :: ndim
+ type(ESMF_Field), intent(inout) :: srcfield, dstfield
+ !
+ ! Local:
+ integer :: rc
+ character(len=*), parameter :: subname = 'edyn_esmf_regrid_phys2mag'
+ !
+ if (ndim == 2) then
+ !
+ ! Do sparse matrix multiply for 2d phys2mag.
+ !
+ call ESMF_FieldRegrid(srcfield, dstfield, routehandle_phys2mag_2d, &
+ termorderflag=ESMF_TERMORDER_SRCSEQ, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldRegrid phys2mag 2D', rc)
+    else ! 3d phys2mag
+       !
+       ! Do sparse matrix multiply for 3d phys2mag.
+       !
+ call ESMF_FieldRegrid(srcfield, dstfield, routehandle_phys2mag, &
+ termorderflag=ESMF_TERMORDER_SRCSEQ, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldRegrid phys2mag 3D', rc)
+ end if
+ end subroutine edyn_esmf_regrid_phys2mag
+ !-----------------------------------------------------------------------
+ subroutine edyn_esmf_regrid_mag2phys(srcfield, dstfield, ndim)
+ !
+ ! Args:
+ type(ESMF_Field), intent(inout) :: srcfield, dstfield
+ integer :: ndim
+ !
+ ! Local:
+ integer :: rc
+ character(len=*), parameter :: subname = 'edyn_esmf_regrid_mag2phys'
+ !
+ if (ndim == 2) then
+ call ESMF_FieldRegrid(srcfield, dstfield, routehandle_mag2phys_2d, &
+ termorderflag=ESMF_TERMORDER_SRCSEQ, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldRegrid mag2phys 2D', rc)
+ else
+ ! call ESMF_FieldRegrid(srcfield, dstfield, routehandle_mag2phys, &
+ ! termorderflag=ESMF_TERMORDER_SRCSEQ, rc=rc)
+ ! call edyn_esmf_chkerr(subname, 'ESMF_FieldRegrid mag2phys 3D', rc)
+ call endrun(subname//': routehandle_mag2phys not implemented')
+ end if
+ end subroutine edyn_esmf_regrid_mag2phys
+ !-----------------------------------------------------------------------
+ subroutine edyn_esmf_regrid_phys2geo(srcfield, dstfield, ndim)
+ !
+ ! Args:
+ integer :: ndim
+ type(ESMF_Field), intent(inout) :: srcfield, dstfield
+ !
+ ! Local:
+ integer :: rc
+ character(len=*), parameter :: subname = 'edyn_esmf_regrid_phys2geo'
+ !
+ if (ndim == 2) then
+ ! call ESMF_FieldRegrid( srcfield, dstfield, routehandle_phys2geo_2d, &
+ ! termorderflag=ESMF_TERMORDER_SRCSEQ, rc=rc)
+ ! call edyn_esmf_chkerr(subname, 'ESMF_FieldRegrid phys2geo 2D', rc)
+ call endrun(subname//': routehandle_phys2geo_2d not implemented')
+ else ! 3d phys2geo
+ !
+ ! Do sparse matrix multiply for 3d phys2geo.
+ !
+ call ESMF_FieldRegrid( srcfield, dstfield, routehandle_phys2geo, &
+ termorderflag=ESMF_TERMORDER_SRCSEQ, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldRegrid phys2geo 3D', rc)
+ end if
+ end subroutine edyn_esmf_regrid_phys2geo
+ !-----------------------------------------------------------------------
+ subroutine edyn_esmf_regrid_geo2phys(srcfield, dstfield, ndim)
+ !
+ ! Args:
+ integer :: ndim
+ type(ESMF_Field), intent(inout) :: srcfield, dstfield
+ !
+ ! Local:
+ integer :: rc
+ character(len=*), parameter :: subname = 'edyn_esmf_regrid_geo2phys'
+ !
+ if (ndim == 2) then
+ ! call ESMF_FieldRegrid(srcfield, dstfield, routehandle_geo2phys_2d, &
+ ! termorderflag=ESMF_TERMORDER_SRCSEQ, rc=rc)
+ ! call edyn_esmf_chkerr(subname, 'ESMF_FieldRegrid geo2phys 2D', rc)
+ call endrun(subname//': routehandle_geo2phys_2d not implemented')
+ else
+ call ESMF_FieldRegrid( srcfield, dstfield, routehandle_geo2phys, &
+ termorderflag=ESMF_TERMORDER_SRCSEQ, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldRegrid geo2phys 3D', rc)
+ end if
+ end subroutine edyn_esmf_regrid_geo2phys
+ !-----------------------------------------------------------------------
+ subroutine edyn_esmf_regrid_geo2mag(srcfield, dstfield, ndim)
+ !
+ ! Args:
+ integer :: ndim
+ type(ESMF_Field), intent(inout) :: srcfield, dstfield
+ !
+ ! Local:
+ integer :: rc
+ character(len=*), parameter :: subname = 'edyn_esmf_regrid_geo2mag'
+ !
+ if (ndim == 2) then
+ !
+ ! Do sparse matrix multiply for 2d geo2mag.
+ !
+ call ESMF_FieldRegrid(srcfield, dstfield, routehandle_geo2mag_2d, &
+ termorderflag=ESMF_TERMORDER_SRCSEQ, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldRegrid geo2mag 2D', rc)
+ else ! 3d geo2mag
+ !
+ ! Do sparse matrix multiply for 3d geo2mag.
+ !
+ call ESMF_FieldRegrid(srcfield, dstfield, routehandle_geo2mag, &
+ termorderflag=ESMF_TERMORDER_SRCSEQ, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldRegrid geo2mag 3D', rc)
+ end if
+ end subroutine edyn_esmf_regrid_geo2mag
+ !-----------------------------------------------------------------------
+
+ subroutine edyn_esmf_regrid_mag2geo(srcfield, dstfield, ndim)
+ !
+ ! Args:
+ integer :: ndim
+ type(ESMF_Field), intent(inout) :: srcfield, dstfield
+ !
+ ! Local:
+ integer :: rc
+ character(len=*), parameter :: subname = 'edyn_esmf_regrid_mag2geo'
+ !
+ if (ndim == 2) then
+ ! call ESMF_FieldRegrid(srcfield, dstfield, routehandle_mag2geo_2d, &
+ ! termorderflag=ESMF_TERMORDER_SRCSEQ, rc=rc)
+ ! call edyn_esmf_chkerr(subname, 'ESMF_FieldRegrid geo2mag 2D', rc)
+ call endrun(subname//': routehandle_mag2geo_2d not implemented')
+ else ! 3d geo2mag
+ !
+ ! Do sparse matrix multiply for 3d geo2mag.
+ !
+ call ESMF_FieldRegrid(srcfield, dstfield, routehandle_mag2geo, &
+ termorderflag=ESMF_TERMORDER_SRCSEQ, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_FieldRegrid mag2geo 3D', rc)
+ end if
+ end subroutine edyn_esmf_regrid_mag2geo
+
+
+ !-----------------------------------------------------------------------
+
+ subroutine edyn_esmf_update_phys_mesh(new_phys_mesh)
+
+ ! Dummy argument
+ type(ESMF_Mesh), intent(in) :: new_phys_mesh
+
+ integer :: petcnt, i,j
+
+ ! Ignore return code here as all we need is an attempt to reclaim memory
+ if (ESMF_MeshIsCreated(phys_mesh)) then
+ call ESMF_MeshDestroy(phys_mesh)
+ end if
+
+ phys_mesh = new_phys_mesh
+
+ if (.not. allocated(petmap)) then
+ allocate(petmap(ntaski,ntaskj,1))
endif
- fptr(:,:,:) = ftmp(:,:,:,n)
- enddo ! n=1,nf
-
- deallocate(ftmp)
-
- end subroutine edyn_esmf_set3d_geo
-!-----------------------------------------------------------------------
- subroutine edyn_esmf_set2d_geo(field,grid,fname,f,ilon0,ilon1,ilat0,ilat1)
-!
-! Set values of a 2d ESMF field on geographic source grid, prior to
-! geographic to magnetic grid transformation. (Essentially the same
-! as esmf_set3d_geo, except for 2d fields instead of 3d)
-! Periodic points are excluded, geographic poles are at j==jspole and jnpole
-!
-! Args:
- type(ESMF_Field) ,intent(in) :: field
- type(ESMF_Grid) ,intent(in) :: grid
- character(len=*) ,intent(in) :: fname ! field name
- integer ,intent(in) :: ilon0,ilon1,ilat0,ilat1
- real(r8) ,intent(in) :: f(ilon0:ilon1,ilat0:ilat1)
-!
-! Local:
- integer :: i,ii,j,rc
- real(ESMF_KIND_R8),pointer :: fptr(:,:) ! i,j
- integer :: lbnd(2),ubnd(2)
-!
-! Get pointer to the field:
- call ESMF_FieldGet(field,localDe=0,farrayPtr=fptr,&
- computationalLBound=lbnd,computationalUBound=ubnd,rc=rc)
- if (rc /= ESMF_SUCCESS) then
- write(iulog,"(a,i4)") '>>> esmf_set2d_geo: error from ESMF_FieldGet: rc=',rc
- call endrun('edyn_esmf_set2d_geo: ESMF_FieldGet')
- endif
-!
- fptr(:,:) = 0._r8 ! init
-!
-! Set interior latitudes (excluding poles):
- do j=lbnd(2),ubnd(2)
- if (j /= jspole .and. j /= jnpole) then
- do i=lbnd(1),ubnd(1)
- ii = i
- fptr(i,j) = f(ii,j)
- enddo
- endif ! interior latitudes only
- enddo
-
- if (debug) &
- write(iulog,"('esmf_set2d_geo field ',a,': lon bnds=',2i4, &
- ' lat bnds=',2i4,' 2d mnmx=',2e12.4)") &
- fname,lbnd(1),ubnd(1),lbnd(2),ubnd(2), &
- minval(fptr(:,:)),maxval(fptr(:,:))
-
- end subroutine edyn_esmf_set2d_geo
-!-----------------------------------------------------------------------
- subroutine edyn_esmf_set3d_mag(fields,fnames,f,nf,ilev0,ilev1,ilon0,ilon1,ilat0,ilat1)
-!
-! Set values of a 3d ESMF field on magnetic grid, prior to magnetic to
-! geographic grid transformation.
-!
-! Args:
- integer,intent(in) :: nf
- type(ESMF_Field) ,intent(in) :: fields(nf) ! esmf fields on mag grid
- character(len=*) ,intent(in) :: fnames(nf) ! field names
-!
-! f is input data on model subdomains:
-!
- integer,intent(in) :: ilev0,ilev1,ilon0,ilon1,ilat0,ilat1
- real(r8),intent(in) :: f(ilon0:ilon1,ilat0:ilat1,ilev0:ilev1,nf)
-!
-! Local:
- integer :: i,j,k,rc,n
- integer :: lbnd(3),ubnd(3) ! lower,upper bounds of 3d field
-!
-! fptr is esmf pointer (i,j,k) to 3d field, set by this subroutine
- real(ESMF_KIND_R8),pointer :: fptr(:,:,:)
-!
-! Fields loop:
- do n=1,nf
- call ESMF_FieldGet(fields(n),localDe=0,farrayPtr=fptr,&
- computationalLBound=lbnd,computationalUBound=ubnd,rc=rc)
- if (rc /= ESMF_SUCCESS) then
- write(iulog,"(a,i4)") '>>> esmf_set3d_mag: error from ESMF_FieldGet: rc=',rc
- call endrun('edyn_esmf_set3d_mag: ESMF_FieldGet')
- endif
-!
- fptr(:,:,:) = 0._r8
-!
-! Set ESMF pointer:
-!
- do j=lbnd(2),ubnd(2) ! lat
- do i=lbnd(1),ubnd(1) ! lon
- do k=lbnd(3),ubnd(3) ! lev
- fptr(i,j,k) = f(i,j,k,n)
- enddo ! mlev
- enddo ! mlon
- enddo ! mlat
- enddo ! n=1,nf
- end subroutine edyn_esmf_set3d_mag
-!-----------------------------------------------------------------------
-!
- subroutine edyn_esmf_set2d_mag(fields,fnames,f,nf,ilon0,ilon1,ilat0,ilat1)
-!
-! Set values of a 2d ESMF field on magnetic grid, prior to magnetic to
-! geographic grid transformation.
-!
-! Args:
- integer,intent(in) :: nf
- type(ESMF_Field) ,intent(in) :: fields(nf) ! esmf fields on mag grid
- character(len=*) ,intent(in) :: fnames(nf) ! field names
-!
-! f is input data on model subdomains:
-!
- integer,intent(in) :: ilon0,ilon1,ilat0,ilat1
- real(r8),intent(in) :: f(ilon0:ilon1,ilat0:ilat1,nf)
-!
-! Local:
- integer :: i,j,rc,n
- integer :: lbnd(2),ubnd(2) ! lower,upper bounds of 2d field
-!
-! fptr is esmf pointer (i,j,k) to 2d field, set by this subroutine
- real(ESMF_KIND_R8),pointer :: fptr(:,:)
-!
-! Fields loop:
- do n=1,nf
- call ESMF_FieldGet(fields(n),localDe=0,farrayPtr=fptr,&
- computationalLBound=lbnd,computationalUBound=ubnd,rc=rc)
- if (rc /= ESMF_SUCCESS) then
- write(iulog,"(a,i4)") '>>> esmf_set2d_mag: error from ESMF_FieldGet: rc=',rc
- call endrun('edyn_esmf_set2d_mag: ESMF_FieldGet')
- endif
-!
- fptr(:,:) = 0._r8
-!
-! Set ESMF pointer:
-!
- do j=lbnd(2),ubnd(2) ! lat
- do i=lbnd(1),ubnd(1) ! lon
- fptr(i,j) = f(i,j,n)
- enddo ! mlon
- enddo ! mlat
- enddo ! n=1,nf
-!
- end subroutine edyn_esmf_set2d_mag
-!-----------------------------------------------------------------------
- subroutine edyn_esmf_get_3dfield(field, fptr, name)
-!
-! Get pointer to 3d esmf field (i,j,k):
-!
-! Args:
- type(ESMF_field),intent(in) :: field
- real(r8),pointer,dimension(:,:,:),intent(out) :: fptr
- character(len=*),intent(in) :: name
-!
-! Local:
- integer :: rc,lbnd(3),ubnd(3)
- character(len=80) :: errmsg
-
- call ESMF_FieldGet(field,localDe=0,farrayPtr=fptr, &
- computationalLBound=lbnd,computationalUBound=ubnd,rc=rc)
- if (rc /= ESMF_SUCCESS) then
- write(errmsg,"('esmf_get_field 3d field ',a)") trim(name)
- call endrun('edyn_esmf_get_3dfield: ESMF_FieldGet')
- endif
- end subroutine edyn_esmf_get_3dfield
-!-----------------------------------------------------------------------
- subroutine edyn_esmf_get_2dfield(field, fptr, name)
-!
-! Get pointer to 2d esmf field (i,j):
-!
-! Args:
- type(ESMF_field),intent(in) :: field
- real(r8),pointer,dimension(:,:),intent(out) :: fptr
- character(len=*),intent(in) :: name
-!
-! Local:
- integer :: rc
- character(len=80) :: errmsg
-
- call ESMF_FieldGet(field,localDe=0,farrayPtr=fptr,rc=rc)
- if (rc /= ESMF_SUCCESS) then
- write(errmsg,"('edyn_esmf_get_2dfield ',a)") trim(name)
- call endrun('edyn_esmf_get_2dfield: ESMF_FieldGet')
- endif
- end subroutine edyn_esmf_get_2dfield
-!-----------------------------------------------------------------------
- subroutine edyn_esmf_regrid(srcfield,dstfield,direction,ndim)
-!
-! Args:
- integer :: ndim
- type(ESMF_Field),intent(inout) :: srcfield,dstfield
- character(len=*),intent(in) :: direction
-!
-! Local:
- integer :: rc
- type(ESMF_RouteHandle) :: routehandle
-!
-! Direction is either geo2mag or mag2geo.
-! Use corresponding route handle (module data)
-!
- select case(trim(direction))
- case ('geo2mag')
- routehandle = routehandle_geo2mag
- if (ndim==2) then
-!
-! Do sparse matrix multiply for 2d geo2mag.
-!
- routehandle = routehandle_geo2mag_2d
- call ESMF_FieldSMM(srcfield,dstfield,routehandle,termorderflag=ESMF_TERMORDER_SRCSEQ,rc=rc)
-
- if (rc /= ESMF_SUCCESS) then
- write(iulog,"(/,4a,i4)") '>>> edyn_esmf_regrid: error return from ',&
- 'ESMF_FieldSMM for 2d ',trim(direction),': rc=',rc
- call endrun('edyn_esmf_regrid: ESMF_FieldSMM 2d')
- endif
- else ! 3d geo2mag
-!
-! Do sparse matrix multiply for 3d geo2mag.
-!
- routehandle = routehandle_geo2mag
- call ESMF_FieldSMM(srcfield,dstfield,routehandle,termorderflag=ESMF_TERMORDER_SRCSEQ,rc=rc)
- if (rc /= ESMF_SUCCESS) then
- write(iulog,"(/,4a,i4)") '>>> edyn_esmf_regrid: error return from ',&
- 'ESMF_FieldSMM for 3d ',trim(direction),': rc=',rc
- call endrun('edyn_esmf_regrid: ESMF_FieldSMM 3d')
- endif
- endif
-!
-! Do sparse matrix multiply for 3d mag2geo.
-! btf 6/18/14: mag2geo is not working due to error return rc=51 from the
-! below call. Calls to mag2geo_3d at end of sub pefield (edynamo.F90)
-! are commented out (mag2geo_3d calls this routine with direction='mag2geo').
-!
- case ('mag2geo')
- if (ndim==2) then
- routehandle = routehandle_mag2geo_2d
- else
- routehandle = routehandle_mag2geo
- endif
- call ESMF_FieldSMM(srcfield,dstfield,routehandle,termorderflag=ESMF_TERMORDER_SRCSEQ,checkflag=.true.,rc=rc)
- if (rc /= ESMF_SUCCESS) then
- write(iulog,"(/,4a,i4)") '>>> edyn_esmf_regrid: error return from ',&
- 'ESMF_FieldSMM for 3d ',trim(direction),': rc=',rc
- call endrun('edyn_esmf_regrid: ESMF_FieldSMM magtogeo')
- endif
- case default
- write(iulog,"('>>> edyn_esmf_regrid: bad direction=',a)") trim(direction)
- call endrun
- end select
- end subroutine edyn_esmf_regrid
-!-----------------------------------------------------------------------
-
- subroutine edyn_esmf_update_flag( flag )
- logical, intent(in) :: flag
- edyn_esmf_update_step=flag
- end subroutine edyn_esmf_update_flag
-
-#endif
+ petcnt = 0
+ do j = 1,ntaskj
+ do i = 1,ntaski
+ petmap(i,j,1) = petcnt
+ petcnt = petcnt+1
+ end do
+ end do
+
+ end subroutine edyn_esmf_update_phys_mesh
+
end module edyn_esmf
diff --git a/src/ionosphere/waccmx/edyn_geogrid.F90 b/src/ionosphere/waccmx/edyn_geogrid.F90
index cacff0d7e8..773494df61 100644
--- a/src/ionosphere/waccmx/edyn_geogrid.F90
+++ b/src/ionosphere/waccmx/edyn_geogrid.F90
@@ -1,73 +1,236 @@
module edyn_geogrid
!
-! Global geographic grid.
+! Global geographic grid.
! See sub set_geogrid (edyn_init.F90)
!
- use shr_kind_mod, only : r8 => shr_kind_r8 ! 8-byte reals
+ use shr_kind_mod, only: r8 => shr_kind_r8 ! 8-byte reals
+ use cam_logfile, only: iulog
+ use cam_abortutils, only: endrun
+
implicit none
+ private
save
- integer :: & ! dimensions
+ integer, public, protected :: & ! dimensions
nlat, & ! number of latitudes
nlon, & ! number of longitudes
nlev, & ! number of midpoint levels
- nilev, & ! number of interface latitudes
- ntime ! number of times on input file
+ nilev, & ! number of interface levels
+ npes ! number of PEs in geogrid
- real(r8),allocatable,dimension(:) :: & ! coordinate vars
+ real(r8), public, protected, allocatable , dimension(:) :: & ! coordinate vars
glat, & ! latitude coordinates (degrees)
glon, & ! longitude coordinates (degrees)
ylatg, & ! latitudes (radians)
ylong, & ! longitudes (radians)
zlev, & ! midpoint vertical coordinates
- zilev, & ! interface vertical coordinates
- time ! times (histories) on input file
+ zilev ! interface vertical coordinates
- real(r8),allocatable,dimension(:) :: &
- cs, & ! cos(phi) (0:nlat+1)
- zp, & ! log pressure (as in tiegcm lev(nlev))
- expz ! exp(-zp)
+ real(r8), public, allocatable, protected :: cs(:) ! cos(phi) (0:nlat+1)
- integer :: & ! model independent (set by sub get_geogrid)
+ integer, public, protected :: & ! model independent (set by sub get_geogrid)
nlonp1, & ! nlon+1
nlonp2, & ! nlon+2
nlatp1 ! nlat+1
- real(r8) :: dlatg,dlong
- real(r8) :: dphi,dlamda
+ real(r8), public, protected :: dphi
+ real(r8), public, protected :: dlamda
!
! Using p0 in microbars, as in TIEGCM.
- real(r8),parameter :: p0 = 5.0e-4_r8 ! standard pressure (microbars)
+ real(r8), parameter, public :: p0 = 5.0e-4_r8 ! standard pressure (microbars)
- integer :: & ! model dependent (set by subs read_tgcm, read_waccm)
+ integer, public, protected :: & ! model dependent (set by subs read_tgcm, read_waccm)
jspole, & ! latitude index to geographic south pole
jnpole ! latitude index to geographic north pole
-!
-! lev_sequence is a string indicating ordering of the vertical
-! coordinates lev and ilev, and of the field arrays along the
-! vertical dimension. lev_sequence can have 1 of 2 values:
-!
-! 'bottom2top' means lev(1) is the bottom boundary, lev(nlev) is the top boundary
-! 'top2bottom' means lev(1) is the top boundary, lev(nlev) is the bottom boundary
-!
-! For example, TIMEGCM history files are bottom2top, whereas
-! WACCM files are top2bottom. The edynamo code assumes bottom2top,
-! so WACCM input fields are reversed to be bottom2top for the edynamo
-! calculations, then reversed back to the native WACCM sequence
-! (top2bottom) before writing to the edynamo output file.
-!
- character(len=10) :: lev_sequence
-!
-! lon_sequence is a string indicating ordering of the longitude
-! coordinate lon, and of the field arrays along this dimension.
-! lon_sequece can have 1 of 2 values:
-!
-! '-180to180' means lon(1) is -180 deg west longitude, lon(nlon) is +180 east
-! 'zeroto360' means lon(1) is 0 deg west longitude, lon(nlon) is 360 deg east
-!
-! Note that TIMEGCM convention is '-180to180' and WACCM convention is 'zeroto360'
-! (this is treating similarly to lev_sequence above)
-!
- character(len=9) :: lon_sequence
+
+ ! set_geogrid sets up a distributed finite-volume lat/lon grid
+ public :: set_geogrid
+
+ logical :: debug = .false. ! set true for prints to stdout at each call
+
+contains
+
+ !-----------------------------------------------------------------------
+ subroutine set_geogrid(nlon_g, nlat_g, nlev_in, npes_in, iam, pres_mid_in, pres_edge_in, min_lat_pe_in)
+ use shr_const_mod, only: pi => shr_const_pi
+ use edyn_params, only: kbotdyn, pbotdyn
+ use edyn_mpi, only: mp_distribute_geo
+ use spmd_utils, only: masterproc
+ use edyn_maggrid, only: nmlat
+
+ ! Dummy Args
+ integer, intent(in) :: nlon_g ! Global num longitudes
+ integer, intent(in) :: nlat_g ! Global num latitudes
+ integer, intent(in) :: nlev_in ! Num levels
+ integer, intent(in) :: npes_in
+ integer, intent(in) :: iam
+ real(r8), intent(in) :: pres_mid_in(:)
+ real(r8), intent(in) :: pres_edge_in(:)
+ integer, optional, intent(in) :: min_lat_pe_in ! Min # lats / PE
+ !
+ ! Local:
+ integer :: latind, lonind, js, k
+ integer :: lon_beg, lon_end, lat_beg, lat_end
+ integer :: lons_per_task, lats_per_task
+ integer :: lons_overflow, lats_overflow
+ integer :: ntasks_lat, ntasks_lon
+ integer :: task_cnt, i,j
+ integer :: minlats_per_pe
+ integer :: ierr
+ real(r8) :: phi
+ real(r8) :: delta ! Coordinate spacing
+ real(r8), parameter :: eps = 1.e-6_r8
+
+ real(r8) :: pmid(nlev_in)
+
+ nlon = nlon_g
+ nlat = nlat_g
+ nlev = nlev_in
+ npes = npes_in
+
+ nilev = nlev+1
+
+ nlonp1 = nlon + 1
+ nlonp2 = nlon + 2
+ nlatp1 = nlat + 1
+
+ jspole = 1
+ jnpole = nlat
+
+ if (present(min_lat_pe_in)) then
+ minlats_per_pe = min_lat_pe_in
+ else
+ minlats_per_pe = 2
+ end if
+
+ dphi = pi / real(nlat,r8)
+ dlamda = 2._r8*pi / real(nlon,r8)
+
+ !
+ ! Allocate coordinate variables:
+ !
+ allocate(glon(nlon))
+ allocate(glat(nlat))
+ !
+ ! Create a finite-volume coordinate grid (in degrees)
+ !
+ delta = 360.0_r8 / real(nlon, r8)
+ do lonind = 1, nlon
+ glon(lonind) = -180.0_r8 + ((lonind - 1) * delta)
+ end do
+ delta = 180.0_r8 / real((nlat - 1), r8)
+ ! Set the poles exactly (they might be checked later)
+ glat(1) = -90.0_r8
+ glat(nlat) = 90.0_r8
+ do latind = 2, nlat - 1
+ glat(latind) = -90.0_r8 + ((latind - 1) * delta)
+ end do
+
+ if (masterproc.and.debug) then
+ write(iulog,*) 'set_geogrid glon : ',glon(:)
+ write(iulog,*) 'set_geogrid glat : ',glat(:)
+ end if
+
+ allocate(zlev(nlev))
+ allocate(zilev(nilev))
+ !
+ ! Hybrid-sigma levels from ref_pres module:
+ !
+ zlev(:nlev) = pres_mid_in(:) ! midpoints vertical coord (top down)
+ zilev(:nilev) = pres_edge_in(:nilev) ! interfaces vertical coord
+
+ ! do bottom up search for kbotdyn
+ pmid(:nlev) = zlev(nlev:1:-1)
+ kloop: do k = 1, nlev
+ if ( pmid(k) <= pbotdyn) then
+ kbotdyn = k
+ exit kloop
+ end if
+ end do kloop
+ if ( kbotdyn < 1 ) then
+ call endrun('set_geogrid: kbotdyn is not set')
+ endif
+ if (debug) then
+ write(iulog,"('set_geogrid: kbotdyn=',i4,' pmid(kbotdyn)=',es12.4)") kbotdyn,pmid(kbotdyn)
+ endif
+
+ !
+ ! Setup a decomposition for the geogrid
+ !
+ ! First, try using a 1-D latitude decomposition
+
+ do ntasks_lon = 1,nlon_g
+ ntasks_lat = npes/ntasks_lon
+ if ( minlats_per_pe*ntasks_latiam) exit jloop
+ end do
+ enddo jloop
+ endif
+
+ call mp_distribute_geo(lon_beg, lon_end, lat_beg, lat_end, 1, nlev, ntasks_lon, ntasks_lat)
+
+ !
+ ! Set horizontal geographic grid in radians (for apex code):
+ !
+ allocate(ylatg(nlat)) ! waccm grid includes poles
+ allocate(ylong(nlonp1)) ! single periodic point
+ ylatg(1) = -pi/2._r8+eps ! south pole
+ ylatg(nlat) = pi/2._r8-eps ! north pole
+ do latind = 2, nlat-1
+ ylatg(latind) = -0.5_r8*(pi-dphi)+real(latind-1,r8)*dphi
+ end do
+ do lonind = 1, nlonp1
+ ylong(lonind) = -pi+real(lonind-1,r8)*dlamda
+ end do
+ !
+ ! Calculate cosine of latitude
+ !
+ allocate(cs(0:nlat+1))
+ js = -(nlat/2)
+ do latind = 1, nlat
+ phi = (latind + js - .5_r8) * dphi
+ cs(latind) = cos(phi)
+ end do
+ cs(0) = -cs(1)
+ cs(nlat+1) = -cs(nlat)
+
+ end subroutine set_geogrid
end module edyn_geogrid
diff --git a/src/ionosphere/waccmx/edyn_grid_comp.F90 b/src/ionosphere/waccmx/edyn_grid_comp.F90
new file mode 100644
index 0000000000..3796879fb1
--- /dev/null
+++ b/src/ionosphere/waccmx/edyn_grid_comp.F90
@@ -0,0 +1,481 @@
+!-------------------------------------------------------------------------------
+! This localizes ESMF regridding operations to allow for multiple instances of
+! CAM.
+!-------------------------------------------------------------------------------
+module edyn_grid_comp
+ use shr_kind_mod, only: r8 => shr_kind_r8, cs=>shr_kind_cs, cl=>shr_kind_cl
+ use ESMF, only: ESMF_KIND_I4, ESMF_Mesh, ESMF_DistGrid
+ use ESMF, only: ESMF_State, ESMF_Clock, ESMF_GridComp
+ use ppgrid, only: pcols
+ use cam_logfile, only: iulog
+ use shr_sys_mod, only: shr_sys_flush
+ use cam_abortutils, only: endrun
+
+ implicit none
+
+ private
+
+ public :: edyn_grid_comp_init
+ public :: edyn_grid_comp_run1
+ public :: edyn_grid_comp_run2
+ public :: edyn_grid_comp_final
+
+ ! Private data and interfaces
+ ! phys_mesh: Local copy of physics grid
+ type(ESMF_Mesh) :: phys_mesh
+ ! edyn_comp: ESMF gridded component for the ionosphere models
+ type(ESMF_GridComp) :: phys_comp
+ ! Local copy of ionosphere epotential model
+ character(len=16) :: ionos_epotential_model = 'none'
+ ! Total number of columns on this task
+ integer :: total_cols = 0
+ integer :: col_start = 1
+ integer :: col_end = -1
+ integer :: nlev = 0
+ ! dist_grid_2d: DistGrid for 2D fields
+ type(ESMF_DistGrid) :: dist_grid_2d
+ ! Which run?
+ integer :: do_run
+ ! Pointers for run1 output
+ real(r8), pointer :: prescr_efx_phys(:) => NULL()
+ real(r8), pointer :: prescr_kev_phys(:) => NULL()
+ logical :: ionos_epotential_amie
+ logical :: ionos_epotential_ltr
+ ! Pointers for run2
+ real(r8), pointer :: omega_blck(:,:) => NULL()
+ real(r8), pointer :: pmid_blck(:,:) => NULL()
+ real(r8), pointer :: zi_blck(:,:) => NULL()
+ real(r8), pointer :: hi_blck(:,:) => NULL()
+ real(r8), pointer :: u_blck(:,:) => NULL()
+ real(r8), pointer :: v_blck(:,:) => NULL()
+ real(r8), pointer :: tn_blck(:,:) => NULL()
+ real(r8), pointer :: sigma_ped_blck(:,:) => NULL()
+ real(r8), pointer :: sigma_hall_blck(:,:) => NULL()
+ real(r8), pointer :: te_blck(:,:) => NULL()
+ real(r8), pointer :: ti_blck(:,:) => NULL()
+ real(r8), pointer :: mbar_blck(:,:) => NULL()
+ real(r8), pointer :: n2mmr_blck(:,:) => NULL()
+ real(r8), pointer :: o2mmr_blck(:,:) => NULL()
+ real(r8), pointer :: o1mmr_blck(:,:) => NULL()
+ real(r8), pointer :: o2pmmr_blck(:,:) => NULL()
+ real(r8), pointer :: nopmmr_blck(:,:) => NULL()
+ real(r8), pointer :: n2pmmr_blck(:,:) => NULL()
+ real(r8), pointer :: opmmr_blck(:,:) => NULL()
+ real(r8), pointer :: opmmrtm1_blck(:,:) => NULL()
+ real(r8), pointer :: ui_blck(:,:) => NULL()
+ real(r8), pointer :: vi_blck(:,:) => NULL()
+ real(r8), pointer :: wi_blck(:,:) => NULL()
+ real(r8) :: rmassO2p
+ real(r8) :: rmassNOp
+ real(r8) :: rmassN2p
+ real(r8) :: rmassOp
+
+CONTAINS
+
+ subroutine edyn_gcomp_init(comp, importState, exportState, clock, rc)
+ use ESMF, only: ESMF_DistGridCreate, ESMF_MeshCreate
+ use ESMF, only: ESMF_FILEFORMAT_ESMFMESH, ESMF_MeshGet
+ use cam_instance, only: inst_name
+ use phys_control, only: phys_getopts
+ use phys_grid, only: get_ncols_p, get_gcol_p, get_rlon_all_p, get_rlat_all_p
+ use ppgrid, only: begchunk, endchunk
+ use edyn_esmf, only: edyn_esmf_chkerr, edyn_esmf_update_phys_mesh
+ use shr_const_mod,only: shr_const_pi
+
+ ! Dummy arguments
+ type(ESMF_GridComp) :: comp
+ type(ESMF_State) :: importState
+ type(ESMF_State) :: exportState
+ type(ESMF_Clock) :: clock
+ integer, intent(out) :: rc
+
+ ! Local variables
+ integer :: ncols
+ integer :: chnk, col, dindex
+ integer, allocatable :: decomp(:)
+ character(len=cl) :: grid_file
+ character(len=*), parameter :: subname = 'edyn_gcomp_init'
+ real(r8) , parameter :: radtodeg = 180.0_r8/shr_const_pi
+ integer :: spatialDim
+ integer :: numOwnedElements
+ real(r8), pointer :: ownedElemCoords(:)
+ real(r8), pointer :: lat(:), latMesh(:)
+ real(r8), pointer :: lon(:), lonMesh(:)
+ real(r8) :: lats(pcols) ! array of chunk latitudes
+ real(r8) :: lons(pcols) ! array of chunk longitudes
+ integer :: i, c, n
+ character(len=cs) :: tempc1,tempc2
+ character(len=300) :: errstr
+
+ real(r8), parameter :: abstol = 1.e-6_r8
+
+ ! Find the physics grid file
+ call phys_getopts(physics_grid_out=grid_file)
+ ! Compute the local decomp
+ total_cols = 0
+ do chnk = begchunk, endchunk
+ total_cols = total_cols + get_ncols_p(chnk)
+ end do
+ allocate(decomp(total_cols))
+ dindex = 0
+ do chnk = begchunk, endchunk
+ ncols = get_ncols_p(chnk)
+ do col = 1, ncols
+ dindex = dindex + 1
+ decomp(dindex) = get_gcol_p(chnk, col)
+ end do
+ end do
+ ! Create a DistGrid based on the physics decomp
+ dist_grid_2d = ESMF_DistGridCreate(arbSeqIndexList=decomp, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_DistGridCreate phys decomp', rc)
+ ! Create an ESMF_mesh for the physics decomposition
+ phys_mesh = ESMF_MeshCreate(trim(grid_file), ESMF_FILEFORMAT_ESMFMESH, &
+ elementDistgrid=dist_grid_2d, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_MeshCreateFromFile', rc)
+ call edyn_esmf_update_phys_mesh(phys_mesh)
+ do_run = 1
+
+
+ ! Check that the mesh coordinates are consistent with the model physics column coordinates
+
+ ! obtain mesh lats and lons
+ call ESMF_MeshGet(phys_mesh, spatialDim=spatialDim, numOwnedElements=numOwnedElements, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_MeshGet', rc)
+
+ if (numOwnedElements /= total_cols) then
+ write(tempc1,'(i10)') numOwnedElements
+ write(tempc2,'(i10)') total_cols
+ call endrun(trim(subname)//": ERROR numOwnedElements "// &
+ trim(tempc1) //" not equal to local size "// trim(tempc2))
+ end if
+
+ allocate(ownedElemCoords(spatialDim*numOwnedElements))
+ allocate(lonMesh(total_cols), latMesh(total_cols))
+ call ESMF_MeshGet(phys_mesh, ownedElemCoords=ownedElemCoords)
+
+ do n = 1,total_cols
+ lonMesh(n) = ownedElemCoords(2*n-1)
+ latMesh(n) = ownedElemCoords(2*n)
+ end do
+
+ ! obtain internally generated cam lats and lons
+ allocate(lon(total_cols)); lon(:) = 0._r8
+ allocate(lat(total_cols)); lat(:) = 0._r8
+ n=0
+ do c = begchunk, endchunk
+ ncols = get_ncols_p(c)
+ ! latitudes and longitudes returned in radians
+ call get_rlat_all_p(c, ncols, lats)
+ call get_rlon_all_p(c, ncols, lons)
+ do i=1,ncols
+ n = n+1
+ lat(n) = lats(i)*radtodeg
+ lon(n) = lons(i)*radtodeg
+ end do
+ end do
+
+ errstr = ''
+ ! error check differences between internally generated lons and those read in
+ do n = 1,total_cols
+ if (abs(lonMesh(n) - lon(n)) > abstol) then
+ if ( (abs(lonMesh(n)-lon(n)) > 360._r8+abstol) .or. (abs(lonMesh(n)-lon(n)) < 360._r8-abstol) ) then
+ write(errstr,100) n,lon(n),lonMesh(n), abs(lonMesh(n)-lon(n))
+ write(iulog,*) trim(errstr)
+ endif
+ end if
+ if (abs(latMesh(n) - lat(n)) > abstol) then
+ ! poles in the 4x5 SCRIP file seem to be off by 1 degree
+ if (.not.( (abs(lat(n))>88.0_r8) .and. (abs(latMesh(n))>88.0_r8) )) then
+ write(errstr,101) n,lat(n),latMesh(n), abs(latMesh(n)-lat(n))
+ write(iulog,*) trim(errstr)
+ endif
+ end if
+ end do
+
+ if ( len_trim(errstr) > 0 ) then
+ call endrun(subname//': physics mesh coords do not match model coords')
+ end if
+
+ ! deallocate memory
+ deallocate(ownedElemCoords)
+ deallocate(lon, lonMesh)
+ deallocate(lat, latMesh)
+ deallocate(decomp)
+
+100 format('edyn_gcomp_init: coord mismatch... n, lon(n), lonmesh(n), diff_lon = ',i6,2(f21.13,3x),d21.5)
+101 format('edyn_gcomp_init: coord mismatch... n, lat(n), latmesh(n), diff_lat = ',i6,2(f21.13,3x),d21.5)
+
+ end subroutine edyn_gcomp_init
+
+ !-----------------------------------------------------------------------
+ subroutine edyn_gcomp_run(comp, importState, exportState, clock, rc)
+ use ESMF, only: ESMF_SUCCESS, ESMF_Array, ESMF_ArrayGet
+ use ESMF, only: ESMF_StateGet
+ use epotential_params, only: epot_crit_colats
+ use edyn_esmf, only: edyn_esmf_chkerr
+ use dpie_coupling, only: d_pie_epotent
+ use dpie_coupling, only: d_pie_coupling
+
+ ! Dummy arguments
+ type(ESMF_GridComp) :: comp
+ type(ESMF_State) :: importState
+ type(ESMF_State) :: exportState
+ type(ESMF_Clock) :: clock
+ integer, intent(out) :: rc
+ ! Local variables
+ type(ESMF_Array) :: run_type
+ integer :: cols, cole, blksize
+ character(len=cs) :: errmsg
+ character(len=*), parameter :: subname = 'edyn_gcomp_run'
+
+ if (do_run == 1) then
+ if ( ionos_epotential_amie .or. ionos_epotential_ltr) then
+ call d_pie_epotent(ionos_epotential_model, epot_crit_colats, &
+ cols=col_start, cole=col_end, &
+ efx_phys=prescr_efx_phys, kev_phys=prescr_kev_phys, &
+ amie_in=ionos_epotential_amie, ltr_in=ionos_epotential_ltr )
+ else
+ call d_pie_epotent(ionos_epotential_model, epot_crit_colats)
+ end if
+ else if (do_run == 2) then
+ call d_pie_coupling(omega_blck, pmid_blck, zi_blck, hi_blck, &
+ u_blck, v_blck, tn_blck, sigma_ped_blck, sigma_hall_blck, &
+ te_blck, ti_blck, mbar_blck, n2mmr_blck, o2mmr_blck, &
+ o1mmr_blck, o2pmmr_blck, nopmmr_blck, n2pmmr_blck, &
+ opmmr_blck, opmmrtm1_blck, ui_blck, vi_blck, wi_blck, &
+ rmassO2p, rmassNOp, rmassN2p, rmassOp, col_start, col_end, nlev)
+ else
+ write(errmsg, '(2a,i0)') subname, ': Unknown run number, ', do_run
+ call endrun(trim(errmsg))
+ end if
+
+ rc = ESMF_SUCCESS
+
+ end subroutine edyn_gcomp_run
+ !-----------------------------------------------------------------------
+ subroutine edyn_gcomp_final(comp, importState, exportState, clock, rc)
+ use ESMF, only: ESMF_MeshDestroy
+ use ESMF, only: ESMF_SUCCESS
+ use edyn_esmf, only: edyn_esmf_chkerr
+
+ ! Dummy arguments
+ type(ESMF_GridComp) :: comp
+ type(ESMF_State) :: importState
+ type(ESMF_State) :: exportState
+ type(ESMF_Clock) :: clock
+ integer, intent(out) :: rc
+ ! Local variables
+ character(len=*), parameter :: subname = 'edyn_gcomp_final'
+
+ call ESMF_MeshDestroy(phys_mesh, rc=rc)
+ rc = ESMF_SUCCESS
+
+ end subroutine edyn_gcomp_final
+
+ !-----------------------------------------------------------------------
+ subroutine edyn_gcomp_SetServices(comp, rc)
+ use ESMF, only: ESMF_GridCompSetEntryPoint
+ use ESMF, only: ESMF_METHOD_INITIALIZE, ESMF_METHOD_RUN
+ use ESMF, only: ESMF_METHOD_FINALIZE, ESMF_SUCCESS
+ use edyn_esmf, only: edyn_esmf_chkerr
+
+ type(ESMF_GridComp) :: comp
+ integer, intent(out) :: rc
+ character(len=*), parameter :: subname = 'edyn_gcomp_SetServices'
+
+ ! Set the entry points for standard ESMF Component methods
+ call ESMF_GridCompSetEntryPoint(comp, ESMF_METHOD_INITIALIZE, &
+ userRoutine=edyn_gcomp_Init, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_GridCompSetEntryPoint init', rc)
+ call ESMF_GridCompSetEntryPoint(comp, ESMF_METHOD_RUN, &
+ userRoutine=edyn_gcomp_Run, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_GridCompSetEntryPoint run', rc)
+ call ESMF_GridCompSetEntryPoint(comp, ESMF_METHOD_FINALIZE, &
+ userRoutine=edyn_gcomp_Final, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_GridCompSetEntryPoint final', rc)
+
+ end subroutine edyn_gcomp_SetServices
+
+ subroutine edyn_grid_comp_init(mpi_comm)
+ use mpi, only: MPI_INTEGER
+ use ESMF, only: ESMF_StateCreate, ESMF_GridCompInitialize
+ use ESMF, only: ESMF_GridCompCreate, ESMF_GridCompSetServices
+ use ESMF, only: ESMF_VM, ESMF_VMGetCurrent, ESMF_VMGet
+ use cam_instance, only: inst_index, inst_name
+ use edyn_esmf, only: edyn_esmf_chkerr
+
+ ! Dummy argument
+ integer, intent(in) :: mpi_comm
+ ! Local variables
+ integer, allocatable :: petlist(:)
+ integer :: iam
+ integer :: npes
+ integer :: localPet
+ integer :: petCount
+ integer :: rc
+ type(ESMF_VM) :: vm_init
+ character(len=*), parameter :: subname = 'edyn_grid_comp_init'
+
+ !! Gather PE information for this instance
+ call ESMF_VMGetCurrent(vm_init, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_VMGetCurrent', rc)
+ call ESMF_VMGet(vm_init, localPet=localPet, petCount=petCount)
+ call edyn_esmf_chkerr(subname, 'ESMF_VMGet', rc)
+ call mpi_comm_size(mpi_comm, npes, rc)
+ call mpi_comm_rank(mpi_comm, iam, rc)
+ ! Collect all the PETS for each instance for phys grid
+ allocate(petlist(npes))
+ call mpi_allgather(localPet, 1, MPI_INTEGER, petlist, 1, MPI_INTEGER, mpi_comm, rc)
+ ! Now, we should be able to create a gridded component
+ phys_comp = ESMF_GridCompCreate(name=trim(inst_name), petList=petlist, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_GridCompCreate '//trim(inst_name), rc)
+ call ESMF_GridCompSetServices(phys_comp, edyn_gcomp_SetServices, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_GridCompSetServices '//trim(inst_name), rc)
+ ! Initialize the required component arguments
+ call ESMF_GridCompInitialize(phys_comp, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_GridCompInitialize', rc)
+
+ end subroutine edyn_grid_comp_init
+
+ subroutine edyn_grid_comp_run1(ionos_epotential_model_in, &
+ cols, cole, efx_phys, kev_phys, amie_in, ltr_in)
+
+ use ESMF, only: ESMF_GridCompRun
+ use edyn_esmf, only: edyn_esmf_chkerr
+
+ ! Dummy arguments
+ character(len=*), intent(in) :: ionos_epotential_model_in
+ integer, optional, intent(in) :: cols
+ integer, optional, intent(in) :: cole
+ real(r8), optional, target, intent(out) :: efx_phys(:)
+ real(r8), optional, target, intent(out) :: kev_phys(:)
+ logical, optional, intent(in) :: amie_in
+ logical, optional, intent(in) :: ltr_in
+
+ ! Local variables
+ integer :: rc
+ character(len=*), parameter :: subname = 'edyn_grid_comp_run1'
+ logical :: args_present(6)
+
+ do_run = 1
+ args_present(:) = (/ present(cols), present(cole), present(efx_phys), present(kev_phys), &
+ present(amie_in), present(ltr_in) /)
+
+ if ( any( args_present ) ) then
+ if (.not. all( args_present ) ) then
+ call endrun(subname//': all optional arguments must be present for AMIE/LTR')
+ endif
+
+ ionos_epotential_amie = amie_in
+ ionos_epotential_ltr = ltr_in
+ prescr_efx_phys => efx_phys
+ prescr_kev_phys => kev_phys
+ col_start = cols
+ col_end = cole
+ else
+ ! No else check assume no optional arguments are passed
+ nullify(prescr_efx_phys)
+ nullify(prescr_kev_phys)
+ end if
+ ionos_epotential_model = ionos_epotential_model_in
+ call ESMF_GridCompRun(phys_comp, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_GridCompRun', rc)
+
+ end subroutine edyn_grid_comp_run1
+
+ subroutine edyn_grid_comp_run2(omega_blck_in, pmid_blck_in, zi_blck_in, &
+ hi_blck_in, u_blck_in, v_blck_in, tn_blck_in, sigma_ped_blck_in, &
+ sigma_hall_blck_in, te_blck_in, ti_blck_in, mbar_blck_in, &
+ n2mmr_blck_in, o2mmr_blck_in, o1mmr_blck_in, o2pmmr_blck_in, &
+ nopmmr_blck_in, n2pmmr_blck_in, opmmr_blck_in, opmmrtm1_blck_in, &
+ ui_blck_in, vi_blck_in, wi_blck_in, rmassO2p_in, rmassNOp_in, &
+ rmassN2p_in, rmassOp_in, cols, cole, pver)
+ use ESMF, only: ESMF_GridCompRun
+ use edyn_esmf, only: edyn_esmf_chkerr
+
+ ! Dummy arguments
+ real(r8), pointer :: omega_blck_in(:,:)
+ real(r8), pointer :: pmid_blck_in(:,:)
+ real(r8), pointer :: zi_blck_in(:,:)
+ real(r8), pointer :: hi_blck_in(:,:)
+ real(r8), pointer :: u_blck_in(:,:)
+ real(r8), pointer :: v_blck_in(:,:)
+ real(r8), pointer :: tn_blck_in(:,:)
+ real(r8), pointer :: sigma_ped_blck_in(:,:)
+ real(r8), pointer :: sigma_hall_blck_in(:,:)
+ real(r8), pointer :: te_blck_in(:,:)
+ real(r8), pointer :: ti_blck_in(:,:)
+ real(r8), pointer :: mbar_blck_in(:,:)
+ real(r8), pointer :: n2mmr_blck_in(:,:)
+ real(r8), pointer :: o2mmr_blck_in(:,:)
+ real(r8), pointer :: o1mmr_blck_in(:,:)
+ real(r8), pointer :: o2pmmr_blck_in(:,:)
+ real(r8), pointer :: nopmmr_blck_in(:,:)
+ real(r8), pointer :: n2pmmr_blck_in(:,:)
+ real(r8), pointer :: opmmr_blck_in(:,:)
+ real(r8), pointer :: opmmrtm1_blck_in(:,:)
+ real(r8), pointer :: ui_blck_in(:,:)
+ real(r8), pointer :: vi_blck_in(:,:)
+ real(r8), pointer :: wi_blck_in(:,:)
+ real(r8) :: rmassO2p_in
+ real(r8) :: rmassNOp_in
+ real(r8) :: rmassN2p_in
+ real(r8) :: rmassOp_in
+ integer, intent(in) :: cols
+ integer, intent(in) :: cole
+ integer, intent(in) :: pver
+ ! Local variables
+ integer :: rc
+ character(len=*), parameter :: subname = 'edyn_grid_comp_run2'
+
+ do_run = 2
+ omega_blck => omega_blck_in
+ pmid_blck => pmid_blck_in
+ zi_blck => zi_blck_in
+ hi_blck => hi_blck_in
+ u_blck => u_blck_in
+ v_blck => v_blck_in
+ tn_blck => tn_blck_in
+ sigma_ped_blck => sigma_ped_blck_in
+ sigma_hall_blck => sigma_hall_blck_in
+ te_blck => te_blck_in
+ ti_blck => ti_blck_in
+ mbar_blck => mbar_blck_in
+ n2mmr_blck => n2mmr_blck_in
+ o2mmr_blck => o2mmr_blck_in
+ o1mmr_blck => o1mmr_blck_in
+ o2pmmr_blck => o2pmmr_blck_in
+ nopmmr_blck => nopmmr_blck_in
+ n2pmmr_blck => n2pmmr_blck_in
+ opmmr_blck => opmmr_blck_in
+ opmmrtm1_blck => opmmrtm1_blck_in
+ ui_blck => ui_blck_in
+ vi_blck => vi_blck_in
+ wi_blck => wi_blck_in
+ rmassO2p = rmassO2p_in
+ rmassNOp = rmassNOp_in
+ rmassN2p = rmassN2p_in
+ rmassOp = rmassOp_in
+ col_start = cols
+ col_end = cole
+ nlev = pver
+ call ESMF_GridCompRun(phys_comp, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_GridCompRun', rc)
+
+ end subroutine edyn_grid_comp_run2
+
+ subroutine edyn_grid_comp_final()
+ use ESMF, only: ESMF_GridCompFinalize
+ use edyn_esmf, only: edyn_esmf_chkerr
+
+ ! Local variables
+ integer :: rc
+ character(len=*), parameter :: subname = 'edyn_grid_comp_final'
+
+ call ESMF_GridCompFinalize(phys_comp, rc=rc)
+ call edyn_esmf_chkerr(subname, 'ESMF_GridCompFinalize', rc)
+
+ end subroutine edyn_grid_comp_final
+
+
+end module edyn_grid_comp
diff --git a/src/ionosphere/waccmx/edyn_init.F90 b/src/ionosphere/waccmx/edyn_init.F90
index e2a1b22b9c..928ff284a7 100644
--- a/src/ionosphere/waccmx/edyn_init.F90
+++ b/src/ionosphere/waccmx/edyn_init.F90
@@ -1,398 +1,266 @@
- module edyn_init
+module edyn_init
!
-! Initialize edynamo
+! Initialize edynamo
!
- use shr_kind_mod ,only: r8 => shr_kind_r8 ! 8-byte reals
- use shr_const_mod, only: pi => shr_const_pi
- use cam_logfile ,only: iulog
- use cam_abortutils ,only: endrun
- use spmd_utils, only: masterproc
- use infnan, only: nan, assignment(=)
+ use cam_logfile, only: iulog
+ use cam_abortutils, only: endrun
+ use spmd_utils, only: masterproc
- use edyn_geogrid ,only: nlon,nlat,nlev,nilev,glon,glat,zlev,zilev,&
- nlonp1,nlonp2,nlatp1,jspole,jnpole,dlatg,dlong,&
- ylatg,ylong,dphi,dlamda,cs,expz,zp
- use edyn_params ,only: kbotdyn, pbotdyn
+ implicit none
- implicit none
+ private
+ public :: edynamo_init
- private
- public :: edynamo_init, lonshift_global
-
- logical :: debug=.false. ! set true for prints to stdout at each call
-
- contains
-!-----------------------------------------------------------------------
- subroutine edynamo_init(mpicomm, nlon_in,nlat_in,nlev_in, lonndx0,lonndx1,latndx0,latndx1,levndx0,levndx1, ntaski,ntaskj, &
- glon_in, glat_in, pres_in, pres_edge_in )
-!
-! One-time initialization, called from inital.F90 after dyn_init and initcom.
-!
- use edyn_maggrid ,only: set_maggrid
- use edyn_mpi ,only: mp_init,mp_distribute_geo,mp_distribute_mag,&
- mp_exchange_tasks
-#ifdef WACCMX_EDYN_ESMF
- use edynamo ,only: alloc_edyn
- use edyn_esmf ,only: edyn_esmf_init ! initialize ESMF
-#endif
-!
-! Args:
- integer, intent(in) :: mpicomm
- integer, intent(in) :: nlon_in,nlat_in,nlev_in
- integer, intent(in) :: lonndx0,lonndx1,latndx0,latndx1,levndx0,levndx1, ntaski,ntaskj
- real(r8),intent(in) :: glon_in(:), glat_in(:)
- real(r8),intent(in) :: pres_in(:), pres_edge_in(:)
-
- if (masterproc) then
- write(iulog,"('Enter edynamo_init:')")
- endif
-
- call mp_init(mpicomm) ! get ntask,mytid
- call set_geogrid(nlon_in,nlat_in,nlev_in, glon_in, glat_in, pres_in, pres_edge_in) ! set global geographic grid
- call set_maggrid () ! set parameter-based global magnetic grid
-
- call mp_distribute_geo(lonndx0,lonndx1,latndx0,latndx1,levndx0,levndx1, ntaski,ntaskj)
- call mp_distribute_mag
- call register_maggrid
- call mp_exchange_tasks(0) ! single arg is iprint
-
-#ifdef WACCMX_EDYN_ESMF
- call alloc_edyn ! allocate dynamo arrays
- call edyn_esmf_init(mpicomm) ! initialize ESMF
-#endif
-
- call add_fields ! add fields to WACCM history master list
-
- end subroutine edynamo_init
+contains
!-----------------------------------------------------------------------
- subroutine set_geogrid( nlon_in,nlat_in,nlev_in, glon_in, glat_in, pres_in, pres_edge_in )
-
- ! Args
- integer, intent(in) :: nlon_in,nlat_in,nlev_in
- real(r8),intent(in) :: glon_in(:), glat_in(:)
- real(r8),intent(in) :: pres_in(:), pres_edge_in(:)
-!
-! Local:
- integer :: i,j,js, k
- real(r8) :: real8,phi
- real(r8),parameter :: eps = 1.e-6_r8
-
- real(r8) :: pmid(nlev_in)
-
- nlon = nlon_in
- nlat = nlat_in
- nlev = nlev_in
-
- nilev = nlev+1
-
- nlonp1 = nlon+1
- nlonp2 = nlon+2
- nlatp1 = nlat+1
-
- jspole = 1
- jnpole = nlat
-
- dphi = pi/dble(nlat)
- dlamda = 2._r8*pi/dble(nlon)
-
-!
-! Allocate coordinate variables:
-!
- allocate(glon(nlon))
- glon(:nlon) = glon_in(:nlon)
-
- allocate(glat(nlat))
- glat(:nlat) = glat_in(:nlat)
+ subroutine edynamo_init(mpicomm)
+
+ !
+ ! One-time initialization, called from ionosphere_init
+ ! before dyn_init and phys_init
+ !
+ use edyn_maggrid, only: set_maggrid, gmlat, nmlonp1, nmlat, nmlath, nmlev
+ use edyn_mpi, only: mp_exchange_tasks
+ use edyn_mpi, only: mp_distribute_mag
+ use edynamo, only: alloc_edyn
+ use edyn_grid_comp, only: edyn_grid_comp_init
+ use edyn_solve, only: edyn_solve_init
+
+ !
+ ! Args:
+ integer, intent(in) :: mpicomm
+
+ if (masterproc) then
+ write(iulog,"('Enter edynamo_init:')")
+ endif
- allocate(zlev(nlev))
- allocate(zilev(nilev))
-!
-! zp and expz are not set until oplus is called from dpie_coupling.
- allocate(zp(nlev)) ! log pressure (as in TIEGCM)
- allocate(expz(nlev)) ! exp(-zp)
- zp = nan
- expz = nan
-!
-!
- call lonshift_global(glon,nlon,'-180to180',.true.) ! shift to +/-180
-!
-! Hybrid-sigma levels from ref_pres module:
-!
- zlev(:nlev) = pres_in(:) ! midpoints vertical coord (top down)
- zilev(:nilev) = pres_edge_in(:nilev) ! interfaces vertical coord
-
- ! do bottom up search for kbotdyn
- pmid(:nlev) = zlev(nlev:1:-1)
- kloop: do k=1,nlev
- if ( pmid(k) <= pbotdyn) then
- kbotdyn = k
- exit kloop
- end if
- enddo kloop
- if ( kbotdyn < 1 ) then
- call endrun('set_geogrid: kbotdyn is not set')
- endif
- if (debug) then
- write(iulog,"('set_geogrid: kbotdyn=',i4,' pmid(kbotdyn)=',es12.4)") kbotdyn,pmid(kbotdyn)
- endif
+ call set_maggrid () ! set parameter-based global magnetic grid
+
+ call edyn_solve_init
+
+ call mp_distribute_mag(nmlonp1, nmlat, nmlath, nmlev)
+
+ call register_grids()
+    call mp_exchange_tasks(mpicomm, 0, gmlat) ! second arg is iprint (0 => no prints)
+
+ call alloc_edyn() ! allocate dynamo arrays
+ call edyn_grid_comp_init(mpicomm)
+
+ call add_fields() ! add fields to WACCM history master list
+
+ end subroutine edynamo_init
+
+ !-----------------------------------------------------------------------
+ subroutine add_fields
+ use cam_history, only: addfld, horiz_only, add_default
+ use phys_control, only: phys_getopts !Method used to get flag for waccmx ionosphere output variables
+
+ logical :: history_waccmx
+
+    ! Geomagnetic fields are in waccm format (in CGS units):
+ call addfld ('PED_MAG' ,(/ 'lev' /), 'I', 'S/m ','Pedersen Conductivity' ,gridname='gmag_grid')
+ call addfld ('HAL_MAG' ,(/ 'lev' /), 'I', 'S/m ','Hall Conductivity' ,gridname='gmag_grid')
+ call addfld ('PHIM2D' , horiz_only, 'I', 'VOLTS','PHIM2D: Electric Potential' ,gridname='gmag_grid')
+ call addfld ('ED1' , horiz_only, 'I', 'V/m ','ED1: Eastward Electric Field' ,gridname='gmag_grid')
+ call addfld ('ED2' , horiz_only, 'I', 'V/m ','ED2: Equatorward Electric Field' ,gridname='gmag_grid')
+ call addfld ('PHIM3D' ,(/ 'lev' /), 'I', 'VOLTS','PHIM3D: 3d Electric Potential' ,gridname='gmag_grid')
+
+ call addfld ('EPHI3D' ,(/ 'lev' /), 'I', ' ','EPHI3D' ,gridname='gmag_grid')
+ call addfld ('ELAM3D' ,(/ 'lev' /), 'I', ' ','ELAM3D' ,gridname='gmag_grid')
+ call addfld ('EMZ3D' ,(/ 'lev' /), 'I', ' ','EMZ3D' ,gridname='gmag_grid')
+
+ call addfld ('ED13D' ,(/ 'lev' /), 'I', 'V/m ','ED13D: Eastward Electric Field' ,gridname='gmag_grid')
+ call addfld ('ED23D' ,(/ 'lev' /), 'I', 'V/m ','ED23D: Equatorward Electric Field',gridname='gmag_grid')
+ call addfld ('ZPOT_MAG' ,(/ 'lev' /), 'I', 'cm ','Geopotential on mag grid (h0 min)',gridname='gmag_grid')
+ call addfld ('ADOTV1_MAG',(/ 'lev' /), 'I', ' ','ADOTV1 on mag grid' ,gridname='gmag_grid')
+ call addfld ('ADOTV2_MAG',(/ 'lev' /), 'I', ' ','ADOTV2 on mag grid' ,gridname='gmag_grid')
+ !
+ call addfld ('prescr_phihm' , horiz_only, 'I','VOLTS','Prescribed Electric Potential-mag grid' ,gridname='gmag_grid')
+ call addfld ('prescr_efxm' , horiz_only, 'I','mW/m2','Prescribed energy flux on mag grid' ,gridname='gmag_grid')
+ call addfld ('prescr_kevm' , horiz_only, 'I','keV ','Prescribed mean energy on mag grid' ,gridname='gmag_grid')
+
+ !
+ ! Dynamo inputs from sub dynamo_input (edynamo.F90):
+
+ call addfld ('EDYN_ADOTV1 ',(/ 'lev' /), 'I', ' ','EDYN_ADOTV1' , gridname='geo_grid')
+ call addfld ('EDYN_ADOTV2 ',(/ 'lev' /), 'I', ' ','EDYN_ADOTV2' , gridname='geo_grid')
+ !
+ ! 2d dynamo input fields on geo grid (edynamo.F90):
+ call addfld ('EDYN_SINI ', horiz_only , 'I', ' ','EDYN_SINI' , gridname='geo_grid')
+ call addfld ('EDYN_ADOTA1 ', horiz_only , 'I', ' ','EDYN_ADOTA1' , gridname='geo_grid')
+ call addfld ('EDYN_ADOTA2 ', horiz_only , 'I', ' ','EDYN_ADOTA2' , gridname='geo_grid')
+ call addfld ('EDYN_A1DTA2 ', horiz_only , 'I', ' ','EDYN_A1DTA2' , gridname='geo_grid')
+ call addfld ('EDYN_BE3 ', horiz_only , 'I', ' ','EDYN_BE3' , gridname='geo_grid')
+
+
+ call addfld ('ADOTA1_MAG', horiz_only , 'I', ' ','ADOTA1 in geo-mag coords' , gridname='gmag_grid')
+ call addfld ('SINI_MAG', horiz_only , 'I', ' ','sini in geo-mag coords' , gridname='gmag_grid')
+
+ call addfld ('adota1_mag_a', horiz_only, 'I', ' ','EDYN_ZIGM11',gridname='gmag_grid')
+ call addfld ('ZIGM11_a', horiz_only, 'I', ' ','EDYN_ZIGM11',gridname='gmag_grid')
+ call addfld ('EDYN_ZIGM11_0', horiz_only, 'I', ' ','EDYN_ZIGM11',gridname='gmag_grid')
+ call addfld ('EDYN_ZIGM11', horiz_only, 'I', ' ','EDYN_ZIGM11',gridname='gmag_grid')
+ call addfld ('EDYN_ZIGM11_PED', horiz_only, 'I', 'S','Pedersen Conductance',gridname='gmag_grid')
+ call addfld ('EDYN_ZIGM22', horiz_only, 'I', ' ','EDYN_ZIGM22',gridname='gmag_grid')
+ call addfld ('EDYN_ZIGMC' , horiz_only, 'I', ' ','EDYN_ZIGMC' ,gridname='gmag_grid')
+ call addfld ('EDYN_ZIGM2' , horiz_only, 'I', ' ','EDYN_ZIGM2' ,gridname='gmag_grid')
+ call addfld ('EDYN_ZIGM2_HAL' , horiz_only, 'I', 'S','Hall Conductance' ,gridname='gmag_grid')
+ call addfld ('EDYN_RIM1' , horiz_only, 'I', ' ','EDYN_RIM1' ,gridname='gmag_grid')
+ call addfld ('EDYN_RIM2' , horiz_only, 'I', ' ','EDYN_RIM2' ,gridname='gmag_grid')
+
+ call addfld ('POTEN' ,(/ 'lev' /), 'I', 'Volts','POTEN: Electric Potential',&
+ gridname='geo_grid')
+ call addfld ('EX' ,(/ 'lev' /), 'I', 'V/m' ,'EX: Zonal component of Electric Field',&
+ gridname='geo_grid')
+ call addfld ('EY' ,(/ 'lev' /), 'I', 'V/m' ,'EY: Meridional component of Electric Field',&
+ gridname='geo_grid')
+ call addfld ('EZ' ,(/ 'lev' /), 'I', 'V/m' ,'EZ: Vertical component of Electric Field',&
+ gridname='geo_grid')
+
+ call addfld ('BMOD', horiz_only, 'I', 'gauss','magnitude of magnetic field',gridname='geo_grid')
+ call addfld ('XB', horiz_only, 'I', 'gauss','northward component of magnetic field',gridname='geo_grid')
+ call addfld ('YB', horiz_only, 'I', 'gauss','eastward component of magnetic field',gridname='geo_grid')
+ call addfld ('ZB', horiz_only, 'I', 'gauss','downward component of magnetic field',gridname='geo_grid')
+
+ ! rjac: scaled derivatives of geomagnetic coords wrt geographic coordinates.
+ call addfld ('RJAC11',(/'lev'/), 'I', '1','cos(thetas)/cos(theta)*d(lamdas)/d(lamda)' ,gridname='geo_grid')
+ call addfld ('RJAC12',(/'lev'/), 'I', '1','cos(thetas)*d(lamdas)/d(theta)' ,gridname='geo_grid')
+ call addfld ('RJAC21',(/'lev'/), 'I', '1','1./cos(theta)*d(thetas)/d(lamda)' ,gridname='geo_grid')
+ call addfld ('RJAC22',(/'lev'/), 'I', '1','d(thetas)/d(theta)' ,gridname='geo_grid')
+
+ call addfld ('OPLUS', (/ 'lev' /), 'I', 'cm^3','O+ (oplus_xport output)', gridname='geo_grid')
+ call addfld ('OPtm1i',(/ 'lev' /), 'I', 'cm^3','O+ (oplus_xport output)', gridname='geo_grid')
+ call addfld ('OPtm1o',(/ 'lev' /), 'I', 'cm^3','O+ (oplus_xport output)', gridname='geo_grid')
+
+ call addfld ('PED_phys',(/ 'lev' /), 'I', 'S/m','Pedersen Conductivity' , gridname='physgrid')
+ call addfld ('HAL_phys',(/ 'lev' /), 'I', 'S/m','Hall Conductivity' , gridname='physgrid')
+
+ !-------------------------------------------------------------------------------
+ ! Set default values for ionosphere history variables
+ !-------------------------------------------------------------------------------
+ call phys_getopts(history_waccmx_out=history_waccmx)
+
+ if (history_waccmx) then
+ call add_default ('EDYN_ZIGM11_PED', 1, ' ')
+ call add_default ('EDYN_ZIGM2_HAL' , 1, ' ')
+ end if
+
+ end subroutine add_fields
+ !-----------------------------------------------------------------------
+
+ subroutine register_grids()
+
+ use cam_grid_support, only: horiz_coord_t, horiz_coord_create, iMap
+ use cam_grid_support, only: cam_grid_register
+ use edyn_mpi, only: mlat0, mlat1, mlon0, omlon1, ntask, mytid
+ use edyn_mpi, only: lat0, lat1, lon0, lon1
+ use edyn_maggrid, only: gmlat, gmlon, nmlat, nmlon
+ use edyn_geogrid, only: glat, glon, nlat, nlon
+
+ integer, parameter :: mag_decomp = 111 ! Must be unique within CAM
+ integer, parameter :: geo_decomp = 112 ! Must be unique within CAM
+
+ type(horiz_coord_t), pointer :: lat_coord => null()
+ type(horiz_coord_t), pointer :: lon_coord => null()
+ integer(iMap), pointer :: grid_map(:,:) => null()
+ integer(iMap), pointer :: coord_map(:) => null()
+ integer :: i, j, ind
+
+ if (mytid>=ntask) then
+
+ if (mlon0/=1) then
+ call endrun('register_grids: mlon0 needs to be 1 on inactive PEs')
+ end if
+ if (omlon1/=0) then
+ call endrun('register_grids: omlon1 needs to be 0 on inactive PEs')
+ end if
+ if (mlat0/=1) then
+ call endrun('register_grids: mlat0 needs to be 1 on inactive PEs')
+ end if
+ if (mlat1/=0) then
+ call endrun('register_grids: mlat1 needs to be 0 on inactive PEs')
+ end if
+
+ if (lon0/=1) then
+ call endrun('register_grids: lon0 needs to be 1 on inactive PEs')
+ end if
+ if (lon1/=0) then
+ call endrun('register_grids: lon1 needs to be 0 on inactive PEs')
+ end if
+ if (lat0/=1) then
+ call endrun('register_grids: lat0 needs to be 1 on inactive PEs')
+ end if
+ if (lat1/=0) then
+ call endrun('register_grids: lat1 needs to be 0 on inactive PEs')
+ end if
-!
-! Set horizontal geographic grid in radians (for apex code):
-!
- allocate(ylatg(nlat)) ! waccm grid includes poles
- allocate(ylong(nlonp1)) ! single periodic point
- real8 = dble(nlat) ; dlatg = pi/real8
- real8 = dble(nlon) ; dlong = 2._r8*pi/real8
- ylatg(1) = -pi/2._r8+eps ! south pole
- ylatg(nlat) = pi/2._r8-eps ! north pole
- do j=2,nlat-1
- real8 = dble(j-1)
- ylatg(j) = -0.5_r8*(pi-dlatg)+real8*dlatg
- enddo
- do i=1,nlonp1
- real8 = dble(i-1)
- ylong(i) = -pi+real8*dlong
- enddo
-!
-! Calculate cosine of latitude
-!
- allocate(cs(0:nlat+1))
- js = -(nlat/2)
- do j=1,nlat
- phi = (j+js-.5_r8)*dphi
- cs(j) = cos(phi)
- enddo
- cs(0) = -cs(1)
- cs(nlat+1) = -cs(nlat)
-
- end subroutine set_geogrid
-!-----------------------------------------------------------------------
- subroutine lonshift_global(f,nlon,lonseq,iscoord)
-!
-! Shift longitude vector f(nlon) forward 180 degrees according to input
-! string lonseq. Input f can be either arbitrary field values or
-! the coordinate array itself. Shift f in the 'lonseq' manner, as follows:
-!
-! If lonseq='-180to180', then shift from 0->360 to -180->+180
-! If lonseq='zeroto360', then shift from -180->+180 to 0->360
-!
-! WARNING: This routine works with WACCM-X history files, where nlon=144, 72, or 80
-! It has not been tested with other models or resolutions.
-! (e.g., there is no test for center point, its assumed to be nlon/2)
-!
-! Args:
- integer,intent(in) :: nlon
- real(r8),intent(inout) :: f(nlon)
- character(len=*),intent(in) :: lonseq
- logical,intent(in) :: iscoord ! if true, f is a coordinate, otherwise it is data
-!
-! Local:
- character(len=80) :: msg
- integer :: ihalf,i
-
- if (lonseq /= '-180to180'.and.lonseq /= 'zeroto360') then
- write(msg,"('shift_lon: bad lonseq=',a,' must be either ''-180to180'' or ''zeroto360''')") &
- lonseq
- call endrun
- endif
-
- ihalf = nlon/2
- if (lonseq == '-180to180') then ! shift to -180 -> +180
- f = cshift(f,ihalf) ! cshift is circular shift intrinsic
- if (iscoord) then
- do i=1,ihalf
- f(i) = f(i)-360._r8
- enddo
- endif
- else ! shift to 0 -> 360
- f = cshift(f,ihalf) ! cshift is circular shift intrinsic
- if (iscoord) then
- do i=ihalf+1,nlon
- f(i) = f(i)+360._r8
- enddo
endif
- endif
- end subroutine lonshift_global
-!-----------------------------------------------------------------------
- subroutine add_fields
- use cam_history, only: addfld, horiz_only, add_default
- use phys_control, only: phys_getopts !Method used to get flag for waccmx ionosphere output variables
-
- logical :: history_waccmx
-
-! Geomagnetic fields are in waccm format, in CGS units):
- call addfld ('PED_MAG' ,(/ 'lev' /), 'I', 'S/m ','Pedersen Conductivity' ,gridname='gmag_grid')
- call addfld ('HAL_MAG' ,(/ 'lev' /), 'I', 'S/m ','Hall Conductivity' ,gridname='gmag_grid')
- call addfld ('ZMAG' ,(/ 'lev' /), 'I', 'cm ','ZMAG: Geopotential' ,gridname='gmag_grid')
- call addfld ('PHIM2D' , horiz_only, 'I', 'VOLTS','PHIM2D: Electric Potential' ,gridname='gmag_grid')
- call addfld ('ED1' , horiz_only, 'I', 'V/m ','ED1: Eastward Electric Field' ,gridname='gmag_grid')
- call addfld ('ED2' , horiz_only, 'I', 'V/m ','ED2: Equatorward Electric Field' ,gridname='gmag_grid')
- call addfld ('PHIM3D' ,(/ 'lev' /), 'I', 'VOLTS','PHIM3D: 3d Electric Potential' ,gridname='gmag_grid')
-
- call addfld ('EPHI3D' ,(/ 'lev' /), 'I', ' ','EPHI3D' ,gridname='gmag_grid')
- call addfld ('ELAM3D' ,(/ 'lev' /), 'I', ' ','ELAM3D' ,gridname='gmag_grid')
- call addfld ('EMZ3D' ,(/ 'lev' /), 'I', ' ','EMZ3D' ,gridname='gmag_grid')
-
- call addfld ('ED13D' ,(/ 'lev' /), 'I', 'V/m ','ED13D: Eastward Electric Field' ,gridname='gmag_grid')
- call addfld ('ED23D' ,(/ 'lev' /), 'I', 'V/m ','ED23D: Equatorward Electric Field',gridname='gmag_grid')
- call addfld ('ZPOT_MAG' ,(/ 'lev' /), 'I', 'cm ','Geopotential on mag grid (h0 min)',gridname='gmag_grid')
- call addfld ('ADOTV1_MAG',(/ 'lev' /), 'I', ' ','ADOTV1 on mag grid' ,gridname='gmag_grid')
- call addfld ('ADOTV2_MAG',(/ 'lev' /), 'I', ' ','ADOTV2 on mag grid' ,gridname='gmag_grid')
-!
- call addfld ('amie_phihm' , horiz_only, 'I','VOLTS','AMIE Electric Potential-mag grid' ,gridname='gmag_grid')
- call addfld ('amie_efxm' , horiz_only, 'I','mW/m2','AMIE energy flux on mag grid' ,gridname='gmag_grid')
- call addfld ('amie_kevm' , horiz_only, 'I','keV ','AMIE mean energy on mag grid' ,gridname='gmag_grid')
- call addfld ('amie_efxg' , horiz_only, 'I','mW/m2','AMIE energy flux on geo grid' ,gridname='fv_centers')
- call addfld ('amie_kevg' , horiz_only, 'I','keV ','AMIE mean energy on geo grid' ,gridname='fv_centers')
-
-!
-! Dynamo inputs from sub dynamo_input (edynamo.F90):
- call addfld ('EDYN_TN ',(/ 'lev' /), 'I', 'deg K ','EDYN_TN' , gridname='fv_centers')
- call addfld ('EDYN_UN ',(/ 'lev' /), 'I', 'cm/s ','EDYN_UN' , gridname='fv_centers')
- call addfld ('EDYN_VN ',(/ 'lev' /), 'I', 'cm/s ','EDYN_VN' , gridname='fv_centers')
- call addfld ('EDYN_OMG ',(/ 'lev' /), 'I', 's-1 ','EDYN_OMG' , gridname='fv_centers')
- call addfld ('EDYN_Z ',(/ 'lev' /), 'I', 'cm ','EDYN_ZHT' , gridname='fv_centers')
- call addfld ('EDYN_BARM ',(/ 'lev' /), 'I', ' ','EDYN_MBAR' , gridname='fv_centers')
- call addfld ('EDYN_PED ',(/ 'lev' /), 'I', 'S/m ','EDYN_PED' , gridname='fv_centers')
- call addfld ('EDYN_HALL ',(/ 'lev' /), 'I', 'S/m ','EDYN_HALL' , gridname='fv_centers')
-
-! call addfld ('EDYN_SCHT ',(/ 'lev' /), 'I', ' ','EDYN_SCHT ' , gridname='fv_centers')
- call addfld ('EDYN_WN ',(/ 'lev' /), 'I', 'm/s ','EDYN_WN ' , gridname='fv_centers')
- call addfld ('EDYN_ADOTV1 ',(/ 'lev' /), 'I', ' ','EDYN_ADOTV1' , gridname='fv_centers')
- call addfld ('EDYN_ADOTV2 ',(/ 'lev' /), 'I', ' ','EDYN_ADOTV2' , gridname='fv_centers')
-!
-! 2d dynamo input fields on geo grid (edynamo.F90):
- call addfld ('EDYN_SINI ', horiz_only , 'I', ' ','EDYN_SINI' , gridname='fv_centers')
- call addfld ('EDYN_ADOTA1 ', horiz_only , 'I', ' ','EDYN_ADOTA1' , gridname='fv_centers')
- call addfld ('EDYN_ADOTA2 ', horiz_only , 'I', ' ','EDYN_ADOTA2' , gridname='fv_centers')
- call addfld ('EDYN_A1DTA2 ', horiz_only , 'I', ' ','EDYN_A1DTA2' , gridname='fv_centers')
- call addfld ('EDYN_BE3 ', horiz_only , 'I', ' ','EDYN_BE3' , gridname='fv_centers')
-
-
- call addfld ('ADOTA1', horiz_only , 'I', ' ','ADOTA1' , gridname='fv_centers')
- call addfld ('ADOTA1_MAG', horiz_only , 'I', ' ','ADOTA1 in geo-mag coords' , gridname='fv_centers')
-
-! 3d ion drifts and 2d conductances at end of dpie_coupling
-! (from either edynamo or time3d):
-!
-! call addfld ('TIME3D_ZIGM11',horiz_only,'I',' ','TIME3D_ZIGM11',gridname='gmag_grid)
-! call addfld ('TIME3D_ZIGM22',horiz_only,'I',' ','TIME3D_ZIGM22',gridname='gmag_grid)
-! call addfld ('TIME3D_ZIGMC' ,horiz_only,'I',' ','TIME3D_ZIGMC' ,gridname='gmag_grid)
-! call addfld ('TIME3D_ZIGM2' ,horiz_only,'I',' ','TIME3D_ZIGM2' ,gridname='gmag_grid)
-! call addfld ('TIME3D_RIM1' ,horiz_only,'I',' ','TIME3D_RIM1' ,gridname='gmag_grid)
-! call addfld ('TIME3D_RIM2' ,horiz_only,'I',' ','TIME3D_RIM2' ,gridname='gmag_grid)
-
-! call addfld ('TIME3D_UI',(/ 'lev' /),'I',' ','TIME3D_UI')
-! call addfld ('TIME3D_VI',(/ 'lev' /),'I',' ','TIME3D_VI')
-! call addfld ('TIME3D_WI',(/ 'lev' /),'I',' ','TIME3D_WI')
-
-! call addfld ('T3D_OP_2WACCM',(/ 'lev' /),'I',' ','T3D_OP_2WACCM')
-! call addfld ('DPIE_OP',(/ 'lev' /),'I',' ','DPIE_OP') ! this is also below
-
- call addfld ('QEP',(/ 'lev' /), 'I', 'm^3/s' ,'Photo-Electron Production', gridname='fv_centers')
- call addfld ('QOP',(/ 'lev' /), 'I', 'm^3/s' ,'O+ Production Rate' , gridname='fv_centers')
- call addfld ('OpO2',(/ 'lev' /), 'I', 'cm^3/s' ,'Op+O2 Loss Rate' , gridname='fv_centers')
- call addfld ('OpN2',(/ 'lev' /), 'I', 'cm^3/s' ,'Op+N2 Loss Rate' , gridname='fv_centers')
- call addfld ('LOP',(/ 'lev' /), 'I', 'cm^3/s' ,'O+ Loss Rate' , gridname='fv_centers')
- call addfld ('SIGMA_PED' ,(/ 'lev' /), 'I', ' ','Pederson Conductivity' , gridname='fv_centers')
- call addfld ('SIGMA_HALL',(/ 'lev' /), 'I', ' ','Hall Conductivity' , gridname='fv_centers')
-
- call addfld ('adota1_mag_a', horiz_only, 'I', ' ','EDYN_ZIGM11',gridname='gmag_grid')
- call addfld ('ZIGM11_a', horiz_only, 'I', ' ','EDYN_ZIGM11',gridname='gmag_grid')
- call addfld ('EDYN_ZIGM11_0', horiz_only, 'I', ' ','EDYN_ZIGM11',gridname='gmag_grid')
- call addfld ('EDYN_ZIGM11', horiz_only, 'I', ' ','EDYN_ZIGM11',gridname='gmag_grid')
- call addfld ('EDYN_ZIGM11_PED', horiz_only, 'I', 'S','Pedersen Conductance',gridname='gmag_grid')
- call addfld ('EDYN_ZIGM22', horiz_only, 'I', ' ','EDYN_ZIGM22',gridname='gmag_grid')
- call addfld ('EDYN_ZIGMC' , horiz_only, 'I', ' ','EDYN_ZIGMC' ,gridname='gmag_grid')
- call addfld ('EDYN_ZIGM2' , horiz_only, 'I', ' ','EDYN_ZIGM2' ,gridname='gmag_grid')
- call addfld ('EDYN_ZIGM2_HAL' , horiz_only, 'I', 'S','Hall Conductance' ,gridname='gmag_grid')
- call addfld ('EDYN_RIM1' , horiz_only, 'I', ' ','EDYN_RIM1' ,gridname='gmag_grid')
- call addfld ('EDYN_RIM2' , horiz_only, 'I', ' ','EDYN_RIM2' ,gridname='gmag_grid')
-
- call addfld ('EDYN_UI',(/ 'lev' /), 'I', 'cm/s','EDYN_UI', gridname='fv_centers')
- call addfld ('EDYN_VI',(/ 'lev' /), 'I', 'cm/s','EDYN_VI', gridname='fv_centers')
- call addfld ('EDYN_WI',(/ 'lev' /), 'I', 'cm/s','EDYN_WI', gridname='fv_centers')
-
- call addfld ('POTEN' ,(/ 'lev' /), 'I', 'Volts','POTEN: Electric Potential',&
- gridname='fv_centers')
- call addfld ('EX' ,(/ 'lev' /), 'I', 'V/m' ,'EX: Zonal component of Electric Field',&
- gridname='fv_centers')
- call addfld ('EY' ,(/ 'lev' /), 'I', 'V/m' ,'EY: Meridional component of Electric Field',&
- gridname='fv_centers')
- call addfld ('EZ' ,(/ 'lev' /), 'I', 'V/m' ,'EZ: Vertical component of Electric Field',&
- gridname='fv_centers')
-
- call addfld ('ZEDYN360 ' ,(/ 'lev' /), 'I', 'm ','Geopotential 0 to 360 lon grid', gridname='fv_centers')
- call addfld ('ZEDYN180 ',(/ 'lev' /), 'I', 'm ','Geopotential -180 to 180 lon grid', gridname='fv_centers')
-
- call addfld ('BMOD' , horiz_only, 'I', ' ',' ' ,gridname='fv_centers')
- call addfld ('XB' , horiz_only, 'I', ' ',' ' ,gridname='fv_centers')
- call addfld ('YB' , horiz_only, 'I', ' ',' ' ,gridname='fv_centers')
- call addfld ('ZB' , horiz_only, 'I', ' ',' ' ,gridname='fv_centers')
-
- call addfld ('RJAC11' ,(/'lev'/), 'I', ' ',' ' ,gridname='fv_centers')
- call addfld ('RJAC12' ,(/'lev'/), 'I', ' ',' ' ,gridname='fv_centers')
- call addfld ('RJAC21' ,(/'lev'/), 'I', ' ',' ' ,gridname='fv_centers')
- call addfld ('RJAC22' ,(/'lev'/), 'I', ' ',' ' ,gridname='fv_centers')
-
- !-------------------------------------------------------------------------------
- ! Set default values for ionosphere history variables
- !-------------------------------------------------------------------------------
- call phys_getopts(history_waccmx_out=history_waccmx)
-
- if (history_waccmx) then
- call add_default ('EDYN_ZIGM11_PED' , 1, ' ')
- call add_default ('EDYN_ZIGM2_HAL' , 1, ' ')
- end if
-
- end subroutine add_fields
-!-----------------------------------------------------------------------
- subroutine register_maggrid
-
- use cam_grid_support, only: horiz_coord_t, horiz_coord_create, iMap, cam_grid_register
- use edyn_mpi, only: mlat0,mlat1,mlon0,omlon1
- use edyn_maggrid, only: gmlat, gmlon, nmlat, nmlon
- integer, parameter :: mag_decomp = 111 !arbitrary value
-
- type(horiz_coord_t), pointer :: lat_coord
- type(horiz_coord_t), pointer :: lon_coord
- integer(iMap), pointer :: grid_map(:,:)
- integer(iMap), pointer :: coord_map(:)
- integer :: i,j,ind
-
- allocate(grid_map(4, ((omlon1 - mlon0 + 1) * (mlat1 - mlat0 + 1))))
- ind = 0
- do i = mlat0, mlat1
- do j = mlon0, omlon1
- ind = ind + 1
- grid_map(1, ind) = j
- grid_map(2, ind) = i
- grid_map(3, ind) = j
- grid_map(4, ind) = i
- end do
- end do
-
- allocate(coord_map(mlat1 - mlat0 + 1))
- coord_map = (/ (i, i = mlat0, mlat1) /)
- lat_coord => horiz_coord_create('mlat', '', nmlat, 'latitude', &
- 'degrees_north', mlat0, mlat1, gmlat(mlat0:mlat1), &
- map=coord_map)
- nullify(coord_map)
-
- allocate(coord_map(omlon1 - mlon0 + 1))
- coord_map = (/ (i, i = mlon0, omlon1) /)
- lon_coord => horiz_coord_create('mlon', '', nmlon, 'longitude', &
- 'degrees_east', mlon0, omlon1, gmlon(mlon0:omlon1), &
- map=coord_map)
- deallocate(coord_map)
- nullify(coord_map)
-
- call cam_grid_register('gmag_grid', mag_decomp, lat_coord, lon_coord, &
- grid_map, unstruct=.false.)
- nullify(grid_map)
-
- end subroutine register_maggrid
+ allocate(grid_map(4, ((omlon1 - mlon0 + 1) * (mlat1 - mlat0 + 1))))
+ ind = 0
+ do i = mlat0, mlat1
+ do j = mlon0, omlon1
+ ind = ind + 1
+ grid_map(1, ind) = j
+ grid_map(2, ind) = i
+ grid_map(3, ind) = j
+ grid_map(4, ind) = i
+ end do
+ end do
+
+ allocate(coord_map(mlat1 - mlat0 + 1))
+ coord_map = (/ (i, i = mlat0, mlat1) /)
+ lat_coord => horiz_coord_create('mlat', '', nmlat, 'latitude', &
+ 'degrees_north', mlat0, mlat1, gmlat(mlat0:mlat1), &
+ map=coord_map)
+ nullify(coord_map)
+
+ allocate(coord_map(omlon1 - mlon0 + 1))
+ coord_map = (/ (i, i = mlon0, omlon1) /)
+ lon_coord => horiz_coord_create('mlon', '', nmlon, 'longitude', &
+ 'degrees_east', mlon0, omlon1, gmlon(mlon0:omlon1), &
+ map=coord_map)
+ deallocate(coord_map)
+ nullify(coord_map)
+
+ call cam_grid_register('gmag_grid', mag_decomp, lat_coord, lon_coord, &
+ grid_map, unstruct=.false.)
+ nullify(grid_map)
+
+
+ ! for the Oplus geo grid
+ allocate(grid_map(4, ((lon1 - lon0 + 1) * (lat1 - lat0 + 1))))
+ ind = 0
+ do i = lat0, lat1
+ do j = lon0, lon1
+ ind = ind + 1
+ grid_map(1, ind) = j
+ grid_map(2, ind) = i
+ grid_map(3, ind) = j
+ grid_map(4, ind) = i
+ end do
+ end do
+
+ allocate(coord_map(lat1 - lat0 + 1))
+ coord_map = (/ (i, i = lat0, lat1) /)
+ lat_coord => horiz_coord_create('glat', '', nlat, 'latitude', &
+ 'degrees_north', lat0, lat1, glat(lat0:lat1), &
+ map=coord_map)
+ nullify(coord_map)
+
+ allocate(coord_map(lon1 - lon0 + 1))
+ coord_map = (/ (i, i = lon0, lon1) /)
+ lon_coord => horiz_coord_create('glon', '', nlon, 'longitude', &
+ 'degrees_east', lon0, lon1, glon(lon0:lon1), &
+ map=coord_map)
+ deallocate(coord_map)
+ nullify(coord_map)
+
+ call cam_grid_register('geo_grid', geo_decomp, lat_coord, lon_coord, &
+ grid_map, unstruct=.false.)
+ nullify(grid_map)
+
+ end subroutine register_grids
!-----------------------------------------------------------------------
end module edyn_init
diff --git a/src/ionosphere/waccmx/edyn_maggrid.F90 b/src/ionosphere/waccmx/edyn_maggrid.F90
index e5935ff4d5..c3d455c559 100644
--- a/src/ionosphere/waccmx/edyn_maggrid.F90
+++ b/src/ionosphere/waccmx/edyn_maggrid.F90
@@ -1,150 +1,182 @@
module edyn_maggrid
- use shr_kind_mod, only : r8 => shr_kind_r8 ! 8-byte reals
- use cam_logfile, only: iulog
- implicit none
- save
-
-!
-! Global geomagnetic grid:
-!
- integer, parameter :: &
- nmlat = 97, & ! number of mag latitudes
- nmlath = (nmlat+1)/2, & ! index of magnetic equator
- nmlon = 80, & ! number of mag longitudes
- nmlonp1 = nmlon+1 ! number of longitudes plus periodic point
-!
-! Mag grid coordinates:
-!
- real(r8) :: &
- ylatm(nmlat), & ! magnetic latitudes (radians)
- ylonm(nmlonp1), & ! magnetic longitudes (radians)
- gmlat(nmlat), & ! magnetic latitudes (degrees)
- gmlon(nmlonp1), & ! magnetic longitudes (degrees)
- dlonm,dlatm
-!
-! Level coordinates will be same as geographic levels:
-!
- integer :: nmlev ! number of levels (same as nlev in geographic)
-
- real(r8) :: &
- rcos0s(nmlat), & ! cos(theta0)/cos(thetas)
- dt0dts(nmlat), & ! d(theta0)/d(thetas)
- dt1dts(nmlat) ! dt0dts/abs(sinim) (non-zero at equator)
-
- real(r8) :: table(91,2)
-
- logical :: debug=.false. ! set true for prints to stdout at each call
-
- contains
-!-----------------------------------------------------------------------
- subroutine set_maggrid
- use edyn_params ,only: pi,pi_dyn,rtd,r0
- use edyn_geogrid,only: nlev
-!
-! Local:
- integer :: i,j,n
- real(r8) :: tanths2,dtheta,real8
- real(r8) :: &
- tanth0(nmlat), &
- tanths(nmlat), &
- theta0(nmlat), &
- hamh0(nmlat)
-
- real(r8),parameter :: &
- e=1.e-6_r8, &
- r1=1.06e7_r8, &
- alfa=1.668_r8
-
- real(r8) :: table2(91,3:5)
-
- real8 = dble(nmlat-1)
- dlatm = pi_dyn/real8
- real8 = dble(nmlon)
- dlonm = 2._r8*pi_dyn/real8
-!
-! ylatm is equally spaced in theta0, but holds corresponding value of thetas.
-!
- do j=1,nmlat
- real8 = dble(j-1)
- theta0(j) = -pi_dyn/2._r8+real8*dlatm ! note use of pi_dyn
- enddo ! j=1,nmlat
- do j=2,nmlat-1
- tanth0(j) = abs(tan(theta0(j)))
- hamh0(j) = r1*tanth0(j)+r0*tanth0(j)**(2._r8+2._r8*alfa)/ &
- (1._r8+tanth0(j)**2)**alfa
- tanths(j) = sqrt(hamh0(j)/r0)
- ylatm(j) = sign(atan(tanths(j)),theta0(j))
- rcos0s(j) = sqrt((1._r8+tanths(j)**2)/(1._r8+tanth0(j)**2))
-!
-! Timegcm has an alternate calculation for dt1dts and dt0dts if dynamo
-! is not called.
-!
- tanths2 = tanths(j)**2
- dt1dts(j) = &
- (r0*sqrt(1._r8+4._r8*tanths2)*(1._r8+tanths2))/ &
- (r1*(1._r8+tanth0(j)**2)+2._r8*r0*tanth0(j)**(2._r8*alfa+1._r8)* &
- (1._r8+alfa+tanth0(j)**2)/(1._r8+tanth0(j)**2)**alfa)
- dt0dts(j) = dt1dts(j)*2._r8*tanths(j)/sqrt(1._r8+4._r8*tanths2)
- enddo ! j=2,nmlat-1
-!
-! Magnetic poles:
-!
- ylatm(1) = theta0(1)
- ylatm(nmlat) = theta0(nmlat)
- rcos0s(1) = 1._r8
- rcos0s(nmlat) = 1._r8
- dt0dts(1) = 1._r8
- dt0dts(nmlat) = 1._r8
-!
-! Magnetic longitudes:
-!
- do i=1,nmlonp1
- real8 = dble(i-1)
- ylonm(i) = -pi+real8*dlonm
-! ylonm(i) = real8*dlonm
- enddo ! i=1,nmlonp1
-!
-! Define mag grid in degrees, and mag levels:
-!
- gmlat(:) = ylatm(:)*rtd
- gmlon(:) = ylonm(:)*rtd
-!
-! Magnetic levels are same as midpoint geographic levels:
-!
- nmlev = nlev
-
-!
-! Calculate table:
-!
- table(1,1) = 0._r8
- table(1,2) = 0._r8
- dtheta = pi/180._r8
- do i=2,91
- table(i,1) = table(i-1,1)+dtheta
- enddo
- do i=2,90
- table2(i,4) = tan(table(i,1))
- table(i,2) = table(i,1)
- enddo ! i=2,90
- table(91,2) = table(91,1)
- do n=1,7
+ use shr_kind_mod, only : r8 => shr_kind_r8 ! 8-byte reals
+ use cam_logfile, only: iulog
+ use edyn_params, only: finit
+
+ implicit none
+
+ !
+ ! Global geomagnetic grid:
+ !
+ integer, protected :: &
+ nmlat, & ! number of mag latitudes
+ nmlath, & ! index of magnetic equator
+ nmlon, & ! number of mag longitudes
+ nmlonp1 ! number of longitudes plus periodic point
+
+ !
+ ! geomagnetic grid resolution parameters:
+ !
+ integer, protected :: res_nlev
+ integer, protected :: res_ngrid
+
+ !
+ ! Mag grid coordinates:
+ !
+ real(r8), allocatable, protected :: &
+ ylatm(:), & ! magnetic latitudes (radians)
+ ylonm(:), & ! magnetic longitudes (radians)
+ gmlat(:), & ! magnetic latitudes (degrees)
+ gmlon(:) ! magnetic longitudes (degrees)
+ real(r8), protected :: dlonm,dlatm
+ !
+ ! Level coordinates will be same as geographic levels:
+ !
+ integer, protected :: nmlev ! number of levels (same as nlev in geographic)
+
+ real(r8), allocatable, protected :: &
+ rcos0s(:), & ! cos(theta0)/cos(thetas)
+ dt0dts(:), & ! d(theta0)/d(thetas)
+ dt1dts(:) ! dt0dts/abs(sinim) (non-zero at equator)
+
+
+ real(r8), protected :: table(91,2) = finit
+
+ logical, private :: debug = .false. ! set true for prints to stdout at each call
+
+ contains
+
+ !-----------------------------------------------------------------------
+ subroutine alloc_maggrid( mag_nlon, mag_nlat, mag_nlev, mag_ngrid )
+
+ integer, intent(in) :: mag_nlon, mag_nlat, mag_nlev, mag_ngrid
+
+ res_nlev = mag_nlev
+ res_ngrid = mag_ngrid
+
+ nmlat = mag_nlat ! number of mag latitudes
+ nmlath = (nmlat+1)/2 ! index of magnetic equator
+ nmlon = mag_nlon ! number of mag longitudes
+ nmlonp1 = nmlon+1 ! number of longitudes plus periodic point
+
+ allocate(ylatm(nmlat))
+ allocate(ylonm(nmlonp1))
+ allocate(gmlat(nmlat))
+ allocate(gmlon(nmlonp1))
+ allocate(rcos0s(nmlat))
+ allocate(dt0dts(nmlat))
+ allocate(dt1dts(nmlat))
+
+ end subroutine alloc_maggrid
+
+ !-----------------------------------------------------------------------
+ subroutine set_maggrid()
+ use edyn_params, only: pi, pi_dyn, rtd, r0
+ use edyn_mpi, only: nlev => nlev_geo
+ !
+ ! Local:
+ integer :: i, j, n
+ real(r8) :: tanths2, dtheta, real8
+ real(r8) :: tanth0(nmlat)
+ real(r8) :: tanths(nmlat)
+ real(r8) :: theta0(nmlat)
+ real(r8) :: hamh0(nmlat)
+
+ real(r8), parameter :: e = 1.e-6_r8
+ real(r8), parameter :: r1 = 1.06e7_r8
+ real(r8), parameter :: alfa = 1.668_r8
+
+ real(r8) :: table2(91, 3:5)
+
+ real8 = real(nmlat-1, r8)
+ dlatm = pi_dyn / real8
+ real8 = real(nmlon, r8)
+ dlonm = 2._r8 * pi_dyn / real8
+ !
+ ! ylatm is equally spaced in theta0, but holds the corresponding
+ ! value of thetas.
+ !
+ do j = 1, nmlat
+ real8 = real(j-1, r8)
+ theta0(j) = -pi_dyn/2._r8+real8*dlatm ! note use of pi_dyn
+ end do ! j=1,nmlat
+ do j=2,nmlat-1
+ tanth0(j) = abs(tan(theta0(j)))
+ hamh0(j) = r1*tanth0(j)+r0*tanth0(j)**(2._r8+2._r8*alfa)/ &
+ (1._r8+tanth0(j)**2)**alfa
+ tanths(j) = sqrt(hamh0(j)/r0)
+ ylatm(j) = sign(atan(tanths(j)),theta0(j))
+ rcos0s(j) = sqrt((1._r8+tanths(j)**2)/(1._r8+tanth0(j)**2))
+ !
+ ! Timegcm has an alternate calculation for dt1dts and dt0dts if dynamo
+ ! is not called.
+ !
+ tanths2 = tanths(j)**2
+ dt1dts(j) = &
+ (r0*sqrt(1._r8+4._r8*tanths2)*(1._r8+tanths2))/ &
+ (r1*(1._r8+tanth0(j)**2)+2._r8*r0*tanth0(j)**(2._r8*alfa+1._r8)* &
+ (1._r8+alfa+tanth0(j)**2)/(1._r8+tanth0(j)**2)**alfa)
+ dt0dts(j) = dt1dts(j)*2._r8*tanths(j)/sqrt(1._r8+4._r8*tanths2)
+ end do ! j=2,nmlat-1
+ !
+ ! Magnetic poles:
+ !
+ ylatm(1) = theta0(1)
+ ylatm(nmlat) = theta0(nmlat)
+ rcos0s(1) = 1._r8
+ rcos0s(nmlat) = 1._r8
+ dt0dts(1) = 1._r8
+ dt0dts(nmlat) = 1._r8
+ !
+ ! Magnetic longitudes:
+ !
+ do i=1,nmlonp1
+ real8 = real(i-1, r8)
+ ylonm(i) = -pi+real8*dlonm
+ ! ylonm(i) = real8*dlonm
+ end do ! i=1,nmlonp1
+ !
+ ! Define mag grid in degrees, and mag levels:
+ !
+ gmlat(:) = ylatm(:)*rtd
+ gmlon(:) = ylonm(:)*rtd
+ !
+ ! Magnetic levels are same as midpoint geographic levels:
+ !
+ nmlev = nlev
+
+ !
+ ! Calculate table:
+ !
+ table(1,1) = 0._r8
+ table(1,2) = 0._r8
+ dtheta = pi / 180._r8
+ do i = 2, 91
+ table(i,1) = table(i-1,1)+dtheta
+ end do
do i=2,90
- table2(i,3) = table(i,2)
- table(i,2) = tan(table2(i,3))
- table2(i,5) = sqrt(r1/r0*table(i,2)+table(i,2)**(2._r8*(1._r8+alfa))/ &
- (1._r8+table(i,2)**2)**alfa)
- table(i,2) = table2(i,3)-(table2(i,5)-table2(i,4))*2._r8* &
- table2(i,5)/(r1/r0*(1._r8+table(i,2)**2)+2._r8*table(i,2)** &
- (2._r8*alfa+1._r8)*(1._r8+alfa+table(i,2)**2)/ &
- (1._r8+table(i,2)**2)**alfa)
- enddo ! i=2,90
- enddo ! n=1,7
-
- if (debug) then
- write(iulog,"('set_maggrid: table= ',/,(6e12.4))") table
- write(iulog,"('set_maggrid: table2=',/,(6e12.4))") table2
- endif
-
- end subroutine set_maggrid
-!-----------------------------------------------------------------------
+ table2(i,4) = tan(table(i,1))
+ table(i,2) = table(i,1)
+ end do ! i=2,90
+ table(91,2) = table(91,1)
+ do n=1,7
+ do i=2,90
+ table2(i,3) = table(i,2)
+ table(i,2) = tan(table2(i,3))
+ table2(i,5) = sqrt(r1/r0*table(i,2)+table(i,2)**(2._r8*(1._r8+alfa))/ &
+ (1._r8+table(i,2)**2)**alfa)
+ table(i,2) = table2(i,3)-(table2(i,5)-table2(i,4))*2._r8* &
+ table2(i,5)/(r1/r0*(1._r8+table(i,2)**2)+2._r8*table(i,2)** &
+ (2._r8*alfa+1._r8)*(1._r8+alfa+table(i,2)**2)/ &
+ (1._r8+table(i,2)**2)**alfa)
+ end do ! i=2,90
+ end do ! n=1,7
+
+ if (debug) then
+ write(iulog,"('set_maggrid: table= ',/,(6e12.4))") table
+ write(iulog,"('set_maggrid: table2=',/,(6e12.4))") table2
+ end if
+
+ end subroutine set_maggrid
+ !-----------------------------------------------------------------------
end module edyn_maggrid
diff --git a/src/ionosphere/waccmx/edyn_mpi.F90 b/src/ionosphere/waccmx/edyn_mpi.F90
index 691b1051a6..f93a2208a4 100644
--- a/src/ionosphere/waccmx/edyn_mpi.F90
+++ b/src/ionosphere/waccmx/edyn_mpi.F90
@@ -1,2081 +1,2101 @@
module edyn_mpi
- use shr_kind_mod ,only: r8 => shr_kind_r8
- use cam_logfile ,only: iulog
- use cam_abortutils,only: endrun
-
- use edyn_geogrid ,only: nlon,nlat
- use edyn_maggrid ,only: nmlonp1,nmlat,nmlath,nmlev ! note nmlev is not a parameter
- use spmd_utils ,only: masterproc
- use mpi ,only: mpi_comm_size, mpi_comm_rank, MPI_PROC_NULL, mpi_comm_split, &
- MPI_INTEGER, MPI_STATUS_SIZE, mpi_wait, &
- MPI_REAL8, MPI_SUCCESS, MPI_SUM, &
- MPI_Comm_rank
-
- implicit none
- private
-
- public :: array_ptr_type,switch_model_format,mp_geo_halos,mp_pole_halos,mlon0,mlon1,omlon1, &
- mlat0,mlat1,mlev0,mlev1,mytid,lon0,lon1,lat0,lat1,lev0,lev1,mp_mag_halos,mp_scatter_phim, &
- mp_mageq,mp_mageq_jpm1,mp_magpole_2d,mp_mag_foldhem,mp_mag_periodic_f2d,mp_gather_edyn, &
- mp_mageq_jpm3,mp_mag_jslot,mp_magpoles,ixfind,mp_magpole_3d,ntask,ntaski,ntaskj,tasks, &
- nmagtaski,nmagtaskj,setpoles, mp_gatherlons_f3d, mytidi, mp_scatterlons_f3d, mp_exchange_tasks, &
- mp_distribute_mag, mp_distribute_geo, mp_init
-
-
-
-!
-! Number of MPI tasks and current task id (geo or mag):
-!
- integer :: &
- ntask, & ! number of mpi tasks
- mytid ! my task id
-!
-! Geographic subdomains for current task:
-!
- integer :: &
- ntaski, & ! number of tasks in lon dimension
- ntaskj, & ! number of tasks in lat dimension
- mytidi, & ! i coord for current task in task table
- mytidj, & ! j coord for current task in task table
- lat0,lat1, & ! first and last lats for each task
- lon0,lon1, & ! first and last lons for each task
- lev0,lev1, & ! first and last levs for each task (not distributed)
- mxlon,mxlat ! max number of subdomain lon,lat points among all tasks
-!
-! Magnetic subdomains for current task:
-!
- integer :: &
- nmagtaski, & ! number of tasks in mag lon dimension
- nmagtaskj, & ! number of tasks in mag lat dimension
- magtidi, & ! i coord for current task in task table
- magtidj, & ! j coord for current task in task table
- mlat0,mlat1, & ! first and last mag lats for each task
- mlon0,mlon1, & ! first and last mag lons for each task
- omlon1, & ! last mag lons for each task to remove periodic point from outputs
- mlev0,mlev1, & ! first and last mag levs (not distributed)
- mxmaglon, & ! max number of mag subdomain lon points among all tasks
- mxmaglat ! max number of mag subdomain lat points among all tasks
-
- integer,allocatable,save :: &
- itask_table_geo(:,:), & ! 2d table of tasks on geographic grid (i,j)
- itask_table_mag(:,:) ! 2d table of tasks on mag grid (i,j)
-
- integer :: cols_comm ! communicators for each task column
- integer :: rows_comm ! communicators for each task row
-!
-! Task type: subdomain information for all tasks, known by all tasks:
-!
- type task
- integer :: mytid ! task id
-!
-! Geographic subdomains in task structure:
- integer :: mytidi ! task coord in longitude dimension of task table
- integer :: mytidj ! task coord in latitude dimension of task table
- integer :: nlats ! number of latitudes calculated by this task
- integer :: nlons ! number of longitudes calculated by this task
- integer :: lat0,lat1 ! first and last latitude indices
- integer :: lon0,lon1 ! first and last longitude indices
-!
-! Magnetic subdomains in task structure:
- integer :: magtidi ! task coord in mag longitude dimension of task table
- integer :: magtidj ! task coord in mag latitude dimension of task table
- integer :: nmaglats ! number of mag latitudes calculated by this task
- integer :: nmaglons ! number of mag longitudes calculated by this task
- integer :: mlat0,mlat1 ! first and last latitude indices
- integer :: mlon0,mlon1 ! first and last longitude indices
- end type task
-!
-! type(task) :: tasks(ntask) will be made available to all tasks
-! (so each task has information about all tasks)
-!
- type(task),allocatable,save :: tasks(:)
-!
-! Conjugate points in mag subdomains, for mp_mag_foldhem
-!
- integer,allocatable,dimension(:),save :: & ! (ntask)
- nsend_south, & ! number of south lats to send to north (each task)
- nrecv_north ! number of north lats to send to south (each task)
- integer,allocatable,dimension(:,:),save :: & ! (mxlats,ntask)
- send_south_coords, & ! south j lats to send to north
- recv_north_coords ! north j lats to recv from south
-
- type array_ptr_type
- real(r8),pointer :: ptr(:,:,:) ! (k,i,j)
- end type array_ptr_type
-
- integer, protected :: mpi_comm_edyn = -9999
-
- logical, parameter :: debug = .false.
-
- contains
-!-----------------------------------------------------------------------
- subroutine mp_init( mpi_comm )
-!
-! Initialize MPI, and allocate task table.
-!
- integer, intent(in) :: mpi_comm
-
- integer :: ier
-
- mpi_comm_edyn = mpi_comm
-
- call mpi_comm_size(mpi_comm_edyn,ntask,ier)
- call mpi_comm_rank(mpi_comm_edyn,mytid,ier)
-!
-! Allocate array of task structures:
-!
- allocate(tasks(0:ntask-1),stat=ier)
- if (ier /= 0) then
- write(iulog,"('>>> mp_init: error allocating tasks(',i3,')')") ntask
- call endrun('edyn_mpi mp_init')
- endif
- end subroutine mp_init
-!-----------------------------------------------------------------------
- subroutine mp_distribute_geo(lonndx0,lonndx1,latndx0,latndx1,levndx0,levndx1, ntaski_in,ntaskj_in)
-!
-! Args:
- integer, intent(in) :: lonndx0,lonndx1,latndx0,latndx1,levndx0,levndx1, ntaski_in,ntaskj_in
-!
-! Local:
- integer :: i,j,n,irank,ier,tidrow,nj,ni
-!
-! Define all task structures with current task values
-! (redundant for alltoall):
-! Use WACCM subdomains:
-!
- lon0 = lonndx0 ; lon1 = lonndx1
- lat0 = latndx0 ; lat1 = latndx1
- lev0 = levndx0 ; lev1 = levndx1
-
- ntaski = ntaski_in
- ntaskj = ntaskj_in
-!
-! Allocate and set 2d table of tasks:
-!
- allocate(itask_table_geo(-1:ntaski,-1:ntaskj),stat=ier)
- if (ier /= 0) then
- write(iulog,"('>>> Error allocating itable: ntaski,j=',2i4)") ntaski,ntaskj
- call endrun('itask_table_geo')
- endif
- itask_table_geo(:,:) = MPI_PROC_NULL
-
- irank = 0
- do j = 0,ntaskj-1
- do i = 0,ntaski-1
- itask_table_geo(i,j) = irank
- if (mytid == irank) then
- mytidi = i
- mytidj = j
- endif
- irank = irank+1
- enddo
-!
-! Tasks are periodic in longitude:
-! (this is not done in tiegcm, but here sub mp_geo_halos depends on it)
-!
- itask_table_geo(-1,j) = itask_table_geo(ntaski-1,j)
- itask_table_geo(ntaski,j) = itask_table_geo(0,j)
-
- enddo ! j=0,ntaskj-1
-
- if (debug.and.masterproc) then
- write(iulog,"('mp_distribute_geo: mytid=',i4,' ntaski,j=',2i4,' mytidi,j=',2i4,&
- ' lon0,1=',2i4,' lat0,1=',2i4,' lev0,1=',2i4)") &
- mytid,ntaski,ntaskj,mytidi,mytidj,lon0,lon1,lat0,lat1,lev0,lev1
-!
-! Print table to stdout, including -1,ntaski:
-!
- write(iulog,"(/,'ntask=',i3,' ntaski=',i2,' ntaskj=',i2,' Geo Task Table:')") &
- ntask,ntaski,ntaskj
- do j=-1,ntaskj
- write(iulog,"('j=',i3,' itask_table_geo(:,j)=',100i3)") j,itask_table_geo(:,j)
- enddo
- endif
-!
-! Calculate start and end indices in lon,lat dimensions for each task:
-! For WACCM: do not call distribute_1d - lon0,1, lat0,1 are set from
-! waccm grid above.
-!
-! call distribute_1d(1,nlon,ntaski,mytidi,lon0,lon1)
-! call distribute_1d(1,nlat,ntaskj,mytidj,lat0,lat1)
+ use shr_kind_mod, only: r8 => shr_kind_r8, cl=>shr_kind_cl
+ use cam_logfile, only: iulog
+ use cam_abortutils, only: endrun
+
+ use spmd_utils, only: masterproc
+ use mpi, only: mpi_comm_size, mpi_comm_rank, mpi_comm_split
+ use mpi, only: MPI_PROC_NULL, mpi_wait, MPI_STATUS_SIZE
+ use mpi, only: MPI_INTEGER, MPI_REAL8, MPI_SUCCESS, MPI_SUM
+
+ implicit none
+ private
+ save
+
+ ! Public data
+ public :: mlon0, mlon1
+ public :: omlon1
+ public :: mlat0, mlat1
+ public :: mlev0, mlev1
+ public :: mytid
+ public :: lon0, lon1
+ public :: lat0, lat1
+ public :: lev0, lev1
+ public :: nlev_geo
+ public :: ntask
+ public :: ntaski
+ public :: ntaskj
+ public :: tasks
+ public :: nmagtaski
+ public :: nmagtaskj
+ public :: mytidi
+ ! Public type
+ public :: array_ptr_type
+ ! Public interfaces
+ public :: mp_init
+ public :: mp_geo_halos
+ public :: mp_pole_halos
+ public :: mp_mag_halos
+ public :: mp_scatter_phim
+ public :: mp_mageq
+ public :: mp_mageq_jpm1
+ public :: mp_magpole_2d
+ public :: mp_mag_foldhem
+ public :: mp_mag_periodic_f2d
+ public :: mp_gather_edyn
+ public :: mp_mageq_jpm3
+ public :: mp_mag_jslot
+ public :: mp_magpoles
+ public :: ixfind
+ public :: mp_magpole_3d
+ public :: setpoles
+ public :: mp_gatherlons_f3d
+ public :: mp_scatterlons_f3d
+ public :: mp_exchange_tasks
+ public :: mp_distribute_mag
+ public :: mp_distribute_geo
+
+ !
+ ! Number of MPI tasks and current task id (geo or mag):
+ !
+ integer :: &
+ ntask, & ! number of mpi tasks
+ mytid ! my task id
+ !
+ ! Geographic subdomains for current task:
+ !
+
+ integer, protected :: &
+ nlev_geo, & ! number of levels in the geographic grid
+ lon0=1, lon1=0, & ! first and last lons for each task
+ lat0=1, lat1=0, & ! first and last lats for each task
+ lev0, lev1, & ! first and last levs for each task (not distributed)
+ ntaski, & ! number of tasks in lon dimension
+ ntaskj, & ! number of tasks in lat dimension
+ mytidi ! i coord for current task in task table
+ integer :: &
+ nlon_geo, & ! size of geo lon dimension
+ nlat_geo, & ! size of geo lat dimension
+ mxlon, & ! max number of subdomain lon points among all tasks
+ mxlat, & ! max number of subdomain lat points among all tasks
+ mytidj ! j coord for current task in task table
+ !
+ ! Magnetic subdomains for current task:
+ !
+ integer, protected :: &
+ nmagtaski, & ! number of tasks in mag lon dimension
+ nmagtaskj, & ! number of tasks in mag lat dimension
+ magtidi, & ! i coord for current task in task table
+ magtidj, & ! j coord for current task in task table
+ mlat0=1,mlat1=0, & ! first and last mag lats for each task
+ mlon0=1,mlon1=0, & ! first and last mag lons for each task
+ omlon1=0, & ! last mag lons for each task to remove periodic point from outputs
+ mlev0,mlev1 ! first and last mag levs (not distributed)
+
+ integer :: &
+ mxmaglon, & ! max number of mag subdomain lon points among all tasks
+ mxmaglat ! max number of mag subdomain lat points among all tasks
+
+ integer, allocatable :: &
+ itask_table_geo(:,:), & ! 2d table of tasks on geographic grid (i,j)
+ itask_table_mag(:,:) ! 2d table of tasks on mag grid (i,j)
+
+ integer :: cols_comm ! communicators for each task column
+ integer :: rows_comm ! communicators for each task row
+ !
+ ! Task type: subdomain information for all tasks, known by all tasks:
+ !
+ type task
+ integer :: mytid ! task id
+ !
+ ! Geographic subdomains in task structure:
+ integer :: mytidi = -1 ! task coord in longitude dimension of task table
+ integer :: mytidj = -1 ! task coord in latitude dimension of task table
+ integer :: nlats = 0 ! number of latitudes calculated by this task
+ integer :: nlons = 0 ! number of longitudes calculated by this task
+ integer :: lat0 = 1, lat1 = 0 ! first and last latitude indices
+ integer :: lon0 = 1, lon1 = 0 ! first and last longitude indices
+ !
+ ! Magnetic subdomains in task structure:
+ integer :: magtidi = -1 ! task coord in mag longitude dimension of task table
+ integer :: magtidj = -1 ! task coord in mag latitude dimension of task table
+ integer :: nmaglats = 0 ! number of mag latitudes calculated by this task
+ integer :: nmaglons = 0 ! number of mag longitudes calculated by this task
+ integer :: mlat0 = 1,mlat1 = 0 ! first and last latitude indices
+ integer :: mlon0 = 1,mlon1 = 0 ! first and last longitude indices
+ end type task
+ !
+ ! type(task) :: tasks(ntask) will be made available to all tasks
+ ! (so each task has information about all tasks)
+ !
+ type(task), allocatable :: tasks(:)
+ !
+ ! Conjugate points in mag subdomains, for mp_mag_foldhem
+ !
+ integer,allocatable,dimension(:) :: & ! (ntask)
+ nsend_south, & ! number of south lats to send to north (each task)
+ nrecv_north ! number of north lats to send to south (each task)
+ integer,allocatable,dimension(:,:) :: & ! (mxlats,ntask)
+ send_south_coords, & ! south j lats to send to north
+ recv_north_coords ! north j lats to recv from south
+
+ !
+ ! Magnetic grid parameters
+ !
+ integer :: nmlat ! number of mag latitudes
+ integer :: nmlath ! index of magnetic equator
+ integer :: nmlonp1 ! number of longitudes plus periodic point
+ integer :: nmlev ! number of levels (= nlev in geo grid)
+
+ type array_ptr_type
+ real(r8),pointer :: ptr(:,:,:) ! (k,i,j)
+ end type array_ptr_type
+
+ integer, protected :: mpi_comm_edyn = -9999
+
+ logical, parameter :: debug = .false.
+
+contains
+ !-----------------------------------------------------------------------
+ subroutine mp_init(mpi_comm, ionos_npes, nlon_geo_in, nlat_geo_in, nlev_geo_in)
+ !
+ ! Initialize MPI, and allocate task table.
+ !
+ integer, intent(in) :: mpi_comm
+ integer, intent(in) :: ionos_npes
+ integer, intent(in) :: nlon_geo_in
+ integer, intent(in) :: nlat_geo_in
+ integer, intent(in) :: nlev_geo_in
+
+ integer :: ierr
+ integer :: color, npes
+ character(len=cl) :: errmsg
+
+ nlon_geo = nlon_geo_in
+ nlat_geo = nlat_geo_in
+ nlev_geo = nlev_geo_in
+ ntask = ionos_npes
+
+ call mpi_comm_size(mpi_comm, npes, ierr)
+ call mpi_comm_rank(mpi_comm, mytid, ierr)
+ color = mytid/ionos_npes
+ call mpi_comm_split(mpi_comm, color, mytid, mpi_comm_edyn, ierr)
+
+ !
+ ! Allocate array of task structures:
+ !
+ allocate(tasks(0:npes-1), stat=ierr)
+ if (ierr /= 0) then
+ write(errmsg,"('>>> mp_init: error allocating tasks(',i3,')')") ntask
+ write(iulog,*) trim(errmsg)
+ call endrun(errmsg)
+ endif
+ end subroutine mp_init
+ !-----------------------------------------------------------------------
+ subroutine mp_distribute_geo(lonndx0, lonndx1, latndx0, latndx1, levndx0, levndx1, ntaski_in, ntaskj_in)
+ !
+ ! Args:
+ integer, intent(in) :: lonndx0
+ integer, intent(in) :: lonndx1
+ integer, intent(in) :: latndx0
+ integer, intent(in) :: latndx1
+ integer, intent(in) :: levndx0
+ integer, intent(in) :: levndx1
+ integer, intent(in) :: ntaski_in
+ integer, intent(in) :: ntaskj_in
+ !
+ ! Local:
+ integer :: i, j, n, irank, ier, tidrow, nj, ni
+
+ !
+ ! Define all task structures with current task values
+ ! (redundant for alltoall):
+ ! Use WACCM subdomains:
+ !
+ lon0 = lonndx0 ; lon1 = lonndx1
+ lat0 = latndx0 ; lat1 = latndx1
+ lev0 = levndx0 ; lev1 = levndx1
+
+ ntaski = ntaski_in
+ ntaskj = ntaskj_in
+ !
+ ! Allocate and set 2d table of tasks:
+ !
+ allocate(itask_table_geo(-1:ntaski,-1:ntaskj),stat=ier)
+ if (ier /= 0) then
+ write(iulog,"('>>> Error allocating itable: ntaski,j=',2i4)") ntaski,ntaskj
+ call endrun('itask_table_geo')
+ endif
+ itask_table_geo(:,:) = MPI_PROC_NULL
+
+ irank = 0
+ mytidi = -1
+ mytidj = -1
+ do j = 0, ntaskj-1
+ do i = 0, ntaski-1
+ itask_table_geo(i,j) = irank
+ if (mytid == irank) then
+ mytidi = i
+ mytidj = j
+ end if
+ irank = irank+1
+ end do
+ !
+ ! Tasks are periodic in longitude:
+ ! (this is not done in tiegcm, but here sub mp_geo_halos depends on it)
+ !
+ itask_table_geo(-1,j) = itask_table_geo(ntaski-1,j)
+ itask_table_geo(ntaski,j) = itask_table_geo(0,j)
+
+ end do ! j=0,ntaskj-1
+
+ if (debug ) then
+ write(6,"('mp_distribute_geo: mytid=',i4,' ntaski,j=',2i4,' mytidi,j=',2i4,&
+ ' lon0,1=',2i4,' lat0,1=',2i4,' lev0,1=',2i4)") &
+ mytid,ntaski,ntaskj,mytidi,mytidj,lon0,lon1,lat0,lat1,lev0,lev1
+ !
+ ! Print table to stdout, including -1,ntaski:
+ !
+ write(6,"(/,'ntask=',i3,' ntaski=',i2,' ntaskj=',i2,' Geo Task Table:')") &
+ ntask,ntaski,ntaskj
+ do j=-1,ntaskj
+ write(iulog,"('j=',i3,' itask_table_geo(:,j)=',100i3)") j,itask_table_geo(:,j)
+ enddo
+ endif
+ !
+ ! Calculate start and end indices in lon,lat dimensions for each task:
+ ! For WACCM: do not call distribute_1d - lon0,1, lat0,1 are set from
+ ! waccm grid above.
+ !
+ ! call distribute_1d(1,nlon,ntaski,mytidi,lon0,lon1)
+ ! call distribute_1d(1,nlat,ntaskj,mytidj,lat0,lat1)
nj = lat1-lat0+1 ! number of latitudes for this task
ni = lon1-lon0+1 ! number of longitudes for this task
-!
-! Report my stats to stdout:
-! write(iulog,"(/,'mytid=',i3,' mytidi,j=',2i3,' lat0,1=',2i3,' (',i2,') lon0,1=',2i3,' (',i2,') ncells=',i4)") &
-! mytid,mytidi,mytidj,lat0,lat1,nj,lon0,lon1,ni
-!
-! Define all task structures with current task values
-! (redundant for alltoall):
-!
- do n=0,ntask-1
- tasks(n)%mytid = mytid
- tasks(n)%mytidi = mytidi
- tasks(n)%mytidj = mytidj
- tasks(n)%nlats = nj
- tasks(n)%nlons = ni
- tasks(n)%lat0 = lat0
- tasks(n)%lat1 = lat1
- tasks(n)%lon0 = lon0
- tasks(n)%lon1 = lon1
- enddo
-!
-! All tasks must have at least 4 longitudes:
-!
- do n=0,ntask-1
-
- if (debug.and.masterproc) then
- write(iulog,"('mp_distribute_geo: n=',i3,' tasks(n)%nlons=',i3,' tasks(n)%nlats=',i3)") &
- n,tasks(n)%nlons,tasks(n)%nlats
- endif
-
- if (tasks(n)%nlons < 4) then
- write(iulog,"('>>> mp_distribute_geo: each task must carry at least 4 longitudes. task=',i4,' nlons=',i4)") &
- n,tasks(n)%nlons
- call endrun('edyn_mpi: nlons per task')
+ !
+ ! Report my stats to stdout:
+ if (debug ) then
+ write(6,"(/,'mytid=',i3,' mytidi,j=',2i3,' lat0,1=',2i3,' (',i2,') lon0,1=',2i3,' (',i2,') ncells=',i4)") &
+ mytid,mytidi,mytidj,lat0,lat1,nj,lon0,lon1,ni
endif
- enddo
-!
-! Create sub-communicators for each task row (used by mp_geopole_3d):
-!
-! call mpi_comm_split(mpi_comm_edyn,mod(mytid,ntaskj),mytid,rows_comm,ier)
-! call MPI_Comm_rank(rows_comm,tidrow,ier)
-
- call mpi_comm_split(mpi_comm_edyn,mytidj,mytid,rows_comm,ier)
- call MPI_Comm_rank(rows_comm,tidrow,ier)
-
- if (debug.and.masterproc) then
- write(iulog,"('mp_distribute_geo: ntaskj=',i3,' tidrow=',i3)") &
- ntaskj,tidrow
- endif
-
- end subroutine mp_distribute_geo
-!-----------------------------------------------------------------------
- subroutine mp_distribute_mag
-!
-! Local:
- integer :: i,j,n,irank,ier,tidcol,nj,ni,ncells
-!
-! Number of tasks in mag lon,lat same as geo grid:
-! Also true for WACCM processor distribution.
-!
- nmagtaski = ntaski
- nmagtaskj = ntaskj
-!
-! Vertical dimension is not distributed:
- mlev0 = 1
- mlev1 = nmlev
-!
-! Allocate and set 2d table of tasks:
- allocate(itask_table_mag(-1:nmagtaski,-1:nmagtaskj),stat=ier)
- if (ier /= 0) then
- write(iulog,"('>>> Error allocating itable: nmagtaski,j=',2i3)") &
- nmagtaski,nmagtaskj
- call endrun('itask_table_mag')
- endif
- itask_table_mag(:,:) = MPI_PROC_NULL
- irank = 0
- do j = 0,nmagtaskj-1
- do i = 0,nmagtaski-1
- itask_table_mag(i,j) = irank
- if (mytid == irank) then
- magtidi = i
- magtidj = j
- endif
- irank = irank+1
- enddo
-!
-! Tasks are periodic in longitude:
-!
- itask_table_mag(-1,j) = itask_table_mag(nmagtaski-1,j)
- itask_table_mag(nmagtaski,j) = itask_table_mag(0,j)
- enddo
-
- if (debug.and.masterproc) then
-!
-! Print table to stdout:
- write(iulog,"(/,'ntask=',i3,' nmagtaski=',i2,' nmagtaskj=',i2,' Mag Task Table:')") &
- ntask,nmagtaski,nmagtaskj
- do j=-1,nmagtaskj
- write(iulog,"('j=',i3,' itask_table_mag(:,j)=',100i3)") j,itask_table_mag(:,j)
+ !
+ ! Define all task structures with current task values
+ ! (redundant for alltoall):
+ !
+ do n=0,ntask-1
+ tasks(n)%mytid = mytid
+ tasks(n)%mytidi = mytidi
+ tasks(n)%mytidj = mytidj
+ tasks(n)%nlats = nj
+ tasks(n)%nlons = ni
+ tasks(n)%lat0 = lat0
+ tasks(n)%lat1 = lat1
+ tasks(n)%lon0 = lon0
+ tasks(n)%lon1 = lon1
enddo
- endif
-!
-! Calculate start and end indices in mag lon,lat dimensions for each task:
-!
- call distribute_1d(1,nmlonp1,nmagtaski,magtidi,mlon0,mlon1)
- call distribute_1d(1,nmlat ,nmagtaskj,magtidj,mlat0,mlat1)
-
- omlon1=mlon1
- if (omlon1 == nmlonp1) omlon1=omlon1-1
-
- nj = mlat1-mlat0+1 ! number of mag latitudes for this task
- ni = mlon1-mlon0+1 ! number of mag longitudes for this task
- ncells = nj*ni ! total number of grid cells for this task
-
- if (debug.and.masterproc) then
-!
-! Report my stats to stdout:
- write(iulog,"(/,'mytid=',i3,' magtidi,j=',2i3,' mlat0,1=',2i3,' (',i2,') mlon0,1=',2i3,' (',i2,') ncells=',i4)") &
- mytid,magtidi,magtidj,mlat0,mlat1,nj,mlon0,mlon1,ni,ncells
- endif
-!
-! Define all task structures with current task values
-! (redundant for alltoall):
-!
- do n=0,ntask-1
- tasks(n)%magtidi = magtidi
- tasks(n)%magtidj = magtidj
- tasks(n)%nmaglats = nj
- tasks(n)%nmaglons = ni
- tasks(n)%mlat0 = mlat0
- tasks(n)%mlat1 = mlat1
- tasks(n)%mlon0 = mlon0
- tasks(n)%mlon1 = mlon1
- enddo
-!
-! All tasks must have at least 4 longitudes:
- do n=0,ntask-1
- if (tasks(n)%nmaglons < 4) then
- write(iulog,"('>>> mp_distribute_mag: each task must carry at least 4 longitudes. task=',i4,' nmaglons=',i4)") &
- n,tasks(n)%nmaglons
- call endrun('edyn_mpi: nmaglons per task')
+ !
+ ! All tasks must have at least 4 longitudes:
+ !
+ if (mytid < ntask) then
+ do n=0,ntask-1
+
+ if (debug) then
+ write(6,"('mp_distribute_geo: n=',i3,' tasks(n)%nlons=',i3,' tasks(n)%nlats=',i3)") &
+ n,tasks(n)%nlons,tasks(n)%nlats
+ endif
+
+ if (tasks(n)%nlons < 4) then
+ write(iulog,"('>>> mp_distribute_geo: each task must carry at least 4 longitudes. task=',i4,' nlons=',i4)") &
+ n,tasks(n)%nlons
+ call endrun('edyn_mpi: nlons per task')
+ endif
+ enddo
endif
- enddo
-!
-! Create subgroup communicators for each task column:
-! These communicators will be used by sub mp_mag_jslot (mpi.F).
-!
- call mpi_comm_split(mpi_comm_edyn,mod(mytid,nmagtaski),mytid,cols_comm,ier)
- call MPI_Comm_rank(cols_comm,tidcol,ier)
-
- if (debug.and.masterproc) then
- write(iulog,"('mp_distribute_mag: nmagtaski=',i3,' mod(mytid,nmagtaski)=',i3,' tidcol=',i3)") &
- nmagtaski,mod(mytid,nmagtaski),tidcol
- endif
-
- end subroutine mp_distribute_mag
-!-----------------------------------------------------------------------
- subroutine distribute_1d(n1,n2,nprocs,myrank,istart,iend)
-!
-! Distribute work across a 1d vector(n1->n2) to nprocs.
-! Return start and end indices for proc myrank.
-!
-! Args:
- integer,intent(in) :: n1,n2,nprocs,myrank
- integer,intent(out) :: istart,iend
-!
-! Local:
- integer :: lenproc,iremain,n
-!
- n = n2-n1+1
- lenproc = n/nprocs
- iremain = mod(n,nprocs)
- istart = n1 + myrank*lenproc + min(myrank,iremain)
- iend = istart+lenproc-1
- if (iremain > myrank) iend = iend+1
- end subroutine distribute_1d
-!-----------------------------------------------------------------------
- subroutine mp_exchange_tasks(iprint)
-!
-! Args:
- integer,intent(in) :: iprint
-!
-! Local:
-! itasks_send(len_task_type,ntask) will be used to send tasks(:) info
-! to all tasks (directly passing mpi derived data types is reportedly
-! not stable, or not available until MPI 2.x).
-!
- integer :: n,ier
- integer,parameter :: len_task_type = 17 ! see type task above
- integer,allocatable,save :: &
- itasks_send(:,:), & ! send buffer
- itasks_recv(:,:) ! send buffer
-!
-! Pack tasks(mytid) into itasks_send:
- allocate(itasks_send(len_task_type,0:ntask-1),stat=ier)
- if (ier /= 0) then
- write(iulog,"(i4,i4)") '>>> Error allocating itasks_send: len_task_type=',&
- len_task_type,' ntask=',ntask
- endif
- allocate(itasks_recv(len_task_type,0:ntask-1),stat=ier)
- if (ier /= 0) then
- write(iulog,"(i4,i4)") '>>> Error allocating itasks_recv: len_task_type=',&
- len_task_type,' ntask=',ntask
- endif
- do n=0,ntask-1
- itasks_send(1,n) = tasks(mytid)%mytid
-
- itasks_send(2,n) = tasks(mytid)%mytidi
- itasks_send(3,n) = tasks(mytid)%mytidj
- itasks_send(4,n) = tasks(mytid)%nlats
- itasks_send(5,n) = tasks(mytid)%nlons
- itasks_send(6,n) = tasks(mytid)%lat0
- itasks_send(7,n) = tasks(mytid)%lat1
- itasks_send(8,n) = tasks(mytid)%lon0
- itasks_send(9,n) = tasks(mytid)%lon1
-
- itasks_send(10,n) = tasks(mytid)%magtidi
- itasks_send(11,n) = tasks(mytid)%magtidj
- itasks_send(12,n) = tasks(mytid)%nmaglats
- itasks_send(13,n) = tasks(mytid)%nmaglons
- itasks_send(14,n) = tasks(mytid)%mlat0
- itasks_send(15,n) = tasks(mytid)%mlat1
- itasks_send(16,n) = tasks(mytid)%mlon0
- itasks_send(17,n) = tasks(mytid)%mlon1
- enddo
-!
-! Send itasks_send and receive itasks_recv:
- call mpi_alltoall(itasks_send,len_task_type,MPI_INTEGER,&
- itasks_recv,len_task_type,MPI_INTEGER,&
- mpi_comm_edyn,ier)
- if (ier /= 0) &
- call handle_mpi_err(ier,'edyn_mpi: mpi_alltoall to send/recv itasks')
-!
-! Unpack itasks_recv into tasks(n)
-!
- do n=0,ntask-1
- tasks(n)%mytid = itasks_recv(1,n)
-
- tasks(n)%mytidi = itasks_recv(2,n)
- tasks(n)%mytidj = itasks_recv(3,n)
- tasks(n)%nlats = itasks_recv(4,n)
- tasks(n)%nlons = itasks_recv(5,n)
- tasks(n)%lat0 = itasks_recv(6,n)
- tasks(n)%lat1 = itasks_recv(7,n)
- tasks(n)%lon0 = itasks_recv(8,n)
- tasks(n)%lon1 = itasks_recv(9,n)
-
- tasks(n)%magtidi = itasks_recv(10,n)
- tasks(n)%magtidj = itasks_recv(11,n)
- tasks(n)%nmaglats = itasks_recv(12,n)
- tasks(n)%nmaglons = itasks_recv(13,n)
- tasks(n)%mlat0 = itasks_recv(14,n)
- tasks(n)%mlat1 = itasks_recv(15,n)
- tasks(n)%mlon0 = itasks_recv(16,n)
- tasks(n)%mlon1 = itasks_recv(17,n)
-!
-! Report to stdout:
-!
- if (n==mytid.and.iprint > 0) then
- write(iulog,"(/,'Task ',i3,':')") n
- write(iulog,"(/,'Subdomain on geographic grid:')")
- write(iulog,"('tasks(',i3,')%mytid =',i3)") n,tasks(n)%mytid
- write(iulog,"('tasks(',i3,')%mytidi=',i3)") n,tasks(n)%mytidi
- write(iulog,"('tasks(',i3,')%mytidj=',i3)") n,tasks(n)%mytidj
- write(iulog,"('tasks(',i3,')%nlats =',i3)") n,tasks(n)%nlats
- write(iulog,"('tasks(',i3,')%nlons =',i3)") n,tasks(n)%nlons
- write(iulog,"('tasks(',i3,')%lat0 =',i3)") n,tasks(n)%lat0
- write(iulog,"('tasks(',i3,')%lat1 =',i3)") n,tasks(n)%lat1
- write(iulog,"('tasks(',i3,')%lon0 =',i3)") n,tasks(n)%lon0
- write(iulog,"('tasks(',i3,')%lon1 =',i3)") n,tasks(n)%lon1
- write(iulog,"('Number of geo subdomain grid points = ',i6)") &
- tasks(n)%nlons * tasks(n)%nlats
- write(iulog,"(/,'Subdomain on geomagnetic grid:')")
- write(iulog,"('tasks(',i3,')%magtidi=',i3)") n,tasks(n)%magtidi
- write(iulog,"('tasks(',i3,')%magtidj=',i3)") n,tasks(n)%magtidj
- write(iulog,"('tasks(',i3,')%nmaglats =',i3)") n,tasks(n)%nmaglats
- write(iulog,"('tasks(',i3,')%nmaglons =',i3)") n,tasks(n)%nmaglons
- write(iulog,"('tasks(',i3,')%mlat0 =',i3)") n,tasks(n)%mlat0
- write(iulog,"('tasks(',i3,')%mlat1 =',i3)") n,tasks(n)%mlat1
- write(iulog,"('tasks(',i3,')%mlon0 =',i3)") n,tasks(n)%mlon0
- write(iulog,"('tasks(',i3,')%mlon1 =',i3)") n,tasks(n)%mlon1
- write(iulog,"('Number of mag subdomain grid points = ',i6)") &
- tasks(n)%nmaglons * tasks(n)%nmaglats
+
+ !
+ ! Create sub-communicators for each task row (used by mp_geopole_3d):
+ !
+ ! call mpi_comm_split(mpi_comm_edyn,mod(mytid,ntaskj),mytid,rows_comm,ier)
+ ! call MPI_Comm_rank(rows_comm,tidrow,ier)
+
+ call mpi_comm_split(mpi_comm_edyn,mytidj,mytid,rows_comm,ier)
+ call MPI_Comm_rank(rows_comm,tidrow,ier)
+
+ if (debug.and.masterproc) then
+ write(iulog,"('mp_distribute_geo: ntaskj=',i3,' tidrow=',i3)") &
+ ntaskj,tidrow
endif
- enddo
-!
-! Release locally allocated space:
- deallocate(itasks_send)
- deallocate(itasks_recv)
-!
-! mxlon,mxlat are maximum number of lons,lats owned by all tasks:
- mxlon = -9999
- do n=0,ntask-1
- if (tasks(n)%nlons > mxlon) mxlon = tasks(n)%nlons
- enddo
- mxlat = -9999
- do n=0,ntask-1
- if (tasks(n)%nlats > mxlat) mxlat = tasks(n)%nlats
- enddo
-!
-! mxmaglon,mxmaglat are maximum number of mag lons,lats owned by all tasks:
- mxmaglon = -9999
- do n=0,ntask-1
- if (tasks(n)%nmaglons > mxmaglon) mxmaglon = tasks(n)%nmaglons
- enddo
- mxmaglat = -9999
- do n=0,ntask-1
- if (tasks(n)%nmaglats > mxmaglat) mxmaglat = tasks(n)%nmaglats
- enddo
-!
-! Find conjugate points for folding hemispheres:
- call conjugate_points
-
- end subroutine mp_exchange_tasks
-!-----------------------------------------------------------------------
- subroutine mp_mageq(fin,fout,nf,mlon0,mlon1,mlat0,mlat1,nmlev)
-!
-! Each task needs values of conductivities and adotv1,2 fields at the
-! at the mag equator for its longitude subdomain (and all levels), for
-! the fieldline integrations.
-!
-! On input, fin is ped_mag, hal_mag, adotv1_mag, adotv2_mag
-! on (i,j,k) magnetic subdomain.
-! On output, fout(mlon0:mlon1,nmlev,nf) is ped_meq, hal_meq, adotv1_meq,
-! adotv2_meq at mag equator at longitude subdomain and all levels.
-!
-! Args:
- integer :: mlon0,mlon1,mlat0,mlat1,nmlev,nf
- real(r8),intent(in) :: fin (mlon0:mlon1,mlat0:mlat1,nmlev,nf)
- real(r8),intent(out) :: fout(mlon0:mlon1,nmlev,nf)
-!
-! Local:
- real(r8) :: & ! mpi buffers
- sndbuf(mxmaglon,nmlev,nf), & ! mxmaglon,nmlev,nf
- rcvbuf(mxmaglon,nmlev,nf) ! mxmaglon,nmlev,nf
- integer :: i,j,n,itask,ier,len,jlateq,ireqsend,ireqrecv
- integer :: irstat(MPI_STATUS_SIZE) ! mpi receive status
- logical :: have_eq
-
- sndbuf = 0._r8
- rcvbuf = 0._r8
- len = mxmaglon*nmlev*nf
-!
-! If mag equator is in current subdomain, load it into sndbuf
-! and send to other tasks in my task column (mytidi)
-!
- jlateq = (nmlat+1)/2 ! lat index of mag equator (49)
- have_eq = .false.
- do j=mlat0,mlat1
- if (j == jlateq) then ! load send buffer w/ data at equator
- have_eq = .true.
- do i=mlon0,mlon1
- sndbuf(i-mlon0+1,:,:) = fin(i,j,:,:)
- enddo
-!
-! Send mag equator data to other tasks in my task column (mytidi):
- do itask=0,ntask-1
- if (itask /= mytid.and.tasks(itask)%mytidi==mytidi) then
- call mpi_isend(sndbuf,len,MPI_REAL8,itask,1, &
- mpi_comm_edyn,ireqsend,ier)
- if (ier /= 0) call handle_mpi_err(ier,'mp_mageq isend')
- call mpi_wait(ireqsend,irstat,ier)
- endif ! another task in mytidi
- enddo ! itask=0,ntask-1
- endif ! j==jlateq
- enddo ! j=mlat0,mlat1
-!
-! Receive by other tasks in the sending task's column:
- fout = 0._r8
- if (.not.have_eq) then ! find task to receive from
- do itask=0,ntask-1
- do j=tasks(itask)%mlat0,tasks(itask)%mlat1
- if (j == jlateq.and.tasks(itask)%mytidi==mytidi) then
- call mpi_irecv(rcvbuf,len,MPI_REAL8,itask,1, &
- mpi_comm_edyn,ireqrecv,ier)
- if (ier /= 0) call handle_mpi_err(ier,'mp_mageq irecv')
- call mpi_wait(ireqrecv,irstat,ier)
- do n=1,nf
- do i=mlon0,mlon1
- fout(i,:,n) = rcvbuf(i-mlon0+1,:,n)
- enddo
- enddo
- endif ! itask has mag eq and is in my column (sending task)
- enddo ! scan itask latitudes
- enddo ! task table search
-!
-! If I am the sending task, set fout to equator values of input array:
- else
- do n=1,nf
- do i=mlon0,mlon1
- fout(i,:,n) = fin(i,jlateq,:,n)
- enddo
- enddo
- endif ! I am receiving or sending task
- end subroutine mp_mageq
-!-----------------------------------------------------------------------
- subroutine mp_mageq_jpm1(f,mlon0,mlon1,mlat0,mlat1,nmlonp1,feq_jpm1,nf)
-!
-! All tasks need data at mag latitudes equator-1, equator+1 at global
-! longitudes.
-! On input: f is 6 fields on mag subdomains: zigm11,zigm22,zigmc,zigm2,rim1,rim2
-! On output: feq_jpm1(nmlonp1,2,nf)
-!
-! Args:
- integer,intent(in) :: mlon0,mlon1,mlat0,mlat1,nmlonp1,nf
- real(r8),intent(in) :: f(mlon0:mlon1,mlat0:mlat1,nf)
- real(r8),intent(out) :: feq_jpm1(nmlonp1,2,nf) ! eq-1,eq+1
-!
-! Local:
- integer :: j,ier,len,jlateq
- real(r8) :: sndbuf(nmlonp1,2,nf)
-
- sndbuf = 0._r8
- feq_jpm1 = 0._r8
- len = nmlonp1*2*nf
-!
-! Load send buffer w/ eq +/- 1 for current subdomain
-! (redundant to all tasks for alltoall)
-!
- jlateq = (nmlat+1)/2
- do j=mlat0,mlat1
- if (j == jlateq+1) then ! equator+1
- sndbuf(mlon0:mlon1,1,:) = f(mlon0:mlon1,j,:)
- elseif (j == jlateq-1) then ! equator-1
- sndbuf(mlon0:mlon1,2,:) = f(mlon0:mlon1,j,:)
- endif ! j==jlateq
- enddo ! j=mlat0,mlat1
-!
-! Do the exchange:
-!
- call mpi_allreduce( sndbuf(:,:,1:nf), feq_jpm1(:,:,1:nf), len, MPI_REAL8, MPI_SUM, mpi_comm_edyn, ier )
- if ( ier .ne. MPI_SUCCESS ) call handle_mpi_err(ier,'mp_mageq_jpm1 call mpi_allreduce')
-
-!
-! Periodic point:
- feq_jpm1(nmlonp1,:,:) = feq_jpm1(1,:,:)
-
- end subroutine mp_mageq_jpm1
-!-----------------------------------------------------------------------
- subroutine mp_mageq_jpm3(f,mlon0,mlon1,mlat0,mlat1,nmlonp1,feq_jpm3,nf)
-!
-! All tasks need global longitudes at mag latitudes equator,
-! and equator +/- 1,2,3
-! On input: f is nf fields on mag subdomains
-! On output: feq_jpm3(nmlonp1,-3:3,nf) has global lons at eq, eq +/- 1,2,3
-! 2nd dimension of feq_jpm3 (and send/recv buffers) is as follows:
-! +3: eq+3
-! +2: eq+2
-! +1: eq+1
-! 0: eq
-! -1: eq-1
-! -2: eq-2
-! -3: eq-3
-!
-! Args:
- integer,intent(in) :: mlon0,mlon1,mlat0,mlat1,nmlonp1,nf
- real(r8),intent(in) :: f(mlon0:mlon1,mlat0:mlat1,nf)
- real(r8),intent(out) :: feq_jpm3(nmlonp1,-3:3,nf)
-!
-! Local:
- integer :: j,ier,len,jlateq
- integer,parameter :: mxnf=6
-
- real(r8) :: sndbuf(nmlonp1,-3:3,mxnf)
-
- if (nf > mxnf) then
- write(iulog,"('>>> mp_mageq_jpm3: nf=',i4,' but cannot be called with greater than mxnf=',i4)") &
- nf,mxnf
- call endrun('mp_mageq_jpm3')
- endif
-
- sndbuf = 0._r8
- feq_jpm3 = 0._r8
- len = nmlonp1*7*nf
-!
-! Load send buffer w/ eq +/- 3 for current subdomain
-!
- jlateq = (nmlat+1)/2
- do j=mlat0,mlat1
- if (j == jlateq-3) then ! equator-3
- sndbuf(mlon0:mlon1,-3,1:nf) = f(mlon0:mlon1,j,:)
- elseif (j == jlateq-2) then ! equator-2
- sndbuf(mlon0:mlon1,-2,1:nf) = f(mlon0:mlon1,j,:)
- elseif (j == jlateq-1) then ! equator-1
- sndbuf(mlon0:mlon1,-1,1:nf) = f(mlon0:mlon1,j,:)
- elseif (j == jlateq) then ! equator
- sndbuf(mlon0:mlon1,0,1:nf) = f(mlon0:mlon1,j,:)
- elseif (j == jlateq+1) then ! equator+1
- sndbuf(mlon0:mlon1,1,1:nf) = f(mlon0:mlon1,j,:)
- elseif (j == jlateq+2) then ! equator+2
- sndbuf(mlon0:mlon1,2,1:nf) = f(mlon0:mlon1,j,:)
- elseif (j == jlateq+3) then ! equator+3
- sndbuf(mlon0:mlon1,3,1:nf) = f(mlon0:mlon1,j,:)
- endif ! j==jlateq
- enddo ! j=mlat0,mlat1
-!
-! Do the exchange:
-!
- call mpi_allreduce( sndbuf(:,:,1:nf), feq_jpm3(:,:,1:nf), len, MPI_REAL8, MPI_SUM, mpi_comm_edyn, ier )
- if ( ier .ne. MPI_SUCCESS ) call handle_mpi_err(ier,'mp_mageq_jpm3 call mpi_allreduce')
-
-!
-! Periodic point:
- feq_jpm3(nmlonp1,:,:) = feq_jpm3(1,:,:)
-
- end subroutine mp_mageq_jpm3
-!-----------------------------------------------------------------------
- subroutine mp_magpole_2d(f,ilon0,ilon1,ilat0,ilat1, &
- nglblon,jspole,jnpole,fpole_jpm2,nf)
-!
-! Return fpole_jpm2(nglblon,1->4,nf) as:
-! 1: j = jspole+1 (spole+1)
-! 2: j = jspole+2 (spole+2)
-! 3: j = jnpole-1 (npole-1)
-! 4: j = jnpole-2 (npole-2)
-! This can be called with different number of fields nf, but cannot
-! be called w/ > mxnf fields.
-!
-! Args:
- integer,intent(in) :: ilon0,ilon1,ilat0,ilat1,nglblon,jspole,jnpole,nf
- real(r8),intent(in) :: f(ilon0:ilon1,ilat0:ilat1,nf)
- real(r8),intent(out) :: fpole_jpm2(nglblon,4,nf)
-!
-! Local:
- integer :: j,ier,len
- integer,parameter :: mxnf=6
- real(r8) :: sndbuf(nglblon,4,mxnf)
-
- if (nf > mxnf) then
- write(iulog,"('>>> mp_magpole_2d: nf=',i4,' but cannot be called with greater than mxnf=',i4)") &
- nf,mxnf
- call endrun('mp_magpole_2d')
- endif
-
- sndbuf = 0._r8
- fpole_jpm2 = 0._r8
- len = nglblon*4*nf
-!
-! Load send buffer with values at poles +/- 2 for current subdomain
-!
- do j=ilat0,ilat1
- if (j==jspole+1) then ! south pole +1
- sndbuf(ilon0:ilon1,1,1:nf) = f(ilon0:ilon1,j,:)
- elseif (j==jspole+2) then ! south pole +2
- sndbuf(ilon0:ilon1,2,1:nf) = f(ilon0:ilon1,j,:)
- elseif (j==jnpole-1) then ! north pole -1
- sndbuf(ilon0:ilon1,3,1:nf) = f(ilon0:ilon1,j,:)
- elseif (j==jnpole-2) then ! north pole -2
- sndbuf(ilon0:ilon1,4,1:nf) = f(ilon0:ilon1,j,:)
- endif
- enddo
-
-!
-! Do the exchange:
-!
- call mpi_allreduce( sndbuf(:,:,1:nf), fpole_jpm2(:,:,1:nf), len, MPI_REAL8, MPI_SUM, mpi_comm_edyn, ier )
- if ( ier .ne. MPI_SUCCESS ) call handle_mpi_err(ier,'mp_magpole_2d call mpi_allreduce')
-
- end subroutine mp_magpole_2d
-!-----------------------------------------------------------------------
- subroutine mp_magpole_3d(f,ilon0,ilon1,ilat0,ilat1,nlev, nglblon,jspole,jnpole,fpole_jpm2,nf)
-!
-! Return fpole_jpm2(nglblon,1->4,nlev,nf) as:
-! 1: j = jspole+1 (spole+1)
-! 2: j = jspole+2 (spole+2)
-! 3: j = jnpole-1 (npole-1)
-! 4: j = jnpole-2 (npole-2)
-! This can be called with different number of fields nf, but cannot
-! be called w/ > mxnf fields.
-!
-! Args:
- integer,intent(in) :: ilon0,ilon1,ilat0,ilat1,nglblon,&
- jspole,jnpole,nf,nlev
- real(r8),intent(in) :: f(ilon0:ilon1,ilat0:ilat1,nlev,nf)
- real(r8),intent(out) :: fpole_jpm2(nglblon,4,nlev,nf)
-!
-! Local:
- integer :: j,k,ier,len
- integer,parameter :: mxnf=6
- real(r8) :: sndbuf(nglblon,4,nlev,mxnf)
-
- if (nf > mxnf) then
- write(iulog,"('>>> mp_magpole_3d: nf=',i4,' but cannot be called with greater than mxnf=',i4)") &
- nf,mxnf
- call endrun('mp_magpole_3d')
- endif
-
- sndbuf = 0._r8
- fpole_jpm2 = 0._r8
- len = nglblon*4*nlev*nf
-!
-! Load send buffer with values at poles +/- 2 for current subdomain
-!
- do j=ilat0,ilat1
- do k=1,nlev
- if (j==jspole+1) then ! south pole +1
- sndbuf(ilon0:ilon1,1,k,1:nf) = f(ilon0:ilon1,j,k,:)
- elseif (j==jspole+2) then ! south pole +2
- sndbuf(ilon0:ilon1,2,k,1:nf) = f(ilon0:ilon1,j,k,:)
- elseif (j==jnpole-1) then ! north pole -1
- sndbuf(ilon0:ilon1,3,k,1:nf) = f(ilon0:ilon1,j,k,:)
- elseif (j==jnpole-2) then ! north pole -2
- sndbuf(ilon0:ilon1,4,k,1:nf) = f(ilon0:ilon1,j,k,:)
- endif
- enddo
- enddo
-
-!
-! Do the exchange:
-!
- call mpi_allreduce( sndbuf(:,:,:,1:nf), fpole_jpm2(:,:,:,1:nf), len, MPI_REAL8, MPI_SUM, mpi_comm_edyn, ier )
- if ( ier .ne. MPI_SUCCESS ) call handle_mpi_err(ier,'mp_magpole_3d call mpi_allreduce')
-
- end subroutine mp_magpole_3d
-!-----------------------------------------------------------------------
- subroutine mp_magpoles(f,ilon0,ilon1,ilat0,ilat1,nglblon, jspole,jnpole,fpoles,nf)
-!
-! Similiar to mp_magpole_2d, but returns global longitudes for
-! j==1 and j==nmlat (not for poles +/- 2)
-! Return fpoles(nglblon,2,nf) as:
-! 1: j = jspole (spole)
-! 2: j = jnpole (npole)
-! This can be called with different number of fields nf, but cannot
-! be called w/ > mxnf fields.
-!
-! Args:
- integer,intent(in) :: ilon0,ilon1,ilat0,ilat1,nglblon, jspole,jnpole,nf
- real(r8),intent(in) :: f(ilon0:ilon1,ilat0:ilat1,nf)
- real(r8),intent(out) :: fpoles(nglblon,2,nf)
-!
-! Local:
- integer :: j,ier,len
- real(r8) :: sndbuf(nglblon,2,nf)
-
- sndbuf = 0._r8
- fpoles = 0._r8
- len = nglblon*2*nf
-!
-! Load send buffer with values at poles +/- 2 for current subdomain
-!
- do j=ilat0,ilat1
- if (j==jspole) then ! south pole
- sndbuf(ilon0:ilon1,1,1:nf) = f(ilon0:ilon1,j,:)
- elseif (j==jnpole) then ! npole pole
- sndbuf(ilon0:ilon1,2,1:nf) = f(ilon0:ilon1,j,:)
- endif
- enddo
-
-!
-! Do the exchange:
-!
- call mpi_allreduce( sndbuf(:,:,1:nf), fpoles(:,:,1:nf), len, MPI_REAL8, MPI_SUM, mpi_comm_edyn, ier )
- if ( ier .ne. MPI_SUCCESS ) call handle_mpi_err(ier,'mp_magpoles call mpi_allreduce')
-
- end subroutine mp_magpoles
-!-----------------------------------------------------------------------
- integer function getpe(ix,jx)
- integer,intent(in) :: ix,jx
- integer :: it
-
- getpe = -1
- do it=0,ntask-1
- if ((tasks(it)%lon0 <= ix .and. tasks(it)%lon1 >= ix).and.&
- (tasks(it)%lat0 <= jx .and. tasks(it)%lat1 >= jx)) then
- getpe = it
- exit
+
+ end subroutine mp_distribute_geo
+ !-----------------------------------------------------------------------
+ subroutine mp_distribute_mag(nmlonp1_in, nmlat_in, nmlath_in, nmlev_in)
+ !
+ ! Args:
+ integer, intent(in) :: nmlat_in ! number of mag latitudes
+ integer, intent(in) :: nmlath_in ! index of magnetic equator
+ integer, intent(in) :: nmlonp1_in ! number of longitudes plus periodic point
+ integer, intent(in) :: nmlev_in ! number of levels (= nlev in geo grid)
+ !
+ ! Local:
+ integer :: i, j, n, irank, ier, tidcol, nj, ni, ncells
+ character(len=cl) :: errmsg
+ character(len=*), parameter :: subname = 'mp_distribute_mag'
+ !
+ ! Number of tasks in mag lon,lat same as geo grid:
+ !
+ nmagtaski = ntaski
+ nmagtaskj = ntaskj
+ !
+      ! Store magnetic grid parameters
+ nmlat = nmlat_in
+ nmlath = nmlath_in
+ nmlonp1 = nmlonp1_in
+ nmlev = nmlev_in
+      if (mytid < ntask) then
+         !
+         ! Allocate and set 2d table of tasks:
+         !
+         allocate(itask_table_mag(-1:nmagtaski, -1:nmagtaskj), stat=ier)
+         if (ier /= 0) then
+            write(errmsg,*) '>>> Error allocating itable: nmagtaski = ', nmagtaski, &
+                 ', j = ', nmagtaskj
+ if (masterproc) then
+ write(iulog, errmsg)
+ end if
+ call endrun(errmsg)
+ endif
+ itask_table_mag(:,:) = MPI_PROC_NULL
+ irank = 0
+ do j = 0, nmagtaskj-1
+ do i = 0, nmagtaski-1
+ itask_table_mag(i,j) = irank
+ if (mytid == irank) then
+ magtidi = i
+ magtidj = j
+ endif
+ irank = irank + 1
+ end do
+ !
+ ! Tasks are periodic in longitude:
+ !
+ itask_table_mag(-1,j) = itask_table_mag(nmagtaski-1,j)
+ itask_table_mag(nmagtaski,j) = itask_table_mag(0,j)
+ end do
+
+ if (debug .and. masterproc) then
+ !
+ ! Print table to stdout:
+ write(iulog,"(/,a,/a,i3,a,i2,a,i2,' Mag Task Table:')") subname, &
+ 'ntask=',ntask,' nmagtaski=',nmagtaski,' nmagtaskj=',nmagtaskj
+ do j = -1, nmagtaskj
+ write(iulog,"('j = ',i3,', itask_table_mag(:,j) = ',100i3)") &
+ j, itask_table_mag(:,j)
+ end do
+ end if
+ !
+ ! Calculate start and end indices in mag lon,lat dimensions for each task:
+ !
+ call distribute_1d(1, nmlonp1, nmagtaski, magtidi, mlon0, mlon1)
+ call distribute_1d(1, nmlat, nmagtaskj, magtidj, mlat0, mlat1)
+
+ omlon1 = mlon1
+ if (omlon1 == nmlonp1) then
+ omlon1 = omlon1-1
+ end if
+
+ nj = mlat1 - mlat0 + 1 ! number of mag latitudes for this task
+ ni = mlon1 - mlon0 + 1 ! number of mag longitudes for this task
+ ncells = nj * ni ! total number of grid cells for this task
+
+ if (debug) then
+ !
+ ! Report my stats to stdout:
+ write(6,"(/,a,i3,a,2i3,a,2i3,a,i2,2a,2i3,a,i2,a,i4)") &
+ 'mytid = ',mytid, ', magtidi,j = ', magtidi, magtidj, &
+ ', mlat0,1 = ', mlat0, mlat1, ' (', nj, ')', &
+ ', mlon0,1 = ', mlon0, mlon1, ' (', ni, ') ncells = ', ncells
+ end if
+ !
+ ! Define all task structures with current task values
+ ! (redundant for alltoall):
+ !
+ do n=0,ntask-1
+ tasks(n)%magtidi = magtidi
+ tasks(n)%magtidj = magtidj
+ tasks(n)%nmaglats = nj
+ tasks(n)%nmaglons = ni
+ tasks(n)%mlat0 = mlat0
+ tasks(n)%mlat1 = mlat1
+ tasks(n)%mlon0 = mlon0
+ tasks(n)%mlon1 = mlon1
+ enddo
+ !
+ ! All tasks must have at least 4 longitudes:
+ do n = 0, ntask-1
+ if (tasks(n)%nmaglons < 4) then
+ write(errmsg, "(3a,i0,', nmaglons = ',i4)") '>>> ', subname, &
+ ': each task must carry at least 4 longitudes. task = ', &
+ n, tasks(n)%nmaglons
+ if (masterproc) then
+ write(iulog, errmsg)
+ end if
+ call endrun(errmsg)
+ end if
+ end do
+ !
+ ! Create subgroup communicators for each task column:
+ ! These communicators will be used by sub mp_mag_jslot (mpi.F).
+ !
+ call mpi_comm_split(mpi_comm_edyn, mod(mytid,nmagtaski), mytid, &
+ cols_comm, ier)
+ call MPI_Comm_rank(cols_comm,tidcol,ier)
+
+ if (debug .and. masterproc) then
+ write(iulog,"(2a,i3,' mod(mytid,nmagtaski)=',i3,' tidcol=',i3)") &
+ subname, ': nmagtaski = ', nmagtaski, mod(mytid,nmagtaski), tidcol
+ end if
+ end if
+
+ end subroutine mp_distribute_mag
+ !-----------------------------------------------------------------------
+ subroutine distribute_1d(n1,n2,nprocs,myrank,istart,iend)
+ !
+ ! Distribute work across a 1d vector(n1->n2) to nprocs.
+ ! Return start and end indices for proc myrank.
+ !
+ ! Args:
+ integer,intent(in) :: n1,n2,nprocs,myrank
+ integer,intent(out) :: istart,iend
+ !
+ ! Local:
+ integer :: lenproc,iremain,n
+ !
+ n = n2-n1+1
+ lenproc = n/nprocs
+ iremain = mod(n,nprocs)
+ istart = n1 + myrank*lenproc + min(myrank,iremain)
+ iend = istart+lenproc-1
+ if (iremain > myrank) iend = iend+1
+ end subroutine distribute_1d
+ !-----------------------------------------------------------------------
+ subroutine mp_exchange_tasks(mpi_comm, iprint, gmlat)
+ !
+ ! Args:
+ integer, intent(in) :: mpi_comm
+ integer, intent(in) :: iprint
+ real(r8), intent(in) :: gmlat(:)
+ !
+ ! Local:
+ ! itasks_send(len_task_type,ntask) will be used to send tasks(:) info
+ ! to all tasks (directly passing mpi derived data types is reportedly
+ ! not stable, or not available until MPI 2.x).
+ !
+ integer :: n, ier
+ integer, parameter :: len_task_type = 17 ! see type task above
+ integer, allocatable :: &
+ itasks_send(:,:), & ! send buffer
+         itasks_recv(:,:)     ! receive buffer
+ integer :: npes
+
+ call mpi_comm_size(mpi_comm, npes, ier)
+
+ !
+ ! Pack tasks(mytid) into itasks_send:
+ allocate(itasks_send(len_task_type,0:npes-1),stat=ier)
+ if (ier /= 0) then
+ write(iulog,"(i4,i4)") '>>> Error allocating itasks_send: len_task_type=',&
+ len_task_type,' npes=',npes
+ call endrun('mp_exchange_tasks: unable to allocate itasks_send')
+ endif
+ allocate(itasks_recv(len_task_type,0:npes-1),stat=ier)
+ if (ier /= 0) then
+ write(iulog,"(i4,i4)") '>>> Error allocating itasks_recv: len_task_type=',&
+ len_task_type,' npes=',npes
+ call endrun('mp_exchange_tasks: unable to allocate itasks_recv')
endif
- enddo
- if (getpe < 0) then
- write(iulog,"('getpe: pe with ix=',i4,' not found.')") ix
- call endrun('getpe')
- endif
- end function getpe
-!-----------------------------------------------------------------------
- subroutine mp_pole_halos(f,lev0,lev1,lon0,lon1,lat0,lat1,nf,polesign)
-!
-! Set latitude halo points over the poles.
-!
-! Args:
- integer,intent(in) :: lev0,lev1,lon0,lon1,lat0,lat1,nf
- real(r8),intent(in) :: polesign(nf)
- type(array_ptr_type) :: f(nf) ! (plev,i0-2:i1+2,j0-2:j1+2)
-!
-! Local:
- integer :: if,i,j,k,ihalo,it,i0,i1,j0,j1,itask
-
-! real(r8) :: fglblon(lev0:lev1,nlon,lat0-2:lat1+2,nf)
- type(array_ptr_type) :: pglblon(nf) ! (lev0:lev1,nlon,lat0-2:lat1+2)
-
- if (mytidj /= 0 .and. mytidj /= ntaskj-1) return
-
-! fglblon = 0._r8 ! init
-!
-! Allocate local fields with global longitudes:
- do if=1,nf
- allocate(pglblon(if)%ptr(lev0:lev1,nlon,lat0-2:lat1+2))
- enddo
-!
-! Define my subdomain in local fglblon, which has global lon dimension:
-!
- do if=1,nf
- do j=lat0-2,lat1+2
- do i=lon0,lon1
- pglblon(if)%ptr(lev0:lev1,i,j) = f(if)%ptr(lev0:lev1,i,j)
- enddo
+ do n=0,npes-1
+ itasks_send(1,n) = tasks(mytid)%mytid
+
+ itasks_send(2,n) = tasks(mytid)%mytidi
+ itasks_send(3,n) = tasks(mytid)%mytidj
+ itasks_send(4,n) = tasks(mytid)%nlats
+ itasks_send(5,n) = tasks(mytid)%nlons
+ itasks_send(6,n) = tasks(mytid)%lat0
+ itasks_send(7,n) = tasks(mytid)%lat1
+ itasks_send(8,n) = tasks(mytid)%lon0
+ itasks_send(9,n) = tasks(mytid)%lon1
+
+ itasks_send(10,n) = tasks(mytid)%magtidi
+ itasks_send(11,n) = tasks(mytid)%magtidj
+ itasks_send(12,n) = tasks(mytid)%nmaglats
+ itasks_send(13,n) = tasks(mytid)%nmaglons
+ itasks_send(14,n) = tasks(mytid)%mlat0
+ itasks_send(15,n) = tasks(mytid)%mlat1
+ itasks_send(16,n) = tasks(mytid)%mlon0
+ itasks_send(17,n) = tasks(mytid)%mlon1
enddo
- enddo
-!
-! Gather longitude data to westernmost processors (far north and south):
-!
- call mp_gatherlons_f3d(pglblon,lev0,lev1,lon0,lon1,lat0-2,lat1+2,nf)
-!
-! Loop over tasks in my latitude row (far north or far south),
-! including myself, and set halo points over the poles.
-!
- if (mytidi==0) then
- do it=0,ntaski-1
- itask = tasks(itask_table_geo(it,mytidj))%mytid
- i0 = tasks(itask)%lon0
- i1 = tasks(itask)%lon1
- j0 = tasks(itask)%lat0
- j1 = tasks(itask)%lat1
- do if=1,nf
- if (j0==1) then ! south
- do i=i0,i1
- ihalo = 1+mod(i-1+nlon/2,nlon)
- pglblon(if)%ptr(lev0:lev1,i,j0-2) = pglblon(if)%ptr(lev0:lev1,ihalo,j0+2) ! get lat -1 from lat 3
- pglblon(if)%ptr(lev0:lev1,i,j0-1) = pglblon(if)%ptr(lev0:lev1,ihalo,j0+1) ! get lat 0 from lat 2
+ !
+ ! Send itasks_send and receive itasks_recv:
+ call mpi_alltoall(itasks_send,len_task_type,MPI_INTEGER,&
+ itasks_recv,len_task_type,MPI_INTEGER,&
+ mpi_comm,ier)
+ if (ier /= 0) &
+ call handle_mpi_err(ier,'edyn_mpi: mpi_alltoall to send/recv itasks')
+ !
+ ! Unpack itasks_recv into tasks(n)
+ !
+ do n=0,npes-1
+ tasks(n)%mytid = itasks_recv(1,n)
+
+ tasks(n)%mytidi = itasks_recv(2,n)
+ tasks(n)%mytidj = itasks_recv(3,n)
+ tasks(n)%nlats = itasks_recv(4,n)
+ tasks(n)%nlons = itasks_recv(5,n)
+ tasks(n)%lat0 = itasks_recv(6,n)
+ tasks(n)%lat1 = itasks_recv(7,n)
+ tasks(n)%lon0 = itasks_recv(8,n)
+ tasks(n)%lon1 = itasks_recv(9,n)
+
+ tasks(n)%magtidi = itasks_recv(10,n)
+ tasks(n)%magtidj = itasks_recv(11,n)
+ tasks(n)%nmaglats = itasks_recv(12,n)
+ tasks(n)%nmaglons = itasks_recv(13,n)
+ tasks(n)%mlat0 = itasks_recv(14,n)
+ tasks(n)%mlat1 = itasks_recv(15,n)
+ tasks(n)%mlon0 = itasks_recv(16,n)
+ tasks(n)%mlon1 = itasks_recv(17,n)
+ !
+ ! Report to stdout:
+ !
+ if (n==mytid.and.iprint > 0) then
+ write(iulog,"(/,'Task ',i3,':')") n
+ write(iulog,"(/,'Subdomain on geographic grid:')")
+ write(iulog,"('tasks(',i3,')%mytid =',i3)") n,tasks(n)%mytid
+ write(iulog,"('tasks(',i3,')%mytidi=',i3)") n,tasks(n)%mytidi
+ write(iulog,"('tasks(',i3,')%mytidj=',i3)") n,tasks(n)%mytidj
+ write(iulog,"('tasks(',i3,')%nlats =',i3)") n,tasks(n)%nlats
+ write(iulog,"('tasks(',i3,')%nlons =',i3)") n,tasks(n)%nlons
+ write(iulog,"('tasks(',i3,')%lat0 =',i3)") n,tasks(n)%lat0
+ write(iulog,"('tasks(',i3,')%lat1 =',i3)") n,tasks(n)%lat1
+ write(iulog,"('tasks(',i3,')%lon0 =',i3)") n,tasks(n)%lon0
+ write(iulog,"('tasks(',i3,')%lon1 =',i3)") n,tasks(n)%lon1
+ write(iulog,"('Number of geo subdomain grid points = ',i6)") &
+ tasks(n)%nlons * tasks(n)%nlats
+ write(iulog,"(/,'Subdomain on geomagnetic grid:')")
+ write(iulog,"('tasks(',i3,')%magtidi=',i3)") n,tasks(n)%magtidi
+ write(iulog,"('tasks(',i3,')%magtidj=',i3)") n,tasks(n)%magtidj
+ write(iulog,"('tasks(',i3,')%nmaglats =',i3)") n,tasks(n)%nmaglats
+ write(iulog,"('tasks(',i3,')%nmaglons =',i3)") n,tasks(n)%nmaglons
+ write(iulog,"('tasks(',i3,')%mlat0 =',i3)") n,tasks(n)%mlat0
+ write(iulog,"('tasks(',i3,')%mlat1 =',i3)") n,tasks(n)%mlat1
+ write(iulog,"('tasks(',i3,')%mlon0 =',i3)") n,tasks(n)%mlon0
+ write(iulog,"('tasks(',i3,')%mlon1 =',i3)") n,tasks(n)%mlon1
+ write(iulog,"('Number of mag subdomain grid points = ',i6)") &
+ tasks(n)%nmaglons * tasks(n)%nmaglats
+ endif
+ enddo
+ !
+ ! Release locally allocated space:
+ deallocate(itasks_send)
+ deallocate(itasks_recv)
+ !
+ ! mxlon / mxlat is the maximum number of lons / lats owned by any task:
+ mxlon = -9999
+ do n= 0, npes-1
+ if (tasks(n)%nlons > mxlon) then
+ mxlon = tasks(n)%nlons
+ end if
+ end do
+ mxlat = -9999
+ do n = 0, npes-1
+ if (tasks(n)%nlats > mxlat) then
+ mxlat = tasks(n)%nlats
+ end if
+ end do
+ !
+ ! mxmaglon / mxmaglat is max number of mag lons / lats owned by any task:
+ mxmaglon = -9999
+ do n = 0, npes-1
+ if (tasks(n)%nmaglons > mxmaglon) then
+ mxmaglon = tasks(n)%nmaglons
+ end if
+ end do
+ mxmaglat = -9999
+ do n = 0, npes-1
+ if (tasks(n)%nmaglats > mxmaglat) then
+ mxmaglat = tasks(n)%nmaglats
+ end if
+ end do
+ !
+ ! Find conjugate points for folding hemispheres:
+ call conjugate_points(gmlat)
+
+ end subroutine mp_exchange_tasks
+ !-----------------------------------------------------------------------
+ subroutine mp_mageq(fin,fout,nf,mlon0,mlon1,mlat0,mlat1,nmlev)
+ !
+  ! Each task needs values of conductivities and adotv1,2 fields
+  ! at the mag equator for its longitude subdomain (and all levels), for
+ ! the fieldline integrations.
+ !
+ ! On input, fin is ped_mag, hal_mag, adotv1_mag, adotv2_mag
+ ! on (i,j,k) magnetic subdomain.
+ ! On output, fout(mlon0:mlon1,nmlev,nf) is ped_meq, hal_meq, adotv1_meq,
+ ! adotv2_meq at mag equator at longitude subdomain and all levels.
+ !
+ ! Args:
+ integer :: mlon0,mlon1,mlat0,mlat1,nmlev,nf
+ real(r8),intent(in) :: fin (mlon0:mlon1,mlat0:mlat1,nmlev,nf)
+ real(r8),intent(out) :: fout(mlon0:mlon1,nmlev,nf)
+ !
+ ! Local:
+ real(r8) :: & ! mpi buffers
+ sndbuf(mxmaglon,nmlev,nf), & ! mxmaglon,nmlev,nf
+ rcvbuf(mxmaglon,nmlev,nf) ! mxmaglon,nmlev,nf
+ integer :: i,j,n,itask,ier,len,jlateq,ireqsend,ireqrecv
+ integer :: irstat(MPI_STATUS_SIZE) ! mpi receive status
+ logical :: have_eq
+
+ sndbuf = 0._r8
+ rcvbuf = 0._r8
+ len = mxmaglon*nmlev*nf
+ !
+ ! If mag equator is in current subdomain, load it into sndbuf
+ ! and send to other tasks in my task column (mytidi)
+ !
+ jlateq = (nmlat+1)/2 ! lat index of mag equator (49)
+ have_eq = .false.
+ do j=mlat0,mlat1
+ if (j == jlateq) then ! load send buffer w/ data at equator
+ have_eq = .true.
+ do i=mlon0,mlon1
+ sndbuf(i-mlon0+1,:,:) = fin(i,j,:,:)
enddo
- else ! north
- do i=i0,i1
- ihalo = 1+mod(i-1+nlon/2,nlon)
- pglblon(if)%ptr(lev0:lev1,i,j1+1) = pglblon(if)%ptr(lev0:lev1,ihalo,j1-1) ! get lat plat+1 from plat-1
- pglblon(if)%ptr(lev0:lev1,i,j1+2) = pglblon(if)%ptr(lev0:lev1,ihalo,j1-2) ! get lat plat+2 from plat-2
+ !
+ ! Send mag equator data to other tasks in my task column (mytidi):
+ do itask=0,ntask-1
+ if (itask /= mytid.and.tasks(itask)%mytidi==mytidi) then
+ call mpi_isend(sndbuf,len,MPI_REAL8,itask,1, &
+ mpi_comm_edyn,ireqsend,ier)
+ if (ier /= 0) call handle_mpi_err(ier,'mp_mageq isend')
+ call mpi_wait(ireqsend,irstat,ier)
+ endif ! another task in mytidi
+ enddo ! itask=0,ntask-1
+ endif ! j==jlateq
+ enddo ! j=mlat0,mlat1
+ !
+ ! Receive by other tasks in the sending task's column:
+ fout = 0._r8
+ if (.not.have_eq) then ! find task to receive from
+ do itask=0,ntask-1
+ do j=tasks(itask)%mlat0,tasks(itask)%mlat1
+ if (j == jlateq.and.tasks(itask)%mytidi==mytidi) then
+ call mpi_irecv(rcvbuf,len,MPI_REAL8,itask,1, &
+ mpi_comm_edyn,ireqrecv,ier)
+ if (ier /= 0) call handle_mpi_err(ier,'mp_mageq irecv')
+ call mpi_wait(ireqrecv,irstat,ier)
+ do n=1,nf
+ do i=mlon0,mlon1
+ fout(i,:,n) = rcvbuf(i-mlon0+1,:,n)
+ enddo
+ enddo
+ endif ! itask has mag eq and is in my column (sending task)
+ enddo ! scan itask latitudes
+ enddo ! task table search
+ !
+ ! If I am the sending task, set fout to equator values of input array:
+ else
+ do n=1,nf
+ do i=mlon0,mlon1
+ fout(i,:,n) = fin(i,jlateq,:,n)
enddo
- endif
- enddo ! if=1,nf
- enddo ! it=0,ntaski-1
- endif ! mytidi==0
-!
-! Scatter data back out to processors in my latitude row:
-!
- call mp_scatterlons_f3d(pglblon,lev0,lev1,lon0,lon1,lat0-2,lat1+2,nf)
-!
-! Finally, define halo points in data arrays from local global lon array,
-! changing sign if necessary (winds):
-!
- if (lat0==1) then ! south
+ enddo
+ endif ! I am receiving or sending task
+ end subroutine mp_mageq
+ !-----------------------------------------------------------------------
+ subroutine mp_mageq_jpm1(f,mlon0,mlon1,mlat0,mlat1,nmlonp1,feq_jpm1,nf)
+ !
+ ! All tasks need data at mag latitudes equator-1, equator+1 at global
+ ! longitudes.
+ ! On input: f is 6 fields on mag subdomains: zigm11,zigm22,zigmc,zigm2,rim1,rim2
+ ! On output: feq_jpm1(nmlonp1,2,nf)
+ !
+ ! Args:
+ integer,intent(in) :: mlon0,mlon1,mlat0,mlat1,nmlonp1,nf
+ real(r8),intent(in) :: f(mlon0:mlon1,mlat0:mlat1,nf)
+ real(r8),intent(out) :: feq_jpm1(nmlonp1,2,nf) ! eq-1,eq+1
+ !
+ ! Local:
+ integer :: j,ier,len,jlateq
+ real(r8) :: sndbuf(nmlonp1,2,nf)
+
+ sndbuf = 0._r8
+ feq_jpm1 = 0._r8
+ len = nmlonp1*2*nf
+ !
+ ! Load send buffer w/ eq +/- 1 for current subdomain
+ ! (redundant to all tasks for alltoall)
+ !
+ jlateq = (nmlat+1)/2
+ do j=mlat0,mlat1
+ if (j == jlateq+1) then ! equator+1
+ sndbuf(mlon0:mlon1,1,:) = f(mlon0:mlon1,j,:)
+ elseif (j == jlateq-1) then ! equator-1
+ sndbuf(mlon0:mlon1,2,:) = f(mlon0:mlon1,j,:)
+ endif ! j==jlateq
+ enddo ! j=mlat0,mlat1
+ !
+ ! Do the exchange:
+ !
+ call mpi_allreduce( sndbuf(:,:,1:nf), feq_jpm1(:,:,1:nf), len, MPI_REAL8, MPI_SUM, mpi_comm_edyn, ier )
+ if ( ier .ne. MPI_SUCCESS ) call handle_mpi_err(ier,'mp_mageq_jpm1 call mpi_allreduce')
+
+ !
+ ! Periodic point:
+ feq_jpm1(nmlonp1,:,:) = feq_jpm1(1,:,:)
+
+ end subroutine mp_mageq_jpm1
+ !-----------------------------------------------------------------------
+ subroutine mp_mageq_jpm3(f,mlon0,mlon1,mlat0,mlat1,nmlonp1,feq_jpm3,nf)
+ !
+ ! All tasks need global longitudes at mag latitudes equator,
+ ! and equator +/- 1,2,3
+ ! On input: f is nf fields on mag subdomains
+ ! On output: feq_jpm3(nmlonp1,-3:3,nf) has global lons at eq, eq +/- 1,2,3
+ ! 2nd dimension of feq_jpm3 (and send/recv buffers) is as follows:
+ ! +3: eq+3
+ ! +2: eq+2
+ ! +1: eq+1
+ ! 0: eq
+ ! -1: eq-1
+ ! -2: eq-2
+ ! -3: eq-3
+ !
+ ! Args:
+ integer,intent(in) :: mlon0,mlon1,mlat0,mlat1,nmlonp1,nf
+ real(r8),intent(in) :: f(mlon0:mlon1,mlat0:mlat1,nf)
+ real(r8),intent(out) :: feq_jpm3(nmlonp1,-3:3,nf)
+ !
+ ! Local:
+ integer :: j,ier,len,jlateq
+ integer,parameter :: mxnf=6
+
+ real(r8) :: sndbuf(nmlonp1,-3:3,mxnf)
+
+ if (nf > mxnf) then
+ write(iulog,"('>>> mp_mageq_jpm3: nf=',i4,' but cannot be called with greater than mxnf=',i4)") &
+ nf,mxnf
+ call endrun('mp_mageq_jpm3')
+ endif
+
+ sndbuf = 0._r8
+ feq_jpm3 = 0._r8
+ len = nmlonp1*7*nf
+ !
+ ! Load send buffer w/ eq +/- 3 for current subdomain
+ !
+ jlateq = (nmlat+1)/2
+ do j=mlat0,mlat1
+ if (j == jlateq-3) then ! equator-3
+ sndbuf(mlon0:mlon1,-3,1:nf) = f(mlon0:mlon1,j,:)
+ elseif (j == jlateq-2) then ! equator-2
+ sndbuf(mlon0:mlon1,-2,1:nf) = f(mlon0:mlon1,j,:)
+ elseif (j == jlateq-1) then ! equator-1
+ sndbuf(mlon0:mlon1,-1,1:nf) = f(mlon0:mlon1,j,:)
+ elseif (j == jlateq) then ! equator
+ sndbuf(mlon0:mlon1,0,1:nf) = f(mlon0:mlon1,j,:)
+ elseif (j == jlateq+1) then ! equator+1
+ sndbuf(mlon0:mlon1,1,1:nf) = f(mlon0:mlon1,j,:)
+ elseif (j == jlateq+2) then ! equator+2
+ sndbuf(mlon0:mlon1,2,1:nf) = f(mlon0:mlon1,j,:)
+ elseif (j == jlateq+3) then ! equator+3
+ sndbuf(mlon0:mlon1,3,1:nf) = f(mlon0:mlon1,j,:)
+ endif ! j==jlateq
+ enddo ! j=mlat0,mlat1
+ !
+ ! Do the exchange:
+ !
+ call mpi_allreduce( sndbuf(:,:,1:nf), feq_jpm3(:,:,1:nf), len, MPI_REAL8, MPI_SUM, mpi_comm_edyn, ier )
+ if ( ier .ne. MPI_SUCCESS ) call handle_mpi_err(ier,'mp_mageq_jpm3 call mpi_allreduce')
+
+ !
+ ! Periodic point:
+ feq_jpm3(nmlonp1,:,:) = feq_jpm3(1,:,:)
+
+ end subroutine mp_mageq_jpm3
+ !-----------------------------------------------------------------------
+ subroutine mp_magpole_2d(f,ilon0,ilon1,ilat0,ilat1, &
+ nglblon,jspole,jnpole,fpole_jpm2,nf)
+ !
+ ! Return fpole_jpm2(nglblon,1->4,nf) as:
+ ! 1: j = jspole+1 (spole+1)
+ ! 2: j = jspole+2 (spole+2)
+ ! 3: j = jnpole-1 (npole-1)
+ ! 4: j = jnpole-2 (npole-2)
+ ! This can be called with different number of fields nf, but cannot
+ ! be called w/ > mxnf fields.
+ !
+ ! Args:
+ integer,intent(in) :: ilon0,ilon1,ilat0,ilat1,nglblon,jspole,jnpole,nf
+ real(r8),intent(in) :: f(ilon0:ilon1,ilat0:ilat1,nf)
+ real(r8),intent(out) :: fpole_jpm2(nglblon,4,nf)
+ !
+ ! Local:
+ integer :: j,ier,len
+ integer,parameter :: mxnf=6
+ real(r8) :: sndbuf(nglblon,4,mxnf)
+
+ if (nf > mxnf) then
+ write(iulog,"('>>> mp_magpole_2d: nf=',i4,' but cannot be called with greater than mxnf=',i4)") &
+ nf,mxnf
+ call endrun('mp_magpole_2d')
+ endif
+
+ sndbuf = 0._r8
+ fpole_jpm2 = 0._r8
+ len = nglblon*4*nf
+ !
+ ! Load send buffer with values at poles +/- 2 for current subdomain
+ !
+ do j=ilat0,ilat1
+ if (j==jspole+1) then ! south pole +1
+ sndbuf(ilon0:ilon1,1,1:nf) = f(ilon0:ilon1,j,:)
+ elseif (j==jspole+2) then ! south pole +2
+ sndbuf(ilon0:ilon1,2,1:nf) = f(ilon0:ilon1,j,:)
+ elseif (j==jnpole-1) then ! north pole -1
+ sndbuf(ilon0:ilon1,3,1:nf) = f(ilon0:ilon1,j,:)
+ elseif (j==jnpole-2) then ! north pole -2
+ sndbuf(ilon0:ilon1,4,1:nf) = f(ilon0:ilon1,j,:)
+ endif
+ enddo
+
+ !
+ ! Do the exchange:
+ !
+ call mpi_allreduce( sndbuf(:,:,1:nf), fpole_jpm2(:,:,1:nf), len, MPI_REAL8, MPI_SUM, mpi_comm_edyn, ier )
+ if ( ier .ne. MPI_SUCCESS ) call handle_mpi_err(ier,'mp_magpole_2d call mpi_allreduce')
+
+ end subroutine mp_magpole_2d
+ !-----------------------------------------------------------------------
+ subroutine mp_magpole_3d(f,ilon0,ilon1,ilat0,ilat1,nlev, nglblon,jspole,jnpole,fpole_jpm2,nf)
+ !
+ ! Gather field values at the four latitudes adjacent to the magnetic
+ ! poles over ALL global longitudes. Each task contributes its own
+ ! subdomain slice to a zeroed buffer; an MPI_SUM allreduce then makes
+ ! the full-longitude rows available to every task in mpi_comm_edyn.
+ !
+ ! Return fpole_jpm2(nglblon,1->4,nlev,nf) as:
+ ! 1: j = jspole+1 (spole+1)
+ ! 2: j = jspole+2 (spole+2)
+ ! 3: j = jnpole-1 (npole-1)
+ ! 4: j = jnpole-2 (npole-2)
+ ! This can be called with different number of fields nf, but cannot
+ ! be called w/ > mxnf fields.
+ !
+ ! Args:
+ ! ilon0:ilon1,ilat0:ilat1 = this task's subdomain bounds
+ ! nglblon = number of global longitudes; jspole,jnpole = pole j-indices
+ integer,intent(in) :: ilon0,ilon1,ilat0,ilat1,nglblon,&
+ jspole,jnpole,nf,nlev
+ real(r8),intent(in) :: f(ilon0:ilon1,ilat0:ilat1,nlev,nf)
+ real(r8),intent(out) :: fpole_jpm2(nglblon,4,nlev,nf)
+ !
+ ! Local:
+ integer :: j,k,ier,len
+ integer,parameter :: mxnf=6 ! sndbuf is statically sized for at most mxnf fields
+ real(r8) :: sndbuf(nglblon,4,nlev,mxnf)
+
+ ! Guard against overrunning the fixed last dimension of sndbuf:
+ if (nf > mxnf) then
+ write(iulog,"('>>> mp_magpole_3d: nf=',i4,' but cannot be called with greater than mxnf=',i4)") &
+ nf,mxnf
+ call endrun('mp_magpole_3d')
+ endif
+
+ ! Zero both buffers so the SUM reduction simply merges the disjoint
+ ! longitude ranges contributed by each task:
+ sndbuf = 0._r8
+ fpole_jpm2 = 0._r8
+ len = nglblon*4*nlev*nf
+ !
+ ! Load send buffer with values at poles +/- 2 for current subdomain
+ !
+ do j=ilat0,ilat1
+ do k=1,nlev
+ if (j==jspole+1) then ! south pole +1
+ sndbuf(ilon0:ilon1,1,k,1:nf) = f(ilon0:ilon1,j,k,:)
+ elseif (j==jspole+2) then ! south pole +2
+ sndbuf(ilon0:ilon1,2,k,1:nf) = f(ilon0:ilon1,j,k,:)
+ elseif (j==jnpole-1) then ! north pole -1
+ sndbuf(ilon0:ilon1,3,k,1:nf) = f(ilon0:ilon1,j,k,:)
+ elseif (j==jnpole-2) then ! north pole -2
+ sndbuf(ilon0:ilon1,4,k,1:nf) = f(ilon0:ilon1,j,k,:)
+ endif
+ enddo
+ enddo
+
+ !
+ ! Do the exchange:
+ !
+ call mpi_allreduce( sndbuf(:,:,:,1:nf), fpole_jpm2(:,:,:,1:nf), len, MPI_REAL8, MPI_SUM, mpi_comm_edyn, ier )
+ if ( ier .ne. MPI_SUCCESS ) call handle_mpi_err(ier,'mp_magpole_3d call mpi_allreduce')
+
+ end subroutine mp_magpole_3d
+ !-----------------------------------------------------------------------
+ subroutine mp_magpoles(f,ilon0,ilon1,ilat0,ilat1,nglblon, jspole,jnpole,fpoles,nf)
+ !
+ ! Similar to mp_magpole_2d, but returns global longitudes for
+ ! j==1 and j==nmlat (not for poles +/- 2)
+ ! Return fpoles(nglblon,2,nf) as:
+ ! 1: j = jspole (spole)
+ ! 2: j = jnpole (npole)
+ ! This can be called with a different number of fields nf each time;
+ ! unlike mp_magpole_2d/_3d there is no mxnf limit here, because the
+ ! send buffer is dimensioned with nf directly.
+ !
+ ! Args:
+ integer,intent(in) :: ilon0,ilon1,ilat0,ilat1,nglblon, jspole,jnpole,nf
+ real(r8),intent(in) :: f(ilon0:ilon1,ilat0:ilat1,nf)
+ real(r8),intent(out) :: fpoles(nglblon,2,nf)
+ !
+ ! Local:
+ integer :: j,ier,len
+ real(r8) :: sndbuf(nglblon,2,nf)
+
+ ! Zero buffers so the SUM reduction merges the disjoint longitude
+ ! ranges contributed by each task:
+ sndbuf = 0._r8
+ fpoles = 0._r8
+ len = nglblon*2*nf
+ !
+ ! Load send buffer with values at the poles for current subdomain
+ !
+ do j=ilat0,ilat1
+ if (j==jspole) then ! south pole
+ sndbuf(ilon0:ilon1,1,1:nf) = f(ilon0:ilon1,j,:)
+ elseif (j==jnpole) then ! north pole
+ sndbuf(ilon0:ilon1,2,1:nf) = f(ilon0:ilon1,j,:)
+ endif
+ enddo
+
+ !
+ ! Do the exchange:
+ !
+ call mpi_allreduce( sndbuf(:,:,1:nf), fpoles(:,:,1:nf), len, MPI_REAL8, MPI_SUM, mpi_comm_edyn, ier )
+ if ( ier .ne. MPI_SUCCESS ) call handle_mpi_err(ier,'mp_magpoles call mpi_allreduce')
+
+ end subroutine mp_magpoles
+ !-----------------------------------------------------------------------
+ integer function getpe(ix,jx)
+ !
+ ! Return the id of the task whose geographic subdomain contains global
+ ! indices (ix,jx); abort the run if no task owns that point.
+ !
+ integer,intent(in) :: ix,jx ! global longitude, latitude indices
+ integer :: it
+
+ getpe = -1
+ do it=0,ntask-1
+ if ((tasks(it)%lon0 <= ix .and. tasks(it)%lon1 >= ix).and.&
+ (tasks(it)%lat0 <= jx .and. tasks(it)%lat1 >= jx)) then
+ getpe = it
+ exit
+ endif
+ enddo
+ if (getpe < 0) then
+ ! Report both coordinates: the search is two-dimensional, so a
+ ! failure may be due to either ix or jx being out of range.
+ write(iulog,"('getpe: pe with ix=',i4,' jx=',i4,' not found.')") ix,jx
+ call endrun('getpe')
+ endif
+ end function getpe
+ !-----------------------------------------------------------------------
+ subroutine mp_pole_halos(f,lev0,lev1,lon0,lon1,lat0,lat1,nf,polesign)
+ !
+ ! Set latitude halo points over the poles.
+ !
+ ! Args:
+ integer,intent(in) :: lev0,lev1,lon0,lon1,lat0,lat1,nf
+ real(r8),intent(in) :: polesign(nf)
+ type(array_ptr_type) :: f(nf) ! (plev,i0-2:i1+2,j0-2:j1+2)
+ !
+ ! Local:
+ integer :: if,i,j,k,ihalo,it,i0,i1,j0,j1,itask
+
+ ! real(r8) :: fglblon(lev0:lev1,nlon,lat0-2:lat1+2,nf)
+ type(array_ptr_type) :: pglblon(nf) ! (lev0:lev1,nlon,lat0-2:lat1+2)
+
+ if (mytidj /= 0 .and. mytidj /= ntaskj-1) return
+
+ ! fglblon = 0._r8 ! init
+ !
+ ! Allocate local fields with global longitudes:
do if=1,nf
- do j=lat0-2,lat0-1
- do k=lev0,lev1
- f(if)%ptr(k,lon0:lon1,j) = pglblon(if)%ptr(k,lon0:lon1,j)*polesign(if)
- enddo
- enddo
+ allocate(pglblon(if)%ptr(lev0:lev1,nlon_geo,lat0-2:lat1+2))
enddo
- else ! north
+ !
+ ! Define my subdomain in local fglblon, which has global lon dimension:
+ !
do if=1,nf
- do j=lat1+1,lat1+2
- do k=lev0,lev1
- f(if)%ptr(k,lon0:lon1,j) = pglblon(if)%ptr(k,lon0:lon1,j)*polesign(if)
- enddo
- enddo
+ do j=lat0-2,lat1+2
+ do i=lon0,lon1
+ pglblon(if)%ptr(lev0:lev1,i,j) = f(if)%ptr(lev0:lev1,i,j)
+ enddo
+ enddo
enddo
- endif
-
- do if=1,nf
- deallocate(pglblon(if)%ptr)
- enddo
- end subroutine mp_pole_halos
-!-----------------------------------------------------------------------
- subroutine conjugate_points
- use edyn_maggrid,only: gmlat
-!
-! Local:
- integer :: ier,j,js,jn,itask,jj
-!
-! nsend_south(ntask): number of lats in south to send north
-! nrecv_north(ntask): number of lats in north to recv from south
-!
- allocate(nsend_south(0:ntask-1),stat=ier)
- allocate(nrecv_north(0:ntask-1),stat=ier)
-!
-! send_south_coords: south j lats to send north
-! recv_north_coords: north j lats to recv from south
-!
- allocate(send_south_coords(mxmaglat,0:ntask-1),stat=ier)
- allocate(recv_north_coords(mxmaglat,0:ntask-1),stat=ier)
-
- nsend_south(:) = 0
- nrecv_north(:) = 0
- send_south_coords(:,:) = 0
- recv_north_coords(:,:) = 0
-
- magloop: do j=mlat0,mlat1
-!
-! In north hem: find tasks w/ conjugate points in south to recv:
-! (nmlath is in params module)
- if (gmlat(j) > 0._r8) then ! in north hem of current task
- js = nmlath-(j-nmlath) ! j index to south conjugate point (should be -j)
- do itask=0,ntask-1
- do jj = tasks(itask)%mlat0,tasks(itask)%mlat1
-!
-! Receive these north coords from the south:
- if (jj==js.and.mlon0==tasks(itask)%mlon0.and. &
- mlon1==tasks(itask)%mlon1) then
- nrecv_north(itask) = nrecv_north(itask)+1
- recv_north_coords(nrecv_north(itask),itask) = j
- endif
- enddo ! jj of remote task
- enddo ! itask=0,ntask-1
- if (all(nrecv_north==0)) &
- write(iulog,"(2a,i4,a,f8.2)") '>>> WARNING: could not find north conjugate',&
- ' points corresponding to south latitude js=',js,' gmlat(js)=',gmlat(js)
-!
-! In south hem: find tasks w/ conjugate points in north to send:
- elseif (gmlat(j) < 0._r8.and.j /= nmlath) then ! in south hem
- jn = nmlath+(nmlath-j) ! j index of north conjugate point
- do itask=0,ntask-1
- do jj = tasks(itask)%mlat0,tasks(itask)%mlat1
- if (jj==jn.and.mlon0==tasks(itask)%mlon0.and. &
- mlon1==tasks(itask)%mlon1) then
- nsend_south(itask) = nsend_south(itask)+1
-! Send these south coords to the north:
- send_south_coords(nsend_south(itask),itask) = j
- endif
- enddo ! jj of remote task
- enddo ! itask=0,ntask-1
- if (all(nsend_south==0)) &
- write(iulog,"(2a,i4,a,f8.2)") '>>> WARNING: could not find south conjugate',&
- ' points corresponding to north latitude jn=',jn,' gmlat(jn)=',gmlat(jn)
- endif ! in north or south hem
- enddo magloop ! j=mlat0,mlat1
- end subroutine conjugate_points
-!-----------------------------------------------------------------------
- subroutine mp_mag_foldhem(f,mlon0,mlon1,mlat0,mlat1,nf)
-!
-! For each point in northern hemisphere (if any) of the current task
-! subdomain, receive data from conjugate point in the south (from the
-! south task that owns it), and sum it to the north point data.
-! Do this for nf fields. Conjugate point indices to send/recv to/from
-! each task were determined by sub conjugate_points (this module).
-! nsend_south, ! number of south lats to send to north (each task)
-! nrecv_north ! number of north lats to send to south (each task)
-!
-! This routine is called from edynamo at every timestep.
-! Sub conjugate_points is called once per run, from mp_distribute.
-!
-! Args:
- integer,intent(in) :: mlon0,mlon1,mlat0,mlat1,nf
- real(r8),intent(inout) :: f(mlon0:mlon1,mlat0:mlat1,nf)
-!
-! Local:
- integer :: j,n,len,itask,ifld,ier,nmlons
- real(r8) :: sndbuf(mxmaglon,mxmaglat,nf,0:ntask-1)
- real(r8) :: rcvbuf(mxmaglon,mxmaglat,nf,0:ntask-1)
- integer :: jsend(0:ntask-1),jrecv(0:ntask-1)
- integer :: irstat(MPI_STATUS_SIZE) ! mpi receive status
-
-!
- sndbuf = 0._r8 ; rcvbuf = 0._r8
- jsend = 0 ; jrecv = 0
- len = mxmaglon*mxmaglat*nf
- nmlons = mlon1-mlon0+1
-!
-! Send south data to north itask:
-! (To avoid deadlock, do not send if north task is also myself. This will
-! happen when there is an odd number of tasks in the latitude dimension,
-! e.g., ntask == 12, 30, etc)
-!
- do itask=0,ntask-1
-
-! Attempt to fetch from allocatable variable NSEND_SOUTH when it is not allocated
-
- if (nsend_south(itask) > 0 .and. itask /= mytid) then
- do ifld = 1,nf
- do n=1,nsend_south(itask)
- sndbuf(1:nmlons,n,ifld,itask) = &
- f(:,send_south_coords(n,itask),ifld)
- enddo
- enddo ! ifld=1,nf
- call mpi_isend(sndbuf(1,1,1,itask),len,MPI_REAL8, &
- itask,1,mpi_comm_edyn,jsend(itask),ier)
- call mpi_wait(jsend(itask),irstat,ier)
- endif ! nsend_south(itask) > 0
- enddo ! itask=0,ntask-1
-!
-! Receive north data from south itask and add to north,
-! i.e., north = north+south. (do not receive if south task is
-! also myself, but do add south data to my north points, see below)
-!
- do itask=0,ntask-1
- if (nrecv_north(itask) > 0 .and. itask /= mytid) then
- call mpi_irecv(rcvbuf(1,1,1,itask),len,MPI_REAL8, &
- itask,1,mpi_comm_edyn,jrecv(itask),ier)
- call mpi_wait(jrecv(itask),irstat,ier)
- do ifld=1,nf
- do n=1,nrecv_north(itask)
-!
-! Receive lats in reverse order:
- f(mlon0:mlon1, &
- recv_north_coords(nrecv_north(itask)-n+1,itask),ifld) = &
- f(mlon0:mlon1, &
- recv_north_coords(nrecv_north(itask)-n+1,itask),ifld) + &
- rcvbuf(1:nmlons,n,ifld,itask)
- enddo ! n=1,nrecv_north(itask)
- enddo ! ifld=1,nf
-!
-! If I am send *and* receive task, simply add my south data to my north points:
- elseif (nrecv_north(itask) > 0 .and. itask == mytid) then
- do ifld=1,nf
- do n=1,nrecv_north(itask)
- f(mlon0:mlon1, &
- recv_north_coords(nrecv_north(itask)-n+1,itask),ifld) = &
- f(mlon0:mlon1, &
- recv_north_coords(nrecv_north(itask)-n+1,itask),ifld) + &
- f(mlon0:mlon1,send_south_coords(n,itask),ifld)
- enddo ! n=1,nrecv_north(itask)
- enddo ! ifld=1,nf
- endif ! nrecv_north(itask) > 0
- enddo ! itask=0,ntask-1
-!
-! Mag equator is also "folded", but not included in conjugate points,
-! so double it here:
- do j=mlat0,mlat1
- if (j==nmlath) then
- do ifld=1,nf
- f(:,j,ifld) = f(:,j,ifld)+f(:,j,ifld)
- enddo
- endif
- enddo
-
- end subroutine mp_mag_foldhem
-!-----------------------------------------------------------------------
- subroutine mp_mag_periodic_f2d(f,mlon0,mlon1,mlat0,mlat1,nf)
-!
-! Args:
- integer,intent(in) :: mlon0,mlon1,mlat0,mlat1,nf
- real(r8),intent(inout) :: f(mlon0:mlon1,mlat0:mlat1,nf)
-!
-! Local:
- integer :: j,ier,idest,isrc,len,ireqsend,ireqrecv,msgtag
- real(r8) :: sndbuf(mxmaglat,nf),rcvbuf(mxmaglat,nf)
- integer :: irstat(MPI_STATUS_SIZE) ! mpi receive status
-
- if (ntaski>1) then
- len = mxmaglat*nf
- !
- ! I am a western-most task. Send lon 1 to eastern-most tasks:
- if (mytidi==0) then
- idest = itask_table_mag(ntaski-1,mytidj)
- do j=mlat0,mlat1
- sndbuf(j-mlat0+1,:) = f(1,j,:)
- enddo
- msgtag = mytid
- call mpi_isend(sndbuf,len,MPI_REAL8,idest,msgtag,mpi_comm_edyn, ireqsend,ier)
- if (ier /= 0) call handle_mpi_err(ier,'mp_mag_periodic_f2d send to idest')
- call mpi_wait(ireqsend,irstat,ier)
- if (ier /= 0) call handle_mpi_err(ier,'mp_mag_periodic_f2d wait for send')
- !
- ! I am eastern-most task. Receive lon 1 from western-most tasks,
- ! and assign to nmlonp1:
- elseif (mytidi==ntaski-1) then
- isrc = itask_table_mag(0,mytidj)
- msgtag = isrc
- call mpi_irecv(rcvbuf,len,MPI_REAL8,isrc,msgtag,mpi_comm_edyn, ireqrecv,ier)
- if (ier /= 0) call handle_mpi_err(ier,'mp_mag_periodic_f2d recv from isrc')
- call mpi_wait(ireqrecv,irstat,ier)
- if (ier /= 0) call handle_mpi_err(ier,'mp_mag_periodic_f2d wait for recv')
-
- do j=mlat0,mlat1
- f(nmlonp1,j,:) = rcvbuf(j-mlat0+1,:)
- enddo
- endif ! mytidi == 0 or ntaski-1
- else
- do j=mlat0,mlat1
- f(nmlonp1,j,:) = f(1,j,:)
- enddo
- endif
-
- end subroutine mp_mag_periodic_f2d
-!-----------------------------------------------------------------------
- subroutine mp_mag_halos(fmsub,mlon0,mlon1,mlat0,mlat1,nf)
-!
-! Exchange halo/ghost points between magnetic grid subdomains for nf fields.
-! Only a single halo point is required in both lon and lat dimensions.
-! Note that all tasks in any row of the task matrix have the same
-! mlat0,mlat1, and that all tasks in any column of the task matrix
-! have the same mlon0,mlon1.
-! Longitude halos are done first, exchanging mlat0:mlat1, then latitude
-! halos are done, exchanging mlon0-1:mlon1+1 (i.e., including the
-! longitude halos that were defined first).
-!
-! Args:
- integer,intent(in) :: mlon0,mlon1,mlat0,mlat1,nf
- real(r8),intent(inout) :: fmsub(mlon0-1:mlon1+1,mlat0-1:mlat1+1,nf)
-!
-! Local:
- integer :: ifld,west,east,north,south,len,isend0,isend1, &
- irecv0,irecv1,ier,nmlats,istat(MPI_STATUS_SIZE,4),ireq(4),nmlons
- real(r8),dimension(mlat1-mlat0+1,nf)::sndlon0,sndlon1,rcvlon0,rcvlon1
- real(r8),dimension((mlon1+1)-(mlon0-1)+1,nf) :: &
- sndlat0,sndlat1,rcvlat0,rcvlat1
-
-!
-! Init send/recv buffers for lon halos:
- sndlon0 = 0._r8 ; rcvlon0 = 0._r8
- sndlon1 = 0._r8 ; rcvlon1 = 0._r8
-!
-! Identify east and west neightbors:
- west = itask_table_mag(mytidi-1,mytidj)
- east = itask_table_mag(mytidi+1,mytidj)
-!
-! Exchange mlat0:mlat1 (lat halos are not yet defined):
- nmlats = mlat1-mlat0+1
- len = nmlats*nf
-!
-! Send mlon0 to the west neighbor, and mlon1 to the east.
-! However, tasks are periodic in longitude (see itask_table_mag),
-! and far west tasks send mlon0+1, and far east tasks send mlon1-1
-!
- do ifld=1,nf
-! Far west tasks send mlon0+1 to far east (periodic) tasks:
+ !
+ ! Gather longitude data to westernmost processors (far north and south):
+ !
+ call mp_gatherlons_f3d(pglblon,lev0,lev1,lon0,lon1,lat0-2,lat1+2,nf)
+ !
+ ! Loop over tasks in my latitude row (far north or far south),
+ ! including myself, and set halo points over the poles.
+ !
if (mytidi==0) then
- sndlon0(:,ifld) = fmsub(mlon0+1,mlat0:mlat1,ifld)
-! Interior tasks send mlon0 to west neighbor:
- else
- sndlon0(:,ifld) = fmsub(mlon0,mlat0:mlat1,ifld)
+ do it=0,ntaski-1
+ itask = tasks(itask_table_geo(it,mytidj))%mytid
+ i0 = tasks(itask)%lon0
+ i1 = tasks(itask)%lon1
+ j0 = tasks(itask)%lat0
+ j1 = tasks(itask)%lat1
+ do if=1,nf
+ if (j0==1) then ! south
+ do i=i0,i1
+ ihalo = 1+mod(i-1+nlon_geo/2,nlon_geo)
+ pglblon(if)%ptr(lev0:lev1,i,j0-2) = pglblon(if)%ptr(lev0:lev1,ihalo,j0+2) ! get lat -1 from lat 3
+ pglblon(if)%ptr(lev0:lev1,i,j0-1) = pglblon(if)%ptr(lev0:lev1,ihalo,j0+1) ! get lat 0 from lat 2
+ enddo
+ else ! north
+ do i=i0,i1
+ ihalo = 1+mod(i-1+nlon_geo/2,nlon_geo)
+ pglblon(if)%ptr(lev0:lev1,i,j1+1) = pglblon(if)%ptr(lev0:lev1,ihalo,j1-1) ! get lat plat+1 from plat-1
+ pglblon(if)%ptr(lev0:lev1,i,j1+2) = pglblon(if)%ptr(lev0:lev1,ihalo,j1-2) ! get lat plat+2 from plat-2
+ enddo
+ endif
+ enddo ! if=1,nf
+ enddo ! it=0,ntaski-1
+ endif ! mytidi==0
+ !
+ ! Scatter data back out to processors in my latitude row:
+ !
+ call mp_scatterlons_f3d(pglblon,lev0,lev1,lon0,lon1,lat0-2,lat1+2,nf)
+ !
+ ! Finally, define halo points in data arrays from local global lon array,
+ ! changing sign if necessary (winds):
+ !
+ if (lat0==1) then ! south
+ do if=1,nf
+ do j=lat0-2,lat0-1
+ do k=lev0,lev1
+ f(if)%ptr(k,lon0:lon1,j) = pglblon(if)%ptr(k,lon0:lon1,j)*polesign(if)
+ enddo
+ enddo
+ enddo
+ else ! north
+ do if=1,nf
+ do j=lat1+1,lat1+2
+ do k=lev0,lev1
+ f(if)%ptr(k,lon0:lon1,j) = pglblon(if)%ptr(k,lon0:lon1,j)*polesign(if)
+ enddo
+ enddo
+ enddo
endif
-! Far east tasks send mlon1-1 to far west (periodic) tasks:
- if (mytidi==nmagtaski-1) then
- sndlon1(:,ifld) = fmsub(mlon1-1,mlat0:mlat1,ifld)
-! Interior tasks send mlon1 to east neighbor:
+ do if=1,nf
+ deallocate(pglblon(if)%ptr)
+ enddo
+ end subroutine mp_pole_halos
+ !-----------------------------------------------------------------------
+ subroutine conjugate_points(gmlat)
+ !
+ ! Build the send/recv maps pairing each magnetic latitude in this
+ ! task's subdomain with its conjugate (mirror) latitude in the
+ ! opposite hemisphere. The maps (nsend_south, nrecv_north,
+ ! send_south_coords, recv_north_coords) are module state consumed by
+ ! mp_mag_foldhem at every timestep; this routine is called once per run.
+ !
+ real(r8), intent(in) :: gmlat(:) ! global magnetic latitudes (deg)
+ !
+ ! Local:
+ integer :: ier,j,js,jn,itask,jj
+ !
+ ! nsend_south(ntask): number of lats in south to send north
+ ! nrecv_north(ntask): number of lats in north to recv from south
+ !
+ ! Check each allocation status: stat= suppresses the automatic abort,
+ ! so an unchecked failure would leave the maps unallocated and crash
+ ! later in mp_mag_foldhem instead of here.
+ allocate(nsend_south(0:ntask-1),stat=ier)
+ if (ier /= 0) call endrun('conjugate_points: allocate nsend_south failed')
+ allocate(nrecv_north(0:ntask-1),stat=ier)
+ if (ier /= 0) call endrun('conjugate_points: allocate nrecv_north failed')
+ !
+ ! send_south_coords: south j lats to send north
+ ! recv_north_coords: north j lats to recv from south
+ !
+ allocate(send_south_coords(mxmaglat,0:ntask-1),stat=ier)
+ if (ier /= 0) call endrun('conjugate_points: allocate send_south_coords failed')
+ allocate(recv_north_coords(mxmaglat,0:ntask-1),stat=ier)
+ if (ier /= 0) call endrun('conjugate_points: allocate recv_north_coords failed')
+
+ nsend_south(:) = 0
+ nrecv_north(:) = 0
+ send_south_coords(:,:) = 0
+ recv_north_coords(:,:) = 0
+
+ magloop: do j=mlat0,mlat1
+ !
+ ! In north hem: find tasks w/ conjugate points in south to recv:
+ ! (nmlath is in params module)
+ if (gmlat(j) > 0._r8) then ! in north hem of current task
+ js = nmlath-(j-nmlath) ! j index to south conjugate point (should be -j)
+ do itask=0,ntask-1
+ do jj = tasks(itask)%mlat0,tasks(itask)%mlat1
+ !
+ ! Receive these north coords from the south; the sender must cover
+ ! the same longitude subdomain as this task:
+ if (jj==js.and.mlon0==tasks(itask)%mlon0.and. &
+ mlon1==tasks(itask)%mlon1) then
+ nrecv_north(itask) = nrecv_north(itask)+1
+ recv_north_coords(nrecv_north(itask),itask) = j
+ endif
+ enddo ! jj of remote task
+ enddo ! itask=0,ntask-1
+ if (all(nrecv_north==0)) &
+ write(iulog,"(2a,i4,a,f8.2)") '>>> WARNING: could not find north conjugate',&
+ ' points corresponding to south latitude js=',js,' gmlat(js)=',gmlat(js)
+ !
+ ! In south hem: find tasks w/ conjugate points in north to send:
+ ! (the magnetic equator j == nmlath is excluded; mp_mag_foldhem
+ ! doubles it separately)
+ elseif (gmlat(j) < 0._r8.and.j /= nmlath) then ! in south hem
+ jn = nmlath+(nmlath-j) ! j index of north conjugate point
+ do itask=0,ntask-1
+ do jj = tasks(itask)%mlat0,tasks(itask)%mlat1
+ if (jj==jn.and.mlon0==tasks(itask)%mlon0.and. &
+ mlon1==tasks(itask)%mlon1) then
+ nsend_south(itask) = nsend_south(itask)+1
+ ! Send these south coords to the north:
+ send_south_coords(nsend_south(itask),itask) = j
+ endif
+ enddo ! jj of remote task
+ enddo ! itask=0,ntask-1
+ if (all(nsend_south==0)) &
+ write(iulog,"(2a,i4,a,f8.2)") '>>> WARNING: could not find south conjugate',&
+ ' points corresponding to north latitude jn=',jn,' gmlat(jn)=',gmlat(jn)
+ endif ! in north or south hem
+ enddo magloop ! j=mlat0,mlat1
+ end subroutine conjugate_points
+ !-----------------------------------------------------------------------
+ subroutine mp_mag_foldhem(f,mlon0,mlon1,mlat0,mlat1,nf)
+ !
+ ! For each point in northern hemisphere (if any) of the current task
+ ! subdomain, receive data from conjugate point in the south (from the
+ ! south task that owns it), and sum it to the north point data.
+ ! Do this for nf fields. Conjugate point indices to send/recv to/from
+ ! each task were determined by sub conjugate_points (this module).
+ ! nsend_south, ! number of south lats to send to north (each task)
+ ! nrecv_north ! number of north lats to send to south (each task)
+ !
+ ! This routine is called from edynamo at every timestep.
+ ! Sub conjugate_points is called once per run, from mp_distribute.
+ !
+ ! Args:
+ integer,intent(in) :: mlon0,mlon1,mlat0,mlat1,nf
+ real(r8),intent(inout) :: f(mlon0:mlon1,mlat0:mlat1,nf)
+ !
+ ! Local:
+ integer :: j,n,len,itask,ifld,ier,nmlons
+ real(r8) :: sndbuf(mxmaglon,mxmaglat,nf,0:ntask-1)
+ real(r8) :: rcvbuf(mxmaglon,mxmaglat,nf,0:ntask-1)
+ integer :: jsend(0:ntask-1),jrecv(0:ntask-1)
+ integer :: irstat(MPI_STATUS_SIZE) ! mpi receive status
+
+ !
+ ! The conjugate-point maps are allocated by conjugate_points;
+ ! referencing them while unallocated is a runtime error, so fail with
+ ! a clear message if setup has not been done:
+ if (.not.allocated(nsend_south) .or. .not.allocated(nrecv_north)) &
+ call endrun('mp_mag_foldhem: conjugate_points has not been called')
+
+ sndbuf = 0._r8 ; rcvbuf = 0._r8
+ jsend = 0 ; jrecv = 0
+ len = mxmaglon*mxmaglat*nf
+ nmlons = mlon1-mlon0+1
+ !
+ ! Send south data to north itask:
+ ! (To avoid deadlock, do not send if north task is also myself. This will
+ ! happen when there is an odd number of tasks in the latitude dimension,
+ ! e.g., ntask == 12, 30, etc)
+ !
+ do itask=0,ntask-1
+ if (nsend_south(itask) > 0 .and. itask /= mytid) then
+ do ifld = 1,nf
+ do n=1,nsend_south(itask)
+ sndbuf(1:nmlons,n,ifld,itask) = &
+ f(:,send_south_coords(n,itask),ifld)
+ enddo
+ enddo ! ifld=1,nf
+ call mpi_isend(sndbuf(1,1,1,itask),len,MPI_REAL8, &
+ itask,1,mpi_comm_edyn,jsend(itask),ier)
+ call mpi_wait(jsend(itask),irstat,ier)
+ endif ! nsend_south(itask) > 0
+ enddo ! itask=0,ntask-1
+ !
+ ! Receive north data from south itask and add to north,
+ ! i.e., north = north+south. (do not receive if south task is
+ ! also myself, but do add south data to my north points, see below)
+ !
+ do itask=0,ntask-1
+ if (nrecv_north(itask) > 0 .and. itask /= mytid) then
+ call mpi_irecv(rcvbuf(1,1,1,itask),len,MPI_REAL8, &
+ itask,1,mpi_comm_edyn,jrecv(itask),ier)
+ call mpi_wait(jrecv(itask),irstat,ier)
+ do ifld=1,nf
+ do n=1,nrecv_north(itask)
+ !
+ ! Receive lats in reverse order:
+ f(mlon0:mlon1, &
+ recv_north_coords(nrecv_north(itask)-n+1,itask),ifld) = &
+ f(mlon0:mlon1, &
+ recv_north_coords(nrecv_north(itask)-n+1,itask),ifld) + &
+ rcvbuf(1:nmlons,n,ifld,itask)
+ enddo ! n=1,nrecv_north(itask)
+ enddo ! ifld=1,nf
+ !
+ ! If I am send *and* receive task, simply add my south data to my north points:
+ elseif (nrecv_north(itask) > 0 .and. itask == mytid) then
+ do ifld=1,nf
+ do n=1,nrecv_north(itask)
+ f(mlon0:mlon1, &
+ recv_north_coords(nrecv_north(itask)-n+1,itask),ifld) = &
+ f(mlon0:mlon1, &
+ recv_north_coords(nrecv_north(itask)-n+1,itask),ifld) + &
+ f(mlon0:mlon1,send_south_coords(n,itask),ifld)
+ enddo ! n=1,nrecv_north(itask)
+ enddo ! ifld=1,nf
+ endif ! nrecv_north(itask) > 0
+ enddo ! itask=0,ntask-1
+ !
+ ! Mag equator is also "folded", but not included in conjugate points,
+ ! so double it here:
+ do j=mlat0,mlat1
+ if (j==nmlath) then
+ do ifld=1,nf
+ f(:,j,ifld) = f(:,j,ifld)+f(:,j,ifld)
+ enddo
+ endif
+ enddo
+
+ end subroutine mp_mag_foldhem
+ !-----------------------------------------------------------------------
+ subroutine mp_mag_periodic_f2d(f,mlon0,mlon1,mlat0,mlat1,nf)
+ !
+ ! Args:
+ integer,intent(in) :: mlon0,mlon1,mlat0,mlat1,nf
+ real(r8),intent(inout) :: f(mlon0:mlon1,mlat0:mlat1,nf)
+ !
+ ! Local:
+ integer :: j,ier,idest,isrc,len,ireqsend,ireqrecv,msgtag
+ real(r8) :: sndbuf(mxmaglat,nf),rcvbuf(mxmaglat,nf)
+ integer :: irstat(MPI_STATUS_SIZE) ! mpi receive status
+
+ if (ntaski>1) then
+ len = mxmaglat*nf
+ !
+ ! I am a western-most task. Send lon 1 to eastern-most tasks:
+ if (mytidi==0) then
+ idest = itask_table_mag(ntaski-1,mytidj)
+ do j=mlat0,mlat1
+ sndbuf(j-mlat0+1,:) = f(1,j,:)
+ enddo
+ msgtag = mytid
+ call mpi_isend(sndbuf,len,MPI_REAL8,idest,msgtag,mpi_comm_edyn, ireqsend,ier)
+ if (ier /= 0) call handle_mpi_err(ier,'mp_mag_periodic_f2d send to idest')
+ call mpi_wait(ireqsend,irstat,ier)
+ if (ier /= 0) call handle_mpi_err(ier,'mp_mag_periodic_f2d wait for send')
+ !
+ ! I am eastern-most task. Receive lon 1 from western-most tasks,
+ ! and assign to nmlonp1:
+ elseif (mytidi==ntaski-1) then
+ isrc = itask_table_mag(0,mytidj)
+ msgtag = isrc
+ call mpi_irecv(rcvbuf,len,MPI_REAL8,isrc,msgtag,mpi_comm_edyn, ireqrecv,ier)
+ if (ier /= 0) call handle_mpi_err(ier,'mp_mag_periodic_f2d recv from isrc')
+ call mpi_wait(ireqrecv,irstat,ier)
+ if (ier /= 0) call handle_mpi_err(ier,'mp_mag_periodic_f2d wait for recv')
+
+ do j=mlat0,mlat1
+ f(nmlonp1,j,:) = rcvbuf(j-mlat0+1,:)
+ enddo
+ endif ! mytidi == 0 or ntaski-1
else
- sndlon1(:,ifld) = fmsub(mlon1,mlat0:mlat1,ifld)
+ do j=mlat0,mlat1
+ f(nmlonp1,j,:) = f(1,j,:)
+ enddo
endif
- enddo ! ifld=1,nf
-!
-! Send mlon0 to the west:
- call mpi_isend(sndlon0,len,MPI_REAL8,west,1,mpi_comm_edyn,isend0,ier)
- if (ier /= 0) call handle_mpi_err(ier,'mp_mag_halos send mlon0 to west')
-!
-! Send mlon1 to the east:
- call mpi_isend(sndlon1,len,MPI_REAL8,east,1,mpi_comm_edyn,isend1,ier)
- if (ier /= 0) call handle_mpi_err(ier,'mp_mag_halos send mlon1 to east')
-!
-! Recv mlon0-1 from west:
- call mpi_irecv(rcvlon0,len,MPI_REAL8,west,1,mpi_comm_edyn,irecv0,ier)
- if (ier /= 0) call handle_mpi_err(ier,'mp_mag_halos recv mlon0 from west')
-!
-! Recv mlon1+1 from east:
- call mpi_irecv(rcvlon1,len,MPI_REAL8,east,1,mpi_comm_edyn,irecv1,ier)
- if (ier /= 0) call handle_mpi_err(ier,'mp_mag_halos recv mlon1 from east')
-!
-! Wait for completions:
- ireq = (/isend0,isend1,irecv0,irecv1/)
- istat = 0
- call mpi_waitall(4,ireq,istat,ier)
- if (ier /= 0) call handle_mpi_err(ier,'mp_mag_halos waitall for lons')
-!
-! Copy mlon0-1 from rcvlon0, and mlon1+1 from rcvlon1:
- do ifld=1,nf
- fmsub(mlon0-1,mlat0:mlat1,ifld) = rcvlon0(:,ifld)
- fmsub(mlon1+1,mlat0:mlat1,ifld) = rcvlon1(:,ifld)
-!
-! Fix special case of 2 tasks in longitude dimension:
- if (east == west) then
- fmsub(mlon0-1,mlat0:mlat1,ifld) = rcvlon1(:,ifld)
- fmsub(mlon1+1,mlat0:mlat1,ifld) = rcvlon0(:,ifld)
- endif
- enddo ! ifld=1,nf
-!
-! Now exchange latitudes:
- sndlat0 = 0._r8 ; rcvlat0 = 0._r8
- sndlat1 = 0._r8 ; rcvlat1 = 0._r8
-
- south = itask_table_mag(mytidi,mytidj-1) ! neighbor to south
- north = itask_table_mag(mytidi,mytidj+1) ! neighbor to north
-!
-! Include halo longitudes that were defined by the exchanges above:
- nmlons = (mlon1+1)-(mlon0-1)+1
- len = nmlons*nf
-!
-! Send mlat0 to south neighbor, and mlat1 to north:
- do ifld=1,nf
- sndlat0(:,ifld) = fmsub(:,mlat0,ifld)
- sndlat1(:,ifld) = fmsub(:,mlat1,ifld)
- enddo
-!
-! Send mlat0 to south:
- call mpi_isend(sndlat0,len,MPI_REAL8,south,1,mpi_comm_edyn,isend0,ier)
- if (ier /= 0) call handle_mpi_err(ier,'mp_mag_halos send mlat0 to south')
-!
-! Send mlat1 to north:
- call mpi_isend(sndlat1,len,MPI_REAL8,north,1,mpi_comm_edyn,isend1,ier)
- if (ier /= 0) call handle_mpi_err(ier,'mp_mag_halos send mlat1 to north')
-!
-! Recv mlat0-1 from south:
- call mpi_irecv(rcvlat0,len,MPI_REAL8,south,1,mpi_comm_edyn,irecv0,ier)
- if (ier /= 0) call handle_mpi_err(ier,'mp_mag_halos recv mlat0-1 from south')
-!
-! Recv mlat1+1 from north:
- call mpi_irecv(rcvlat1,len,MPI_REAL8,north,1,mpi_comm_edyn,irecv1,ier)
- if (ier /= 0) call handle_mpi_err(ier,'mp_mag_halos recv mlat1+1 from north')
-!
-! Wait for completions:
- ireq = (/isend0,isend1,irecv0,irecv1/)
- istat = 0
- call mpi_waitall(4,ireq,istat,ier)
- if (ier /= 0) call handle_mpi_err(ier,'mp_mag_halos waitall for lats')
-!
-! Copy mlat0-1 from rcvlat0, and mlat1+1 from rcvlat1:
- do ifld=1,nf
- fmsub(:,mlat0-1,ifld) = rcvlat0(:,ifld)
- fmsub(:,mlat1+1,ifld) = rcvlat1(:,ifld)
- enddo ! ifld=1,nf
-
- end subroutine mp_mag_halos
-!-----------------------------------------------------------------------
- subroutine mp_geo_halos(fmsub,lev0,lev1,lon0,lon1,lat0,lat1,nf)
-!
-! Exchange halo/ghost points between geographic grid subdomains for nf fields.
-! Two halo points are set in both lon and lat dimensions.
-! Longitude halos are done first, then latitude halos are done, including
-! longitude halos that were defined first).
-!
-! Args:
+
+ end subroutine mp_mag_periodic_f2d
+ !-----------------------------------------------------------------------
+ subroutine mp_mag_halos(fmsub,mlon0,mlon1,mlat0,mlat1,nf)
+ !
+ ! Exchange halo/ghost points between magnetic grid subdomains for nf fields.
+ ! Only a single halo point is required in both lon and lat dimensions.
+ ! Note that all tasks in any row of the task matrix have the same
+ ! mlat0,mlat1, and that all tasks in any column of the task matrix
+ ! have the same mlon0,mlon1.
+ ! Longitude halos are done first, exchanging mlat0:mlat1, then latitude
+ ! halos are done, exchanging mlon0-1:mlon1+1 (i.e., including the
+ ! longitude halos that were defined first).
+ !
+ ! Args:
+ integer,intent(in) :: mlon0,mlon1,mlat0,mlat1,nf
+ real(r8),intent(inout) :: fmsub(mlon0-1:mlon1+1,mlat0-1:mlat1+1,nf)
+ !
+ ! Local:
+ integer :: ifld,west,east,north,south,len,isend0,isend1, &
+ irecv0,irecv1,ier,nmlats,istat(MPI_STATUS_SIZE,4),ireq(4),nmlons
+ real(r8),dimension(mlat1-mlat0+1,nf)::sndlon0,sndlon1,rcvlon0,rcvlon1
+ real(r8),dimension((mlon1+1)-(mlon0-1)+1,nf) :: &
+ sndlat0,sndlat1,rcvlat0,rcvlat1
+
+ !
+ ! Init send/recv buffers for lon halos:
+ sndlon0 = 0._r8 ; rcvlon0 = 0._r8
+ sndlon1 = 0._r8 ; rcvlon1 = 0._r8
+ !
+ ! Identify east and west neightbors:
+ west = itask_table_mag(mytidi-1,mytidj)
+ east = itask_table_mag(mytidi+1,mytidj)
+ !
+ ! Exchange mlat0:mlat1 (lat halos are not yet defined):
+ nmlats = mlat1-mlat0+1
+ len = nmlats*nf
+ !
+ ! Send mlon0 to the west neighbor, and mlon1 to the east.
+ ! However, tasks are periodic in longitude (see itask_table_mag),
+ ! and far west tasks send mlon0+1, and far east tasks send mlon1-1
+ !
+ do ifld=1,nf
+ ! Far west tasks send mlon0+1 to far east (periodic) tasks:
+ if (mytidi==0) then
+ sndlon0(:,ifld) = fmsub(mlon0+1,mlat0:mlat1,ifld)
+ ! Interior tasks send mlon0 to west neighbor:
+ else
+ sndlon0(:,ifld) = fmsub(mlon0,mlat0:mlat1,ifld)
+ endif
+
+ ! Far east tasks send mlon1-1 to far west (periodic) tasks:
+ if (mytidi==nmagtaski-1) then
+ sndlon1(:,ifld) = fmsub(mlon1-1,mlat0:mlat1,ifld)
+ ! Interior tasks send mlon1 to east neighbor:
+ else
+ sndlon1(:,ifld) = fmsub(mlon1,mlat0:mlat1,ifld)
+ endif
+ enddo ! ifld=1,nf
+ !
+ ! Post all four nonblocking transfers, then wait on them together.
+ ! Send mlon0 to the west:
+ call mpi_isend(sndlon0,len,MPI_REAL8,west,1,mpi_comm_edyn,isend0,ier)
+ if (ier /= 0) call handle_mpi_err(ier,'mp_mag_halos send mlon0 to west')
+ !
+ ! Send mlon1 to the east:
+ call mpi_isend(sndlon1,len,MPI_REAL8,east,1,mpi_comm_edyn,isend1,ier)
+ if (ier /= 0) call handle_mpi_err(ier,'mp_mag_halos send mlon1 to east')
+ !
+ ! Recv mlon0-1 from west:
+ call mpi_irecv(rcvlon0,len,MPI_REAL8,west,1,mpi_comm_edyn,irecv0,ier)
+ if (ier /= 0) call handle_mpi_err(ier,'mp_mag_halos recv mlon0 from west')
+ !
+ ! Recv mlon1+1 from east:
+ call mpi_irecv(rcvlon1,len,MPI_REAL8,east,1,mpi_comm_edyn,irecv1,ier)
+ if (ier /= 0) call handle_mpi_err(ier,'mp_mag_halos recv mlon1 from east')
+ !
+ ! Wait for completions:
+ ireq = (/isend0,isend1,irecv0,irecv1/)
+ istat = 0
+ call mpi_waitall(4,ireq,istat,ier)
+ if (ier /= 0) call handle_mpi_err(ier,'mp_mag_halos waitall for lons')
+ !
+ ! Copy mlon0-1 from rcvlon0, and mlon1+1 from rcvlon1:
+ do ifld=1,nf
+ fmsub(mlon0-1,mlat0:mlat1,ifld) = rcvlon0(:,ifld)
+ fmsub(mlon1+1,mlat0:mlat1,ifld) = rcvlon1(:,ifld)
+ !
+ ! Fix special case of 2 tasks in longitude dimension:
+ ! (east and west are then the same task, and both messages carry
+ ! tag 1, so the buffers may arrive swapped)
+ if (east == west) then
+ fmsub(mlon0-1,mlat0:mlat1,ifld) = rcvlon1(:,ifld)
+ fmsub(mlon1+1,mlat0:mlat1,ifld) = rcvlon0(:,ifld)
+ endif
+ enddo ! ifld=1,nf
+ !
+ ! Now exchange latitudes:
+ sndlat0 = 0._r8 ; rcvlat0 = 0._r8
+ sndlat1 = 0._r8 ; rcvlat1 = 0._r8
+
+ south = itask_table_mag(mytidi,mytidj-1) ! neighbor to south
+ north = itask_table_mag(mytidi,mytidj+1) ! neighbor to north
+ !
+ ! Include halo longitudes that were defined by the exchanges above:
+ nmlons = (mlon1+1)-(mlon0-1)+1
+ len = nmlons*nf
+ !
+ ! Send mlat0 to south neighbor, and mlat1 to north:
+ do ifld=1,nf
+ sndlat0(:,ifld) = fmsub(:,mlat0,ifld)
+ sndlat1(:,ifld) = fmsub(:,mlat1,ifld)
+ enddo
+ !
+ ! Send mlat0 to south:
+ call mpi_isend(sndlat0,len,MPI_REAL8,south,1,mpi_comm_edyn,isend0,ier)
+ if (ier /= 0) call handle_mpi_err(ier,'mp_mag_halos send mlat0 to south')
+ !
+ ! Send mlat1 to north:
+ call mpi_isend(sndlat1,len,MPI_REAL8,north,1,mpi_comm_edyn,isend1,ier)
+ if (ier /= 0) call handle_mpi_err(ier,'mp_mag_halos send mlat1 to north')
+ !
+ ! Recv mlat0-1 from south:
+ call mpi_irecv(rcvlat0,len,MPI_REAL8,south,1,mpi_comm_edyn,irecv0,ier)
+ if (ier /= 0) call handle_mpi_err(ier,'mp_mag_halos recv mlat0-1 from south')
+ !
+ ! Recv mlat1+1 from north:
+ call mpi_irecv(rcvlat1,len,MPI_REAL8,north,1,mpi_comm_edyn,irecv1,ier)
+ if (ier /= 0) call handle_mpi_err(ier,'mp_mag_halos recv mlat1+1 from north')
+ !
+ ! Wait for completions:
+ ireq = (/isend0,isend1,irecv0,irecv1/)
+ istat = 0
+ call mpi_waitall(4,ireq,istat,ier)
+ if (ier /= 0) call handle_mpi_err(ier,'mp_mag_halos waitall for lats')
+ !
+ ! Copy mlat0-1 from rcvlat0, and mlat1+1 from rcvlat1:
+ do ifld=1,nf
+ fmsub(:,mlat0-1,ifld) = rcvlat0(:,ifld)
+ fmsub(:,mlat1+1,ifld) = rcvlat1(:,ifld)
+ enddo ! ifld=1,nf
+
+ end subroutine mp_mag_halos
+ !-----------------------------------------------------------------------
+ subroutine mp_geo_halos(fmsub,lev0,lev1,lon0,lon1,lat0,lat1,nf)
+ !
+ ! Exchange halo/ghost points between geographic grid subdomains for nf fields.
+ ! Two halo points are set in both lon and lat dimensions.
+ ! Longitude halos are done first, then latitude halos are done (including
+ ! longitude halos that were defined first).
+ !
+ ! Args:
integer,intent(in) :: lev0,lev1,lon0,lon1,lat0,lat1,nf
type(array_ptr_type) :: fmsub(nf) ! (lev0:lev1,lon0-2:lon1+2,lat0-2:lat1+2)
-!
-! Local:
+ !
+ ! Local:
integer :: k,i,ifld,west,east,north,south,len,isend0,isend1, &
- irecv0,irecv1,ier,nlats,istat(MPI_STATUS_SIZE,4),ireq(4),nlons
+ irecv0,irecv1,ier,nlats,istat(MPI_STATUS_SIZE,4),ireq(4),nlons
real(r8),dimension(lev0:lev1,2,lat1-lat0+1,nf) :: &
- sndlon0,sndlon1,rcvlon0,rcvlon1
+ sndlon0,sndlon1,rcvlon0,rcvlon1
real(r8),dimension(lev0:lev1,2,(lon1+2)-(lon0-2)+1,nf) :: &
- sndlat0,sndlat1,rcvlat0,rcvlat1
+ sndlat0,sndlat1,rcvlat0,rcvlat1
-! if (mpi_timing) starttime = mpi_wtime()
-!
-! Init send/recv buffers for lon halos:
+ ! if (mpi_timing) starttime = mpi_wtime()
+ !
+ ! Init send/recv buffers for lon halos:
sndlon0 = 0._r8 ; rcvlon0 = 0._r8
sndlon1 = 0._r8 ; rcvlon1 = 0._r8
-!
-! Identify east and west neighbors:
+ !
+ ! Identify east and west neighbors:
west = itask_table_geo(mytidi-1,mytidj)
east = itask_table_geo(mytidi+1,mytidj)
-!
-! Exchange lat0:lat1 (lat halos are not yet defined):
+ !
+ ! Exchange lat0:lat1 (lat halos are not yet defined):
nlats = lat1-lat0+1
len = (lev1-lev0+1)*2*nlats*nf
-!
-! Send lon0:lon0+1 to the west neighbor, and lon1-1:lon1 to the east.
-!
+ !
+ ! Send lon0:lon0+1 to the west neighbor, and lon1-1:lon1 to the east.
+ !
do ifld=1,nf
- do i=1,2
- do k=lev0,lev1
- sndlon0(k,i,:,ifld) = fmsub(ifld)%ptr(k,lon0+i-1,lat0:lat1) ! lon0, lon0+1
- sndlon1(k,i,:,ifld) = fmsub(ifld)%ptr(k,lon1+i-2,lat0:lat1) ! lon1-1, lon1
- enddo
- enddo
+ do i=1,2
+ do k=lev0,lev1
+ sndlon0(k,i,:,ifld) = fmsub(ifld)%ptr(k,lon0+i-1,lat0:lat1) ! lon0, lon0+1
+ sndlon1(k,i,:,ifld) = fmsub(ifld)%ptr(k,lon1+i-2,lat0:lat1) ! lon1-1, lon1
+ enddo
+ enddo
enddo ! ifld=1,nf
-!
-! Send lon0:lon0+1 to the west:
+ !
+ ! Send lon0:lon0+1 to the west:
call mpi_isend(sndlon0,len,MPI_REAL8,west,1,mpi_comm_edyn,isend0,ier)
if (ier /= 0) call handle_mpi_err(ier, &
- 'mp_geo_halos send lon0:lon0+1 to west')
-!
-! Send lon1-1:lon1 to the east:
+ 'mp_geo_halos send lon0:lon0+1 to west')
+ !
+ ! Send lon1-1:lon1 to the east:
call mpi_isend(sndlon1,len,MPI_REAL8,east,1,mpi_comm_edyn,isend1,ier)
if (ier /= 0) call handle_mpi_err(ier, &
- 'mp_geo_halos send lon1-1:lon1 to east')
-!
-! Recv lon0-2:lon0-1 from west:
+ 'mp_geo_halos send lon1-1:lon1 to east')
+ !
+ ! Recv lon0-2:lon0-1 from west:
call mpi_irecv(rcvlon0,len,MPI_REAL8,west,1,mpi_comm_edyn,irecv0,ier)
if (ier /= 0) call handle_mpi_err(ier, &
- 'mp_geo_halos recv lon0-2:lon0-1 from west')
-!
-! Recv lon1+1:lon1+2 from east:
+ 'mp_geo_halos recv lon0-2:lon0-1 from west')
+ !
+ ! Recv lon1+1:lon1+2 from east:
call mpi_irecv(rcvlon1,len,MPI_REAL8,east,1,mpi_comm_edyn,irecv1,ier)
if (ier /= 0) call handle_mpi_err(ier, &
- 'mp_geo_halos recv lon1+1:lon1+2 from east')
-!
-! Wait for completions:
+ 'mp_geo_halos recv lon1+1:lon1+2 from east')
+ !
+ ! Wait for completions:
ireq = (/isend0,isend1,irecv0,irecv1/)
istat = 0
call mpi_waitall(4,ireq,istat,ier)
if (ier /= 0) call handle_mpi_err(ier, &
- 'mp_geo_halos waitall for lons')
-!
-! Copy lon0-2:lon0-1 from rcvlon0, and lon1+1:lon1+2 from rcvlon1:
+ 'mp_geo_halos waitall for lons')
+ !
+ ! Copy lon0-2:lon0-1 from rcvlon0, and lon1+1:lon1+2 from rcvlon1:
do ifld=1,nf
- if (east /= west) then
- do i=1,2
- do k=lev0,lev1
- fmsub(ifld)%ptr(k,lon0-3+i,lat0:lat1) = rcvlon0(k,i,:,ifld) ! lon0-2, lon0-1
- fmsub(ifld)%ptr(k,lon1+i ,lat0:lat1) = rcvlon1(k,i,:,ifld) ! lon1+1, lon1+2
- enddo
- enddo ! i=1,2
-!
-! Fix special case of 2 tasks in longitude dimension:
- else ! east==west
- do i=1,2
- do k=lev0,lev1
- fmsub(ifld)%ptr(k,lon0-3+i,lat0:lat1) = rcvlon1(k,i,:,ifld) ! lon0-2, lon0-1
- fmsub(ifld)%ptr(k,lon1+i ,lat0:lat1) = rcvlon0(k,i,:,ifld) ! lon1+1, lon1+2
+ if (east /= west) then
+ do i=1,2
+ do k=lev0,lev1
+ fmsub(ifld)%ptr(k,lon0-3+i,lat0:lat1) = rcvlon0(k,i,:,ifld) ! lon0-2, lon0-1
+ fmsub(ifld)%ptr(k,lon1+i ,lat0:lat1) = rcvlon1(k,i,:,ifld) ! lon1+1, lon1+2
+ enddo
+ enddo ! i=1,2
+ !
+ ! Fix special case of 2 tasks in longitude dimension:
+ else ! east==west
+ do i=1,2
+ do k=lev0,lev1
+ fmsub(ifld)%ptr(k,lon0-3+i,lat0:lat1) = rcvlon1(k,i,:,ifld) ! lon0-2, lon0-1
+ fmsub(ifld)%ptr(k,lon1+i ,lat0:lat1) = rcvlon0(k,i,:,ifld) ! lon1+1, lon1+2
+ enddo
enddo
- enddo
- endif ! east==west
+ endif ! east==west
enddo ! ifld=1,nf
-!
-! Now exchange latitudes:
+ !
+ ! Now exchange latitudes:
sndlat0 = 0._r8 ; rcvlat0 = 0._r8
sndlat1 = 0._r8 ; rcvlat1 = 0._r8
south = itask_table_geo(mytidi,mytidj-1) ! neighbor to south
north = itask_table_geo(mytidi,mytidj+1) ! neighbor to north
-!
-! Include halo longitudes that were defined by the exchanges above:
- nlons = (lon1+2)-(lon0-2)+1
+ !
+ ! Include halo longitudes that were defined by the exchanges above:
+ nlons = (lon1+2)-(lon0-2)+1
len = (lev1-lev0+1)*2*nlons*nf
-!
-! Send lat0:lat0+1 to south neighbor, and lat1-1:lat1 to north:
+ !
+ ! Send lat0:lat0+1 to south neighbor, and lat1-1:lat1 to north:
do ifld=1,nf
- do k=lev0,lev1
- sndlat0(k,1,:,ifld) = fmsub(ifld)%ptr(k,:,lat0 ) ! send lat0 to south
- sndlat0(k,2,:,ifld) = fmsub(ifld)%ptr(k,:,lat0+1) ! send lat0+1 to south
+ do k=lev0,lev1
+ sndlat0(k,1,:,ifld) = fmsub(ifld)%ptr(k,:,lat0 ) ! send lat0 to south
+ sndlat0(k,2,:,ifld) = fmsub(ifld)%ptr(k,:,lat0+1) ! send lat0+1 to south
- sndlat1(k,1,:,ifld) = fmsub(ifld)%ptr(k,:,lat1 ) ! send lat1 to north
- sndlat1(k,2,:,ifld) = fmsub(ifld)%ptr(k,:,lat1-1) ! send lat1-1 to north
- enddo
+ sndlat1(k,1,:,ifld) = fmsub(ifld)%ptr(k,:,lat1 ) ! send lat1 to north
+ sndlat1(k,2,:,ifld) = fmsub(ifld)%ptr(k,:,lat1-1) ! send lat1-1 to north
+ enddo
enddo
-!
-! Send lat0:lat0+1 to south (matching recv is lat1+1:lat1+2):
+ !
+ ! Send lat0:lat0+1 to south (matching recv is lat1+1:lat1+2):
call mpi_isend(sndlat0,len,MPI_REAL8,south,100,mpi_comm_edyn,isend0,ier)
if (ier /= 0) call handle_mpi_err(ier, &
- 'mp_geo_halos send lat0:lat0+1 to south')
-!
-! Send lat1-1:lat1 to north (matching recv is lat0-2:lat0-1):
+ 'mp_geo_halos send lat0:lat0+1 to south')
+ !
+ ! Send lat1-1:lat1 to north (matching recv is lat0-2:lat0-1):
call mpi_isend(sndlat1,len,MPI_REAL8,north,101,mpi_comm_edyn,isend1,ier)
if (ier /= 0) call handle_mpi_err(ier, &
- 'mp_geo_halos send lat1-1:lat1 to north')
-!
-! Recv lat0-2:lat0-1 from south:
+ 'mp_geo_halos send lat1-1:lat1 to north')
+ !
+ ! Recv lat0-2:lat0-1 from south:
call mpi_irecv(rcvlat0,len,MPI_REAL8,south,101,mpi_comm_edyn,irecv0,ier)
if (ier /= 0) call handle_mpi_err(ier, &
- 'mp_geo_halos recv lat0-2:lat0-1 from south')
-!
-! Recv lat1+1:lat1+2 from north:
+ 'mp_geo_halos recv lat0-2:lat0-1 from south')
+ !
+ ! Recv lat1+1:lat1+2 from north:
call mpi_irecv(rcvlat1,len,MPI_REAL8,north,100,mpi_comm_edyn,irecv1,ier)
if (ier /= 0) call handle_mpi_err(ier, &
- 'mp_geo_halos recv lat1+1:lat1+2 from north')
-!
-! Wait for completions:
+ 'mp_geo_halos recv lat1+1:lat1+2 from north')
+ !
+ ! Wait for completions:
ireq = (/isend0,isend1,irecv0,irecv1/)
istat = 0
call mpi_waitall(4,ireq,istat,ier)
if (ier /= 0) call handle_mpi_err(ier, &
- 'mp_geo_halos waitall for lats')
-!
-! Copy lat0-2:lat0-1 from rcvlat0, and lat1+1:lat1+2 from rcvlat1:
+ 'mp_geo_halos waitall for lats')
+ !
+ ! Copy lat0-2:lat0-1 from rcvlat0, and lat1+1:lat1+2 from rcvlat1:
do ifld=1,nf
- do k=lev0,lev1
- fmsub(ifld)%ptr(k,:,lat0-1) = rcvlat0(k,1,:,ifld) ! recv lat0-1 from south
- fmsub(ifld)%ptr(k,:,lat0-2) = rcvlat0(k,2,:,ifld) ! recv lat0-2 from south
-
- fmsub(ifld)%ptr(k,:,lat1+1) = rcvlat1(k,1,:,ifld) ! recv lat1+1 from north
- fmsub(ifld)%ptr(k,:,lat1+2) = rcvlat1(k,2,:,ifld) ! recv lat1+2 from north
- enddo
-!
-! Fix special case of 2 tasks in latitude dimension:
-! Not sure if this will happen in WACCM:
-!
- if (north == south) then
- call endrun('mp_geo_halos: north==south')
- endif
+ do k=lev0,lev1
+ fmsub(ifld)%ptr(k,:,lat0-1) = rcvlat0(k,1,:,ifld) ! recv lat0-1 from south
+ fmsub(ifld)%ptr(k,:,lat0-2) = rcvlat0(k,2,:,ifld) ! recv lat0-2 from south
+
+ fmsub(ifld)%ptr(k,:,lat1+1) = rcvlat1(k,1,:,ifld) ! recv lat1+1 from north
+ fmsub(ifld)%ptr(k,:,lat1+2) = rcvlat1(k,2,:,ifld) ! recv lat1+2 from north
+ enddo
+ !
+ ! Fix special case of 2 tasks in latitude dimension:
+ ! Not sure if this will happen in WACCM:
+ !
+ if (north == south) then
+ call endrun('mp_geo_halos: north==south')
+ endif
enddo ! ifld=1,nf
- end subroutine mp_geo_halos
-!-----------------------------------------------------------------------
- subroutine mp_gather_edyn(fmsub,mlon0,mlon1,mlat0,mlat1,fmglb,nmlonp1,nmlat,nf)
-!
-! Gather fields on mag subdomains to root task, so root task can
-! complete non-parallel portion of dynamo (starting after rhspde)
-!
-! Args:
- integer,intent(in) :: mlon0,mlon1,mlat0,mlat1,nmlonp1,nmlat,nf
- real(r8),intent(in) :: fmsub(mlon0:mlon1,mlat0:mlat1,nf)
- real(r8),intent(out) :: fmglb(nmlonp1,nmlat,nf)
-!
-! Local:
- integer :: len,i,j,ifld,ier
- real(r8),dimension(nmlonp1,nmlat,nf) :: sndbuf
-
- sndbuf = 0._r8
- fmglb = 0._r8
-
- len = nmlonp1*nmlat*nf
-!
-! Load send buffer with my subdomain:
- do ifld=1,nf
- do j=mlat0,mlat1
- do i=mlon0, mlon1
- sndbuf(i,j,ifld) = fmsub(i,j,ifld)
- enddo
- enddo
- enddo
-
-!
-! Gather to root by using scalable reduce method:
-
- call mpi_reduce(sndbuf, fmglb, len, MPI_REAL8, MPI_SUM, 0, mpi_comm_edyn, ier )
- if (ier /= 0) call handle_mpi_err(ier,'mp_gather_edyn: mpi_gather to root')
-
- end subroutine mp_gather_edyn
-!-----------------------------------------------------------------------
- subroutine mp_scatter_phim(phim_glb,phim)
- real(r8),intent(in) :: phim_glb(nmlonp1,nmlat)
- real(r8),intent(out) :: phim(mlon0:mlon1,mlat0:mlat1)
-!
-! Local:
- integer :: ier,len,i,j
-
-! if (mpi_timing) starttime = mpi_wtime()
-!
-! Broadcast global phim (from pdynamo phim(nmlonp1,nmlat)):
- len = nmlat*nmlonp1
- call mpi_bcast(phim_glb,len,MPI_REAL8,0,mpi_comm_edyn,ier)
- if (ier /= 0) &
- call handle_mpi_err(ier,'mp_scatter_phim: bcast global phim')
-!
-! Define subdomains:
- do j=mlat0,mlat1
- do i=mlon0,mlon1
- phim(i,j) = phim_glb(i,j)
+ end subroutine mp_geo_halos
+ !-----------------------------------------------------------------------
+ subroutine mp_gather_edyn(fmsub,mlon0,mlon1,mlat0,mlat1,fmglb,nmlonp1,nmlat,nf)
+ !
+ ! Gather fields on mag subdomains to root task, so root task can
+ ! complete non-parallel portion of dynamo (starting after rhspde)
+ !
+ ! Args:
+ integer,intent(in) :: mlon0,mlon1,mlat0,mlat1,nmlonp1,nmlat,nf
+ real(r8),intent(in) :: fmsub(mlon0:mlon1,mlat0:mlat1,nf)
+ real(r8),intent(out) :: fmglb(nmlonp1,nmlat,nf)
+ !
+ ! Local:
+ integer :: len,i,j,ifld,ier
+ real(r8),dimension(nmlonp1,nmlat,nf) :: sndbuf
+
+ sndbuf = 0._r8
+ fmglb = 0._r8
+
+ len = nmlonp1*nmlat*nf
+ !
+ ! Load send buffer with my subdomain:
+ do ifld=1,nf
+ do j=mlat0,mlat1
+ do i=mlon0, mlon1
+ sndbuf(i,j,ifld) = fmsub(i,j,ifld)
+ enddo
+ enddo
+ enddo
+
+ !
+ ! Gather to root by using scalable reduce method:
+
+ call mpi_reduce(sndbuf, fmglb, len, MPI_REAL8, MPI_SUM, 0, mpi_comm_edyn, ier )
+ if (ier /= 0) call handle_mpi_err(ier,'mp_gather_edyn: mpi_gather to root')
+
+ end subroutine mp_gather_edyn
+ !-----------------------------------------------------------------------
+ subroutine mp_scatter_phim(phim_glb,phim)
+ real(r8),intent(in) :: phim_glb(nmlonp1,nmlat)
+ real(r8),intent(out) :: phim(mlon0:mlon1,mlat0:mlat1)
+ !
+ ! Local:
+ integer :: ier,len,i,j
+
+ ! if (mpi_timing) starttime = mpi_wtime()
+ !
+ ! Broadcast global phim (from pdynamo phim(nmlonp1,nmlat)):
+ len = nmlat*nmlonp1
+ call mpi_bcast(phim_glb,len,MPI_REAL8,0,mpi_comm_edyn,ier)
+ if (ier /= 0) &
+ call handle_mpi_err(ier,'mp_scatter_phim: bcast global phim')
+ !
+ ! Define subdomains:
+ do j=mlat0,mlat1
+ do i=mlon0,mlon1
+ phim(i,j) = phim_glb(i,j)
+ enddo
enddo
- enddo
-
- end subroutine mp_scatter_phim
-!-----------------------------------------------------------------------
- subroutine mp_mag_jslot(fin,mlon00,mlon11,mlat00,mlat11, &
- fout,jneed,mxneed,nf)
-!
-! Current task needs to receive (from other tasks) field f at (non-zero)
-! latitude indices in jneed, at all longitudes in the current subdomain.
-! Note subdomains include halo points mlon0-1 and mlat1+1. Data in f also
-! includes halo points (will need the lat data at halo-longitudes)
-!
-! Args:
- integer,intent(in) :: mlon00,mlon11,mlat00,mlat11 ! subdomains w/ halos
- integer,intent(in) :: nf ! number of fields
- integer,intent(in) :: mxneed ! max number of needed lats (nmlat+2)
- integer,intent(in) :: jneed(mxneed) ! j-indices of needed lats (where /= -1)
- real(r8),intent(in) :: fin(mlon00:mlon11,mlat00:mlat11,nf) ! data at current subdomain
- real(r8),intent(out) :: fout(mlon00:mlon11,mxneed,nf) ! returned data at needed lats
- !
- ! Local:
- integer,parameter :: sndbuf_cntr_max = 20 ! Maximum number of ibsend from one mpi task
- integer :: ier,njneed,i,j,n,nj,idest, &
- icount,len,nlons,isrc,msgid,ifld,sndbuf_cntr
- integer :: tij ! rank in cols_comm (0 to nmagtaskj-1)
- integer :: jhave(mxneed),njhave,wid
- integer :: peersneed(mxneed,0:nmagtaskj-1)
- integer :: jneedall (mxneed,0:nmagtaskj-1)
- real(r8) :: sndbuf(mxmaglon+2,mxneed,nf,sndbuf_cntr_max)
- real(r8) :: rcvbuf(mxmaglon+2,mxneed,nf)
- real(r8) :: buffer((mxmaglon+2)*mxneed*nf*sndbuf_cntr_max)
- integer :: irstat(MPI_STATUS_SIZE) ! mpi receive status
- integer :: isstat(MPI_STATUS_SIZE,sndbuf_cntr_max) !mpi_ibsend wait status
- integer :: ibsend_requests(sndbuf_cntr_max) !array of ibsend requests
-
- sndbuf = 0._r8
- rcvbuf = 0._r8
- njneed = 0
- ibsend_requests = 0
- sndbuf_cntr = 0
- do j=1,mxneed
- if (jneed(j) /= -1) njneed=njneed+1
- enddo
- if (any(jneed(1:njneed)==-1)) call endrun('mp_mag_jslot jneed')
- !
- call MPI_Comm_rank(cols_comm,tij,ier)
- call MPI_buffer_attach(buffer,(mxmaglon+2)*mxneed*nf*sndbuf_cntr_max,ier)
- if (ier /= 0) &
- call handle_mpi_err(ier,'mp_mag_jslot call mpi_buffer_attach')
-
- !
- ! Send needed lat indices to all tasks in my column:
- ! (redundant for alltoall)
- do n=0,nmagtaskj-1
- jneedall(:,n) = jneed(:)
- enddo
-
- call mpi_alltoall(jneedall,mxneed,MPI_INTEGER, &
- peersneed,mxneed,MPI_INTEGER,cols_comm,ier)
- if (ier /= 0) &
- call handle_mpi_err(ier,'mp_mag_jslot call mpi_alltoall')
- !
- ! Check if I have any needed lats, and who to send to:
- do n=0,nmagtaskj-1
- if (n==tij) cycle
- njhave = 0
+
+ end subroutine mp_scatter_phim
+ !-----------------------------------------------------------------------
+ subroutine mp_mag_jslot(fin,mlon00,mlon11,mlat00,mlat11, &
+ fout,jneed,mxneed,nf)
+ !
+ ! Current task needs to receive (from other tasks) field f at needed
+ ! latitude indices in jneed, at all longitudes in the current subdomain.
+ ! Note subdomains include halo points mlon0-1 and mlat1+1. Data in f also
+ ! includes halo points (will need the lat data at halo-longitudes)
+ !
+ ! Args:
+ integer,intent(in) :: mlon00,mlon11,mlat00,mlat11 ! subdomains w/ halos
+ integer,intent(in) :: nf ! number of fields
+ integer,intent(in) :: mxneed ! max number of needed lats (nmlat+2)
+ integer,intent(in) :: jneed(mxneed) ! j-indices of needed lats (where /= -1)
+ real(r8),intent(in) :: fin(mlon00:mlon11,mlat00:mlat11,nf) ! data at current subdomain
+ real(r8),intent(out) :: fout(mlon00:mlon11,mxneed,nf) ! returned data at needed lats
+ !
+ ! Local:
+ integer,parameter :: sndbuf_cntr_max = 40 ! Maximum number of ibsend from one mpi task
+ integer :: ier,njneed,i,j,n,nj,idest, &
+ icount,len,nlons,isrc,msgid,ifld,sndbuf_cntr
+ integer :: tij ! rank in cols_comm (0 to nmagtaskj-1)
+ integer :: jhave(mxneed),njhave,wid
+ integer :: peersneed(mxneed,0:nmagtaskj-1)
+ integer :: jneedall (mxneed,0:nmagtaskj-1)
+ real(r8) :: sndbuf(mxmaglon+2,mxneed,nf,sndbuf_cntr_max)
+ real(r8) :: rcvbuf(mxmaglon+2,mxneed,nf)
+ real(r8) :: buffer((mxmaglon+2)*mxneed*nf*sndbuf_cntr_max)
+ integer :: irstat(MPI_STATUS_SIZE) ! mpi receive status
+ integer :: isstat(MPI_STATUS_SIZE,sndbuf_cntr_max) !mpi_ibsend wait status
+ integer :: ibsend_requests(sndbuf_cntr_max) !array of ibsend requests
+
+ sndbuf = 0._r8
+ rcvbuf = 0._r8
+ njneed = 0
+ ibsend_requests = 0
+ sndbuf_cntr = 0
do j=1,mxneed
- if (peersneed(j,n) >= mlat00.and.peersneed(j,n) <= mlat11)then
- njhave = njhave+1
- jhave(njhave) = peersneed(j,n)
- idest = n
- wid = itask_table_geo(mytidi,idest)
- endif
+ if (jneed(j) /= -1) njneed=njneed+1
enddo
- if (njhave > 0) then
-
- sndbuf_cntr = sndbuf_cntr + 1
- if (sndbuf_cntr > sndbuf_cntr_max) call endrun('sndbuf_cntr exceeded sndbuf_cntr_max')
-
- !
- ! Load send buffer:
- nlons = mlon11-mlon00+1
- do ifld=1,nf
- do j=1,njhave
- do i=mlon00,mlon11
- sndbuf(i-mlon00+1,j,ifld,sndbuf_cntr) = fin(i,jhave(j),ifld)
- enddo
- enddo
- enddo
- len = nlons*njhave*nf
- msgid = mytid+wid*10000
- call mpi_ibsend(sndbuf(1:nlons,1:njhave,:,sndbuf_cntr),len,MPI_REAL8, &
- idest,msgid,cols_comm,ibsend_requests(sndbuf_cntr),ier)
- if (ier /= 0) &
- call handle_mpi_err(ier,'mp_mag_jslot call mpi_ibsend')
- endif
- enddo ! n=0,nmagtaskj-1
-
- call MPI_waitall(sndbuf_cntr,ibsend_requests,isstat,ier)
- if (ier /= 0) &
- call handle_mpi_err(ier,'mp_mag_jslot call mpi_waitall')
- call MPI_buffer_detach(buffer,(mxmaglon+2)*mxneed*nf*sndbuf_cntr_max,ier)
- if (ier /= 0) &
- call handle_mpi_err(ier,'mp_mag_jslot call mpi_buffer_detach')
-
- !
- ! Determine which tasks to receive which lats from. Task to
- ! receive from must be in same task column magtidi as I am.
- if (njneed > 0) then
- njhave = 0
- jhave(:) = -1
- do n=0,ntask-1
- njhave = 0
- do j=1,njneed
- if (jneed(j) >= tasks(n)%mlat0-1 .and. &
- jneed(j) <= tasks(n)%mlat1+1) then
- njhave = njhave+1
- jhave(njhave) = jneed(j)
- endif
- enddo
- if (njhave > 0 .and. tasks(n)%magtidi==magtidi) then
- isrc = tasks(n)%magtidj ! task id in cols_comm to recv from
- nlons = mlon11-mlon00+1
- len = nlons*njhave*nf
- msgid = mytid*10000+n
- rcvbuf = 0._r8
- call mpi_recv(rcvbuf(1:nlons,1:njhave,:),len,MPI_REAL8, &
- isrc,msgid,cols_comm,irstat,ier)
- if (ier /= 0) &
- call handle_mpi_err(ier,'mp_mag_jslot call mpi_recv')
- !
- ! Get data from receive buffer:
- ! real,intent(out) :: fout(mlon00:mlon11,mxneed) ! returned data at needed lats
- do ifld=1,nf
- do j=1,njhave
- nj = ixfind(jneed,mxneed,jhave(j),icount)
- if (nj==0) call endrun('jhave(j) not in jneed')
- do i=mlon00,mlon11
- fout(i,nj,ifld) = rcvbuf(i-mlon00+1,j,ifld)
- enddo
- enddo ! j=1,njhave
- enddo ! ifld=1,nf
- endif ! jhave > 0
- enddo ! n=0,ntask-1
- endif ! njneed > 0
-
- end subroutine mp_mag_jslot
-!-----------------------------------------------------------------------
- subroutine mp_gatherlons_f3d(f,k0,k1,i0,i1,j0,j1,nflds)
-!
-! Gather longitude data in a row of tasks to leftmost task in the row.
-! On entry f(k0:k1,i0:i1,j0:j1,nflds) is defined for current task.
-! On exit f(k0:k1,nlonp4,j0:j1,nflds) is defined for task with mytidi==0.
-!
-
-!
-! Args:
-!
- integer,intent(in) :: k0,k1,i0,i1,j0,j1,nflds
-! real(r8),intent(inout) :: f(k0:k1,nlon,j0:j1,nflds)
- type(array_ptr_type) :: f(nflds) ! f(n)%ptr(k0:k1,nlon,j0:j1)
-!
-! Local:
-!
- integer :: irstat(MPI_STATUS_SIZE) ! mpi receive status
- integer :: j,n,nlons,nlonrecv,nlevs,len,idest,isrc,ier, &
- isend,irecv,itask,lonrecv0,lonrecv1,mtag
- real(r8) :: &
- sndbuf(k0:k1,mxlon,mxlat+4,nflds), & ! send buffer
- rcvbuf(k0:k1,mxlon,mxlat+4,nflds) ! recv buffer
-!
-! Exec:
-!
- nlons = i1-i0+1
- nlevs = k1-k0+1
-
- sndbuf = 0._r8
- rcvbuf = 0._r8
- len = nlevs*mxlon*(mxlat+4)*nflds ! +4 is for when this is called from mp_pole_halos
-!
-! If mytidi==0, receive from other tasks in my row (mytidi>0,mytidj):
- if (mytidi == 0) then
- do itask=1,ntaski-1
- isrc = itask_table_geo(itask,mytidj)
- mtag = isrc+mytid
- call mpi_irecv(rcvbuf,len,MPI_REAL8,isrc,mtag,mpi_comm_edyn,irecv,ier)
- if (ier /= 0) &
- call handle_mpi_err(ier,'mp_gatherlons_f3d recv fm isrc')
- call mpi_wait(irecv,irstat,ier)
- if (ier /= 0) &
- call handle_mpi_err(ier,'mp_gatherlons_f3d wait for recv0')
-!
-! Copy data from receive buffer:
- lonrecv0 = tasks(isrc)%lon0
- lonrecv1 = tasks(isrc)%lon1
- nlonrecv = lonrecv1-lonrecv0+1
- do n=1,nflds
- do j=j0,j1
- f(n)%ptr(k0:k1,lonrecv0:lonrecv1,j) = rcvbuf(k0:k1,1:nlonrecv,j-j0+1,n)
- enddo ! j=j0,j1
- enddo ! n=1,nflds
- enddo ! itask=1,ntaski-1
-!
-! If mytidi > 0, load send buffer, and send to task (0,mytidj):
- else ! mytidi /= 0
- idest = itask_table_geo(0,mytidj)
- do n=1,nflds
- do j=j0,j1
- sndbuf(:,1:nlons,j-j0+1,n) = f(n)%ptr(k0:k1,i0:i1,j)
- enddo ! j=j0,j1
- enddo ! n=1,nflds
- mtag = idest+mytid
- call mpi_isend(sndbuf,len,MPI_REAL8,idest,mtag,mpi_comm_edyn,isend,ier)
+ if (any(jneed(1:njneed)==-1)) call endrun('mp_mag_jslot jneed')
+ !
+ call MPI_Comm_rank(cols_comm,tij,ier)
+ call MPI_buffer_attach(buffer,(mxmaglon+2)*mxneed*nf*sndbuf_cntr_max,ier)
+ if (ier /= 0) &
+ call handle_mpi_err(ier,'mp_mag_jslot call mpi_buffer_attach')
+
+ !
+ ! Send needed lat indices to all tasks in my column:
+ ! (redundant for alltoall)
+ do n=0,nmagtaskj-1
+ jneedall(:,n) = jneed(:)
+ enddo
+
+ call mpi_alltoall(jneedall,mxneed,MPI_INTEGER, &
+ peersneed,mxneed,MPI_INTEGER,cols_comm,ier)
if (ier /= 0) &
- call handle_mpi_err(ier,'mp_gatherlons_f3d send0 to idest')
- call mpi_wait(isend,irstat,ier)
+ call handle_mpi_err(ier,'mp_mag_jslot call mpi_alltoall')
+ !
+ ! Check if I have any needed lats, and who to send to:
+ do n=0,nmagtaskj-1
+ if (n==tij) cycle
+ njhave = 0
+ do j=1,mxneed
+ if (peersneed(j,n) >= mlat00.and.peersneed(j,n) <= mlat11)then
+ njhave = njhave+1
+ jhave(njhave) = peersneed(j,n)
+ idest = n
+ wid = itask_table_geo(mytidi,idest)
+ endif
+ enddo
+ if (njhave > 0) then
+
+ sndbuf_cntr = sndbuf_cntr + 1
+ if (sndbuf_cntr > sndbuf_cntr_max) call endrun('sndbuf_cntr exceeded sndbuf_cntr_max')
+
+ !
+ ! Load send buffer:
+ nlons = mlon11-mlon00+1
+ do ifld=1,nf
+ do j=1,njhave
+ do i=mlon00,mlon11
+ sndbuf(i-mlon00+1,j,ifld,sndbuf_cntr) = fin(i,jhave(j),ifld)
+ enddo
+ enddo
+ enddo
+ len = nlons*njhave*nf
+ msgid = mytid+wid*10000
+ call mpi_ibsend(sndbuf(1:nlons,1:njhave,:,sndbuf_cntr),len,MPI_REAL8, &
+ idest,msgid,cols_comm,ibsend_requests(sndbuf_cntr),ier)
+ if (ier /= 0) &
+ call handle_mpi_err(ier,'mp_mag_jslot call mpi_ibsend')
+ endif
+ enddo ! n=0,nmagtaskj-1
+
+ call MPI_waitall(sndbuf_cntr,ibsend_requests,isstat,ier)
if (ier /= 0) &
- call handle_mpi_err(ier,'mp_gatherlons_f3d wait for send0')
- endif ! mytidi==0
- end subroutine mp_gatherlons_f3d
-!-----------------------------------------------------------------------
- subroutine mp_scatterlons_f3d(f,k0,k1,i0,i1,j0,j1,nflds)
-!
-! Redistribute longitudes from left most task in j-row to other tasks
-! in the row.
-! On input, f(:,nlonp4,j0:j1,nflds) is defined for tasks with mytidi==0.
-! On output, f(:,i0:i1,j0:j1,nflds) is defined for all tasks.
-!
-! Args:
-!
- integer,intent(in) :: k0,k1,i0,i1,j0,j1,nflds
- type(array_ptr_type) :: f(nflds) ! f(n)%ptr(k0:k1,nlon,j0:j1)
-!
-! Local:
-!
- integer :: irstat(MPI_STATUS_SIZE) ! mpi receive status
- integer :: j,n,nlevs,nlons,nlonsend,len,idest,isrc,ier, &
- isend,irecv,itask,lonsend0,lonsend1,mtag
- real(r8) :: &
- sndbuf(k0:k1,mxlon,mxlat+4,nflds), & ! send buffer
- rcvbuf(k0:k1,mxlon,mxlat+4,nflds) ! recv buffer
-!
-! Exec:
-!
- nlons = i1-i0+1
- nlevs = k1-k0+1
-
- sndbuf = 0._r8 ; rcvbuf = 0._r8
- len = nlevs*mxlon*(mxlat+4)*nflds ! +4 is for when this is called from mp_pole_halos
-!
-! If mytidi==0, send to other tasks in my row (mytidi>0,mytidj):
- if (mytidi == 0) then
- do itask=1,ntaski-1
- idest = itask_table_geo(itask,mytidj)
- lonsend0 = tasks(idest)%lon0
- lonsend1 = tasks(idest)%lon1
- nlonsend = lonsend1-lonsend0+1
- mtag = idest+mytid
- do n=1,nflds
- do j=j0,j1
- sndbuf(:,1:nlonsend,j-j0+1,n) = f(n)%ptr(:,lonsend0:lonsend1,j)
- enddo ! j=j0,j1
- enddo ! n=1,nflds
- mtag = idest+mytid
- call mpi_isend(sndbuf,len,MPI_REAL8,idest,mtag,mpi_comm_edyn,isend,ier)
- if (ier /= 0) call handle_mpi_err(ier,'mp_scatterlons_f3d send to idest')
- call mpi_wait(isend,irstat,ier)
- if (ier /= 0) call handle_mpi_err(ier,'mp_scatterlons_f3d wait for send')
- enddo ! itask=1,ntaski-1
-!
-! If mytidi > 0, receive from task (0,mytidj):
- else
- isrc = itask_table_geo(0,mytidj)
- mtag = isrc+mytid
- call mpi_irecv(rcvbuf,len,MPI_REAL8,isrc,mtag,mpi_comm_edyn,irecv,ier)
+ call handle_mpi_err(ier,'mp_mag_jslot call mpi_waitall')
+ call MPI_buffer_detach(buffer,(mxmaglon+2)*mxneed*nf*sndbuf_cntr_max,ier)
if (ier /= 0) &
- call handle_mpi_err(ier,'mp_scatterlons_f3d recv fm isrc')
- call mpi_wait(irecv,irstat,ier)
- if (ier /= 0) &
- call handle_mpi_err(ier,'mp_scatterlons_f3d wait for recv')
- do n=1,nflds
- do j=j0,j1
- f(n)%ptr(:,i0:i1,j) = rcvbuf(:,1:nlons,j-j0+1,n)
- enddo ! j=j0,j1
- enddo ! n=1,nflds
- endif
- end subroutine mp_scatterlons_f3d
-!-----------------------------------------------------------------------
- subroutine handle_mpi_err(ierrcode,string)
-!
-! Args:
- integer,intent(in) :: ierrcode
- character(len=*) :: string
-!
-! Local:
- character(len=80) :: errstring
- integer :: len_errstring, ierr
-!
- call mpi_error_string(ierrcode,errstring,len_errstring, ierr)
- write(iulog,"(/,'>>> mpi error: ',a)") trim(string)
- write(iulog,"(' ierrcode=',i3,': ',a)") trim(errstring)
- end subroutine handle_mpi_err
-!-----------------------------------------------------------------------
- integer function ixfind(iarray,idim,itarget,icount)
-!
-! Search iarray(idim) for itarget, returning first index in iarray
-! where iarray(idim)==target. Also return number of elements of
-! iarray that == itarget in icount.
-!
-! Args:
- integer,intent(in) :: idim,itarget
- integer,intent(in) :: iarray(idim)
- integer,intent(out) :: icount
-!
-! Local:
- integer :: i
-!
- ixfind = 0
- icount = 0
- if (.not.any(iarray==itarget)) return
- icount = count(iarray==itarget)
- do i=1,idim
- if (iarray(i)==itarget) then
- ixfind = i
- exit
+ call handle_mpi_err(ier,'mp_mag_jslot call mpi_buffer_detach')
+
+ !
+ ! Determine which tasks to receive which lats from. Task to
+ ! receive from must be in same task column magtidi as I am.
+ if (njneed > 0) then
+ njhave = 0
+ jhave(:) = -1
+ do n=0,ntask-1
+ njhave = 0
+ do j=1,njneed
+ if (jneed(j) >= tasks(n)%mlat0-1 .and. &
+ jneed(j) <= tasks(n)%mlat1+1) then
+ njhave = njhave+1
+ jhave(njhave) = jneed(j)
+ endif
+ enddo
+ if (njhave > 0 .and. tasks(n)%magtidi==magtidi) then
+ isrc = tasks(n)%magtidj ! task id in cols_comm to recv from
+ nlons = mlon11-mlon00+1
+ len = nlons*njhave*nf
+ msgid = mytid*10000+n
+ rcvbuf = 0._r8
+ call mpi_recv(rcvbuf(1:nlons,1:njhave,:),len,MPI_REAL8, &
+ isrc,msgid,cols_comm,irstat,ier)
+ if (ier /= 0) &
+ call handle_mpi_err(ier,'mp_mag_jslot call mpi_recv')
+ !
+ ! Get data from receive buffer:
+ ! real,intent(out) :: fout(mlon00:mlon11,mxneed) ! returned data at needed lats
+ do ifld=1,nf
+ do j=1,njhave
+ nj = ixfind(jneed,mxneed,jhave(j),icount)
+ if (nj==0) call endrun('jhave(j) not in jneed')
+ do i=mlon00,mlon11
+ fout(i,nj,ifld) = rcvbuf(i-mlon00+1,j,ifld)
+ enddo
+ enddo ! j=1,njhave
+ enddo ! ifld=1,nf
+ endif ! njhave > 0
+ enddo ! n=0,ntask-1
+ endif ! njneed > 0
+
+ end subroutine mp_mag_jslot
+ !-----------------------------------------------------------------------
+ subroutine mp_gatherlons_f3d(f,k0,k1,i0,i1,j0,j1,nflds)
+ !
+ ! Gather longitude data in a row of tasks to leftmost task in the row.
+ ! On entry f(k0:k1,i0:i1,j0:j1,nflds) is defined for current task.
+ ! On exit f(k0:k1,nlon,j0:j1,nflds) is defined for task with mytidi==0.
+ !
+
+ !
+ ! Args:
+ !
+ integer,intent(in) :: k0,k1,i0,i1,j0,j1,nflds
+ ! real(r8),intent(inout) :: f(k0:k1,nlon,j0:j1,nflds)
+ type(array_ptr_type) :: f(nflds) ! f(n)%ptr(k0:k1,nlon,j0:j1)
+ !
+ ! Local:
+ !
+ integer :: irstat(MPI_STATUS_SIZE) ! mpi receive status
+ integer :: j,n,nlons,nlonrecv,nlevs,len,idest,isrc,ier, &
+ isend,irecv,itask,lonrecv0,lonrecv1,mtag
+ real(r8) :: &
+ sndbuf(k0:k1,mxlon,mxlat+4,nflds), & ! send buffer
+ rcvbuf(k0:k1,mxlon,mxlat+4,nflds) ! recv buffer
+ !
+ ! Exec:
+ !
+ nlons = i1-i0+1
+ nlevs = k1-k0+1
+
+ sndbuf = 0._r8
+ rcvbuf = 0._r8
+ len = nlevs*mxlon*(mxlat+4)*nflds ! +4 is for when this is called from mp_pole_halos
+ !
+ ! If mytidi==0, receive from other tasks in my row (mytidi>0,mytidj):
+ if (mytidi == 0) then
+ do itask=1,ntaski-1
+ isrc = itask_table_geo(itask,mytidj)
+ mtag = isrc+mytid
+ call mpi_irecv(rcvbuf,len,MPI_REAL8,isrc,mtag,mpi_comm_edyn,irecv,ier)
+ if (ier /= 0) &
+ call handle_mpi_err(ier,'mp_gatherlons_f3d recv fm isrc')
+ call mpi_wait(irecv,irstat,ier)
+ if (ier /= 0) &
+ call handle_mpi_err(ier,'mp_gatherlons_f3d wait for recv0')
+ !
+ ! Copy data from receive buffer:
+ lonrecv0 = tasks(isrc)%lon0
+ lonrecv1 = tasks(isrc)%lon1
+ nlonrecv = lonrecv1-lonrecv0+1
+ do n=1,nflds
+ do j=j0,j1
+ f(n)%ptr(k0:k1,lonrecv0:lonrecv1,j) = rcvbuf(k0:k1,1:nlonrecv,j-j0+1,n)
+ enddo ! j=j0,j1
+ enddo ! n=1,nflds
+ enddo ! itask=1,ntaski-1
+ !
+ ! If mytidi > 0, load send buffer, and send to task (0,mytidj):
+ else ! mytidi /= 0
+ idest = itask_table_geo(0,mytidj)
+ do n=1,nflds
+ do j=j0,j1
+ sndbuf(:,1:nlons,j-j0+1,n) = f(n)%ptr(k0:k1,i0:i1,j)
+ enddo ! j=j0,j1
+ enddo ! n=1,nflds
+ mtag = idest+mytid
+ call mpi_isend(sndbuf,len,MPI_REAL8,idest,mtag,mpi_comm_edyn,isend,ier)
+ if (ier /= 0) &
+ call handle_mpi_err(ier,'mp_gatherlons_f3d send0 to idest')
+ call mpi_wait(isend,irstat,ier)
+ if (ier /= 0) &
+ call handle_mpi_err(ier,'mp_gatherlons_f3d wait for send0')
+ endif ! mytidi==0
+ end subroutine mp_gatherlons_f3d
+ !-----------------------------------------------------------------------
+ subroutine mp_scatterlons_f3d(f,k0,k1,i0,i1,j0,j1,nflds)
+ !
+ ! Redistribute longitudes from left most task in j-row to other tasks
+ ! in the row.
+ ! On input, f(:,nlon,j0:j1,nflds) is defined for tasks with mytidi==0.
+ ! On output, f(:,i0:i1,j0:j1,nflds) is defined for all tasks.
+ !
+ ! Args:
+ !
+ integer,intent(in) :: k0,k1,i0,i1,j0,j1,nflds
+ type(array_ptr_type) :: f(nflds) ! f(n)%ptr(k0:k1,nlon,j0:j1)
+ !
+ ! Local:
+ !
+ integer :: irstat(MPI_STATUS_SIZE) ! mpi receive status
+ integer :: j,n,nlevs,nlons,nlonsend,len,idest,isrc,ier, &
+ isend,irecv,itask,lonsend0,lonsend1,mtag
+ real(r8) :: &
+ sndbuf(k0:k1,mxlon,mxlat+4,nflds), & ! send buffer
+ rcvbuf(k0:k1,mxlon,mxlat+4,nflds) ! recv buffer
+ !
+ ! Exec:
+ !
+ nlons = i1-i0+1
+ nlevs = k1-k0+1
+
+ sndbuf = 0._r8 ; rcvbuf = 0._r8
+ len = nlevs*mxlon*(mxlat+4)*nflds ! +4 is for when this is called from mp_pole_halos
+ !
+ ! If mytidi==0, send to other tasks in my row (mytidi>0,mytidj):
+ if (mytidi == 0) then
+ do itask=1,ntaski-1
+ idest = itask_table_geo(itask,mytidj)
+ lonsend0 = tasks(idest)%lon0
+ lonsend1 = tasks(idest)%lon1
+ nlonsend = lonsend1-lonsend0+1
+ mtag = idest+mytid
+ do n=1,nflds
+ do j=j0,j1
+ sndbuf(:,1:nlonsend,j-j0+1,n) = f(n)%ptr(:,lonsend0:lonsend1,j)
+ enddo ! j=j0,j1
+ enddo ! n=1,nflds
+ mtag = idest+mytid
+ call mpi_isend(sndbuf,len,MPI_REAL8,idest,mtag,mpi_comm_edyn,isend,ier)
+ if (ier /= 0) call handle_mpi_err(ier,'mp_scatterlons_f3d send to idest')
+ call mpi_wait(isend,irstat,ier)
+ if (ier /= 0) call handle_mpi_err(ier,'mp_scatterlons_f3d wait for send')
+ enddo ! itask=1,ntaski-1
+ !
+ ! If mytidi > 0, receive from task (0,mytidj):
+ else
+ isrc = itask_table_geo(0,mytidj)
+ mtag = isrc+mytid
+ call mpi_irecv(rcvbuf,len,MPI_REAL8,isrc,mtag,mpi_comm_edyn,irecv,ier)
+ if (ier /= 0) &
+ call handle_mpi_err(ier,'mp_scatterlons_f3d recv fm isrc')
+ call mpi_wait(irecv,irstat,ier)
+ if (ier /= 0) &
+ call handle_mpi_err(ier,'mp_scatterlons_f3d wait for recv')
+ do n=1,nflds
+ do j=j0,j1
+ f(n)%ptr(:,i0:i1,j) = rcvbuf(:,1:nlons,j-j0+1,n)
+ enddo ! j=j0,j1
+ enddo ! n=1,nflds
endif
- enddo
- end function ixfind
-
-!-----------------------------------------------------------------------
- subroutine setpoles(f,k0,k1,i0,i1,j0,j1)
-!
-! Args:
- integer,intent(in) :: k0,k1,i0,i1,j0,j1
- real(r8),intent(inout) :: f(k0:k1,i0:i1,j0:j1)
-!
-! Local:
- integer :: i,j,k,lon0,lon1,it,itask
- type(array_ptr_type) :: ptr(1)
- real(r8) :: fave(k0:k1)
- real(r8) :: rnlon
-
- if (j0 /= 1 .and. j1 /= nlat) return ! subdomain does not include poles
-
- rnlon = dble(nlon)
- allocate(ptr(1)%ptr(k0:k1,nlon,j0:j1))
-!
-! Define subdomains in global longitude dimension of ptmp:
-!
- do j=j0,j1
- do i=i0,i1
- ptr(1)%ptr(k0:k1,i,j) = f(k0:k1,i,j)
- enddo
- enddo
-!
-! Get values for global longitudes at the latitude below each pole,
-! average them at each level, and assign the average redundantly
-! to all lons at each pole.
-!
- call mp_gatherlons_f3d(ptr,k0,k1,i0,i1,j0,j1,1)
-!
- if (mytidi==0) then ! only westernmost tasks have global longitudes
-
- if (j0 == 1) then ! subdomain includes south pole
- fave(:) = 0._r8
-!
-! Find average of all lons at each level, at first lat equatorward of south pole.
-!
- do k=k0,k1
- do i=1,nlon
- fave(k) = fave(k)+ptr(1)%ptr(k,i,j0+1)
- enddo
- fave(k) = fave(k) / rnlon
- enddo
- if (debug.and.masterproc) write(iulog,"('setpoles: spole ave(k0:k1)=',/,(8es12.4))") fave
-!
-! Define south pole in ptmp on subdomains for each tasks in my latitude row
-! (I am SW corner task):
-!
- do it=0,ntaski-1
- itask = tasks(itask_table_geo(it,mytidj))%mytid
- lon0 = tasks(itask)%lon0
- lon1 = tasks(itask)%lon1
- do k=k0,k1
- ptr(1)%ptr(k,lon0:lon1,j0) = fave(k) ! all lons get the average
- enddo
- enddo
- endif ! south pole
-
- if (j1 == nlat) then ! subdomain includes north pole
- fave(:) = 0._r8
-!
-! Find average of all lons at each level, at first lat equatorward of north pole.
-!
- do k=k0,k1
- do i=1,nlon
- fave(k) = fave(k)+ptr(1)%ptr(k,i,j1-1)
- enddo
- fave(k) = fave(k) / rnlon
- enddo
- if (debug.and.masterproc) write(iulog,"('setpoles: npole fave(k0:k1)=',/,(8es12.4))") fave
-!
-! Define north pole in ptmp on subdomains for each tasks in my latitude row
-! (I am NW corner task):
-!
- do it=0,ntaski-1
- itask = tasks(itask_table_geo(it,mytidj))%mytid
- lon0 = tasks(itask)%lon0
- lon1 = tasks(itask)%lon1
- do k=k0,k1
- ptr(1)%ptr(k,lon0:lon1,j1) = fave(k)
- enddo
- enddo
- endif ! north pole
- endif ! mytidj==0
-!
-! Scatter to tasks in my latitude row:
-!
- call mp_scatterlons_f3d(ptr,k0,k1,i0,i1,j0,j1,1)
-!
-! Define poles on current subdomain inout arg array:
-!
- if (j0==1) then
- do i=i0,i1
- do k=k0,k1
- f(k,i,j0) = ptr(1)%ptr(k,i,j0)
- enddo
- enddo
- endif
- if (j1==nlat) then
- do i=i0,i1
- do k=k0,k1
- f(k,i,j1) = ptr(1)%ptr(k,i,j1)
- enddo
- enddo
- endif
- deallocate(ptr(1)%ptr)
- end subroutine setpoles
-!-----------------------------------------------------------------------
- subroutine lonshift_blocks(f,k0,k1,i0,i1,j0,j1,nfields)
-!
-! On input, field(s) f are in subdomains
-! On output, field(s) f subdomain longitudes are shifted by 180 degrees
-! (either 0->360 to -180->+180, or the reverse)
-!
- use edyn_geogrid ,only: nlon
-!
-! Args:
- integer :: k0,k1,i0,i1,j0,j1,nfields
- type(array_ptr_type) :: f(nfields) ! f(n)%ptr(k0:k1,i0:i1,j0:j1)
-!
-! Local variables
-!
- integer :: i,j,k,ifield
- integer :: midpoint ! middle point of longitude dimension
- real(r8) :: flons(nlon) ! fields at global longitudes
- type(array_ptr_type) :: pglblon(nfields) ! pglblon(n)%ptr(k0:k1,nlon,j0:j1)
-!
-! Shift longitude grid from 0 to 360 to -180 to 180 for edynamo
-! Check for compatible geographic longitude dimension and quit if not compatible
-!
- if (nlon /= 576 .and. nlon /= 288 .and. nlon /= 144 .and. nlon /= 80 .and. nlon /= 72 .and. nlon /= 24) then
- write(iulog,"('ERROR lonshift_blocks: incompatible nlon = ',i5,' i0,i1=',2i4)") nlon,i0,i1
- call endrun
- end if
-!
-! Load subdomains into local global longitude pointer:
- do ifield=1,nfields
- allocate(pglblon(ifield)%ptr(k0:k1,nlon,j0:j1))
- do j=j0,j1
- do i=i0,i1
- pglblon(ifield)%ptr(k0:k1,i,j) = f(ifield)%ptr(k0:k1,i,j)
- enddo
+ end subroutine mp_scatterlons_f3d
+ !-----------------------------------------------------------------------
+ subroutine handle_mpi_err(ierrcode,string)
+ !
+ ! Args:
+ integer,intent(in) :: ierrcode
+ character(len=*) :: string
+ !
+ ! Local:
+ character(len=80) :: errstring
+ integer :: len_errstring, ierr
+ !
+ call mpi_error_string(ierrcode,errstring,len_errstring, ierr)
+ write(iulog,"(/,'>>> mpi error: ',a)") trim(string)
+ write(iulog,"(' ierrcode=',i3,': ',a)") trim(errstring)
+ end subroutine handle_mpi_err
+ !-----------------------------------------------------------------------
+ integer function ixfind(iarray,idim,itarget,icount)
+ !
+ ! Search iarray(idim) for itarget, returning first index in iarray
+ ! where iarray(idim)==target. Also return number of elements of
+ ! iarray that == itarget in icount.
+ !
+ ! Args:
+ integer,intent(in) :: idim,itarget
+ integer,intent(in) :: iarray(idim)
+ integer,intent(out) :: icount
+ !
+ ! Local:
+ integer :: i
+ !
+ ixfind = 0
+ icount = 0
+ if (.not.any(iarray==itarget)) return
+ icount = count(iarray==itarget)
+ do i=1,idim
+ if (iarray(i)==itarget) then
+ ixfind = i
+ exit
+ endif
enddo
- enddo
-
- call mp_gatherlons_f3d(pglblon,k0,k1,i0,i1,j0,j1,nfields)
-!
-! Only leftmost tasks (mytidi=0) at each latitude does the longitude shift for that latitude
-!
- if (mytidi==0) then
+ end function ixfind
+
+ !-----------------------------------------------------------------------
+ subroutine setpoles(f,k0,k1,i0,i1,j0,j1)
+ !
+ ! Args:
+ integer,intent(in) :: k0,k1,i0,i1,j0,j1
+ real(r8),intent(inout) :: f(k0:k1,i0:i1,j0:j1)
+ !
+ ! Local:
+ integer :: i,j,k,lon0,lon1,it,itask
+ type(array_ptr_type) :: ptr(1)
+ real(r8) :: fave(k0:k1)
+ real(r8) :: rnlon
+
+ if (j0 /= 1 .and. j1 /= nlat_geo) then
+ return ! subdomain does not include poles
+ end if
+
+ rnlon = real(nlon_geo,kind=r8)
+ allocate(ptr(1)%ptr(k0:k1,nlon_geo,j0:j1))
+ !
+ ! Define subdomains in global longitude dimension of ptmp:
+ !
do j=j0,j1
- midpoint = nlon/2
- do ifield = 1,nfields
- do k = k0,k1
- flons(:) = pglblon(ifield)%ptr(k,1:nlon,j)
- flons = cshift(flons,midpoint)
- pglblon(ifield)%ptr(k,1:nlon,j) = flons(:)
- enddo ! k0,k1
- enddo ! nfields
- enddo ! j=j0,j1
- endif ! mytidi==0
-!
-! Now leftmost task at each j-row must redistribute filtered data
-! back to other tasks in the j-row (mytidi>0,mytidj) (includes latitude):
-!
- call mp_scatterlons_f3d(pglblon,k0,k1,i0,i1,j0,j1,nfields)
-!
-! Update fields argument:
- do ifield=1,nfields
- do j=j0,j1
- do i=i0,i1
- f(ifield)%ptr(k0:k1,i,j) = pglblon(ifield)%ptr(k0:k1,i,j)
- enddo
+ do i=i0,i1
+ ptr(1)%ptr(k0:k1,i,j) = f(k0:k1,i,j)
+ enddo
enddo
- enddo
-
- do ifield=1,nfields
- deallocate(pglblon(ifield)%ptr)
- enddo
- end subroutine lonshift_blocks
-!-----------------------------------------------------------------------
- subroutine switch_model_format(fptr,k0,k1,i0,i1,j0,j1,nfields)
-!
-! fptr is array of pointer structures to nfields fields. Convert these
-! fields in "model format", i.e., phase shift longitude data by 180 degrees,
-! and invert the vertical dimension. This may be converting from WACCM to
-! TIEGCM, or the reverse. It is up to the calling routine to keep track of
-! which model format the data is being converted from/to.
-! (This routine does not do unit conversion on the fields)
-!
-! Args:
- integer,intent(in) :: k0,k1,i0,i1,j0,j1,nfields
-!
-! Pointer structures to each field:
- type(array_ptr_type) :: fptr(nfields) ! (fptr(n)%ptr(k0:k1,i0:i1,j0:j1))
-!
-! Local:
- integer :: ifield
-!
-! Phase shift longitudes by 180 degrees:
-!
- call lonshift_blocks(fptr,k0,k1,i0,i1,j0,j1,nfields)
-!
-! Invert vertical dimension:
-!
- do ifield=1,nfields
- fptr(ifield)%ptr(k0:k1,i0:i1,j0:j1) = fptr(ifield)%ptr(k1:k0:-1,i0:i1,j0:j1)
- enddo
- end subroutine switch_model_format
-!-----------------------------------------------------------------------
+ !
+ ! Get values for global longitudes at the latitude below each pole,
+ ! average them at each level, and assign the average redundantly
+ ! to all lons at each pole.
+ !
+ call mp_gatherlons_f3d(ptr,k0,k1,i0,i1,j0,j1,1)
+ !
+ if (mytidi==0) then ! only westernmost tasks have global longitudes
+
+ if (j0 == 1) then ! subdomain includes south pole
+ fave(:) = 0._r8
+ !
+ ! Find average of all lons at each level, at first lat equatorward of south pole.
+ !
+ do k=k0,k1
+ do i=1,nlon_geo
+ fave(k) = fave(k)+ptr(1)%ptr(k,i,j0+1)
+ enddo
+ fave(k) = fave(k) / rnlon
+ enddo
+ !
+ ! Define south pole in ptmp on subdomains for each tasks in my latitude row
+ ! (I am SW corner task):
+ !
+ do it=0,ntaski-1
+ itask = tasks(itask_table_geo(it,mytidj))%mytid
+ lon0 = tasks(itask)%lon0
+ lon1 = tasks(itask)%lon1
+ do k=k0,k1
+ ptr(1)%ptr(k,lon0:lon1,j0) = fave(k) ! all lons get the average
+ enddo
+ enddo
+ endif ! south pole
+
+ if (j1 == nlat_geo) then ! subdomain includes north pole
+ fave(:) = 0._r8
+ !
+ ! Find average of all lons at each level, at first lat equatorward of north pole.
+ !
+ do k=k0,k1
+ do i=1,nlon_geo
+ fave(k) = fave(k)+ptr(1)%ptr(k,i,j1-1)
+ enddo
+ fave(k) = fave(k) / rnlon
+ enddo
+ if (debug.and.masterproc) write(iulog,"('setpoles: npole fave(k0:k1)=',/,(8es12.4))") fave
+ !
+ ! Define north pole in ptmp on subdomains for each tasks in my latitude row
+ ! (I am NW corner task):
+ !
+ do it=0,ntaski-1
+ itask = tasks(itask_table_geo(it,mytidj))%mytid
+ lon0 = tasks(itask)%lon0
+ lon1 = tasks(itask)%lon1
+ do k=k0,k1
+ ptr(1)%ptr(k,lon0:lon1,j1) = fave(k)
+ enddo
+ enddo
+ endif ! north pole
+ endif ! mytidj==0
+ !
+ ! Scatter to tasks in my latitude row:
+ !
+ call mp_scatterlons_f3d(ptr,k0,k1,i0,i1,j0,j1,1)
+ !
+ ! Define poles on current subdomain inout arg array:
+ !
+ if (j0==1) then
+ do i=i0,i1
+ do k=k0,k1
+ f(k,i,j0) = ptr(1)%ptr(k,i,j0)
+ enddo
+ enddo
+ endif
+ if (j1==nlat_geo) then
+ do i=i0,i1
+ do k=k0,k1
+ f(k,i,j1) = ptr(1)%ptr(k,i,j1)
+ enddo
+ enddo
+ endif
+ deallocate(ptr(1)%ptr)
+ end subroutine setpoles
end module edyn_mpi
diff --git a/src/ionosphere/waccmx/edyn_mud.F90 b/src/ionosphere/waccmx/edyn_mud.F90
index 614fd52b9a..44c8416cbf 100644
--- a/src/ionosphere/waccmx/edyn_mud.F90
+++ b/src/ionosphere/waccmx/edyn_mud.F90
@@ -1,159 +1,20 @@
-!-----------------------------------------------------------------------
- subroutine mud(pe,jntl,isolve)
- use shr_kind_mod ,only: r8 => shr_kind_r8
- use cam_abortutils ,only: endrun
- use edyn_solve,only: nc,ncee,cee
-!
- implicit none
- integer,intent(in) :: isolve
- integer jntl
-!
-! set grid size params
-!
- integer,parameter :: iixp = 5 , jjyq = 3, iiex = 5, jjey = 5
- integer,parameter :: nnx=iixp*2**(iiex-1)+1, nny=jjyq*2**(jjey-1)+1
-!
-! estimate work space for point relaxation (see mud2cr.d)
-!
- integer,parameter :: llwork=(7*(nnx+2)*(nny+2)+76*nnx*nny)/3
- real(r8) :: phi(nnx,nny),rhs(nnx,nny),work(llwork)
- real(r8) :: time0,time1
-!
-! put integer and floating point argument names in contiguous
-! storage for labelling in vectors iprm,fprm
-!
-! btf 1/21/14: dimension iprm(17) to match iprm in edyn_muh2cr.F90
-! integer iprm(16),mgopt(4)
- integer iprm(17),mgopt(4)
- real(r8) :: fprm(6)
- integer intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nx,ny, &
- iguess,maxcy,method,nwork,lwrkqd,itero
- common/itmud2cr/intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nx,ny, &
- iguess,maxcy,method,nwork,lwrkqd,itero
- real(r8) :: xa,xb,yc,yd,tolmax,relmax
- common/ftmud2cr/xa,xb,yc,yd,tolmax,relmax
- equivalence(intl,iprm)
- equivalence(xa,fprm)
- integer i,j,ierror
- real(r8) :: PE(NNX,1)
- integer maxcya
- DATA MAXCYA/150/
- integer mm,nn,jj,jjj
- real(r8) :: pi
-!
-! set input integer arguments
-!
- MM = NNX
- NN = NNY
- PI = 4._r8*ATAN(1._r8)
-!
-! SET INPUT INTEGER PARAMETERS
-!
- INTL = JNTL
-!
-! set boundary condition flags
-!
- nxa = 0
- nxb = 0
- nyc = 2
- nyd = 1
-!
-! set grid sizes from parameter statements
-!
- ixp = iixp
- jyq = jjyq
- iex = iiex
- jey = jjey
- nx = nnx
- ny = nny
-!
-! set multigrid arguments (w(2,1) cycling with fully weighted
-! residual restriction and cubic prolongation)
-!
- mgopt(1) = 2
- mgopt(2) = 2
- mgopt(3) = 1
- mgopt(4) = 3
-!
-! set for one cycle
-!
- maxcy = maxcya
-!
-! set no initial guess forcing full multigrid cycling
-!
- iguess = 0
-!
-! set work space length approximation from parameter statement
-!
- nwork = llwork
-!
-! set line z relaxation
-!
- method = 3
-!
-! set end points of solution rectangle in (x,y) space
-!
- xa = -pi
- xb = pi
- yc = 0.0_r8
- yd = 0.5_r8*pi
-!
-! set error control flag
-!
- tolmax = 0.01_r8
-!
-! set right hand side in rhs
-! initialize phi to zero
-!
- if (isolve >= 0) then ! called from dynamo
- do i=1,nx
- do j=1,ny
- RHS(I,J) = CEE(I+(J-1)*NX+9*NX*NY)
- phi(i,j) = 0.0_r8
- end do
- end do
-!
-! set specified boundaries in phi
-!
- DO I=1,NX
- PHI(I,NY) = RHS(I,NY)/CEE(I+(NY-1)*NX+8*NX*NY)
- END DO
-!
-! set specified boundaries in phi
-!
- endif ! isolve
-!
-! intialization call
-!
- call mud2cr(iprm,fprm,work,rhs,phi,mgopt,ierror,isolve)
- if (ierror.gt.0) call endrun('mud call init mud2cr')
-!
-! attempt solution
-!
- intl = 1
- call mud2cr(iprm,fprm,work,rhs,phi,mgopt,ierror,isolve)
- if (ierror.gt.0) call endrun('mud call solve mud2cr')
-!
-! COPY PHI TO PE
-!
- DO J = 1,NY
- JJ = NY+J-1
- JJJ = NY+1-J
- DO I = 1,NX
- PE(I,JJ) = PHI(I,J)
- PE(I,JJJ) = PHI(I,J)
- END DO
- END DO
-! ITRANS = 0
-! CALL EZCNTR(PE(1,JMX0),IMX0,JMX0)
-! ITRANS = 1
-! CALL SET(.05,.95,.05,.95,-1.,1.,-1.,1.,1)
-! CALL CONREC(PE(1,JMX0),IMX0,IMX0,JMX0,0.,0.,0.,1,0,-1430B)
-! CALL FRAME
-! ITRANS = 0
-! CALL EZCNTR(PE(1,JMX0),IMX0,JMX0)
-! ITRANS = 1
- end subroutine mud
+module edyn_mud
+ use shr_kind_mod,only: r8 => shr_kind_r8
+ use cam_abortutils,only: endrun
+ use edyn_mudcom, only: cor2, res2, factri, factrp, prolon2, trsfc2, swk2
+
+ implicit none
+
+ private
+
+ public :: mud2cr1
+ public :: dismd2cr
+ public :: adjmd2cr
+ public :: kcymd2cr
+ public :: relmd2cr
+ public :: resmd2cr
+
+ contains
!-----------------------------------------------------------------------
!
! file mud2cr.f (version 4.0 modified for Cicley 2/99)
@@ -167,7 +28,7 @@ end subroutine mud
! ... For MUDPACK information, visit the website:
! (https://www2.cisl.ucar.edu/resources/legacy/mudpack)
!
-! ... purpose
+! ... purpose
!
! mud2cr attempts to produce a second order finite difference
! approximation to the two dimensional nonseparable elliptic
@@ -180,7 +41,7 @@ end subroutine mud
! ... documentation
!
! see the documentation on above website for a complete discussion
-! of how to use subroutine mud2cr.
+! of how to use subroutine mud2cr.
!
! ... required MUDPACK files
!
@@ -190,8 +51,7 @@ end subroutine mud
!
subroutine mud2cr(iparm,fparm,work,rhs,phi,mgopt, &
ierror,isolve)
- use shr_kind_mod ,only: r8 => shr_kind_r8
- implicit none
+
integer,intent(in) :: isolve
integer iparm,mgopt,ierror
integer intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nfx,nfy,iguess, &
@@ -353,8 +213,7 @@ subroutine mud2cr(iparm,fparm,work,rhs,phi,mgopt, &
end subroutine mud2cr
!-----------------------------------------------------------------------
subroutine mud2cr1(nx,ny,rhsf,phif,wk)
- use shr_kind_mod ,only: r8 => shr_kind_r8
- implicit none
+
integer nx,ny
real(r8) :: phif(nx,ny),rhsf(nx,ny),wk(*)
integer intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nfx,nfy,iguess,&
@@ -484,12 +343,10 @@ subroutine mud2cr1(nx,ny,rhsf,phif,wk)
end subroutine mud2cr1
!-----------------------------------------------------------------------
subroutine kcymd2cr(wk)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! execute multigrid k cycle from kcur grid level
! kcycle=1 for v cycles, kcycle=2 for w cycles
!
- implicit none
real(r8) :: wk(*)
integer intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nfx,nfy,iguess,&
maxcy,method,nwork,lwork,itero,ngrid,klevel,kcur,&
@@ -646,13 +503,11 @@ subroutine kcymd2cr(wk)
end subroutine kcymd2cr
!-----------------------------------------------------------------------
subroutine dismd2cr(nx,ny,cf,tx,ty,wk,ier,isolve)
- use edyn_solve,only: nc,ncee,cee,ceee
- use shr_kind_mod ,only: r8 => shr_kind_r8
- use cam_abortutils ,only: endrun
+ use edyn_solver_coefs,only: nc,cee,ceee
+ use edyn_maggrid, only: res_nlev
!
! discretize elliptic pde for mud2cr, set nonfatal errors
!
- implicit none
integer,intent(in) :: isolve
integer nx,ny,i,j,l,im1,jm1,ier,nnx,nny
real(r8) :: cf(nx,ny,10),tx(nx,ny,*),ty(ny,nx,*)
@@ -675,7 +530,7 @@ subroutine dismd2cr(nx,ny,cf,tx,ty,wk,ier,isolve)
call endrun('dismd2cr in mud')
ENDIF
if (isolve >= 0) then
- call ceee(cee(nc(6-klevel)),nx,ny,cf)
+ call ceee(cee(nc(res_nlev+1-klevel)),nx,ny,cf)
endif
!
! set coefficient for specified boundaries
@@ -792,11 +647,9 @@ subroutine dismd2cr(nx,ny,cf,tx,ty,wk,ier,isolve)
end subroutine dismd2cr
!-----------------------------------------------------------------------
subroutine adjmd2cr(nx,ny,phi,cf)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! adjust righthand side in cf(i,j,10) for boundary conditions
!
- implicit none
integer intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nfx,nfy,iguess,&
maxcy,method,nwork,lwork,itero,ngrid,klevel,kcur,&
kcycle,iprer,ipost,intpol,kps
@@ -838,12 +691,10 @@ subroutine adjmd2cr(nx,ny,phi,cf)
end subroutine adjmd2cr
!-----------------------------------------------------------------------
subroutine resmd2cr(nx,ny,phi,ncx,ncy,phic,rhsc,cof,resf)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! restrict residual from fine to coarse mesh using fully weighted
! residual restriction
!
- implicit none
integer intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nfx,nfy,iguess,&
maxcy,method,nwork,lwork,itero,ngrid,klevel,kcur,&
kcycle,iprer,ipost,intpol,kps
@@ -887,11 +738,9 @@ subroutine resmd2cr(nx,ny,phi,ncx,ncy,phic,rhsc,cof,resf)
end subroutine resmd2cr
!-----------------------------------------------------------------------
subroutine relmd2cr(nx,ny,phi,cof,tx,ty,sum)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! relaxation for mud2
!
- implicit none
integer nx,ny
real(r8) :: phi(*),cof(*),tx(*),ty(*),sum(*)
integer intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nfx,nfy,iguess,&
@@ -914,11 +763,9 @@ subroutine relmd2cr(nx,ny,phi,cof,tx,ty,sum)
end subroutine relmd2cr
!-----------------------------------------------------------------------
subroutine relmd2crp(nx,ny,phi,cof)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! gauss-seidel four color point relaxation
!
- implicit none
integer nx,ny,i,j,lcolor,i1,i2,i3,i4,it
integer intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nfx,nfy,iguess,&
maxcy,method,nwork,lwork,itero,ngrid,klevel,kcur,&
@@ -1019,13 +866,11 @@ subroutine relmd2crp(nx,ny,phi,cof)
end subroutine relmd2crp
!-----------------------------------------------------------------------
subroutine slxmd2cr(nx,ny,phi,cof,tx,sum)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! line relaxation in the x direction (periodic or nonperiodic)
!
- implicit none
- integer nx,ny,i,ib,j,ii
+ integer nx,ny,i,ib,j
integer intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nfx,nfy,iguess,&
maxcy,method,nwork,lwork,itero,ngrid,klevel,kcur,&
kcycle,iprer,ipost,intpol,kps
@@ -1033,7 +878,6 @@ subroutine slxmd2cr(nx,ny,phi,cof,tx,sum)
iguess, maxcy,method,nwork,lwork,itero,ngrid,&
klevel,kcur,kcycle,iprer,ipost,intpol,kps
real(r8) :: phi(0:nx+1,0:ny+1),cof(nx,ny,10),tx(nx,ny,*),sum(ny)
- real(r8) :: starttime,endtime
!
! replace line x with point gauss-seidel if
! x direction is periodic and nx = 3 (coarsest)
@@ -1212,8 +1056,6 @@ subroutine slxmd2cr(nx,ny,phi,cof,tx,sum)
end subroutine slxmd2cr
!-----------------------------------------------------------------------
subroutine slymd2cr(nx,ny,phi,cof,ty,sum)
- use shr_kind_mod ,only: r8 => shr_kind_r8
- implicit none
integer nx,ny,i,j,jb
integer intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nfx,nfy,iguess, &
@@ -1223,7 +1065,6 @@ subroutine slymd2cr(nx,ny,phi,cof,ty,sum)
iguess, maxcy,method,nwork,lwork,itero,ngrid, &
klevel,kcur,kcycle,iprer,ipost,intpol,kps
real(r8) :: phi(0:nx+1,0:ny+1),cof(nx,ny,10),ty(ny,nx,*),sum(nx)
- real(r8) :: starttime,endtime
!
! replace line y with point gauss-seidel if
! y direction is periodic and ny = 3
@@ -1401,3 +1242,4 @@ subroutine slymd2cr(nx,ny,phi,cof,ty,sum)
return
end subroutine slymd2cr
!-----------------------------------------------------------------------
+end module edyn_mud
diff --git a/src/ionosphere/waccmx/edyn_mudcom.F90 b/src/ionosphere/waccmx/edyn_mudcom.F90
index bf840a4b8f..7c7595b67e 100644
--- a/src/ionosphere/waccmx/edyn_mudcom.F90
+++ b/src/ionosphere/waccmx/edyn_mudcom.F90
@@ -1,9 +1,23 @@
-!module mudcom
-! use shr_kind_mod ,only: r8 => shr_kind_r8
-! use cam_logfile ,only: iulog
-! use cam_abortutils ,only: endrun
+module edyn_mudcom
+ use shr_kind_mod, only: r8 => shr_kind_r8
+
+ implicit none
+
+ private
+
+ public :: cor2
+ public :: factri
+ public :: factrp
+ public :: swk2
+ public :: trsfc2
+ public :: prolon2
+ public :: res2
+ public :: sgfa
+ public :: sgsl
+ public :: transp
+
!-----------------------------------------------------------------------
-! contains
+ contains
!-----------------------------------------------------------------------
!
! file mudcom.f
@@ -28,7 +42,6 @@
!
!-----------------------------------------------------------------------
subroutine swk2(nfx,nfy,phif,rhsf,phi,rhs)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! set phif,rhsf input in arrays which include
! virtual boundaries for phi (for all 2-d real codes)
@@ -58,7 +71,6 @@ subroutine swk2(nfx,nfy,phif,rhsf,phi,rhs)
end subroutine swk2
!-----------------------------------------------------------------------
subroutine trsfc2(nx,ny,phi,rhs,ncx,ncy,phic,rhsc)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! transfer fine grid to coarse grid
!
@@ -118,7 +130,6 @@ subroutine trsfc2(nx,ny,phi,rhs,ncx,ncy,phic,rhsc)
end subroutine trsfc2
!-----------------------------------------------------------------------
subroutine res2(nx,ny,resf,ncx,ncy,rhsc,nxa,nxb,nyc,nyd)
- use shr_kind_mod ,only: r8 => shr_kind_r8
implicit none
integer nx,ny,ncx,ncy,nxa,nxb,nyc,nyd
integer i,j,ic,jc,im1,ip1,jm1,jp1,ix,jy
@@ -258,7 +269,6 @@ end subroutine res2
! prolon2 modified from rgrd2u 11/20/97
!
subroutine prolon2(ncx,ncy,p,nx,ny,q,nxa,nxb,nyc,nyd,intpol)
- use shr_kind_mod ,only: r8 => shr_kind_r8
implicit none
integer ncx,ncy,nx,ny,intpol,nxa,nxb,nyc,nyd
real(r8) :: p(0:ncx+1,0:ncy+1),q(0:nx+1,0:ny+1)
@@ -402,7 +412,6 @@ end subroutine prolon2
! 11/20/97 modification of rgrd1u.f for mudpack
!
subroutine prolon1(ncx,p,nx,q,nxa,nxb,intpol)
- use shr_kind_mod ,only: r8 => shr_kind_r8
implicit none
integer intpol,nxa,nxb,ncx,nx,i,ic,ist,ifn,ioddst,ioddfn
real(r8) :: p(0:ncx+1),q(0:nx+1)
@@ -500,7 +509,6 @@ subroutine prolon1(ncx,p,nx,q,nxa,nxb,intpol)
end subroutine prolon1
!-----------------------------------------------------------------------
subroutine cor2(nx,ny,phif,ncx,ncy,phic,nxa,nxb,nyc,nyd,intpol,phcor)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! add coarse grid correction in phic to fine grid approximation
! in phif using linear or cubic interpolation
@@ -552,7 +560,6 @@ subroutine cor2(nx,ny,phif,ncx,ncy,phic,nxa,nxb,nyc,nyd,intpol,phcor)
end subroutine cor2
!-----------------------------------------------------------------------
subroutine pde2(nx,ny,u,i,j,ux3,ux4,uy3,uy4,nxa,nyc)
- use shr_kind_mod ,only: r8 => shr_kind_r8
implicit none
integer nx,ny,i,j,nxa,nyc
real(r8) :: u(nx,ny),dlx,dly,dlxx,dlyy,tdlx3,tdly3,dlx4,dly4
@@ -679,7 +686,6 @@ subroutine pde2(nx,ny,u,i,j,ux3,ux4,uy3,uy4,nxa,nyc)
end subroutine pde2
!-----------------------------------------------------------------------
subroutine swk3(nfx,nfy,nfz,phif,rhsf,phi,rhs)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! set phif,rhsf input in arrays which include
! virtual boundaries for phi (for all 2-d real codes)
@@ -721,7 +727,6 @@ subroutine swk3(nfx,nfy,nfz,phif,rhsf,phi,rhs)
end subroutine swk3
!-----------------------------------------------------------------------
subroutine trsfc3(nx,ny,nz,phi,rhs,ncx,ncy,ncz,phic,rhsc)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! transfer fine grid to coarse grid
!
@@ -793,7 +798,6 @@ end subroutine trsfc3
!-----------------------------------------------------------------------
subroutine res3(nx,ny,nz,resf,ncx,ncy,ncz,rhsc, &
nxa,nxb,nyc,nyd,nze,nzf)
- use shr_kind_mod ,only: r8 => shr_kind_r8
implicit none
integer nx,ny,nz,ncx,ncy,ncz,nxa,nxb,nyc,nyd,nze,nzf
integer ix,jy,kz,i,j,k,ic,jc,kc,im1,ip1,jm1,jp1,km1,kp1
@@ -1138,7 +1142,6 @@ end subroutine res3
!
subroutine prolon3(ncx,ncy,ncz,p,nx,ny,nz,q,nxa,nxb,nyc,nyd, &
nze,nzf,intpol)
- use shr_kind_mod ,only: r8 => shr_kind_r8
implicit none
integer ncx,ncy,ncz,nx,ny,nz,intpol,nxa,nxb,nyc,nyd,nze,nzf
real(r8) :: p(0:ncx+1,0:ncy+1,0:ncz+1),q(0:nx+1,0:ny+1,0:nz+1)
@@ -1310,7 +1313,6 @@ end subroutine prolon3
!-----------------------------------------------------------------------
subroutine cor3(nx,ny,nz,phif,ncx,ncy,ncz,phic,nxa,nxb,nyc,nyd, &
nze,nzf,intpol,phcor)
- use shr_kind_mod ,only: r8 => shr_kind_r8
implicit none
integer nx,ny,nz,ncx,ncy,ncz,nxa,nxb,nyc,nyd,nze,nzf,intpol
integer i,j,k,ist,ifn,jst,jfn,kst,kfn
@@ -1384,7 +1386,6 @@ subroutine cor3(nx,ny,nz,phif,ncx,ncy,ncz,phic,nxa,nxb,nyc,nyd, &
end subroutine cor3
!-----------------------------------------------------------------------
subroutine per3vb(nx,ny,nz,phi,nxa,nyc,nze)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! set virtual periodic boundaries from interior values
! in three dimensions (for all 3-d solvers)
@@ -1423,7 +1424,6 @@ subroutine per3vb(nx,ny,nz,phi,nxa,nyc,nze)
end subroutine per3vb
!-----------------------------------------------------------------------
subroutine pde2cr(nx,ny,u,i,j,ux3y,uxy3,ux2y2)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! compute mixed partial derivative approximations
!
@@ -1724,7 +1724,6 @@ end subroutine pde2cr
!-----------------------------------------------------------------------
subroutine pde3(nx,ny,nz,u,i,j,k,ux3,ux4,uy3,uy4,uz3,uz4, &
nxa,nyc,nze)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! estimate third and fourth partial derivatives in x,y,z
!
@@ -1801,7 +1800,6 @@ subroutine pde3(nx,ny,nz,u,i,j,k,ux3,ux4,uy3,uy4,uz3,uz4, &
end subroutine pde3
!-----------------------------------------------------------------------
subroutine p3de2(nx,ny,u,i,j,ux3,ux4,uy3,uy4,nxa,nyc)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! third and fourth partial derivatives in x and y
!
@@ -1876,7 +1874,6 @@ subroutine p3de2(nx,ny,u,i,j,ux3,ux4,uy3,uy4,nxa,nyc)
end subroutine p3de2
!-----------------------------------------------------------------------
subroutine p3de1(nx,u,i,ux3,ux4,nxa)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! third and fourth derivatives in x
!
@@ -1944,7 +1941,6 @@ end subroutine p3de1
! of order n arising from nonperiodic or periodic discretizations
!
subroutine factri(m,n,a,b,c)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! factor the m simultaneous tridiagonal systems of order n
!
@@ -1961,7 +1957,6 @@ subroutine factri(m,n,a,b,c)
end subroutine factri
!-----------------------------------------------------------------------
subroutine factrp(m,n,a,b,c,d,e,sum)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! factor the m simultaneous "tridiagonal" systems of order n
! from discretized periodic system (leave out periodic n point)
@@ -2018,7 +2013,6 @@ subroutine factrp(m,n,a,b,c,d,e,sum)
end subroutine factrp
!-----------------------------------------------------------------------
subroutine transp(n,amat)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! transpose n by n real matrix
!
@@ -2036,95 +2030,92 @@ subroutine transp(n,amat)
end subroutine transp
!-----------------------------------------------------------------------
subroutine sgfa (a,lda,n,ipvt,info)
- use shr_kind_mod ,only: r8 => shr_kind_r8
- integer lda,n,ipvt(1),info
- real(r8) :: a(lda,1)
- real(r8) :: t
- integer isfmax,j,k,kp1,l,nm1
- info = 0
- nm1 = n - 1
- if (nm1 .lt. 1) go to 70
- do 60 k = 1, nm1
- kp1 = k + 1
+ integer lda,n,ipvt(1),info
+ real(r8) :: a(lda,1)
+ real(r8) :: t
+ integer :: j,k,kp1,l,nm1
+ info = 0
+ nm1 = n - 1
+ if (nm1 .lt. 1) go to 70
+ do 60 k = 1, nm1
+ kp1 = k + 1
l = isfmax(n-k+1,a(k,k),1) + k - 1
- ipvt(k) = l
- if (a(l,k) .eq. 0.0e0_r8) go to 40
- if (l .eq. k) go to 10
- t = a(l,k)
- a(l,k) = a(k,k)
- a(k,k) = t
- 10 continue
- t = -1.0e0_r8/a(k,k)
+ ipvt(k) = l
+ if (a(l,k) .eq. 0.0e0_r8) go to 40
+ if (l .eq. k) go to 10
+ t = a(l,k)
+ a(l,k) = a(k,k)
+ a(k,k) = t
+ 10 continue
+ t = -1.0e0_r8/a(k,k)
call sscl(n-k,t,a(k+1,k),1)
- do 30 j = kp1, n
- t = a(l,j)
- if (l .eq. k) go to 20
- a(l,j) = a(k,j)
- a(k,j) = t
- 20 continue
+ do 30 j = kp1, n
+ t = a(l,j)
+ if (l .eq. k) go to 20
+ a(l,j) = a(k,j)
+ a(k,j) = t
+ 20 continue
call sxpy(n-k,t,a(k+1,k),1,a(k+1,j),1)
- 30 continue
- go to 50
- 40 continue
- info = k
- 50 continue
- 60 continue
- 70 continue
- ipvt(n) = n
- if (a(n,n) .eq. 0.0e0_r8) info = n
- return
+ 30 continue
+ go to 50
+ 40 continue
+ info = k
+ 50 continue
+ 60 continue
+ 70 continue
+ ipvt(n) = n
+ if (a(n,n) .eq. 0.0e0_r8) info = n
+ return
end subroutine sgfa
!-----------------------------------------------------------------------
subroutine sgsl (a,lda,n,ipvt,b,job)
- use shr_kind_mod ,only: r8 => shr_kind_r8
implicit none
integer lda,n,ipvt(1),job
- real(r8) :: a(lda,1),b(1)
- real(r8) :: sdt,t
- integer k,kb,l,nm1
- nm1 = n - 1
- if (job .ne. 0) go to 50
- if (nm1 .lt. 1) go to 30
- do 20 k = 1, nm1
- l = ipvt(k)
- t = b(l)
- if (l .eq. k) go to 10
- b(l) = b(k)
- b(k) = t
- 10 continue
+ real(r8) :: a(lda,1),b(1)
+ real(r8) :: t
+ integer k,kb,l,nm1
+ nm1 = n - 1
+ if (job .ne. 0) go to 50
+ if (nm1 .lt. 1) go to 30
+ do 20 k = 1, nm1
+ l = ipvt(k)
+ t = b(l)
+ if (l .eq. k) go to 10
+ b(l) = b(k)
+ b(k) = t
+ 10 continue
call sxpy(n-k,t,a(k+1,k),1,b(k+1),1)
- 20 continue
- 30 continue
- do 40 kb = 1, n
- k = n + 1 - kb
- b(k) = b(k)/a(k,k)
- t = -b(k)
+ 20 continue
+ 30 continue
+ do 40 kb = 1, n
+ k = n + 1 - kb
+ b(k) = b(k)/a(k,k)
+ t = -b(k)
call sxpy(k-1,t,a(1,k),1,b(1),1)
- 40 continue
- go to 100
- 50 continue
- do 60 k = 1, n
+ 40 continue
+ go to 100
+ 50 continue
+ do 60 k = 1, n
t = sdt(k-1,a(1,k),1,b(1),1)
- b(k) = (b(k) - t)/a(k,k)
- 60 continue
- if (nm1 .lt. 1) go to 90
- do 80 kb = 1, nm1
- k = n - kb
+ b(k) = (b(k) - t)/a(k,k)
+ 60 continue
+ if (nm1 .lt. 1) go to 90
+ do 80 kb = 1, nm1
+ k = n - kb
b(k) = b(k) + sdt(n-k,a(k+1,k),1,b(k+1),1)
- l = ipvt(k)
- if (l .eq. k) go to 70
- t = b(l)
- b(l) = b(k)
- b(k) = t
- 70 continue
- 80 continue
- 90 continue
- 100 continue
- return
+ l = ipvt(k)
+ if (l .eq. k) go to 70
+ t = b(l)
+ b(l) = b(k)
+ b(k) = t
+ 70 continue
+ 80 continue
+ 90 continue
+ 100 continue
+ return
end subroutine sgsl
!-----------------------------------------------------------------------
function sdt(n,sx,incx,sy,incy) result(sdtx)
- use shr_kind_mod ,only: r8 => shr_kind_r8
implicit none
@@ -2134,127 +2125,124 @@ function sdt(n,sx,incx,sy,incy) result(sdtx)
integer :: i,ix,iy,m,mp1
real(r8) :: sdtx
real(r8) :: stemp
-
- stemp = 0.0e0_r8
+
+ stemp = 0.0e0_r8
sdtx = 0.0e0_r8
- if(n.le.0)return
- if(incx.eq.1.and.incy.eq.1)go to 20
- ix = 1
- iy = 1
- if(incx.lt.0)ix = (-n+1)*incx + 1
- if(incy.lt.0)iy = (-n+1)*incy + 1
- do 10 i = 1,n
- stemp = stemp + sx(ix)*sy(iy)
- ix = ix + incx
- iy = iy + incy
- 10 continue
+ if(n.le.0)return
+ if(incx.eq.1.and.incy.eq.1)go to 20
+ ix = 1
+ iy = 1
+ if(incx.lt.0)ix = (-n+1)*incx + 1
+ if(incy.lt.0)iy = (-n+1)*incy + 1
+ do 10 i = 1,n
+ stemp = stemp + sx(ix)*sy(iy)
+ ix = ix + incx
+ iy = iy + incy
+ 10 continue
sdtx = stemp
- return
- 20 m = mod(n,5)
- if( m .eq. 0 ) go to 40
- do 30 i = 1,m
- stemp = stemp + sx(i)*sy(i)
- 30 continue
- if( n .lt. 5 ) go to 60
- 40 mp1 = m + 1
- do 50 i = mp1,n,5
- stemp = stemp + sx(i)*sy(i) + sx(i + 1)*sy(i + 1) + &
- sx(i + 2)*sy(i + 2) + sx(i + 3)*sy(i + 3) + sx(i + 4)*sy(i + 4)
- 50 continue
+ return
+ 20 m = mod(n,5)
+ if( m .eq. 0 ) go to 40
+ do 30 i = 1,m
+ stemp = stemp + sx(i)*sy(i)
+ 30 continue
+ if( n .lt. 5 ) go to 60
+ 40 mp1 = m + 1
+ do 50 i = mp1,n,5
+ stemp = stemp + sx(i)*sy(i) + sx(i + 1)*sy(i + 1) + &
+ sx(i + 2)*sy(i + 2) + sx(i + 3)*sy(i + 3) + sx(i + 4)*sy(i + 4)
+ 50 continue
60 sdtx = stemp
- return
+ return
end function sdt
!-----------------------------------------------------------------------
integer function isfmax(n,sx,incx)
- use shr_kind_mod ,only: r8 => shr_kind_r8
implicit none
- real(r8) :: sx(1),smax
- integer i,incx,ix,n
+ real(r8) :: sx(1),smax
+ integer i,incx,ix,n
isfmax = 0
- if( n .lt. 1 ) return
+ if( n .lt. 1 ) return
isfmax = 1
- if(n.eq.1)return
- if(incx.eq.1)go to 20
- ix = 1
- smax = abs(sx(1))
- ix = ix + incx
- do 10 i = 2,n
- if(abs(sx(ix)).le.smax) go to 5
+ if(n.eq.1)return
+ if(incx.eq.1)go to 20
+ ix = 1
+ smax = abs(sx(1))
+ ix = ix + incx
+ do 10 i = 2,n
+ if(abs(sx(ix)).le.smax) go to 5
isfmax = i
- smax = abs(sx(ix))
- 5 ix = ix + incx
- 10 continue
- return
- 20 smax = abs(sx(1))
- do 30 i = 2,n
- if(abs(sx(i)).le.smax) go to 30
+ smax = abs(sx(ix))
+ 5 ix = ix + incx
+ 10 continue
+ return
+ 20 smax = abs(sx(1))
+ do 30 i = 2,n
+ if(abs(sx(i)).le.smax) go to 30
isfmax = i
- smax = abs(sx(i))
- 30 continue
- return
+ smax = abs(sx(i))
+ 30 continue
+ return
end function isfmax
!-----------------------------------------------------------------------
subroutine sxpy(n,sa,sx,incx,sy,incy)
- use shr_kind_mod ,only: r8 => shr_kind_r8
implicit none
- real(r8) :: sx(1),sy(1),sa
- integer i,incx,incy,ix,iy,m,mp1,n
- if(n.le.0)return
- if (sa .eq. 0.0_r8) return
- if(incx.eq.1.and.incy.eq.1)go to 20
- ix = 1
- iy = 1
- if(incx.lt.0)ix = (-n+1)*incx + 1
- if(incy.lt.0)iy = (-n+1)*incy + 1
- do 10 i = 1,n
- sy(iy) = sy(iy) + sa*sx(ix)
- ix = ix + incx
- iy = iy + incy
- 10 continue
- return
- 20 m = mod(n,4)
- if( m .eq. 0 ) go to 40
- do 30 i = 1,m
- sy(i) = sy(i) + sa*sx(i)
- 30 continue
- if( n .lt. 4 ) return
- 40 mp1 = m + 1
- do 50 i = mp1,n,4
- sy(i) = sy(i) + sa*sx(i)
- sy(i + 1) = sy(i + 1) + sa*sx(i + 1)
- sy(i + 2) = sy(i + 2) + sa*sx(i + 2)
- sy(i + 3) = sy(i + 3) + sa*sx(i + 3)
- 50 continue
- return
+ real(r8) :: sx(1),sy(1),sa
+ integer i,incx,incy,ix,iy,m,mp1,n
+ if(n.le.0)return
+ if (sa .eq. 0.0_r8) return
+ if(incx.eq.1.and.incy.eq.1)go to 20
+ ix = 1
+ iy = 1
+ if(incx.lt.0)ix = (-n+1)*incx + 1
+ if(incy.lt.0)iy = (-n+1)*incy + 1
+ do 10 i = 1,n
+ sy(iy) = sy(iy) + sa*sx(ix)
+ ix = ix + incx
+ iy = iy + incy
+ 10 continue
+ return
+ 20 m = mod(n,4)
+ if( m .eq. 0 ) go to 40
+ do 30 i = 1,m
+ sy(i) = sy(i) + sa*sx(i)
+ 30 continue
+ if( n .lt. 4 ) return
+ 40 mp1 = m + 1
+ do 50 i = mp1,n,4
+ sy(i) = sy(i) + sa*sx(i)
+ sy(i + 1) = sy(i + 1) + sa*sx(i + 1)
+ sy(i + 2) = sy(i + 2) + sa*sx(i + 2)
+ sy(i + 3) = sy(i + 3) + sa*sx(i + 3)
+ 50 continue
+ return
end subroutine sxpy
!-----------------------------------------------------------------------
subroutine sscl(n,sa,sx,incx)
- use shr_kind_mod ,only: r8 => shr_kind_r8
- implicit none
- real(r8) :: sa,sx(1)
- integer i,incx,m,mp1,n,nincx
- if(n.le.0)return
- if(incx.eq.1)go to 20
- nincx = n*incx
- do 10 i = 1,nincx,incx
- sx(i) = sa*sx(i)
- 10 continue
- return
- 20 m = mod(n,5)
- if( m .eq. 0 ) go to 40
- do 30 i = 1,m
- sx(i) = sa*sx(i)
- 30 continue
- if( n .lt. 5 ) return
- 40 mp1 = m + 1
- do 50 i = mp1,n,5
- sx(i) = sa*sx(i)
- sx(i + 1) = sa*sx(i + 1)
- sx(i + 2) = sa*sx(i + 2)
- sx(i + 3) = sa*sx(i + 3)
- sx(i + 4) = sa*sx(i + 4)
- 50 continue
- return
+
+ real(r8) :: sa,sx(1)
+ integer i,incx,m,mp1,n,nincx
+ if(n.le.0)return
+ if(incx.eq.1)go to 20
+ nincx = n*incx
+ do 10 i = 1,nincx,incx
+ sx(i) = sa*sx(i)
+ 10 continue
+ return
+ 20 m = mod(n,5)
+ if( m .eq. 0 ) go to 40
+ do 30 i = 1,m
+ sx(i) = sa*sx(i)
+ 30 continue
+ if( n .lt. 5 ) return
+ 40 mp1 = m + 1
+ do 50 i = mp1,n,5
+ sx(i) = sa*sx(i)
+ sx(i + 1) = sa*sx(i + 1)
+ sx(i + 2) = sa*sx(i + 2)
+ sx(i + 3) = sa*sx(i + 3)
+ sx(i + 4) = sa*sx(i + 4)
+ 50 continue
+ return
end subroutine sscl
!-----------------------------------------------------------------------
-!end module mudcom
+end module edyn_mudcom
diff --git a/src/ionosphere/waccmx/edyn_mudmod.F90 b/src/ionosphere/waccmx/edyn_mudmod.F90
index 7fb68acbc0..24d88db476 100644
--- a/src/ionosphere/waccmx/edyn_mudmod.F90
+++ b/src/ionosphere/waccmx/edyn_mudmod.F90
@@ -1,27 +1,33 @@
-!-----------------------------------------------------------------------
- subroutine mudmod(pe,phi_out,jntl,isolve,ier)
- use shr_kind_mod ,only: r8 => shr_kind_r8
- use cam_abortutils ,only: endrun
- use edyn_solve ,only: cee
- use cam_logfile ,only: iulog
+module edyn_mudmod
+ use shr_kind_mod, only: r8 => shr_kind_r8
+ use cam_logfile, only: iulog
+ use edyn_mud, only: dismd2cr, mud2cr1, adjmd2cr, kcymd2cr, relmd2cr, resmd2cr
+ use edyn_mudcom, only: swk2, trsfc2, prolon2, cor2, res2
- implicit none
+ implicit none
- integer jntl,ier ! output: not converged ier < 0
- integer,intent(in) :: isolve
+ private
+
+ public :: mudmod
+
+contains
+!-----------------------------------------------------------------------
+ subroutine mudmod(pe,phi_out,jntl,isolve,nlev,ier)
+ use edyn_solver_coefs,only: cee
+ use edyn_params, only: pi
+
+ integer,intent(in) :: jntl, isolve, nlev
+ integer,intent(out) :: ier ! output: not converged ier < 0
!
! set grid size params
!
integer iixp,jjyq,iiex,jjey,nnx,nny,llwork
- parameter (iixp = 5 , jjyq = 3, iiex = 5, jjey = 5 )
- parameter (nnx=iixp*2**(iiex-1)+1, nny=jjyq*2**(jjey-1)+1)
+ parameter (iixp = 5 , jjyq = 3)
!
! estimate work space for point relaxation (see mud2cr.d)
!
- parameter (llwork=(7*(nnx+2)*(nny+2)+76*nnx*nny)/3 )
- real(r8) :: phi(nnx,nny),rhs(nnx,nny),work(llwork)
- real(r8) :: phi_out(0:nnx+1,0:nny+1)
- real(r8) :: time0,time1
+ real(r8) :: phi_out(0:iixp*2**(nlev-1)+1+1,0:jjyq*2**(nlev-1)+1+1)
+ real(r8),allocatable :: phi(:,:),rhs(:,:),work(:)
!
! put integer and floating point argument names in contiguous
! storage for labelling in vectors iprm,fprm
@@ -37,17 +43,17 @@ subroutine mudmod(pe,phi_out,jntl,isolve,ier)
equivalence(intl,iprm)
equivalence(xa,fprm)
integer i,j,ierror
- real(r8) :: PE(NNX,*)
- integer maxcya
- DATA MAXCYA/50/
- integer mm,nn,jj,jjj,ij
- real(r8) :: pi
-!
-! set input integer arguments
-!
- MM = NNX
- NN = NNY
- PI = 4._r8*ATAN(1._r8)
+ real(r8) :: PE(iixp*2**(nlev-1)+1,*)
+ integer, parameter :: maxcya=50
+ integer jj,jjj,ij
+
+ iiex = nlev
+ jjey = nlev
+ nnx=iixp*2**(iiex-1)+1
+ nny=jjyq*2**(jjey-1)+1
+ llwork=(7*(nnx+2)*(nny+2)+76*nnx*nny)/3
+
+ allocate(phi(nnx,nny),rhs(nnx,nny),work(llwork))
!
! SET INPUT INTEGER PARAMETERS
!
@@ -119,52 +125,17 @@ subroutine mudmod(pe,phi_out,jntl,isolve,ier)
do i=1,nx
phi(i,ny) = rhs(i,ny)/cee(i+(ny-1)*nx+8*nx*ny)
end do
-
-! write(iulog,100)
-! 100 format(//' mud2cr test ')
-! write (iulog,101) (iprm(i),i=1,15)
-! 101 format(/,' integer input arguments ',/,
-! | ' intl = ',i2,/,' nxa = ',i2,' nxb = ',i2,' nyc = ',i2,
-! | ' nyd = ',i2,/,' ixp = ',i2,' jyq = ',i2,' iex = ',i2,
-! | ' jey = ',i2,/,' nx = ',i3,' ny = ',i3,' iguess = ',i2,
-! | ' maxcy = ',i3,/,' method = ',i2, ' work space estimate = ',i7)
-! write (iulog,102) (mgopt(i),i=1,4)
-! 102 format(/' multigrid option arguments ',
-! | /,' kcycle = ',i2,
-! | /,' iprer = ',i2,
-! | /,' ipost = ',i2
-! | /,' intpol = ',i2)
-! write(iulog,103) xa,xb,yc,yd,tolmax
-! 103 format(/' floating point input parameters ',
-! | /,' xa = ',f6.3,' xb = ',f6.3,' yc = ',f6.3,' yd = ',f6.3,
-! | /,' tolerance (error control) = ',e10.3)
-! write(iulog,"('fprm(1-5) (xa,xb,yc,yd,tolmax=',6f8.3)") fprm(1:5)
-!
-! intialization call
-!
-! write(iulog,104) intl
-! 104 format(/' discretization call to mud2cr', ' intl = ', i2)
call mud2cm(iprm,fprm,work,rhs,phi,mgopt,ierror,isolve)
-! write (iulog,200) ierror,iprm(16)
-! 200 format(' ierror = ',i2, ' minimum work space = ',i7)
-! if (ierror.gt.0) call exit(0)
!
! attempt solution
!
intl = 1
-! write(iulog,106) intl,method,iguess
-! 106 format(/' approximation call to mud2cr',
-! +/' intl = ',i2, ' method = ',i2,' iguess = ',i2)
-
+
call mud2cm(iprm,fprm,work,rhs,phi,mgopt,ierror,isolve)
ier = ierror ! ier < 0 not converged
if(ier < 0 ) goto 108
-
-! write (iulog,107) ierror
-! 107 format(' ierror = ',i2)
- if (ierror.gt.0) call endrun('mudmod call mud2cm')
!
! COPY PHI TO PE
!
@@ -179,7 +150,7 @@ subroutine mudmod(pe,phi_out,jntl,isolve,ier)
! am 8/10 for calculating residual: convert work array (solution) into array
! sized as coefficient stencil (c0, cofum) including values at index 0, nmlon0+1
-! and nmlat0+1
+! and nmlat0+1
do j=0,ny+1
jj = j*(nx+2)
@@ -188,14 +159,15 @@ subroutine mudmod(pe,phi_out,jntl,isolve,ier)
phi_out(i,j) = work(ij)
end do
end do
-
- 108 continue
+
+ 108 continue
+
+ deallocate(phi,rhs,work)
+
end subroutine mudmod
!-------------------------------------------------------------------
subroutine mud2cm(iparm,fparm,work,rhs,phi,mgopt,ierror,isolve)
- use shr_kind_mod ,only: r8 => shr_kind_r8
- use cam_logfile ,only: iulog
- implicit none
+
integer,intent(in) :: isolve
integer iparm,mgopt,ierror
integer intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nfx,nfy,iguess, &
@@ -212,7 +184,7 @@ subroutine mud2cm(iparm,fparm,work,rhs,phi,mgopt,ierror,isolve)
common/fmud2cr/xa,xb,yc,yd,tolmax,relmax
common/mud2crc/kpbgn(50),kcbgn(50),ktxbgn(50),ktybgn(50), &
nxk(50),nyk(50),isx,jsy
-
+
data int / 0 /
save int
@@ -353,29 +325,28 @@ subroutine mud2cm(iparm,fparm,work,rhs,phi,mgopt,ierror,isolve)
if (tolmax.gt.0.0_r8) then ! check for convergence
fparm(6) = relmax
if (relmax.gt.tolmax) then
-
+
! ierror = -1 ! flag convergenc failure
write(iulog,*) "no convergence with mudmod"
-!
- iguess = 1
- iparm(12)= iguess
+!
+ iguess = 1
+ iparm(12)= iguess
call mud2cr1(nx,ny,rhs,phi,work) ! solve with modified stencils
-
+
fparm(6) = relmax
if (relmax.gt.tolmax) then
write(iulog,*) "no convergence with mud"
ierror = -1 ! flag convergenc failure
end if
-
+
end if
end if
-
+
return
end subroutine mud2cm
-!------------------------------------------------------------------------
+!------------------------------------------------------------------------
subroutine mud2c1m(nx,ny,rhsf,phif,wk)
- use shr_kind_mod ,only: r8 => shr_kind_r8
- implicit none
+
integer nx,ny
real(r8) :: phif(nx,ny),rhsf(nx,ny),wk(*)
integer intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nfx,nfy,iguess, &
@@ -385,7 +356,7 @@ subroutine mud2c1m(nx,ny,rhsf,phif,wk)
integer kpbgn,kcbgn,ktxbgn,ktybgn,nxk,nyk,isx,jsy
integer k,kb,ip,ic,ir,ipc,irc,icc
integer ncx,ncy,jj,ij,i,j,iter
- integer iw,itx,ity,ierror
+
common/imud2cr/intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nfx,nfy, &
iguess, maxcy,method,nwork,lwork,itero,ngrid, &
klevel,kcur,kcycle,iprer,ipost,intpol,kps
@@ -473,14 +444,14 @@ subroutine mud2c1m(nx,ny,rhsf,phif,wk)
!
relmax = 0.0_r8
phmax = 0.0_r8
-
+
do j=1,nfy
jj = j*(nfx+2)
do i=1,nfx
ij = jj+i+1
phmax = max(phmax,abs(wk(ij)))
relmax = max(relmax,abs(wk(ij)-phif(i,j)))
-
+
phif(i,j) = wk(ij)
end do
end do
@@ -506,13 +477,11 @@ end subroutine mud2c1m
!------------------------------------------------------------------------
subroutine kcym2cm(wk)
- use shr_kind_mod ,only: r8 => shr_kind_r8
- use edyn_solve,only: cofum
+ use edyn_solver_coefs,only: cofum
!
! execute multigrid k cycle from kcur grid level
! kcycle=1 for v cycles, kcycle=2 for w cycles
!
- implicit none
real(r8) :: wk(*)
integer intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nfx,nfy,iguess, &
maxcy,method,nwork,lwork,itero,ngrid,klevel,kcur, &
@@ -528,8 +497,8 @@ subroutine kcym2cm(wk)
nxk(50),nyk(50),isx,jsy
integer kount(50)
! real(r8) :: :: cofum
-! common/mudmd/cofum(1)
-
+! common/mudmd/cofum(1)
+
klevel = kcur
nx = nxk(klevel)
ny = nyk(klevel)
@@ -673,14 +642,12 @@ subroutine kcym2cm(wk)
end do
return
end subroutine kcym2cm
-!----------------------------------------------------------------------
+!----------------------------------------------------------------------
subroutine resm2cm(nx,ny,phi,ncx,ncy,phic,rhsc,cof,resf,cofum)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! restrict residual from fine to coarse mesh using fully weighted
! residual restriction
!
- implicit none
integer intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nfx,nfy,iguess, &
maxcy,method,nwork,lwork,itero,ngrid,klevel,kcur, &
kcycle,iprer,ipost,intpol,kps
@@ -700,12 +667,12 @@ subroutine resm2cm(nx,ny,phi,ncx,ncy,phic,rhsc,cof,resf,cofum)
phic(ic,jc) = 0.0_r8
end do
end do
-
+
call bnd2cm(nx,ny,cofum)
!
! compute residual on fine mesh in resf
!
- l2norm = 0._r8
+ l2norm = 0._r8
!$OMP PARALLEL DO SHARED(resf,cof,phi,nx,ny) PRIVATE(i,j)
do j=1,ny
do i=1,nx
@@ -719,7 +686,7 @@ subroutine resm2cm(nx,ny,phi,ncx,ncy,phic,rhsc,cof,resf,cofum)
cofum(i,j,7)*phi(i,j-1)+ &
cofum(i,j,8)*phi(i+1,j-1)+ &
cofum(i,j,9)*phi(i,j))
-
+
l2norm = l2norm + resf(i,j)*resf(i,j)
end do
end do
@@ -732,23 +699,20 @@ end subroutine resm2cm
!-----------------------------------------------------------------------
subroutine bnd2cm(nx,ny,cf)
- use shr_kind_mod ,only: r8 => shr_kind_r8
!
! set stencil & boundary condition for finest stencil
!
- implicit none
integer intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nfx,nfy,iguess, &
maxcy,method,nwork,lwork,itero,ngrid,klevel,kcur, &
kcycle,iprer,ipost,intpol,kps
real(r8) :: xa,xb,yc,yd,tolmax,relmax
- integer nx,ny,i,j,kbdy,l,im1,jm1,ier,jc,nnx,nny
+ integer nx,ny,i,j,l
real(r8) :: cf(nx,ny,*)
- real(r8) :: dlx,dlx2,dlxx,dly,dly2,dlyy,cmin,alfmax,cemax
common/imud2cr/intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nfx,nfy, &
iguess, maxcy,method,nwork,lwork,itero,ngrid, &
klevel,kcur,kcycle,iprer,ipost,intpol,kps
common/fmud2cr/xa,xb,yc,yd,tolmax,relmax
-
+
!
! set coefficient for specified boundaries
!
@@ -792,3 +756,4 @@ subroutine bnd2cm(nx,ny,cf)
return
end subroutine bnd2cm
!-----------------------------------------------------------------------
+end module edyn_mudmod
diff --git a/src/ionosphere/waccmx/edyn_muh2cr.F90 b/src/ionosphere/waccmx/edyn_muh2cr.F90
index 78a31e0fdd..d58bd0132b 100644
--- a/src/ionosphere/waccmx/edyn_muh2cr.F90
+++ b/src/ionosphere/waccmx/edyn_muh2cr.F90
@@ -1,32 +1,45 @@
+module edyn_muh2cr
+ use shr_kind_mod , only: r8 => shr_kind_r8
+ use cam_abortutils, only: endrun
+ use edyn_mudcom, only: prolon2, trsfc2, factri,factrp, sgfa, sgsl, transp
+ use edyn_mudcom, only: swk2, cor2, transp, res2
+
+ implicit none
+
+ private
+
+ public :: muh
+
+contains
!-----------------------------------------------------------------------
- subroutine muh(pe,jntl)
- use shr_kind_mod ,only: r8 => shr_kind_r8
- use cam_abortutils ,only: endrun
- use edyn_solve,only: nc,ncee,cee
- use cam_logfile ,only: iulog
+ subroutine muh(pe,nlon,nlat,nlev,jntl)
+ use edyn_solver_coefs, only: cee
+ use edyn_params, only: pi
implicit none
- integer jntl
+
+ integer,intent(in) :: nlon, nlat, nlev, jntl
+ real(r8),intent(out) :: PE(nlon+1,*)
!
! set grid size params
!
- integer,parameter :: iixp = 80 , jjyq = 48,iiex = 1, jjey = 1
- integer,parameter :: nnx=iixp*2**(iiex-1)+1, nny=jjyq*2**(jjey-1)+1
+ integer :: iixp, jjyq
+ integer,parameter :: iiex = 1, jjey = 1
+ integer :: nnx, nny
!
! estimate work space for point relaxation (see muh2cr.d)
!
- integer,parameter :: llwork=(5*((nnx+2)*(nny+2)+18*nnx*nny)/3+ &
- (nnx+2)*(nny+2)+ (iixp+1)*(jjyq+1)*(2*iixp+3))
- integer,parameter :: iiwork=(iixp+1)*(jjyq+1)
- real(r8) :: phi(nnx,nny),rhs(nnx,nny),work(llwork)
- integer iwork(iiwork)
+ integer :: llwork
+ integer :: iiwork
+ real(r8), allocatable :: phi(:,:),rhs(:,:),work(:)
+ integer, allocatable :: iwork(:)
!
! put integer and floating point argument names in contiguous
! storage for labelling in vectors iprm,fprm
!
- integer iprm(17),mgopt(4)
+ integer :: iprm(17),mgopt(4)
real(r8) :: fprm(6)
- integer intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nx,ny,&
+ integer :: intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nx,ny,&
iguess,maxcy,method,nwork,lwrkqd,itero
common/itmud2cr/intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nx,ny,&
iguess,maxcy,method,nwork,lwrkqd,itero
@@ -34,19 +47,21 @@ subroutine muh(pe,jntl)
common/ftmud2cr/xa,xb,yc,yd,tolmax,relmax
equivalence(intl,iprm)
equivalence(xa,fprm)
- integer i,j,ierror
- real(r8) :: PE(NNX,1)
- integer maxcya
-! DATA MAXCYA/20/
- DATA MAXCYA/1/
- integer mm,nn,jj,jjj
- real(r8) :: pi
-!
-! set input integer arguments
-!
- MM = NNX
- NN = NNY
- PI = 4._r8*ATAN(1._r8)
+ integer :: i,j,ierror
+ integer, parameter :: maxcya = 1
+ integer jj,jjj
+
+ iixp = nlon
+ jjyq = (nlat-1)/2
+ nnx=iixp*2**(iiex-1)+1
+ nny=jjyq*2**(jjey-1)+1
+ llwork=(5*((nnx+2)*(nny+2)+18*nnx*nny)/3+ &
+ (nnx+2)*(nny+2)+ (iixp+1)*(jjyq+1)*(2*iixp+3))
+ iiwork=(iixp+1)*(jjyq+1)
+
+ allocate(phi(nnx,nny),rhs(nnx,nny),work(llwork))
+ allocate(iwork(iiwork))
+
!
! SET INPUT INTEGER PARAMETERS
!
@@ -74,7 +89,12 @@ subroutine muh(pe,jntl)
mgopt(1) = 2
mgopt(2) = 2
mgopt(3) = 2
- mgopt(4) = 3
+ if (nlat<=97) then
+ mgopt(4) = 3
+ else
+ ! 1 deg, changed to mgopt(4) = 1 per Astrid's suggestion
+ mgopt(4) = 1
+ end if
!
! set for one cycle
!
@@ -101,7 +121,13 @@ subroutine muh(pe,jntl)
!
! set error control flag
!
- tolmax = 0.01_r8
+ if (nlev>6) then
+ tolmax = 0.05_r8
+ else if (nlev>5) then
+ tolmax = 0.03_r8
+ else
+ tolmax = 0.01_r8
+ end if
!
! set right hand side in rhs
! initialize phi to zero
@@ -124,45 +150,17 @@ subroutine muh(pe,jntl)
DO I=1,NX
PHI(I,NY) = RHS(I,NY)/CEE(I+(NY-1)*NX+8*NX*NY)
END DO
-
-! write(iulog,100)
- 100 format(//' mud2cr test ')
-! write (iulog,101) (iprm(i),i=1,15)
-! 101 format(/,' integer input arguments ',/,
-! | ' intl = ',i2,/,' nxa = ',i2,' nxb = ',i2,' nyc = ',i2,
-! | ' nyd = ',i2,/,' ixp = ',i2,' jyq = ',i2,' iex = ',i2,
-! | ' jey = ',i2,/,' nx = ',i3,' ny = ',i3,' iguess = ',i2,
-! | ' maxcy = ',i3,/,' method = ',i2, ' work space estimate = ',i7)
-! write (iulog,102) (mgopt(i),i=1,4)
-! 102 format(/' multigrid option arguments ',
-! | /,' kcycle = ',i2,
-! | /,' iprer = ',i2,
-! | /,' ipost = ',i2
-! | /,' intpol = ',i2)
-! write(iulog,103) xa,xb,yc,yd,tolmax
-! 103 format(/' floating point input parameters ',
-! | /,' xa = ',f6.3,' xb = ',f6.3,' yc = ',f6.3,' yd = ',f6.3,
-! | /,' tolerance (error control) = ',e10.3)
-! write(iulog,"('fprm(1-5) (xa,xb,yc,yd,tolmax=',6f8.3)") fprm(1:5)
+
!
! intialization call
!
-! write(iulog,104) intl
- 104 format(/' discretization call to muh2cr', ' intl = ', i2)
call muh2cr(iprm,fprm,work,iwork,rhs,phi,mgopt,ierror)
-! write (iulog,200) ierror,iprm(16)
-! 200 format(' ierror = ',i2, ' minimum work space = ',i7)
if (ierror.gt.0) call endrun('muh call init muh2cr')
!
! attempt solution
!
intl = 1
-! write(iulog,106) intl,method,iguess
-! 106 format(/' approximation call to muh2cr',
-! +/' intl = ',i2, ' method = ',i2,' iguess = ',i2)
call muh2cr(iprm,fprm,work,iwork,rhs,phi,mgopt,ierror)
-! write (iulog,107) ierror
- 107 format(' ierror = ',i2)
if (ierror.gt.0) call endrun('muh call solve muh2cr')
!
! COPY PHI TO PE
@@ -175,8 +173,11 @@ subroutine muh(pe,jntl)
PE(I,JJJ) = PHI(I,J)
END DO
END DO
+
+ deallocate( phi, rhs, work, iwork)
+
end subroutine muh
-!-----------------------------------------------------------------------
+!-----------------------------------------------------------------------
subroutine muh2cr(iparm,fparm,wk,iwk,rhs,phi,mgopt,ierror)
use shr_kind_mod ,only: r8 => shr_kind_r8
implicit none
@@ -338,7 +339,7 @@ subroutine muh2cr(iparm,fparm,wk,iwk,rhs,phi,mgopt,ierror)
itx = ktxbgn(k)
ity = ktybgn(k)
klevel = k
- call dismh2cr(nx,ny,wk(ic),wk(itx),wk(ity),wk,iwk,ierror)
+ call dismh2cr(nx,ny,wk(ic),wk(itx),wk(ity),wk,iwk)
end do
return
end if ! end of intl=0 initialization call block
@@ -460,7 +461,7 @@ subroutine muh2cr1(nx,ny,rhsf,phif,wk,iwk)
ij = jj+i+1
phmax = max(phmax,abs(wk(ij)))
relmax = max(relmax,abs(wk(ij)-phif(i,j)))
-
+
phif(i,j) = wk(ij)
end do
end do
@@ -468,7 +469,7 @@ subroutine muh2cr1(nx,ny,rhsf,phif,wk,iwk)
! set maximum relative difference and check for convergence
!
if (phmax.gt.0.0_r8) relmax = relmax/phmax
-
+
if (relmax.le.tolmax) return
end if
end do
@@ -684,11 +685,10 @@ subroutine kcymh2cr(wk,iwk)
return
end subroutine kcymh2cr
!-----------------------------------------------------------------------
- subroutine dismh2cr(nx,ny,cf,tx,ty,wk,iwk,ier)
+ subroutine dismh2cr(nx,ny,cf,tx,ty,wk,iwk)
use shr_kind_mod ,only: r8 => shr_kind_r8
use cam_abortutils ,only: endrun
- use edyn_solve,only: nc,ncee,cee,ceee
- use cam_logfile ,only: iulog
+ use edyn_solver_coefs,only: nc,cee,ceee
!
! discretize elliptic pde for muh2cr, set nonfatal errors
!
@@ -697,7 +697,7 @@ subroutine dismh2cr(nx,ny,cf,tx,ty,wk,iwk,ier)
maxcy,method,nwork,lwork,itero,ngrid,klevel,kcur,&
kcycle,iprer,ipost,intpol,kps
real(r8) :: xa,xb,yc,yd,tolmax,relmax
- integer nx,ny,iwk(*),i,j,kbdy,l,im1,jm1,ier,jc
+ integer nx,ny,iwk(*),i,j,l,im1,jm1
real(r8) :: cf(nx,ny,10),tx(nx,ny,*),ty(ny,nx,*)
real(r8) :: wk(*)
common/imud2cr/intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nfx,nfy,&
@@ -962,7 +962,7 @@ subroutine for2cr(nx,ny,phi,frhs,alfa)
phi(i,j)=phi(i,j)-sum
end do
end do
- return
+ return
end subroutine for2cr
!-----------------------------------------------------------------------
subroutine bkw2cr(nx,ny,phi,cof,beta,index,nxa)
@@ -992,7 +992,7 @@ subroutine bkw2cr(nx,ny,phi,cof,beta,index,nxa)
end if
call sgsl(beta(1,1,jcur),nx ,nx ,index(1,jcur),phi(1,jcur),iz)
end do
- return
+ return
end subroutine bkw2cr
!-----------------------------------------------------------------------
subroutine lud2crp(nx,ny,cof,beta,alfa,zmat,dmat,index,nxa)
@@ -1384,11 +1384,11 @@ subroutine setbcr(nx,ny,cof,beta,jcur,nxa)
do i=1,nx-1
beta(i,i+1,jcur) = cof(i,jcur,1)
end do
- if (nxa.eq.0) then
+ if (nxa.eq.0) then
beta(1,nx-1,jcur) = cof(1,jcur,5)
beta(nx,2,jcur) = cof(nx,jcur,1)
- end if
- return
+ end if
+ return
end subroutine setbcr
!-----------------------------------------------------------------------
subroutine setacr(nx,ny,cof,alfa,jcur,nxa)
@@ -1424,26 +1424,18 @@ subroutine adjmh2cr(nx,ny,phi,cf)
! adjust righthand side in cf(i,j,10) for boundary conditions
!
implicit none
- integer intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nfx,nfy,iguess, &
+ integer :: intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nfx,nfy,iguess, &
maxcy,method,nwork,lwork,itero,ngrid,klevel,kcur, &
kcycle,iprer,ipost,intpol,kps
real(r8) :: xa,xb,yc,yd,tolmax,relmax
- integer nx,ny,i,j,kbdy
+ integer :: nx,ny,i,j
real(r8) :: cf(nx,ny,10),phi(0:nx+1,0:ny+1)
- real(r8) :: dlx,dlx2,dlxx,dly,dly2,dlyy,dlxy,dlxy2,dlxy4,dxoy,dyox
- real(r8) :: x,y,cxx,cxy,cyy,cx,cy,ce,c1,c2,c3,c4,c5
- real(r8) :: c6,c7,c8
- real(r8) :: alfaa,alfab,alfac,alfad,betaa,betab,betac,betad,det
- real(r8) :: gamaa,gamab,gamac,gamad
- real(r8) :: alfim1,alfi,alfip1,betim1,beti,betip1,gamim1,gami,gamip1
- real(r8) :: alfjm1,alfj,alfjp1,betjm1,betj,betjp1,gamjm1,gamj,gamjp1
- real(r8) :: gbdim1,gbdi,gbdip1,gbdj,gbdjm1,gbdjp1
- real(r8) :: gbdya,gbdyb,gbdyc,gbdyd
+
common/imud2cr/intl,nxa,nxb,nyc,nyd,ixp,jyq,iex,jey,nfx,nfy, &
iguess, maxcy,method,nwork,lwork,itero,ngrid, &
klevel,kcur,kcycle,iprer,ipost,intpol,kps
common/fmud2cr/xa,xb,yc,yd,tolmax,relmax
-
+
!
! set specified boundaries in rhs from phi
@@ -2022,3 +2014,4 @@ subroutine slymh2cr(nx,ny,phi,cof,ty,sum)
return
end subroutine slymh2cr
!-----------------------------------------------------------------------
+end module edyn_muh2cr
diff --git a/src/ionosphere/waccmx/edyn_params.F90 b/src/ionosphere/waccmx/edyn_params.F90
index ad2fefc93a..ddfff65cde 100644
--- a/src/ionosphere/waccmx/edyn_params.F90
+++ b/src/ionosphere/waccmx/edyn_params.F90
@@ -1,44 +1,45 @@
module edyn_params
-!
-! Constants for edynamo.
-!
- use shr_kind_mod, only: r8 => shr_kind_r8 ! 8-byte reals
- use physconst, only: pi
+ !
+ ! Constants for edynamo.
+ !
+ use shr_kind_mod, only: r8 => shr_kind_r8 ! 8-byte reals
+ use shr_const_mod, only: rearth_m => SHR_CONST_REARTH ! meters
+ use physconst, only: pi
- implicit none
- save
+ implicit none
+ save
- private
+ private
- public :: pi, pi_dyn, re_dyn, r0, re, rtd, dtr, finit, h0, hs
- public :: kbotdyn, pbotdyn, cm2km
+ public :: pi, pi_dyn, re_dyn, r0, Rearth, rtd, dtr, finit, h0, hs
+ public :: kbotdyn, pbotdyn, cm2km
- real(r8),parameter :: &
- finit = 0._r8, & ! initialization value
- re = 6.37122e8_r8, & ! earth radius (cm)
- h0 = 9.7e6_r8, & ! minimum height (cm)
- r0 = re+h0, & ! min height from earth center
- hs = 1.3e7_r8, &
- cm2km = 1.e-5_r8 ! cm to km conversion
-!
-! Special pi for mag field calculations. If pi=4.*atan(1.) and code is
-! linked with -lmass lib, then the last 2 digits (16th and 17th) of pi
-! are different (56 instead of 12), resulting in theta0(j=49)==0., which
-! is wrong (should be .1110e-15).
-!
- real(r8),parameter :: pi_dyn = 3.14159265358979312_r8 ! pi for dynamo
- real(r8),parameter :: re_dyn = 6.378165e8_r8 ! earth radius (cm) for dynamo
-!
- real(r8),parameter :: dtr = pi/180._r8 ! degrees to radians
- real(r8),parameter :: rtd = 180._r8/pi ! radians to degrees
-!
-! kbotdyn is the column index at which upward dynamo integrals begin.
-! This should correspond to about 85 km (zbotdyn). The index is determined
-! by function find_kbotdyn (edynamo.F90) at every step (called by sub
-! dynamo_input). The function insures that all processors use the same
-! (minimum) kbotdyn.
-!
- real(r8),parameter :: pbotdyn = 1.0_r8 ! Pa pressure (~80 km) at which to set kbotdyn
- integer :: kbotdyn = -1
+ real(r8), parameter :: &
+ finit = 0._r8, & ! initialization value
+ Rearth = rearth_m*100._r8, & ! earth radius (cm)
+ h0 = 9.7e6_r8, & ! minimum height (cm)
+ r0 = Rearth + h0, & ! min height from earth center
+ hs = 1.3e7_r8, & ! apex reference altitude (cm) (XXgoldyXX:modified?)
+ cm2km = 1.e-5_r8 ! cm to km conversion
+ !
+ ! Special pi for mag field calculations. If pi=4.*atan(1.) and code is
+ ! linked with -lmass lib, then the last 2 digits (16th and 17th) of pi
+ ! are different (56 instead of 12), resulting in theta0(j=49)==0., which
+ ! is wrong (should be .1110e-15).
+ !
+ real(r8),parameter :: pi_dyn = 3.14159265358979312_r8 ! pi for dynamo
+ real(r8),parameter :: re_dyn = 6.378165e8_r8 ! earth radius (cm) for dynamo
+ !
+ real(r8),parameter :: dtr = pi/180._r8 ! degrees to radians
+ real(r8),parameter :: rtd = 180._r8/pi ! radians to degrees
+ !
+ ! kbotdyn is the column index at which upward dynamo integrals begin.
+ ! This should correspond to about 85 km (zbotdyn). The index is determined
+ ! by function find_kbotdyn (edynamo.F90) at every step (called by sub
+ ! dynamo_input). The function insures that all processors use the same
+ ! (minimum) kbotdyn.
+ !
+ real(r8), parameter :: pbotdyn = 1.0_r8 ! Pa pressure (~80 km) at which to set kbotdyn
+ integer :: kbotdyn = -1
end module edyn_params
diff --git a/src/ionosphere/waccmx/edyn_solve.F90 b/src/ionosphere/waccmx/edyn_solve.F90
index d570a004e8..4c94dce05f 100644
--- a/src/ionosphere/waccmx/edyn_solve.F90
+++ b/src/ionosphere/waccmx/edyn_solve.F90
@@ -1,103 +1,154 @@
module edyn_solve
!
-! Prepare stencils and call mudpack PDE solver. This is executed
+! Prepare stencils and call mudpack PDE solver. This is executed
! by the root task only, following the gather_edyn call in edynamo.F90.
!
use shr_kind_mod ,only: r8 => shr_kind_r8 ! 8-byte reals
use cam_logfile ,only: iulog
use edyn_params ,only: finit
- use edyn_maggrid ,only: nmlon,nmlonp1,nmlat,nmlath,nmlev
+ use edyn_maggrid ,only: nmlon,nmlonp1,nmlat,nmlath
+ use edyn_maggrid ,only: res_nlev, res_ngrid
+ use spmd_utils, only: masterproc
+ use edyn_solver_coefs, only: nc, cee, cofum
implicit none
- save
+ private
+
+ public :: edyn_solve_init
+ public :: solve_edyn
+
!
! Global 2d fields for root task to complete serial part of dynamo.
-! The zigmxxx, rhs and rims are gathered from subdomains by in sub
+! The zigmxxx, rhs and rims are gathered from subdomains by in sub
! gather_edyn (edynamo.F90).
!
- real(r8),dimension(nmlonp1,nmlat) :: &
+ real(r8),allocatable, dimension(:,:), public :: &
zigm11_glb ,&
zigm22_glb ,&
zigmc_glb ,&
zigm2_glb ,&
rhs_glb
- real(r8),dimension(nmlonp1,nmlat,2) :: &
+ real(r8),allocatable, dimension(:,:,:), public :: &
rim_glb ! pde solver output
- real(r8),dimension(0:nmlonp1,0:nmlat+1) :: &
+ real(r8),allocatable, dimension(:,:) :: &
phisolv
!
-! Dimensions of the 5 grid resolutions for the multi-grid PDE:
- integer,parameter :: &
- nmlon0=nmlon+1, &
- nmlat0=(nmlat +1)/2, &
- nmlon1=(nmlon0+1)/2, &
- nmlat1=(nmlat0+1)/2, &
- nmlon2=(nmlon1+1)/2, &
- nmlat2=(nmlat1+1)/2, &
- nmlon3=(nmlon2+1)/2, &
- nmlat3=(nmlat2+1)/2, &
- nmlon4=(nmlon3+1)/2, &
- nmlat4=(nmlat3+1)/2
-!
-! Unmodified coefficients for using modified mudpack:
- real(r8),dimension(nmlon0,nmlat0,9) :: cofum
-!
-! Space needed for descretized coefficients of of dynamo pde at all
-! 5 levels of resolution:
-!
- integer,parameter :: &
- ncee=10*nmlon0*nmlat0+9*(nmlon1*nmlat1+nmlon2*nmlat2+nmlon3* &
- nmlat3+nmlon4*nmlat4)
-!
-! Coefficients are stored in 1-d array cee(ncee)
-! cee transmits descretized dynamo PDE coefficients to the multi-grid
-! mudpack solver. (cee was formerly in ceee.h)
-! The common block /cee_com/ is retained from earlier versions because
-! of the equivalencing below of coefficient arrays c0, c1, etc.
-!
- real(r8) :: cee(ncee)
- common/cee_com/ cee
+! Dimensions of the grid resolutions for the multi-grid PDE:
+ integer, public, protected :: &
+ nmlon0, &
+ nmlat0, &
+ nmlon1, &
+ nmlat1, &
+ nmlon2, &
+ nmlat2, &
+ nmlon3, &
+ nmlat3, &
+ nmlon4, &
+ nmlat4, &
+ nmlon5, &
+ nmlat5, &
+ nmlon6, &
+ nmlat6, &
+ nmlon7, &
+ nmlat7
+!
+! Space needed for descretized coefficients of of dynamo pde at all levels:
+!
+ integer :: ncee
!
! The following parameters nc0,nc1,... are pointers to the beginning of
! the coefficients for each level of resolution.
!
- integer,parameter :: &
- nc0=1, &
- nc1=nc0+10*nmlon0*nmlat0, &
- nc2=nc1+9 *nmlon1*nmlat1, &
- nc3=nc2+9 *nmlon2*nmlat2, &
- nc4=nc3+9 *nmlon3*nmlat3
-!
-! nc(1:6) are pointers to beginning of coefficient blocks at each of
-! 5 levels of resolution:
-! nc(1) = nc0, pointer to coefficients for highest resolution.
-! nc(2) = nc1, pointer to coefficients at half the resolution of nc0,
-! and so on for nc(3), nc(4), nc(5), etc.
-! nc(6) = ncee, the dimension of the entire cee array, containing
-! coefficients for all 5 levels of resolution.
+ integer :: &
+ nc0, &
+ nc1, &
+ nc2, &
+ nc3, &
+ nc4, &
+ nc5, &
+ nc6, &
+ nc7
+
+ real(r8), private, pointer :: &
+ c0(:), &
+ c1(:), &
+ c2(:), &
+ c3(:), &
+ c4(:), &
+ c5(:), &
+ c6(:), &
+ c7(:)
+
+! phihm is high-latitude potential, set by the high-latitude potential model (e.g. Heelis)
+! or is prescribed (e.g. AMIE)
!
- integer :: nc(6)
+ real(r8), allocatable, public :: phihm(:,:) ! high-latitude potential
+ real(r8), allocatable, public :: pfrac(:,:) ! NH fraction of potential
- real(r8) :: &
- c0(nmlon0,nmlat0,10), &
- c1(nmlon1,nmlat1,9), &
- c2(nmlon2,nmlat2,9), &
- c3(nmlon3,nmlat3,9), &
- c4(nmlon4,nmlat4,9)
- equivalence &
- (cee,c0), &
- (cee(nc1),c1), &
- (cee(nc2),c2), &
- (cee(nc3),c3), &
- (cee(nc4),c4)
-!
-! phihm is high-latitude potential, obtained from the Heelis model
-! (heelis.F90):
-!
- real(r8) :: phihm(nmlonp1,nmlat) ! high-latitude potential
- real(r8) :: pfrac(nmlonp1,nmlat0) ! NH fraction of potential
-
contains
+
+!-----------------------------------------------------------------------
+ subroutine edyn_solve_init
+ use infnan, only: nan, assignment(=)
+
+ allocate(zigm11_glb(nmlonp1,nmlat))
+ allocate(zigm22_glb(nmlonp1,nmlat))
+ allocate(zigmc_glb(nmlonp1,nmlat))
+ allocate(zigm2_glb(nmlonp1,nmlat))
+ allocate(rhs_glb(nmlonp1,nmlat))
+ allocate(rim_glb(nmlonp1,nmlat,2))
+ allocate(phisolv(0:nmlonp1,0:nmlat+1))
+
+ nmlon0=nmlon+1
+ nmlat0=(nmlat +1)/2
+ nmlon1=(nmlon0+1)/2
+ nmlat1=(nmlat0+1)/2
+ nmlon2=(nmlon1+1)/2
+ nmlat2=(nmlat1+1)/2
+ nmlon3=(nmlon2+1)/2
+ nmlat3=(nmlat2+1)/2
+ nmlon4=(nmlon3+1)/2
+ nmlat4=(nmlat3+1)/2
+ nmlon5=(nmlon4+1)/2
+ nmlat5=(nmlat4+1)/2
+ nmlon6=(nmlon5+1)/2
+ nmlat6=(nmlat5+1)/2
+ nmlon7=(nmlon6+1)/2
+ nmlat7=(nmlat6+1)/2
+
+ allocate(cofum(nmlon0,nmlat0,9))
+
+ ncee=10*nmlon0*nmlat0+9*(nmlon1*nmlat1+nmlon2*nmlat2+nmlon3* &
+ nmlat3+nmlon4*nmlat4+nmlon5*nmlat5+nmlon6*nmlat6+nmlon7*nmlat7)
+
+ allocate(cee(ncee))
+
+ nc0=1
+ nc1=nc0+10*nmlon0*nmlat0
+ nc2=nc1+9 *nmlon1*nmlat1
+ nc3=nc2+9 *nmlon2*nmlat2
+ nc4=nc3+9 *nmlon3*nmlat3
+ nc5=nc4+9 *nmlon4*nmlat4
+ nc6=nc5+9 *nmlon5*nmlat5
+ nc7=nc6+9 *nmlon6*nmlat6
+
+ c0 => cee
+ c1 => cee(nc1:)
+ c2 => cee(nc2:)
+ c3 => cee(nc3:)
+ c4 => cee(nc4:)
+ c5 => cee(nc5:)
+ c6 => cee(nc6:)
+ c7 => cee(nc7:)
+
+ allocate(phihm(nmlonp1,nmlat))
+ allocate(pfrac(nmlonp1,nmlat0))
+
+ phihm = nan
+ pfrac = nan
+
+ end subroutine edyn_solve_init
+
!-----------------------------------------------------------------------
subroutine solve_edyn
!
@@ -116,7 +167,7 @@ subroutine stencils
use edyn_maggrid,only: dlatm,dlonm
!
! Locals:
- integer :: i,j,jj,jjj,j0,n,ncc,nmaglon,nmaglat
+ integer :: i,j,jj,jjj,j0,n,ncc,nmaglon,nmaglat, ndx1,ndx2
real(r8) :: sym
real(r8) :: cs(nmlat0)
@@ -128,7 +179,10 @@ subroutine stencils
nc(3) = nc2
nc(4) = nc3
nc(5) = nc4
- nc(6) = ncee
+ nc(6) = nc5
+ nc(7) = nc6
+ nc(8) = nc7
+ nc(9) = ncee
do j=1,nmlat0
cs(j) = cos(pi_dyn/2._r8-(nmlat0-j)*dlatm)
@@ -190,28 +244,31 @@ subroutine stencils
!
! Sigma_(phi phi)/( cos(lam_m)*dt0dts*(Delta lon)^2 )
sym = 1._r8
- call stencmd(zigm11_glb,cs,nmlon0,nmlat0,sym,cee,1)
+ call stencmd(zigm11_glb,nmlon0,nmlat0,sym,cee,1)
!
! Sigma_(lam lam)*cos(lam_m)*dt0dts/(Delta lam)^2
sym = 1._r8
- call stencmd(zigm22_glb,cs,nmlon0,nmlat0,sym,cee,4)
+ call stencmd(zigm22_glb,nmlon0,nmlat0,sym,cee,4)
!
! Sigma_(phi lam)/( 4*Delta lam* Delta lon )
sym = -1._r8
- call stencmd(zigmc_glb,cs,nmlon0,nmlat0,sym,cee,2)
+ call stencmd(zigmc_glb,nmlon0,nmlat0,sym,cee,2)
!
! Sigma_(lam phi)/( 4*Delta lam* Delta lon )
sym = -1._r8
- call stencmd(zigm2_glb,cs,nmlon0,nmlat0,sym,cee,3)
+ call stencmd(zigm2_glb,nmlon0,nmlat0,sym,cee,3)
!
! Insert RHS in finest stencil:
do j = 1,nmlat0
jj = nmlath-nmlat0+j
do i = 1,nmlon0
- c0(i,j,10) = rhs_glb(i,jj)
+ ndx1 = 9*nmlat0*nmlon0 + (j-1)*nmlon0 + i
+ c0(ndx1) = rhs_glb(i,jj)
enddo ! i = 1,nmlon0
enddo ! j = 1,nmlat0
- c0(nmlonp1,1,10) = c0(1,1,10)
+ ndx1 = 9*nmlat0*nmlon0 + nmlonp1
+ ndx2 = 9*nmlat0*nmlon0 + 1
+ c0(ndx1) = c0(ndx2)
!
! Set boundary condition at the pole:
call edges(c0,nmlon0,nmlat0)
@@ -219,19 +276,38 @@ subroutine stencils
call edges(c2,nmlon2,nmlat2)
call edges(c3,nmlon3,nmlat3)
call edges(c4,nmlon4,nmlat4)
+ if ( res_nlev > 5 ) then
+ call edges(c5,nmlon5,nmlat5)
+ endif
+ if ( res_nlev > 6 ) then
+ call edges(c6,nmlon6,nmlat6)
+ endif
+ if ( res_nlev > 7 ) then
+ call edges(c7,nmlon7,nmlat7)
+ endif
call edges(cofum,nmlon0,nmlat0)
!
! Divide stencils by cos(lam_0) (not rhs):
- call divide(c0,nmlon0,nmlat0,nmlon0,nmlat0,cs,1)
- call divide(c1,nmlon1,nmlat1,nmlon0,nmlat0,cs,1)
- call divide(c2,nmlon2,nmlat2,nmlon0,nmlat0,cs,1)
- call divide(c3,nmlon3,nmlat3,nmlon0,nmlat0,cs,1)
- call divide(c4,nmlon4,nmlat4,nmlon0,nmlat0,cs,1)
- call divide(cofum,nmlon0,nmlat0,nmlon0,nmlat0,cs,0)
+ call divide(c0,nmlon0,nmlat0,nmlon0,cs,1)
+ call divide(c1,nmlon1,nmlat1,nmlon0,cs,1)
+ call divide(c2,nmlon2,nmlat2,nmlon0,cs,1)
+ call divide(c3,nmlon3,nmlat3,nmlon0,cs,1)
+ call divide(c4,nmlon4,nmlat4,nmlon0,cs,1)
+ if ( res_nlev > 5 ) then
+ call divide(c5,nmlon5,nmlat5,nmlon0,cs,1)
+ endif
+ if ( res_nlev > 6 ) then
+ call divide(c6,nmlon6,nmlat6,nmlon0,cs,1)
+ endif
+ if ( res_nlev > 7 ) then
+ call divide(c7,nmlon7,nmlat7,nmlon0,cs,1)
+ endif
+ call divide(cofum,nmlon0,nmlat0,nmlon0,cs,0)
!
! Set value of solution to 1. at pole:
do i=1,nmlon0
- c0(i,nmlat0,10) = 1._r8
+ ndx1 = 9*nmlat0*nmlon0 + (nmlat0-1)*nmlon0 + i
+ c0(ndx1) = 1._r8
enddo
!
! Modify stencils and RHS so that the NH high lat potential is inserted at
@@ -248,13 +324,13 @@ subroutine stencils
ncc = 1
nmaglon = nmlon0
nmaglat = nmlat0
- do n=1,5
+ do n=1,res_nlev ! resolution levels
call stenmd(nmaglon,nmaglat,cee(ncc),phihm(1,nmlat0),pfrac)
ncc = ncc+9*nmaglon*nmaglat
if (n==1) ncc = ncc+nmaglon*nmaglat ! rhs is in 10th slot
nmaglon = (nmaglon+1)/2
nmaglat = (nmaglat+1)/2
- enddo ! n=1,5
+ enddo
end subroutine stencils
!-----------------------------------------------------------------------
@@ -276,11 +352,11 @@ subroutine clearcee(cee,nlon0,nlat0)
nlon = nlon0
nlat = nlat0
n = 0
- do m=1,5 ! 5 resolution levels
+ do m=1,res_nlev ! resolution levels
n = n+nlon*nlat
nlon = (nlon+1)/2
nlat = (nlat+1)/2
- enddo ! m=1,5 (5 resolution levels)
+ enddo
n = 9*n+nlon0*nlat0
!
! Clear cee:
@@ -289,7 +365,7 @@ subroutine clearcee(cee,nlon0,nlat0)
enddo
end subroutine clearcee
!-----------------------------------------------------------------------
- subroutine stencmd(zigm,cs,nlon0,nlat0,sym,cee,ncoef)
+ subroutine stencmd(zigm,nlon0,nlat0,sym,cee,ncoef)
!
! Calculate contribution fo 3 by 3 stencil from coefficient zigm
! at each grid point and level.
@@ -301,14 +377,13 @@ subroutine stencmd(zigm,cs,nlon0,nlat0,sym,cee,ncoef)
ncoef ! integer identifier of coefficient
real(r8),intent(in) :: &
zigm(nlon0,nlat0), & ! coefficients (nlon0+1/2,(nlat0+1)/2)
- sym, & ! 1. if zigm symmetric w.r.t. equator, -1 otherwise
- cs(nlat0)
+ sym ! 1. if zigm symmetric w.r.t. equator, -1 otherwise
real(r8),intent(inout) :: & ! output stencil array consisting of c0,c1,c2,c3,c4
- cee(*)
+ cee(*)
!
! Local:
integer :: nc,nlon,nlat,n
- real(r8) :: wkarray(-15:nmlon0+16,nmlat0)
+ real(r8) :: wkarray(-res_ngrid+1:nmlon0+res_ngrid,nmlat0)
!
! Perform half-way interpolation and extend zigm in wkarray:
!
@@ -322,15 +397,15 @@ subroutine stencmd(zigm,cs,nlon0,nlat0,sym,cee,ncoef)
!
! Calculate modified and unmodified stencil on finest grid
!
- call cnmmod(nlon0,nlat0,nlon,nlat,cee(nc),ncoef,wkarray,cofum)
+ call cnmmod(nlon0,nlon,nlat,cee(nc),ncoef,wkarray,cofum)
!
! Stencils on other grid levels remain the same.
nc = nc+10*nlon*nlat
nlon = (nlon+1)/2
nlat = (nlat+1)/2
!
- do n=2,5
- call cnm(nlon0,nlat0,nlon,nlat,cee(nc),ncoef,wkarray)
+ do n=2,res_nlev
+ call cnm(nlon0,nlon,nlat,cee(nc),ncoef,wkarray)
nc = nc+9*nlon*nlat
if (n==1) nc = nc+nlon*nlat
nlon = (nlon+1)/2
@@ -346,7 +421,7 @@ subroutine htrpex(coeff,nmlon0,nmlat0,sym,wkarray)
! Args:
integer,intent(in) :: nmlon0,nmlat0
real(r8),intent(in) :: coeff(nmlon0,nmlat0),sym
- real(r8),intent(out) :: wkarray(-15:nmlon0+16,nmlat0)
+ real(r8),intent(out) :: wkarray(-res_ngrid+1:nmlon0+res_ngrid,nmlat0)
!
! Local:
integer :: i,j,jj
@@ -359,25 +434,25 @@ subroutine htrpex(coeff,nmlon0,nmlat0,sym,wkarray)
enddo ! i=1,nmlon0
enddo ! j=1,nmlat0
!
-! Extend over 32 grid spaces to allow for a total of 5 grid levels:
- do i=1,16
+! Extend over 2*res_ngrid grid spaces to allow for a total of res_nlev grid levels:
+ do i=1,res_ngrid
do j=1,nmlat0
wkarray(1-i,j) = wkarray(nmlon0-i,j)
wkarray(nmlon0+i,j) = wkarray(1+i,j)
enddo ! j=1,nmlat0
- enddo ! i=1,16
+ enddo ! i=1,res_ngrid
end subroutine htrpex
!-----------------------------------------------------------------------
- subroutine cnm(nlon0,nlat0,nlon,nlat,c,ncoef,wkarray)
-!
+ subroutine cnm(nlon0,nlon,nlat,c,ncoef,wkarray)
+!
! Compute contribution to stencil from zigm(ncoef) on grid nlon by nlat,
-! Finest grid is nlon0 by nlat0.
+! Finest grid is nlon0.
!
! Args:
integer,intent(in) :: &
- nlon0,nlat0, & ! finest grid dimensions
+ nlon0, & ! finest grid dimensions
nlon,nlat ! output grid dimensions
- real(r8),intent(in) :: wkarray(-15:nmlon0+16,nmlat0)
+ real(r8),intent(in) :: wkarray(-res_ngrid+1:nmlon0+res_ngrid,nmlat0)
!
! ncoef: integer id of coefficient:
! ncoef = 1 for zigm11
@@ -392,11 +467,11 @@ subroutine cnm(nlon0,nlat0,nlon,nlat,c,ncoef,wkarray)
! Local:
integer :: i,j,nint,i0,j0
! For now, retain this pi to insure bit compatability w/ old code
- real(r8),parameter :: pi=3.141592654_r8
+ real(r8),parameter :: pi=3.141592654_r8
real(r8) :: wk(nlon0,3)
-!
+!
! Compute separation of grid points of resolution nlon x nlat within
-! grid of resolution nlon0,nlat0. Evaluate dlon and dlat, grid spacing
+! grid of resolution nlon0. Evaluate dlon and dlat, grid spacing
! of nlon x nlat.
!
nint = (nlon0-1)/(nlon-1)
@@ -405,12 +480,12 @@ subroutine cnm(nlon0,nlat0,nlon,nlat,c,ncoef,wkarray)
! from zigm(ncoef)
i0 = 1-nint
j0 = 1-nint
-!
-! zigm11:
+!
+! zigm11:
! am 2001-6-27 include boundary condition at equator
- if (ncoef==1) then
+ if (ncoef==1) then
do j = 1,nlat-1
- do i = 1,nlon
+ do i = 1,nlon
c(i,j,1) = c(i,j,1)+0.5_r8*(wkarray(i0+i*nint,j0+j*nint)+ &
wkarray(i0+(i+1)*nint,j0+j*nint))
c(i,j,5) = c(i,j,5)+0.5_r8*(wkarray(i0+i*nint,j0+j*nint)+ &
@@ -508,20 +583,20 @@ subroutine cnm(nlon0,nlat0,nlon,nlat,c,ncoef,wkarray)
endif ! ncoef
end subroutine cnm
!-----------------------------------------------------------------------
- subroutine cnmmod(nlon0,nlat0,nlon,nlat,c,ncoef,wkarray,cofum)
+ subroutine cnmmod(nlon0,nlon,nlat,c,ncoef,wkarray,cofum)
!
! Compute contribution to stencil from zigm(ncoef) on grid nlon by nlat,
-! Finest grid is nlon0 by nlat0.
-!
+! Finest grid is nlon0.
+!
! Args:
integer,intent(in) :: &
- nlon0,nlat0, & ! finest grid dimensions
+ nlon0, & ! finest grid dimensions
nlon,nlat ! output grid dimensions
- real(r8),intent(in) :: wkarray(-15:nmlon0+16,nmlat0)
+ real(r8),intent(in) :: wkarray(-res_ngrid+1:nmlon0+res_ngrid,nmlat0)
real(r8),dimension(nmlon0,nmlat0,9),intent(inout) :: cofum
-!
+!
! ncoef: integer id of coefficient:
-! ncoef = 1 for zigm11
+! ncoef = 1 for zigm11
! ncoef = 2 for zigm12 (=zigmc+zigm2)
! ncoef = 3 for zigm21 (=zigmc-zigm2)
! ncoef = 4 for zigm22
@@ -529,24 +604,24 @@ subroutine cnmmod(nlon0,nlat0,nlon,nlat,c,ncoef,wkarray,cofum)
integer,intent(in) :: ncoef
real(r8),intent(inout) :: &
c(nlon,nlat,*) ! output array for grid point stencils at resolution nlon x nlat
-!
+!
! Local:
integer :: i,j,nint,i0,j0
! For now, retain this pi to insure bit compatability w/ old code
real(r8),parameter :: pi=3.141592654_r8
real(r8) :: wk(nlon0,3)
-!
+!
! Compute separation of grid points of resolution nlon x nlat within
-! grid of resolution nlon0,nlat0. Evaluate dlon and dlat, grid spacing
+! grid of resolution nlon0. Evaluate dlon and dlat, grid spacing
! of nlon x nlat.
!
nint = (nlon0-1)/(nlon-1)
-!
+!
! Scan wkarray nlon x nlat calculating and adding contributions to stencil
! from zigm(ncoef)
i0 = 1-nint
j0 = 1-nint
-!
+!
! zigm11:
! am 2001-6-27 include boundary condition at equator
if (ncoef==1) then
@@ -616,7 +691,7 @@ subroutine cnmmod(nlon0,nlat0,nlon,nlat,c,ncoef,wkarray,cofum)
wkarray(i0+i*nint,j0+(j-1)*nint))
wk(i,1) = 0.5_r8*(wkarray(i0+i*nint,j0+(j+1)*nint)- &
wkarray(i0+i*nint,j0+(j-1)*nint))
-!
+!
! Unmodified:
cofum(i,j,2) = c(i,j,2)
cofum(i,j,4) = c(i,j,4)
@@ -633,7 +708,7 @@ subroutine cnmmod(nlon0,nlat0,nlon,nlat,c,ncoef,wkarray,cofum)
c(i,j,9) = c(i,j,9)-2._r8*wk(i,3)
enddo ! i = 1,nlon
enddo ! j = 2,nlat-1
-!
+!
! Low latitude boundary condition:
j = 1
do i=1,nlon
@@ -692,28 +767,6 @@ subroutine cnmmod(nlon0,nlat0,nlon,nlat,c,ncoef,wkarray,cofum)
enddo ! i=1,nlon
endif ! ncoef
end subroutine cnmmod
-!-----------------------------------------------------------------------
- subroutine ceee(cee,nx,ny,cf)
-
-!
-! Called from mudpack solvers to transfer coefficients.
-!
-! Args:
- integer,intent(in) :: nx,ny
- real(r8),intent(in) :: cee(nx,ny,*)
- real(r8),intent(out) :: cf(nx,ny,*)
-!
-! Local:
- integer :: i,j,n
-
- do n = 1,9
- do j = 1,ny
- do i = 1,nx
- cf(i,j,n) = cee(i,j,n)
- enddo
- enddo
- enddo
- end subroutine ceee
!--------------------------------------------------------------------
subroutine edges(c,nlon,nlat)
!
@@ -721,46 +774,50 @@ subroutine edges(c,nlon,nlat)
!
! Args:
integer,intent(in) :: nlon,nlat
- real(r8),intent(out) :: c(nlon,nlat,*)
+ real(r8),intent(out) :: c(*)
!
! Local:
- integer :: n,i
+ integer :: n,i, ndx
do n=1,8
- do i=1,nlon
- c(i,nlat,n) = 0._r8
+ do i=1,nlon
+ ndx = (n-1)*nlat*nlon + (nlat-1)*nlon + i
+ c(ndx) = 0._r8
enddo
enddo
do i=1,nlon
- c(i,nlat,9) = 1._r8
+ ndx = 8*nlat*nlon + (nlat-1)*nlon + i
+ c(ndx) = 1._r8
enddo
end subroutine edges
!--------------------------------------------------------------------
- subroutine divide(c,nlon,nlat,nlon0,nlat0,cs,igrid)
+ subroutine divide(c,nlon,nlat,nlon0,cs,igrid)
!
! Divide stencil C by cos(theta(i,j))
!
! Args:
- integer,intent(in) :: nlon,nlat,nlon0,nlat0,igrid
- real(r8),intent(in) :: cs(*)
- real(r8),intent(out) :: c(nlon,nlat,*)
+ integer,intent(in) :: nlon,nlat,nlon0,igrid
+ real(r8),intent(in) :: cs(:)
+ real(r8),intent(out) :: c(*)
!
! Local:
- integer :: nint,j0,n,j,i
+ integer :: nint,j0,n,j,i, ndx
!
nint = (nlon0-1)/(nlon-1)
j0 = 1-nint
do n = 1,9
do j = 1,nlat-1
do i = 1,nlon
- c(i,j,n) = c(i,j,n)/(cs(j0+j*nint)*nint**2)
+ ndx = (n-1)*nlat*nlon + (j-1)*nlon + i
+ c(ndx) = c(ndx)/(cs(j0+j*nint)*nint**2)
enddo ! i = 1,nlon
enddo ! j = 1,nlat-1
enddo ! n = 1,9
!
if (nint==1.and.igrid > 0) then
do i = 1,nlon
- c(i,1,10) = c(i,1,10)/cs(1)
+ ndx = 9*nlat*nlon + i
+ c(ndx) = c(ndx)/cs(1)
enddo ! i = 1,nlon
endif
end subroutine divide
@@ -838,8 +895,8 @@ subroutine stenmd(inlon,inlat,c,phihm,pfrac)
end subroutine stenmd
!--------------------------------------------------------------------
subroutine solver(cofum,c0)
-! use edyn_mudmod, only: mudmod
-! use edyn_muh2cr, only: muh
+ use edyn_mudmod, only: mudmod
+ use edyn_muh2cr, only: muh
!
! Call mudpack to solve PDE. Solution is returned in rim:
! real,dimension(nmlonp1,nmlat,2) :: rim
@@ -867,10 +924,10 @@ subroutine solver(cofum,c0)
jntl = 0
ier = 0
isolve = 2
- call mudmod(rim_glb,phisolv,jntl,isolve,ier)! solver in mudmod.F
+ call mudmod(rim_glb,phisolv,jntl,isolve,res_nlev,ier)
if (ier < 0 ) then ! not converged
- write(iulog,*) 'muh: use direct solver'
- call muh(rim_glb,jntl) ! solver in mud.F
+ if (masterproc) write(iulog,*) 'solver: use muh direct solver'
+ call muh(rim_glb,nmlon,nmlat,res_nlev,jntl)
endif
l2norm=0._r8
diff --git a/src/ionosphere/waccmx/edyn_solver_coefs.F90 b/src/ionosphere/waccmx/edyn_solver_coefs.F90
new file mode 100644
index 0000000000..3f149e53a3
--- /dev/null
+++ b/src/ionosphere/waccmx/edyn_solver_coefs.F90
@@ -0,0 +1,51 @@
+module edyn_solver_coefs
+
+ use shr_kind_mod ,only: r8 => shr_kind_r8 ! 8-byte reals
+
+!
+! nc(1:9) are pointers to beginning of coefficient blocks at each of
+! levels of resolution:
+! nc(1) = nc0, pointer to coefficients for highest resolution.
+! nc(2) = nc1, pointer to coefficients at half the resolution of nc0,
+! and so on for nc(3), nc(4), nc(5), etc.
+! nc(9) = ncee, the dimension of the entire cee array, containing
+! coefficients for all levels.
+!
+ integer :: nc(9)
+
+!
+! Coefficients are stored in 1-d array cee(ncee)
+! cee transmits discretized dynamo PDE coefficients to the multi-grid
+! mudpack solver. (cee was formerly in ceee.h)
+!
+ real(r8), target, allocatable :: cee(:)
+!
+! Unmodified coefficients for using modified mudpack:
+ real(r8), allocatable :: cofum(:,:,:)
+
+contains
+
+!-----------------------------------------------------------------------
+ subroutine ceee(cee,nx,ny,cf)
+
+!
+! Called from mudpack solvers to transfer coefficients.
+!
+! Args:
+ integer,intent(in) :: nx,ny
+ real(r8),intent(in) :: cee(nx,ny,*)
+ real(r8),intent(out) :: cf(nx,ny,*)
+!
+! Local:
+ integer :: i,j,n
+
+ do n = 1,9
+ do j = 1,ny
+ do i = 1,nx
+ cf(i,j,n) = cee(i,j,n)
+ enddo
+ enddo
+ enddo
+ end subroutine ceee
+
+end module edyn_solver_coefs
diff --git a/src/ionosphere/waccmx/edynamo.F90 b/src/ionosphere/waccmx/edynamo.F90
index 677be28f85..c600433943 100644
--- a/src/ionosphere/waccmx/edynamo.F90
+++ b/src/ionosphere/waccmx/edynamo.F90
@@ -5,56 +5,31 @@ module edynamo
! Electro-dynamo module
!-----------------------------------------------------------------------
!
- use shr_kind_mod ,only: r8 => shr_kind_r8 ! 8-byte reals
- use cam_logfile ,only: iulog
- use cam_abortutils,only: endrun
- use spmd_utils ,only: masterproc
-#ifdef WACCMX_EDYN_ESMF
- use edyn_params ,only: finit ! initialization value
- use edyn_maggrid ,only: nmlon,nmlonp1,nmlat,nmlath,nmlev
- use edyn_mpi ,only: mlon0,mlon1,omlon1,mlat0,mlat1,mlev0,mlev1,mytid,&
- lon0,lon1,lat0,lat1,lev0,lev1
- use edyn_solve ,only: solve_edyn
- use time_manager, only: get_nstep ! for debug
- use cam_history, only : outfld, hist_fld_active
- use savefield_waccm,only: savefld_waccm_switch
- use esmf, only : ESMF_KIND_R8, ESMF_Field ! ESMF library module
-#endif
+ use shr_kind_mod, only: r8 => shr_kind_r8 ! 8-byte reals
+ use cam_logfile, only: iulog
+ use cam_abortutils, only: endrun
+ use spmd_utils, only: masterproc
+ use edyn_params, only: finit ! initialization value
+ use edyn_maggrid, only: nmlon, nmlonp1, nmlat, nmlath, nmlev
+ use edyn_mpi, only: mlon0, mlon1, omlon1, mytid, mlat0, mlat1
+ use edyn_mpi, only: mlev0, mlev1, lon0, lon1, lat0, lat1
+ use edyn_solve, only: solve_edyn
+ use time_manager, only: get_nstep ! for debug
+ use cam_history, only: outfld, hist_fld_active
+ use savefield_waccm, only: savefld_waccm
implicit none
save
private
-#ifdef WACCMX_EDYN_ESMF
integer :: nstep
!
-! 3d pointers to fields regridded to magnetic subdomains (i,j,k):
-! (mlon0:mlon1,mlat0:mlat1,nmlev)
-!
- real(ESMF_KIND_R8),pointer,dimension(:,:,:) :: & ! 3d fields on mag grid
- ped_mag, & ! pedersen conductivity on magnetic grid
- hal_mag, & ! hall conductivity on magnetic grid
- zpot_mag, & ! geopotential on magnetic grid
- scht_mag, & ! scale height on magnetic grid
- adotv1_mag, & ! ue1 (m/s)
- adotv2_mag ! ue2 (m/s)
-!
-! 2d pointers to fields on magnetic subdomains (i,j):
-! (mlon0:mlon1,mlat0:mlat1)
-!
- real(ESMF_KIND_R8),pointer,dimension(:,:) :: &
- sini_mag, & ! sin(I_m)
- adota1_mag, & ! d(1)**2/D
- adota2_mag, & ! d(2)**2/D
- a1dta2_mag, & ! (d(1) dot d(2)) /D
- be3_mag ! mag field strength (T)
-!
! 2d coefficients and RHS terms for PDE on magnetic subdomains
! (including halo points).
! If use_time3d_integ==.true., these will be input from time3d
! (see use-association in time3d.F90)
!
- real(r8),allocatable,dimension(:,:) :: &
+ real(r8), allocatable, dimension(:,:) :: &
zigm11, & ! sigma11*cos(theta0)
zigmc, & ! sigmac
zigm2, & ! sigma2
@@ -66,121 +41,133 @@ module edynamo
!
! 3d potential and electric field on mag subdomains (see sub pthreed):
! (mlon0:mlon1,mlat0:mlat1,mlev0:mlev1)
-! Electric potential and field components are output fields of edynamo
+! Electric potential and field components are output fields of edynamo
! (later, these can be output arguments of the main driver, sub dynamo)
!
- real(r8),allocatable,dimension(:,:,:) :: &
+ real(r8), allocatable, dimension(:,:,:) :: &
phim3d, & ! 3d electric potential
ed13d,ed23d, & ! 3d electric field for current calculations
ephi3d, & ! 3d eastward electric field
elam3d, & ! 3d equatorward electric field
emz3d, & ! 3d upward electric field
+ zpot_mag, &
zpotm3d ! 3d geopotential (values at all levels)
!
! 3d ion drift velocities on geographic grid (output):
!
-! real(r8),allocatable,dimension(:,:,:),save,target :: & ! (nlev,lon0:lon1,lat0:lat1)
+! real(r8), allocatable, dimension(:,:,:),save,target :: & ! (nlev,lon0:lon1,lat0:lat1)
! ui, & ! zonal ion drift
! vi, & ! meridional ion drift
! wi ! vertical ion drift
!
! 3d electric field on geographic subdomains (see sub pefield):
! (nlev,lon0-2,lon1+2,lat0:lat1)
- real(r8),allocatable,dimension(:,:,:) :: ex,ey,ez
+ real(r8), allocatable, dimension(:,:,:) :: ex,ey,ez
!
! 3d electric potential on geographic subdomains (lon0:lon1,lat0:lat1,nlevp1)
! This will be regridded from phim3d for output to history files.
- real(r8),allocatable,dimension(:,:,:) :: phig3d ! (lon0:lon1,lat0:lat1,nlevp1)
- real(r8),allocatable,dimension(:,:,:) :: poten ! (nlevp1,lon0:lon1,lat0:lat1)
+ real(r8), allocatable, dimension(:,:,:) :: phig3d ! (lon0:lon1,lat0:lat1,nlevp1)
+ real(r8), allocatable, dimension(:,:,:) :: poten ! (nlevp1,lon0:lon1,lat0:lat1)
!
! Fields at mag equator:
!
- real(r8),allocatable,dimension(:,:) :: & ! (mlon0:mlon1,nmlev)
- ped_meq, hal_meq, adotv1_meq, adotv2_meq, zpot_meq
- real(r8),allocatable,dimension(:,:,:) :: & ! (mlon0:mlon1,nmlev,4)
- fmeq_out
- real(r8),allocatable,dimension(:,:,:,:) :: & ! (mlon0:mlon1,mlat0:mlat1,nmlev,4)
- fmeq_in
-!
+ real(r8), allocatable, dimension(:,:) :: & ! (mlon0:mlon1,nmlev)
+ ped_meq, hal_meq, adotv1_meq, adotv2_meq
+ real(r8), allocatable, dimension(:,:,:) :: & ! (mlon0:mlon1,nmlev,4)
+ fmeq_out
+ real(r8), allocatable, dimension(:,:,:,:) :: & ! (mlon0:mlon1,mlat0:mlat1,nmlev,4)
+ fmeq_in
+!
! Global longitude values near mag equator and poles for complete_integrals and rhs.
-! These are declared in module data because they are used by subs complete_integrals
-! and rhspde. The nf2d 6 fields are: zigm11,zigm22,zigmc,zigm2,rim1,rim2,
+! These are declared in module data because they are used by subs complete_integrals
+! and rhspde. The nf2d 6 fields are: zigm11,zigm22,zigmc,zigm2,rim1,rim2,
! order is important (see feq_jpm1 and fpole_jpm2)!
!
- integer,parameter :: nf2d=6 ! 6 2d fields
- real(r8) :: feq_jpm1(nmlonp1,2,nf2d) ! 6 fields at 2 lats (eq-1, eq+1)
- real(r8) :: fpole_jpm2(nmlonp1,4,nf2d) ! fields at S pole+1,2 and N pole-1,2
+ integer, parameter :: nf2d=6 ! 6 2d fields
+ real(r8), allocatable :: feq_jpm1(:,:,:) ! 6 fields at 2 lats (eq-1, eq+1)
+ real(r8), allocatable :: fpole_jpm2(:,:,:) ! fields at S pole+1,2 and N pole-1,2
- real(r8),parameter :: unitvm(nmlon)=1._r8
+ real(r8), allocatable :: unitvm(:)
!
! ed1,ed2: 2d electric field output on mag grid:
! (use-associated by dpie_coupling)
!
- real(r8),allocatable,dimension(:,:) :: ed1,ed2 ! (mlon0-1:mlon1+1,mlat0-1:mlat1+1)
+ real(r8), allocatable, dimension(:,:) :: ed1, ed2 ! (mlon0-1:mlon1+1,mlat0-1:mlat1+1)
!
-! Global inputs to time3d: Note dimension order switch:
+! Global inputs to time3d: Note dimension order switch:
! edynamo has subdomains (mlon,mlat), whereas time3d has global (nmlat,nmlonp1)
! These are use-associated by time3d, and are init to zero in edyn_init.
!
- real(r8),dimension(nmlat,nmlonp1) :: ed1_glb,ed2_glb
- logical :: do_integ ! from input arg do_integrals
- logical :: debug=.false. ! set true for prints to stdout at each call
+ real(r8), allocatable, dimension(:,:) :: ed1_glb, ed2_glb
+ logical :: debug = .false. ! set true for prints to stdout at each call
- public alloc_edyn,ed1,ed2,ed1_glb,ed2_glb
- public zigm11,zigmc,zigm2,zigm22,rim1,rim2
-#endif
+ public :: alloc_edyn, ed1, ed2, ed1_glb, ed2_glb
+ public :: zigm11, zigmc, zigm2, zigm22, rim1, rim2
public :: dynamo
- contains
+contains
!-----------------------------------------------------------------------
- subroutine dynamo(tn,un,vn,wn,zpot,ped,hall,ui,vi,wi, &
- lev0,lev1,lon0,lon1,lat0,lat1,do_integrals)
- use edyn_mpi,only: &
- mp_mag_halos, & ! set magnetic halo points
- mp_scatter_phim ! scatter solution to slave tasks
- use edyn_solve,only: rim_glb ! pde solver output (nmlonp1,nmlat,2)
+ subroutine dynamo( zpot_mag_in, ped_mag, hall_mag, adotv1_mag, adotv2_mag, adota1_mag, &
+ adota2_mag, a1dta2_mag,be3_mag, sini_mag, zpot, &
+ ui, vi, wi, lon0,lon1, lat0,lat1, lev0,lev1, do_integrals )
+ use edyn_mpi, only: &
+ mp_mag_halos, & ! set magnetic halo points
+ mp_scatter_phim ! scatter solution to slave tasks
+ use edyn_solve, only: rim_glb ! pde solver output (nmlonp1,nmlat,2)
!
! Main driver for edynamo.
! Note alloc_edyn and esmf_init are called from edyn_init.
!
! Args:
integer,intent(in) :: & ! geographic subdomain
- lev0,lev1, & ! first,last level indices (not distributed)
- lon0,lon1, & ! first,last longitude indices of geographic subdomain
- lat0,lat1 ! first,last latitude indices of geographic subdomain
+ lon0, lon1, & ! first,last longitude indices of geographic subdomain
+ lat0, lat1, & ! first,last latitude indices of geographic subdomain
+ lev0, lev1 ! first,last level indices (not distributed)
+!
+! Inputs :
+!
+ real(r8), dimension(mlon0:mlon1,mlat0:mlat1,mlev0:mlev1), intent(in) :: &
+ zpot_mag_in, & ! geopotential (cm)
+ ped_mag, & ! pedersen conductivity (S/m)
+ hall_mag, & ! hall conductivity (S/m)
+ adotv1_mag,& ! ue1 (m/s)
+ adotv2_mag ! ue2 (m/s)
+ real(r8), dimension(mlon0:mlon1,mlat0:mlat1), intent(in) :: &
+ adota1_mag, &
+ adota2_mag, &
+ a1dta2_mag, &
+ be3_mag, &
+ sini_mag
+
+ ! inputs on geographic (oplus) grid
+ real(r8), dimension(lev0:lev1,lon0:lon1,lat0:lat1), intent(in) :: &
+ zpot ! geopotential (cm)
+
+ real(r8), dimension(lev0:lev1,lon0:lon1,lat0:lat1), intent(out) :: &
+ ui, & ! zonal ion drift (cm/s)
+ vi, & ! meridional ion drift (cm/s)
+ wi ! vertical ion drift (cm/s)
+
logical,intent(in) :: do_integrals
-!
-! Inputs from neutral atmosphere (on geographic subdomain):
-! (intent(inout) because they are passed to sub dynamo_input)
-!
- real(r8),intent(inout),dimension(lev0:lev1,lon0:lon1,lat0:lat1) :: &
- tn, & ! neutral temperature (deg K)
- un, & ! neutral zonal wind velocity (cm/s)
- vn, & ! neutral meridional wind velocity (cm/s)
- wn, & ! neutral vertical wind velocity (cm/s)
- zpot, & ! geopotential height (cm)
- ped, & ! pedersen conductivity (S/m)
- hall, & ! hall conductivity (S/m)
- ui, & ! zonal ion drift (cm/s)
- vi, & ! meridional ion drift (cm/s)
- wi ! vertical ion drift (cm/s)
-#ifdef WACCMX_EDYN_ESMF
+
if (debug) then
nstep = get_nstep()
- write(iulog,"('Enter dynamo: nstep=',i5,' do_integrals=',l1)") nstep,do_integrals
- endif
+ write(iulog,"(a,i5,a,l1)") 'Enter dynamo: nstep=', nstep, &
+ ', do_integrals=', do_integrals
+ end if
- do_integ = do_integrals ! do_integ is module data
!
-! Regrid input fields from geographic to magnetic, and calculate
+! Regrid input fields from geographic to magnetic, and calculate
! some additional fields. If conductances are passed in from
! time3d (.not.do_integrals), then we do not need these inputs.
!
if (do_integrals) then
- call dynamo_input(tn,un,vn,wn,zpot,ped,hall,&
- lev0,lev1,lon0,lon1,lat0,lat1)
- if (debug) write(iulog,"('edynamo debug: after dynamo_input')")
- endif
+ call dynamo_set_data( zpot_mag_in, ped_mag, hall_mag, adotv1_mag, adotv2_mag )
+ if (debug) then
+ write(iulog,"('edynamo debug: after dynamo_input')")
+ end if
+ end if
+
!
! Fieldline integration:
!
@@ -189,33 +176,44 @@ subroutine dynamo(tn,un,vn,wn,zpot,ped,hall,ui,vi,wi, &
! (nmlat,nmlonp1) to (nmlonp1,nmlat), defining zigmxx and rim1,2
! for the solver.
!
- if (do_integrals) call fieldline_integrals
+ if (do_integrals) then
+ call fieldline_integrals(ped_mag, hall_mag, adotv1_mag, adotv2_mag, &
+ adota1_mag, adota2_mag, a1dta2_mag, be3_mag, sini_mag)
+ end if
!
! Equatorial and polar values, hemisphere folding:
! (these will be time3d integrations if do_integrals==.false.)
!
- call complete_integrals
- if (debug) write(iulog,"('edynamo debug: after complete_integrals')")
+ call complete_integrals()
+ if (debug) then
+ write(iulog,"('edynamo debug: after complete_integrals')")
+ end if
!
! Calculate right-hand side on mag subdomains:
! (mag halos are needed in rim1,2 for rhs calculation)
!
- call mp_mag_halos(rim1,mlon0,mlon1,mlat0,mlat1,1)
+ call mp_mag_halos(rim1,mlon0,mlon1,mlat0,mlat1,1)
call mp_mag_halos(rim2,mlon0,mlon1,mlat0,mlat1,1)
call rhspde
- if (debug) write(iulog,"('edynamo debug: after rhspde')")
+ if (debug) then
+ write(iulog,"('edynamo debug: after rhspde')")
+ end if
!
! Gather needed arrays to root task for the serial solver:
!
- call gather_edyn
- if (debug) write(iulog,"('edynamo debug: after gather_edyn')")
+ call gather_edyn()
+ if (debug) then
+ write(iulog,"('edynamo debug: after gather_edyn')")
+ end if
!
-! Root task now sets up stencils and calls the PDE solver:
+! Root task now sets up stencils and calls the PDE solver:
!
- if (debug) write(iulog,"('edynamo debug: call solve_edyn (master only)')")
- if (mytid==0) then
- call solve_edyn
- endif
+ if (debug) then
+ write(iulog,"('edynamo debug: call solve_edyn (master only)')")
+ end if
+ if (mytid == 0) then
+ call solve_edyn()
+ end if
if (debug) write(iulog,"('edynamo debug: after solve_edyn (master only)')")
!
! rim1 after solver is needed for highlat_poten. rim_glb is distributed
@@ -223,300 +221,97 @@ subroutine dynamo(tn,un,vn,wn,zpot,ped,hall,ui,vi,wi, &
! fieldline_integrals, complete_integrals, etc.
!
call mp_scatter_phim(rim_glb(:,:,1),rim1(mlon0:mlon1,mlat0:mlat1))
- if (debug) write(iulog,"('edynamo debug: after mp_scatter_phim')")
+ if (debug) then
+ write(iulog,"('edynamo debug: after mp_scatter_phim')")
+ end if
call mp_mag_halos(rim1,mlon0,mlon1,mlat0,mlat1,1)
- if (debug) write(iulog,"('edynamo debug: after mp_mag_halos')")
-!
+ if (debug) then
+ write(iulog,"('edynamo debug: after mp_mag_halos')")
+ end if
+!
! Add high latitude potential from empirical model (heelis or weimer)
! to solution rim1, defining phim2d on mag subdomains.
!
- call highlat_poten
- if (debug) write(iulog,"('edynamo debug: after highlat_poten')")
+ call highlat_poten()
+ if (debug) then
+ write(iulog,"('edynamo debug: after highlat_poten')")
+ end if
!
! Expand phim2d to phim3d, first setting mag halos in phim2d from
! hightlat_poten. phim3d will then be the final potential from pdynamo.
!
call mp_mag_halos(phim2d,mlon0,mlon1,mlat0,mlat1,1)
- call pthreed
- if (debug) write(iulog,"('edynamo debug: after pthreed')")
+ call pthreed()
+ if (debug) then
+ write(iulog,"('edynamo debug: after pthreed')")
+ end if
!
! Convert electric field to geographic grid:
- call pefield
- if (debug) write(iulog,"('edynamo debug: after pefield')")
+ call pefield()
+ if (debug) then
+ write(iulog,"('edynamo debug: after pefield')")
+ end if
!
! Calculate ion drift velocities:
!
- call ionvel(zpot,ui,vi,wi)
- if (debug) write(iulog,"('edynamo debug: after ionvel')")
-#else
- call endrun('ERROR: To use edymamo must build with cppdef WACCMX_EDYN_ESMF')
-#endif
+ call ionvel(zpot,ui,vi,wi, lon0,lon1, lat0,lat1, lev0,lev1)
+ if (debug) then
+ write(iulog,"('edynamo debug: after ionvel')")
+ end if
+
end subroutine dynamo
!-----------------------------------------------------------------------
-#ifdef WACCMX_EDYN_ESMF
- subroutine dynamo_input(tn,un,vn,wn,zpot,ped,hall,&
- lev0,lev1,lon0,lon1,lat0,lat1)
-!
-! Input fields are in "TIEGCM format" and CGS units.
-! Provide needed inputs to the dynamo by regridding the fields
-! from geographic to magnetic.
-!
- use edyn_params ,only: h0,kbotdyn
- use getapex ,only: & ! (nlonp1,0:nlatp1)
- zb, & ! downward component of magnetic field
- bmod ! magnitude of magnetic field (gauss?)
- use edyn_geogrid,only: nlev
-
- use edyn_mpi,only: &
-! mp_periodic_f2d, & ! set 2d periodic points
-! mp_periodic_f3d, & ! set 3d periodic points
- mp_mageq ! get global values at mag equator
-
- use edyn_esmf,only: & ! use-associate grid definitions and subroutines
- geo_src_grid, & ! geographic source grid (ESMF_Grid type)
- edyn_esmf_regrid, & ! subroutine that calls ESMF to regrid a field
- edyn_esmf_set2d_geo, & ! set values of a 2d ESMF field on geographic grid
- edyn_esmf_set3d_geo, & ! set values of a 3d ESMF field on geographic grid
- edyn_esmf_get_3dfield, & ! retrieve values of a 3d ESMF field
- edyn_esmf_get_2dfield ! retrieve values of a 2d ESMF field
-
- use edyn_esmf,only: & ! 3d ESMF fields on geographic grid
- geo_ped, & ! pedersen conductivity
- geo_hal, & ! hall conductivity
- geo_zpot, & ! geopotential height
- geo_scht, & ! scale height
- geo_adotv1, & ! ue1 (m/s)
- geo_adotv2 ! ue2 (m/s)
-
- use edyn_esmf,only: & ! 2d ESMF fields on geographic grid
- geo_sini, & ! sin(I_m)
- geo_adota1, & ! d(1)**2/D
- geo_adota2, & ! d(2)**2/D
- geo_a1dta2, & ! (d(1) dot d(2)) /D
- geo_be3 ! mag field strength (T)
-
- use edyn_esmf,only: & ! 3d ESMF fields on geomagnetic grid
- mag_ped, & ! pedersen conductivity
- mag_hal, & ! hall conductivity
- mag_zpot, & ! geopotential height
- mag_scht, & ! scale height
- mag_adotv1, & ! ue1 (m/s)
- mag_adotv2 ! ue2 (m/s)
-
- use edyn_esmf,only: & ! 3d fields on geographic grid (bundled?)
- nf_3dgeo, & ! number of 3d geo fields
- f_3dgeo ! array of nf_3dgeo pointers to 3d geo fields
-
- use edyn_esmf,only: & ! 2d ESMF fields on geomagnetic grid
- mag_sini, & ! sin(I_m)
- mag_adota1, & ! d(1)**2/D
- mag_adota2, & ! d(2)**2/D
- mag_a1dta2, & ! (d(1) dot d(2)) /D
- mag_be3 ! mag field strength (T)
-
- use edyn_esmf,only: edyn_esmf_update_step ! indicates ESMF updated the current time step with updated geo-mag coordinates
- use edyn_esmf,only: edyn_esmf_update_flag
-!
-! Args: Input fields on geographic grid:
+ subroutine dynamo_set_data( zpot_mag_in, ped_mag, hall_mag, adotv1_mag, adotv2_mag )
!
- integer,intent(in) :: lev0,lev1,lon0,lon1,lat0,lat1
- real(r8),dimension(lev0:lev1,lon0:lon1,lat0:lat1),intent(in) :: &
- tn, & ! neutral temperature (deg K)
- un, & ! neutral zonal velocity (cm/s)
- vn, & ! neutral meridional velocity (cm/s)
- wn ! neutral vertical velocity (cm/s)
-
- real(r8),dimension(lev0:lev1,lon0:lon1,lat0:lat1),intent(inout) :: &
- zpot, & ! geopotential height (cm)
- ped, & ! pedersen conductivity (S/m)
- hall ! hall conductivity (S/m)
-!
-! Local:
-!
- integer :: j,i,k
- real(r8),dimension(lev0:lev1,lon0:lon1,lat0:lat1) :: &
- scheight, & ! scale height (no longer necessary since wn calculated outside)
- adotv1, & ! ue1 (m/s)
- adotv2 ! ue2 (m/s)
- real(r8),dimension(lon0:lon1,lat0:lat1) :: &
- sini, & ! sin(I_m)
- adota1, & ! d(1)**2/D
- adota2, & ! d(2)**2/D
- a1dta2, & ! (d(1) dot d(2)) /D
- be3 ! mag field strength (T)
-
-!
-! See nf_3dgeo above:
- real(r8),dimension(lev0:lev1,lon0:lon1,lat0:lat1,nf_3dgeo) :: f3d
- character(len=8) :: fnames(nf_3dgeo)
-!
-! For wc timing:
-! real(r8) :: starttime,endtime
-
- scheight = 0._r8
-
-! starttime = mpi_wtime()
-
- if (debug) write(iulog,"('Enter dynamo_input')")
!
-! Save 3d input fields on geo grid to WACCM history:
- call savefld_waccm_switch(tn ,'EDYN_TN' ,nlev,lon0,lon1,lat0,lat1)
- call savefld_waccm_switch(un ,'EDYN_UN' ,nlev,lon0,lon1,lat0,lat1)
- call savefld_waccm_switch(vn ,'EDYN_VN' ,nlev,lon0,lon1,lat0,lat1)
- call savefld_waccm_switch(wn ,'EDYN_WN' ,nlev,lon0,lon1,lat0,lat1)
- call savefld_waccm_switch(zpot ,'EDYN_Z' ,nlev,lon0,lon1,lat0,lat1)
- call savefld_waccm_switch(ped ,'EDYN_PED' ,nlev,lon0,lon1,lat0,lat1)
- call savefld_waccm_switch(hall ,'EDYN_HALL',nlev,lon0,lon1,lat0,lat1)
+ use edyn_params, only: h0, kbotdyn
+ use edyn_mpi, only: mp_mageq ! get global values at mag equator
!
- if (debug) write(iulog,"('dynamo_input after savefld_waccm calls')")
- if (debug) write(iulog,"('dynamo_input: kbotdyn=',i4)") kbotdyn
+! Args: Input fields on magnetic grid (mlon0:mlon1,mlat0:mlat1,mlev0:mlev1):
!
-! Calculate some 2d and 3d fields:
- call calc_adotv(zpot,un,vn,wn,adotv1,adotv2,adota1,adota2, &
- a1dta2,be3,lev0,lev1,lon0,lon1,lat0,lat1)
- if (debug) write(iulog,"('dynamo_input after calc_adotv')")
-
- call savefld_waccm_switch(adotv1 ,'EDYN_ADOTV1',nlev,lon0,lon1,lat0,lat1)
- call savefld_waccm_switch(adotv2 ,'EDYN_ADOTV2',nlev,lon0,lon1,lat0,lat1)
+ real(r8), dimension(mlon0:mlon1,mlat0:mlat1,mlev0:mlev1),intent(in) :: &
+ zpot_mag_in,&! cm
+ ped_mag, & ! pedersen conductivity (S/m)
+ hall_mag, & ! hall conductivity (S/m)
+ adotv1_mag,& ! ue1 (m/s)
+ adotv2_mag ! ue2 (m/s)
!
-! Calculate sini sin(I_m) (zb and bmod are from apex)
+! Local:
!
- do j=lat0,lat1
- do i=lon0,lon1
- sini(i,j) = zb(i,j)/bmod(i,j) ! sin(I_m)
- enddo
- enddo
+ integer :: j, i, k
!
-! Set 3d field values on geographic source grid, including
-! separate calculations at the poles. This is consolidated
-! into a single call, so mp_geopole_3d can be called by
-! esmf_set3d_geo once for all fields.
-!
- fnames = (/'PED ','HAL ','ZPOT ','SCHT ',&
- 'ADOTV1 ','ADOTV2 '/)
-
- f3d(:,:,:,1) = ped
- f3d(:,:,:,2) = hall
- f3d(:,:,:,3) = zpot
- f3d(:,:,:,4) = scheight
- f3d(:,:,:,5) = adotv1
- f3d(:,:,:,6) = adotv2
-
- f_3dgeo(1) = geo_ped
- f_3dgeo(2) = geo_hal
- f_3dgeo(3) = geo_zpot
- f_3dgeo(4) = geo_scht
- f_3dgeo(5) = geo_adotv1
- f_3dgeo(6) = geo_adotv2
-
- call edyn_esmf_set3d_geo(f_3dgeo,fnames,f3d,nf_3dgeo, &
- lev0,lev1,lon0,lon1,lat0,lat1)
-
- geo_ped = f_3dgeo(1)
- geo_hal = f_3dgeo(2)
- geo_zpot = f_3dgeo(3)
- geo_scht = f_3dgeo(4)
- geo_adotv1 = f_3dgeo(5)
- geo_adotv2 = f_3dgeo(6)
-
- ped = f3d(:,:,:,1)
- hall = f3d(:,:,:,2)
- zpot = f3d(:,:,:,3)
- scheight= f3d(:,:,:,4)
- adotv1 = f3d(:,:,:,5)
- adotv2 = f3d(:,:,:,6)
-
- if (debug) write(iulog,"('dynamo_input after edyn_esmf_set3d_geo')")
-!
-! 2d fields need only be calculated in first timestep:
- if (edyn_esmf_update_step) then
-!
-! Set 2d field values on geographic grid:
-! (esmf fields on source grid exclude periodic points)
-!
- call edyn_esmf_set2d_geo(geo_sini, geo_src_grid,'SINI ',&
- sini, lon0,lon1,lat0,lat1)
- call edyn_esmf_set2d_geo(geo_adota1,geo_src_grid,'ADOTA1 ',&
- adota1,lon0,lon1,lat0,lat1)
- call edyn_esmf_set2d_geo(geo_adota2,geo_src_grid,'ADOTA2 ',&
- adota2,lon0,lon1,lat0,lat1)
- call edyn_esmf_set2d_geo(geo_a1dta2,geo_src_grid,'A1DTA2 ',&
- a1dta2,lon0,lon1,lat0,lat1)
- call edyn_esmf_set2d_geo(geo_be3, geo_src_grid,'BE3 ',&
- be3, lon0,lon1,lat0,lat1)
- if (debug) write(iulog,"('dynamo_input after edyn_esmf_set2d_geo')")
- endif
-
-!
-! Regrid 3d geo fields to mag grid:
- call edyn_esmf_regrid(geo_ped ,mag_ped, 'geo2mag',3)
- call edyn_esmf_regrid(geo_hal ,mag_hal, 'geo2mag',3)
- call edyn_esmf_regrid(geo_zpot ,mag_zpot, 'geo2mag',3)
- call edyn_esmf_regrid(geo_scht ,mag_scht, 'geo2mag',3)
- call edyn_esmf_regrid(geo_adotv1 ,mag_adotv1, 'geo2mag',3)
- call edyn_esmf_regrid(geo_adotv2 ,mag_adotv2, 'geo2mag',3)
- if (debug) write(iulog,"('dynamo_input after edyn_esmf_regrid')")
-!
-! Regrid time-independent 2d geo fields to mag grid:
- if (edyn_esmf_update_step) then
- call edyn_esmf_regrid(geo_sini ,mag_sini , 'geo2mag',2)
- call edyn_esmf_regrid(geo_adota1 ,mag_adota1, 'geo2mag',2)
- call edyn_esmf_regrid(geo_adota2 ,mag_adota2, 'geo2mag',2)
- call edyn_esmf_regrid(geo_a1dta2 ,mag_a1dta2, 'geo2mag',2)
- call edyn_esmf_regrid(geo_be3 ,mag_be3 , 'geo2mag',2)
- endif
-!
-! Define edynamo module data pointers to the regridded mag fields.
-! First arg of esmf_get_field is input esmf field (my_esmf module),
-! second arg is output data pointer (edynamo module)
-! (These destination grid fields have periodic points allocated and set)
-!
-! Get regridded 3d mag fields:
-!
- call edyn_esmf_get_3dfield(mag_ped ,ped_mag, "PED ")
- call edyn_esmf_get_3dfield(mag_hal ,hal_mag, "HAL ")
- call edyn_esmf_get_3dfield(mag_zpot ,zpot_mag, "ZPOT ")
- call edyn_esmf_get_3dfield(mag_scht ,scht_mag, "SCHT ")
- call edyn_esmf_get_3dfield(mag_adotv1,adotv1_mag,"ADOTV1 ")
- call edyn_esmf_get_3dfield(mag_adotv2,adotv2_mag,"ADOTV2 ")
-!
-! Get regridded 2d mag fields (time-independent):
-! First arg is input ESMF field, second is output pointer:
-!
- if (edyn_esmf_update_step) then
- call edyn_esmf_get_2dfield(mag_sini ,sini_mag , "SINI ")
- call edyn_esmf_get_2dfield(mag_adota1,adota1_mag, "ADOTA1 ")
- call edyn_esmf_get_2dfield(mag_adota2,adota2_mag, "ADOTA2 ")
- call edyn_esmf_get_2dfield(mag_a1dta2,a1dta2_mag, "A1A2M ")
- call edyn_esmf_get_2dfield(mag_be3 ,be3_mag , "BE3 ")
- call edyn_esmf_update_flag(.false.)
- endif
+ if (debug .and. masterproc) then
+ write(iulog,"('dynamo_input after savefld_waccm calls')")
+ write(iulog,"('dynamo_input: kbotdyn=',i4)") kbotdyn
+ end if
!
! fmeq_in are input fields on 3d mag subdomains.
! allocate(fmeq_in(mlon0:mlon1,mlat0:mlat1,mlev0:mlev1,4)
!
- fmeq_in(:,:,:,1) = ped_mag(:,:,:)
- fmeq_in(:,:,:,2) = hal_mag(:,:,:)
- fmeq_in(:,:,:,3) = adotv1_mag(:,:,:)
- fmeq_in(:,:,:,4) = adotv2_mag(:,:,:)
+ fmeq_in(:,:,:,1) = ped_mag(:,:,:)
+ fmeq_in(:,:,:,2) = hall_mag(:,:,:)
+ fmeq_in(:,:,:,3) = adotv1_mag(:,:,:)
+ fmeq_in(:,:,:,4) = adotv2_mag(:,:,:)
!
! Tasks w/ mag equator send eq data(i,k) to other tasks in their tidi:
!
- call mp_mageq(fmeq_in,fmeq_out,4,mlon0,mlon1,mlat0,mlat1,nmlev)
+ call mp_mageq(fmeq_in, fmeq_out, 4, mlon0, mlon1, mlat0, mlat1, nmlev)
!
! Output arrays now have mag equator data on longitude subdomain
! and full column (mlon0:mlon1,nmlev)
! These will be used in fieldline_integrals.
!
- ped_meq(:,:) = fmeq_out(:,:,1)
- hal_meq(:,:) = fmeq_out(:,:,2)
- adotv1_meq(:,:) = fmeq_out(:,:,3)
- adotv2_meq(:,:) = fmeq_out(:,:,4)
+ ped_meq(:,:) = fmeq_out(:,:,1)
+ hal_meq(:,:) = fmeq_out(:,:,2)
+ adotv1_meq(:,:) = fmeq_out(:,:,3)
+ adotv2_meq(:,:) = fmeq_out(:,:,4)
+
+ zpot_mag(:,:,:) = zpot_mag_in(:,:,:)
!
! Save geopotential on magnetic grid in zpotm3d, then
! limit max zpot_mag to h0 for use in fieldline integrals
@@ -524,147 +319,28 @@ subroutine dynamo_input(tn,un,vn,wn,zpot,ped,hall,&
! below kbotdyn. It is not necessary to set poles of zpotm3d
! since sub pthreed does not reference the poles of zpotm3d.
!
- do k=mlev0,mlev1
- do j=mlat0,mlat1
- do i=mlon0,mlon1
- zpotm3d(i,j,k) = zpot_mag(i,j,k)
- if (zpot_mag(i,j,k) < h0) zpot_mag(i,j,k)=h0
- enddo
- enddo
- enddo
-!
-! Set 3d mag fields to zero below kbotdyn:
-!
-! ped_mag(:,:,1:kbotdyn-1) = finit
-! hal_mag(:,:,1:kbotdyn-1) = finit
-! adotv1_mag(:,:,1:kbotdyn-1) = finit
-! adotv2_mag(:,:,1:kbotdyn-1) = finit
-
-! call savefld_waccm_switch(adota1_mag(mlon0:mlon1,mlat0:mlat1) ,'ADOTA1_MAG' ,1,mlon0,mlon1,mlat0,mlat1)
-
- do j=mlat0,mlat1
- call outfld('PED_MAG',ped_mag(mlon0:omlon1,j,mlev1:mlev0:-1),omlon1-mlon0+1,j)
- call outfld('HAL_MAG',hal_mag(mlon0:omlon1,j,mlev1:mlev0:-1),omlon1-mlon0+1,j)
- call outfld('ZPOT_MAG',zpot_mag(mlon0:omlon1,j,mlev1:mlev0:-1),omlon1-mlon0+1,j)
- call outfld('ADOTV1_MAG',adotv1_mag(mlon0:omlon1,j,mlev1:mlev0:-1),omlon1-mlon0+1,j)
- call outfld('ADOTV2_MAG',adotv2_mag(mlon0:omlon1,j,mlev1:mlev0:-1),omlon1-mlon0+1,j)
- enddo
-!
-! Save 3d input fields on geo grid to waccm files (switch to "waccm format"):
-! call savefld_waccm_switch(tn ,'EDYN_TN' ,nlev,lon0,lon1,lat0,lat1)
-! call savefld_waccm_switch(un ,'EDYN_UN' ,nlev,lon0,lon1,lat0,lat1)
-! call savefld_waccm_switch(vn ,'EDYN_VN' ,nlev,lon0,lon1,lat0,lat1)
-! call savefld_waccm_switch(wn ,'EDYN_WN' ,nlev,lon0,lon1,lat0,lat1)
-! call savefld_waccm_switch(zpot ,'EDYN_Z' ,nlev,lon0,lon1,lat0,lat1)
-! call savefld_waccm_switch(ped ,'EDYN_PED' ,nlev,lon0,lon1,lat0,lat1)
-! call savefld_waccm_switch(hall ,'EDYN_HALL',nlev,lon0,lon1,lat0,lat1)
-
-! call savefld_waccm_switch(scheight,'EDYN_SCHT' ,nlev,lon0,lon1,lat0,lat1)
- call savefld_waccm_switch(adotv1 ,'EDYN_ADOTV1',nlev,lon0,lon1,lat0,lat1)
- call savefld_waccm_switch(adotv2 ,'EDYN_ADOTV2',nlev,lon0,lon1,lat0,lat1)
-!
-! Save 2d geo fields (lon0:lon1,lat0:lat1):
- call savefld_waccm_switch(sini ,'EDYN_SINI' ,1,lon0,lon1,lat0,lat1)
- call savefld_waccm_switch(adota1,'EDYN_ADOTA1',1,lon0,lon1,lat0,lat1)
- call savefld_waccm_switch(adota2,'EDYN_ADOTA2',1,lon0,lon1,lat0,lat1)
- call savefld_waccm_switch(a1dta2,'EDYN_A1DTA2',1,lon0,lon1,lat0,lat1)
- call savefld_waccm_switch(be3 ,'EDYN_BE3' ,1,lon0,lon1,lat0,lat1)
-
-! endtime = mpi_wtime()
-! time_dynamo_input=time_dynamo_input+(endtime-starttime)
- end subroutine dynamo_input
+ do k = mlev0, mlev1
+ do j = mlat0, mlat1
+ do i=mlon0,mlon1
+ zpotm3d(i,j,k) = zpot_mag(i,j,k)
+ if (zpot_mag(i,j,k) < h0) then
+ zpot_mag(i,j,k) = h0
+ end if
+ end do
+ end do
+ end do
+ do j = mlat0, mlat1
+ call outfld('PED_MAG',ped_mag(mlon0:omlon1,j,mlev1:mlev0:-1),omlon1-mlon0+1,j)
+ call outfld('HAL_MAG',hall_mag(mlon0:omlon1,j,mlev1:mlev0:-1),omlon1-mlon0+1,j)
+ call outfld('ZPOT_MAG',zpot_mag(mlon0:omlon1,j,mlev1:mlev0:-1),omlon1-mlon0+1,j)
+ call outfld('ADOTV1_MAG',adotv1_mag(mlon0:omlon1,j,mlev1:mlev0:-1),omlon1-mlon0+1,j)
+ call outfld('ADOTV2_MAG',adotv2_mag(mlon0:omlon1,j,mlev1:mlev0:-1),omlon1-mlon0+1,j)
+ end do
+ end subroutine dynamo_set_data
!-----------------------------------------------------------------------
- subroutine calc_adotv(z,un,vn,wn,adotv1,adotv2,adota1,adota2,&
- a1dta2,be3,lev0,lev1,lon0,lon1,lat0,lat1)
-!
-! Calculate adotv1,2, adota1,2, a1dta2 and be3.
-!
- use edyn_params ,only: r0,h0
- use edyn_geogrid,only: jspole,jnpole
- use getapex, only: &
- dvec, & ! (nlonp1,nlat,3,2)
- dddarr, & ! (nlonp1,nlat)
- be3arr, & ! (nlonp1,nlat)
- alatm ! (nlonp1,0:nlatp1)
-!
-! Args:
- integer,intent(in) :: lev0,lev1,lon0,lon1,lat0,lat1
- real(r8),dimension(lev0:lev1,lon0:lon1,lat0:lat1),intent(in) :: &
- z, & ! geopotential height (cm)
- un, & ! neutral zonal velocity (cm/s)
- vn ! neutral meridional velocity (cm/s)
- real(r8),dimension(lev0:lev1,lon0:lon1,lat0:lat1),intent(in) :: &
- wn ! vertical velocity (cm/s)
- real(r8),dimension(lev0:lev1,lon0:lon1,lat0:lat1),intent(out) :: &
- adotv1, adotv2
- real(r8),dimension(lon0:lon1,lat0:lat1),intent(out) :: &
- adota1, adota2, a1dta2, be3
-!
-! Local:
- integer :: k,i,j
- real(r8) :: r0or,rat,sinalat
- real(r8) :: clm2(lon0:lon1,lat0:lat1)
-!
- adotv1 = finit
- adotv2 = finit
- adota1 = finit
- adota2 = finit
- a1dta2 = finit
- be3 = finit
-
- do j=lat0,lat1
- if (j==jspole.or.j==jnpole) cycle
- do i=lon0,lon1
- sinalat = sin(alatm(i,j)) ! sin(lam)
- clm2(i,j) = 1._r8-sinalat*sinalat ! cos^2(lam)
- be3(i,j) = 1.e-9_r8*be3arr(i,j) ! be3 is in T (be3arr in nT)
-
- do k=lev0,lev1-1
-!
-! d_1 = (R_0/R)^1.5
- r0or = r0/(r0 + 0.5_r8*(z(k,i,j)+z(k+1,i,j))-h0)
- rat = 1.e-2_r8*r0or**1.5_r8 ! 1/100 conversion in cm
-!
-! A_1 dot V = fac( d_1(1) u + d_1(2) v + d_1(3) w
- adotv1(k,i,j) = rat*( &
- dvec(i,j,1,1)*un(k,i,j)+ &
- dvec(i,j,2,1)*vn(k,i,j)+ &
- dvec(i,j,3,1)*wn(k,i,j))
-
-!
-! Note: clm2 is being used here to represent the squared cosine of the
-! quasi-dipole latitude, not of the M(90) latitude, since the wind
-! values are aligned vertically, not along the field line.
-!
- rat = rat*sqrt((4._r8-3._r8*clm2(i,j))/(4._r8-3._r8*r0or*clm2(i,j)))
-!
-! A_2 dot V = fac( d_2(1) u + d_2(2) v + d_2(3) w
- adotv2(k,i,j) = rat*( &
- dvec(i,j,1,2)*un(k,i,j)+ &
- dvec(i,j,2,2)*vn(k,i,j)+ &
- dvec(i,j,3,2)*wn(k,i,j))
- enddo ! k=lev0,lev1-1
-!
-! Calculation of adota(n) = d(n)**2/D
-! a1dta2 = (d(1) dot d(2)) /D
-!
- adota1(i,j) = (dvec(i,j,1,1)**2 + dvec(i,j,2,1)**2 + &
- dvec(i,j,3,1)**2)/dddarr(i,j)
- adota2(i,j) = (dvec(i,j,1,2)**2 + dvec(i,j,2,2)**2 + &
- dvec(i,j,3,2)**2)/dddarr(i,j)
- a1dta2(i,j) = (dvec(i,j,1,1)*dvec(i,j,1,2) + &
- dvec(i,j,2,1)*dvec(i,j,2,2) + &
- dvec(i,j,3,1)*dvec(i,j,3,2))/dddarr(i,j)
- enddo ! i=lon0,lon1
-
- enddo ! j=lat0,lat1
-
- call savefld_waccm_switch(adota1 ,'ADOTA1' ,1,lon0,lon1,lat0,lat1)
-
- end subroutine calc_adotv
!-----------------------------------------------------------------------
subroutine alloc_edyn
- use edyn_geogrid,only: nlev
+ use edyn_geogrid, only: nlev
!
! Allocate and initialize arrays for parallel dynamo (module data)
! (called once per run)
@@ -712,13 +388,13 @@ subroutine alloc_edyn
phim3d = finit
allocate(ed13d(mlon0:mlon1,mlat0:mlat1,mlev0:mlev1),stat=istat)
if (istat /= 0) call endrun('alloc_edyn: ed13d')
- ed13d = finit
+ ed13d = finit
allocate(ed23d(mlon0:mlon1,mlat0:mlat1,mlev0:mlev1),stat=istat)
if (istat /= 0) call endrun('alloc_edyn: ed23d')
ed23d = finit
allocate(ephi3d(mlon0:mlon1,mlat0:mlat1,mlev0:mlev1),stat=istat)
if (istat /= 0) call endrun('alloc_edyn: ephi3d')
- ephi3d = finit
+ ephi3d = finit
allocate(elam3d(mlon0:mlon1,mlat0:mlat1,mlev0:mlev1),stat=istat)
if (istat /= 0) call endrun('alloc_edyn: elam3d')
elam3d = finit
@@ -728,6 +404,9 @@ subroutine alloc_edyn
allocate(zpotm3d(mlon0:mlon1,mlat0:mlat1,mlev0:mlev1),stat=istat)
if (istat /= 0) call endrun('alloc_edyn: zpotm3d')
zpotm3d = finit
+ allocate(zpot_mag(mlon0:mlon1,mlat0:mlat1,mlev0:mlev1),stat=istat)
+ if (istat /= 0) call endrun('alloc_edyn: zpot_mag')
+ zpot_mag = finit
!
! Fields at mag equator (subdomain longitudes and full column):
!
@@ -743,9 +422,6 @@ subroutine alloc_edyn
allocate(adotv2_meq(mlon0:mlon1,mlev0:mlev1),stat=istat)
if (istat /= 0) call endrun('alloc_edyn: adotv2_meq')
adotv2_meq = finit
- allocate(zpot_meq(mlon0:mlon1,mlev0:mlev1),stat=istat)
- if (istat /= 0) call endrun('alloc_edyn: zpot_meq')
- zpot_meq = finit
!
! Fields input to mp_mageq (4 fields at full mag subdomain i,j,k):
!
@@ -780,7 +456,7 @@ subroutine alloc_edyn
phig3d = finit
!
! 2d electric field components on mag grid (these may be input to time3d):
-! real(r8),dimension(:,:) :: ed1,ed2 ! (mlon0-1:mlon1+1,mlat0-1:mlat1+1)
+! real(r8), dimension(:,:) :: ed1,ed2 ! (mlon0-1:mlon1+1,mlat0-1:mlat1+1)
!
allocate(ed1(mlon0-1:mlon1+1,mlat0-1:mlat1+1),stat=istat)
if (istat /= 0) call endrun('alloc_edyn: ed1')
@@ -790,15 +466,38 @@ subroutine alloc_edyn
if (istat /= 0) call endrun('alloc_edyn: ed2')
ed2 = finit
+ allocate(unitvm(nmlon))
+ unitvm = 1._r8
+
+ allocate(feq_jpm1(nmlonp1,2,nf2d))
+ allocate(fpole_jpm2(nmlonp1,4,nf2d))
+ allocate(ed1_glb(nmlat,nmlonp1), ed2_glb(nmlat,nmlonp1))
+
end subroutine alloc_edyn
!-----------------------------------------------------------------------
- subroutine fieldline_integrals
+ subroutine fieldline_integrals( ped_mag, hal_mag, adotv1_mag, adotv2_mag, &
+ adota1_mag, adota2_mag, a1dta2_mag, be3_mag, sini_mag )
!
! Integrate along magnetic field lines, saving conductances and rims.
!
use edyn_params, only: r0,h0,finit,kbotdyn
use edyn_maggrid, only: ylatm
!
+! Args:
+ real(r8), dimension(mlon0:mlon1,mlat0:mlat1,mlev0:mlev1), intent(in) :: &
+ ped_mag, & ! pedersen conductivity (S/m)
+ hal_mag, & ! hall conductivity (S/m)
+ adotv1_mag,& ! ue1 (m/s)
+ adotv2_mag ! ue2 (m/s)
+
+ real(r8), dimension(mlon0:mlon1,mlat0:mlat1), intent(in) :: &
+ adota1_mag, &
+ adota2_mag, &
+ a1dta2_mag, &
+ be3_mag, &
+ sini_mag
+
+!
! Local:
integer :: i,j,k
real(r8) :: &
@@ -811,9 +510,10 @@ subroutine fieldline_integrals
htfac ! sqrt(R_A -3/4*R_0)
real(r8) :: rora,del,omdel,sig1,sig2,ue1,ue2
- real(r8),dimension(mlon0:mlon1) :: aam
- real(r8),dimension(mlon0:mlon1,mlev0:mlev1) :: rrm, &
+ real(r8), dimension(mlon0:mlon1) :: aam
+ real(r8), dimension(mlon0:mlon1,mlev0:mlev1) :: rrm, &
rtramrm, htfunc, htfunc2
+
!
! Initialize coefficients:
!
@@ -902,6 +602,7 @@ subroutine fieldline_integrals
!
zigm11(i,j) = zigm11(i,j) + sig1*rtramrm(i,k)
zigm22(i,j) = zigm22(i,j) + sig1*rtramrm(i,k)*htfunc2(i,k)
+
!
! zigmc: int (sigma_p*d_1*d_2/D) ds
! zigm2: int (sigma_h) ds
@@ -943,18 +644,18 @@ subroutine fieldline_integrals
rim1(i,j) = 1.e-2_r8*rim1(i,j)*aam(i)*be3_mag(i,j)
rim2(i,j) = 1.e-2_r8*rim2(i,j)*aam(i)*be3_mag(i,j)
enddo ! i = 1,nmlon
- enddo ! j=mlat0,mlat1 (without poles)
+ enddo ! j=mlat0,mlat1 (without poles)
-! call savefld_waccm_switch(adota1_mag(mlon0:mlon1,mlat0:mlat1) ,'adota1_mag_a' ,1,mlon0,mlon1,mlat0,mlat1)
+ call savefld_waccm(adota1_mag(mlon0:mlon1,mlat0:mlat1) ,'adota1_mag_a' ,1,mlon0,mlon1,mlat0,mlat1)
-! call savefld_waccm_switch(zigm11(mlon0:mlon1,mlat0:mlat1) ,'ZIGM11_a' ,1,mlon0,mlon1,mlat0,mlat1)
+ call savefld_waccm(zigm11(mlon0:mlon1,mlat0:mlat1) ,'ZIGM11_a' ,1,mlon0,mlon1,mlat0,mlat1)
end subroutine fieldline_integrals
!-----------------------------------------------------------------------
subroutine complete_integrals
- use edyn_mpi,only: mlat0,mlat1,mlon0,mlon1,mp_mageq_jpm1,mp_magpole_2d,&
- mp_mag_foldhem,mp_mag_periodic_f2d
- use edyn_maggrid,only: rcos0s,dt1dts
+ use edyn_mpi, only: mlat0, mlat1, mlon0, mlon1, mp_mageq_jpm1
+ use edyn_mpi, only: mp_magpole_2d, mp_mag_foldhem, mp_mag_periodic_f2d
+ use edyn_maggrid, only: rcos0s,dt1dts
!
! Field line integrals for each hemisphere have been calculated in
! mag subdomains. Now, complete these arrays with equator and polar
@@ -968,17 +669,12 @@ subroutine complete_integrals
integer :: i,j,ii,lonend
real(r8) :: fmsub(mlon0:mlon1,mlat0:mlat1,nf2d)
real(r8) :: corfac
- real(r8),parameter :: r8_nmlon = dble(nmlon)
+ real(r8) :: r8_nmlon
+ r8_nmlon = real(nmlon, r8)
!
-! If do_integ==.false. (meaning use_time3d_integ=.true.), then these were passed in
-! from time3d (in this case, dynamo did not call fieldline_integrals). Otherwise,
-! they were calculated by this module, in sub fieldline_integrals and this routine.
-!
-
-!
! For equatorial values, we need latitudes eq+1 and eq-1:
! Local feq_jpm1(nmlonp1,2,6) is returned by mp_mageq_jpm1,
-! where the 2 dim contains lats nmlath-1, nmlath+1. These
+! where the 2 dim contains lats nmlath-1, nmlath+1. These
! are global in lon, even tho each subd uses only its own i's.
! These mag equator values do not show up on plots because
! of the small factor .06 and .125.
@@ -1091,20 +787,20 @@ subroutine complete_integrals
rim2(i,j) = rim2(i,j)/rcos0s(j)
enddo
enddo
-!
+!
! For polar values, we need south pole plus 1 and 2 (j==2,3),
-! and north pole minus 1 and 2 (j==nmlat-1,nmlat-2). These
+! and north pole minus 1 and 2 (j==nmlat-1,nmlat-2). These
! are returned by sub mp_magpole_jpm2 (mpi.F):
! Must specify (mlon0:mlon1,mlat0:mlat1) because zigmxx and rims
-! are allocated to include halo cells.
-!
+! are allocated to include halo cells.
+!
fmsub(:,:,1) = zigm11(mlon0:mlon1,mlat0:mlat1)
fmsub(:,:,2) = zigm22(mlon0:mlon1,mlat0:mlat1)
fmsub(:,:,3) = zigmc (mlon0:mlon1,mlat0:mlat1)
fmsub(:,:,4) = zigm2 (mlon0:mlon1,mlat0:mlat1)
fmsub(:,:,5) = rim1 (mlon0:mlon1,mlat0:mlat1)
fmsub(:,:,6) = rim2 (mlon0:mlon1,mlat0:mlat1)
-!
+!
! mp_magpole_2d returns fpole_jpm2(nmlonp1,1->4,nf) as:
! 1: j = 2 (spole+1)
! 2: j = 3 (spole+2)
@@ -1152,7 +848,7 @@ subroutine complete_integrals
zigmc (i,j) = zigmc (mlon0,j)
zigm2 (i,j) = zigm2 (mlon0,j)
enddo ! i=mlon0,mlon1
-!
+!
! RHS vector (I_1,I_2): average over south pole:
! (use fpole_jpm2(i,1,nf), i.e. j==2, and lons across the pole)
lonend = mlon1
@@ -1162,7 +858,7 @@ subroutine complete_integrals
rim1(i,j) = 0.5_r8*(fpole_jpm2(i,1,5)-fpole_jpm2(ii,1,5))
rim2(i,j) = 0.5_r8*(fpole_jpm2(i,1,6)-fpole_jpm2(ii,1,6))
enddo
-!
+!
! North pole:
elseif (j==nmlat) then ! north pole (use fpole_jpm2(nmlon,3->4,1,nf)
zigm11(mlon0,j)=(4._r8* &
@@ -1181,7 +877,7 @@ subroutine complete_integrals
dot_product(unitvm,fpole_jpm2(1:nmlon,3,4))- &
dot_product(unitvm,fpole_jpm2(1:nmlon,4,4)))/ &
(3._r8*r8_nmlon)
-!
+!
! Extend north pole over longitude:
do i=mlon0+1,mlon1
zigm11(i,j) = zigm11(mlon0,j)
@@ -1189,7 +885,7 @@ subroutine complete_integrals
zigmc (i,j) = zigmc (mlon0,j)
zigm2 (i,j) = zigm2 (mlon0,j)
enddo ! i=mlon0,mlon1
-!
+!
! RHS vector (I_1,I_2): average over north pole:
! (use fpole_jpm2(i,3,nf), i.e. j==nmlat-1, and lons across the pole)
lonend = mlon1
@@ -1229,7 +925,7 @@ subroutine complete_integrals
call outfld('EDYN_RIM2',rim2(mlon0:omlon1,j),omlon1-mlon0+1,j)
enddo
- if (debug.and.masterproc) then
+ if (debug.and.masterproc) then
write(iulog,"('complete_integrals: nstep=',i4)") nstep
write(iulog,"(' zigm11 min,max=',2e12.4)") &
minval(zigm11(mlon0:mlon1,mlat0:mlat1)),maxval(zigm11(mlon0:mlon1,mlat0:mlat1))
@@ -1245,18 +941,18 @@ subroutine complete_integrals
minval(rim2 (mlon0:mlon1,mlat0:mlat1)),maxval(rim2 (mlon0:mlon1,mlat0:mlat1))
endif
- call savefld_waccm_switch(zigm11(mlon0:mlon1,mlat0:mlat1) ,'EDYN_ZIGM11' ,1,mlon0,mlon1,mlat0,mlat1)
- call savefld_waccm_switch(zigm22(mlon0:mlon1,mlat0:mlat1) ,'EDYN_ZIGM22' ,1,mlon0,mlon1,mlat0,mlat1)
- call savefld_waccm_switch(zigmc (mlon0:mlon1,mlat0:mlat1) ,'EDYN_ZIGMC' ,1,mlon0,mlon1,mlat0,mlat1)
- call savefld_waccm_switch(zigm2 (mlon0:mlon1,mlat0:mlat1) ,'EDYN_ZIGM2' ,1,mlon0,mlon1,mlat0,mlat1)
- call savefld_waccm_switch(rim1 (mlon0:mlon1,mlat0:mlat1) ,'EDYN_RIM1' ,1,mlon0,mlon1,mlat0,mlat1)
- call savefld_waccm_switch(rim2 (mlon0:mlon1,mlat0:mlat1) ,'EDYN_RIM2' ,1,mlon0,mlon1,mlat0,mlat1)
+ call savefld_waccm(zigm11(mlon0:mlon1,mlat0:mlat1) ,'EDYN_ZIGM11' ,1,mlon0,mlon1,mlat0,mlat1)
+ call savefld_waccm(zigm22(mlon0:mlon1,mlat0:mlat1) ,'EDYN_ZIGM22' ,1,mlon0,mlon1,mlat0,mlat1)
+ call savefld_waccm(zigmc (mlon0:mlon1,mlat0:mlat1) ,'EDYN_ZIGMC' ,1,mlon0,mlon1,mlat0,mlat1)
+ call savefld_waccm(zigm2 (mlon0:mlon1,mlat0:mlat1) ,'EDYN_ZIGM2' ,1,mlon0,mlon1,mlat0,mlat1)
+ call savefld_waccm(rim1 (mlon0:mlon1,mlat0:mlat1) ,'EDYN_RIM1' ,1,mlon0,mlon1,mlat0,mlat1)
+ call savefld_waccm(rim2 (mlon0:mlon1,mlat0:mlat1) ,'EDYN_RIM2' ,1,mlon0,mlon1,mlat0,mlat1)
end subroutine complete_integrals
!-----------------------------------------------------------------------
- subroutine rhspde
- use edyn_params ,only: pi_dyn,r0
- use edyn_maggrid ,only: dlatm,dlonm,rcos0s,dt1dts
+ subroutine rhspde()
+ use edyn_params, only: pi_dyn, r0
+ use edyn_maggrid, only: dlatm, dlonm, rcos0s, dt1dts
!
! Calculate right-hand side from rim1,2 on mag subdomains.
! Use global longitude arrays for poles and equator obtained
@@ -1264,7 +960,7 @@ subroutine rhspde
!
! Local:
integer :: j,i
- real(r8),dimension(nmlat) :: tint1
+ real(r8), dimension(nmlat) :: tint1
real(r8) :: &
rim2_npm1(nmlonp1), & ! global rim2 at nmlat-1
rim2_eqp1(nmlonp1), & ! global rim2 at meq+1
@@ -1274,7 +970,8 @@ subroutine rhspde
zigm2_meq(nmlonp1), & ! needed for rim1_meq
zigmc_meq(nmlonp1), & ! needed for rim1_meq
zigm22_meq(nmlonp1) ! needed for rim1_meq
- real(r8),parameter :: r8_nmlon = dble(nmlon)
+ real(r8) :: r8_nmlon
+ r8_nmlon = real(nmlon, r8)
do j=1,nmlat
tint1(j) = cos(-pi_dyn/2._r8+(j-1)*dlatm)
@@ -1382,24 +1079,24 @@ subroutine rhspde
end subroutine rhspde
!-----------------------------------------------------------------------
- subroutine gather_edyn
+ subroutine gather_edyn()
!
! Gather needed global arrays to root task, so it can finish non-parallel
! part of dynamo (beginning after sub rhspde) as in original code
!
- use edyn_mpi, only: mp_gather_edyn
- use edyn_solve,only: & ! (nmlonp1,nmlat)
- zigm11_glb ,&
- zigm22_glb ,&
- zigmc_glb ,&
- zigm2_glb ,&
- rhs_glb
- use edyn_solve ,only: rim_glb ! pde solver output (nmlonp1,nmlat,2)
+ use edyn_mpi, only: mp_gather_edyn
+ use edyn_solve, only: & ! (nmlonp1,nmlat)
+ zigm11_glb, &
+ zigm22_glb, &
+ zigmc_glb, &
+ zigm2_glb, &
+ rhs_glb
+ use edyn_solve, only: rim_glb ! pde solver output (nmlonp1,nmlat,2)
!
! Local:
! 7 fields to gather: zigm11,zigm22,zigmc,zigm2,rim1,rim2,rhs
!
- integer,parameter :: nf = 7
+ integer, parameter :: nf = 7
real(r8) :: fmsub(mlon0:mlon1,mlat0:mlat1,nf)
real(r8) :: fmglb(nmlonp1,nmlat,nf)
real(r8) :: rhs_nhem(nmlonp1,nmlat)
@@ -1457,10 +1154,10 @@ subroutine gather_edyn
endif ! mytid==0
end subroutine gather_edyn
!-----------------------------------------------------------------------
- subroutine highlat_poten
- use edyn_solve,only: &
- phihm ,& ! high-latitude potential (nmlonp1,nmlat)
- pfrac ! NH fraction of potential (nmlonp1,nmlat0)
+ subroutine highlat_poten()
+ use edyn_solve, only: &
+ phihm, & ! high-latitude potential (nmlonp1,nmlat)
+ pfrac ! NH fraction of potential (nmlonp1,nmlat0)
!
! Global PDE solution rim_glb(:,:,1) has been scattered to mag subdomains
! in rim1, and halos set (this overwrites previous rim1 from fieldline
@@ -1478,7 +1175,7 @@ subroutine highlat_poten
! phihm is on 2d global mag grid, pfrac is in north hemisphere only
!
! Local:
- logical,parameter :: mod_heelis = .false. ! true == modified
+ logical, parameter :: mod_heelis = .false. ! true == modified
integer :: i,j,jn,js
real(r8) :: fac
!
@@ -1509,52 +1206,52 @@ subroutine highlat_poten
end subroutine highlat_poten
!-----------------------------------------------------------------------
- subroutine pthreed
+ subroutine pthreed()
!
! phim2d is now 2d electric potential solution on mag subdomains,
! with high-latitude potential added from empirical model (see subs
-! heelis and highlat_poten), and mag halos set. Now expand phim2d in
-! vertical, defining phim3d. Also calculate electric field ed13d, ed23d
+! heelis and highlat_poten), and mag halos set. Now expand phim2d in
+! vertical, defining phim3d. Also calculate electric field ed13d, ed23d
! for later current calculations, and ephi3d, elam3d and emz3d for conversion
-! to geographic grid (sub pefield), and subsequent calculation of ion drifts
+! to geographic grid (sub pefield), and subsequent calculation of ion drifts
! by sub ionvel (not in edynamo).
!
- use edyn_params ,only: re,pi_dyn,r0,kbotdyn
- use edyn_maggrid,only: ylatm,dlatm,dlonm,rcos0s,dt1dts,dt0dts,table
- use edyn_mpi ,only: &
- mp_mag_halos ,&
- mp_magpole_2d ,&
- mp_mageq_jpm3 ,&
- mp_mag_jslot ,&
- mp_magpoles ,&
- mp_mag_periodic_f2d ,&
+ use edyn_params, only: Rearth, pi_dyn, r0, kbotdyn
+ use edyn_maggrid, only: ylatm, dlatm, dlonm, rcos0s, dt1dts, dt0dts, table
+ use edyn_mpi, only: &
+ mp_mag_halos, &
+ mp_magpole_2d, &
+ mp_mageq_jpm3, &
+ mp_mag_jslot, &
+ mp_magpoles, &
+ mp_mag_periodic_f2d, &
ixfind
!
! Local:
- real(r8),parameter :: eps = 1.e-10_r8, unitvm(nmlon)=1._r8
- integer,parameter :: mxneed=nmlat+2
- integer :: i,j,k,n,mlon00,mlon11,mlat00,mlat11
- real(r8) :: csth0,cosltm,sym,pi,phims,phimn,real8
- real(r8),dimension(nmlonp1) :: thetam,pslot,qslot
- integer,dimension(nmlonp1) :: islot,jslot,ip1f,ip2f,ip3f
+ real(r8), parameter :: eps = 1.e-10_r8
+ integer :: mxneed
+ integer :: i,j,k,n,mlon00,mlon11,mlat00,mlat11
+ real(r8) :: csth0, cosltm, sym, pi, phims, phimn, rind
+ real(r8), dimension(nmlonp1) :: thetam,pslot,qslot
+ integer, dimension(nmlonp1) :: islot,jslot,ip1f,ip2f,ip3f
-! real(r8),dimension(mlon0-1:mlon1+1,mlat0-1:mlat1+1) :: ed1,ed2
+! real(r8), dimension(mlon0-1:mlon1+1,mlat0-1:mlat1+1) :: ed1,ed2
- real(r8),dimension(mlon0-1:mlon1+1,mlat0-1:mlat1+1) :: ephi,elam
+ real(r8), dimension(mlon0-1:mlon1+1,mlat0-1:mlat1+1) :: ephi,elam
real(r8) :: fpole2d_jpm2(nmlonp1,4,4) ! global lons at S pole+1,2 and N pole-1,2
real(r8) :: fpoles(nmlonp1,2,1) ! global lons at poles (1 field only)
real(r8) :: fmsub(mlon0:mlon1,mlat0:mlat1,4)
real(r8) :: fmsub1(mlon0-1:mlon1+1,mlat0-1:mlat1+1,5)
real(r8) :: feq_jpm3(nmlonp1,-3:3,1) ! global lons at equator +/- 3
- integer :: jneed(mxneed) ! lats needed from other tasks for interp
+ integer :: jneed(nmlat+2) ! lats needed from other tasks for interp
integer :: njneed,icount
- real(r8),dimension(mlon0-1:mlon1+1,mxneed) :: &
+ real(r8), dimension(mlon0-1:mlon1+1,nmlat+2) :: &
phineed, & ! phim2d at needed latitudes
ed1need, & ! ed1 at needed latitudes
ed2need, & ! ed2 at needed latitudes
ephineed, & ! ephi at needed latitudes
elamneed ! elam at needed latitudes
- real(r8),dimension(mlon0-1:mlon1+1,mxneed,5) :: fmneed
+ real(r8), dimension(mlon0-1:mlon1+1,nmlat+2,5) :: fmneed
real(r8) :: phi0j0,phi1j0,phi0j1,phi1j1
real(r8) :: ed1i0j0,ed1i1j0,ed1i0j1,ed1i1j1
real(r8) :: ed2i0j0,ed2i1j0,ed2i0j1,ed2i1j1
@@ -1562,6 +1259,7 @@ subroutine pthreed
real(r8) :: elam0j0,elam1j0,elam0j1,elam1j1
real(r8) :: fac_elam
!
+ mxneed=nmlat+2
pi = pi_dyn
mlon00=mlon0-1 ; mlon11=mlon1+1
mlat00=mlat0-1 ; mlat11=mlat1+1
@@ -1708,7 +1406,7 @@ subroutine pthreed
! outside a task's latitudinal subdomain:
!
if (debug) write(iulog,*) "pthreed: kbotdyn ", kbotdyn
-
+
njneed = 0 ! number of unique latitudes needed
jneed(:) = -1 ! j-indices of needed latitudes
do k=kbotdyn,nmlev
@@ -1720,13 +1418,13 @@ subroutine pthreed
do i=mlon0,mlon1
if (i==nmlonp1) cycle
- thetam(i)=(re+zpotm3d(i,j,kbotdyn))/(re+zpotm3d(i,j,k))
+ thetam(i)=(Rearth+zpotm3d(i,j,kbotdyn))/(Rearth+zpotm3d(i,j,k))
thetam(i) = acos(sqrt(thetam(i))*cosltm*(1._r8-eps))
pslot(i) = thetam(i)*180._r8/pi+1._r8
islot(i) = pslot(i)
- real8 = dble(islot(i))
- pslot(i) = pslot(i)-real8
+ rind = real(islot(i), kind=r8)
+ pslot(i) = pslot(i)-rind
thetam(i) = ((1._r8-pslot(i))*table(islot(i),2)+pslot(i)* &
table(islot(i)+1,2))*sym ! thetam negative for south hem
@@ -1735,8 +1433,8 @@ subroutine pthreed
pslot(i) = 0._r8
qslot(i) = (thetam(i)+pi/2._r8)/dlatm+1._r8
jslot(i) = qslot(i)
- real8 = dble(jslot(i))
- qslot(i) = qslot(i)-real8
+ rind = real(jslot(i), kind=r8)
+ qslot(i) = qslot(i)-rind
! Save j index if outside subdomain w/ halos:
if ((jslot(i) < mlat00 .or. jslot(i) > mlat11).and. &
@@ -1756,7 +1454,7 @@ subroutine pthreed
enddo ! i=mlon0,mlon1
enddo ! j=mlat0,mlat1
enddo ! k=kbotdyn,nmlev
-!
+!
! Get phim2 at needed latitudes (note inclusion of phim2d halos).
! real,intent(in) :: fin(mlon00:mlon11,mlat00:mlat11,nf) ! data at current subdomain
! real,intent(out) :: fout(mlon00:mlon11,mxneed,nf) ! returned data at needed lats
@@ -1785,14 +1483,14 @@ subroutine pthreed
do i=mlon0,mlon1
if (i==nmlonp1) cycle
- thetam(i)=(re+zpotm3d(i,j,kbotdyn))/(re+zpotm3d(i,j,k))
+ thetam(i)=(Rearth+zpotm3d(i,j,kbotdyn))/(Rearth+zpotm3d(i,j,k))
thetam(i) = acos(sqrt(thetam(i))*cosltm*(1._r8-eps))
fac_elam = tan(ylatm(j))/tan(thetam(i)*sym) ! tan(lambda_q)/tan(lambda_m)
pslot(i) = thetam(i)*180._r8/pi+1._r8
islot(i) = pslot(i)
- real8 = dble(islot(i))
- pslot(i) = pslot(i)-real8
+ rind = real(islot(i), kind=r8)
+ pslot(i) = pslot(i)-rind
thetam(i) = ((1._r8-pslot(i))*table(islot(i),2)+pslot(i)* &
table(islot(i)+1,2))*sym ! thetam negative for south hem
@@ -1801,8 +1499,8 @@ subroutine pthreed
pslot(i) = 0._r8
qslot(i) = (thetam(i)+pi/2._r8)/dlatm+1._r8
jslot(i) = qslot(i)
- real8 = dble(jslot(i))
- qslot(i) = qslot(i)-real8
+ rind = real(jslot(i), kind=r8)
+ qslot(i) = qslot(i)-rind
!
! Check for jslot in subdomain:
if (jslot(i) >= mlat00.and.jslot(i) <= mlat11) then ! within subdomain
@@ -1892,15 +1590,15 @@ subroutine pthreed
enddo ! k=kbotdyn,nmlev
!
-! Mag poles for phim:
+! Mag poles for phim:
! mp_magpoles returns global longitudes at S,N poles in fpoles(nglblon,2,nf)
!
call mp_magpoles(phim2d(mlon0:mlon1,mlat0:mlat1), &
mlon0,mlon1,mlat0,mlat1,nmlonp1,1,nmlat,fpoles,1)
- real8 = dble(nmlon)
- phims=dot_product(unitvm,fpoles(1:nmlon,1,1))/real8
- phimn=dot_product(unitvm,fpoles(1:nmlon,2,1))/real8
+ rind = real(nmlon, kind=r8)
+ phims=dot_product(unitvm,fpoles(1:nmlon,1,1))/rind
+ phimn=dot_product(unitvm,fpoles(1:nmlon,2,1))/rind
do k=kbotdyn,nmlev
do j=mlat0,mlat1
@@ -1915,7 +1613,7 @@ subroutine pthreed
elseif (j==nmlat) then
do i=mlon0,mlon1
phim3d(i,j,k) = phimn
- ed13d(i,j,k) = ed1(i,j)
+ ed13d(i,j,k) = ed1(i,j)
ed23d(i,j,k) = ed2(i,j)
ephi3d(i,j,k) = ephi(i,j)
elam3d(i,j,k) = -ed2(i,j)*(r0*1.e-2_r8)
@@ -1939,7 +1637,7 @@ subroutine pthreed
enddo
enddo
enddo
-!
+!
do k=mlev0,mlev1
call mp_mag_periodic_f2d(phim3d(:,:,k),mlon0,mlon1,mlat0,mlat1,1)
enddo
@@ -1955,30 +1653,30 @@ subroutine pthreed
enddo
end subroutine pthreed
!-----------------------------------------------------------------------
- subroutine pefield
- use edyn_params ,only: pi
- use edyn_maggrid,only: dt0dts,dlatm,dlonm,rcos0s
- use edyn_geogrid,only: nlev
- use edyn_mpi ,only: mp_magpole_3d,mp_mag_halos,mp_magpoles
- use edyn_esmf ,only: mag_ephi3d,mag_elam3d,mag_emz3d,mag_phi3d,&
- geo_ephi3d,geo_elam3d,geo_emz3d,geo_phi3d
+ subroutine pefield()
+ use edyn_params, only: pi
+ use edyn_maggrid, only: dt0dts, dlatm, dlonm, rcos0s
+ use edyn_geogrid, only: nlev
+ use edyn_mpi, only: mp_magpole_3d, mp_mag_halos, mp_magpoles
+ use regridder, only: regrid_mag2geo_3d
!
! Local:
- integer :: i,ii,j,k
- real(r8) :: &
- phi3d(mlon0-1:mlon1+1,mlat0-1:mlat1+1,nmlev), & ! local phi w/ halos
- fpole3d_jpm2(nmlonp1,4,nmlev,1) ! global lons at S pole+1,2 and N pole-1,2
- real(r8) :: csth0,real8
- real(r8) :: fpoles(nmlonp1,2,nmlev) ! global lons at poles
- real(r8),dimension(lon0:lon1,lat0:lat1,nlev) :: exgeo,eygeo,ezgeo
+ integer :: i, ii, j, k
+ real(r8) :: &
+ phi3d(mlon0-1:mlon1+1,mlat0-1:mlat1+1,nmlev), & ! local phi w/ halos
+ fpole3d_jpm2(nmlonp1,4,nmlev,1) ! global lons at S pole+1,2 and N pole-1,2
+ real(r8) :: csth0
+ real(r8) :: fpoles(nmlonp1,2,nmlev) ! global lons at poles
+ real(r8), dimension(lon0:lon1,lat0:lat1,nlev) :: exgeo, eygeo, ezgeo
+
!
! Copy phim3d to local phi3d, and set halo points:
- do j=mlat0,mlat1
- do i=mlon0,mlon1
- phi3d(i,j,:) = phim3d(i,j,:)
- enddo
- enddo
- call mp_mag_halos(phi3d,mlon0,mlon1,mlat0,mlat1,nmlev)
+ do j = mlat0, mlat1
+ do i = mlon0, mlon1
+ phi3d(i,j,:) = phim3d(i,j,:)
+ end do
+ end do
+ call mp_mag_halos(phi3d, mlon0, mlon1, mlat0, mlat1, nmlev)
!
! Return fpole3d_jpm2(nglblon,1->4,nlev,nf) as:
! 1: j = jspole+1 (spole+1)
@@ -1986,136 +1684,137 @@ subroutine pefield
! 3: j = jnpole-1 (npole-1)
! 4: j = jnpole-2 (npole-2) not used here
!
- call mp_magpole_3d(phim3d(mlon0:mlon1,mlat0:mlat1,:),mlon0,&
- mlon1,mlat0,mlat1,nmlev,nmlonp1,1,nmlat,fpole3d_jpm2,1)
+ call mp_magpole_3d(phim3d(mlon0:mlon1,mlat0:mlat1,:), mlon0, &
+ mlon1, mlat0, mlat1, nmlev, nmlonp1, 1, nmlat, fpole3d_jpm2, 1)
!
! Set j=0 and j=nmlat+1 of local phi3d. This overwrites the far
! north and south halo points set by mp_mag_halos above.
- do j=mlat0,mlat1
- if (j==1) then
- do i=mlon0,mlon1
- ii = 1+mod(i-1+nmlon/2,nmlon) ! over the south pole
- phi3d(i,j-1,:) = fpole3d_jpm2(ii,1,:,1)
- enddo
- elseif (j==nmlat) then
- do i=mlon0,mlon1
- ii = 1+mod(i-1+nmlon/2,nmlon) ! over the north pole
- phi3d(i,j+1,:) = fpole3d_jpm2(ii,3,:,1)
- enddo
- endif ! poles or not
- enddo ! j=mlat0,mlat1
+ do j = mlat0, mlat1
+ if (j==1) then
+ do i = mlon0, mlon1
+ ii = 1 + mod(i-1+nmlon/2,nmlon) ! over the south pole
+ phi3d(i,j-1,:) = fpole3d_jpm2(ii,1,:,1)
+ end do
+ else if (j == nmlat) then
+ do i = mlon0, mlon1
+ ii = 1 + mod(i-1+nmlon/2,nmlon) ! over the north pole
+ phi3d(i,j+1,:) = fpole3d_jpm2(ii,3,:,1)
+ end do
+ end if ! poles or not
+ end do ! j=mlat0,mlat1
!
! Meridional component of electric field:
- do j=mlat0,mlat1
- do i=mlon0,mlon1
- elam3d(i,j,:) = -(phi3d(i,j+1,:)-phi3d(i,j-1,:))/ &
- (2._r8*dlatm)*dt0dts(j)
- enddo
- enddo
+ do j = mlat0, mlat1
+ do i = mlon0, mlon1
+ elam3d(i,j,:) = -(phi3d(i,j+1,:)-phi3d(i,j-1,:)) / &
+ (2._r8*dlatm)*dt0dts(j)
+ end do
+ end do
!
! Zonal component of electric field:
- do j=mlat0,mlat1
- if (j==1.or.j==nmlat) cycle
- real8 = dble(j-1)
- csth0 = cos(-pi/2._r8+real8*dlatm)
- do i=mlon0,mlon1
- ephi3d(i,j,:) = -(phi3d(i+1,j,:)-phi3d(i-1,j,:))/ &
- (2._r8*dlonm*csth0)*rcos0s(j)
- enddo
- enddo
-!
-! Polar values for ephi3d (need global lons at poles of elam3d):
- call mp_magpoles(elam3d,mlon0,mlon1,mlat0,mlat1,nmlonp1,1,nmlat,fpoles,nmlev)
- do j=mlat0,mlat1
- if (j==1) then ! south pole
- do i=mlon0,mlon1
- ii = 1+mod(i-1+(nmlon/4),nmlon) ! over the south pole
- ephi3d(i,j,:) = fpoles(ii,1,:)
- enddo
- elseif (j==nmlat) then ! north pole
- do i=mlon0,mlon1
- ii = 1+mod(i-1+((3*nmlon)/4),nmlon) ! over the north pole
- ephi3d(i,j,:) = fpoles(ii,2,:)
- enddo
- endif ! poles or not
- enddo ! j=mlat0,mlat1
-!
-! emz = d(phi)/dz
- do k=2,nmlev-1
- do j=mlat0,mlat1
- do i=mlon0,mlon1
- emz3d(i,j,k) = -(phim3d(i,j,k+1)-phi3d(i,j,k-1))
- enddo
- enddo
- enddo ! k=2,nmlev-1
+ do j = mlat0, mlat1
+ if (j==1 .or. j==nmlat) cycle
+ csth0 = cos((-pi / 2._r8) + (real(j-1,kind=r8) * dlatm))
+ do i = mlon0, mlon1
+ ephi3d(i,j,:) = -(phi3d(i+1,j,:) - phi3d(i-1,j,:)) / &
+ (2._r8 * dlonm * csth0) * rcos0s(j)
+ end do
+ end do
!
-! btf 6/18/14: mag2geo is not working due to error return rc=51 from
-! ESMF_FieldSMM for 3d mag2geo (see sub esmf_regrid in edyn_esmf.F90)
-! (this is the call to do the data regridding, not the init call)
+! Polar values for ephi3d (need global lons at poles of elam3d):
+ call mp_magpoles(elam3d,mlon0,mlon1,mlat0,mlat1,nmlonp1,1,nmlat,fpoles,nmlev)
+ do j = mlat0, mlat1
+ if (j == 1) then ! south pole
+ do i = mlon0, mlon1
+ ii = 1 + mod(i-1+(nmlon/4),nmlon) ! over the south pole
+ ephi3d(i,j,:) = fpoles(ii,1,:)
+ end do
+ else if (j == nmlat) then ! north pole
+ do i = mlon0, mlon1
+ ii = 1+mod(i-1+((3*nmlon)/4),nmlon) ! over the north pole
+ ephi3d(i,j,:) = fpoles(ii,2,:)
+ end do
+ end if ! poles or not
+ end do ! j=mlat0,mlat1
!
-! Use ESMF to regrid the electric field to the geographic grid:
- call mag2geo_3d(ephi3d,exgeo ,mag_ephi3d,geo_ephi3d,'EPHI3D ')
- call mag2geo_3d(elam3d,eygeo ,mag_elam3d,geo_elam3d,'ELAM3D ')
- call mag2geo_3d(emz3d ,ezgeo ,mag_emz3d ,geo_emz3d ,'EMZ3D ')
- call mag2geo_3d(phim3d,phig3d,mag_phi3d ,geo_phi3d ,'PHIM3D ')
+! emz = d(phi)/dz
+ do k = 2, nmlev-1
+ do j = mlat0, mlat1
+ do i = mlon0, mlon1
+ emz3d(i,j,k) = -(phim3d(i,j,k+1)-phi3d(i,j,k-1))
+ end do
+ end do
+ end do ! k=2,nmlev-1
+
+! regrid from mag grid to geo grid
+ call regrid_mag2geo_3d( ephi3d, exgeo )
+ call regrid_mag2geo_3d( elam3d, eygeo )
+ call regrid_mag2geo_3d( emz3d, ezgeo )
+ call regrid_mag2geo_3d( phim3d, phig3d )
!
! Define ex,ey,ez on geographic subdomains for ionvel:
- do j=lat0,lat1
- do i=lon0,lon1
- ex(:,i,j) = exgeo(i,j,:)
- ey(:,i,j) = eygeo(i,j,:)
- ez(:,i,j) = ezgeo(i,j,:)
- poten(:,i,j) = phig3d(i,j,:)
- enddo
- enddo
+ do j = lat0, lat1
+ do i = lon0, lon1
+ ex(:,i,j) = exgeo(i,j,:)
+ ey(:,i,j) = eygeo(i,j,:)
+ ez(:,i,j) = ezgeo(i,j,:)
+ poten(:,i,j) = phig3d(i,j,:)
+ end do
+ end do
! ex,ey,ez(nlev,lon0-2,lon1+2,lat0:lat1)
- if (debug) then
- write(iulog,"('pefield after mag2geo: ex=',2e12.4,' ey=',2e12.4,' ez=',2e12.4)") &
- minval(ex(:,lon0:lon1,:)),maxval(ex(:,lon0:lon1,:)), &
- minval(ey(:,lon0:lon1,:)),maxval(ey(:,lon0:lon1,:)), &
- minval(ez(:,lon0:lon1,:)),maxval(ez(:,lon0:lon1,:))
- endif
-
- call savefld_waccm_switch(poten(1:nlev,lon0:lon1,lat0:lat1),'POTEN',&
- nlev,lon0,lon1,lat0,lat1)
- call savefld_waccm_switch(ex(1:nlev,lon0:lon1,lat0:lat1),'EX',&
- nlev,lon0,lon1,lat0,lat1)
- call savefld_waccm_switch(ey(1:nlev,lon0:lon1,lat0:lat1),'EY',&
- nlev,lon0,lon1,lat0,lat1)
- call savefld_waccm_switch(ez(1:nlev,lon0:lon1,lat0:lat1),'EZ',&
- nlev,lon0,lon1,lat0,lat1)
+ if (debug) then
+ write(iulog,"(a,2e12.4,' ey=',2e12.4,' ez=',2e12.4)") &
+ 'pefield after mag2phys: ex=', &
+ minval(ex(:,lon0:lon1,:)),maxval(ex(:,lon0:lon1,:)), &
+ minval(ey(:,lon0:lon1,:)),maxval(ey(:,lon0:lon1,:)), &
+ minval(ez(:,lon0:lon1,:)),maxval(ez(:,lon0:lon1,:))
+ end if
+
+ call savefld_waccm(poten(1:nlev,lon0:lon1,lat0:lat1),'POTEN', &
+ nlev,lon0,lon1,lat0,lat1)
+ call savefld_waccm(ex(1:nlev,lon0:lon1,lat0:lat1),'EX', &
+ nlev,lon0,lon1,lat0,lat1)
+ call savefld_waccm(ey(1:nlev,lon0:lon1,lat0:lat1),'EY', &
+ nlev,lon0,lon1,lat0,lat1)
+ call savefld_waccm(ez(1:nlev,lon0:lon1,lat0:lat1),'EZ', &
+ nlev,lon0,lon1,lat0,lat1)
end subroutine pefield
-!-----------------------------------------------------------------------
- subroutine ionvel(z,ui,vi,wi)
+ !-----------------------------------------------------------------------
+ !-----------------------------------------------------------------------
+
+ subroutine ionvel(z,ui,vi,wi,lon0,lon1, lat0,lat1, lev0,lev1)
!
! Calculate 3d ExB ion drifts from electric field (sub pefield)
! on geographic grid.
!
- use edyn_params ,only: re
- use edyn_geogrid ,only: nlev
- use getapex ,only: &
- rjac ,& ! (nlon+1,jspole:jnpole,2,2)
- bmod ,& ! magnitude of magnetic field (nlon+1,jspole:jnpole)
- xb,yb,zb ! north,east,down magnetic field (nlon+1,jspole:jnpole)
+ use edyn_params, only: Rearth
+ use edyn_geogrid, only: nlev
+ use getapex, only: rjac ! (nlon+1,jspole:jnpole,2,2)
+ use getapex, only: bmod ! magnitude of mag field (nlon+1,jspole:jnpole)
+ use getapex, only: xb,yb,zb ! north,east,down mag field (nlon+1,jspole:jnpole)
!
! Args:
- real(r8),intent(in),dimension(lev0:lev1,lon0:lon1,lat0:lat1) :: &
+ integer,intent(in) :: & ! geographic subdomain
+ lon0, lon1, & ! first,last longitude indices of geographic subdomain
+ lat0, lat1, & ! first,last latitude indices of geographic subdomain
+ lev0, lev1 ! first,last level indices (not distributed)
+ real(r8),intent(in), dimension(lev0:lev1,lon0:lon1,lat0:lat1) :: &
z ! geopotential from input (cm)
- real(r8),intent(out),dimension(lev0:lev1,lon0:lon1,lat0:lat1) :: &
+ real(r8),intent(out), dimension(lev0:lev1,lon0:lon1,lat0:lat1) :: &
ui,vi,wi
!
! Local:
integer :: i,ii,k,j
- real(r8),dimension(lev0:lev1,lon0:lon1) :: eex,eey,eez
- real(r8),dimension(lev0:lev1,lon0:lon1,lat0:lat1) :: rjac_out
+ real(r8), dimension(lev0:lev1,lon0:lon1) :: eex,eey,eez
+ real(r8), dimension(lev0:lev1,lon0:lon1,lat0:lat1) :: rjac_out
! mag field diagnostics
- call savefld_waccm_switch(bmod(lon0:lon1,lat0:lat1),'BMOD',1,lon0,lon1,lat0,lat1)
- call savefld_waccm_switch(xb(lon0:lon1,lat0:lat1),'XB',1,lon0,lon1,lat0,lat1)
- call savefld_waccm_switch(yb(lon0:lon1,lat0:lat1),'YB',1,lon0,lon1,lat0,lat1)
- call savefld_waccm_switch(zb(lon0:lon1,lat0:lat1),'ZB',1,lon0,lon1,lat0,lat1)
+ call savefld_waccm(bmod(lon0:lon1,lat0:lat1),'BMOD',1,lon0,lon1,lat0,lat1)
+ call savefld_waccm(xb(lon0:lon1,lat0:lat1),'XB',1,lon0,lon1,lat0,lat1)
+ call savefld_waccm(yb(lon0:lon1,lat0:lat1),'YB',1,lon0,lon1,lat0,lat1)
+ call savefld_waccm(zb(lon0:lon1,lat0:lat1),'ZB',1,lon0,lon1,lat0,lat1)
!
! Scan geographic latitude subdomain:
@@ -2125,9 +1824,9 @@ subroutine ionvel(z,ui,vi,wi)
ii = i
do k=lev0,lev1
eex(k,i) = (rjac(ii,j,1,1)*ex(k,i,j)+ &
- rjac(ii,j,2,1)*ey(k,i,j))/(re+z(k,i,j))
+ rjac(ii,j,2,1)*ey(k,i,j))/(Rearth+z(k,i,j))
eey(k,i) = (rjac(ii,j,1,2)*ex(k,i,j)+ &
- rjac(ii,j,2,2)*ey(k,i,j))/(re+z(k,i,j))
+ rjac(ii,j,2,2)*ey(k,i,j))/(Rearth+z(k,i,j))
enddo ! k=lev0,lev1
enddo
!
@@ -2135,7 +1834,7 @@ subroutine ionvel(z,ui,vi,wi)
do k=lev0+1,lev1-1
eez(k,i) = ez(k,i,j)/(z(k+1,i,j)-z(k-1,i,j))
enddo ! k=lev0+1,lev1-1
- enddo
+ enddo
!
! Extrapolate for lower and upper boundaries:
do i=lon0,lon1
@@ -2148,7 +1847,7 @@ subroutine ionvel(z,ui,vi,wi)
j,minval(eex),maxval(eex),minval(eey),maxval(eey),minval(eez),maxval(eez)
endif
-!
+!
! ion velocities = (e x b/b**2) (x 1.e6 for m/sec)
! ui = zonal, vi = meridional, wi = vertical
!
@@ -2175,7 +1874,7 @@ subroutine ionvel(z,ui,vi,wi)
vi(:,i,j) = vi(:,i,j)*100._r8
wi(:,i,j) = wi(:,i,j)*100._r8
enddo
- enddo ! j=lat0,lat1
+ enddo ! j=lat0,lat1
if (debug.and.masterproc) then
write(iulog,"('ionvel: ion drifts on geo grid: ui=',2e12.4,' vi=',2e12.4,' wi=',2e12.4)") &
@@ -2186,71 +1885,31 @@ subroutine ionvel(z,ui,vi,wi)
do i=1,nlev
rjac_out(i,lon0:lon1,lat0:lat1) = rjac(lon0:lon1,lat0:lat1,1,1)
end do
- call savefld_waccm_switch(rjac_out,'RJAC11',nlev,lon0,lon1,lat0,lat1)
+ call savefld_waccm(rjac_out,'RJAC11',nlev,lon0,lon1,lat0,lat1)
endif
if (hist_fld_active('RJAC12')) then
do i=1,nlev
rjac_out(i,lon0:lon1,lat0:lat1) = rjac(lon0:lon1,lat0:lat1,1,2)
end do
- call savefld_waccm_switch(rjac_out,'RJAC12',nlev,lon0,lon1,lat0,lat1)
+ call savefld_waccm(rjac_out,'RJAC12',nlev,lon0,lon1,lat0,lat1)
endif
if (hist_fld_active('RJAC21')) then
do i=1,nlev
rjac_out(i,lon0:lon1,lat0:lat1) = rjac(lon0:lon1,lat0:lat1,2,1)
end do
- call savefld_waccm_switch(rjac_out,'RJAC21',nlev,lon0,lon1,lat0,lat1)
+ call savefld_waccm(rjac_out,'RJAC21',nlev,lon0,lon1,lat0,lat1)
endif
if (hist_fld_active('RJAC22')) then
do i=1,nlev
rjac_out(i,lon0:lon1,lat0:lat1) = rjac(lon0:lon1,lat0:lat1,2,2)
end do
- call savefld_waccm_switch(rjac_out,'RJAC22',nlev,lon0,lon1,lat0,lat1)
+ call savefld_waccm(rjac_out,'RJAC22',nlev,lon0,lon1,lat0,lat1)
endif
end subroutine ionvel
-!-----------------------------------------------------------------------
- subroutine mag2geo_3d(fmag,fgeo,ESMF_mag,ESMF_geo,fname)
-!
-! Convert field on geomagnetic grid fmag to geographic grid in fgeo.
-!
- use edyn_esmf,only: edyn_esmf_set3d_mag,edyn_esmf_regrid,edyn_esmf_get_3dfield
- use edyn_geogrid,only: nlev
-!
-! Args:
-! integer,intent(in) :: mlon0,mlon1,mlat0,mlat1,nmlev,lon0,lon1,lat0,lat1,nlev
- character(len=*) :: fname
- type(ESMF_Field),intent(inout) :: ESMF_mag, ESMF_geo
- real(r8),intent(in) :: fmag(mlon0:mlon1,mlat0:mlat1,nmlev)
- real(r8),intent(out) :: fgeo(lon0:lon1,lat0:lat1,nlev)
-!
-! Local:
- integer :: j
- character(len=8) :: fnames(1)
- type(ESMF_Field) :: magfields(1)
- real(r8),pointer,dimension(:,:,:) :: fptr
-
- fgeo = finit
- fnames(1) = fname
- magfields(1) = ESMF_mag
-!
-! Put fmag into ESMF mag field on mag source grid:
- call edyn_esmf_set3d_mag(magfields,fnames,fmag,1,1,nmlev,mlon0,mlon1,mlat0,mlat1)
-!
-! Regrid to geographic destination grid, defining ESMF_geo:
- call edyn_esmf_regrid(ESMF_mag,ESMF_geo,'mag2geo',3)
-!
-! Put regridded geo field into pointer:
- call edyn_esmf_get_3dfield(ESMF_geo,fptr,fname)
-!
-! Transfer from pointer to output arg:
- do j=lat0,lat1
- fgeo(:,j,:) = fptr(:,j,:)
- enddo
- end subroutine mag2geo_3d
-#endif
!-----------------------------------------------------------------------
end module edynamo
diff --git a/src/ionosphere/waccmx/getapex.F90 b/src/ionosphere/waccmx/getapex.F90
index a67a7463a0..e93d91d502 100644
--- a/src/ionosphere/waccmx/getapex.F90
+++ b/src/ionosphere/waccmx/getapex.F90
@@ -1,61 +1,49 @@
module getapex
!
-! Calculate quantities needed to transform scalar fields between geographic
-! and geomagnetic coordinate systems.
-!
- use shr_kind_mod ,only : r8 => shr_kind_r8
- use cam_logfile ,only: iulog
- use cam_abortutils ,only: endrun
- use edyn_geogrid ,only: nlon,nlonp1,ylatg,ylong,dlong,&
- jspole,jnpole
- use edyn_maggrid ,only: nmlon,nmlonp1,nmlat,ylatm,ylonm,dlonm
-
- implicit none
- save
+! Calculate quantities needed to transform scalar fields between geographic
+! and geomagnetic coordinate systems.
+!
+ use shr_kind_mod, only : r8 => shr_kind_r8
+ use cam_logfile, only: iulog
+ use cam_abortutils, only: endrun
+ use edyn_geogrid, only: nlon,nlonp1,jspole,jnpole
+ use edyn_maggrid, only: nmlonp1,nmlat,ylatm,ylonm
- private
+ implicit none
+ save
- public :: get_apex
- public :: magfield, bx, by, bz, bmod2, bmod, xb, yb, zb, be3arr, dddarr, dvec
- public :: alatm, alonm, gdlondeg, gdlatdeg
- public :: rjac
+ private
- integer :: &
- ig(nmlonp1,nmlat), & ! geog lon grid containing each geomag point
- jg(nmlonp1,nmlat) ! geog lat grid containing each geomag point
+ public :: get_apex ! Allocate and initialize apex data
+ public :: magfield, bx, by, bz
+ public :: bmod2, bmod
+ public :: xb, yb, zb
+ public :: be3arr, dddarr, dvec
+ public :: alatm, alonm
+ public :: gdlatdeg, gdlondeg
+ public :: rjac
- real(r8) :: &
- wt(4,nmlonp1,nmlat) ! interpolation weights for geo2mag
-
- real(r8),dimension(nmlonp1,nmlat) :: & ! geo lat,lon coords on mag grid
- gdlatdeg, & ! geographic latitude of each magnetic grid point (deg)
- gdlondeg ! geographic longitude of each magnetic grid point (deg)
+ real(r8),dimension(:,:), allocatable :: & ! geo lat,lon coords on mag grid
+ gdlatdeg, & ! geographic latitude of each magnetic grid point (deg)
+ gdlondeg ! geographic longitude of each magnetic grid point (deg)
!
! Variables on geographic grid needed by other modules must
! be allocated dynamically to be grid-independent (sub alloc_apex):
!
- integer,allocatable :: & ! (nlonp1,jspole:jnpole))
- im(:,:), & ! geomag lon grid containing each geog point
- jm(:,:) ! geomag lat grid containing each geog point
-
- real(r8),allocatable :: & ! (nlonp1,jspole:jnpole)
- dim(:,:), & ! fraction in lon for grid interp
- djm(:,:) ! fraction in lat for grid interp
-
- real(r8),allocatable :: & ! (nlonp1,jspole:jnpole,3,2)
- dvec(:,:,:,:) ! vectors from apxmall
+ real(r8),allocatable :: & ! (nlonp1,jspole:jnpole,3,2)
+ dvec(:,:,:,:) ! vectors from apxmall
- real(r8),allocatable :: & ! (nlonp1,jspole:jnpole)
- dddarr(:,:), & ! from apxmall
- be3arr(:,:) ! from apxmall
+ real(r8),allocatable :: & ! (nlonp1,jspole:jnpole)
+ dddarr(:,:), & ! from apxmall
+ be3arr(:,:) ! from apxmall
- real(r8),allocatable :: & ! (nlonp1,jspole:jnpole)
- alatm(:,:), & ! geomagnetic latitude at each geographic grid point (radians)
- alonm(:,:), & ! geomagnetic longitude at each geographic grid point (radians)
- xb(:,:), & ! northward component of magnetic field
- yb(:,:), & ! eastward component of magnetic field
- zb(:,:), & ! downward component of magnetic field (gauss)
- bmod(:,:) ! magnitude of magnetic field (gauss)
+ real(r8),allocatable :: & ! (nlonp1,jspole:jnpole)
+ alatm(:,:), & ! geomagnetic latitude at each geographic grid point (radians)
+ alonm(:,:), & ! geomagnetic longitude at each geographic grid point (radians)
+ xb(:,:), & ! northward component of magnetic field
+ yb(:,:), & ! eastward component of magnetic field
+ zb(:,:), & ! downward component of magnetic field (gauss)
+ bmod(:,:) ! magnitude of magnetic field (gauss)
!
! rjac: scaled derivatives of geomagnetic coords wrt geographic coordinates.
! rjac(1,1) = cos(thetas)/cos(theta)*d(lamdas)/d(lamda)
@@ -65,169 +53,128 @@ module getapex
! where (lamda,theta) are geographic coordinates
! (lamdas,thetas) are geomagnetic coordinates
!
- real(r8),allocatable :: &
- rjac(:,:,:,:) ! (nlon+1,jspole:jnpole,2,2)
+ real(r8),allocatable :: &
+ rjac(:,:,:,:) ! (nlon+1,jspole:jnpole,2,2)
!
! Parameters defined by sub magfield (allocated in alloc_magfield):
!
- real(r8),allocatable,dimension(:,:) :: & ! (0:nlon+1,jspole-1:jnpole+1)
- bx,by,bz,bmod2
+ real(r8),allocatable,dimension(:,:) :: & ! (0:nlon+1,jspole-1:jnpole+1)
+ bx,by,bz,bmod2
- contains
+contains
!-----------------------------------------------------------------------
- subroutine get_apex( )
-!
-! This is called once per run from main.
-!
- use edyn_params,only: re_dyn,h0,hs,dtr,rtd
- use apex, only: apex_mall,apex_q2g
- use edyn_geogrid,only: glat_edyn_geo => glat, glon_edyn_geo => glon
-
-!
-! Local:
- integer :: i,j,ier,jjm,jjg
- integer,parameter :: nalt=2
- real(r8) :: real8
+ subroutine get_apex( )
+ !
+ ! This is called once per run from main.
+ !
+ use edyn_params, only: re_dyn, h0, hs, dtr, rtd, cm2km
+ use apex, only: apex_mall, apex_q2g
+ use edyn_geogrid, only: glat_edyn_geo=>glat, glon_edyn_geo=>glon
- real(r8) :: rekm,h0km,alt,hr,ror03,glat,glon,&
- xlonmi,qdlon,qdlat,gdlon,gdlat,xlongi,frki,frkj
+ !
+ ! Local:
+ integer :: i, j, ier
+ real(r8) :: rekm, h0km, alt, hr, ror03, glat, glon
+ real(r8) :: qdlon, qdlat, gdlon, gdlat
+ integer, parameter :: nalt=2
-!
-! Non-scalar arguments returned by apxmall:
- real(r8) :: &
- b(3),bhat(3), &
- d1(3),d2(3),d3(3), &
- e1(3),e2(3),e3(3), &
- f1(2),f2(2)
- real(r8) :: bmag,alon,xlatm,vmp,w,d,be3,si,sim,xlatqd,f
+ !
+ ! Non-scalar arguments returned by apxmall:
+ real(r8) :: &
+ b(3), bhat(3), &
+ d1(3), d2(3),d3(3), &
+ e1(3), e2(3),e3(3), &
+ f1(2), f2(2)
+ real(r8) :: bmag, alon, xlatm, vmp, w, d, be3, si, sim, xlatqd, f
-!
-! Allocate arrays that are needed by other modules:
- call alloc_apex
- call alloc_magfield
+ !
+ ! Allocate arrays that are needed by other modules:
+ call alloc_apex
+ call alloc_magfield
- rekm = re_dyn*1.e-5_r8 ! earth radius (km)
- h0km = h0*1.e-5_r8
- alt = hs*1.e-5_r8 ! modified apex reference altitude (km)
- hr = alt
- ror03= ((rekm + alt)/(rekm + h0km))**3
-!
-! Loop over 2d geographic grid:
-!
- do j=jspole,jnpole
- glat = glat_edyn_geo(j)
- do i=1,nlonp1
- if (i.eq.nlonp1) then
- glon = glon_edyn_geo(1)
- else
- glon = glon_edyn_geo(i)
- endif
+ dddarr = 0._r8
+ dvec = 0._r8
+ rekm = re_dyn*cm2km ! earth radius (km)
+ h0km = h0*cm2km
+ alt = hs*cm2km ! modified apex reference altitude (km)
+ hr = alt
+ ror03= ((rekm + alt)/(rekm + h0km))**3
+ !
+ ! Loop over 2d geographic grid:
+ !
+ do j = jspole, jnpole
+ glat = glat_edyn_geo(j)
+ do i = 1, nlonp1
+ if (i == nlonp1) then
+ glon = glon_edyn_geo(1)
+ else
+ glon = glon_edyn_geo(i)
+ end if
- call apex_mall ( &
- glat,glon,alt,hr, & !Inputs
- b,bhat,bmag,si, & !Mag Fld
- alon, & !Apx Lon
- xlatm,vmp,w,d,be3,sim,d1,d2,d3,e1,e2,e3, & !Mod Apx
- xlatqd,f,f1,f2 , ier) !Qsi-Dpl
+ call apex_mall ( &
+ glat,glon,alt,hr, & !Inputs
+ b,bhat,bmag,si, & !Mag Fld
+ alon, & !Apx Lon
+ xlatm,vmp,w,d,be3,sim,d1,d2,d3,e1,e2,e3, & !Mod Apx
+ xlatqd,f,f1,f2, ier) !Qsi-Dpl
- if (ier /= 0) call endrun('get_apex: apxmall error')
+ if (ier /= 0) then
+ call endrun('get_apex: apxmall error')
+ end if
- alatm(i,j) = xlatm*dtr
- alonm(i,j) = alon *dtr
- xb (i,j) = b(2)*1.e-5_r8 ! nT -> gauss
- yb (i,j) = b(1)*1.e-5_r8 ! nT -> gauss
- zb (i,j) = -b(3)*1.e-5_r8 ! nT -> gauss
- bmod (i,j) = bmag*1.e-5_r8 ! nT -> gauss
+ alatm(i,j) = xlatm*dtr
+ alonm(i,j) = alon *dtr
+ xb (i,j) = b(2)*1.e-5_r8 ! nT -> gauss
+ yb (i,j) = b(1)*1.e-5_r8 ! nT -> gauss
+ zb (i,j) = -b(3)*1.e-5_r8 ! nT -> gauss
+ bmod (i,j) = bmag*1.e-5_r8 ! nT -> gauss
- rjac (i,j,1,1) = f2(2)
- rjac (i,j,1,2) = -f2(1)
- rjac (i,j,2,1) = -f1(2)
- rjac (i,j,2,2) = f1(1)
-!
-! Set up parameters for magnetic to geographic interpolation.
-!
- xlonmi = (alonm(i,j) - ylonm(1))/dlonm
- real8 = dble(nmlon)
- if (xlonmi < 0._r8) xlonmi = xlonmi + real8
- im(i,j) = xlonmi
- real8 = dble(im(i,j))
- dim(i,j) = xlonmi - real8
- im(i,j) = im(i,j) + 1
- if (im(i,j) >= nmlonp1) im(i,j) = im(i,j) - nmlon
- alatm(i,j) = min(alatm(i,j),ylatm(nmlat))
- do jjm=2,nmlat
- if (alatm(i,j) > ylatm(jjm)) cycle
- jm(i,j) = jjm - 1
- djm(i,j) = (alatm(i,j) - ylatm(jm(i,j)))/ &
- (ylatm(jjm) - ylatm(jm(i,j)))
- exit
- enddo
- if (j /= jspole .and. j /= jnpole) then
- dvec(i,j,1,1) = d1(1)
- dvec(i,j,2,1) = d1(2)
- dvec(i,j,3,1) = d1(3)
- dvec(i,j,1,2) = d2(1)
- dvec(i,j,2,2) = d2(2)
- dvec(i,j,3,2) = d2(3)
- dddarr(i,j) = d
-!
-! Scale be3 from 130 km to a reference height of 90 km.
- be3arr(i,j) = be3*ror03
- endif
- enddo ! i=1,nlonp1
- enddo ! j=jspole,jnpole
-!
-! Set up parameters for geographic to magnetic interpolation
- do i=1,nmlonp1
- qdlon = ylonm(i)*rtd
- do j=1,nmlat
- qdlat = ylatm(j)*rtd
-!
-! Convert from Quasi-Dipole to geographic coordinates.
-! gdlat,gdlon are returned by apxq2g.
-!
- call apex_q2g(qdlat,qdlon,alt,gdlat,gdlon,ier)
- if (ier /= 0) then
- write(iulog,"(i3,i3,i3)") '>>> Error from apex_q2g: ier=',ier, &
- ' i=',i,' j=',j
- call endrun('get_apex: apex_q2g ier')
- endif
- gdlat = gdlat*dtr
- gdlon = gdlon*dtr
- xlongi = (gdlon - ylong(1))/dlong
- real8 = dble(nlon)
- if (xlongi < 0._r8) xlongi = xlongi + real8
- ig(i,j) = xlongi
- real8 = dble(ig(i,j))
- frki = xlongi - real8
- ig(i,j) = ig(i,j) + 1
- if (ig(i,j) >= nlonp1) ig(i,j) = ig(i,j) - nlon
- gdlat = min(gdlat,ylatg(jnpole))
- do jjg=1,jnpole
- if (gdlat > ylatg(jjg)) cycle
- jg(i,j) = jjg - 1
- frkj = (gdlat - ylatg(jg(i,j)))/(ylatg(jjg) - ylatg(jg(i,j)))
-!
-! 99/2/25b Add one to JG to account for the fact that AG in geo2mag has
-! a second (J) index starting at 1, while the second index of the
-! array in the calling arguments begins at 0.
-!
- jg(i,j) = jg(i,j) + 1
- exit
- enddo
- wt(1,i,j) = (1._r8 - frki)*(1._r8 - frkj)
- wt(2,i,j) = frki *(1._r8 - frkj)
- wt(3,i,j) = frki *frkj
- wt(4,i,j) = (1._r8 - frki)*frkj
-!
-! gdlatdeg,gdlondeg will be coordY,coordX of the mag grid for ESMF
-! regridding (see edyn_esmf.F)
-!
- gdlatdeg(i,j) = gdlat*rtd
- gdlondeg(i,j) = gdlon*rtd
- enddo ! j=1,nmlat
- enddo ! i=1,nmlonp1
- end subroutine get_apex
+ rjac (i,j,1,1) = f2(2)
+ rjac (i,j,1,2) = -f2(1)
+ rjac (i,j,2,1) = -f1(2)
+ rjac (i,j,2,2) = f1(1)
+ !
+ ! Set up parameters for magnetic to geographic interpolation.
+ !
+ dvec(i,j,1,1) = d1(1)
+ dvec(i,j,2,1) = d1(2)
+ dvec(i,j,3,1) = d1(3)
+ dvec(i,j,1,2) = d2(1)
+ dvec(i,j,2,2) = d2(2)
+ dvec(i,j,3,2) = d2(3)
+ dddarr(i,j) = d
+ !
+ ! Scale be3 from 130 km to a reference height of 90 km.
+ be3arr(i,j) = be3 * ror03
+ end do ! i=1,nlonp1
+ end do ! j=jspole,jnpole
+ !
+ ! Set up parameters for geographic to magnetic interpolation
+ do i = 1, nmlonp1
+ qdlon = ylonm(i)*rtd
+ do j = 1, nmlat
+ qdlat = ylatm(j)*rtd
+ !
+ ! Convert from Quasi-Dipole to geographic coordinates.
+ ! gdlat,gdlon are returned by apxq2g.
+ !
+ call apex_q2g(qdlat, qdlon, alt, gdlat, gdlon, ier)
+ if (ier /= 0) then
+ write(iulog,"(i3,i3,i3)") '>>> Error from apex_q2g: ier=',ier, &
+ ' i=',i,' j=',j
+ call endrun('get_apex: apex_q2g ier')
+ end if
+ gdlat = gdlat * dtr
+ gdlon = gdlon * dtr
+ !
+ ! gdlatdeg,gdlondeg will be coordY,coordX of the mag grid for ESMF
+ ! regridding (see edyn_esmf.F)
+ !
+ gdlatdeg(i,j) = gdlat*rtd
+ gdlondeg(i,j) = gdlon*rtd
+ enddo ! j=1,nmlat
+ enddo ! i=1,nmlonp1
+ end subroutine get_apex
!-----------------------------------------------------------------------
subroutine magfield
!
@@ -312,16 +259,12 @@ subroutine alloc_magfield
end subroutine alloc_magfield
!-----------------------------------------------------------------------
-
+
subroutine alloc_apex
!------------------------------------------------------------------------------------------
! Do allocations, checking if previously allocated in case of year boundary crossing
!------------------------------------------------------------------------------------------
- if (.not.allocated(im)) allocate(im (nlonp1,jspole:jnpole))
- if (.not.allocated(jm)) allocate(jm (nlonp1,jspole:jnpole))
- if (.not.allocated(dim)) allocate(dim(nlonp1,jspole:jnpole))
- if (.not.allocated(djm)) allocate(djm(nlonp1,jspole:jnpole))
if (.not.allocated(xb)) allocate(xb (nlonp1,jspole:jnpole))
if (.not.allocated(yb)) allocate(yb (nlonp1,jspole:jnpole))
@@ -336,6 +279,9 @@ subroutine alloc_apex
if (.not.allocated(rjac)) allocate(rjac(nlon+1,jspole:jnpole,2,2))
+ if (.not.allocated(gdlatdeg)) allocate(gdlatdeg(nmlonp1,nmlat))
+ if (.not.allocated(gdlondeg)) allocate(gdlondeg(nmlonp1,nmlat))
+
end subroutine alloc_apex
!-----------------------------------------------------------------------
end module getapex
diff --git a/src/ionosphere/waccmx/heelis.F90 b/src/ionosphere/waccmx/heelis.F90
index 7b07177865..50f397ec6e 100644
--- a/src/ionosphere/waccmx/heelis.F90
+++ b/src/ionosphere/waccmx/heelis.F90
@@ -1,7 +1,6 @@
module heelis
use shr_kind_mod ,only: r8 => shr_kind_r8 ! 8-byte reals
use edyn_maggrid ,only: nmlon,nmlonp1,nmlat,ylonm,ylatm
- use edyn_geogrid ,only: nlat
use heelis_mod ,only: heelis_update, heelis_flwv32
!
! phihm and pfrac are output of this module:
@@ -20,13 +19,13 @@ module heelis
contains
!-----------------------------------------------------------------------
- subroutine heelis_model(sunlons)
+ subroutine heelis_model(sunlon)
use aurora_params, only: aurora_params_set
! Driver for Heelis empirical model to calculate high-latitude potential.
!
! Args:
- real(r8),intent(in) :: sunlons(nlat) ! sun's location
+ real(r8),intent(in) :: sunlon ! sun's location
!
! Set auroral parameters:
@@ -38,18 +37,18 @@ subroutine heelis_model(sunlons)
! Calculate the heelis potential phihm in geomagnetic coordinates:
! (potm calls sub flwv32)
!
- call potm(sunlons)
+ call potm(sunlon)
end subroutine heelis_model
!-----------------------------------------------------------------------
- subroutine potm(sunlons)
+ subroutine potm(sunlon)
use edyn_params, only: pi_dyn ! pi used in dynamo calculations
!
! Calculate heelis potential in geomagnetic coordinates.
!
! Args:
- real(r8),intent(in) :: sunlons(nlat)
+ real(r8),intent(in) :: sunlon
!
! Local:
integer :: j
@@ -60,7 +59,7 @@ subroutine potm(sunlons)
do j=1,nmlat
iflag(:) = 1 ! must be updated at each j
dlat(:) = ylatm(j)
- dlon(:) = ylonm(1:nmlon)-sunlons(1)
+ dlon(:) = ylonm(1:nmlon)-sunlon
!
! flwv32 returns single-level Heelis potential in geomag coords:
!
diff --git a/src/ionosphere/waccmx/ionosphere_interface.F90 b/src/ionosphere/waccmx/ionosphere_interface.F90
index f6b3463e28..a93e2b4687 100644
--- a/src/ionosphere/waccmx/ionosphere_interface.F90
+++ b/src/ionosphere/waccmx/ionosphere_interface.F90
@@ -1,1258 +1,1165 @@
module ionosphere_interface
- use shr_kind_mod, only: r8 => shr_kind_r8
- use phys_grid, only: begchunk, endchunk, get_ncols_p
- use pmgrid, only: plat, plon, plev
- use ppgrid, only: pcols, pver
-
- use dpie_coupling, only: d_pie_init
- use dpie_coupling, only: d_pie_epotent
- use dpie_coupling, only: d_pie_coupling ! WACCM-X ionosphere/electrodynamics coupling
- use short_lived_species, only: slvd_index,slvd_pbf_ndx => pbf_idx ! Routines to access short lived species
-
- use chem_mods, only: adv_mass ! Array holding mass values for short lived species
- use mo_chem_utls, only: get_spc_ndx ! Routine to get index of adv_mass array for short lived species
- use physics_buffer, only: pbuf_get_chunk, pbuf_get_field, pbuf_get_index
-
- use cam_abortutils, only: endrun
- use constituents, only: cnst_get_ind, cnst_mw !Needed to access constituent molecular weights
- use phys_grid, only: get_lon_all_p, get_lat_all_p, transpose_block_to_chunk, transpose_chunk_to_block
- use phys_grid, only: chunk_to_block_send_pters, chunk_to_block_recv_pters, block_to_chunk_send_pters, &
- block_to_chunk_recv_pters
- use physconst, only: gravit
- use oplus, only: oplus_init
- use edyn_init, only: edynamo_init
- use pio, only: var_desc_t
- use spmd_dyn, only: local_dp_map, block_buf_nrecs, chunk_buf_nrecs
- use dyn_internal_state, only: get_dyn_state_grid
- use dynamics_vars, only: t_fvdycore_grid
- use perf_mod
- use epotential_params, only: epot_active, epot_crit_colats
- implicit none
-
- private
-
- public :: ionosphere_readnl
- public :: ionosphere_init
- public :: ionosphere_run1
- public :: ionosphere_run2
- public :: ionosphere_init_restart
- public :: ionosphere_write_restart
- public :: ionosphere_read_restart
- public :: ionosphere_final
-
- ! private data
-
- ! this needs to persist from time-step to time-step and across restarts
- real(r8), allocatable :: opmmrtm1_blck(:,:,:) ! O+ at previous time step(blocks)
-
- type(var_desc_t) :: Optm1_vdesc
- integer :: index_ped, index_hall, index_te, index_ti
- integer :: index_ui, index_vi, index_wi
-
- integer :: ixo2=-1, ixo=-1, ixh=-1
- integer :: ixo2p=-1, ixnop=-1, ixn2p=-1, ixop=-1
-
- ! indices for accessing ions in pbuf when non-advected
- integer :: sIndxOp=-1, sIndxO2p=-1, sIndxNOp=-1, sIndxN2p=-1
-
- real(r8) :: rmassO2 ! O2 molecular weight kg/kmol
- real(r8) :: rmassO1 ! O atomic weight kg/kmol
- real(r8) :: rmassH ! H atomic weight kg/kmol
- real(r8) :: rmassN2 ! N2 molecular weight kg/kmol
- real(r8) :: rmassO2p ! O2+ molecular weight kg/kmol
- real(r8) :: rmassNOp ! NO+ molecular weight kg/kmol
- real(r8) :: rmassN2p ! N2+ molecular weight kg/kmol
- real(r8) :: rmassOp ! O+ molecular weight kg/kmol
-
- logical, public, protected :: ionos_edyn_active = .true. ! if true, edynamo will generate ion drifts
- logical, public, protected :: ionos_xport_active = .true. ! if true, call d_pie_coupling from dp_coupling
- !
- ! ionos_edyn_active = .true. will activate the edynamo which will generate ion drift velocities
- ! used in oplus transport, otherwise empirical ion drifts calculated in exbdrift (physics) will be used.
- !
- logical, public, protected :: ionos_oplus_xport = .true. ! if true, call sub oplus (based on tiegcm oplus.F)
- integer, public, protected :: ionos_xport_nsplit = 5 ! number of substeps for O+ transport per model time step
-
- real(r8), public, protected :: oplus_adiff_limiter = 1.5e+8_r8 ! limiter for ambipolar diffusion coefficient
- real(r8), public, protected :: oplus_shapiro_const = 0.03_r8 ! shapiro constant for spatial smoother
- logical, public, protected :: oplus_enforce_floor = .true. ! switch to apply Stan's floor
- logical, public, protected :: oplus_ring_polar_filter = .false. ! switch to apply ring polar filter
-
- character(len=256) :: wei05_coefs_file = 'NONE' !'wei05sc.nc'
- character(len=256) :: amienh_file = 'NONE'
- character(len=256) :: amiesh_file = 'NONE'
-
- character(len=16), public, protected :: ionos_epotential_model = 'none'
- logical, public, protected :: ionos_epotential_amie = .false.
- integer :: indxAMIEefxg=-1, indxAMIEkevg=-1
-
-contains
-
- !--------------------------------------------------------------------------------
- !--------------------------------------------------------------------------------
- subroutine ionosphere_readnl( nlfile )
-
- use namelist_utils, only: find_group_name
- use units, only: getunit, freeunit
- use spmd_utils, only: mpicom, masterprocid, mpi_real8, mpi_logical, mpi_integer, mpi_character
- use cam_logfile, only: iulog
- use spmd_utils, only: masterproc
-
- character(len=*), intent(in) :: nlfile ! filepath for file containing namelist input
-
- ! Local variables
- integer :: unitn, ierr
- character(len=*), parameter :: subname = 'ionosphere_readnl'
-
- namelist /ionosphere_nl/ ionos_xport_active, ionos_edyn_active, ionos_oplus_xport, ionos_xport_nsplit
- namelist /ionosphere_nl/ oplus_adiff_limiter, oplus_shapiro_const, oplus_enforce_floor, oplus_ring_polar_filter
- namelist /ionosphere_nl/ ionos_epotential_model, ionos_epotential_amie, wei05_coefs_file
- namelist /ionosphere_nl/ amienh_file, amiesh_file, wei05_coefs_file
- namelist /ionosphere_nl/ epot_crit_colats
-
- ! Read namelist
- if (masterproc) then
- unitn = getunit()
- open( unitn, file=trim(nlfile), status='old' )
- call find_group_name(unitn, 'ionosphere_nl', status=ierr)
- if (ierr == 0) then
- read(unitn, ionosphere_nl, iostat=ierr)
- if (ierr /= 0) then
- call endrun(subname // ':: ERROR reading namelist')
- end if
- end if
- close(unitn)
- call freeunit(unitn)
- end if
-
- ! Broadcast namelist variables
- call mpi_bcast(ionos_xport_active, 1, mpi_logical, masterprocid, mpicom, ierr)
- call mpi_bcast(ionos_edyn_active, 1, mpi_logical, masterprocid, mpicom, ierr)
- call mpi_bcast(ionos_oplus_xport, 1, mpi_logical, masterprocid, mpicom, ierr)
- call mpi_bcast(ionos_xport_nsplit, 1, mpi_integer, masterprocid, mpicom, ierr)
- call mpi_bcast(oplus_adiff_limiter, 1, mpi_real8, masterprocid, mpicom, ierr)
- call mpi_bcast(ionos_epotential_model, len(ionos_epotential_model), mpi_character, masterprocid, mpicom, ierr)
- call mpi_bcast(ionos_epotential_amie,1, mpi_logical, masterprocid, mpicom, ierr)
- call mpi_bcast(wei05_coefs_file, len(wei05_coefs_file), mpi_character, masterprocid, mpicom, ierr)
- call mpi_bcast(amienh_file, len(amienh_file), mpi_character, masterprocid, mpicom, ierr)
- call mpi_bcast(amiesh_file, len(amiesh_file), mpi_character, masterprocid, mpicom, ierr)
- call mpi_bcast(oplus_shapiro_const, 1, mpi_real8, masterprocid, mpicom, ierr)
- call mpi_bcast(oplus_enforce_floor, 1, mpi_logical, masterprocid, mpicom, ierr)
- call mpi_bcast(oplus_ring_polar_filter,1, mpi_logical, masterprocid, mpicom, ierr)
- call mpi_bcast(epot_crit_colats, 2, mpi_real8, masterprocid, mpicom, ierr)
-
- ! log the user settings
- if (masterproc) then
- write(iulog,*) 'ionosphere_readnl: ionos_xport_active = ', ionos_xport_active
- write(iulog,*) 'ionosphere_readnl: ionos_edyn_active = ', ionos_edyn_active
- write(iulog,*) 'ionosphere_readnl: ionos_oplus_xport = ', ionos_oplus_xport
- write(iulog,*) 'ionosphere_readnl: ionos_xport_nsplit = ', ionos_xport_nsplit
- write(iulog,*) 'ionosphere_readnl: ionos_epotential_model = ', trim(ionos_epotential_model)
- write(iulog,*) 'ionosphere_readnl: ionos_epotential_amie = ', ionos_epotential_amie
- write(iulog,'(a,2(g12.4))') &
- ' ionosphere_readnl: epot_crit_colats = ', epot_crit_colats
- write(iulog,*) 'ionosphere_readnl: oplus_adiff_limiter = ', oplus_adiff_limiter
- write(iulog,*) 'ionosphere_readnl: oplus_shapiro_const = ', oplus_shapiro_const
- write(iulog,*) 'ionosphere_readnl: oplus_enforce_floor = ', oplus_enforce_floor
- write(iulog,*) 'ionosphere_readnl: oplus_ring_polar_filter= ', oplus_ring_polar_filter
- endif
- epot_active = .true.
-
- end subroutine ionosphere_readnl
-
- !--------------------------------------------------------------------------------
- !--------------------------------------------------------------------------------
- subroutine ionosphere_init()
- use physics_buffer, only: pbuf_add_field, dtype_r8
- use cam_history, only: addfld, add_default, horiz_only
- use mo_apex, only: mo_apex_init1
- use cam_control_mod,only: initial_run
- use dyn_grid, only: get_horiz_grid_d
- use ref_pres, only : & ! Hybrid level definitions:
- pref_mid, & ! target alev(plev) midpoint levels coord
- pref_edge ! target ailev(plevp) interface levels coord
- use amie_module, only: init_amie
- use wei05sc, only: weimer05_init
-
- ! local variables:
- type (t_fvdycore_grid), pointer :: grid
- integer :: sIndx
-
- integer :: mpicomm ! MPI communicator
- integer :: ntaski, ntaskj ! number of MPI tasks in lon,lat dimensions
- integer :: lat0,lat1 ! first and last latitude indices
- integer :: lon0,lon1 ! first and last longitude indices
- integer :: lev0,lev1 ! first and last pressure indices
- real(r8), allocatable :: glon(:) ! global geo-graphic longitudes (degrees)
- real(r8), allocatable :: glat(:) ! global geo-graphic latitudes (degrees)
-
- if ( ionos_epotential_amie ) then
- call pbuf_add_field('AMIE_efxg', 'global', dtype_r8, (/pcols/), indxAMIEefxg) ! Energy flux from AMIE
- call pbuf_add_field('AMIE_kevg', 'global', dtype_r8, (/pcols/), indxAMIEkevg) ! Mean energy from AMIE
- endif
- if (initial_run) then
- call ionosphere_read_ic()
- endif
-
- call mo_apex_init1()
-
- op_transport: if (ionos_xport_active) then
-
- grid => get_dyn_state_grid()
-
- index_ped = pbuf_get_index('PedConduct')
- index_hall = pbuf_get_index('HallConduct')
-
- index_te = pbuf_get_index('TElec')
- index_ti = pbuf_get_index('TIon')
- !
- ! pbuf indices to empirical ion drifts, to be passed to oplus_xport,
- ! if ionos_edyn_active is false.
- !
- index_ui = pbuf_get_index('UI')
- index_vi = pbuf_get_index('VI')
- index_wi = pbuf_get_index('WI')
-
- !-----------------------------------------------------------------------
- ! Get indices for neutrals to get mixing ratios from state%q and masses
- !-----------------------------------------------------------------------
- call cnst_get_ind('O2' ,ixo2 )
- call cnst_get_ind('O' ,ixo )
- call cnst_get_ind('H' ,ixh )
- !------------------------------------
- ! Get neutral molecular weights
- !------------------------------------
- rmassO2 = cnst_mw(ixo2)
- rmassO1 = cnst_mw(ixo)
- rmassH = cnst_mw(ixh)
- rmassN2 = 28._r8
-
- call cnst_get_ind('Op',ixop, abort=.false.)
- if (ixop > 0) then
- rMassOp = cnst_mw(ixop)
- else
- sIndxOp = slvd_index( 'Op' )
- if (sIndxOp > 0) then
- sIndx = get_spc_ndx( 'Op' )
- rmassOp = adv_mass(sIndx)
- else
- call endrun('ionosphere_init: Cannot find state or pbuf index for Op')
- endif
- endif
-
- call cnst_get_ind('O2p',ixo2p, abort=.false.)
- if (ixo2p > 0) then
- rMassO2p = cnst_mw(ixo2p)
- else
- sIndxO2p = slvd_index( 'O2p' )
- if (sIndxO2p > 0) then
- sIndx = get_spc_ndx( 'O2p' )
- rmassO2p = adv_mass(sIndx)
- else
- call endrun('ionosphere_init: Cannot find state or pbuf index for O2p')
- endif
- endif
-
- call cnst_get_ind('NOp',ixnop, abort=.false.)
- if (ixnop > 0) then
- rMassNOp = cnst_mw(ixnop)
- else
- sIndxNOp = slvd_index( 'NOp' )
- if (sIndxNOp > 0) then
- sIndx = get_spc_ndx( 'NOp' )
- rmassNOp = adv_mass(sIndx)
- else
- call endrun('ionosphere_init: Cannot find state or pbuf index for NOp')
- endif
- endif
-
- call cnst_get_ind('N2p',ixn2p, abort=.false.)
- if (ixn2p > 0) then
- rMassN2p = cnst_mw(ixn2p)
- else
- sIndxN2p = slvd_index( 'N2p' )
- if (sIndxN2p > 0) then
- sIndx = get_spc_ndx( 'N2p' )
- rmassN2p = adv_mass(sIndx)
- else
- call endrun('ionosphere_init: Cannot find state or pbuf index for N2p')
- endif
- endif
-
- call d_pie_init( ionos_edyn_active, ionos_oplus_xport, ionos_xport_nsplit, epot_crit_colats )
-
- if ( grid%iam < grid%npes_xy ) then
-
- allocate(glon(plon))
- allocate(glat(plat))
- call get_horiz_grid_d( plon, lon_d_out=glon )
- call get_horiz_grid_d( plat, lat_d_out=glat )
-
- mpicomm = grid%commxy
- lon0 = grid%ifirstxy ; lon1 = grid%ilastxy
- lat0 = grid%jfirstxy ; lat1 = grid%jlastxy
- lev0 = 1 ; lev1 = grid%km
- ntaski = grid%nprxy_x
- ntaskj = grid%nprxy_y
-
- call edynamo_init( mpicomm, plon, plat, plev, lon0,lon1,lat0,lat1,lev0,lev1, ntaski,ntaskj, &
- glon, glat, pref_mid,pref_edge )
- call ionosphere_alloc()
- call oplus_init( oplus_adiff_limiter, oplus_shapiro_const, oplus_enforce_floor, oplus_ring_polar_filter )
-
- deallocate(glon,glat)
- endif
-
- call addfld ('OpTM1&IC', (/ 'lev' /),'I','kg/kg','O+ at time step minus 1',gridname='fv_centers')
- call add_default ('OpTM1&IC',0, 'I')
-
- endif op_transport
-
- if (ionos_edyn_active) then
- call addfld ('UI',(/ 'lev' /),'I','m/s', 'UI Zonal ion drift from edynamo')
- call addfld ('VI',(/ 'lev' /),'I','m/s', 'VI Meridional ion drift from edynamo')
- call addfld ('WI',(/ 'lev' /),'I','m/s', 'WI Vertical ion drift from edynamo')
- call addfld ('UI&IC', (/ 'lev' /), 'I','m/s', 'Zonal ion drift velocity')
- call addfld ('VI&IC', (/ 'lev' /), 'I','m/s', 'Meridional ion drift velocity')
- call addfld ('WI&IC', (/ 'lev' /), 'I','m/s', 'Vertical ion drift velocity')
- call add_default ('UI&IC', 0, ' ')
- call add_default ('VI&IC', 0, ' ')
- call add_default ('WI&IC', 0, ' ')
- endif
- if ( ionos_epotential_amie ) then
- call init_amie(amienh_file,amiesh_file)
- call addfld ('amie_efx_phys',horiz_only,'I','mW/m2', 'AMIE energy flux')
- call addfld ('amie_kev_phys',horiz_only,'I','keV' , 'AMIE mean energy')
- end if
- if ( trim(ionos_epotential_model) == 'weimer' ) then
- call weimer05_init(wei05_coefs_file)
- endif
-
- end subroutine ionosphere_init
-
- !--------------------------------------------------------------------------------
- !--------------------------------------------------------------------------------
- subroutine ionosphere_run1(pbuf2d)
- use physics_buffer, only: physics_buffer_desc
- use cam_history, only: outfld, write_inithist
- use phys_grid, only: get_ncols_p
-
- ! args
- type(physics_buffer_desc), pointer :: pbuf2d(:,:)
-
- ! local vars
- integer :: i, j, k, lchnk ! indices
- integer :: ifirstxy, ilastxy, jfirstxy, jlastxy, km, idim
- real(r8), allocatable :: tmp(:,:)
- type(physics_buffer_desc), pointer :: pbuf_chnk(:)
-
- type(t_fvdycore_grid), pointer :: grid
-
- real(r8), pointer :: pbuf_amie_efxg(:) ! Pointer to access AMIE energy flux in pbuf
- real(r8), pointer :: pbuf_amie_kevg(:) ! Pointer to access AMIE mean energy in pbuf
-
- integer :: lats(pcols) ! array of latitude indices
- integer :: lons(pcols) ! array of longitude in
- integer :: blksiz ! number of columns in 2D block
- integer :: tsize ! amount of data per grid point passed to physics
- integer :: iam, astat
- integer :: ib, ic, jc,ncol
- integer, allocatable, dimension(:,:) :: bpter
- ! offsets into block buffer for packing data
- integer :: cpter(pcols,0:pver) ! offsets into chunk buffer for unpacking data
- real(r8), allocatable, dimension(:) :: bbuffer, cbuffer
- real(r8), allocatable :: amie_efxg(:,:) ! energy flux from AMIE
- real(r8), allocatable :: amie_kevg(:,:) ! characteristic mean energy from AMIE
-
- grid => get_dyn_state_grid()
- iam = grid%iam
-
- ifirstxy = grid%ifirstxy
- ilastxy = grid%ilastxy
- jfirstxy = grid%jfirstxy
- jlastxy = grid%jlastxy
- km = grid%km
-
- if( write_inithist() .and. ionos_xport_active ) then
-
- allocate( tmp(ifirstxy:ilastxy,km) )
-
- idim = ilastxy - ifirstxy + 1
- do j = jfirstxy, jlastxy
- do k = 1, km
- do i = ifirstxy, ilastxy
- tmp(i,k) = opmmrtm1_blck(i,j,k)
- enddo
- enddo
- call outfld ('OpTM1&IC', tmp, idim, j)
- enddo
-
- deallocate( tmp )
-
- endif
-
- amie_active: if ( ionos_epotential_amie ) then
- allocate(amie_efxg(ifirstxy:ilastxy,jfirstxy:jlastxy))
- allocate(amie_kevg(ifirstxy:ilastxy,jfirstxy:jlastxy))
-
- ! data assimilated potential
- call d_pie_epotent( ionos_epotential_model, epot_crit_colats, &
- i0=ifirstxy,i1=ilastxy,j0=jfirstxy,j1=jlastxy, &
- efxg=amie_efxg,kevg=amie_kevg )
-
- ! transform to physics grid for aurora...
-
- ! blocks --> physics chunks
-
- blcks2phys_local: if (local_dp_map) then
-
- chnk_loop1 : do lchnk = begchunk,endchunk
- ncol = get_ncols_p(lchnk)
- call get_lon_all_p(lchnk, ncol, lons)
- call get_lat_all_p(lchnk, ncol, lats)
-
- pbuf_chnk => pbuf_get_chunk(pbuf2d, lchnk)
- call pbuf_get_field(pbuf_chnk, indxAMIEefxg, pbuf_amie_efxg)
- call pbuf_get_field(pbuf_chnk, indxAMIEkevg, pbuf_amie_kevg)
-
- do i=1,ncol
- ic = lons(i)
- jc = lats(i)
- pbuf_amie_efxg(i) = amie_efxg(ic,jc)
- pbuf_amie_kevg(i) = amie_kevg(ic,jc)
- end do
- call outfld ( 'amie_efx_phys', pbuf_amie_efxg, pcols, lchnk )
- call outfld ( 'amie_kev_phys', pbuf_amie_kevg, pcols, lchnk )
- end do chnk_loop1
-
- else ! blcks2phys_local
-
- tsize = 2
- blksiz = (jlastxy-jfirstxy+1)*(ilastxy-ifirstxy+1)
- allocate( bpter(blksiz,0:km),stat=astat )
- allocate( bbuffer(tsize*block_buf_nrecs),stat=astat )
- allocate( cbuffer(tsize*chunk_buf_nrecs),stat=astat )
-
- if (iam < grid%npes_xy) then
- call block_to_chunk_send_pters(iam+1,blksiz,pver+1,tsize,bpter)
- endif
-
- do j=jfirstxy,jlastxy
- do i=ifirstxy,ilastxy
- ib = (j-jfirstxy)*(ilastxy-ifirstxy+1) + (i-ifirstxy+1)
- bbuffer(bpter(ib,0)+0) = amie_efxg(i,j)
- bbuffer(bpter(ib,0)+1) = amie_kevg(i,j)
- end do
- end do
-
- call transpose_block_to_chunk(tsize, bbuffer, cbuffer)
-
- chnk_loop2: do lchnk = begchunk,endchunk
- ncol = get_ncols_p(lchnk)
-
- pbuf_chnk => pbuf_get_chunk(pbuf2d, lchnk)
- call pbuf_get_field(pbuf_chnk, indxAMIEefxg, pbuf_amie_efxg)
- call pbuf_get_field(pbuf_chnk, indxAMIEkevg, pbuf_amie_kevg)
- call block_to_chunk_recv_pters(lchnk,pcols,pver+1,tsize,cpter)
- do i=1,ncol
- pbuf_amie_efxg(i) = cbuffer(cpter(i,0)+0)
- pbuf_amie_kevg(i) = cbuffer(cpter(i,0)+1)
- end do
- call outfld ( 'amie_efx_phys', pbuf_amie_efxg, pcols, lchnk )
- call outfld ( 'amie_kev_phys', pbuf_amie_kevg, pcols, lchnk )
- end do chnk_loop2
-
- deallocate(bpter)
- deallocate(bbuffer)
- deallocate(cbuffer)
-
-
- end if blcks2phys_local
-
- deallocate(amie_efxg,amie_kevg)
-
- else
-
- ! set cross tail potential before physics -- aurora uses weimer derived potential
- call d_pie_epotent( ionos_epotential_model, epot_crit_colats )
-
- end if amie_active
-
- end subroutine ionosphere_run1
-
- !--------------------------------------------------------------------------------
- !--------------------------------------------------------------------------------
- subroutine ionosphere_run2( phys_state, dyn_in, pbuf2d )
-
- use physics_types, only: physics_state
- use physics_buffer, only: physics_buffer_desc
- use dyn_comp, only: dyn_import_t
- use cam_history, only: outfld, write_inithist
-
- ! - pull some fields from pbuf and dyn_in
- ! - invoke ionosphere/electro-dynamics coupling
- ! - push some fields back to physics via pbuf...
-
- ! args
- type(physics_state), intent(in) :: phys_state(begchunk:endchunk)
- type(dyn_import_t), intent(inout) :: dyn_in ! dynamics inputs
- type(physics_buffer_desc), pointer :: pbuf2d(:,:)
-
- ! local vars
- integer :: i,j,k, lchnk
- integer :: astat
-
- integer, allocatable, dimension(:,:) :: bpter
- ! offsets into block buffer for packing data
- integer :: cpter(pcols,0:pver) ! offsets into chunk buffer for unpacking data
- real(r8), allocatable, dimension(:) :: bbuffer, cbuffer
-
- type(physics_buffer_desc), pointer :: pbuf_chnk(:)
-
- real(r8), pointer :: sigma_ped_phys(:,:) ! physics pointer to Pedersen Conductivity
- real(r8), pointer :: sigma_hall_phys(:,:) ! physics pointer fo Hall Conductivity
- real(r8), pointer :: te_phys(:,:) ! te from pbuf
- real(r8), pointer :: ti_phys(:,:) ! ti from pbuf
- real(r8), pointer :: mmrPO2p_phys(:,:) ! Pointer to access O2+ in pbuf
- real(r8), pointer :: mmrPNOp_phys(:,:) ! Pointer to access NO+ in pbuf
- real(r8), pointer :: mmrPN2p_phys(:,:) ! Pointer to access N2+ in pbuf
- real(r8), pointer :: mmrPOp_phys(:,:) ! Pointer to access O+ in pbuf
-!
-! Empirical ion drifts from exbdrift (to be converted to blocked for dpie_coupling):
- real(r8), pointer :: ui_phys(:,:) ! zonal ion drift from pbuf
- real(r8), pointer :: vi_phys(:,:) ! meridional ion drift from pbuf
- real(r8), pointer :: wi_phys(:,:) ! vertical ion drift from pbuf
-
- real(r8), pointer :: o2pmmr_blck(:,:,:) => null() ! O2+ (blocks)
- real(r8), pointer :: nopmmr_blck(:,:,:) => null() ! NO+ (blocks)
- real(r8), pointer :: n2pmmr_blck(:,:,:) => null() ! N2+ (blocks)
- real(r8), pointer :: opmmr_blck(:,:,:) => null() ! O+ (blocks)
-
- real(r8), pointer :: tracer(:,:,:,:)
- real(r8), pointer :: u3s(:,:,:)
- real(r8), pointer :: v3s(:,:,:)
- real(r8), pointer :: pexy(:,:,:)
-
- real(r8), pointer :: phis(:,:) ! surface geopotential
-
- real(r8), pointer :: o2mmr_blck(:,:,:)
- real(r8), pointer :: o1mmr_blck(:,:,:)
- real(r8), pointer :: h1mmr_blck(:,:,:)
-
- integer :: ib, ic, jc, ifirstxy, ilastxy, jfirstxy, jlastxy, km, ncol
-
- integer :: lats(pcols) ! array of latitude indices
- integer :: lons(pcols) ! array of longitude indices
- integer :: nSIons ! number of ions set to non-advected
- integer :: ibuffOp,ibuffO2p,ibuffNOp, ibuffN2p ! Buffer indices for non-advected ions
-
- integer :: blksiz ! number of columns in 2D block
- integer :: tsize ! amount of data per grid point passed to physics
- integer :: iam
-
- real(r8), allocatable :: wuxy(:,:,:)
- real(r8), allocatable :: wvxy(:,:,:)
- real(r8), allocatable :: sigma_ped_blck (:,:,:)
- real(r8), allocatable :: sigma_hall_blck(:,:,:)
- real(r8), allocatable :: ti_blck(:,:,:)
- real(r8), allocatable :: te_blck(:,:,:)
- real(r8), allocatable :: zi_blck(:,:,:)
- real(r8), allocatable :: zm_blck(:,:,:)
- real(r8), allocatable :: ui_blck(:,:,:)
- real(r8), allocatable :: vi_blck(:,:,:)
- real(r8), allocatable :: wi_blck(:,:,:)
- real(r8), allocatable :: omega_blck(:,:,:)
- real(r8), allocatable :: tn_blck(:,:,:)
-
- type (t_fvdycore_grid), pointer :: grid
-
- ionos_cpl: if (ionos_xport_active) then
-
- grid => get_dyn_state_grid()
- iam = grid%iam
-
- allocate( wuxy(grid%ifirstxy:grid%ilastxy, grid%jfirstxy:grid%jlastxy, grid%km) )
- allocate( wvxy(grid%ifirstxy:grid%ilastxy, grid%jfirstxy:grid%jlastxy, grid%km) )
- allocate( sigma_ped_blck (grid%ifirstxy:grid%ilastxy, grid%jfirstxy:grid%jlastxy, grid%km) )
- allocate( sigma_hall_blck(grid%ifirstxy:grid%ilastxy, grid%jfirstxy:grid%jlastxy, grid%km) )
- allocate( ti_blck(grid%ifirstxy:grid%ilastxy, grid%jfirstxy:grid%jlastxy, grid%km) )
- allocate( te_blck(grid%ifirstxy:grid%ilastxy, grid%jfirstxy:grid%jlastxy, grid%km) )
- allocate( zi_blck(grid%ifirstxy:grid%ilastxy, grid%jfirstxy:grid%jlastxy, grid%km) )
- allocate( zm_blck(grid%ifirstxy:grid%ilastxy, grid%jfirstxy:grid%jlastxy, grid%km) )
- allocate( ui_blck(grid%ifirstxy:grid%ilastxy, grid%jfirstxy:grid%jlastxy, grid%km) )
- allocate( vi_blck(grid%ifirstxy:grid%ilastxy, grid%jfirstxy:grid%jlastxy, grid%km) )
- allocate( wi_blck(grid%ifirstxy:grid%ilastxy, grid%jfirstxy:grid%jlastxy, grid%km) )
- allocate( omega_blck(grid%ifirstxy:grid%ilastxy, grid%jfirstxy:grid%jlastxy, grid%km) )
- allocate( tn_blck(grid%ifirstxy:grid%ilastxy, grid%jfirstxy:grid%jlastxy, grid%km) )
-
- ifirstxy = grid%ifirstxy
- ilastxy = grid%ilastxy
- jfirstxy = grid%jfirstxy
- jlastxy = grid%jlastxy
- km = grid%km
-
- phis => dyn_in%phis
-
- tracer => dyn_in%tracer
- pexy => dyn_in%pe
-
- u3s => dyn_in%u3s
- v3s => dyn_in%v3s
-
- if (iam < grid%npes_xy) then
- call d2a3dijk( grid, u3s, v3s, wuxy, wvxy )
- endif
-
- if (sIndxOp>0) then
- allocate(opmmr_blck(ifirstxy:ilastxy,jfirstxy:jlastxy,km),stat=astat)
- if (astat /= 0) call endrun('ionos_intr_d_p_cplng: failed to allocate opmmr_blck')
- endif
- if (sIndxO2p>0) then
- allocate(o2pmmr_blck(ifirstxy:ilastxy,jfirstxy:jlastxy,km),stat=astat)
- if (astat /= 0) call endrun('ionos_intr_d_p_cplng: failed to allocate o2pmmr_blck')
- endif
- if (sIndxNOp>0) then
- allocate(nopmmr_blck(ifirstxy:ilastxy,jfirstxy:jlastxy,km),stat=astat)
- if (astat /= 0) call endrun('ionos_intr_d_p_cplng: failed to allocate nopmmr_blck')
- endif
- if (sIndxN2p>0) then
- allocate(n2pmmr_blck(ifirstxy:ilastxy,jfirstxy:jlastxy,km),stat=astat)
- if (astat /= 0) call endrun('ionos_intr_d_p_cplng: failed to allocate n2pmmr_blck')
- endif
-
- phys2blcks_local: if (local_dp_map) then
-
- do lchnk = begchunk,endchunk
-
- ncol = get_ncols_p(lchnk)
- call get_lon_all_p(lchnk, ncol, lons)
- call get_lat_all_p(lchnk, ncol, lats)
- pbuf_chnk => pbuf_get_chunk(pbuf2d, lchnk)
-
- ! Get Pedersen and Hall conductivities:
- call pbuf_get_field(pbuf_chnk, index_ped, sigma_ped_phys)
- call pbuf_get_field(pbuf_chnk, index_hall, sigma_hall_phys)
- do k=1,km
- do i=1,ncol
- sigma_ped_blck(lons(i),lats(i),k) = sigma_ped_phys(i,k)
- sigma_hall_blck(lons(i),lats(i),k) = sigma_hall_phys(i,k)
- end do
- enddo
-
- ! Get ion and electron temperatures
- call pbuf_get_field(pbuf_chnk, index_te, te_phys)
- call pbuf_get_field(pbuf_chnk, index_ti, ti_phys)
- do k=1,km
- do i=1,ncol
- te_blck(lons(i),lats(i),k) = te_phys(i,k)
- ti_blck(lons(i),lats(i),k) = ti_phys(i,k)
- end do
- enddo
-
- ! Get components of ion drift velocities
- call pbuf_get_field(pbuf_chnk, index_ui, ui_phys)
- call pbuf_get_field(pbuf_chnk, index_vi, vi_phys)
- call pbuf_get_field(pbuf_chnk, index_wi, wi_phys)
- do k=1,km
- do i=1,ncol
- ui_blck(lons(i),lats(i),k) = ui_phys(i,k)
- vi_blck(lons(i),lats(i),k) = vi_phys(i,k)
- wi_blck(lons(i),lats(i),k) = wi_phys(i,k)
- zi_blck(lons(i),lats(i),k) = phys_state(lchnk)%zi(i,k)
- zm_blck(lons(i),lats(i),k) = phys_state(lchnk)%zm(i,k)
- omega_blck(lons(i),lats(i),k) = phys_state(lchnk)%omega(i,k)
- tn_blck(lons(i),lats(i),k) = phys_state(lchnk)%t(i,k)
- enddo
- enddo
-
- !--------------------------------------------------------
- ! Get ions from physics buffer if non-transported
- !--------------------------------------------------------
- if (sIndxO2p > 0) then
- call pbuf_get_field(pbuf_chnk, slvd_pbf_ndx, mmrPO2p_phys, &
- start=(/1,1,sIndxO2p/), kount=(/pcols,pver,1/) )
- do k=1,km
- do i=1,ncol
- o2pmmr_blck(lons(i),lats(i),k) = mmrPO2p_phys(i,k)
- end do
- enddo
- endif
- if (sIndxNOp > 0) then
- call pbuf_get_field(pbuf_chnk, slvd_pbf_ndx, mmrPNOp_phys, &
- start=(/1,1,sIndxNOp/), kount=(/pcols,pver,1/) )
- do k=1,km
- do i=1,ncol
- nopmmr_blck(lons(i),lats(i),k) = mmrPNOp_phys(i,k)
- end do
- enddo
- endif
- if (sIndxN2p > 0) then
- call pbuf_get_field(pbuf_chnk, slvd_pbf_ndx, mmrPN2p_phys, &
- start=(/1,1,sIndxN2p/), kount=(/pcols,pver,1/) )
- do k=1,km
- do i=1,ncol
- n2pmmr_blck(lons(i),lats(i),k) = mmrPN2p_phys(i,k)
- end do
- enddo
- endif
- if (sIndxOp > 0) then
- call pbuf_get_field(pbuf_chnk, slvd_pbf_ndx, mmrPOp_phys, &
- start=(/1,1,sIndxOp/), kount=(/pcols,pver,1/) )
- do k=1,km
- do i=1,ncol
- opmmr_blck(lons(i),lats(i),k) = mmrPOp_phys(i,k)
- end do
- enddo
- endif
-
- enddo ! do lchnk = begchunk,endchunk
-
- else ! phys2blcks_local
-
- tsize = 11
-
- nSIons = 0
- if (sIndxOp > 0) then
- ibuffOp = tsize + nSIons
- nSIons = nSIons + 1
- endif
- if (sIndxO2p > 0) then
- ibuffO2p = tsize + nSIons
- nSIons = nSIons + 1
- endif
- if (sIndxNOp > 0) then
- ibuffNOp = tsize + nSIons
- nSIons = nSIons + 1
- endif
- if (sIndxN2p > 0) then
- ibuffN2p = tsize + nSIons
- nSIons = nSIons + 1
- endif
- tsize = tsize + nSIons
-
- blksiz = (jlastxy-jfirstxy+1)*(ilastxy-ifirstxy+1)
- allocate(bpter(blksiz,0:km))
- allocate(bbuffer(tsize*block_buf_nrecs))
- allocate(cbuffer(tsize*chunk_buf_nrecs))
-
- do lchnk = begchunk,endchunk
- ncol = get_ncols_p(lchnk)
-
- pbuf_chnk => pbuf_get_chunk(pbuf2d, lchnk)
-
- ! Get Pedersen and Hall conductivities:
- call pbuf_get_field(pbuf_chnk, index_ped, sigma_ped_phys)
- call pbuf_get_field(pbuf_chnk, index_hall, sigma_hall_phys)
-
- ! Get ion and electron temperatures
- call pbuf_get_field(pbuf_chnk, index_te, te_phys)
- call pbuf_get_field(pbuf_chnk, index_ti, ti_phys)
-
- ! Get components of ion drift velocities
- call pbuf_get_field(pbuf_chnk, index_ui, ui_phys)
- call pbuf_get_field(pbuf_chnk, index_vi, vi_phys)
- call pbuf_get_field(pbuf_chnk, index_wi, wi_phys)
-
- !--------------------------------------------------------
- ! Get ions from physics buffer if non-transported
- !--------------------------------------------------------
-
- if (sIndxOp > 0) call pbuf_get_field(pbuf_chnk, slvd_pbf_ndx, mmrPOp_phys, &
- start=(/1,1,sIndxOp/), kount=(/pcols,pver,1/) )
- if (sIndxO2p > 0) call pbuf_get_field(pbuf_chnk, slvd_pbf_ndx, mmrPO2p_phys, &
- start=(/1,1,sIndxO2p/), kount=(/pcols,pver,1/) )
- if (sIndxNOp > 0) call pbuf_get_field(pbuf_chnk, slvd_pbf_ndx, mmrPNOp_phys, &
- start=(/1,1,sIndxNOp/), kount=(/pcols,pver,1/) )
- if (sIndxN2p > 0) call pbuf_get_field(pbuf_chnk, slvd_pbf_ndx, mmrPN2p_phys, &
- start=(/1,1,sIndxN2p/), kount=(/pcols,pver,1/) )
-
- call chunk_to_block_send_pters(lchnk,pcols,pver+1,tsize,cpter)
-
- do i=1,ncol
- cbuffer(cpter(i,0):cpter(i,0)+tsize-1) = 0.0_r8
- end do
-
- do k=1,km
- do i=1,ncol
-
- cbuffer(cpter(i,k)+0) = sigma_ped_phys(i,k)
- cbuffer(cpter(i,k)+1) = sigma_hall_phys(i,k)
- cbuffer(cpter(i,k)+2) = te_phys(i,k)
- cbuffer(cpter(i,k)+3) = ti_phys(i,k)
- cbuffer(cpter(i,k)+4) = phys_state(lchnk)%zi(i,k)
- cbuffer(cpter(i,k)+5) = phys_state(lchnk)%zm(i,k)
- cbuffer(cpter(i,k)+6) = ui_phys(i,k)
- cbuffer(cpter(i,k)+7) = vi_phys(i,k)
- cbuffer(cpter(i,k)+8) = wi_phys(i,k)
- cbuffer(cpter(i,k)+9) = phys_state(lchnk)%omega(i,k)
- cbuffer(cpter(i,k)+10) = phys_state(lchnk)%t(i,k)
-
- if (sIndxO2p > 0)cbuffer(cpter(i,k)+ibuffO2p) = mmrPO2p_phys(i,k)
- if (sIndxNOp > 0)cbuffer(cpter(i,k)+ibuffNOp) = mmrPNOp_phys(i,k)
- if (sIndxN2p > 0)cbuffer(cpter(i,k)+ibuffN2p) = mmrPN2p_phys(i,k)
- if (sIndxOp > 0) cbuffer(cpter(i,k)+ibuffOp) = mmrPOp_phys(i,k)
-
- end do
-
- end do
-
- end do
-
- call t_barrierf('sync_chk_to_blk', grid%commxy)
- call t_startf ('chunk_to_block')
- call transpose_chunk_to_block(tsize, cbuffer, bbuffer)
- call t_stopf ('chunk_to_block')
-
- if (iam < grid%npes_xy) then
- call chunk_to_block_recv_pters(iam+1,blksiz,pver+1,tsize,bpter)
- endif
-
- do j=jfirstxy,jlastxy
- do k=1,km
- do i=ifirstxy,ilastxy
- ib = (j-jfirstxy)*(ilastxy-ifirstxy+1) + (i-ifirstxy+1)
-
- sigma_ped_blck(i,j,k) = bbuffer(bpter(ib,k)+0)
- sigma_hall_blck(i,j,k) = bbuffer(bpter(ib,k)+1)
- te_blck(i,j,k) = bbuffer(bpter(ib,k)+2)
- ti_blck(i,j,k) = bbuffer(bpter(ib,k)+3)
- zi_blck(i,j,k) = bbuffer(bpter(ib,k)+4)
- zm_blck(i,j,k) = bbuffer(bpter(ib,k)+5)
- ui_blck(i,j,k) = bbuffer(bpter(ib,k)+6)
- vi_blck(i,j,k) = bbuffer(bpter(ib,k)+7)
- wi_blck(i,j,k) = bbuffer(bpter(ib,k)+8)
- omega_blck(i,j,k) = bbuffer(bpter(ib,k)+9)
- tn_blck(i,j,k) = bbuffer(bpter(ib,k)+10)
-
- if (sIndxO2p > 0) o2pmmr_blck(i,j,k) = bbuffer(bpter(ib,k)+ibuffO2p)
- if (sIndxNOp > 0) nopmmr_blck(i,j,k) = bbuffer(bpter(ib,k)+ibuffNOp)
- if (sIndxN2p > 0) n2pmmr_blck(i,j,k) = bbuffer(bpter(ib,k)+ibuffN2p)
- if (sIndxOp > 0) opmmr_blck(i,j,k) = bbuffer(bpter(ib,k)+ibuffOp)
-
- enddo
- enddo
- enddo
-
- deallocate(bpter)
- deallocate(bbuffer)
- deallocate(cbuffer)
-
- endif phys2blcks_local
-
- !-------------------------------------------------------------------------------------------
- ! Set dpie_coupling input ions if they are advected ...
- !-------------------------------------------------------------------------------------------
- if (ixo2p > 0) then
- o2pmmr_blck => tracer(ifirstxy:ilastxy,jfirstxy:jlastxy,1:km,ixo2p)
- endif
- if (ixnop > 0) then
- nopmmr_blck => tracer(ifirstxy:ilastxy,jfirstxy:jlastxy,1:km,ixnop)
- endif
- if (ixn2p > 0) then
- n2pmmr_blck => tracer(ifirstxy:ilastxy,jfirstxy:jlastxy,1:km,ixn2p)
- endif
- if (ixop > 0) then
- opmmr_blck => tracer(ifirstxy:ilastxy,jfirstxy:jlastxy,1:km,ixop)
- endif
-
- !------------------------------------
- ! Get neutrals from advected tracers array
- !------------------------------------
-
- o2mmr_blck => tracer(ifirstxy:ilastxy,jfirstxy:jlastxy,1:km,ixo2)
- o1mmr_blck => tracer(ifirstxy:ilastxy,jfirstxy:jlastxy,1:km,ixo)
- h1mmr_blck => tracer(ifirstxy:ilastxy,jfirstxy:jlastxy,1:km,ixh)
-
- !
- ! Make geopotential height (m) for d_pie_coupling.
- !
- do k=1,km
- do j=jfirstxy,jlastxy
- do i=ifirstxy,ilastxy
- zi_blck(i,j,k) = zi_blck(i,j,k)+phis(i,j)/gravit ! phis is redundant in k
- zm_blck(i,j,k) = zm_blck(i,j,k)+phis(i,j)/gravit ! phis is redundant in k
- enddo
- enddo
- enddo
-
- call t_startf('d_pie_coupling')
-
- if (iam < grid%npes_xy) then
- ! waccmx ionosphere electro-dynamics -- transports O+ and provides updates to ion drift velocities
- call d_pie_coupling(omega_blck,pexy,zi_blck,zm_blck,wuxy,wvxy,tn_blck, &
- sigma_ped_blck,sigma_hall_blck,te_blck,ti_blck, &
- o2mmr_blck,o1mmr_blck,h1mmr_blck,o2pmmr_blck,nopmmr_blck,n2pmmr_blck, &
- opmmr_blck,opmmrtm1_blck,ui_blck,vi_blck,wi_blck, &
- rmassO2,rmassO1,rmassH,rmassN2,rmassO2p,rmassNOp,rmassN2p, rmassOp, &
- ifirstxy,ilastxy, jfirstxy,jlastxy)
- endif
-
- call t_stopf ('d_pie_coupling')
-
- !
- !----------------------------------------
- ! Put data back in to state%q or pbuf
- !----------------------------------------
- if (ixop > 0) then
- tracer(ifirstxy:ilastxy,jfirstxy:jlastxy,1:km,ixop) = opmmr_blck(ifirstxy:ilastxy,jfirstxy:jlastxy,1:km)
- endif
-
- ! blocks --> physics chunks
-
- blcks2phys_local: if (local_dp_map) then
-
- chnk_loop1 : do lchnk = begchunk,endchunk
- ncol = phys_state(lchnk)%ncol
- call get_lon_all_p(lchnk, ncol, lons)
- call get_lat_all_p(lchnk, ncol, lats)
-
- pbuf_chnk => pbuf_get_chunk(pbuf2d, lchnk)
-
- call pbuf_get_field(pbuf_chnk, index_ui, ui_phys)
- call pbuf_get_field(pbuf_chnk, index_vi, vi_phys)
- call pbuf_get_field(pbuf_chnk, index_wi, wi_phys)
- if (sIndxOp > 0) then
- call pbuf_get_field(pbuf_chnk, slvd_pbf_ndx, mmrPOp_phys, &
- start=(/1,1,sIndxOp/), kount=(/pcols,pver,1/) )
- endif
- do k=1,km
- do i=1,ncol
- ic = lons(i)
- jc = lats(i)
- ui_phys(i,k) = ui_blck(ic,jc,k)
- vi_phys(i,k) = vi_blck(ic,jc,k)
- wi_phys(i,k) = wi_blck(ic,jc,k)
- if (sIndxOp > 0) mmrPOp_phys(i,k) = opmmr_blck(ic,jc,k)
- end do
- end do
-
- if (ionos_edyn_active) then
- call outfld ( 'UI', ui_phys, pcols, lchnk )
- call outfld ( 'VI', vi_phys, pcols, lchnk )
- call outfld ( 'WI', wi_phys, pcols, lchnk )
- if (write_inithist()) then
- call outfld ( 'UI&IC', ui_phys, pcols, lchnk )
- call outfld ( 'VI&IC', vi_phys, pcols, lchnk )
- call outfld ( 'WI&IC', wi_phys, pcols, lchnk )
- endif
- endif
-
- end do chnk_loop1
-
- else ! blcks2phys_local
-
- if (sIndxOp > 0) then
- tsize = 4 ! for ui,vi,wi,op
- else
- tsize = 3 ! for ui,vi,wi
- endif
- tsize=tsize+1
-
- blksiz = (jlastxy-jfirstxy+1)*(ilastxy-ifirstxy+1)
- allocate( bpter(blksiz,0:km),stat=astat )
- allocate( bbuffer(tsize*block_buf_nrecs),stat=astat )
- allocate( cbuffer(tsize*chunk_buf_nrecs),stat=astat )
-
- if (iam < grid%npes_xy) then
- call block_to_chunk_send_pters(iam+1,blksiz,km+1,tsize,bpter)
- endif
-
- do j=jfirstxy,jlastxy
- do i=ifirstxy,ilastxy
- ib = (j-jfirstxy)*(ilastxy-ifirstxy+1) + (i-ifirstxy+1)
-
- do k=1,km
-
- bbuffer(bpter(ib,k)) = ui_blck(i,j,k)
- bbuffer(bpter(ib,k)+1) = vi_blck(i,j,k)
- bbuffer(bpter(ib,k)+2) = wi_blck(i,j,k)
- if (sIndxOp > 0) bbuffer(bpter(ib,k)+3) = opmmr_blck(i,j,k)
-
- end do
- end do
- end do
-
- call t_barrierf('sync_ionos_blk_to_chk', grid%commxy)
- call t_startf ('ionos_block_to_chunk')
- call transpose_block_to_chunk(tsize, bbuffer, cbuffer)
- call t_stopf ('ionos_block_to_chunk')
-
- chnk_loop2: do lchnk = begchunk,endchunk
- ncol = phys_state(lchnk)%ncol
-
- pbuf_chnk => pbuf_get_chunk(pbuf2d, lchnk)
-
- call pbuf_get_field(pbuf_chnk, index_ui, ui_phys)
- call pbuf_get_field(pbuf_chnk, index_vi, vi_phys)
- call pbuf_get_field(pbuf_chnk, index_wi, wi_phys)
- if (sIndxOp > 0) then
- call pbuf_get_field(pbuf_chnk, slvd_pbf_ndx, mmrPOp_phys, &
- start=(/1,1,sIndxOp/), kount=(/pcols,pver,1/) )
- endif
-
- call block_to_chunk_recv_pters(lchnk,pcols,pver+1,tsize,cpter)
-
- do i=1,ncol
-
- do k=1,km
- ui_phys(i,k) = cbuffer(cpter(i,k))
- vi_phys(i,k) = cbuffer(cpter(i,k)+1)
- wi_phys(i,k) = cbuffer(cpter(i,k)+2)
- if (sIndxOp > 0) then
- mmrPOp_phys(i,k) = cbuffer(cpter(i,k)+3)
- endif
- end do ! k=1,km
- end do ! i=1,ncol
-
- if (ionos_edyn_active) then
- call outfld ( 'UI', ui_phys, pcols, lchnk )
- call outfld ( 'VI', vi_phys, pcols, lchnk )
- call outfld ( 'WI', wi_phys, pcols, lchnk )
- if (write_inithist()) then
- call outfld ( 'UI&IC', ui_phys, pcols, lchnk )
- call outfld ( 'VI&IC', vi_phys, pcols, lchnk )
- call outfld ( 'WI&IC', wi_phys, pcols, lchnk )
- endif
- endif
-
- end do chnk_loop2
-
- deallocate(bpter)
- deallocate(bbuffer)
- deallocate(cbuffer)
-
- endif blcks2phys_local
-
- if (sIndxOp>0) then
- deallocate(opmmr_blck)
- nullify(opmmr_blck)
- endif
- if (sIndxO2p>0) then
- deallocate(o2pmmr_blck)
- nullify(o2pmmr_blck)
- endif
- if (sIndxNOp>0) then
- deallocate(nopmmr_blck)
- nullify(nopmmr_blck)
- endif
- if (sIndxN2p>0) then
- deallocate(n2pmmr_blck)
- nullify(n2pmmr_blck)
- endif
-
- deallocate( wuxy )
- deallocate( wvxy )
- deallocate( sigma_ped_blck )
- deallocate( sigma_hall_blck )
- deallocate( ti_blck )
- deallocate( te_blck )
- deallocate( zi_blck )
- deallocate( ui_blck )
- deallocate( vi_blck )
- deallocate( wi_blck )
- deallocate( omega_blck )
- deallocate( tn_blck )
-
- endif ionos_cpl
-
- end subroutine ionosphere_run2
-
- !--------------------------------------------------------------------------------
- !--------------------------------------------------------------------------------
- subroutine ionosphere_init_restart(File)
- use pio, only: file_desc_t, pio_double, pio_def_var
- use cam_pio_utils, only: cam_pio_def_dim
- use dyn_grid, only: get_horiz_grid_dim_d
-
- type(File_desc_t), intent(inout) :: File
-
- integer :: ierr,hdim1,hdim2, dimids(3)
-
- call get_horiz_grid_dim_d(hdim1, hdim2)
-
- call cam_pio_def_dim(File, 'lon', hdim1, dimids(1), existOK=.true.)
- call cam_pio_def_dim(File, 'lat', hdim2, dimids(2), existOK=.true.)
- call cam_pio_def_dim(File, 'lev', pver, dimids(3), existOK=.true.)
-
- if (ionos_xport_active) then
- ierr = PIO_Def_Var(File, 'Optm1', pio_double, dimids, Optm1_vdesc)
- endif
- end subroutine ionosphere_init_restart
-
- !--------------------------------------------------------------------------------
- !--------------------------------------------------------------------------------
- subroutine ionosphere_write_restart(File)
- use pio, only: io_desc_t, file_desc_t, pio_write_darray, pio_initdecomp, pio_double
- use cam_pio_utils, only: pio_subsystem
- use dyn_grid, only: get_horiz_grid_dim_d
-
- type(File_desc_t), intent(inout) :: File
-
- type(io_desc_t) :: iodesc3d
- integer :: hdim1, hdim2
- integer, pointer :: ldof(:)
- integer :: ierr
-
- if (ionos_xport_active) then
- call get_horiz_grid_dim_d(hdim1, hdim2)
- ldof => get_restart_decomp(hdim1, hdim2, pver)
- call pio_initdecomp(pio_subsystem, pio_double, (/hdim1, hdim2, pver/), ldof, iodesc3d)
- deallocate(ldof)
-
- call pio_write_darray(File, Optm1_vdesc, iodesc3d, opmmrtm1_blck, ierr)
- endif
-
- end subroutine ionosphere_write_restart
-
- !--------------------------------------------------------------------------------
- !--------------------------------------------------------------------------------
- subroutine ionosphere_read_restart(File)
- use pio, only: io_desc_t, file_desc_t, pio_inq_varid, pio_read_darray, pio_initdecomp, pio_double
- use cam_pio_utils, only: pio_subsystem
- use dyn_grid, only: get_horiz_grid_dim_d
-
- type(file_desc_t), intent(inout) :: File
-
- integer :: ierr
- type(io_desc_t) :: iodesc3d
- integer :: hdim1, hdim2
- integer, pointer :: ldof(:)
-
- if (ionos_xport_active) then
- call ionosphere_alloc
-
- call get_horiz_grid_dim_d(hdim1, hdim2)
- ldof => get_restart_decomp(hdim1, hdim2, pver)
- call pio_initdecomp(pio_subsystem, pio_double, (/hdim1, hdim2, pver/), ldof, iodesc3d)
- deallocate(ldof)
-
- ierr = pio_inq_varid(File, 'Optm1', Optm1_vdesc)
- call pio_read_darray(File, Optm1_vdesc, iodesc3d, opmmrtm1_blck, ierr)
- endif
-
- end subroutine ionosphere_read_restart
-
- !--------------------------------------------------------------------------------
- !--------------------------------------------------------------------------------
- subroutine ionosphere_final
-
-#ifdef WACCMX_EDYN_ESMF
- use edyn_esmf, only: edyn_esmf_final
-
- call edyn_esmf_final()
-#endif
-
- if (allocated(opmmrtm1_blck)) deallocate(opmmrtm1_blck)
-
- end subroutine ionosphere_final
-
-!=========================================================================================
- !--------------------------------------------------------------------------------
- !--------------------------------------------------------------------------------
- subroutine ionosphere_read_ic()
-
- use pio, only: file_desc_t
- use ncdio_atm, only: infld
- use cam_initfiles, only: initial_file_get_id
-
- type(file_desc_t), pointer :: fh_ini ! PIO filehandle
-
- type (t_fvdycore_grid), pointer :: grid
- integer :: ifirstxy,ilastxy,jfirstxy,jlastxy,km
- logical :: readvar
-
- if ( ionos_xport_active ) then
- call ionosphere_alloc()
-
- fh_ini => initial_file_get_id()
- grid => get_dyn_state_grid()
- ifirstxy = grid%ifirstxy
- ilastxy = grid%ilastxy
- jfirstxy = grid%jfirstxy
- jlastxy = grid%jlastxy
- km = grid%km
-
- ! try reading in OpTM1 from the IC file
- call infld('OpTM1', fh_ini, 'lon', 'lat', 'lev', ifirstxy, ilastxy, jfirstxy, jlastxy, &
- 1, km, opmmrtm1_blck, readvar, gridname='fv_centers')
-
- if (.not.readvar) then
- ! if OpTM1 is not included in the IC file then try using O+
- call infld('Op', fh_ini, 'lon', 'lat', 'lev', ifirstxy, ilastxy, jfirstxy, jlastxy, &
- 1, km, opmmrtm1_blck, readvar, gridname='fv_centers')
- endif
- endif
-
- end subroutine ionosphere_read_ic
+ use shr_kind_mod, only: r8 => shr_kind_r8, cl=>shr_kind_cl
+ use cam_abortutils, only: endrun
+ use ppgrid, only: begchunk, endchunk, pcols, pver
+ use phys_grid, only: get_ncols_p
+
+ use dpie_coupling, only: d_pie_init
+ use short_lived_species, only: slvd_index, slvd_pbf_ndx => pbf_idx ! Routines to access short lived species
+
+ use chem_mods, only: adv_mass ! Array holding mass values for short lived species
+ use mo_chem_utls, only: get_spc_ndx ! Routine to get index of adv_mass array for short lived species
+ use physics_buffer, only: pbuf_get_chunk, pbuf_get_field
+ use physics_buffer, only: pbuf_get_index
+
+ use constituents, only: cnst_get_ind, cnst_mw
+ use physconst, only: gravit
+ use oplus, only: oplus_init
+ use edyn_init, only: edynamo_init
+ use pio, only: var_desc_t
+ use perf_mod, only: t_startf, t_stopf
+ use epotential_params, only: epot_active, epot_crit_colats
+ implicit none
+
+ private
+
+ public :: ionosphere_readnl
+ public :: ionosphere_init
+ public :: ionosphere_run1
+ public :: ionosphere_run2
+ public :: ionosphere_init_restart
+ public :: ionosphere_write_restart
+ public :: ionosphere_read_restart
+ public :: ionosphere_final
+
+ ! private data
+
+ ! opmmrtm1_phys is O+ at previous time step (phys grid decomposed)
+ ! It needs to persist from time-step to time-step and across restarts
+ ! On physics grid
+ real(r8), allocatable :: opmmrtm1_phys(:,:,:)
+ type(var_desc_t) :: Optm1_vdesc
+ logical :: opmmrtm1_initialized
+
+ integer :: index_ped, index_hall, index_te, index_ti
+ integer :: index_ui, index_vi, index_wi
+
+ integer :: ixo2=-1, ixo=-1, ixh=-1
+ integer :: ixo2p=-1, ixnop=-1, ixn2p=-1, ixop=-1
+
+ ! indices for accessing ions in pbuf when non-advected
+ integer :: sIndxOp=-1, sIndxO2p=-1, sIndxNOp=-1, sIndxN2p=-1
+
+ real(r8) :: rmassO2 ! O2 molecular weight kg/kmol
+ real(r8) :: rmassO1 ! O atomic weight kg/kmol
+ real(r8) :: rmassH ! H atomic weight kg/kmol
+ real(r8) :: rmassN2 ! N2 molecular weight kg/kmol
+ real(r8) :: rmassO2p ! O2+ molecular weight kg/kmol
+ real(r8) :: rmassNOp ! NO+ molecular weight kg/kmol
+ real(r8) :: rmassN2p ! N2+ molecular weight kg/kmol
+ real(r8) :: rmassOp ! O+ molecular weight kg/kmol
+
+ ! ionos_edyn_active == .true. will activate the edynamo which will
+ ! generate ion drift velocities used in oplus transport, otherwise
+ ! empirical ion drifts calculated in exbdrift (physics) will be used.
+ logical, public, protected :: ionos_edyn_active = .true.
+ logical, protected :: ionos_xport_active = .true. ! if true, call d_pie_coupling
+ !
+ logical, public, protected :: ionos_oplus_xport = .true. ! if true, call sub oplus (based on tiegcm oplus.F)
+ integer, public, protected :: ionos_xport_nsplit = 5 ! number of substeps for O+ transport per model time step
+ logical, public, protected :: oplus_ring_polar_filter = .false. ! switch to apply ring polar filter
+
+ real(r8) :: oplus_adiff_limiter = 1.5e+8_r8 ! limiter for ambipolar diffusion coefficient
+ real(r8) :: oplus_shapiro_const = 0.03_r8 ! shapiro constant for spatial smoother
+ logical :: oplus_enforce_floor = .true. ! switch to apply Stan's floor
+
+ integer, parameter :: max_num_files = 20
+ character(len=cl) :: wei05_coefs_file = 'NONE' !'wei05sc.nc'
+ character(len=cl) :: amienh_files(max_num_files) = 'NONE'
+ character(len=cl) :: amiesh_files(max_num_files) = 'NONE'
+ character(len=cl) :: ltr_files(max_num_files) = 'NONE'
+
+
+ character(len=16) :: ionos_epotential_model = 'none'
+ logical :: ionos_epotential_amie = .false.
+ logical :: ionos_epotential_ltr = .false.
+ integer :: indxefx=-1, indxkev=-1
+
+ integer :: oplus_nlon, oplus_nlat ! Oplus grid
+ integer :: ionos_npes = -1
+
+ logical :: state_debug_checks = .false.
+
+ integer :: mag_nlon=0, mag_nlat=0, mag_nlev=0, mag_ngrid=0
+
+ contains
+
+ !---------------------------------------------------------------------------
+ !---------------------------------------------------------------------------
+ subroutine ionosphere_readnl( nlfile )
+
+ use namelist_utils, only: find_group_name
+ use units, only: getunit, freeunit
+ use spmd_utils, only: masterproc, mpicom, masterprocid
+ use spmd_utils, only: mpi_real8, mpi_logical, mpi_integer, mpi_character
+ use cam_logfile, only: iulog
+
+ character(len=*), intent(in) :: nlfile ! filepath for file containing namelist input
+
+ ! Local variables
+ integer :: unitn, ierr, ipos
+ integer :: oplus_grid(2)
+ character(len=8) :: edyn_grid
+ integer :: total_pes
+ character(len=*), parameter :: subname = 'ionosphere_readnl'
+
+ namelist /ionosphere_nl/ ionos_xport_active, ionos_edyn_active, ionos_oplus_xport, ionos_xport_nsplit
+ namelist /ionosphere_nl/ oplus_adiff_limiter, oplus_shapiro_const, oplus_enforce_floor, oplus_ring_polar_filter
+ namelist /ionosphere_nl/ ionos_epotential_model, ionos_epotential_amie, ionos_epotential_ltr, wei05_coefs_file
+ namelist /ionosphere_nl/ amienh_files, amiesh_files, wei05_coefs_file, ltr_files
+ namelist /ionosphere_nl/ epot_crit_colats
+ namelist /ionosphere_nl/ ionos_npes
+ namelist /ionosphere_nl/ oplus_grid, edyn_grid
+
+ oplus_grid = 0
+
+ ! Read namelist
+ if (masterproc) then
+ unitn = getunit()
+ open( unitn, file=trim(nlfile), status='old' )
+ call find_group_name(unitn, 'ionosphere_nl', status=ierr)
+ if (ierr == 0) then
+ read(unitn, ionosphere_nl, iostat=ierr)
+ if (ierr /= 0) then
+ call endrun(subname // ':: ERROR reading namelist')
+ end if
+ end if
+ close(unitn)
+ call freeunit(unitn)
+ end if
+
+ ! Broadcast namelist variables
+ call mpi_bcast(ionos_xport_active, 1, mpi_logical, masterprocid, mpicom, ierr)
+ call mpi_bcast(ionos_edyn_active, 1, mpi_logical, masterprocid, mpicom, ierr)
+ call mpi_bcast(ionos_oplus_xport, 1, mpi_logical, masterprocid, mpicom, ierr)
+ call mpi_bcast(ionos_xport_nsplit, 1, mpi_integer, masterprocid, mpicom, ierr)
+ call mpi_bcast(oplus_adiff_limiter, 1, mpi_real8, masterprocid, mpicom, ierr)
+ call mpi_bcast(ionos_epotential_model, len(ionos_epotential_model), mpi_character, masterprocid, mpicom, ierr)
+ call mpi_bcast(ionos_epotential_amie,1, mpi_logical, masterprocid, mpicom, ierr)
+ call mpi_bcast(ionos_epotential_ltr,1, mpi_logical, masterprocid, mpicom, ierr)
+ call mpi_bcast(wei05_coefs_file, len(wei05_coefs_file), mpi_character, masterprocid, mpicom, ierr)
+ call mpi_bcast(amienh_files, max_num_files*len(amienh_files(1)), mpi_character, masterprocid, mpicom, ierr)
+ call mpi_bcast(amiesh_files, max_num_files*len(amiesh_files(1)), mpi_character, masterprocid, mpicom, ierr)
+ call mpi_bcast(ltr_files, max_num_files*len(ltr_files(1)), mpi_character, masterprocid, mpicom, ierr)
+ call mpi_bcast(oplus_shapiro_const, 1, mpi_real8, masterprocid, mpicom, ierr)
+ call mpi_bcast(oplus_enforce_floor, 1, mpi_logical, masterprocid, mpicom, ierr)
+ call mpi_bcast(oplus_ring_polar_filter,1, mpi_logical, masterprocid, mpicom, ierr)
+ call mpi_bcast(epot_crit_colats, 2, mpi_real8, masterprocid, mpicom, ierr)
+ call mpi_bcast(ionos_npes, 1, mpi_integer, masterprocid, mpicom, ierr)
+ call mpi_bcast(oplus_grid, 2, mpi_integer, masterprocid, mpicom, ierr)
+ call mpi_bcast(edyn_grid, 8, mpi_character, masterprocid, mpicom, ierr)
+
+ ! Extract grid settings
+ oplus_nlon = oplus_grid(1)
+ oplus_nlat = oplus_grid(2)
+
+ ipos = scan(edyn_grid,'x')
+ read(edyn_grid(:ipos-1),*) mag_nlon
+ read(edyn_grid(ipos+1:),*) mag_nlat
+
+ mag_nlev = 5 + int(log(real(mag_nlon,r8)/80._r8)/log(2._r8))
+ mag_ngrid = (mag_nlon/10)*2
+
+ ! Set npes in case of default settings
+ call mpi_comm_size(mpicom, total_pes, ierr)
+ if (ionos_npes<1) then
+ ionos_npes = total_pes
+ else if (ionos_npes>total_pes) then
+ call endrun('ionosphere_readnl: ionos_npes > total_pes')
+ end if
+
+ ! log the user settings
+ if (masterproc) then
+ write(iulog,*) 'ionosphere_readnl: ionos_xport_active = ', ionos_xport_active
+ write(iulog,*) 'ionosphere_readnl: ionos_edyn_active = ', ionos_edyn_active
+ write(iulog,*) 'ionosphere_readnl: ionos_oplus_xport = ', ionos_oplus_xport
+ write(iulog,*) 'ionosphere_readnl: ionos_xport_nsplit = ', ionos_xport_nsplit
+ write(iulog,*) 'ionosphere_readnl: ionos_epotential_model = ', trim(ionos_epotential_model)
+ write(iulog,*) 'ionosphere_readnl: ionos_epotential_amie = ', ionos_epotential_amie
+ write(iulog,*) 'ionosphere_readnl: ionos_epotential_ltr = ', ionos_epotential_ltr
+ write(iulog,'(a,2(g12.4))') &
+ 'ionosphere_readnl: epot_crit_colats = ', epot_crit_colats
+ write(iulog,'(a,i0)') 'ionosphere_readnl: ionos_npes = ',ionos_npes
+ write(iulog,*) 'ionosphere_readnl: oplus_adiff_limiter = ', oplus_adiff_limiter
+ write(iulog,*) 'ionosphere_readnl: oplus_shapiro_const = ', oplus_shapiro_const
+ write(iulog,*) 'ionosphere_readnl: oplus_enforce_floor = ', oplus_enforce_floor
+ write(iulog,*) 'ionosphere_readnl: oplus_ring_polar_filter= ', oplus_ring_polar_filter
+ if (ionos_xport_active) then
+ write(iulog,'(a,i0)') 'ionosphere_readnl: oplus_nlon = ',oplus_nlon
+ write(iulog,'(a,i0)') 'ionosphere_readnl: oplus_nlat = ',oplus_nlat
+ write(iulog,'(a,i0)') 'ionosphere_readnl: edyn_grid = '//edyn_grid
+ write(iulog,'(a,i0)') 'ionosphere_readnl: mag_nlon = ',mag_nlon
+ write(iulog,'(a,i0)') 'ionosphere_readnl: mag_nlat = ',mag_nlat
+ write(iulog,'(a,i0)') 'ionosphere_readnl: mag_nlev = ',mag_nlev
+ write(iulog,'(a,i0)') 'ionosphere_readnl: mag_ngrid = ',mag_ngrid
+ end if
+ end if
+ epot_active = .true.
+
+ end subroutine ionosphere_readnl
+
+ !---------------------------------------------------------------------------
+ !---------------------------------------------------------------------------
+ subroutine ionosphere_init()
+ use spmd_utils, only: mpicom, iam
+ use physics_buffer, only: pbuf_add_field, dtype_r8
+ use cam_control_mod, only: initial_run
+ use cam_history, only: addfld, add_default, horiz_only
+ use edyn_mpi, only: mp_init
+ use edyn_geogrid, only: set_geogrid
+ use edyn_maggrid, only: alloc_maggrid
+ use mo_apex, only: mo_apex_init1
+ ! Hybrid level definitions:
+ use ref_pres, only: pref_mid ! target alev(pver) midpoint levels
+ use ref_pres, only: pref_edge ! target ailev(pverp) interface levels
+ use amie_module, only: init_amie
+ use ltr_module, only: init_ltr
+ use wei05sc, only: weimer05_init
+ use phys_control, only: phys_getopts
+
+ ! local variables:
+ integer :: sIndx
+ character(len=*), parameter :: subname = 'ionosphere_init'
+
+ call phys_getopts(state_debug_checks_out=state_debug_checks)
+
+ if ( ionos_epotential_amie .or. ionos_epotential_ltr) then
+ call pbuf_add_field('AUREFX', 'global', dtype_r8, (/pcols/), indxefx) ! Prescribed Energy flux
+ call pbuf_add_field('AURKEV', 'global', dtype_r8, (/pcols/), indxkev) ! Prescribed Mean energy
+ end if
+ if (initial_run) then
+ ! Read initial conditions (O+) on physics grid
+ call ionosphere_read_ic()
+ end if
+
+ op_transport: if (ionos_xport_active) then
+
+ index_ped = pbuf_get_index('PedConduct')
+ index_hall = pbuf_get_index('HallConduct')
+
+ index_te = pbuf_get_index('TElec')
+ index_ti = pbuf_get_index('TIon')
+ !
+ ! pbuf indices to empirical ion drifts, to be passed to oplus_xport,
+ ! if ionos_edyn_active is false.
+ !
+ index_ui = pbuf_get_index('UI')
+ index_vi = pbuf_get_index('VI')
+ index_wi = pbuf_get_index('WI')
+
+ !---------------------------------------------------------------------
+ ! Get indices for neutrals to get mixing ratios from state%q and masses
+ !---------------------------------------------------------------------
+ call cnst_get_ind('O2' ,ixo2 )
+ call cnst_get_ind('O' ,ixo )
+ call cnst_get_ind('H' ,ixh )
+ !------------------------------------
+ ! Get neutral molecular weights
+ !------------------------------------
+ rmassO2 = cnst_mw(ixo2)
+ rmassO1 = cnst_mw(ixo)
+ rmassH = cnst_mw(ixh)
+ rmassN2 = 28._r8
+
+ call cnst_get_ind('Op',ixop, abort=.false.)
+ if (ixop > 0) then
+ rMassOp = cnst_mw(ixop)
+ else
+ sIndxOp = slvd_index( 'Op' )
+ if (sIndxOp > 0) then
+ sIndx = get_spc_ndx( 'Op' )
+ rmassOp = adv_mass(sIndx)
+ else
+ call endrun(subname//': Cannot find state or pbuf index for Op')
+ end if
+ end if
+
+ call cnst_get_ind('O2p',ixo2p, abort=.false.)
+ if (ixo2p > 0) then
+ rMassO2p = cnst_mw(ixo2p)
+ else
+ sIndxO2p = slvd_index( 'O2p' )
+ if (sIndxO2p > 0) then
+ sIndx = get_spc_ndx( 'O2p' )
+ rmassO2p = adv_mass(sIndx)
+ else
+ call endrun(subname//': Cannot find state or pbuf index for O2p')
+ end if
+ end if
+
+ call cnst_get_ind('NOp',ixnop, abort=.false.)
+ if (ixnop > 0) then
+ rMassNOp = cnst_mw(ixnop)
+ else
+ sIndxNOp = slvd_index( 'NOp' )
+ if (sIndxNOp > 0) then
+ sIndx = get_spc_ndx( 'NOp' )
+ rmassNOp = adv_mass(sIndx)
+ else
+ call endrun(subname//': Cannot find state or pbuf index for NOp')
+ end if
+ end if
+
+ call cnst_get_ind('N2p',ixn2p, abort=.false.)
+ if (ixn2p > 0) then
+ rMassN2p = cnst_mw(ixn2p)
+ else
+ sIndxN2p = slvd_index( 'N2p' )
+ if (sIndxN2p > 0) then
+ sIndx = get_spc_ndx( 'N2p' )
+ rmassN2p = adv_mass(sIndx)
+ else
+ call endrun(subname//': Cannot find state or pbuf index for N2p')
+ end if
+ end if
+
+ call d_pie_init(ionos_edyn_active, ionos_oplus_xport, &
+ ionos_xport_nsplit, epot_crit_colats)
+
+ call alloc_maggrid( mag_nlon, mag_nlat, mag_nlev, mag_ngrid )
+
+ call mp_init(mpicom, ionos_npes, oplus_nlon, oplus_nlat, pver) ! set ntask,mytid
+ ! set global geographic grid (sets coordinate distribution)
+ ! lon0, lon1, etc. are set here
+ call set_geogrid(oplus_nlon, oplus_nlat, pver, ionos_npes, iam, &
+ pref_mid, pref_edge)
+
+ call edynamo_init(mpicom)
+
+ call ionosphere_alloc()
+
+ call oplus_init(oplus_adiff_limiter, oplus_shapiro_const, &
+ oplus_enforce_floor, oplus_ring_polar_filter)
+
+ call addfld('OpTM1&IC', (/ 'lev' /), 'I', 'kg/kg', &
+ 'O+ at time step minus 1', gridname='physgrid')
+ call add_default ('OpTM1&IC',0, 'I')
+
+ end if op_transport
+
+ ! This has to be after edynamo_init (where maggrid is initialized)
+ call mo_apex_init1()
+
+ if (ionos_edyn_active) then
+ call addfld ('UI',(/ 'lev' /),'I','m/s', 'UI Zonal ion drift from edynamo')
+ call addfld ('VI',(/ 'lev' /),'I','m/s', 'VI Meridional ion drift from edynamo')
+ call addfld ('WI',(/ 'lev' /),'I','m/s', 'WI Vertical ion drift from edynamo')
+ call addfld ('UI&IC', (/ 'lev' /), 'I','m/s', 'Zonal ion drift velocity')
+ call addfld ('VI&IC', (/ 'lev' /), 'I','m/s', 'Meridional ion drift velocity')
+ call addfld ('WI&IC', (/ 'lev' /), 'I','m/s', 'Vertical ion drift velocity')
+ call add_default ('UI&IC', 0, ' ')
+ call add_default ('VI&IC', 0, ' ')
+ call add_default ('WI&IC', 0, ' ')
+ end if
+ if ( ionos_epotential_amie ) then
+ call init_amie(amienh_files,amiesh_files)
+ call addfld ('amie_efx_phys', horiz_only, 'I', 'mW/m2', 'AMIE energy flux')
+ call addfld ('amie_kev_phys', horiz_only, 'I', 'keV', 'AMIE mean energy')
+ end if
+ if ( ionos_epotential_ltr ) then
+ call init_ltr(ltr_files)
+ call addfld ('ltr_efx_phys', horiz_only, 'I', 'mW/m2', 'LTR energy flux')
+ call addfld ('ltr_kev_phys', horiz_only, 'I', 'keV', 'LTR mean energy')
+ end if
+ if ( trim(ionos_epotential_model) == 'weimer' ) then
+ call weimer05_init(wei05_coefs_file)
+ end if
+
+ ! d_pie_coupling diagnostics
+ call addfld ('Z3GM', (/ 'lev' /), 'I', 'm', &
+ 'Geometric height', gridname='physgrid')
+ call addfld ('Z3GMI', (/ 'lev' /), 'I', 'm', &
+ 'Geometric height (Interfaces)', gridname='physgrid')
+
+ end subroutine ionosphere_init
- !--------------------------------------------------------------------------------
- !--------------------------------------------------------------------------------
- subroutine ionosphere_alloc
-
- type(T_FVDYCORE_GRID),pointer :: grid ! FV Dynamics grid
- integer :: ifirstxy, ilastxy, jfirstxy, jlastxy, km
- integer :: astat
-
- if (.not. allocated(opmmrtm1_blck)) then
-
- grid => get_dyn_state_grid()
- ifirstxy = grid%ifirstxy
- ilastxy = grid%ilastxy
- jfirstxy = grid%jfirstxy
- jlastxy = grid%jlastxy
- km = grid%km
+ !----------------------------------------------------------------------------
+ !----------------------------------------------------------------------------
+ subroutine ionosphere_run1(pbuf2d)
+ use physics_buffer, only: physics_buffer_desc
+ use cam_history, only: outfld, write_inithist
+ ! Gridded component call
+ use edyn_grid_comp, only: edyn_grid_comp_run1
- allocate(opmmrtm1_blck(ifirstxy:ilastxy,jfirstxy:jlastxy,km),stat=astat)
- if (astat /= 0) call endrun('ionosphere_init: failed to allocate opmmrtm1_blck')
- opmmrtm1_blck = 0._r8
+ ! args
+ type(physics_buffer_desc), pointer :: pbuf2d(:,:)
- endif
+ ! local vars
+ integer :: i, j, lchnk, blksize ! indices
+ type(physics_buffer_desc), pointer :: pbuf_chnk(:)
- end subroutine ionosphere_alloc
+ real(r8), pointer :: pbuf_efx(:) ! Pointer to prescribed energy flux in pbuf
+ real(r8), pointer :: pbuf_kev(:) ! Pointer to prescribed mean energy in pbuf
+ integer :: ncol
+ real(r8), pointer :: prescr_efx(:) ! prescribed energy flux
+ real(r8), pointer :: prescr_kev(:) ! prescribed characteristic mean energy
- !--------------------------------------------------------------------------------
- !--------------------------------------------------------------------------------
-function get_restart_decomp(hdim1, hdim2, nlev) result(ldof)
- use dyn_grid, only: get_dyn_grid_parm
+ if( write_inithist() .and. ionos_xport_active ) then
+ do lchnk = begchunk, endchunk
+ call outfld ('OpTM1&IC', opmmrtm1_phys(:,:,lchnk), pcols, lchnk)
+ end do
+ end if
+
+ nullify(prescr_efx)
+ nullify(prescr_kev)
+ prescribed_epot: if ( ionos_epotential_amie .or. ionos_epotential_ltr ) then
+ blksize = 0
+ do lchnk = begchunk, endchunk
+ blksize = blksize + get_ncols_p(lchnk)
+ end do
- ! Get the integer mapping of a variable in the dynamics decomp in memory.
- ! The canonical ordering is as on the file. A 0 value indicates that the
- ! variable is not on the file (eg halo or boundary values)
+ allocate(prescr_efx(blksize))
+ allocate(prescr_kev(blksize))
+
+ ! data assimilated potential
+ call edyn_grid_comp_run1(ionos_epotential_model, &
+ cols=1, cole=blksize, efx_phys=prescr_efx, kev_phys=prescr_kev, &
+ amie_in=ionos_epotential_amie, ltr_in=ionos_epotential_ltr )
+
+ ! transform to pbuf for aurora...
+
+ j = 0
+ chnk_loop1: do lchnk = begchunk, endchunk
+ ncol = get_ncols_p(lchnk)
+ pbuf_chnk => pbuf_get_chunk(pbuf2d, lchnk)
+ call pbuf_get_field(pbuf_chnk, indxefx, pbuf_efx)
+ call pbuf_get_field(pbuf_chnk, indxkev, pbuf_kev)
+
+ do i = 1, ncol
+ j = j + 1
+ pbuf_efx(i) = prescr_efx(j)
+ pbuf_kev(i) = prescr_kev(j)
+ end do
+
+ if ( ionos_epotential_amie ) then
+ call outfld('amie_efx_phys', pbuf_efx, pcols, lchnk)
+ call outfld('amie_kev_phys', pbuf_kev, pcols, lchnk)
+ endif
+ if ( ionos_epotential_ltr) then
+ call outfld('ltr_efx_phys', pbuf_efx, pcols, lchnk )
+ call outfld('ltr_kev_phys', pbuf_kev, pcols, lchnk )
+ end if
+ end do chnk_loop1
+
+ deallocate(prescr_efx, prescr_kev)
+ nullify(prescr_efx)
+ nullify(prescr_kev)
+
+ else
+
+ ! set cross tail potential before physics --
+ ! aurora uses weimer derived potential
+ call edyn_grid_comp_run1(ionos_epotential_model)
+
+ end if prescribed_epot
+
+ end subroutine ionosphere_run1
+
+ !---------------------------------------------------------------------------
+ !---------------------------------------------------------------------------
+ subroutine ionosphere_run2(phys_state, pbuf2d)
+
+ use physics_types, only: physics_state
+ use physics_buffer, only: physics_buffer_desc
+ use cam_history, only: outfld, write_inithist, hist_fld_active
+ ! Gridded component call
+ use edyn_grid_comp, only: edyn_grid_comp_run2
+ use shr_assert_mod, only: shr_assert_in_domain
+ use shr_const_mod, only: SHR_CONST_REARTH ! meters
+
+ ! - pull some fields from pbuf and dyn_in
+ ! - invoke ionosphere/electro-dynamics coupling
+ ! - push some fields back to physics via pbuf...
+
+ ! args
+ type(physics_state), intent(inout) :: phys_state(begchunk:endchunk)
+ type(physics_buffer_desc), pointer :: pbuf2d(:,:)
+
+ ! local vars
+ integer :: i,j,k, lchnk
+ integer :: astat
+
+ type(physics_buffer_desc), pointer :: pbuf_chnk(:)
+
+ real(r8), pointer :: sigma_ped_phys(:,:) ! Pedersen Conductivity from pbuf
+ real(r8), pointer :: sigma_hall_phys(:,:) ! Hall Conductivity from pbuf
+ real(r8), pointer :: te_phys(:,:) ! te from pbuf
+ real(r8), pointer :: ti_phys(:,:) ! ti from pbuf
+ real(r8), pointer :: mmrPO2p_phys(:,:) ! O2+ from pbuf
+ real(r8), pointer :: mmrPNOp_phys(:,:) ! NO+ from pbuf
+ real(r8), pointer :: mmrPN2p_phys(:,:) ! N2+ from pbuf
+ real(r8), pointer :: mmrPOp_phys(:,:) ! O+ from pbuf
+ !
+ ! Empirical ion drifts from exbdrift (to be converted to blocked for dpie_coupling):
+ real(r8), pointer :: ui_phys(:,:) ! zonal ion drift from pbuf
+ real(r8), pointer :: vi_phys(:,:) ! meridional ion drift from pbuf
+ real(r8), pointer :: wi_phys(:,:) ! vertical ion drift from pbuf
+
+ integer :: ncol
+
+ integer :: blksize ! number of columns in 2D block
+
+ real(r8), pointer :: sigma_ped_blck (:,:)
+ real(r8), pointer :: sigma_hall_blck(:,:)
+ real(r8), pointer :: ti_blck(:,:)
+ real(r8), pointer :: te_blck(:,:)
+ real(r8), pointer :: zi_blck(:,:) ! Geopotential on interfaces
+ real(r8), pointer :: hi_blck(:,:) ! Geometric height on interfaces
+ real(r8), pointer :: ui_blck(:,:)
+ real(r8), pointer :: vi_blck(:,:)
+ real(r8), pointer :: wi_blck(:,:)
+ real(r8), pointer :: omega_blck(:,:)
+ real(r8), pointer :: tn_blck(:,:)
+
+ ! From physics state
+ real(r8), pointer :: u_blck(:,:)
+ real(r8), pointer :: v_blck(:,:)
+ real(r8), pointer :: pmid_blck(:,:)
+ real(r8), pointer :: phis(:) ! surface geopotential
+ ! Constituents
+ real(r8), pointer :: n2mmr_blck(:,:)
+ real(r8), pointer :: o2mmr_blck(:,:)
+ real(r8), pointer :: o1mmr_blck(:,:)
+ real(r8), pointer :: h1mmr_blck(:,:)
+ real(r8), pointer :: o2pmmr_blck(:,:) ! O2+ (blocks)
+ real(r8), pointer :: nopmmr_blck(:,:) ! NO+ (blocks)
+ real(r8), pointer :: n2pmmr_blck(:,:) ! N2+ (blocks)
+ real(r8), pointer :: opmmr_blck(:,:) ! O+ (blocks)
+ real(r8), pointer :: opmmrtm1_blck(:,:) ! O+ previous time step (blocks)
+ real(r8), pointer :: mbar_blck(:,:) ! mean molecular weight
+ ! Temp fields for outfld
+ real(r8) :: r8tmp
+ real(r8), pointer :: tempm(:,:) => null() ! Temp midpoint field for outfld
+ real(r8), pointer :: tempi(:,:) => null() ! Temp interface field for outfld
+ real(r8), parameter :: rearth_inv = 1._r8/SHR_CONST_REARTH ! /meters
+ real(r8), parameter :: n2min = 1.e-6_r8 ! lower limit of N2 mixing ratios
+
+ character(len=*), parameter :: subname = 'ionosphere_run2'
+
+ ionos_cpl: if (ionos_xport_active) then
+
+ blksize = 0
+ do lchnk = begchunk, endchunk
+ blksize = blksize + get_ncols_p(lchnk)
+ end do
- ! arguments
- integer, intent(in) :: hdim1, hdim2, nlev
- integer, pointer :: ldof(:)
+ allocate(phis(pcols), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate phis')
+ end if
+ allocate(u_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate u_blck')
+ end if
+ allocate(v_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate v_blck')
+ end if
+ allocate(sigma_ped_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate sigma_ped_blck')
+ end if
+ allocate(sigma_hall_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate sigma_hall_blck')
+ end if
+ allocate(ti_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate ti_blck')
+ end if
+ allocate(hi_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate hi_blck')
+ end if
+ allocate(te_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate te_blck')
+ end if
+ allocate(zi_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate zi_blck')
+ end if
+ allocate(ui_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate ui_blck')
+ end if
+ allocate(vi_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate vi_blck')
+ end if
+ allocate(wi_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate wi_blck')
+ end if
+ allocate(omega_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate omega_blck')
+ end if
+ allocate(tn_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate tn_blck')
+ end if
+ allocate(n2mmr_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate n2mmr_blck')
+ end if
+ allocate(o2mmr_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate o2mmr_blck')
+ end if
+ allocate(o1mmr_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate o1mmr_blck')
+ end if
+ allocate(h1mmr_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate h1mmr_blck')
+ end if
+ allocate(mbar_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate mbar_blck')
+ end if
+ allocate(pmid_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate pmid_blck')
+ end if
+
+ allocate(opmmrtm1_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate opmmrtm1_blck')
+ end if
+
+ if (sIndxOp > 0) then
+ allocate(opmmr_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate opmmr_blck')
+ end if
+ end if
+ if (sIndxO2p > 0) then
+ allocate(o2pmmr_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate o2pmmr_blck')
+ end if
+ end if
+ if (sIndxNOp > 0) then
+ allocate(nopmmr_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate nopmmr_blck')
+ end if
+ end if
+ if (sIndxN2p > 0) then
+ allocate(n2pmmr_blck(pver, blksize), stat=astat)
+ if (astat /= 0) then
+ call endrun(subname//': failed to allocate n2pmmr_blck')
+ end if
+ end if
+
+ if (hist_fld_active('Z3GM')) then
+ allocate(tempm(pcols, pver))
+ end if
+
+ if (hist_fld_active('Z3GMI')) then
+ allocate(tempi(pcols, pver))
+ end if
+
+ if (.not.opmmrtm1_initialized) then
+ do lchnk = begchunk, endchunk
+ ncol = get_ncols_p(lchnk)
+
+ if (sIndxOp > 0) then
+ pbuf_chnk => pbuf_get_chunk(pbuf2d, lchnk)
+ call pbuf_get_field(pbuf_chnk, slvd_pbf_ndx, mmrPOp_phys, start=(/1,1,sIndxOp/), kount=(/pcols,pver,1/) )
+ opmmrtm1_phys(:ncol,:pver,lchnk) = mmrPOp_phys(:ncol,:pver)
+ else
+ opmmrtm1_phys(:ncol,:pver,lchnk) = phys_state(lchnk)%q(:ncol,:pver, ixop)
+ endif
+ enddo
+ opmmrtm1_initialized=.true.
+ endif
+
+ j = 0
+ do lchnk = begchunk, endchunk
+ ncol = get_ncols_p(lchnk)
+ pbuf_chnk => pbuf_get_chunk(pbuf2d, lchnk)
+
+ ! Gather data stored in pbuf and collect into blocked arrays
+ ! Get Pedersen and Hall conductivities:
+ call pbuf_get_field(pbuf_chnk, index_ped, sigma_ped_phys)
+ call pbuf_get_field(pbuf_chnk, index_hall, sigma_hall_phys)
+ ! Get ion and electron temperatures
+ call pbuf_get_field(pbuf_chnk, index_te, te_phys)
+ call pbuf_get_field(pbuf_chnk, index_ti, ti_phys)
+ ! Get components of ion drift velocities
+ call pbuf_get_field(pbuf_chnk, index_ui, ui_phys)
+ call pbuf_get_field(pbuf_chnk, index_vi, vi_phys)
+ call pbuf_get_field(pbuf_chnk, index_wi, wi_phys)
+ !--------------------------------------------------------
+ ! Get ions from physics buffer if non-transported
+ !--------------------------------------------------------
+ if (sIndxO2p > 0) then
+ call pbuf_get_field(pbuf_chnk, slvd_pbf_ndx, mmrPO2p_phys, &
+ start=(/1,1,sIndxO2p/), kount=(/pcols,pver,1/) )
+ end if
+ if (sIndxNOp > 0) then
+ call pbuf_get_field(pbuf_chnk, slvd_pbf_ndx, mmrPNOp_phys, &
+ start=(/1,1,sIndxNOp/), kount=(/pcols,pver,1/) )
+ end if
+ if (sIndxN2p > 0) then
+ call pbuf_get_field(pbuf_chnk, slvd_pbf_ndx, mmrPN2p_phys, &
+ start=(/1,1,sIndxN2p/), kount=(/pcols,pver,1/) )
+ end if
+ if (sIndxOp > 0) then
+ call pbuf_get_field(pbuf_chnk, slvd_pbf_ndx, mmrPOp_phys, &
+ start=(/1,1,sIndxOp/), kount=(/pcols,pver,1/) )
+ end if
+
+ ! PHIS is from physics state
+ phis(:ncol) = phys_state(lchnk)%phis(:ncol)
+ do i = 1, ncol
+ j = j + 1
+ do k = 1, pver
+ ! physics state fields on levels
+ u_blck(k, j) = phys_state(lchnk)%u(i, k)
+ v_blck(k, j) = phys_state(lchnk)%v(i, k)
+ !------------------------------------------------------------
+ ! Might need geometric height on midpoints for output
+ !------------------------------------------------------------
+ if (hist_fld_active('Z3GM')) then
+ r8tmp = phys_state(lchnk)%zm(i, k)
+ tempm(i, k) = r8tmp * (1._r8 + (r8tmp * rearth_inv))
+ end if
+ ! physics state fields on interfaces (but only to pver)
+ zi_blck(k, j) = phys_state(lchnk)%zi(i, k) + phis(i)/gravit
+ !------------------------------------------------------------
+ ! Convert geopotential to geometric height at interfaces:
+ !------------------------------------------------------------
+                ! Note: hi_blck has pver levels instead of pverp because the
+                ! dynamo does not use the bottom interface
+ hi_blck(k, j) = zi_blck(k, j) * (1._r8 + (zi_blck(k, j) * rearth_inv))
+ if (hist_fld_active('Z3GMI')) then
+ tempi(i, k) = hi_blck(k, j)
+ end if
+ omega_blck(k, j) = phys_state(lchnk)%omega(i, k)
+ tn_blck(k, j) = phys_state(lchnk)%t(i, k)
+ pmid_blck(k, j) = phys_state(lchnk)%pmid(i, k)
+ ! Pedersen and Hall conductivities:
+ sigma_ped_blck(k, j) = sigma_ped_phys(i, k)
+ sigma_hall_blck(k, j) = sigma_hall_phys(i, k)
+ ! ion and electron temperatures
+ te_blck(k, j) = te_phys(i, k)
+ ti_blck(k, j) = ti_phys(i, k)
+ ! components of ion drift velocities
+ ui_blck(k, j) = ui_phys(i, k)
+ vi_blck(k, j) = vi_phys(i, k)
+ wi_blck(k, j) = wi_phys(i, k)
+ !------------------------------------------------------------
+ ! ions from physics state if transported, otherwise from pbuf
+ !------------------------------------------------------------
+ if (ixo2p > 0) then
+ o2pmmr_blck(k, j) = phys_state(lchnk)%q(i, k, ixo2p)
+ else if (sIndxO2p > 0) then
+ o2pmmr_blck(k, j) = mmrPO2p_phys(i, k)
+ else
+ call endrun(subname//': No source for O2p')
+ end if
+ if (ixnop > 0) then
+ nopmmr_blck(k, j) = phys_state(lchnk)%q(i, k, ixnop)
+ else if (sIndxNOp > 0) then
+ nopmmr_blck(k, j) = mmrPNOp_phys(i, k)
+ else
+ call endrun(subname//': No source for NOp')
+ end if
+ if (ixn2p > 0) then
+ n2pmmr_blck(k, j) = phys_state(lchnk)%q(i, k, ixn2p)
+ else if (sIndxN2p > 0) then
+ n2pmmr_blck(k, j) = mmrPN2p_phys(i, k)
+ else
+ call endrun(subname//': No source for N2p')
+ end if
+ if (ixop > 0) then
+ opmmr_blck(k, j) = phys_state(lchnk)%q(i, k, ixop)
+ else if (sIndxOp > 0) then
+ opmmr_blck(k, j) = mmrPOp_phys(i, k)
+ else
+ call endrun(subname//': No source for Op')
+ end if
+ opmmrtm1_blck(k, j) = opmmrtm1_phys(i, k, lchnk)
+ !------------------------------------
+ ! neutrals from advected tracers array
+ !------------------------------------
+ o2mmr_blck(k, j) = phys_state(lchnk)%q(i, k, ixo2)
+ o1mmr_blck(k, j) = phys_state(lchnk)%q(i, k, ixo)
+ h1mmr_blck(k, j) = phys_state(lchnk)%q(i, k, ixh)
+ end do
+ end do ! do i = 1, ncol
+
+ !------------------------------------------------------------------
+ ! Save OMEGA and analytically derived geometric height
+ !------------------------------------------------------------------
+ if (hist_fld_active('Z3GM')) then
+ tempm(ncol+1:, :) = 0.0_r8
+ call outfld('Z3GM', tempm, pcols, lchnk)
+ end if
+ if (hist_fld_active('Z3GMI')) then
+ tempi(ncol+1:, :) = 0.0_r8
+ call outfld('Z3GMI', tempi, pcols, lchnk)
+ end if
+ end do ! do lchnk = begchunk, endchunk
+
+ !---------------------------------------------------------------------
+ ! Compute and save mean molecular weight:
+ !---------------------------------------------------------------------
+ j = 0
+ do lchnk = begchunk, endchunk
+ ncol = get_ncols_p(lchnk)
+ do i = 1, ncol
+ j = j + 1
+ do k = 1, pver
+ r8tmp = o1mmr_blck(k,j) + o2mmr_blck(k,j) + h1mmr_blck(k,j)
+ n2mmr_blck(k, j) = max(1.0_r8 - r8tmp, n2min)
+ r8tmp = o1mmr_blck(k, j) / rmassO1
+ r8tmp = r8tmp + (o2mmr_blck(k, j) / rmassO2)
+ r8tmp = r8tmp + (h1mmr_blck(k, j) / rmassH)
+ r8tmp = r8tmp + (n2mmr_blck(k, j) / rmassN2)
+ mbar_blck(k, j) = 1.0_r8 / r8tmp
+ end do
+ end do
+ end do
- ! local variables
- integer :: i, k, j
- integer :: lcnt
- integer :: beglatxy, beglonxy, endlatxy, endlonxy
- !----------------------------------------------------------------------------
+ call t_startf('d_pie_coupling')
- beglonxy = get_dyn_grid_parm('beglonxy')
- endlonxy = get_dyn_grid_parm('endlonxy')
- beglatxy = get_dyn_grid_parm('beglatxy')
- endlatxy = get_dyn_grid_parm('endlatxy')
-
- lcnt = (endlatxy-beglatxy+1)*nlev*(endlonxy-beglonxy+1)
- allocate(ldof(lcnt))
- ldof(:) = 0
-
- lcnt = 0
- do k = 1, nlev
- do j = beglatxy, endlatxy
- do i = beglonxy, endlonxy
- lcnt = lcnt + 1
- ldof(lcnt) = i + (j-(plat-hdim2+1))*hdim1+(k-1)*hdim1*hdim2
+ ! Compute geometric height and some diagnostic fields needed by
+ ! the dynamo. Output some fields from physics grid
+ ! This code is inside the timer as it is part of the coupling
+!
+ ! waccmx ionosphere electro-dynamics -- transports O+ and
+ ! provides updates to ion drift velocities (on physics grid)
+ ! All fields are on physics mesh, (pver, blksize),
+ ! where blksize is the total number of columns on this task
+ call edyn_grid_comp_run2(omega_blck, pmid_blck, zi_blck, hi_blck, &
+ u_blck, v_blck, tn_blck, sigma_ped_blck, sigma_hall_blck, &
+ te_blck, ti_blck, mbar_blck, n2mmr_blck, o2mmr_blck, o1mmr_blck, &
+ o2pmmr_blck, nopmmr_blck, n2pmmr_blck, &
+ opmmr_blck, opmmrtm1_blck, ui_blck, vi_blck, wi_blck, &
+ rmassO2p, rmassNOp, rmassN2p, rmassOp, 1, blksize, pver)
+
+ call t_stopf ('d_pie_coupling')
+
+ if (state_debug_checks) then
+ call shr_assert_in_domain(ui_blck, is_nan=.false., varname="ui_blck", msg="NaN found in ionosphere_run2")
+ call shr_assert_in_domain(vi_blck, is_nan=.false., varname="vi_blck", msg="NaN found in ionosphere_run2")
+ call shr_assert_in_domain(wi_blck, is_nan=.false., varname="wi_blck", msg="NaN found in ionosphere_run2")
+ call shr_assert_in_domain(opmmr_blck, is_nan=.false., varname="opmmr_blck", msg="NaN found in ionosphere_run2")
+ end if
+
+ !
+ !----------------------------------------
+ ! Put data back in to state or pbuf
+ !----------------------------------------
+ ! blocks --> physics chunks
+
+ j = 0
+ do lchnk = begchunk, endchunk
+ ncol = phys_state(lchnk)%ncol
+ pbuf_chnk => pbuf_get_chunk(pbuf2d, lchnk)
+
+ call pbuf_get_field(pbuf_chnk, index_ui, ui_phys)
+ call pbuf_get_field(pbuf_chnk, index_vi, vi_phys)
+ call pbuf_get_field(pbuf_chnk, index_wi, wi_phys)
+ if (sIndxOp > 0) then
+ call pbuf_get_field(pbuf_chnk, slvd_pbf_ndx, mmrPOp_phys, &
+ start=(/1,1,sIndxOp/), kount=(/pcols,pver,1/))
+ end if
+ do i = 1, ncol
+ j = j + 1
+ do k = 1, pver
+ ui_phys(i, k) = ui_blck(k, j)
+ vi_phys(i, k) = vi_blck(k, j)
+ wi_phys(i, k) = wi_blck(k, j)
+ if (ixop > 0) then
+ phys_state(lchnk)%q(i, k, ixop) = opmmr_blck(k, j)
+ else if (sIndxOp > 0) then
+ mmrPOp_phys(i, k) = opmmr_blck(k, j)
+ else
+ call endrun(subname//': No destination for Op')
+ end if
+ opmmrtm1_phys(i,k,lchnk) = opmmrtm1_blck(k,j)
+ end do
+ end do
+
+ if (ionos_edyn_active) then
+ call outfld('UI', ui_phys, pcols, lchnk)
+ call outfld('VI', vi_phys, pcols, lchnk)
+ call outfld('WI', wi_phys, pcols, lchnk)
+ if (write_inithist()) then
+ call outfld('UI&IC', ui_phys, pcols, lchnk)
+ call outfld('VI&IC', vi_phys, pcols, lchnk)
+ call outfld('WI&IC', wi_phys, pcols, lchnk)
+ end if
+ end if
end do
- end do
- end do
-
-end function get_restart_decomp
-!=========================================================================================
+ if (associated(opmmr_blck)) then
+ deallocate(opmmr_blck)
+ nullify(opmmr_blck)
+ end if
+ if (associated(o2pmmr_blck)) then
+ deallocate(o2pmmr_blck)
+ nullify(o2pmmr_blck)
+ end if
+ if (associated(nopmmr_blck)) then
+ deallocate(nopmmr_blck)
+ nullify(nopmmr_blck)
+ end if
+ if (associated(n2pmmr_blck)) then
+ deallocate(n2pmmr_blck)
+ nullify(n2pmmr_blck)
+ end if
+ if (associated(tempi)) then
+ deallocate(tempi)
+ nullify(tempi)
+ end if
+ if (associated(tempm)) then
+ deallocate(tempm)
+ nullify(tempm)
+ end if
+ deallocate(opmmrtm1_blck)
+ nullify(opmmrtm1_blck)
+ deallocate(phis)
+ nullify(phis)
+ deallocate(u_blck)
+ nullify(u_blck)
+ deallocate(v_blck)
+ nullify(v_blck)
+ deallocate(sigma_ped_blck)
+ nullify(sigma_ped_blck)
+ deallocate(sigma_hall_blck)
+ nullify(sigma_hall_blck)
+ deallocate(ti_blck)
+ nullify(ti_blck)
+ deallocate(hi_blck)
+ nullify(hi_blck)
+ deallocate(te_blck)
+ nullify(te_blck)
+ deallocate(zi_blck)
+ nullify(zi_blck)
+ deallocate(ui_blck)
+ nullify(ui_blck)
+ deallocate(vi_blck)
+ nullify(vi_blck)
+ deallocate(wi_blck)
+ nullify(wi_blck)
+ deallocate(omega_blck)
+ nullify(omega_blck)
+ deallocate(tn_blck)
+ nullify(tn_blck)
+ deallocate(n2mmr_blck)
+ nullify(n2mmr_blck)
+ deallocate(o2mmr_blck)
+ nullify(o2mmr_blck)
+ deallocate(o1mmr_blck)
+ nullify(o1mmr_blck)
+ deallocate(h1mmr_blck)
+ nullify(h1mmr_blck)
+ deallocate(mbar_blck)
+ nullify(mbar_blck)
+ deallocate(pmid_blck)
+ nullify(pmid_blck)
+
+ end if ionos_cpl
+
+ end subroutine ionosphere_run2
+
+ !---------------------------------------------------------------------------
+ !---------------------------------------------------------------------------
+ subroutine ionosphere_init_restart(File)
+ use pio, only: file_desc_t, pio_double, pio_def_var
+ use cam_pio_utils, only: cam_pio_def_dim
+ use cam_grid_support, only: cam_grid_id, cam_grid_write_attr
+ use cam_grid_support, only: cam_grid_header_info_t
+
+ type(File_desc_t), intent(inout) :: File
+
+ integer :: grid_id
+ integer :: hdimcnt, ierr, i
+ integer :: dimids(3), ndims
+ type(cam_grid_header_info_t) :: info
+
+ if (ionos_xport_active) then
+ grid_id = cam_grid_id('physgrid')
+ call cam_grid_write_attr(File, grid_id, info)
+ hdimcnt = info%num_hdims()
+ do i = 1, hdimcnt
+ dimids(i) = info%get_hdimid(i)
+ end do
+ ndims = hdimcnt + 1
+
+ call cam_pio_def_dim(File, 'lev', pver, dimids(ndims), &
+ existOK=.true.)
+
+ ierr = pio_def_var(File, 'Optm1', pio_double, dimids(1:ndims), &
+ Optm1_vdesc)
+ end if
+ end subroutine ionosphere_init_restart
+
+ !---------------------------------------------------------------------------
+ !---------------------------------------------------------------------------
+ subroutine ionosphere_write_restart(File)
+ use pio, only: io_desc_t, file_desc_t, pio_write_darray
+ use pio, only: pio_double
+ use cam_grid_support, only: cam_grid_id, cam_grid_write_var
+ use cam_grid_support, only: cam_grid_get_decomp, cam_grid_dimensions
+ use phys_grid, only: phys_decomp
+
+ type(file_desc_t), intent(inout) :: File
+
+ integer :: ierr
+ integer :: physgrid
+ integer :: dims(3), gdims(3)
+ integer :: nhdims
+ type(io_desc_t), pointer :: iodesc3d
+
+ if (ionos_xport_active) then
+
+ ! Write grid vars
+ call cam_grid_write_var(File, phys_decomp)
+
+ physgrid = cam_grid_id('physgrid')
+ call cam_grid_dimensions(physgrid, gdims(1:2), nhdims)
+ nhdims = nhdims + 1
+ gdims(nhdims) = pver
+ dims(1) = pcols
+ dims(2) = pver
+ dims(3) = endchunk - begchunk + 1
+ call cam_grid_get_decomp(physgrid, dims(1:3), gdims(1:nhdims), &
+ pio_double, iodesc3d)
+
+ call pio_write_darray(File, Optm1_vdesc, iodesc3d, opmmrtm1_phys, ierr)
+ end if
+
+ end subroutine ionosphere_write_restart
+
+ !---------------------------------------------------------------------------
+ !---------------------------------------------------------------------------
+ subroutine ionosphere_read_restart(File)
+ use pio, only: io_desc_t, file_desc_t, pio_inq_varid
+ use pio, only: pio_read_darray, pio_double
+ use cam_grid_support, only: cam_grid_id
+ use cam_grid_support, only: cam_grid_get_decomp, cam_grid_dimensions
+
+ type(file_desc_t), intent(inout) :: File
+
+ integer :: ierr
+ integer :: physgrid
+ integer :: dims(3), gdims(3)
+ integer :: nhdims
+ type(io_desc_t), pointer :: iodesc3d
+
+ if (ionos_xport_active) then
+ call ionosphere_alloc()
+
+ physgrid = cam_grid_id('physgrid')
+ call cam_grid_dimensions(physgrid, gdims(1:2), nhdims)
+ nhdims = nhdims + 1
+ gdims(nhdims) = pver
+ dims(1) = pcols
+ dims(2) = pver
+ dims(3) = endchunk - begchunk + 1
+ call cam_grid_get_decomp(physgrid, dims(1:3), gdims(1:nhdims), &
+ pio_double, iodesc3d)
+
+ ierr = pio_inq_varid(File, 'Optm1', Optm1_vdesc)
+ call pio_read_darray(File, Optm1_vdesc, iodesc3d, opmmrtm1_phys, ierr)
+ opmmrtm1_initialized = .true.
+ end if
+
+ end subroutine ionosphere_read_restart
+
+ !---------------------------------------------------------------------------
+ !---------------------------------------------------------------------------
+ subroutine ionosphere_final
+
+ use edyn_esmf, only: edyn_esmf_final
+
+ call edyn_esmf_final()
+
+ if (allocated(opmmrtm1_phys)) then
+ deallocate(opmmrtm1_phys)
+ end if
+
+ end subroutine ionosphere_final
+
+ !===========================================================================
+ !---------------------------------------------------------------------------
+ !---------------------------------------------------------------------------
+ subroutine ionosphere_read_ic()
+
+ use pio, only: file_desc_t
+ use ncdio_atm, only: infld
+ use cam_initfiles, only: initial_file_get_id
+ use cam_grid_support, only: cam_grid_check, cam_grid_id
+ use cam_grid_support, only: cam_grid_get_dim_names
+
+ type(file_desc_t), pointer :: fh_ini ! PIO filehandle
+
+ integer :: grid_id ! grid ID for data mapping
+ character(len=8) :: dim1name, dim2name
+ logical :: readvar
+ character(len=*), parameter :: subname = 'ionosphere_read_ic'
+
+ if ( ionos_xport_active ) then
+ call ionosphere_alloc()
+
+ fh_ini => initial_file_get_id()
+ grid_id = cam_grid_id('physgrid')
+ if (.not. cam_grid_check(grid_id)) then
+ call endrun(trim(subname)//': Internal error, no "physgrid" grid')
+ end if
+ call cam_grid_get_dim_names(grid_id, dim1name, dim2name)
+
+ ! try reading in OpTM1 from the IC file
+ call infld('OpTM1', fh_ini, dim1name, 'lev', dim2name, 1, pcols, 1, pver, &
+ begchunk, endchunk, opmmrtm1_phys, readvar, gridname='physgrid')
+ if (.not. readvar) then
+ ! if OpTM1 is not included in the IC file then try using O+
+ call infld('Op', fh_ini, dim1name, 'lev', dim2name, 1, pcols, 1, pver, &
+ begchunk, endchunk, opmmrtm1_phys, readvar, gridname='physgrid')
+ end if
+ opmmrtm1_initialized = readvar
+ end if
+
+ end subroutine ionosphere_read_ic
+
+ !---------------------------------------------------------------------------
+ !---------------------------------------------------------------------------
+ subroutine ionosphere_alloc()
+ use infnan, only: nan, assignment(=)
+ integer :: astat
+
+ if (.not. allocated(opmmrtm1_phys)) then
+ allocate(opmmrtm1_phys(pcols, pver, begchunk:endchunk), stat=astat)
+ if (astat /= 0) then
+ call endrun('ionosphere_alloc: failed to allocate opmmrtm1_phys')
+ end if
+ opmmrtm1_phys = nan
+ opmmrtm1_initialized = .false.
+ end if
+
+ end subroutine ionosphere_alloc
+
+ !==========================================================================
end module ionosphere_interface
diff --git a/src/ionosphere/waccmx/ltr_module.F90 b/src/ionosphere/waccmx/ltr_module.F90
new file mode 100644
index 0000000000..52713acfa2
--- /dev/null
+++ b/src/ionosphere/waccmx/ltr_module.F90
@@ -0,0 +1,539 @@
+module ltr_module
+ !
+ ! Module used to read data from the LFM/LTR outputs (POT,mean energy,
+ ! and energy flux).
+ !
+
+ use shr_kind_mod, only: r8 => shr_kind_r8, cl => shr_kind_cl
+ use cam_logfile, only: iulog
+ use spmd_utils, only: masterproc
+ use edyn_maggrid, only: nmlat, nmlonp1
+  use edyn_maggrid, only: ylonm ! magnetic longitudes (nmlonp1) (radians)
+  use edyn_maggrid, only: ylatm ! magnetic latitudes (nmlat) (radians)
+ use cam_pio_utils, only: cam_pio_openfile, cam_pio_closefile
+ use pio, only: pio_inq_dimid, pio_inquire_dimension
+ use pio, only: pio_inquire, pio_inq_varid
+ use pio, only: file_desc_t, pio_noerr, pio_nowrite, pio_get_var
+ use utils_mod, only: check_ncerr, check_alloc
+ use edyn_mpi, only: ntask, mytid
+ use edyn_params, only: pi, dtr, rtd
+
+ implicit none
+
+ private
+ public :: init_ltr
+ public :: getltr
+
+ ! Define parameters for LTR input data file:
+ integer, parameter :: &
+ ithmx = 91, & ! maximum number of latitudes of LTR data
+ jmxm = 2*ithmx-1, & ! maximum number of global latitudes
+ lonmx = 361 ! maximum number of longitudes of LTR data
+ integer :: lonp1,latp1
+ !
+ ! Define fields for LTR input data file:
+ ! electric potential in Volt
+ ! mean energy in KeV
+ ! energy flux in W/m^2
+ ! Time interpolated LTR outputs with suffix _ltr
+ !
+ real(r8),allocatable,dimension(:,:,:) :: & ! (lonp1,latp1,ntimes)
+ pot_input, ekv_input, efx_input
+ real(r8),allocatable,dimension(:,:) :: & ! (lonp1,latp1)
+ pot_ltr, ekv_ltr, efx_ltr
+ integer, allocatable,dimension(:) :: & ! (ntimes)
+ year,month,day,jday
+ real(r8), allocatable,dimension(:) :: & ! (ntimes)
+ hpi_input, pcp_input, ltr_ut
+ real(r8) :: hpi_ltr, pcp_ltr
+ !
+ type(file_desc_t) :: ncid
+
+ character(len=cl), allocatable :: ltr_files(:)
+ integer :: num_files, file_ndx
+
+contains
+
+ !-----------------------------------------------------------------------
+ subroutine init_ltr(ltr_list)
+
+ character(len=*),intent(in) :: ltr_list(:)
+
+ integer :: n, nfiles
+
+ nfiles = size(ltr_list)
+ num_files = 0
+
+ count_files: do n = 1,nfiles
+ if (len_trim(ltr_list(n))<1 .or. &
+ trim(ltr_list(n))=='NONE') then
+ exit count_files
+ else
+ num_files = num_files + 1
+ end if
+ end do count_files
+
+ allocate(ltr_files(num_files))
+ ltr_files(:num_files) = ltr_list(:num_files)
+ file_ndx = 1
+ call open_files()
+
+ end subroutine init_ltr
+
+ !-----------------------------------------------------------------------
+ subroutine rdltr(ltrfile)
+ !
+ ! Read LTR data
+ !
+ character(len=*), intent(in) :: ltrfile
+ ! Local:
+ integer :: istat, ntimes, ndims, nvars, ngatts, ier
+ integer :: idunlim
+ integer :: id_lon, id_lat, id_time
+ integer :: idv_year, idv_mon, idv_day, idv_jday
+ integer :: idv_ut, idv_hpi, idv_pcp
+ character(len=*), parameter :: subname = 'rdltr'
+
+ !
+ !
+ if (masterproc) then
+ write(iulog, "(/, 72('-'))")
+ write(iulog, "(a, ': read LTR data:')") subname
+ end if
+ !
+ ! Open netcdf file:
+ call cam_pio_openfile(ncid, ltrfile, pio_nowrite)
+ !
+ ! Get LTR grid dimension:
+ istat = pio_inq_dimid(ncid, 'lon', id_lon)
+ istat = pio_inquire_dimension(ncid, id_lon, len=lonp1)
+ call check_ncerr(istat, subname, 'LTR longitude dimension')
+
+ istat = pio_inq_dimid(ncid, 'lat', id_lat)
+ istat = pio_inquire_dimension(ncid, id_lat, len=latp1)
+ call check_ncerr(istat, subname, 'LTR latitude dimension')
+ !
+ ! Get time dimension:
+ istat = pio_inquire(ncid, unlimiteddimid=id_time)
+ istat = pio_inquire_dimension(ncid, id_time, len=ntimes)
+ call check_ncerr(istat, subname, 'LTR time dimension')
+ !
+ ! Search for requested LTR output fields
+ istat = pio_inquire(ncid,ndims,nvars,ngatts,idunlim)
+ !
+ ! Get 1-D LTR fields (ntimes)
+ if (.not. allocated(year)) then
+ allocate(year(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'year', ntimes=ntimes)
+ end if
+ istat = pio_inq_varid(ncid, 'year', idv_year)
+ call check_ncerr(istat, subname, 'LTR year id')
+ istat = pio_get_var(ncid, idv_year, year)
+ call check_ncerr(istat, subname, 'LTR year')
+
+ if (.not. allocated(month)) then
+ allocate(month(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'month', ntimes=ntimes)
+ end if
+ istat = pio_inq_varid(ncid, 'month', idv_mon)
+ call check_ncerr(istat, subname, 'LTR month id')
+ istat = pio_get_var(ncid, idv_mon, month)
+ call check_ncerr(istat, subname, 'LTR month')
+ if (.not. allocated(day)) then
+ allocate(day(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'day', ntimes=ntimes)
+ end if
+ istat = pio_inq_varid(ncid, 'day', idv_day)
+ call check_ncerr(istat, subname, 'LTR day id')
+ istat = pio_get_var(ncid, idv_day, day)
+ call check_ncerr(istat, subname, 'LTR day')
+
+ if (.not. allocated(jday)) then
+ allocate(jday(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'jday', ntimes=ntimes)
+ end if
+ istat = pio_inq_varid(ncid, 'jday', idv_jday)
+ call check_ncerr(istat, subname, 'LTR jday id')
+ istat = pio_get_var(ncid, idv_jday, jday)
+ call check_ncerr(istat, subname, 'LTR jday')
+ !
+ ! Allocate 1-d fields:
+ if (.not. allocated(ltr_ut)) then
+ allocate(ltr_ut(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'ltr_ut', ntimes=ntimes)
+ end if
+ if (.not. allocated(hpi_input)) then
+ allocate(hpi_input(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'hpi_input', ntimes=ntimes)
+ end if
+ if (.not. allocated(pcp_input)) then
+ allocate(pcp_input(ntimes), stat=ier)
+ call check_alloc(ier, subname, 'pcp_input', ntimes=ntimes)
+ end if
+ !
+ ! Get ut
+ istat = pio_inq_varid(ncid, 'ut', idv_ut)
+ call check_ncerr(istat, subname, 'LTR ut id')
+ istat = pio_get_var(ncid, idv_ut, ltr_ut)
+ call check_ncerr(istat, subname, 'LTR ut')
+ !
+ ! Get HPI
+ istat = pio_inq_varid(ncid, 'hpiN', idv_hpi)
+ call check_ncerr(istat, subname, 'LTR hpi id')
+ istat = pio_get_var(ncid, idv_hpi, hpi_input)
+ call check_ncerr(istat, subname, 'LTR hpi')
+ !
+ ! Get PCP
+ istat = pio_inq_varid(ncid, 'pcpN', idv_pcp)
+ call check_ncerr(istat, subname, 'LTR pcp id')
+ istat = pio_get_var(ncid, idv_pcp, pcp_input)
+ call check_ncerr(istat, subname, 'LTR pcp')
+ !
+ ! Allocate 2-d fields:
+ if (.not. allocated(pot_ltr)) then
+ allocate(pot_ltr(lonp1, latp1), stat=ier)
+ call check_alloc(ier, subname, 'pot_ltr', lonp1=lonp1, latp1=latp1)
+ end if
+ if (.not. allocated(ekv_ltr)) then
+ allocate(ekv_ltr(lonp1, latp1), stat=ier)
+ call check_alloc(ier, subname, 'ekv_ltr', lonp1=lonp1, latp1=latp1)
+ end if
+ if (.not. allocated(efx_ltr)) then
+ allocate(efx_ltr(lonp1, latp1), stat=ier)
+ call check_alloc(ier, subname, 'efx_ltr', lonp1=lonp1, latp1=latp1)
+ end if
+ !
+ ! Allocate 3-d fields:
+ if (.not. allocated(pot_input)) then
+ allocate(pot_input(lonp1, latp1, ntimes), stat=ier)
+ call check_alloc(ier, subname, 'pot_input', &
+ lonp1=lonp1, latp1=latp1, ntimes=ntimes)
+ end if
+ if (.not. allocated(ekv_input)) then
+ allocate(ekv_input(lonp1, latp1, ntimes), stat=ier)
+ call check_alloc(ier, subname, 'ekv_input', &
+ lonp1=lonp1, latp1=latp1, ntimes=ntimes)
+ end if
+ if (.not. allocated(efx_input)) then
+ allocate(efx_input(lonp1, latp1, ntimes), stat=ier)
+ call check_alloc(ier, subname, 'efx_input', &
+ lonp1=lonp1, latp1=latp1, ntimes=ntimes)
+ end if
+ end subroutine rdltr
+
+ !-----------------------------------------------------------------------
+ subroutine update_3d_fields( ncid, offset, kount, pot_3d,ekv_3d,efx_3d )
+ !
+ ! Read one lon x lat x time hyperslab of the three LTR driver fields
+ ! (electric potential, auroral mean energy, auroral energy flux) from
+ ! the already-open LTR netCDF file via PIO. The caller selects the
+ ! slab with offset (start indices) and kount (counts) for each of the
+ ! three dimensions; any PIO failure aborts through check_ncerr.
+ !
+ type(file_desc_t), intent(in) :: ncid ! open LTR netCDF file handle
+ integer, intent(in) :: offset(:) ! start indices (lon,lat,time)
+ integer, intent(in) :: kount(:) ! counts per dimension (lon,lat,time)
+ real(r8),intent(out) :: pot_3d(:,:,:) ! electric potential slab
+ real(r8),intent(out) :: ekv_3d(:,:,:) ! auroral mean energy slab
+ real(r8),intent(out) :: efx_3d(:,:,:) ! auroral energy flux slab
+
+
+ integer :: istat
+ integer :: idv_pot,idv_ekv, idv_efx
+ character(len=*), parameter :: subname = 'update_3d_fields'
+
+ !
+ ! Get 3-D fields (lon,lat,ntimes)
+ !
+ ! electric potential
+ istat = pio_inq_varid(ncid, 'pot', idv_pot)
+ call check_ncerr(istat, subname, 'LTR pot id')
+ istat = pio_get_var(ncid, idv_pot, offset, kount, pot_3d)
+ call check_ncerr(istat, subname, 'LTR pot')
+ !
+ ! mean energy
+ istat = pio_inq_varid(ncid, 'ekv', idv_ekv)
+ call check_ncerr(istat, subname, 'LTR ekv id')
+ istat = pio_get_var(ncid, idv_ekv, offset, kount, ekv_3d)
+ call check_ncerr(istat, subname, 'LTR ekv')
+ !
+ ! energy flux
+ istat = pio_inq_varid(ncid, 'efx', idv_efx)
+ call check_ncerr(istat, subname, 'LTR efx id')
+ istat = pio_get_var(ncid, idv_efx, offset, kount, efx_3d)
+ call check_ncerr(istat, subname, 'LTR efx')
+
+ end subroutine update_3d_fields
+
+ !-----------------------------------------------------------------------
+ subroutine getltr(iyear, imo, iday, iutsec, sunlon, iprint, &
+ iltr, phihm, ltr_efxm, ltr_kevm)
+ use cam_history_support, only: fillvalue
+ use rgrd_mod, only: rgrd2
+ !
+ ! Read LTR outputs from ltr_ncfile file, returning electric potential,
+ ! auroral mean energy and energy flux at current date and time,
+ ! and the data is linearly interpolated to the model time
+ !
+ !
+ ! Args:
+
+ integer, intent(in) :: iyear
+ integer, intent(in) :: imo
+ integer, intent(in) :: iday
+ real(r8), intent(in) :: sunlon
+ integer, intent(in) :: iutsec
+ integer, intent(in) :: iprint
+ integer, intent(out) :: iltr
+ real(r8), intent(out) :: phihm(nmlonp1,nmlat)
+ real(r8), intent(out) :: ltr_efxm(nmlonp1,nmlat) ! on geomag grid
+ real(r8), intent(out) :: ltr_kevm(nmlonp1,nmlat) ! on geomag grid
+ !
+ !
+ ! Local:
+ real(r8) :: potm(lonp1,jmxm)
+ real(r8) :: efxm(lonp1,jmxm), ekvm(lonp1,jmxm)
+ real(r8) :: alat(jmxm), alon(lonp1)
+ real(r8) :: alatm(jmxm), alonm(lonp1)
+ integer :: ier, lw, liw, intpol(2)
+ integer, allocatable :: iw(:)
+ real(r8), allocatable :: w(:)
+ integer :: i, j
+ integer :: nn, iset, iset1, m, mp1, n
+ integer :: idate, bdate, edate
+ real(r8) :: model_ut, denoma, f1, f2
+ real(r8) :: del, xmlt, dmlat, dlatm, dlonm, dmltm, rot
+ integer :: offset(3), kount(3)
+ character(len=*), parameter :: subname = 'getltr'
+
+ phihm = fillvalue
+ ltr_efxm = fillvalue
+ ltr_kevm = fillvalue
+
+ if (iprint > 0 .and. masterproc) then
+ write(iulog,"(/,72('-'))")
+ write(iulog,"(a,':')") subname
+ write(iulog,"(a,i4,', iday = ',i3,', iutsec = ',i10)") &
+ 'Initial requested iyear= ', iyear, iday, iutsec
+ end if
+
+ nn = size(ltr_ut)
+ bdate = year(1)*10000+month(1)*100+day(1)
+ edate = year(nn)*10000+month(nn)*100+day(nn)
+ idate = iyear*10000+imo*100+iday
+ model_ut = real(iutsec, kind=r8) / 3600._r8
+
+ !
+ ! Check times:
+ !
+ iltr=-1
+ check_loop: do while( iltr/=1 )
+ if (masterproc) write(iulog,*) 'file_ndx = ',file_ndx
+
+ iltr = 1
+
+ if (idateedate*24._r8+ltr_ut(nn)) then ! FIXME(review): line corrupted in transfer — text between a '<' and a later '>' was lost (likely the idate<bdate "prior to first data" branch and the start of the "beyond last data" condition); restore from version control
+ if (masterproc) then
+ write(iulog, "(a,': Model date beyond the LTR last Data:',3I5)") &
+ subname, year(nn), month(nn), day(nn)
+ end if
+ iltr = 0
+
+ if (file_ndx (model_ut+(iday-day(1))*24._r8)) then ! FIXME(review): line corrupted — '<'..'>' span lost (presumably the file_ndx < num_files test and the close_files/open_files advance logic); restore from version control
+ iltr = 2
+ return
+ endif
+
+ !
+ ! get LTR data
+ pot_ltr(:,:) = 0._r8
+ ekv_ltr(:,:) = 0._r8
+ efx_ltr(:,:) = 0._r8
+ hpi_ltr = 0._r8
+ pcp_ltr = 0._r8
+
+ iset = 0
+ iset1 = nn
+ do i=1,nn
+ if (ltr_ut(i) <= model_ut+(iday-day(i))*24._r8) iset = i
+ end do
+
+ if (iset == 0) iset = 1
+ if (iset == nn) iset = nn-1
+ iset1 = iset + 1
+
+ denoma = ltr_ut(iset1)+day(iset1)*24._r8 - (ltr_ut(iset)+day(iset)*24._r8)
+
+ if (denoma > 0.1_r8) then
+ write(iulog, "('getltr: Finding a gap in the LTR Data set:', &
+ 'modelday, ltrday =',2I5)") iday,day(n)
+ iltr = 2
+ return
+ end if
+ if (denoma == 0._r8) then
+ f1 = 1._r8
+ f2 = 0._r8
+ else
+ f1 = (ltr_ut(iset1) - (model_ut+(iday- &
+ day(iset1))*24._r8))/denoma
+ f2 = (model_ut+(iday-day(iset))*24._r8 - &
+ ltr_ut(iset))/denoma
+ end if
+ if (masterproc) &
+ write(iulog,"('getltr: LTR Data model_day,model_ut,ltr_day,', &
+ 'ltr_ut,denoma,f1,f2,iset,iset1 =',i2,f7.3,i3,f7.3,3f5.2,2i6)") &
+ iday,model_ut,day(iset),ltr_ut(iset),denoma,f1,f2,iset,iset1
+
+ hpi_ltr = (f1*hpi_input(iset1) + f2*hpi_input(iset))
+ pcp_ltr = (f1*pcp_input(iset1) + f2*pcp_input(iset))
+
+ offset = (/1,1,iset/)
+ kount = (/lonp1,latp1,2/)
+ call update_3d_fields( ncid, offset, kount, pot_input,ekv_input,efx_input )
+ pot_ltr(:,:) = (f1*pot_input(:,:,2) + f2*pot_input(:,:,1))
+ ekv_ltr(:,:) = (f1*ekv_input(:,:,2) + f2*ekv_input(:,:,1))
+ efx_ltr(:,:) = (f1*efx_input(:,:,2) + f2*efx_input(:,:,1))
+
+ active_task: if ( mytid lonp1) mp1 = 2 ! FIXME(review): line corrupted — '<'..'>' span lost (likely the 'mytid < ntask' guard plus the sun-rotation/MLT loop head through 'if (mp1 > lonp1)'); restore from version control
+ del = xmlt - (m-1)*dmltm
+ ! Put in LTR arrays from south pole to north pole
+ do j=1,jmxm
+ potm(i,j) = (1._r8-del)*pot_ltr(m,j) + &
+ del*pot_ltr(mp1,j)
+ ekvm(i,j) = (1._r8-del)*ekv_ltr(m,j) + &
+ del*ekv_ltr(mp1,j)
+ if (ekvm(i,j) == 0._r8) ekvm(i,j)=1._r8
+ efxm(i,j) = (1._r8-del)*efx_ltr(m,j) + &
+ del*efx_ltr(mp1,j)
+ end do
+
+ end do
+
+ ! Set up coeffs to go between EPOTM(IMXMP,JMNH) and TIEPOT(IMAXM,JMAXMH)
+
+ ! **** SET GRID SPACING DLATM, DLONM, DMLTM
+ ! DMLAT=lat spacing in degrees of LTR apex grid
+ dmlat = 180._r8 / real(jmxm-1, kind=r8)
+ dlatm = dmlat * dtr
+ dlonm = 2._r8 * pi / real(lonmx, kind=r8)
+ dmltm = 24._r8 / real(lonmx, kind=r8)
+ ! ****
+ ! **** SET ARRAY YLATM (LATITUDE VALUES FOR GEOMAGNETIC GRID)
+ ! ****
+ alatm(1) = -pi / 2._r8
+ alat(1) = -90._r8
+ alatm(jmxm) = pi / 2._r8
+ alat(jmxm) = 90._r8
+ do i = 2, ithmx
+ alat(i) = alat(i-1)+dlatm*rtd
+ alat(jmxm+1-i) = alat(jmxm+2-i)-dlatm*rtd
+ alatm(i) = alatm(i-1)+dlatm
+ alatm(jmxm+1-i) = alatm(jmxm+2-i)-dlatm
+ end do
+ alon(1) = -pi*rtd
+ alonm(1) = -pi
+ do i = 2, lonp1
+ alon(i) = alon(i-1) + dlonm*rtd
+ alonm(i) = alonm(i-1) + dlonm
+ end do
+
+ ! ylatm and ylonm are arrays of latitudes and longitudes of the
+ ! distorted magnetic grids in radian - from consdyn.h
+ ! Convert from apex magnetic grid to distorted magnetic grid
+ !
+ ! Allocate workspace for regrid routine rgrd_mod:
+ lw = nmlonp1+nmlat+2*nmlonp1
+ if (.not. allocated(w)) then
+ allocate(w(lw), stat=ier)
+ call check_alloc(ier, 'getltr', 'w', lw=lw)
+ end if
+ liw = nmlonp1 + nmlat
+ if (.not. allocated(iw)) then
+ allocate(iw(liw), stat=ier)
+ call check_alloc(ier, 'getltr', 'iw', lw=liw) ! NOTE(review): passes liw through the 'lw' keyword — confirm check_alloc has no 'liw' argument
+ end if
+ intpol(:) = 1 ! linear (not cubic) interp in both dimensions
+ if (alatm(1) > ylatm(1)) then
+ alatm(1) = ylatm(1)
+ end if
+ if (alatm(jmxm) < ylatm(nmlat)) then
+ alatm(jmxm) = ylatm(nmlat)
+ end if
+ if (alonm(1) > ylonm(1)) then
+ alonm(1) = ylonm(1)
+ end if
+ if (alonm(lonp1) < ylonm(nmlonp1)) then
+ alonm(lonp1) = ylonm(nmlonp1)
+ end if
+
+ ! ylatm from -pi/2 to pi/2, and ylonm from -pi to pi
+ call rgrd2(lonp1, jmxm, alonm, alatm, potm, nmlonp1, nmlat, &
+ ylonm, ylatm, phihm, intpol, w, lw, iw, liw, ier)
+ call rgrd2(lonp1, jmxm, alonm, alatm, ekvm, nmlonp1, nmlat, &
+ ylonm, ylatm, ltr_kevm, intpol, w, lw, iw, liw, ier)
+ call rgrd2(lonp1, jmxm, alonm, alatm, efxm, nmlonp1, nmlat, &
+ ylonm, ylatm, ltr_efxm, intpol, w, lw, iw, liw, ier)
+
+ if (iprint > 0 .and. masterproc) then
+ write(iulog, *) subname, ': Max, min ltr_efxm = ', &
+ maxval(ltr_efxm), minval(ltr_efxm)
+ write(iulog, "('getltr: LTR data interpolated to date and time')")
+ write(iulog,"('getltr: iyear,imo,iday,iutsec = ',3i6,i10)") &
+ iyear,imo,iday,iutsec
+ write(iulog,"('getltr: LTR iset f1,f2,year,mon,day,ut = ', &
+ i6,2F9.5,3I6,f10.4)") &
+ iset,f1,f2,year(iset),month(iset),day(iset),ltr_ut(iset)
+ write(iulog,*)'getltr: max,min phihm= ', maxval(phihm),minval(phihm)
+ end if
+
+ end if active_task
+
+ end subroutine getltr
+ !-------------------------------------------------------------------
+
+ subroutine close_files
+ !
+ ! Release the per-file LTR state: deallocate the time-coordinate
+ ! arrays (year/month/day, ltr_ut) and the 1-D inputs (hpi_input,
+ ! pcp_input) filled by rdltr, then close the netCDF file. Called
+ ! before open_files() when advancing to the next file in ltr_files.
+ ! NOTE(review): the 3-d inputs (pot_input/ekv_input/efx_input) are not
+ ! deallocated here — presumably reused across files; confirm sizes match.
+ !
+ deallocate( year,month,day )
+ deallocate( hpi_input, pcp_input, ltr_ut )
+
+ call cam_pio_closefile(ncid)
+
+ end subroutine close_files
+ !-----------------------------------------------------------------------
+ subroutine open_files()
+ !
+ ! Open and read the current LTR file (ltr_files(file_ndx)) via rdltr,
+ ! which opens the dataset and loads its time coordinate, HPI, PCP,
+ ! and allocates the 2-d/3-d work arrays.
+ !
+ call rdltr(ltr_files(file_ndx))
+
+ end subroutine open_files
+
+end module ltr_module
diff --git a/src/ionosphere/waccmx/oplus.F90 b/src/ionosphere/waccmx/oplus.F90
index 5a50c7d456..e25dea2108 100644
--- a/src/ionosphere/waccmx/oplus.F90
+++ b/src/ionosphere/waccmx/oplus.F90
@@ -1,29 +1,31 @@
module oplus
!
-! Horizontally transport the O+ ion, adapted for WACCM-X from TIEGCM.
-! Input O+ is received from WACCM physics/chemistry, transported O+
+! Horizontally transport the O+ ion, adapted for WACCM-X from TIEGCM.
+! Input O+ is received from WACCM physics/chemistry, transported O+
! (op_out and opnm_out) are passed back to chemistry.
!
! B. Foster (foster@ucar.edu), May, 2015.
!
- use shr_kind_mod ,only: r8 => shr_kind_r8
- use cam_abortutils ,only: endrun
- use cam_logfile ,only: iulog
- use savefield_waccm,only: savefld_waccm, savefld_waccm_switch ! save field to waccm history
- use edyn_geogrid ,only: dphi,dlamda,cs,zp,expz,p0 !, nlon, nlat, nlev
- use getapex ,only: bx,by,bz,bmod2 ! (0:nlonp1,jspole-1:jnpole+1)
- use edyn_params ,only: re
- use time_manager ,only: get_step_size,is_first_step,is_first_restart_step
- use edyn_mpi ,only: array_ptr_type
- use shr_const_mod ,only: shr_const_g ! gravitational constant (m/s^2)
- use spmd_utils ,only: masterproc
+ use shr_kind_mod, only: r8 => shr_kind_r8
+ use shr_const_mod, only: shr_const_g ! gravitational constant (m/s^2)
+ use cam_abortutils, only: endrun
+ use cam_logfile, only: iulog
+ use spmd_utils, only: masterproc
+ use savefield_waccm, only: savefld_waccm ! save field to waccm history
+ use edyn_geogrid, only: dphi, dlamda, cs, p0
+ use getapex, only: bx, by, bz, bmod2 ! (0:nlonp1,jspole-1:jnpole+1)
+ use edyn_params, only: Rearth ! Radius of Earth (cm)
+ use time_manager, only: get_step_size, is_first_step, is_first_restart_step
+ use edyn_mpi, only: array_ptr_type
+ use infnan, only: nan, assignment(=)
implicit none
private
+
public :: oplus_xport, oplus_init
public :: kbot
- real(r8) :: pi,rtd
+ real(r8) :: pi, rtd
!
! Constants in CGS:
!
@@ -50,19 +52,22 @@ module oplus
! The shapiro constant .03 is used for spatial smoothing of oplus,
! (shapiro is tuneable, and maybe should be a function of timestep size).
! dtsmooth and dtsmooth_div2 are used in the time smoothing.
-! To turn off all smoothing here, set shapiro=0. and dtsmooth = 1.
+! To turn off all smoothing here, set shapiro=0. and dtsmooth = 1.
!
- real(r8),parameter :: &
+ real(r8),parameter :: &
dtsmooth = 0.95_r8, & ! for time smoother
dtsmooth_div2 = 0.5_r8*(1._r8-dtsmooth)
-
+
real(r8) :: adiff_limiter
real(r8) :: shapiro_const
logical :: enforce_floor
logical :: ring_polar_filter = .false.
logical, parameter :: debug = .false.
-
+
+ real(r8), allocatable :: expz(:) ! exp(-zp)
+ real(r8), allocatable :: zp(:) ! log pressure (as in tiegcm lev(nlev))
+
contains
!-----------------------------------------------------------------------
@@ -70,112 +75,120 @@ subroutine oplus_init( adiff_limiter_in, shapiro_const_in, enforce_floor_in, rin
use cam_history, only : addfld, horiz_only
use filter_module,only : filter_init
- use edyn_geogrid ,only : nlon
+ use edyn_geogrid, only : nlev
real(r8), intent(in) :: adiff_limiter_in
real(r8), intent(in) :: shapiro_const_in
logical , intent(in) :: enforce_floor_in
logical , intent(in) :: ring_polar_filter_in
-
+
shapiro_const = shapiro_const_in
enforce_floor = enforce_floor_in
adiff_limiter = adiff_limiter_in
ring_polar_filter = ring_polar_filter_in
-
+
call filter_init( ring_polar_filter )
!
! Save fields from oplus module:
!
- call addfld ('OPLUS_Z' ,(/ 'lev' /), 'I', 'cm ','OPLUS_Z' , gridname='fv_centers')
- call addfld ('OPLUS_TN' ,(/ 'lev' /), 'I', 'deg K','OPLUS_TN' , gridname='fv_centers')
- call addfld ('OPLUS_TE' ,(/ 'lev' /), 'I', 'deg K','OPLUS_TE' , gridname='fv_centers')
- call addfld ('OPLUS_TI' ,(/ 'lev' /), 'I', 'deg K','OPLUS_TI' , gridname='fv_centers')
- call addfld ('OPLUS_UN' ,(/ 'lev' /), 'I', 'cm/s' ,'OPLUS_UN' , gridname='fv_centers')
- call addfld ('OPLUS_VN' ,(/ 'lev' /), 'I', 'cm/s' ,'OPLUS_VN' , gridname='fv_centers')
- call addfld ('OPLUS_OM' ,(/ 'lev' /), 'I', 'Pa/s' ,'OPLUS_OM' , gridname='fv_centers')
- call addfld ('OPLUS_O2' ,(/ 'lev' /), 'I', 'mmr' ,'OPLUS_O2' , gridname='fv_centers')
- call addfld ('OPLUS_O1' ,(/ 'lev' /), 'I', 'mmr' ,'OPLUS_O1' , gridname='fv_centers')
-
- call addfld ('OPLUS_N2' ,(/ 'lev' /), 'I', 'mmr' ,'OPLUS_N2' , gridname='fv_centers')
- call addfld ('OPLUS_OP' ,(/ 'lev' /), 'I', 'cm^3' ,'OPLUS_OP' , gridname='fv_centers')
- call addfld ('OPLUS_UI' ,(/ 'lev' /), 'I', 'm/s' ,'OPLUS_UI' , gridname='fv_centers')
- call addfld ('OPLUS_VI' ,(/ 'lev' /), 'I', 'm/s' ,'OPLUS_VI' , gridname='fv_centers')
- call addfld ('OPLUS_WI' ,(/ 'lev' /), 'I', 'm/s' ,'OPLUS_WI' , gridname='fv_centers')
- call addfld ('OPLUS_MBAR' ,(/ 'lev' /), 'I', ' ' ,'OPLUS_MBAR' , gridname='fv_centers')
- call addfld ('OPLUS_TR' ,(/ 'lev' /), 'I', ' ' ,'OPLUS_TR' , gridname='fv_centers')
- call addfld ('OPLUS_TP0' ,(/ 'lev' /), 'I', ' ' ,'OPLUS_TP0' , gridname='fv_centers')
- call addfld ('OPLUS_TP1' ,(/ 'lev' /), 'I', ' ' ,'OPLUS_TP1' , gridname='fv_centers')
- ! call addfld ('OPLUS_TP2' ,(/ 'lev' /), 'I', ' ' ,'OPLUS_TP2' , gridname='fv_centers')
- call addfld ('OPLUS_DJ' ,(/ 'lev' /), 'I', ' ' ,'OPLUS_DJ' , gridname='fv_centers')
- call addfld ('OPLUS_HJ' ,(/ 'lev' /), 'I', ' ' ,'OPLUS_HJ' , gridname='fv_centers')
- call addfld ('OPLUS_BVEL' ,(/ 'lev' /), 'I', ' ' ,'OPLUS_BVEL' , gridname='fv_centers')
- call addfld ('OPLUS_DIFFJ',(/ 'lev' /), 'I', ' ' ,'OPLUS_DIFFJ' , gridname='fv_centers')
- call addfld ('OPLUS_OPNM' ,(/ 'lev' /), 'I', ' ' ,'OPLUS_OPNM' , gridname='fv_centers')
- call addfld ('OPNM_SMOOTH',(/ 'lev' /), 'I', ' ' ,'OPNM_SMOOTH' , gridname='fv_centers')
- call addfld ('BDOTDH_OP' ,(/ 'lev' /), 'I', ' ' ,'BDOTDH_OP' , gridname='fv_centers')
- call addfld ('BDOTDH_OPJ' ,(/ 'lev' /), 'I', ' ' ,'BDOTDH_OPJ' , gridname='fv_centers')
- call addfld ('BDOTDH_DIFF',(/ 'lev' /), 'I', ' ' ,'BDOTDH_DIFF' , gridname='fv_centers')
- call addfld ('BDZDVB_OP' ,(/ 'lev' /), 'I', ' ' ,'BDZDVB_OP' , gridname='fv_centers')
- call addfld ('EXPLICIT0' ,(/ 'lev' /), 'I', ' ' ,'EXPLICIT0' , gridname='fv_centers')
-
- call addfld ('EXPLICITa' ,(/ 'lev' /), 'I', ' ' ,'EXPLICITa' , gridname='fv_centers') ! part a
- call addfld ('EXPLICITb' ,(/ 'lev' /), 'I', ' ' ,'EXPLICITb' , gridname='fv_centers') ! part b
- call addfld ('EXPLICIT1' ,(/ 'lev' /), 'I', ' ' ,'EXPLICIT1' , gridname='fv_centers') ! complete
- call addfld ('EXPLICIT' ,(/ 'lev' /), 'I', ' ' ,'EXPLICIT' , gridname='fv_centers') ! final w/ poles
-
- call addfld ('EXPLICIT2' ,(/ 'lev' /), 'I', ' ' ,'EXPLICIT2' , gridname='fv_centers')
- call addfld ('EXPLICIT3' ,(/ 'lev' /), 'I', ' ' ,'EXPLICIT3' , gridname='fv_centers')
- call addfld ('TPHDZ0' ,(/ 'lev' /), 'I', ' ' ,'TPHDZ0' , gridname='fv_centers')
- call addfld ('TPHDZ1' ,(/ 'lev' /), 'I', ' ' ,'TPHDZ1' , gridname='fv_centers')
- call addfld ('DIVBZ' ,(/ 'lev' /), 'I', ' ' ,'DIVBZ' , gridname='fv_centers')
- call addfld ('HDZMBZ' ,(/ 'lev' /), 'I', ' ' ,'HDZMBZ' , gridname='fv_centers')
- call addfld ('HDZPBZ' ,(/ 'lev' /), 'I', ' ' ,'HDZPBZ' , gridname='fv_centers')
- call addfld ('P_COEFF0' ,(/ 'lev' /), 'I', ' ' ,'P_COEFF0' , gridname='fv_centers')
- call addfld ('Q_COEFF0' ,(/ 'lev' /), 'I', ' ' ,'Q_COEFF0' , gridname='fv_centers')
- call addfld ('R_COEFF0' ,(/ 'lev' /), 'I', ' ' ,'R_COEFF0' , gridname='fv_centers')
- call addfld ('P_COEFF0a' ,(/ 'lev' /), 'I', ' ' ,'P_COEFF0a' , gridname='fv_centers')
- call addfld ('Q_COEFF0a' ,(/ 'lev' /), 'I', ' ' ,'Q_COEFF0a' , gridname='fv_centers')
- call addfld ('DJINT' ,(/ 'lev' /), 'I', ' ' ,'DJINT' , gridname='fv_centers')
- call addfld ('BDOTU' ,(/ 'lev' /), 'I', ' ' ,'BDOTU' , gridname='fv_centers')
- call addfld ('R_COEFF0a' ,(/ 'lev' /), 'I', ' ' ,'R_COEFF0a' , gridname='fv_centers')
- call addfld ('P_COEFF1' ,(/ 'lev' /), 'I', ' ' ,'P_COEFF1' , gridname='fv_centers')
- call addfld ('Q_COEFF1' ,(/ 'lev' /), 'I', ' ' ,'Q_COEFF1' , gridname='fv_centers')
- call addfld ('R_COEFF1' ,(/ 'lev' /), 'I', ' ' ,'R_COEFF1' , gridname='fv_centers')
- call addfld ('P_COEFF2' ,(/ 'lev' /), 'I', ' ' ,'P_COEFF2' , gridname='fv_centers')
- call addfld ('Q_COEFF2' ,(/ 'lev' /), 'I', ' ' ,'Q_COEFF2' , gridname='fv_centers')
- call addfld ('R_COEFF2' ,(/ 'lev' /), 'I', ' ' ,'R_COEFF2' , gridname='fv_centers')
-
- call addfld ('P_COEFF' ,(/ 'lev' /), 'I', ' ' ,'P_COEFF' , gridname='fv_centers') ! final w/ poles
- call addfld ('Q_COEFF' ,(/ 'lev' /), 'I', ' ' ,'Q_COEFF' , gridname='fv_centers') ! final w/ poles
- call addfld ('R_COEFF' ,(/ 'lev' /), 'I', ' ' ,'R_COEFF' , gridname='fv_centers') ! final w/ poles
-
- call addfld ('OP_SOLVE' ,(/ 'lev' /), 'I', ' ' ,'OP_SOLVE' , gridname='fv_centers')
-
- call addfld ('OP_OUT' ,(/ 'lev' /), 'I', 'cm^3' ,'OPLUS (oplus_xport output)', gridname='fv_centers')
- call addfld ('OPNM_OUT' ,(/ 'lev' /), 'I', 'cm^3' ,'OPNM_OUT' , gridname='fv_centers')
- call addfld ('BMOD2' ,(/ 'lev' /), 'I', ' ' ,'BMOD2' , gridname='fv_centers')
-
- call addfld ('OPLUS_FLUX', horiz_only , 'I', ' ','OPLUS_FLUX', gridname='fv_centers')
- call addfld ('OPLUS_DIVB', horiz_only , 'I', ' ','OPLUS_DIVB', gridname='fv_centers')
- call addfld ('OPLUS_BX' , horiz_only , 'I', ' ','OPLUS_BX' , gridname='fv_centers')
- call addfld ('OPLUS_BY' , horiz_only , 'I', ' ','OPLUS_BY' , gridname='fv_centers')
- call addfld ('OPLUS_BZ' , horiz_only , 'I', ' ','OPLUS_BZ' , gridname='fv_centers')
- call addfld ('OPLUS_BMAG', horiz_only , 'I', ' ','OPLUS_BMAG', gridname='fv_centers')
-
+ call addfld ('OPLUS_Z' ,(/ 'lev' /), 'I', 'cm ','OPLUS_Z' , gridname='geo_grid')
+ call addfld ('OPLUS_TN' ,(/ 'lev' /), 'I', 'deg K','OPLUS_TN' , gridname='geo_grid')
+ call addfld ('OPLUS_TE' ,(/ 'lev' /), 'I', 'deg K','OPLUS_TE' , gridname='geo_grid')
+ call addfld ('OPLUS_TI' ,(/ 'lev' /), 'I', 'deg K','OPLUS_TI' , gridname='geo_grid')
+ call addfld ('OPLUS_UN' ,(/ 'lev' /), 'I', 'cm/s' ,'OPLUS_UN' , gridname='geo_grid')
+ call addfld ('OPLUS_VN' ,(/ 'lev' /), 'I', 'cm/s' ,'OPLUS_VN' , gridname='geo_grid')
+ call addfld ('OPLUS_OM' ,(/ 'lev' /), 'I', 'Pa/s' ,'OPLUS_OM' , gridname='geo_grid')
+ call addfld ('OPLUS_O2' ,(/ 'lev' /), 'I', 'mmr' ,'OPLUS_O2' , gridname='geo_grid')
+ call addfld ('OPLUS_O1' ,(/ 'lev' /), 'I', 'mmr' ,'OPLUS_O1' , gridname='geo_grid')
+
+ call addfld ('OPLUS_N2' ,(/ 'lev' /), 'I', 'mmr' ,'OPLUS_N2' , gridname='geo_grid')
+ call addfld ('OPLUS_OP' ,(/ 'lev' /), 'I', 'cm^3' ,'OPLUS_OP' , gridname='geo_grid')
+ call addfld ('OPLUS_UI' ,(/ 'lev' /), 'I', 'm/s' ,'OPLUS_UI' , gridname='geo_grid')
+ call addfld ('OPLUS_VI' ,(/ 'lev' /), 'I', 'm/s' ,'OPLUS_VI' , gridname='geo_grid')
+ call addfld ('OPLUS_WI' ,(/ 'lev' /), 'I', 'm/s' ,'OPLUS_WI' , gridname='geo_grid')
+ call addfld ('OPLUS_MBAR' ,(/ 'lev' /), 'I', ' ' ,'OPLUS_MBAR' , gridname='geo_grid')
+ call addfld ('OPLUS_TR' ,(/ 'lev' /), 'I', ' ' ,'OPLUS_TR' , gridname='geo_grid')
+ call addfld ('OPLUS_TP0' ,(/ 'lev' /), 'I', ' ' ,'OPLUS_TP0' , gridname='geo_grid')
+ call addfld ('OPLUS_TP1' ,(/ 'lev' /), 'I', ' ' ,'OPLUS_TP1' , gridname='geo_grid')
+ ! call addfld ('OPLUS_TP2' ,(/ 'lev' /), 'I', ' ' ,'OPLUS_TP2' , gridname='geo_grid')
+ call addfld ('OPLUS_DJ' ,(/ 'lev' /), 'I', ' ' ,'OPLUS_DJ' , gridname='geo_grid')
+ call addfld ('OPLUS_HJ' ,(/ 'lev' /), 'I', ' ' ,'OPLUS_HJ' , gridname='geo_grid')
+ call addfld ('OPLUS_BVEL' ,(/ 'lev' /), 'I', ' ' ,'OPLUS_BVEL' , gridname='geo_grid')
+ call addfld ('OPLUS_DIFFJ',(/ 'lev' /), 'I', ' ' ,'OPLUS_DIFFJ' , gridname='geo_grid')
+ call addfld ('OPLUS_OPNM' ,(/ 'lev' /), 'I', ' ' ,'OPLUS_OPNM' , gridname='geo_grid')
+ call addfld ('OPNM_SMOOTH',(/ 'lev' /), 'I', ' ' ,'OPNM_SMOOTH' , gridname='geo_grid')
+ call addfld ('BDOTDH_OP' ,(/ 'lev' /), 'I', ' ' ,'BDOTDH_OP' , gridname='geo_grid')
+ call addfld ('BDOTDH_OPJ' ,(/ 'lev' /), 'I', ' ' ,'BDOTDH_OPJ' , gridname='geo_grid')
+ call addfld ('BDOTDH_DIFF',(/ 'lev' /), 'I', ' ' ,'BDOTDH_DIFF' , gridname='geo_grid')
+ call addfld ('BDZDVB_OP' ,(/ 'lev' /), 'I', ' ' ,'BDZDVB_OP' , gridname='geo_grid')
+ call addfld ('EXPLICIT0' ,(/ 'lev' /), 'I', ' ' ,'EXPLICIT0' , gridname='geo_grid')
+
+ call addfld ('EXPLICITa' ,(/ 'lev' /), 'I', ' ' ,'EXPLICITa' , gridname='geo_grid') ! part a
+ call addfld ('EXPLICITb' ,(/ 'lev' /), 'I', ' ' ,'EXPLICITb' , gridname='geo_grid') ! part b
+ call addfld ('EXPLICIT1' ,(/ 'lev' /), 'I', ' ' ,'EXPLICIT1' , gridname='geo_grid') ! complete
+ call addfld ('EXPLICIT' ,(/ 'lev' /), 'I', ' ' ,'EXPLICIT' , gridname='geo_grid') ! final w/ poles
+
+ call addfld ('EXPLICIT2' ,(/ 'lev' /), 'I', ' ' ,'EXPLICIT2' , gridname='geo_grid')
+ call addfld ('EXPLICIT3' ,(/ 'lev' /), 'I', ' ' ,'EXPLICIT3' , gridname='geo_grid')
+ call addfld ('TPHDZ0' ,(/ 'lev' /), 'I', ' ' ,'TPHDZ0' , gridname='geo_grid')
+ call addfld ('TPHDZ1' ,(/ 'lev' /), 'I', ' ' ,'TPHDZ1' , gridname='geo_grid')
+ call addfld ('DIVBZ' ,(/ 'lev' /), 'I', ' ' ,'DIVBZ' , gridname='geo_grid')
+ call addfld ('HDZMBZ' ,(/ 'lev' /), 'I', ' ' ,'HDZMBZ' , gridname='geo_grid')
+ call addfld ('HDZPBZ' ,(/ 'lev' /), 'I', ' ' ,'HDZPBZ' , gridname='geo_grid')
+ call addfld ('P_COEFF0' ,(/ 'lev' /), 'I', ' ' ,'P_COEFF0' , gridname='geo_grid')
+ call addfld ('Q_COEFF0' ,(/ 'lev' /), 'I', ' ' ,'Q_COEFF0' , gridname='geo_grid')
+ call addfld ('R_COEFF0' ,(/ 'lev' /), 'I', ' ' ,'R_COEFF0' , gridname='geo_grid')
+ call addfld ('P_COEFF0a' ,(/ 'lev' /), 'I', ' ' ,'P_COEFF0a' , gridname='geo_grid')
+ call addfld ('Q_COEFF0a' ,(/ 'lev' /), 'I', ' ' ,'Q_COEFF0a' , gridname='geo_grid')
+ call addfld ('DJINT' ,(/ 'lev' /), 'I', ' ' ,'DJINT' , gridname='geo_grid')
+ call addfld ('BDOTU' ,(/ 'lev' /), 'I', ' ' ,'BDOTU' , gridname='geo_grid')
+ call addfld ('R_COEFF0a' ,(/ 'lev' /), 'I', ' ' ,'R_COEFF0a' , gridname='geo_grid')
+ call addfld ('P_COEFF1' ,(/ 'lev' /), 'I', ' ' ,'P_COEFF1' , gridname='geo_grid')
+ call addfld ('Q_COEFF1' ,(/ 'lev' /), 'I', ' ' ,'Q_COEFF1' , gridname='geo_grid')
+ call addfld ('R_COEFF1' ,(/ 'lev' /), 'I', ' ' ,'R_COEFF1' , gridname='geo_grid')
+ call addfld ('P_COEFF2' ,(/ 'lev' /), 'I', ' ' ,'P_COEFF2' , gridname='geo_grid')
+ call addfld ('Q_COEFF2' ,(/ 'lev' /), 'I', ' ' ,'Q_COEFF2' , gridname='geo_grid')
+ call addfld ('R_COEFF2' ,(/ 'lev' /), 'I', ' ' ,'R_COEFF2' , gridname='geo_grid')
+
+ call addfld ('P_COEFF' ,(/ 'lev' /), 'I', ' ' ,'P_COEFF' , gridname='geo_grid') ! final w/ poles
+ call addfld ('Q_COEFF' ,(/ 'lev' /), 'I', ' ' ,'Q_COEFF' , gridname='geo_grid') ! final w/ poles
+ call addfld ('R_COEFF' ,(/ 'lev' /), 'I', ' ' ,'R_COEFF' , gridname='geo_grid') ! final w/ poles
+
+ call addfld ('OP_SOLVE' ,(/ 'lev' /), 'I', ' ' ,'OP_SOLVE' , gridname='geo_grid')
+ call addfld ('op_dt' , (/ 'lev' /), 'I', ' ' ,'op_dt' , gridname='geo_grid')
+ call addfld ('amb_diff' , (/ 'lev' /), 'I', ' ' ,'amb_diff' , gridname='geo_grid')
+ call addfld ('dfield' , (/ 'lev' /), 'I', ' ' ,'dfield' , gridname='geo_grid')
+ call addfld ('dwind' , (/ 'lev' /), 'I', ' ' ,'dwind' , gridname='geo_grid')
+
+ call addfld ('OP_OUT' ,(/ 'lev' /), 'I', 'cm^3' ,'OPLUS (oplus_xport output)', gridname='geo_grid')
+ call addfld ('OPNM_OUT' ,(/ 'lev' /), 'I', 'cm^3' ,'OPNM_OUT' , gridname='geo_grid')
+ call addfld ('BMOD2' ,(/ 'lev' /), 'I', ' ' ,'BMOD2' , gridname='geo_grid')
+
+ call addfld ('OPLUS_FLUX', horiz_only , 'I', ' ','OPLUS_FLUX', gridname='geo_grid')
+ call addfld ('OPLUS_DIVB', horiz_only , 'I', ' ','OPLUS_DIVB', gridname='geo_grid')
+ call addfld ('OPLUS_BX' , horiz_only , 'I', ' ','OPLUS_BX' , gridname='geo_grid')
+ call addfld ('OPLUS_BY' , horiz_only , 'I', ' ','OPLUS_BY' , gridname='geo_grid')
+ call addfld ('OPLUS_BZ' , horiz_only , 'I', ' ','OPLUS_BZ' , gridname='geo_grid')
+ call addfld ('OPLUS_BMAG', horiz_only , 'I', ' ','OPLUS_BMAG', gridname='geo_grid')
+
+ allocate(zp(nlev)) ! log pressure (as in TIEGCM)
+ allocate(expz(nlev)) ! exp(-zp)
+ zp = nan
+ expz = nan
end subroutine oplus_init
!-----------------------------------------------------------------------
- subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
- mbar,ui,vi,wi,pmid,op_out,opnm_out, &
- i0,i1,j0,j1,nspltop,ispltop )
+ subroutine oplus_xport(tn, te, ti, un, vn, om, zg, o2, o1, n2, op_in, &
+ opnm_in, mbar, ui, vi, wi, pmid, op_out, opnm_out, &
+ i0, i1, j0, j1, nspltop, ispltop)
!
-! All input fields from dpie_coupling are in "TIEGCM" format, i.e.,
+! All input fields from dpie_coupling are in "TIEGCM" format, i.e.,
! longitude (-180->180), vertical (bot2top), and units (CGS).
!
- use edyn_mpi,only: mp_geo_halos,mp_pole_halos,setpoles
- use edyn_geogrid,only : glat, nlat, nlev
- use trsolv_mod, only : trsolv
+ use edyn_mpi, only: mp_geo_halos,mp_pole_halos,setpoles
+ use edyn_geogrid, only: glat, nlat, nlev
+ use trsolv_mod, only: trsolv
!
! Transport O+ ion.
! March-May, 2015 B.Foster: Adapted from TIEGCM (oplus.F) for WACCM-X.
@@ -218,66 +231,73 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
!
! Output:
!
- real(r8),intent(out) :: &
- op_out (nlev,i0:i1,j0:j1), & ! O+ output
+ real(r8),intent(out) :: &
+ op_out (nlev,i0:i1,j0:j1), & ! O+ output
opnm_out(nlev,i0:i1,j0:j1) ! O+ output at time n-1
!
! Local:
!
- integer :: i,j,k,lat,jm1,jp1,jm2,jp2,lat0,lat1
- real(r8),dimension(i0:i1,j0:j1) :: &
- opflux, & ! upward number flux of O+ (returned by sub oplus_flux)
- dvb ! divergence of B-field
+ integer :: i, j, k, lat, jm1, jp1, jm2, jp2, lat0, lat1
+ real(r8), dimension(i0:i1,j0:j1) :: &
+ opflux, & ! upward number flux of O+ (returned by sub oplus_flux)
+ dvb ! divergence of B-field
!
! Local inputs with added halo points in lat,lon:
-!
+!
real(r8),dimension(nlev,i0-2:i1+2,j0-2:j1+2),target :: op, opnm
- real(r8),dimension(nlev,i0-2:i1+2,j0-2:j1+2),target :: &
- tr ,& ! Reduced temperature (.5*(tn+ti))
- tp ,& ! Plasma temperature N(O+)*(te+ti)
- dj ,& ! diffusion coefficients
- bvel ,& ! bvel @ j = (B.U)*N(O+)
- diffj ,& ! (D/(H*DZ)*2.*TP+M*G/R)*N(O+)
- bdotdh_op ,& ! (b(h)*del(h))*phi
- bdotdh_opj ,& ! (b(h)*del(h))*phi
- bdotdh_diff ,& ! (b(h)*del(h))*phi
- opnm_smooth ! O+ at time-1, smoothed
-
- real(r8),dimension(nlev,i0:i1,j0:j1) :: & ! for saving to histories
- diag0,diag1,diag2,diag3,diag4,diag5,diag6,diag7,diag8,diag9,&
- diag10,diag11,diag12,diag13,diag14,diag15,diag16,diag17,&
- diag18,diag19,diag20,diag21,diag22,diag23,diag24,diag25,&
- diag26,diag27
- real(r8),dimension(nlev,i0:i1,j0-1:j1+1) :: hj ! scale height
+ real(r8),dimension(nlev,i0-2:i1+2,j0-2:j1+2),target :: &
+ tr ,& ! Reduced temperature (.5*(tn+ti))
+ tp ,& ! Plasma temperature N(O+)*(te+ti)
+ dj ,& ! diffusion coefficients
+ bvel ,& ! bvel @ j = (B.U)*N(O+)
+ diffj ,& ! (D/(H*DZ)*2.*TP+M*G/R)*N(O+)
+ bdotdh_op ,& ! (b(h)*del(h))*phi
+ bdotdh_opj ,& ! (b(h)*del(h))*phi
+ bdotdh_diff ,& ! (b(h)*del(h))*phi
+ opnm_smooth ! O+ at time-1, smoothed
+
+ real(r8), dimension(nlev,i0:i1,j0:j1) :: & ! for saving to histories
+ diag0, diag1, diag2, diag3, diag4, diag5, diag6, diag7, diag8, diag9, &
+ diag10, diag11, diag12, diag13, diag14, diag15, diag16, diag17, &
+ diag18, diag19, diag20, diag21, diag22, diag23, diag24, diag25, &
+ diag26, diag27
+ real(r8), dimension(nlev,i0:i1,j0-1:j1+1) :: hj ! scale height
real(r8) :: gmr,dtime,dtx2,dtx2inv
- real(r8),dimension(nlev,i0:i1) :: &
+ real(r8), dimension(nlev,i0:i1) :: &
bdzdvb_op, &
- hdz, &
tp1, &
+ divbz
+ real(r8),dimension(nlev,i0:i1,j0:j1) :: &
+ hdz, &
tphdz0, &
tphdz1, &
djint, &
- divbz, &
hdzmbz, &
hdzpbz, &
bdotu
+! for term analysis, lei, 07
+ real(r8),dimension(nlev,i0:i1,j0:j1) :: &
+ op_dt, & ! dn/dt
+ amb_diff,& ! ambipolar diffusion
+ dwind, & ! neutral wind transport
+ dfield ! electric field transport
!
! Arguments for tridiagonal solver trsolv (no halos):
- real(r8),dimension(nlev,i0:i1,j0:j1) :: &
- explicit,explicit_a,explicit_b,p_coeff,q_coeff,r_coeff
+ real(r8), dimension(nlev,i0:i1,j0:j1) :: &
+ explicit, explicit_a, explicit_b, p_coeff, q_coeff, r_coeff
- real(r8),dimension(i0:i1) :: ubca, ubcb ! O+ upper boundary
- real(r8),parameter :: one=1._r8
- logical :: calltrsolv
+ real(r8), dimension(i0:i1) :: ubca, ubcb ! O+ upper boundary
+ real(r8), parameter :: one=1._r8
+ logical :: calltrsolv
!
! Pointers for multiple-field calls (e.g., mp_geo_halos)
- integer :: nfields
- real(r8),allocatable :: polesign(:)
- type(array_ptr_type),allocatable :: ptrs(:)
+ integer :: nfields
+ real(r8), allocatable :: polesign(:)
+ type(array_ptr_type), allocatable :: ptrs(:)
- real(r8) :: zpmid(nlev), opfloor
- real(r8),parameter :: opmin=3000.0_r8
+ real(r8) :: zpmid(nlev), opfloor
+ real(r8), parameter :: opmin=3000.0_r8
!
! Execute:
!
@@ -286,19 +306,22 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
dtx2 = 2._r8*dtime
dtx2inv = 1._r8/dtx2
- if ((is_first_step().or.is_first_restart_step()).and.ispltop==1) then
- if (masterproc) write(iulog,"('oplus: shapiro=',es12.4,' dtsmooth=',es12.4,' dtsmooth_div2=',es12.4)") &
- shapiro_const,dtsmooth,dtsmooth_div2
- if (masterproc) write(iulog,"('oplus: shr_const_g=',f8.3)") shr_const_g
- endif
+ if ((is_first_step() .or. is_first_restart_step()) .and. ispltop==1) then
+ if (masterproc) then
+ write(iulog,"(a,es12.4,a,es12.4,a,es12.4)") &
+ 'oplus: shapiro=', shapiro_const, ', dtsmooth=', dtsmooth, &
+ ', dtsmooth_div2=', dtsmooth_div2
+ write(iulog,"('oplus: shr_const_g=',f8.3)") shr_const_g
+ end if
+ end if
!
- ! zp,expz are declared in edyn_geogrid.F90, and allocated in sub
+ ! zp,expz are declared in edyn_geogrid.F90, and allocated in sub
! set_geogrid (edyn_init.F90). pmid was passed in here (bot2top)
! from dpie_coupling.
!
! kbot is the k-index at the bottom of O+ transport calculations,
- ! corresponding to pressure pbot.
+ ! corresponding to pressure pbot.
!
if ((is_first_step().or.is_first_restart_step()).and.ispltop==1) then
kloop: do k=1,nlev
@@ -312,34 +335,35 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
expz(k) = exp(-zp(k))
enddo
if (debug.and.masterproc) then
- write(iulog,"('oplus: kbot=',i4,' pmid(kbot)=',es12.4,' zp(kbot)=',es12.4)") &
- kbot,pmid(kbot),zp(kbot)
- endif
- endif
+ write(iulog,"(a,i4,a,es12.4,a,es12.4)") &
+ 'oplus: kbot=', kbot, ', pmid(kbot)=', pmid(kbot), &
+ ', zp(kbot)=', zp(kbot)
+ end if
+ end if
if (kbot < 1) then
call endrun('oplus_xport: kbot is not set')
endif
- dzp = zp(nlev)-zp(nlev-1) ! use top 2 levels (typically dzp=0.5)
+ dzp = zp(nlev) - zp(nlev-1) ! use top 2 levels (typically dzp=0.5)
- if (debug.and.masterproc) then
- write(iulog,"('oplus: nlev=',i3,' zp (bot2top) =',/,(6es12.3))") nlev,zp
- write(iulog,"('oplus: nlev=',i3,' expz (bot2top) =',/,(6es12.3))") nlev,expz
- write(iulog,"('oplus: nlev=',i3,' dzp =',/,(6es12.3))") nlev,dzp
- endif
+ if (debug .and. masterproc) then
+ write(iulog,"('oplus: nlev=',i3,' zp (bot2top) =',/,(6es12.3))") nlev, zp
+ write(iulog,"('oplus: nlev=',i3,' expz (bot2top) =',/,(6es12.3))") nlev, expz
+ write(iulog,"('oplus: nlev=',i3,' dzp =',/,(6es12.3))") nlev, dzp
+ end if
!
! Set subdomain blocks from input (composition is in mmr):
!
!$omp parallel do private(i, j, k)
- do k=1,nlev
- do j=j0,j1
- do i=i0,i1
+ do k = 1, nlev
+ do j = j0, j1
+ do i = i0, i1
op(k,i,j) = op_in(k,i,j)
opnm(k,i,j) = opnm_in(k,i,j)
- enddo
- enddo
- enddo
+ end do
+ end do
+ end do
!
! Define halo points on inputs:
@@ -362,7 +386,7 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
!
! Set latitude halo points over the poles (this does not change the poles).
! (the 2nd halo over the poles will not actually be used (assuming lat loops
-! are lat=2,nlat-1), because jp1,jm1 will be the pole itself, and jp2,jm2
+! are lat=2,nlat-1), because jp1,jm1 will be the pole itself, and jp2,jm2
! will be the first halo over the pole)
!
! mp_pole_halos first arg:
@@ -382,29 +406,29 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
! Save input fields to WACCM histories. Sub savefld_waccm_switch converts
! fields from tiegcm-format to waccm-format before saving to waccm histories.
!
- call savefld_waccm_switch(tn(:,i0:i1,j0:j1),'OPLUS_TN',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(te(:,i0:i1,j0:j1),'OPLUS_TE',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(ti(:,i0:i1,j0:j1),'OPLUS_TI',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(un(:,i0:i1,j0:j1),'OPLUS_UN',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(vn(:,i0:i1,j0:j1),'OPLUS_VN',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(om(:,i0:i1,j0:j1),'OPLUS_OM',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(zg(:,i0:i1,j0:j1),'OPLUS_Z' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(o2(:,i0:i1,j0:j1),'OPLUS_O2',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(o1(:,i0:i1,j0:j1),'OPLUS_O1',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(n2(:,i0:i1,j0:j1),'OPLUS_N2',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(op(:,i0:i1,j0:j1),'OPLUS_OP',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(ui(:,i0:i1,j0:j1),'OPLUS_UI',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(vi(:,i0:i1,j0:j1),'OPLUS_VI',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(wi(:,i0:i1,j0:j1),'OPLUS_WI',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(mbar(:,i0:i1,j0:j1),'OPLUS_MBAR',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(opnm(:,i0:i1,j0:j1),'OPLUS_OPNM',nlev,i0,i1,j0,j1)
-!
-! Initialize output op_out with input op at 1:kbot-1, to retain values from
-! bottom of column up to kbot. This routine will change (transport) these
+ call savefld_waccm(tn(:,i0:i1,j0:j1),'OPLUS_TN',nlev,i0,i1,j0,j1)
+ call savefld_waccm(te(:,i0:i1,j0:j1),'OPLUS_TE',nlev,i0,i1,j0,j1)
+ call savefld_waccm(ti(:,i0:i1,j0:j1),'OPLUS_TI',nlev,i0,i1,j0,j1)
+ call savefld_waccm(un(:,i0:i1,j0:j1),'OPLUS_UN',nlev,i0,i1,j0,j1)
+ call savefld_waccm(vn(:,i0:i1,j0:j1),'OPLUS_VN',nlev,i0,i1,j0,j1)
+ call savefld_waccm(om(:,i0:i1,j0:j1),'OPLUS_OM',nlev,i0,i1,j0,j1)
+ call savefld_waccm(zg(:,i0:i1,j0:j1),'OPLUS_Z' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(o2(:,i0:i1,j0:j1),'OPLUS_O2',nlev,i0,i1,j0,j1)
+ call savefld_waccm(o1(:,i0:i1,j0:j1),'OPLUS_O1',nlev,i0,i1,j0,j1)
+ call savefld_waccm(n2(:,i0:i1,j0:j1),'OPLUS_N2',nlev,i0,i1,j0,j1)
+ call savefld_waccm(op(:,i0:i1,j0:j1),'OPLUS_OP',nlev,i0,i1,j0,j1)
+ call savefld_waccm(ui(:,i0:i1,j0:j1),'OPLUS_UI',nlev,i0,i1,j0,j1)
+ call savefld_waccm(vi(:,i0:i1,j0:j1),'OPLUS_VI',nlev,i0,i1,j0,j1)
+ call savefld_waccm(wi(:,i0:i1,j0:j1),'OPLUS_WI',nlev,i0,i1,j0,j1)
+ call savefld_waccm(mbar(:,i0:i1,j0:j1),'OPLUS_MBAR',nlev,i0,i1,j0,j1)
+ call savefld_waccm(opnm(:,i0:i1,j0:j1),'OPLUS_OPNM',nlev,i0,i1,j0,j1)
+!
+! Initialize output op_out with input op at 1:kbot-1, to retain values from
+! bottom of column up to kbot. This routine will change (transport) these
! outputs only from kbot to the top (nlev).
!
- op_out = 0._r8
- opnm_out = 0._r8
+ op_out = 0._r8
+ opnm_out = 0._r8
op_out (1:kbot-1,i0:i1,j0:j1) = op (1:kbot-1,i0:i1,j0:j1)
opnm_out(1:kbot-1,i0:i1,j0:j1) = opnm(1:kbot-1,i0:i1,j0:j1)
!
@@ -421,7 +445,7 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
!
! The solver will be called only if calltrsolv=true. It is sometimes
! set false when skipping parts of the code for debug purposes.
-!
+!
calltrsolv = .true.
tr = 0._r8
@@ -442,13 +466,13 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
jp2 = lat+2
!
! as of April, 2015, TIEGCM incorrectly uses te+ti instead of tn+ti
-! This has not been fixed in TIEGCM, because fixing it causes a tuning
+! This has not been fixed in TIEGCM, because fixing it causes a tuning
! problem (ask Hanli and Wenbin). For WACCM, it is correct as below.
! (see also tp)
!
!$omp parallel do private(i,k)
do i=i0,i1
-!
+!
! Reduced temperature (tpj in tiegcm):
! 'OPLUS_TR' (has constants at poles)
!
@@ -574,21 +598,21 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
!------------------------- End first latitude scan ---------------------
!
! Set pole values for opnm_smooth. Do this before savefld calls, so plots will
-! include the poles. All other fields in 1st lat scan got values at the poles
+! include the poles. All other fields in 1st lat scan got values at the poles
! via jm1,jp1 above.
!
call setpoles(opnm_smooth(kbot:nlev,i0:i1,j0:j1),kbot,nlev,i0,i1,j0,j1)
!
! Save to history file (exclude halo points)
!
- call savefld_waccm_switch(tr (:,i0:i1,j0:j1),'OPLUS_TR' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(dj (:,i0:i1,j0:j1),'OPLUS_DJ' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(hj (:,i0:i1,j0:j1),'OPLUS_HJ' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(bvel (:,i0:i1,j0:j1),'OPLUS_BVEL' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diffj(:,i0:i1,j0:j1),'OPLUS_DIFFJ',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag0(:,i0:i1,j0:j1),'OPLUS_TP0' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(tp (:,i0:i1,j0:j1),'OPLUS_TP1' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(opnm_smooth(:,i0:i1,j0:j1),'OPNM_SMOOTH',nlev,i0,i1,j0,j1)
+ call savefld_waccm(tr (:,i0:i1,j0:j1),'OPLUS_TR' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(dj (:,i0:i1,j0:j1),'OPLUS_DJ' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(hj (:,i0:i1,j0:j1),'OPLUS_HJ' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(bvel (:,i0:i1,j0:j1),'OPLUS_BVEL' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(diffj(:,i0:i1,j0:j1),'OPLUS_DIFFJ',nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag0(:,i0:i1,j0:j1),'OPLUS_TP0' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(tp (:,i0:i1,j0:j1),'OPLUS_TP1' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(opnm_smooth(:,i0:i1,j0:j1),'OPNM_SMOOTH',nlev,i0,i1,j0,j1)
!
! Set halo points where needed.
!
@@ -668,11 +692,11 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
! bdotdh_opj already has non-constant polar values, but bdotdh_op poles are zero.
! Sub setpoles will set poles to the zonal average of the latitude below each pole.
!
-! This may not be necessary, but do it for plotting:
+! This may not be necessary, but do it for plotting:
call setpoles(bdotdh_op(kbot:nlev,i0:i1,j0:j1),kbot,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(bdotdh_op (:,i0:i1,j0:j1),'BDOTDH_OP' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(bdotdh_opj(:,i0:i1,j0:j1),'BDOTDH_OPJ',nlev,i0,i1,j0,j1)
+ call savefld_waccm(bdotdh_op (:,i0:i1,j0:j1),'BDOTDH_OP' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(bdotdh_opj(:,i0:i1,j0:j1),'BDOTDH_OPJ',nlev,i0,i1,j0,j1)
!
! Note mp_geo_halos will overwrite jm1,jp1 that was set above.
! bdotdh_opj needs longitude halos for the bdotdh call below.
@@ -701,11 +725,15 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
q_coeff(1:nlev,i0:i1,j0:j1) = 0._r8
r_coeff(1:nlev,i0:i1,j0:j1) = 0._r8
bdotu = 0._r8
+ op_dt = 0._r8
+ amb_diff = 0._r8
+ dwind = 0._r8
+ dfield = 0._r8
diag1 = 0._r8 ; diag2 = 0._r8 ; diag3 = 0._r8 ; diag4 = 0._r8 ; diag5 = 0._r8
- diag6 = 0._r8 ; diag7 = 0._r8 ; diag8 = 0._r8 ; diag9 = 0._r8 ; diag10= 0._r8
- diag11 = 0._r8 ; diag12= 0._r8 ; diag13= 0._r8 ; diag14= 0._r8 ; diag15= 0._r8
- diag16 = 0._r8 ; diag17= 0._r8 ; diag18= 0._r8 ; diag19= 0._r8 ; diag20= 0._r8
+ diag6 = 0._r8 ; diag7 = 0._r8 ; diag8 = 0._r8 ; diag9 = 0._r8 ; diag10= 0._r8
+ diag11 = 0._r8 ; diag12= 0._r8 ; diag13= 0._r8 ; diag14= 0._r8 ; diag15= 0._r8
+ diag16 = 0._r8 ; diag17= 0._r8 ; diag18= 0._r8 ; diag19= 0._r8 ; diag20= 0._r8
diag21 = 0._r8 ; diag22= 0._r8 ; diag23= 0._r8 ; diag24= 0._r8 ; diag25= 0._r8
diag26 = 0._r8 ; diag27= 0._r8
@@ -752,13 +780,13 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
! Collect explicit terms:
! 'EXPLICIT0' (this will have poles set after third lat scan, before
! plotting. The poles will be constant in longitude, and
-! may differ structurally from adjacent latitudes.
+! may differ structurally from adjacent latitudes.
!
!$omp parallel do private( i, k )
do i=i0,i1
do k=kbot,nlev
- explicit(k,i,lat) = -one*(bdzdvb_op(k,i)+bdotdh_diff(k,i,lat)+ &
- bdotdh_op(k,i,lat))
+ explicit(k,i,lat) = -one*(bdzdvb_op(k,i)+bdotdh_diff(k,i,lat)+bdotdh_op(k,i,lat))
+ amb_diff(k,i,lat) = -explicit(k,i,lat)
enddo ! k=kbot,nlev
enddo ! i=i0,i1
diag2(:,i0:i1,lat) = explicit(:,i0:i1,lat) ! EXPLICIT0
@@ -783,7 +811,7 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
do k=kbot,nlev-1
!
! Original TIEGCM statement:
-! explicit(k,i) = explicit(k,i)+1._r8/(2._r8*re)* &
+! explicit(k,i) = explicit(k,i)+1._r8/(2._r8*Rearth)* &
! (1._r8/(cs(lat)*dlamda)*(bx(i,lat)* &
! (bvel(k,i+1,lat)-bvel(k,i-1,lat))+ &
! 0.5_r8*(ui(k,i,lat)+ui(k+1,i,lat))*bmod2(i,lat)**2* &
@@ -802,7 +830,7 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
(bvel(k,i+1,lat)-bvel(k,i-1,lat))+ &
0.5_r8*(ui(k,i,lat)+ui(k+1,i,lat))*bmod2(i,lat)**2* &
(op(k,i+1,lat)/bmod2(i+1,lat)**2- &
- op(k,i-1,lat)/bmod2(i-1,lat)**2))
+ op(k,i-1,lat)/bmod2(i-1,lat)**2))
!
! 'EXPLICITb'
!
@@ -815,10 +843,11 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
! 'EXPLICIT1'
! explicit will receive polar values after this latitude scan.
!
- explicit(k,i,lat) = explicit(k,i,lat)+1._r8/(2._r8*re)* &
- (1._r8/(cs(lat)*dlamda)*explicit_a(k,i,lat)+ &
+ explicit(k,i,lat) = explicit(k,i,lat)+1._r8/(2._r8*Rearth)* &
+ (1._r8/(cs(lat)*dlamda)*explicit_a(k,i,lat)+ &
1._r8/dphi*explicit_b(k,i,lat))
+ dfield(k,i,lat) = -(explicit(k,i,lat)+amb_diff(k,i,lat))
!
! explicit is bad at i=1,72,73,144 near south pole (npole appears to be ok)
! This does not appear to adversely affect the final O+ output, and TIEGCM
@@ -858,11 +887,11 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
do i=i0,i1
dvb(i,lat) = dvb(i,lat)/bz(i,lat)
enddo ! i=i0,i1
-
+
!$omp parallel do private( i, k )
do i=i0,i1
do k=kbot,nlev
- hdz(k,i) = 1._r8/(hj(k,i,lat)*dzp)
+ hdz(k,i,lat) = 1._r8/(hj(k,i,lat)*dzp)
tp1(k,i) = 0.5_r8*(ti(k,i,lat)+te(k,i,lat))
enddo ! k=kbot,nlev
enddo ! i=i0,i1
@@ -870,8 +899,8 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
!$omp parallel do private( i, k )
do i=i0,i1
do k=kbot,nlev-1
- tphdz1(k+1,i) = 2._r8*tp1(k+1,i)*(0.5_r8*(hdz(k,i)+hdz(k+1,i)))+gmr
- tphdz0(k+1,i) = 2._r8*tp1(k ,i)*(0.5_r8*(hdz(k,i)+hdz(k+1,i)))-gmr
+ tphdz1(k+1,i,lat) = 2._r8*tp1(k+1,i)*(0.5_r8*(hdz(k,i,lat)+hdz(k+1,i,lat)))+gmr
+ tphdz0(k+1,i,lat) = 2._r8*tp1(k ,i)*(0.5_r8*(hdz(k,i,lat)+hdz(k+1,i,lat)))-gmr
enddo ! k=kbot,nlev-1
enddo ! i=lon0,lon1
!
@@ -882,17 +911,17 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
!
!$omp parallel do private( i )
do i=i0,i1
- tphdz1(kbot,i) = 2._r8*tp1(kbot,i)* &
- (1.5_r8*hdz(kbot,i)-0.5_r8*hdz(kbot+1,i))+gmr
- tphdz1(nlev,i) = 2._r8*(2._r8*tp1(nlev-1,i)-tp1(nlev-2,i))* &
- (1.5_r8*hdz(nlev-1,i)-0.5_r8*hdz(nlev-2,i))+gmr
- tphdz0(kbot,i) = 2._r8*(2._r8*tp1(kbot,i)-tp1(kbot+1,i))* &
- (1.5_r8*hdz(kbot,i)-0.5_r8*hdz(kbot+1,i))-gmr
- tphdz0(nlev,i) = 2._r8*tp1(nlev-1,i)* &
- (1.5_r8*hdz(nlev-1,i)-0.5_r8*hdz(nlev-2,i))-gmr
+ tphdz1(kbot,i,lat) = 2._r8*tp1(kbot,i)* &
+ (1.5_r8*hdz(kbot,i,lat)-0.5_r8*hdz(kbot+1,i,lat))+gmr
+ tphdz1(nlev,i,lat) = 2._r8*(2._r8*tp1(nlev-1,i)-tp1(nlev-2,i))* &
+ (1.5_r8*hdz(nlev-1,i,lat)-0.5_r8*hdz(nlev-2,i,lat))+gmr
+ tphdz0(kbot,i,lat) = 2._r8*(2._r8*tp1(kbot,i)-tp1(kbot+1,i))* &
+ (1.5_r8*hdz(kbot,i,lat)-0.5_r8*hdz(kbot+1,i,lat))-gmr
+ tphdz0(nlev,i,lat) = 2._r8*tp1(nlev-1,i)* &
+ (1.5_r8*hdz(nlev-1,i,lat)-0.5_r8*hdz(nlev-2,i,lat))-gmr
enddo ! i=i0,i1
- diag4(:,i0:i1,lat) = tphdz0(:,i0:i1) ! TPHDZ0
- diag5(:,i0:i1,lat) = tphdz1(:,i0:i1) ! TPHDZ1
+ diag4(:,i0:i1,lat) = tphdz0(:,i0:i1,lat) ! TPHDZ0
+ diag5(:,i0:i1,lat) = tphdz1(:,i0:i1,lat) ! TPHDZ1
!
! djint = dj diffusion at interfaces:
! 'DJINT' (zero at the poles - messes up the plots - may give
@@ -901,12 +930,12 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
!$omp parallel do private( i, k )
do i=i0,i1
do k=kbot,nlev-1
- djint(k+1,i) = 0.5_r8*(dj(k,i,lat)+dj(k+1,i,lat))
- enddo
- djint(kbot,i) = (1.5_r8*dj(kbot ,i,lat)-0.5_r8*dj(kbot+1,i,lat))
- djint(nlev,i) = (1.5_r8*dj(nlev-1,i,lat)-0.5_r8*dj(nlev-2,i,lat))
+ djint(k+1,i,lat) = 0.5_r8*(dj(k,i,lat)+dj(k+1,i,lat))
+ enddo
+ djint(kbot,i,lat) = (1.5_r8*dj(kbot ,i,lat)-0.5_r8*dj(kbot+1,i,lat))
+ djint(nlev,i,lat) = (1.5_r8*dj(nlev-1,i,lat)-0.5_r8*dj(nlev-2,i,lat))
enddo ! i=i0,i1
- diag6(:,i0:i1,lat) = djint(:,i0:i1) ! DJINT
+ diag6(:,i0:i1,lat) = djint(:,i0:i1,lat) ! DJINT
!
! divbz = (DIV(B)+(DH*D*BZ)/(D*BZ)
! 'DIVBZ' Field appears as a line following mins along magnetic equator (zero at poles)
@@ -915,10 +944,10 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
!$omp parallel do private( i, k )
do i=i0,i1
do k=kbot,nlev
- divbz(k,i) = &
- dvb(i,lat)+1._r8/(re*dj(k,i,lat)*bz(i,lat)**2)*(bx(i,lat)/ &
- cs(lat)*(dj(k,i+1,lat)*bz(i+1,lat)-dj(k,i-1,lat)* &
- bz(i-1,lat))/(2._r8*dlamda)+by(i,lat)*(dj(k,i,jp1)* &
+ divbz(k,i) = &
+ dvb(i,lat)+1._r8/(Rearth*dj(k,i,lat)*bz(i,lat)**2)*(bx(i,lat)/ &
+ cs(lat)*(dj(k,i+1,lat)*bz(i+1,lat)-dj(k,i-1,lat)* &
+ bz(i-1,lat))/(2._r8*dlamda)+by(i,lat)*(dj(k,i,jp1)* &
bz(i,jp1)-dj(k,i,jm1)*bz(i,jm1))/(2._r8*dphi))
enddo ! k=kbot,nlev
enddo ! i=i0,i1
@@ -931,12 +960,12 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
!$omp parallel do private( i, k )
do i=i0,i1
do k=kbot,nlev
- hdzmbz(k,i) = (hdz(k,i)-0.5_r8*divbz(k,i))*bz(i,lat)**2
- hdzpbz(k,i) = (hdz(k,i)+0.5_r8*divbz(k,i))*bz(i,lat)**2
+ hdzmbz(k,i,lat) = (hdz(k,i,lat)-0.5_r8*divbz(k,i))*bz(i,lat)**2
+ hdzpbz(k,i,lat) = (hdz(k,i,lat)+0.5_r8*divbz(k,i))*bz(i,lat)**2
enddo ! k=kbot,nlev
enddo ! i=i0,i1
- diag8(:,i0:i1,lat) = hdzmbz(:,i0:i1) ! HDZMBZ
- diag9(:,i0:i1,lat) = hdzpbz(:,i0:i1) ! HDZPBZ
+ diag8(:,i0:i1,lat) = hdzmbz(:,i0:i1,lat) ! HDZMBZ
+ diag9(:,i0:i1,lat) = hdzpbz(:,i0:i1,lat) ! HDZPBZ
!
! Sum O+ at time n-1 to explicit terms: N(O+)/(2*DT) (N-1)
! 'EXPLICIT2' (zero at the poles)
@@ -948,6 +977,10 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
(opnm_smooth(k,i+2,lat)+opnm_smooth(k,i-2,lat)-4._r8* &
(opnm_smooth(k,i+1,lat)+opnm_smooth(k,i-1,lat))+6._r8* &
opnm_smooth(k,i,lat)))*dtx2inv
+ op_dt(k,i,lat) = -(opnm_smooth(k,i,lat)-shapiro_const* &
+ (opnm_smooth(k,i+2,lat)+opnm_smooth(k,i-2,lat)-4._r8* &
+ (opnm_smooth(k,i+1,lat)+opnm_smooth(k,i-1,lat))+6._r8* &
+ opnm_smooth(k,i,lat)))*dtx2inv
enddo ! k=kbot,nlev
enddo ! i=i0,i1
diag10(:,i0:i1,lat) = explicit(:,i0:i1,lat) ! EXPLICIT2
@@ -957,10 +990,10 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
!$omp parallel do private( i, k )
do i=i0,i1
do k=kbot,nlev-1
- p_coeff(k,i,lat) = hdzmbz(k,i)*djint(k ,i)*tphdz0(k ,i)
- q_coeff(k,i,lat) = -(hdzpbz(k,i)*djint(k+1,i)*tphdz0(k+1,i)+ &
- hdzmbz(k,i)*djint(k ,i)*tphdz1(k ,i))
- r_coeff(k,i,lat) = hdzpbz(k,i)*djint(k+1,i)*tphdz1(k+1,i)
+ p_coeff(k,i,lat) = hdzmbz(k,i,lat)*djint(k ,i,lat)*tphdz0(k ,i,lat)
+ q_coeff(k,i,lat) = -(hdzpbz(k,i,lat)*djint(k+1,i,lat)*tphdz0(k+1,i,lat)+ &
+ hdzmbz(k,i,lat)*djint(k ,i,lat)*tphdz1(k ,i,lat))
+ r_coeff(k,i,lat) = hdzpbz(k,i,lat)*djint(k+1,i,lat)*tphdz1(k+1,i,lat)
enddo ! k=kbot,nlev-1
enddo ! i=i0,i1
@@ -976,11 +1009,11 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
!$omp parallel do private( i, k )
do i=i0,i1
do k=kbot,nlev
- bdotu(k,i) = bx(i,lat)*un(k,i,lat)+by(i,lat)*vn(k,i,lat)+ &
+ bdotu(k,i,lat) = bx(i,lat)*un(k,i,lat)+by(i,lat)*vn(k,i,lat)+ &
hj(k,i,lat)*bz(i,lat)*om(k,i,lat)
enddo ! k=kbot,nlev
enddo ! i=i0,i1
- diag14(:,i0:i1,lat) = bdotu(:,i0:i1) ! BDOTU
+ diag14(:,i0:i1,lat) = bdotu(:,i0:i1,lat) ! BDOTU
!
! Continue coefficients with vertical ion drift:
! wi is converted from interfaces to midpoints (first use of wi).
@@ -990,13 +1023,13 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
do i=i0,i1
do k=kbot,nlev-2
- p_coeff(k+1,i,lat) = p_coeff(k+1,i,lat)+(bz(i,lat)*bdotu(k,i)+ &
- 0.5_r8*(wi(k+1,i,lat)+wi(k+2,i,lat)))*0.5_r8*hdz(k+1,i)
+ p_coeff(k+1,i,lat) = p_coeff(k+1,i,lat)+(bz(i,lat)*bdotu(k,i,lat)+ &
+ 0.5_r8*(wi(k+1,i,lat)+wi(k+2,i,lat)))*0.5_r8*hdz(k+1,i,lat)
- q_coeff(k,i,lat) = q_coeff(k,i,lat)-0.5_r8*(wi(k,i,lat)+wi(k+1,i,lat))*6._r8/re
+ q_coeff(k,i,lat) = q_coeff(k,i,lat)-0.5_r8*(wi(k,i,lat)+wi(k+1,i,lat))*6._r8/Rearth
- r_coeff(k,i,lat) = r_coeff(k,i,lat)-(bz(i,lat)*bdotu(k+1,i)+ &
- 0.5_r8*(wi(k,i,lat)+wi(k+1,i,lat)))*0.5_r8*hdz(k,i)
+ r_coeff(k,i,lat) = r_coeff(k,i,lat)-(bz(i,lat)*bdotu(k+1,i,lat)+ &
+ 0.5_r8*(wi(k,i,lat)+wi(k+1,i,lat)))*0.5_r8*hdz(k,i,lat)
enddo ! k=kbot,nlev-1
enddo ! i=i0,i1
@@ -1013,16 +1046,16 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
!
!$omp parallel do private( i )
do i=i0,i1
- p_coeff(kbot,i,lat) = p_coeff(kbot,i,lat)+(bz(i,lat)* & ! reset p_coeff lbc
- (2._r8*bdotu(kbot,i)-bdotu(kbot+1,i))+ &
- 0.5_r8*(wi(kbot,i,lat)+wi(kbot+1,i,lat)))*0.5_r8*hdz(kbot,i)
+ p_coeff(kbot,i,lat) = p_coeff(kbot,i,lat)+(bz(i,lat)* & ! reset p_coeff lbc
+ (2._r8*bdotu(kbot,i,lat)-bdotu(kbot+1,i,lat))+ &
+ 0.5_r8*(wi(kbot,i,lat)+wi(kbot+1,i,lat)))*0.5_r8*hdz(kbot,i,lat)
- q_coeff(nlev-1,i,lat) = q_coeff(nlev-1,i,lat)- &
- 0.5_r8*(wi(nlev,i,lat)+wi(nlev-1,i,lat))*6._r8/re
+ q_coeff(nlev-1,i,lat) = q_coeff(nlev-1,i,lat)- &
+ 0.5_r8*(wi(nlev,i,lat)+wi(nlev-1,i,lat))*6._r8/Rearth
- r_coeff(nlev-1,i,lat) = r_coeff(nlev-1,i,lat)-(bz(i,lat)* &
- (2._r8*bdotu(nlev-1,i)-bdotu(nlev-2,i))+ &
- 0.5_r8*(wi(nlev,i,lat)+wi(nlev-1,i,lat)))*0.5_r8*hdz(nlev-1,i)
+ r_coeff(nlev-1,i,lat) = r_coeff(nlev-1,i,lat)-(bz(i,lat)* &
+ (2._r8*bdotu(nlev-1,i,lat)-bdotu(nlev-2,i,lat))+ &
+ 0.5_r8*(wi(nlev,i,lat)+wi(nlev-1,i,lat)))*0.5_r8*hdz(nlev-1,i,lat)
enddo ! i=i0,i1
!
! Extrapolate to top level (tiegcm does not do this):
@@ -1042,7 +1075,7 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
!$omp parallel do private( i, k )
do i=i0,i1
do k=kbot,nlev
- q_coeff(k,i,lat) = q_coeff(k,i,lat)-bdotu(k,i)*dvb(i,lat)*bz(i,lat)-dtx2inv
+ q_coeff(k,i,lat) = q_coeff(k,i,lat)-bdotu(k,i,lat)*dvb(i,lat)*bz(i,lat)-dtx2inv
enddo ! k=kbot,nlev-1
enddo ! i=i0,i1
!
@@ -1053,8 +1086,8 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
!$omp parallel do private( i )
do i=i0,i1
ubca(i) = 0._r8
- ubcb(i) = -bz(i,lat)**2*djint(nlev,i)*tphdz0(nlev,i)-ubca(i)
- ubca(i) = -bz(i,lat)**2*djint(nlev,i)*tphdz1(nlev,i)+ubca(i)
+ ubcb(i) = -bz(i,lat)**2*djint(nlev,i,lat)*tphdz0(nlev,i,lat)-ubca(i)
+ ubca(i) = -bz(i,lat)**2*djint(nlev,i,lat)*tphdz1(nlev,i,lat)+ubca(i)
!
! Q = Q+B/A*R
q_coeff(nlev,i,lat) = q_coeff(nlev,i,lat)+ubcb(i)/ubca(i)* &
@@ -1094,7 +1127,7 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
call setpoles(diag6 (kbot:nlev,i0:i1,j0:j1),kbot,nlev,i0,i1,j0,j1) ! DJINT
!
! All tasks have global 2d bmod2.
-! bmod2 was set by sub magfield (getapex.F90)
+! bmod2 was set by sub magfield (getapex.F90)
! allocate(bmod2(0:nlonp1,jspole-1:jnpole+1))
! Copy bmod2 poles to diagnostic array.
!
@@ -1105,7 +1138,7 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
diag25(k,i,j1) = bmod2(i,j1)
enddo
enddo
- call savefld_waccm_switch(diag25,'BMOD2' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag25,'BMOD2' ,nlev,i0,i1,j0,j1)
!
! Assign polar values to coefficients for trsolv.
!
@@ -1117,7 +1150,7 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
! Call solver, defining O+ output op_out:
!
! Its best not to call this unless the coefficients and explicit terms
-! have been properly set in the third latitude scan above (e.g., during
+! have been properly set in the third latitude scan above (e.g., during
! "goto 300" debugging above, where the coeffs may not have been calculated).
!
if (calltrsolv) then
@@ -1132,9 +1165,56 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
op_out (kbot:nlev,i0:i1,lat), &
kbot,nlev,kbot,nlev,i0,i1 )
+
+
+
+! for term analysis
+ do k=kbot,nlev-1
+! diffusion
+ amb_diff(k,i0:i1,lat) = amb_diff(k,i0:i1,lat) + &
+ hdzmbz(k,i0:i1,lat)*djint(k, i0:i1,lat)*tphdz0(k, i0:i1,lat)* op_out(k-1,i0:i1,lat) &
+ -(hdzpbz(k,i0:i1,lat)*djint(k+1,i0:i1,lat)*tphdz0(k+1,i0:i1,lat)+ &
+ hdzmbz(k,i0:i1,lat)*djint(k ,i0:i1,lat)*tphdz1(k ,i0:i1,lat))* op_out(k,i0:i1,lat) &
+ +hdzpbz(k,i0:i1,lat)*djint(k+1,i0:i1,lat)*tphdz1(k+1,i0:i1,lat)* op_out(k+1,i0:i1,lat)
+
+! electric field transport
+ if (k <= nlev-2) then
+ dfield(k,i0:i1,lat) = dfield(k,i0:i1,lat)+ &
+ (0.5_r8*(wi(k+1,i0:i1,lat)+wi(k+2,i0:i1,lat)))* &
+ 0.5_r8*hdz(k+1,i0:i1,lat)* op_out(k-1,i0:i1,lat) &
+ -0.5_r8*(wi(k,i0:i1,lat)+wi(k+1,i0:i1,lat))* &
+ 6._r8/Rearth* op_out(k,i0:i1,lat) &
+ -(0.5_r8*(wi(k,i0:i1,lat)+wi(k+1,i0:i1,lat)))*0.5_r8*hdz(k,i0:i1,lat) &
+ * op_out(k+1,i0:i1,lat)
+ else
+ dfield(k,i0:i1,lat) = dfield(k,i0:i1,lat)+ &
+ (1*(wi(k+1,i0:i1,lat)))* &
+ 0.5_r8*hdz(k+1,i0:i1,lat)* op_out(k-1,i0:i1,lat) &
+ -0.5_r8*(wi(k,i0:i1,lat)+wi(k+1,i0:i1,lat))* &
+ 6._r8/Rearth* op_out(k,i0:i1,lat) &
+ -(0.5_r8*(wi(k,i0:i1,lat)+wi(k+1,i0:i1,lat)))*0.5_r8*hdz(k,i0:i1,lat) &
+ * op_out(k+1,i0:i1,lat)
+ endif
+! wind transport
+ dwind(k,i0:i1,lat)= &
+ (bz(i0:i1,lat)*bdotu(k,i0:i1,lat))* 0.5_r8*hdz(k+1,i0:i1,lat) * op_out(k-1,i0:i1,lat) &
+ -bdotu(k,i0:i1,lat)*dvb(i0:i1,lat)*bz(i0:i1,lat)* op_out(k,i0:i1,lat) &
+ -(bz(i0:i1,lat)*bdotu(k+1,i0:i1,lat))*0.5_r8*hdz(k,i0:i1,lat)* op_out(k+1,i0:i1,lat)
+
+! dO+/dt
+ op_dt(k,i0:i1,lat)= dtx2inv* op_out(k,i0:i1,lat) + op_dt(k,i0:i1,lat)
+!
+ enddo ! k=lev0+1,lev1-1
+
+
enddo
- call savefld_waccm_switch(op_out,'OP_SOLVE',nlev,i0,i1,j0,j1)
+ call savefld_waccm(op_out,'OP_SOLVE',nlev,i0,i1,j0,j1)
+
+ call savefld_waccm(op_dt,'op_dt',nlev,i0,i1,j0,j1)
+ call savefld_waccm(amb_diff,'amb_diff',nlev,i0,i1,j0,j1)
+ call savefld_waccm(dfield,'dfield',nlev,i0,i1,j0,j1)
+ call savefld_waccm(dwind,'dwind',nlev,i0,i1,j0,j1)
else ! trsolv not called (debug only)
op_out (kbot:nlev,i0:i1,j0:j1) = op (kbot:nlev,i0:i1,j0:j1)
@@ -1143,38 +1223,38 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
!
! Write fields from third latitude scan to waccm history:
!
- call savefld_waccm_switch(explicit,'EXPLICIT',nlev,i0,i1,j0,j1) ! non-zero at ubc
- call savefld_waccm_switch(p_coeff ,'P_COEFF' ,nlev,i0,i1,j0,j1) ! zero at ubc?
- call savefld_waccm_switch(q_coeff ,'Q_COEFF' ,nlev,i0,i1,j0,j1) ! non-zero at ubc
- call savefld_waccm_switch(r_coeff ,'R_COEFF' ,nlev,i0,i1,j0,j1) ! is set zero at ubc
-
- call savefld_waccm_switch(bdotdh_diff(:,i0:i1,j0:j1), 'BDOTDH_DIFF',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag1 ,'BDZDVB_OP',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag2 ,'EXPLICIT0',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag26,'EXPLICITa',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag27,'EXPLICITb',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag3 ,'EXPLICIT1',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag4 ,'TPHDZ0' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag5 ,'TPHDZ1' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag6 ,'DJINT' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag7 ,'DIVBZ' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag8 ,'HDZMBZ' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag9 ,'HDZPBZ' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag10,'EXPLICIT2',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag11,'P_COEFF0' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag12,'Q_COEFF0' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag13,'R_COEFF0' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag14,'BDOTU' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag15,'P_COEFF1' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag16,'Q_COEFF1' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag17,'R_COEFF1' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag18,'EXPLICIT3',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag19,'P_COEFF2' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag20,'Q_COEFF2' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag21,'R_COEFF2' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag22,'P_COEFF0a',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag23,'Q_COEFF0a',nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(diag24,'R_COEFF0a',nlev,i0,i1,j0,j1)
+ call savefld_waccm(explicit,'EXPLICIT',nlev,i0,i1,j0,j1) ! non-zero at ubc
+ call savefld_waccm(p_coeff ,'P_COEFF' ,nlev,i0,i1,j0,j1) ! zero at ubc?
+ call savefld_waccm(q_coeff ,'Q_COEFF' ,nlev,i0,i1,j0,j1) ! non-zero at ubc
+ call savefld_waccm(r_coeff ,'R_COEFF' ,nlev,i0,i1,j0,j1) ! is set zero at ubc
+
+ call savefld_waccm(bdotdh_diff(:,i0:i1,j0:j1), 'BDOTDH_DIFF',nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag1 ,'BDZDVB_OP',nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag2 ,'EXPLICIT0',nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag26,'EXPLICITa',nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag27,'EXPLICITb',nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag3 ,'EXPLICIT1',nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag4 ,'TPHDZ0' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag5 ,'TPHDZ1' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag6 ,'DJINT' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag7 ,'DIVBZ' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag8 ,'HDZMBZ' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag9 ,'HDZPBZ' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag10,'EXPLICIT2',nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag11,'P_COEFF0' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag12,'Q_COEFF0' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag13,'R_COEFF0' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag14,'BDOTU' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag15,'P_COEFF1' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag16,'Q_COEFF1' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag17,'R_COEFF1' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag18,'EXPLICIT3',nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag19,'P_COEFF2' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag20,'Q_COEFF2' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag21,'R_COEFF2' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag22,'P_COEFF0a',nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag23,'Q_COEFF0a',nlev,i0,i1,j0,j1)
+ call savefld_waccm(diag24,'R_COEFF0a',nlev,i0,i1,j0,j1)
!
!------------------------------------------------------------------------
!
@@ -1185,10 +1265,9 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
!
if (ring_polar_filter) then
call ringfilter_op(op_out(kbot:nlev,i0:i1,j0:j1),kbot,nlev,i0,i1,j0,j1)
- else
+ else
call filter2_op(op_out(kbot:nlev,i0:i1,j0:j1),kbot,nlev,i0,i1,j0,j1)
endif
-
!
!----------------------- Begin fourth latitude scan ---------------------
!
@@ -1210,7 +1289,7 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
enddo ! i=lon0,lon1
!
! Enforce O+ minimum if enforce_opfloor is true.
-! Opfloor is Stan's "smooth floor" (product of two Gaussians,
+! Opfloor is Stan's "smooth floor" (product of two Gaussians,
! dependent on latitude and pressure level) (opmin=3000.0):
!
if (enforce_floor) then
@@ -1230,8 +1309,8 @@ subroutine oplus_xport(tn,te,ti,un,vn,om,zg,o2,o1,n2,op_in,opnm_in, &
!
! Save O+ output to WACCM history (cm^3):
- call savefld_waccm_switch(op_out (:,i0:i1,j0:j1),'OP_OUT' ,nlev,i0,i1,j0,j1)
- call savefld_waccm_switch(opnm_out(:,i0:i1,j0:j1),'OPNM_OUT',nlev,i0,i1,j0,j1)
+ call savefld_waccm(op_out (:,i0:i1,j0:j1),'OP_OUT' ,nlev,i0,i1,j0,j1)
+ call savefld_waccm(opnm_out(:,i0:i1,j0:j1),'OPNM_OUT',nlev,i0,i1,j0,j1)
end subroutine oplus_xport
!-----------------------------------------------------------------------
subroutine oplus_flux(opflux,lon0,lon1,lat0,lat1)
@@ -1254,9 +1333,9 @@ subroutine oplus_flux(opflux,lon0,lon1,lat0,lat1)
phin = -2.0e8_r8, &
! phin = 0._r8, &
ppolar = 0._r8
- real(r8) :: a(lon0:lon1)
- real(r8) :: fed(lon0:lon1)
- real(r8) :: fen(lon0:lon1)
+ real(r8) :: a(lon0:lon1)
+ real(r8) :: fed(lon0:lon1)
+ real(r8) :: fen(lon0:lon1)
!
! Set some paramaters:
pi = 4._r8*atan(1._r8)
@@ -1324,7 +1403,7 @@ subroutine get_zenith(chi,i0,i1,j0,j1)
do i=i0,i1
call zenith(calday,(/dtr*glat(j)/),(/dtr*glon(i)/),cosZenAngR,1)
chi(i,j) = acos(cosZenAngR(1))
- enddo
+ enddo
enddo
end subroutine get_zenith
!-----------------------------------------------------------------------
@@ -1335,11 +1414,10 @@ subroutine divb(dvb,i0,i1,j0,j1)
!
! Args:
integer,intent(in) :: i0,i1,j0,j1
- real(r8),intent(out) :: dvb(i0:i1,j0:j1)
+ real(r8),intent(out) :: dvb(i0:i1,j0:j1)
!
! Local:
integer :: i,j,jm1,jp1
- real(r8),parameter :: re = 6.37122e8_r8 ! earth radius (cm)
dvb = 0._r8
@@ -1348,7 +1426,7 @@ subroutine divb(dvb,i0,i1,j0,j1)
call savefld_waccm(bz(i0:i1,j0:j1),'OPLUS_BZ',1,i0,i1,j0,j1)
call savefld_waccm(bmod2(i0:i1,j0:j1),'OPLUS_BMAG',1,i0,i1,j0,j1)
!
-! Note re is in cm.
+! Note Rearth is in cm.
! (bx,by,bz are set by sub magfield (getapex.F90))
! (dphi,dlamda, and cs are set by sub set_geogrid (edyn_init.F90))
!
@@ -1358,7 +1436,7 @@ subroutine divb(dvb,i0,i1,j0,j1)
do i=i0,i1
dvb(i,j) = (((bx(i+1,j)-bx(i-1,j))/(2._r8*dlamda)+ &
(cs(jp1)*by(i,jp1)-cs(jm1)*by(i,jm1))/(2._r8*dphi))/ &
- cs(j)+2._r8*bz(i,j))/re
+ cs(j)+2._r8*bz(i,j))/Rearth
enddo ! i=i0,i1
enddo ! j=j0,j1
end subroutine divb
@@ -1380,7 +1458,7 @@ subroutine rrk(t,rms,ps1,ps2,n2,tr,ans,lon0,lon1,lev0,lev1)
!$omp parallel do private(i,k)
do i=lon0,lon1
do k=lev0,lev1-1
-
+
ans(k,i) = 1.42e17_r8*boltz*t(k,i)/(p0*expz(k)*.5_r8*(rms(k,i)+ &
rms(k+1,i))*(ps2(k,i)*rmassinv_o1*sqrt(tr(k,i))*(1._r8-0.064_r8* &
log10(tr(k,i)))**2*colfac+18.6_r8*n2(k,i)*rmassinv_n2+18.1_r8* &
@@ -1390,9 +1468,9 @@ subroutine rrk(t,rms,ps1,ps2,n2,tr,ans,lon0,lon1,lev0,lev1)
ans(lev1,i) = ans(lev1-1,i) ! should not need to do this
enddo ! i=lon0,lon1
-!
+!
! Cap ambipolar diffusion coefficient in ans.
-!
+!
! acceptable range for limiter 1.e8 to 1.e9 ...
where( ans(:,:) > adiff_limiter )
ans(:,:) = adiff_limiter
@@ -1404,7 +1482,7 @@ subroutine diffus(tp,en,hj,ans,i0,i1,lev0,lev1,lat)
! kbot,nlev
! Evaluates ans = (d/(h*dz)*tp+m*g/r)*en
! Remember: "bot2top": lev0=kbot=bottom, lev1=nlev=top
-!
+!
! Args:
integer :: i0,i1,lev0,lev1,lat
real(r8),dimension(lev0:lev1,i0:i1),intent(in) :: tp,en,hj
@@ -1463,8 +1541,8 @@ subroutine bdotdh(phijm1,phij,phijp1,ans,lon0,lon1,lev0,lev1,lat)
!$omp parallel do private( i, k )
do i=lon0,lon1
do k=lev0,lev1
- ans(k,i) = 1._r8/re*(bx(i,lat)/(cs(lat)*2._r8*dlamda)* &
- (phij(k,i+1)-phij(k,i-1))+by(i,lat)* &
+ ans(k,i) = 1._r8/Rearth*(bx(i,lat)/(cs(lat)*2._r8*dlamda)* &
+ (phij(k,i+1)-phij(k,i-1))+by(i,lat)* &
(phijp1(k,i)-phijm1(k,i))/(2._r8*dphi))
enddo ! k=lev0,lev1
enddo ! i=lon0,lon1
@@ -1571,7 +1649,7 @@ subroutine filter1_op(f,k0,k1,i0,i1,j0,j1)
do k=k0,k1
fik(i,k-k0+1) = fkij(1)%ptr(k-k0+1,i,j)
enddo
- enddo
+ enddo
!
! Remove wave numbers > kut(lat):
!
@@ -1650,7 +1728,7 @@ subroutine filter2_op(f,k0,k1,i0,i1,j0,j1)
do k=k0,k1
fik(i,k-k0+1) = fkij(1)%ptr(k-k0+1,i,j)
enddo
- enddo
+ enddo
!
! Remove wave numbers > kut(lat):
!
@@ -1681,6 +1759,7 @@ subroutine filter2_op(f,k0,k1,i0,i1,j0,j1)
enddo
deallocate(fkij(1)%ptr)
end subroutine filter2_op
+!-----------------------------------------------------------------------
!-----------------------------------------------------------------------
subroutine ringfilter_op(f,k0,k1,i0,i1,j0,j1)
use filter_module,only: ringfilter
@@ -1728,7 +1807,7 @@ subroutine ringfilter_op(f,k0,k1,i0,i1,j0,j1)
do k=k0,k1
fik(i,k-k0+1) = fkij(1)%ptr(k-k0+1,i,j)
enddo
- enddo
+ enddo
!
! Remove wave numbers > kut(lat):
!
diff --git a/src/ionosphere/waccmx/regridder.F90 b/src/ionosphere/waccmx/regridder.F90
new file mode 100644
index 0000000000..011685ae36
--- /dev/null
+++ b/src/ionosphere/waccmx/regridder.F90
@@ -0,0 +1,137 @@
+!-------------------------------------------------------------------------------
+! Utility module for mapping fields between CAM physics, oplus transport, and
+! geomagnetic grids
+!-------------------------------------------------------------------------------
+module regridder
+ use shr_kind_mod,only: r8 => shr_kind_r8 ! 8-byte reals
+ use cam_abortutils, only: endrun
+
+ use edyn_mpi, only: mlon0, mlon1, mlat0, mlat1, mlev0, mlev1
+ use edyn_mpi, only: lon0, lon1, lat0, lat1, lev0, lev1
+
+ use edyn_esmf, only: edyn_esmf_set3d_phys, edyn_esmf_regrid_phys2mag
+ use edyn_esmf, only: edyn_esmf_regrid_phys2geo, edyn_esmf_get_3dfield
+ use edyn_esmf, only: edyn_esmf_set2d_phys, edyn_esmf_get_2dfield, edyn_esmf_get_2dphysfield, edyn_esmf_set3d_geo
+ use edyn_esmf, only: edyn_esmf_regrid_geo2mag, edyn_esmf_regrid_geo2phys
+ use edyn_esmf, only: edyn_esmf_set2d_geo, edyn_esmf_set3d_mag, edyn_esmf_regrid_mag2geo
+ use edyn_esmf, only: phys_3dfld, phys_2dfld
+ use edyn_esmf, only: geo_3dfld, geo_2dfld
+ use edyn_esmf, only: mag_des_3dfld, mag_des_2dfld
+ use edyn_esmf, only: mag_src_3dfld, mag_src_2dfld
+ use edyn_esmf, only: edyn_esmf_set2d_mag, edyn_esmf_regrid_mag2phys, edyn_esmf_get_1dfield
+
+ implicit none
+
+contains
+
+ !-----------------------------------------------------------------------------
+ ! map horizontal 2D fields from magnetic grid to physics grid
+ !-----------------------------------------------------------------------------
+ subroutine regrid_mag2phys_2d(magfld, physfld, cols, cole)
+ integer, intent(in) :: cols, cole
+ real(r8), intent(in) :: magfld(mlon0:mlon1,mlat0:mlat1)
+ real(r8), intent(out) :: physfld(cols:cole)
+
+ call edyn_esmf_set2d_mag( mag_src_2dfld, magfld, mlon0, mlon1, mlat0, mlat1 )
+ call edyn_esmf_regrid_mag2phys( mag_src_2dfld, phys_2dfld, 2)
+ call edyn_esmf_get_1dfield(phys_2dfld, physfld, cols, cole )
+
+ end subroutine regrid_mag2phys_2d
+
+ !-----------------------------------------------------------------------------
+ ! map 3D fields from magnetic grid to oplus grid
+ !-----------------------------------------------------------------------------
+ subroutine regrid_mag2geo_3d(magfld,geofld)
+ real(r8), intent(in) :: magfld(mlon0:mlon1,mlat0:mlat1,mlev0:mlev1)
+ real(r8), intent(out) :: geofld(lon0:lon1,lat0:lat1,lev0:lev1)
+
+ call edyn_esmf_set3d_mag( mag_src_3dfld, magfld, mlon0, mlon1, mlat0, mlat1, mlev0, mlev1 )
+ call edyn_esmf_regrid_mag2geo(mag_src_3dfld, geo_3dfld, 3)
+ call edyn_esmf_get_3dfield(geo_3dfld, geofld, lon0, lon1, lat0, lat1, lev0, lev1)
+
+ end subroutine regrid_mag2geo_3d
+
+ !-----------------------------------------------------------------------------
+ ! map horizontal 2D fields from physics grid to oplus grid
+ !-----------------------------------------------------------------------------
+ subroutine regrid_phys2geo_2d( physfld, geofld, cols, cole )
+ integer, intent(in) :: cols, cole
+ real(r8), intent(in) :: physfld(cols:cole)
+ real(r8), intent(out) :: geofld(lon0:lon1,lat0:lat1)
+
+ call edyn_esmf_set2d_phys( phys_2dfld , physfld, cols, cole)
+ call edyn_esmf_regrid_phys2geo(phys_2dfld, geo_2dfld, 2)
+ call edyn_esmf_get_2dfield(geo_2dfld, geofld, lon0, lon1, lat0, lat1 )
+
+ end subroutine regrid_phys2geo_2d
+
+ !-----------------------------------------------------------------------------
+ ! map 3D fields from physics grid to oplus grid
+ !-----------------------------------------------------------------------------
+ subroutine regrid_phys2geo_3d( physfld, geofld, plev, cols, cole )
+ integer, intent(in) :: plev, cols, cole
+ real(r8), intent(in) :: physfld(1:plev,cols:cole)
+ real(r8), intent(out) :: geofld(lon0:lon1,lat0:lat1,lev0:lev1)
+
+ call edyn_esmf_set3d_phys( phys_3dfld, physfld, 1, plev, cols, cole)
+ call edyn_esmf_regrid_phys2geo(phys_3dfld, geo_3dfld, 3)
+ call edyn_esmf_get_3dfield(geo_3dfld, geofld, lon0, lon1, lat0, lat1, lev0, lev1 )
+
+ end subroutine regrid_phys2geo_3d
+
+ !-----------------------------------------------------------------------------
+ ! map 3D fields from oplus grid to magnetic grid
+ !-----------------------------------------------------------------------------
+ subroutine regrid_geo2mag_3d( geofld, magfld )
+ real(r8), intent(in) :: geofld(lon0:lon1,lat0:lat1,lev0:lev1)
+ real(r8), intent(out) :: magfld(mlon0:mlon1,mlat0:mlat1,mlev0:mlev1)
+
+ call edyn_esmf_set3d_geo( geo_3dfld, geofld, lon0, lon1, lat0, lat1, lev0, lev1 )
+ call edyn_esmf_regrid_geo2mag(geo_3dfld, mag_des_3dfld, 3)
+ call edyn_esmf_get_3dfield(mag_des_3dfld, magfld, mlon0, mlon1, mlat0, mlat1, mlev0, mlev1 )
+
+ end subroutine regrid_geo2mag_3d
+
+ !-----------------------------------------------------------------------------
+ ! map horizontal 2D fields from oplus grid to magnetic grid
+ !-----------------------------------------------------------------------------
+ subroutine regrid_geo2mag_2d( geofld, magfld )
+ real(r8), intent(in) :: geofld(lon0:lon1,lat0:lat1)
+ real(r8), intent(out) :: magfld(mlon0:mlon1,mlat0:mlat1)
+
+ call edyn_esmf_set2d_geo( geo_2dfld, geofld, lon0, lon1, lat0, lat1 )
+ call edyn_esmf_regrid_geo2mag(geo_2dfld, mag_des_2dfld, 2)
+ call edyn_esmf_get_2dfield(mag_des_2dfld, magfld, mlon0, mlon1, mlat0, mlat1 )
+
+ end subroutine regrid_geo2mag_2d
+
+ !-----------------------------------------------------------------------------
+ ! map 3D fields from oplus grid to physics grid
+ !-----------------------------------------------------------------------------
+ subroutine regrid_geo2phys_3d( geofld, physfld, plev, cols, cole )
+ integer, intent(in) :: plev, cols, cole
+ real(r8), intent(in) :: geofld(lon0:lon1,lat0:lat1,lev0:lev1)
+ real(r8), intent(out) :: physfld(1:plev,cols:cole)
+
+
+ call edyn_esmf_set3d_geo( geo_3dfld, geofld, lon0, lon1, lat0, lat1, lev0, lev1 )
+ call edyn_esmf_regrid_geo2phys(geo_3dfld, phys_3dfld, 3)
+ call edyn_esmf_get_2dphysfield(phys_3dfld, physfld, 1, plev, cols, cole )
+
+ end subroutine regrid_geo2phys_3d
+
+ !-----------------------------------------------------------------------------
+ ! map 3D fields from physics grid to magnetic grid
+ !-----------------------------------------------------------------------------
+ subroutine regrid_phys2mag_3d( physfld, magfld, plev, cols, cole )
+ integer, intent(in) :: plev, cols, cole
+ real(r8), intent(in) :: physfld(1:plev,cols:cole)
+ real(r8), intent(out) :: magfld(mlon0:mlon1,mlat0:mlat1,mlev0:mlev1)
+
+ call edyn_esmf_set3d_phys( phys_3dfld, physfld, 1, plev, cols, cole)
+ call edyn_esmf_regrid_phys2mag(phys_3dfld, mag_des_3dfld, 3)
+ call edyn_esmf_get_3dfield(mag_des_3dfld, magfld, mlon0, mlon1, mlat0, mlat1, mlev0, mlev1 )
+
+ end subroutine regrid_phys2mag_3d
+
+end module regridder
diff --git a/src/ionosphere/waccmx/savefield_waccm.F90 b/src/ionosphere/waccmx/savefield_waccm.F90
index f968700f1c..e6d6f4877d 100644
--- a/src/ionosphere/waccmx/savefield_waccm.F90
+++ b/src/ionosphere/waccmx/savefield_waccm.F90
@@ -1,15 +1,14 @@
module savefield_waccm
use shr_kind_mod,only: r8 => shr_kind_r8 ! 8-byte reals
use cam_history ,only: hist_fld_active,outfld ! Routine to output fields to history files
- use edyn_mpi ,only: array_ptr_type
!
! Save fields to WACCM output history file.
!
implicit none
- save
private
- public savefld_waccm,savefld_waccm_switch
- contains
+ public :: savefld_waccm
+
+contains
!-----------------------------------------------------------------------
subroutine savefld_waccm(f,name,nlev,i0,i1,j0,j1)
!
@@ -32,7 +31,7 @@ subroutine savefld_waccm(f,name,nlev,i0,i1,j0,j1)
do j=j0,j1
do i=i0,i1
do k=1,nlev
- diag_ik(i,k) = f(k,i,j)
+ diag_ik(i,k) = f(nlev-k+1,i,j)
enddo
enddo
call outfld(name,diag_ik,i1-i0+1,j)
@@ -46,53 +45,5 @@ subroutine savefld_waccm(f,name,nlev,i0,i1,j0,j1)
enddo
endif
end subroutine savefld_waccm
-!-----------------------------------------------------------------------
- subroutine savefld_waccm_switch(f,name,plev,i0,i1,j0,j1)
-!
-! Copy input array to a local array, associate a pointer to the local array,
-! switch the "model format" of the pointer (shift longitude and invert vertical),
-! (TIEGCM to WACCM in this case), and save the local array to WACCM history.
-! (Input array is unchanged)
-!
- use edyn_mpi ,only: switch_model_format
-!
-! Args:
- integer,intent(in) :: plev,i0,i1,j0,j1
- real(r8),intent(in) :: f(plev,i0:i1,j0:j1)
- character(len=*),intent(in) :: name
-!
-! Local:
- integer :: i,j
- real(r8),target :: ftmp(plev,i0:i1,j0:j1)
- type(array_ptr_type) :: ptr(1)
-
- if (.not.hist_fld_active(name)) return
-!
-! Copy input to local array:
- do j=j0,j1
- do i=i0,i1
- ftmp(:,i,j) = f(:,i,j)
- enddo
- enddo
-!
-! Associate local pointer (lonshift_blocks expects an array_ptr_type)
- ptr(1)%ptr => ftmp
-!
-! Switch from TIEGCM format to WACCM format:
-!
- call switch_model_format(ptr,1,plev,i0,i1,j0,j1,1)
-!
-! Return data to local array, and save on WACCM history:
-!
- do j=j0,j1
- do i=i0,i1
- ftmp(1:plev,i,j) = ptr(1)%ptr(1:plev,i,j)
- enddo
- enddo
-
- call savefld_waccm(ftmp(:,i0:i1,j0:j1),trim(name),plev,i0,i1,j0,j1)
-
- end subroutine savefld_waccm_switch
-!-----------------------------------------------------------------------
end module savefield_waccm
diff --git a/src/ionosphere/waccmx/utils_mod.F90 b/src/ionosphere/waccmx/utils_mod.F90
new file mode 100644
index 0000000000..0158ccdc07
--- /dev/null
+++ b/src/ionosphere/waccmx/utils_mod.F90
@@ -0,0 +1,116 @@
+module utils_mod
+ use shr_kind_mod ,only: r8 => shr_kind_r8, cl=>shr_kind_cl
+ use cam_logfile ,only: iulog
+ use cam_abortutils ,only: endrun
+ use esmf ,only: ESMF_FIELD
+ use edyn_mpi ,only: mlon0,mlon1,mlat0,mlat1, lon0,lon1,lat0,lat1
+ use edyn_params ,only: finit
+
+ implicit none
+ private
+
+ public :: boxcar_ave
+ public :: check_ncerr
+ public :: check_alloc
+
+contains
+
+ !-----------------------------------------------------------------------
+ subroutine boxcar_ave(x,y,lon,lat,mtime,itime,ibox)
+ !
+ ! perform boxcar average
+ !
+ ! Args:
+ integer, intent(in) :: lon
+ integer, intent(in) :: lat
+ integer, intent(in) :: mtime
+ integer, intent(in) :: itime
+ integer, intent(in) :: ibox
+ real(r8), intent(in) :: x(lon,lat,mtime)
+ real(r8), intent(out) :: y(lon,lat)
+
+ ! Local:
+ integer :: i, iset, iset1
+
+ if (ibox > mtime) then
+ call endrun('boxcar_ave: ibox > mtime')
+ endif
+ !
+ iset = itime - ibox/2
+ if (iset < 1) iset = 1
+ iset1 = iset + ibox
+ if (iset1 > mtime) then
+ iset1 = mtime
+ iset = iset1 - ibox
+ end if
+ y(:,:) = 0._r8
+ do i=iset,iset1
+ y(:,:) = y(:,:) + x(:,:,i)
+ end do
+ if (ibox > 0) y(:,:) = y(:,:)/ibox
+ !
+ end subroutine boxcar_ave
+
+ !-----------------------------------------------------------------------
+ subroutine check_alloc(ierror, subname, varname, lonp1, latp1, ntimes, lw)
+ use spmd_utils, only: masterproc
+ integer, intent(in) :: ierror
+ character(len=*), intent(in) :: subname
+ character(len=*), intent(in) :: varname
+ integer, optional, intent(in) :: lonp1
+ integer, optional, intent(in) :: latp1
+ integer, optional, intent(in) :: ntimes
+ integer, optional, intent(in) :: lw
+ ! Local variable
+ character(len=cl) :: errmsg
+
+ if (ierror /= 0) then
+ write(errmsg, '(">>> ",a,": error allocating ",a)') &
+ trim(subname), trim(varname)
+ if (present(lonp1)) then
+ write(errmsg(len_trim(errmsg)+1:), '(", lonp1 = ",i0)') lonp1
+ end if
+ if (present(latp1)) then
+ write(errmsg(len_trim(errmsg)+1:), '(", latp1 = ",i0)') latp1
+ end if
+ if (present(ntimes)) then
+ write(errmsg(len_trim(errmsg)+1:), '(", ntimes = ",i0)') ntimes
+ end if
+ if (present(lw)) then
+ write(errmsg(len_trim(errmsg)+1:), '(", lw = ",i0)') lw
+ end if
+ if (masterproc) then
+ write(iulog, *) trim(errmsg)
+ end if
+ call endrun(trim(errmsg))
+ end if
+
+ end subroutine check_alloc
+
+ !-----------------------------------------------------------------------
+ subroutine check_ncerr(istat, subname, msg)
+ use pio, only: pio_noerr
+ !
+ ! Handle a netcdf lib error:
+ !
+ integer, intent(in) :: istat
+ character(len=*), intent(in) :: subname
+ character(len=*), intent(in) :: msg
+ !
+ ! Local variable
+ character(len=cl) :: errmsg
+ !
+ if (istat /= pio_noerr) then
+ write(iulog,"(/72('-'))")
+ write(iulog,"('>>> Error from netcdf library:')")
+ write(iulog,"(a,': Error getting ',a)") trim(subname), trim(msg)
+
+ write(iulog,"('istat=',i5)") istat
+ write(iulog,"(72('-')/)")
+ write(errmsg, '("NetCDF Error in ",a,": ",2a,", istat = ",i0)') &
+ trim(subname), 'Error getting ', trim(msg), istat
+ call endrun(trim(errmsg))
+ end if
+ end subroutine check_ncerr
+
+end module utils_mod
diff --git a/src/ionosphere/waccmx/wei05sc.F90 b/src/ionosphere/waccmx/wei05sc.F90
index 52b9c7bc07..e3c32b0743 100644
--- a/src/ionosphere/waccmx/wei05sc.F90
+++ b/src/ionosphere/waccmx/wei05sc.F90
@@ -1,8 +1,8 @@
module wei05sc
!
-! The Weimer model of high-latitude potential created by Daniel Weimer and
-! if extracted, distributed, or used for any purpose other than as implemented
-! in the NCAR TIEGCM and CESM/WACCM models, please contact Dan Weimer for
+! The Weimer model of high-latitude potential created by Daniel Weimer and
+! if extracted, distributed, or used for any purpose other than as implemented
+! in the NCAR TIEGCM and CESM/WACCM models, please contact Dan Weimer for
! further information and discussion.
!
! 2005 Version of the electric and magnetic potential (FAC) models
@@ -30,42 +30,41 @@ module wei05sc
! September, 2015 btf:
! Modified for free-format fortran, and for CESM/WACCM (r8, etc).
!
- use shr_kind_mod ,only: r8 => shr_kind_r8
- use shr_kind_mod ,only: shr_kind_cl
- use spmd_utils ,only: masterproc
-#ifdef WACCMX_IONOS
- use cam_logfile ,only: iulog
- use cam_abortutils,only: endrun
- use time_manager ,only: get_curr_date
- use edyn_maggrid ,only: nmlat,nmlon,nmlonp1
-#endif
-
- use edyn_maggrid,only: &
+ use shr_kind_mod, only: r8 => shr_kind_r8
+ use shr_kind_mod, only: shr_kind_cl
+ use spmd_utils, only: masterproc
+ use cam_logfile, only: iulog
+ use cam_abortutils, only: endrun
+ use time_manager, only: get_curr_date
+ use edyn_maggrid, only: nmlat,nmlon,nmlonp1
+
+ use edyn_maggrid, only: &
ylonm, & ! magnetic latitudes (nmlat) (radians)
ylatm ! magnetic longtitudes (nmlonp1) (radians)
- use edyn_solve,only: &
+ use edyn_solve, only: &
nmlat0, & ! (nmlat+1)/2
phihm ! output: high-latitude potential (nmlonp1,nmlat)
- use physconst, only: pi
- use aurora_params, only: aurora_params_set, hpower, ctpoten, theta0
- use aurora_params, only: offa, dskofa, dskofc, phid, rrad, offc, phin
+ use physconst, only: pi
+ use aurora_params, only: aurora_params_set, hpower, ctpoten, theta0
+ use aurora_params, only: offa, dskofa, dskofc, phid, rrad, offc, phin
implicit none
private
-#ifdef WACCMX_IONOS
!
! Coefficients read from netcdf data file wei05sc.nc:
!
- integer,parameter :: &
- na=6, nb=7, nex=2, n1_scha=19, n2_scha=7, n3_scha=68, &
+ integer,parameter :: &
+ na=6, nb=7, nex=2, n1_scha=19, n2_scha=7, n3_scha=68, &
csize=28, n_schfits=15, n_alschfits=18
- integer :: maxk_scha, maxm_scha, maxl_pot, maxm_pot
+ integer :: maxk_scha, maxm_scha, maxl_pot, maxm_pot
real(r8) :: bndya(na), bndyb(nb), ex_bndy(nex), ex_epot(nex),ex_bpot(nex)
real(r8) :: th0s(n3_scha), allnkm(n1_scha,n2_scha,n3_scha)
- integer :: ab(csize), ls(csize), ms(csize)
- real(r8) :: epot_alschfits(n_alschfits,csize), bpot_alschfits(n_alschfits,csize)
- real(r8) :: bpot_schfits(n_schfits,csize),epot_schfits(n_schfits,csize)
+ integer :: ab(csize), ls(csize), ms(csize)
+ real(r8) :: epot_alschfits(n_alschfits,csize)
+ real(r8) :: bpot_alschfits(n_alschfits,csize)
+ real(r8) :: epot_schfits(n_schfits,csize)
+ real(r8) :: bpot_schfits(n_schfits,csize)
!
! Intermediate calculations:
!
@@ -73,16 +72,14 @@ module wei05sc
real(r8) :: rad2deg,deg2rad ! set by setmodel
real(r8) :: bndyfitr ! calculated by setboundary
real(r8) :: esphc(csize),bsphc(csize) ! calculated by setmodel
- real(r8) :: tmat(3,3) !,ttmat(3,3) ! from setboundary
+ real(r8) :: tmat(3,3) ! from setboundary
real(r8) :: plmtable(mxtablesize,csize),colattable(mxtablesize)
real(r8) :: nlms(csize)
- real(r8) :: wei05sc_fac(nmlonp1,nmlat) ! field-aligned current output
+ real(r8),allocatable :: wei05sc_fac(:,:) ! field-aligned current output
! 05/08 bae: Have ctpoten from both hemispheres from Weimer
real(r8) :: weictpoten(2),phimin,phimax
- real(r8) :: real8,real8a ! for type conversion to 8-byte real
-
!
! Several items in the public list are for efield.F90 (chemistry/mozart)
! (dpie_coupling calls the weimer05 driver, but efield calls the individual
@@ -91,537 +88,568 @@ module wei05sc
public :: weimer05
public :: weimer05_init
-#endif
-
real(r8), parameter :: r2d = 180._r8/pi ! radians to degrees
real(r8), parameter :: d2r = pi/180._r8 ! degrees to radians
- contains
+ logical :: debug = .false.
-!-----------------------------------------------------------------------
- subroutine weimer05_init(wei05_ncfile)
- use infnan, only: nan, assignment(=)
-
- character(len=*),intent(in) :: wei05_ncfile
-
- hpower = nan
- ctpoten = nan
- phin = nan
- phid = nan
- theta0 = nan
- offa = nan
- dskofa = nan
- rrad = nan
- offc = nan
- dskofc = nan
-
- bndya = nan
- bndyb = nan
- ex_bndy = nan
- ex_bpot = nan
- th0s = nan
- allnkm = nan
- bpot_schfits = nan
- bpot_alschfits = nan
-
- if (wei05_ncfile.ne.'NONE') then
- call read_wei05_ncfile(wei05_ncfile)
- aurora_params_set = .true.
- endif
-
- end subroutine weimer05_init
+contains
!-----------------------------------------------------------------------
- subroutine weimer05(by,bz_in,swvel,swden,sunlons)
-!
-! 9/16/15 btf: Driver to call Weimer 2005 model for waccm[x].
-!
+ subroutine weimer05_init(wei05_ncfile)
+ use infnan, only: nan, assignment(=)
- implicit none
-!
-! Args:
- real(r8),intent(in) :: bz_in,by,swvel,swden
- real(r8),intent(in) :: sunlons(:)
+ character(len=*),intent(in) :: wei05_ncfile
-#ifdef WACCMX_IONOS
-!
-! Local:
+ allocate(wei05sc_fac(nmlonp1,nmlat))
- real(r8) :: angl,angle,bt
- integer :: i,j
- real(r8) :: rmlt,mlat,tilt,htilt,hem,ut,secs
- real(r8),parameter :: fill=0._r8
- integer :: iyear,imon,iday,isecs
- logical :: debug = .false.
- real(r8) :: bz
+ hpower = nan
+ ctpoten = nan
+ phin = nan
+ phid = nan
+ theta0 = nan
+ offa = nan
+ dskofa = nan
+ rrad = nan
+ offc = nan
+ dskofc = nan
- bz = bz_in
+ bndya = nan
+ bndyb = nan
+ ex_bndy = nan
+ ex_bpot = nan
+ th0s = nan
+ allnkm = nan
+ bpot_schfits = nan
+ bpot_alschfits = nan
- hpower = hp_from_bz_swvel(bz,swvel)
-!
-! Get current date and time:
-!
- call get_curr_date(iyear,imon,iday,isecs)
-!
-! Get sun's location (longitude at all latitudes):
-!
- real8 = dble(isecs)
- secs = real8
+ if (wei05_ncfile.ne.'NONE') then
+ call read_wei05_ncfile(wei05_ncfile)
+ aurora_params_set = .true.
+ endif
-!
-! At least one of by,bz must be non-zero:
- if (by==0._r8.and.bz==0._r8) then
- if (masterproc) then
- write(iulog,"(/,'>>> WARNING: by and bz cannot both be zero',&
- ' when calling the Weimer model: am setting bz=0.01')")
- endif
- bz = 0.01_r8
- endif
-!
- bt = sqrt(by**2+bz**2)
- angl = atan2(by,bz)*r2d
-!
-! Convert from day-of-year to month,day and get tilt from date and ut:
-!
- ut = secs/3600._r8 ! decimal hours
-!
-! Given year and day-of-year, cvt2md returns month and day of month.
-! We do not need this, since get_curr_date returns month and day of month.
-! call cvt2md(iulog,iyear,idoy,imon,iday) ! given iyear,idoy, return imo,ida
-!
- if (debug) write(iulog,"('weimer05: iyear,imon,iday=',3i5,' ut=',f8.2)") &
- iyear,imon,iday,ut
- tilt = get_tilt(iyear,imon,iday,ut)
- if (debug) write(iulog,"('weimer05: tilt=',e12.4)") tilt
-
- phihm = 0._r8 ! whole-array init (nmlonp1,nmlat)
-!
-! Call Weimer model for southern hemisphere electric potential:
-!
- hem = -1._r8
- htilt = hem * tilt
- angle = hem * angl
- if (debug) write(iulog,"('weimer05 call setmodel for SH potential')")
- call setmodel(angle,bt,htilt,swvel,swden,'epot')
- if (debug) write(iulog,"('weimer05 after setmodel for SH potential')")
- do j=1,nmlat0 ! Spole to equator
- do i=1,nmlon
-!
-! sunlons(nlat): sun's longitude in dipole coordinates (see sub sunloc) in rad
-!
- rmlt = (ylonm(i)-sunlons(1)) * r2d / 15._r8 + 12._r8
- mlat = abs(ylatm(j))*r2d
-!
-! Obtain electric potential and convert from kV to V
-!
- call epotval(mlat,rmlt,fill,phihm(i,j))
- phihm(i,j) = phihm(i,j)*1000._r8
- enddo ! i=1,nmlon
- enddo ! j=1,nmlat0
- if (debug) write(iulog,"('weimer05: SH phihm min,max=',2es12.4)") &
- minval(phihm(1:nmlon,1:nmlat0)),maxval(phihm(1:nmlon,1:nmlat0))
-!
-! Re-calculate SH values of offa, dskofa, arad, and phid and phin from
-! Weimer 2005 setboundary values of offc, dskofc, and theta0
-!
- call wei05loc (1, by, hpower, sunlons)
-!
-! Call Weimer model for southern hemisphere fac:
-!
- if (debug) write(iulog,"('weimer05 call setmodel for SH fac')")
- call setmodel(angle,bt,htilt,swvel,swden,'bpot')
- if (debug) write(iulog,"('weimer05 after setmodel for SH fac')")
- do j=1,nmlat0
- do i=1,nmlon
- rmlt = (ylonm(i)-sunlons(1)) * r2d / 15._r8 + 12._r8
- mlat = abs(ylatm(j))*r2d
- call mpfac(mlat,rmlt,fill,wei05sc_fac(i,j))
- enddo ! i=1,nmlon
- enddo ! j=1,nmlat0
-!
-! Call Weimer model for northern hemisphere epot:
-!
- hem = 1._r8
- htilt = hem * tilt
- angle = hem * angl
- if (debug) write(iulog,"('weimer05 call setmodel for NH potential')")
- call setmodel(angle,bt,htilt,swvel,swden,'epot')
- if (debug) write(iulog,"('weimer05 after setmodel for NH potential')")
- do j=nmlat0+1,nmlat
- do i=1,nmlon
-!
-! sunlons(nlat): sun's longitude in dipole coordinates (see sub sunloc) in rad
- rmlt = (ylonm(i)-sunlons(1)) * r2d / 15._r8 + 12._r8
- mlat = abs(ylatm(j))*r2d
-!
-! Obtain electric potential and convert from kV to V
- call epotval(mlat,rmlt,fill,phihm(i,j))
- phihm(i,j) = phihm(i,j)*1000._r8
- enddo ! i=1,nmlon
- enddo ! j=1,nmlat0+1,nmlat
- if (debug) write(iulog,"('weimer05: NH phihm min,max=',2es12.4)") &
- minval(phihm(1:nmlon,nmlat0+1:nmlat)),maxval(phihm(1:nmlon,nmlat0+1:nmlat))
-!
-! Re-calculate NH values of offa, dskofa, arad, and Heelis phid and phin from
-! Weimer 2005 setboundary values of offc, dskofc, and theta0
-!
- call wei05loc (2, by, hpower, sunlons)
-!
-! Call Weimer model for northern hemisphere fac:
- if (debug) write(iulog,"('weimer05 call setmodel for NH fac')")
- call setmodel(angle,bt,htilt,swvel,swden,'bpot')
- if (debug) write(iulog,"('weimer05 after setmodel for NH fac')")
- do j=nmlat0+1,nmlat
- do i=1,nmlon
- rmlt = (ylonm(i)-sunlons(1)) * r2d / 15._r8 + 12._r8
- mlat = abs(ylatm(j))*r2d
- call mpfac(mlat,rmlt,fill,wei05sc_fac(i,j))
- enddo ! i=1,nmlon
- enddo ! j=1,nmlat0
-!
-! Periodic points:
- do j=1,nmlat
- phihm(nmlonp1,j) = phihm(1,j)
- wei05sc_fac(nmlonp1,j) = wei05sc_fac(1,j)
- enddo ! j=1,nmlat
-!
-! Calculate ctpoten for each hemisphere:
-! South:
-!
- phimax = -1.e36_r8
- phimin = 1.e36_r8
- do j=1,nmlat0 ! SH
- do i=1,nmlon
- if (phihm(i,j) > phimax) phimax = phihm(i,j)
- if (phihm(i,j) < phimin) phimin = phihm(i,j)
- enddo
- enddo
- weictpoten(1) = 0.001_r8 * (phimax - phimin)
-!
-! North:
-!
- phimax = -1.e36_r8
- phimin = 1.e36_r8
- do j=nmlat0+1,nmlat ! NH
- do i=1,nmlon
- if (phihm(i,j) > phimax) phimax = phihm(i,j)
- if (phihm(i,j) < phimin) phimin = phihm(i,j)
- enddo
- enddo
- weictpoten(2) = 0.001_r8 * (phimax - phimin)
-!
-! average of the SH and NH in ctpoten
- ctpoten = 0.5_r8*(weictpoten(1)+weictpoten(2))
-
- if (masterproc) then
- write(iulog,"('weimer05: ctpoten=',f8.2,' phihm min,max=',2es12.4)") ctpoten,minval(phihm),maxval(phihm)
- endif
-!
-
-#endif
- end subroutine weimer05
-!-----------------------------------------------------------------------
- subroutine read_wei05_ncfile(file)
+ end subroutine weimer05_init
- use ioFileMod, only: getfil
- use cam_pio_utils, only: cam_pio_openfile, cam_pio_closefile
- use pio, only: file_desc_t, pio_nowrite, pio_inq_dimid, pio_inquire_dimension, &
- pio_inq_varid, pio_get_var
-!
-! Read coefficients and other data from netcdf data file.
-!
- implicit none
-!
-! Arg:
- character(len=*),intent(in) :: file
-#ifdef WACCMX_IONOS
-!
-! Local:
- integer :: istat
- integer :: rd_na,rd_nb,rd_nex,rd_n1_scha,rd_n2_scha,rd_n3_scha,&
- rd_csize,rd_n_schfits,rd_n_alschfits
- integer :: id
- character(len=shr_kind_cl) :: filen
- type(file_desc_t) :: ncid
-!
-! Open netcdf file for reading:
-!
- call getfil( file, filen, 0 )
- call cam_pio_openfile(ncid, filen, PIO_NOWRITE)
-
- write(iulog,"('wei05sc: opened netcdf data file',a)") trim(filen)
-!
-! Read and check dimensions:
-!
-! na=6
- istat = pio_inq_dimid(ncid,'na',id)
- istat = pio_inquire_dimension(ncid,id,len=rd_na)
- if (rd_na /= na) then
- write(iulog,"(/,'>>> wei05sc: rd_na /= na: rd_na=',i4,' na=',i4)") rd_na,na
- call endrun('wei05sc: rd_na /= na')
- endif
-!
-! nb=7
-!
- istat = pio_inq_dimid(ncid,'nb',id)
- istat = pio_inquire_dimension(ncid,id,len=rd_nb)
- if (rd_na /= na) then
- write(iulog,"(/,'>>> wei05sc: rd_nb /= nb: rd_nb=',i4,' nb=',i4)") rd_nb,nb
- call endrun('wei05sc: rd_nb /= nb: rd_nb')
- endif
-!
-! nex=2
-!
- istat = pio_inq_dimid(ncid,'nex',id)
- istat = pio_inquire_dimension(ncid,id,len=rd_nex)
- if (rd_nex /= nex) then
- write(iulog,"(/,'>>> wei05sc: rd_nex /= nex: rd_nex=',i4,' nex=',i4)") &
- rd_nex,nex
- call endrun('wei05sc')
- endif
-!
-! n1_scha=19
-!
- istat = pio_inq_dimid(ncid,'n1_scha',id)
- istat = pio_inquire_dimension(ncid,id,len=rd_n1_scha)
- if (rd_n1_scha /= n1_scha) then
- write(iulog,"(/,'>>> wei05sc: rd_n1_scha /= n1_scha: rd_n1_scha=',i4,' n1_scha=',i4)") &
- rd_n1_scha,n1_scha
- call endrun('wei05sc')
- endif
-!
-! n2_scha=7
-!
- istat = pio_inq_dimid(ncid,'n2_scha',id)
- istat = pio_inquire_dimension(ncid,id,len=rd_n2_scha)
- if (rd_n2_scha /= n2_scha) then
- write(iulog,"(/,'>>> wei05sc: rd_n2_scha /= n2_scha: rd_n2_scha=',i4,' n2_scha=',i4)") &
- rd_n2_scha,n2_scha
- call endrun('wei05sc')
- endif
-!
-! n3_scha=68
-!
- istat = pio_inq_dimid(ncid,'n3_scha',id)
- istat = pio_inquire_dimension(ncid,id,len=rd_n3_scha)
- if (rd_n3_scha /= n3_scha) then
- write(6,"(/,'>>> wei05sc: rd_n3_scha /= n3_scha: rd_n3_scha=',i4,' n3_scha=',i4)") &
- rd_n3_scha,n3_scha
- call endrun('wei05sc')
- endif
-!
-! csize=28
-!
- istat = pio_inq_dimid(ncid,'csize',id)
- istat = pio_inquire_dimension(ncid,id,len=rd_csize)
- if (rd_csize /= csize) then
- write(iulog,"(/,'>>> wei05sc: rd_csize /= csize: rd_csize=',i4,' csize=',i4)") &
- rd_csize,csize
- call endrun('wei05sc')
- endif
-!
-! n_schfits=15
-!
- istat = pio_inq_dimid(ncid,'n_schfits',id)
- istat = pio_inquire_dimension(ncid,id,len=rd_n_schfits)
- if (rd_n_schfits /= n_schfits) then
- write(iulog,"(/,'>>> wei05sc: rd_n_schfits /= n_schfits: rd_n_schfits=',i4,' n_schfits=',i4)") &
- rd_n_schfits,n_schfits
- call endrun('wei05sc')
- endif
-!
-! n_alschfits=18
-!
- istat = pio_inq_dimid(ncid,'n_alschfits',id)
- istat = pio_inquire_dimension(ncid,id,len=rd_n_alschfits)
- if (rd_n_alschfits /= n_alschfits) then
- write(iulog,"(/,'>>> wei05sc: rd_n_alschfits /= n_alschfits: rd_n_alschfits=',i4,' n_alschfits=',i4)") &
- rd_n_alschfits,n_alschfits
- call endrun('wei05sc')
- endif
-!
-! integer :: maxk_scha, maxm_scha, maxl_pot, maxm_pot
-! maxk_scha = 18 ;
-! maxm_scha = 6 ;
-! maxl_pot = 12 ;
-! maxm_pot = 2 ;
-!
- istat = pio_inq_dimid(ncid,"maxk_scha",id)
- istat = pio_inquire_dimension(ncid,id,len=maxk_scha)
- istat = pio_inq_dimid(ncid,"maxm_scha",id)
- istat = pio_inquire_dimension(ncid,id,len=maxm_scha)
- istat = pio_inq_dimid(ncid,"maxl_pot",id)
- istat = pio_inquire_dimension(ncid,id,len=maxl_pot)
- istat = pio_inq_dimid(ncid,"maxm_pot",id)
- istat = pio_inquire_dimension(ncid,id,len=maxm_pot)
-
-! write(iulog,"('wei05sc: maxk_scha=',i3,' maxm_scha=',i3)") &
-! maxk_scha,maxm_scha
-! write(iulog,"('wei05sc: maxl_pot=',i3,' maxm_pot=',i3)") &
-! maxl_pot,maxm_pot
-!
-! Read variables:
-!
-! double bndya(na):
- istat = pio_inq_varid(ncid,'bndya',id)
- istat = pio_get_var(ncid,id,bndya)
-! write(iulog,"('wei05sc: bndya=',/,(8f8.3))") bndya
-!
-! double bndyb(nb):
- istat = pio_inq_varid(ncid,'bndyb',id)
- istat = pio_get_var(ncid,id,bndyb)
-! write(iulog,"('wei05sc: bndyb=',/,(8f8.3))") bndyb
-!
-! double ex_bndy(nex):
- istat = pio_inq_varid(ncid,'ex_bndy',id)
- istat = pio_get_var(ncid,id,ex_bndy)
-! write(iulog,"('wei05sc: ex_bndy=',/,(8f8.3))") ex_bndy
-!
-! double th0s(n3_scha):
- istat = pio_inq_varid(ncid,'th0s',id)
- istat = pio_get_var(ncid,id,th0s)
-! write(iulog,"('wei05sc: th0s=',/,(8f8.3))") th0s
-!
-! double allnkm(n1_scha,n2_scha,n3_scha):
- istat = pio_inq_varid(ncid,'allnkm',id)
- istat = pio_get_var(ncid,id,allnkm)
-! write(iulog,"('wei05sc: allnkm min,max=',2e12.4)") minval(allnkm),maxval(allnkm)
-!
-! int ab(csize):
- istat = pio_inq_varid(ncid,'ab',id)
- istat = pio_get_var(ncid,id,ab)
-! write(iulog,"('wei05sc: ab=',/,(10i4))") ab
-!
-! int ls(csize):
- istat = pio_inq_varid(ncid,'ls',id)
- istat = pio_get_var(ncid,id,ls)
-! write(iulog,"('wei05sc: ls=',/,(10i4))") ls
-!
-! int ms(csize):
- istat = pio_inq_varid(ncid,'ms',id)
- istat = pio_get_var(ncid,id,ms)
-! write(iulog,"('wei05sc: ms=',/,(10i4))") ms
-!
-! double ex_epot(nex):
- istat = pio_inq_varid(ncid,'ex_epot',id)
- istat = pio_get_var(ncid,id,ex_epot)
-! write(iulog,"('wei05sc: ex_epot=',/,(8f8.3))") ex_epot
-!
-! double ex_bpot(nex):
- istat = pio_inq_varid(ncid,'ex_bpot',id)
- istat = pio_get_var(ncid,id,ex_bpot)
-! write(iulog,"('wei05sc: ex_bpot=',/,(8f8.3))") ex_bpot
-!
-! double epot_schfits(csize,n_schfits):
- istat = pio_inq_varid(ncid,'epot_schfits',id)
- istat = pio_get_var(ncid,id,epot_schfits)
-! write(iulog,"('wei05sc: epot_schfits min,max=',2e12.4)") &
-! minval(epot_schfits),maxval(epot_schfits)
-!
-! double bpot_schfits(csize,n_schfits):
- istat = pio_inq_varid(ncid,'bpot_schfits',id)
- istat = pio_get_var(ncid,id,bpot_schfits)
-! write(iulog,"('wei05sc: bpot_schfits min,max=',2e12.4)") &
-! minval(bpot_schfits),maxval(bpot_schfits)
-!
-! double epot_alschfits(csize,n_alschfits):
- istat = pio_inq_varid(ncid,'epot_alschfits',id)
- istat = pio_get_var(ncid,id,epot_alschfits)
-! write(iulog,"('wei05sc: epot_alschfits min,max=',2e12.4)") &
-! minval(epot_alschfits),maxval(epot_alschfits)
-!
-! double bpot_alschfits(csize,n_alschfits):
- istat = pio_inq_varid(ncid,'bpot_alschfits',id)
- istat = pio_get_var(ncid,id,bpot_alschfits)
-! write(iulog,"('wei05sc: bpot_alschfits min,max=',2e12.4)") &
-! minval(bpot_alschfits),maxval(bpot_alschfits)
-!
-! Close file:
- call cam_pio_closefile(ncid)
- if(masterproc) write(iulog,"('wei05sc: completed read of file ',a)") trim(file)
-#endif
- end subroutine read_wei05_ncfile
-#ifdef WACCMX_IONOS
!-----------------------------------------------------------------------
- subroutine setmodel(angle,bt,tilt,swvel,swden,model)
-!
-! Calculate the complete set of the models' SCHA coeficients,
-! given an aribitrary IMF angle (degrees from northward toward +Y),
-! given byimf, bzimf, solar wind velocity (km/sec), and density.
-!
- implicit none
-!
-! Args:
- real(r8),intent(in) :: angle,bt,tilt,swvel,swden
- character(len=*),intent(in) :: model
-!
-! Local:
- integer :: i,j
- real(r8) :: pi,stilt,stilt2,sw,swp,swe,c0,rang,cosa,sina,cos2a,sin2a
- real(r8) :: a(n_schfits)
-!
- if (trim(model) /= 'epot'.and.trim(model) /= 'bpot') then
- write(iulog,"('>>> model=',a)") trim(model)
- write(iulog,"('>>> setmodel: model must be either','''epot'' or ''bpot''')")
- call endrun('setmodel')
- endif
-!
- pi = 4._r8*atan(1._r8)
- rad2deg = 180._r8/pi
- deg2rad = pi/180._r8
-!
-! write(iulog,"('setmodel call setboundary: model=',a,' swvel=',e12.4)") &
-! model, swvel
+ subroutine weimer05(by, bz_in, swvel, swden, sunlon)
+ !
+ ! 9/16/15 btf: Driver to call Weimer 2005 model for waccm[x].
+ !
- call setboundary(angle,bt,swvel,swden)
-!
- stilt = sin(tilt*deg2rad)
- stilt2 = stilt**2
- sw = bt*swvel/1000._r8
- if (trim(model) == 'epot') then
- swe = (1._r8-exp(-sw*ex_epot(2)))*sw**ex_epot(1)
- else
- swe = (1._r8-exp(-sw*ex_bpot(2)))*sw**ex_bpot(1)
- endif
- c0 = 1._r8
- swp = swvel**2 * swden*1.6726e-6_r8
- rang = angle*deg2rad
- cosa = cos(rang)
- sina = sin(rang)
- cos2a = cos(2._r8*rang)
- sin2a = sin(2._r8*rang)
- if (bt < 1._r8) then ! remove angle dependency for IMF under 1 nT
- cosa = -1._r8+bt*(cosa+1._r8)
- cos2a = 1._r8+bt*(cos2a-1._r8)
- sina = bt*sina
- sin2a = bt*sin2a
- endif
- a = (/c0 , swe , stilt , stilt2 , swp, &
- swe*cosa, stilt*cosa, stilt2*cosa, swp*cosa, &
- swe*sina, stilt*sina, stilt2*sina, swp*sina, &
- swe*cos2a,swe*sin2a/)
- if (trim(model) == 'epot') then
- esphc(:) = 0._r8
- do j=1,csize
- do i=1,n_schfits
- esphc(j) = esphc(j)+epot_schfits(i,j)*a(i)
- enddo
- enddo
-! write(iulog,"('setmodel: esphc=',/,(6e12.4))") esphc
- else
- bsphc(:) = 0._r8
- do j=1,csize
- do i=1,n_schfits
- bsphc(j) = bsphc(j)+bpot_schfits(i,j)*a(i)
- enddo
- enddo
-! write(iulog,"('setmodel: bsphc=',/,(6e12.4))") bsphc
- endif
- end subroutine setmodel
+ implicit none
+ !
+ ! Args:
+ real(r8), intent(in) :: bz_in, by, swvel, swden
+ real(r8), intent(in) :: sunlon
-!-----------------------------------------------------------------------
-!-----------------------------------------------------------------------
- subroutine wei05loc (ih, byimf, power, sunlons)
+ !
+ ! Local:
+
+ real(r8) :: angl, angle, bt
+ integer :: i, j
+ real(r8) :: rmlt, mlat, tilt, htilt, hem, ut, secs
+ real(r8), parameter :: fill = 0._r8
+ integer :: iyear, imon, iday, isecs
+ real(r8) :: bz
+
+ bz = bz_in
+
+ hpower = hp_from_bz_swvel(bz,swvel)
+ !
+ ! Get current date and time:
+ !
+ call get_curr_date(iyear,imon,iday,isecs)
+ !
+ ! Get sun's location (longitude at all latitudes):
+ !
+ secs = real(isecs, r8)
+
+ !
+ ! At least one of by,bz must be non-zero:
+ if (by==0._r8 .and. bz==0._r8) then
+ if (masterproc) then
+ write(iulog,"(/,'>>> WARNING: by and bz cannot both be zero',&
+ ' when calling the Weimer model: am setting bz=0.01')")
+ end if
+ bz = 0.01_r8
+ end if
+ !
+ bt = sqrt(by**2 + bz**2)
+ angl = atan2(by,bz) * r2d
+ !
+ ! Convert from day-of-year to month,day and get tilt from date and ut:
+ !
+ ut = secs / 3600._r8 ! decimal hours
+ !
+ ! Given year and day-of-year, cvt2md returns month and day of month.
+ ! We do not need this, since get_curr_date returns month and day of month.
+ ! call cvt2md(iulog,iyear,idoy,imon,iday) ! given iyear,idoy, return imo,ida
+ !
+ if (debug .and. masterproc) then
+ write(iulog,"('weimer05: iyear,imon,iday=',3i5,' ut=',f8.2)") &
+ iyear,imon,iday,ut
+ end if
+ tilt = get_tilt(iyear,imon,iday,ut)
+ if (debug .and. masterproc) then
+ write(iulog,"('weimer05: tilt=',e12.4)") tilt
+ end if
+
+ phihm = 0._r8 ! whole-array init (nmlonp1,nmlat)
+ !
+ ! Call Weimer model for southern hemisphere electric potential:
+ !
+ hem = -1._r8
+ htilt = hem * tilt
+ angle = hem * angl
+ if (debug .and. masterproc) then
+ write(iulog,"('weimer05 call setmodel for SH potential')")
+ end if
+ call setmodel(angle, bt, htilt, swvel, swden, 'epot')
+ if (debug .and. masterproc) then
+ write(iulog,"('weimer05 after setmodel for SH potential')")
+ end if
+ do j = 1, nmlat0 ! Spole to equator
+ do i = 1, nmlon
+ !
+ ! sunlon: sun's longitude in dipole coordinates
+ !
+ rmlt = (ylonm(i)-sunlon) * r2d / 15._r8 + 12._r8
+ mlat = abs(ylatm(j))*r2d
+ !
+ ! Obtain electric potential and convert from kV to V
+ !
+ call epotval(mlat,rmlt,fill,phihm(i,j))
+ phihm(i,j) = phihm(i,j)*1000._r8
+ end do ! i=1,nmlon
+ end do ! j=1,nmlat0
+ if (debug) write(iulog,"('weimer05: SH phihm min,max=',2es12.4)") &
+ minval(phihm(1:nmlon,1:nmlat0)),maxval(phihm(1:nmlon,1:nmlat0))
+ !
+ ! Re-calculate SH values of offa, dskofa, arad, and phid and phin from
+ ! Weimer 2005 setboundary values of offc, dskofc, and theta0
+ !
+ call wei05loc (1, by, hpower, sunlon)
+ !
+ ! Call Weimer model for southern hemisphere fac:
+ !
+ if (debug .and. masterproc) then
+ write(iulog,"('weimer05 call setmodel for SH fac')")
+ end if
+ call setmodel(angle,bt,htilt,swvel,swden,'bpot')
+ if (debug .and. masterproc) then
+ write(iulog,"('weimer05 after setmodel for SH fac')")
+ end if
+ do j = 1, nmlat0
+ do i = 1, nmlon
+ rmlt = (ylonm(i)-sunlon) * r2d / 15._r8 + 12._r8
+ mlat = abs(ylatm(j))*r2d
+ call mpfac(mlat,rmlt,fill,wei05sc_fac(i,j))
+ end do ! i=1,nmlon
+ end do ! j=1,nmlat0
+ !
+ ! Call Weimer model for northern hemisphere epot:
+ !
+ hem = 1._r8
+ htilt = hem * tilt
+ angle = hem * angl
+ if (debug .and. masterproc) then
+ write(iulog,"('weimer05 call setmodel for NH potential')")
+ end if
+ call setmodel(angle,bt,htilt,swvel,swden,'epot')
+ if (debug .and. masterproc) then
+ write(iulog,"('weimer05 after setmodel for NH potential')")
+ end if
+ do j = nmlat0+1, nmlat
+ do i = 1, nmlon
+ !
+ ! sunlon: sun's longitude in dipole coordinates
+ rmlt = ((ylonm(i) - sunlon) * r2d / 15._r8) + 12._r8
+ mlat = abs(ylatm(j)) * r2d
+ !
+ ! Obtain electric potential and convert from kV to V
+ call epotval(mlat, rmlt, fill, phihm(i,j))
+ phihm(i,j) = phihm(i,j) * 1000._r8
+ end do ! i=1,nmlon
+    end do ! j=nmlat0+1,nmlat
+ if (debug .and. masterproc) then
+ write(iulog,"('weimer05: NH phihm min,max=',2es12.4)") &
+ minval(phihm(1:nmlon,nmlat0+1:nmlat)), &
+ maxval(phihm(1:nmlon,nmlat0+1:nmlat))
+ end if
+ !
+ ! Re-calculate NH values of offa, dskofa, arad, and Heelis phid and phin
+ ! from Weimer 2005 setboundary values of offc, dskofc, and theta0
+ !
+ call wei05loc (2, by, hpower, sunlon)
+ !
+ ! Call Weimer model for northern hemisphere fac:
+ if (debug .and. masterproc) then
+ write(iulog,"('weimer05 call setmodel for NH fac')")
+ end if
+ call setmodel(angle,bt,htilt,swvel,swden,'bpot')
+ if (debug .and. masterproc) then
+ write(iulog,"('weimer05 after setmodel for NH fac')")
+ end if
+ do j = nmlat0+1, nmlat
+ do i = 1, nmlon
+ rmlt = ((ylonm(i)-sunlon) * r2d / 15._r8) + 12._r8
+ mlat = abs(ylatm(j))*r2d
+ call mpfac(mlat,rmlt,fill,wei05sc_fac(i,j))
+ end do ! i=1,nmlon
+    end do ! j=nmlat0+1,nmlat
+ !
+ ! Periodic points:
+ do j = 1, nmlat
+ phihm(nmlonp1,j) = phihm(1,j)
+ wei05sc_fac(nmlonp1,j) = wei05sc_fac(1,j)
+ end do ! j=1,nmlat
+ !
+ ! Calculate ctpoten for each hemisphere:
+ ! South:
+ !
+ phimax = -1.e36_r8
+ phimin = 1.e36_r8
+ do j = 1, nmlat0 ! SH
+ do i = 1, nmlon
+ if (phihm(i,j) > phimax) phimax = phihm(i,j)
+ if (phihm(i,j) < phimin) phimin = phihm(i,j)
+ end do
+ end do
+ weictpoten(1) = 0.001_r8 * (phimax - phimin)
+ !
+ ! North:
+ !
+ phimax = -1.e36_r8
+ phimin = 1.e36_r8
+ do j = nmlat0+1, nmlat ! NH
+ do i = 1, nmlon
+ if (phihm(i,j) > phimax) phimax = phihm(i,j)
+ if (phihm(i,j) < phimin) phimin = phihm(i,j)
+ end do
+ end do
+ weictpoten(2) = 0.001_r8 * (phimax - phimin)
+ !
+ ! average of the SH and NH in ctpoten
+ ctpoten = 0.5_r8*(weictpoten(1)+weictpoten(2))
+
+ if (masterproc) then
+ write(iulog,"(a,f8.2,a,2es12.4)") &
+ 'weimer05: ctpoten=', ctpoten, ', phihm min,max=', &
+ minval(phihm), maxval(phihm)
+ end if
+ !
+
+ end subroutine weimer05
+ !-----------------------------------------------------------------------
+ subroutine read_wei05_ncfile(file)
+
+ use ioFileMod, only: getfil
+ use cam_pio_utils, only: cam_pio_openfile, cam_pio_closefile
+ use pio, only: file_desc_t, pio_nowrite, pio_inq_dimid
+ use pio, only: pio_inquire_dimension, pio_inq_varid, pio_get_var
+ !
+ ! Read coefficients and other data from netcdf data file.
+ !
+ ! Arg:
+ character(len=*), intent(in) :: file
+ !
+ ! Local:
+ integer :: istat
+ integer :: rd_na, rd_nb, rd_nex, rd_n1_scha, rd_n2_scha, rd_n3_scha
+ integer :: rd_csize, rd_n_schfits, rd_n_alschfits
+ integer :: id
+ character(len=shr_kind_cl) :: filen
+ character(len=shr_kind_cl) :: errmsg
+ character(len=*), parameter :: prefix = 'read_wei05_ncfile: '
+ type(file_desc_t) :: ncid
+ !
+ ! Open netcdf file for reading:
+ !
+ call getfil( file, filen, 0 )
+ call cam_pio_openfile(ncid, filen, PIO_NOWRITE)
+
+ if (masterproc) then
+ write(iulog,"('wei05sc: opened netcdf data file',a)") trim(filen)
+ end if
+ !
+ ! Read and check dimensions:
+ !
+ ! na=6
+ istat = pio_inq_dimid(ncid, 'na', id)
+ istat = pio_inquire_dimension(ncid, id, len=rd_na)
+ if (rd_na /= na) then
+ write(errmsg,"(a,i4,a,i4)") prefix//'rd_na /= na: rd_na = ', rd_na,' na = ', na
+ write(iulog,*) trim(errmsg)
+ call endrun(errmsg)
+ end if
+ !
+ ! nb=7
+ !
+ istat = pio_inq_dimid(ncid, 'nb', id)
+ istat = pio_inquire_dimension(ncid, id, len=rd_nb)
+ if (rd_nb /= nb) then
+ write(errmsg,"(a,i4,a,i4)") prefix//'rd_nb /= nb: rd_nb = ', rd_nb,' nb = ', nb
+ write(iulog,*) trim(errmsg)
+ call endrun(errmsg)
+ end if
+ !
+ ! nex=2
+ !
+ istat = pio_inq_dimid(ncid, 'nex', id)
+ istat = pio_inquire_dimension(ncid, id, len=rd_nex)
+ if (rd_nex /= nex) then
+ write(errmsg,"(a,i4,a,i4)") prefix//'rd_nex /= nex rd_nex = ', rd_nex,' nex = ', nex
+ write(iulog,*) trim(errmsg)
+ call endrun(errmsg)
+ end if
+ !
+ ! n1_scha=19
+ !
+ istat = pio_inq_dimid(ncid, 'n1_scha', id)
+ istat = pio_inquire_dimension(ncid, id, len=rd_n1_scha)
+ if (rd_n1_scha /= n1_scha) then
+ write(errmsg,"(a,i4,a,i4)") prefix//'rd_n1_scha /= n1_scha rd_n1_scha = ', rd_n1_scha,' n1_scha = ', n1_scha
+ write(iulog,*) trim(errmsg)
+ call endrun(errmsg)
+ end if
+ !
+ ! n2_scha=7
+ !
+ istat = pio_inq_dimid(ncid, 'n2_scha', id)
+ istat = pio_inquire_dimension(ncid, id, len=rd_n2_scha)
+ if (rd_n2_scha /= n2_scha) then
+ write(errmsg,"(a,i4,a,i4)") prefix//'rd_n2_scha /= n2_scha rd_n2_scha = ', rd_n2_scha,' n2_scha = ', n2_scha
+ write(iulog,*) trim(errmsg)
+ call endrun(errmsg)
+ end if
+ !
+ ! n3_scha=68
+ !
+ istat = pio_inq_dimid(ncid, 'n3_scha', id)
+ istat = pio_inquire_dimension(ncid, id, len=rd_n3_scha)
+ if (rd_n3_scha /= n3_scha) then
+ write(errmsg,"(a,i4,a,i4)") prefix//'rd_n3_scha /= n3_scha rd_n3_scha = ', rd_n3_scha,' n3_scha = ', n3_scha
+ write(iulog,*) trim(errmsg)
+ call endrun(errmsg)
+ end if
+ !
+ ! csize=28
+ !
+ istat = pio_inq_dimid(ncid, 'csize', id)
+ istat = pio_inquire_dimension(ncid, id, len=rd_csize)
+ if (rd_csize /= csize) then
+ write(errmsg,"(a,i4,a,i4)") prefix//'rd_csize /= csize rd_csize = ', rd_csize,' csize = ', csize
+ write(iulog,*) trim(errmsg)
+ call endrun(errmsg)
+ end if
+ !
+ ! n_schfits=15
+ !
+ istat = pio_inq_dimid(ncid, 'n_schfits', id)
+ istat = pio_inquire_dimension(ncid, id, len=rd_n_schfits)
+ if (rd_n_schfits /= n_schfits) then
+ write(errmsg,"(a,i4,a,i4)") prefix//'rd_n_schfits /= n_schfits rd_n_schfits = ', &
+ rd_n_schfits,' n_schfits = ', n_schfits
+ write(iulog,*) trim(errmsg)
+ call endrun(errmsg)
+ end if
+ !
+ ! n_alschfits=18
+ !
+ istat = pio_inq_dimid(ncid, 'n_alschfits', id)
+ istat = pio_inquire_dimension(ncid, id, len=rd_n_alschfits)
+ if (rd_n_alschfits /= n_alschfits) then
+ write(errmsg,"(a,i4,a,i4)") prefix//'rd_n_alschfits /= n_alschfits rd_n_alschfits = ',&
+ rd_n_alschfits,' n_alschfits = ', n_alschfits
+ write(iulog,*) trim(errmsg)
+ call endrun(errmsg)
+ end if
+ !
+ ! integer :: maxk_scha, maxm_scha, maxl_pot, maxm_pot
+ ! maxk_scha = 18 ;
+ ! maxm_scha = 6 ;
+ ! maxl_pot = 12 ;
+ ! maxm_pot = 2 ;
+ !
+ istat = pio_inq_dimid(ncid,"maxk_scha", id)
+ istat = pio_inquire_dimension(ncid, id, len=maxk_scha)
+ istat = pio_inq_dimid(ncid,"maxm_scha", id)
+ istat = pio_inquire_dimension(ncid, id, len=maxm_scha)
+ istat = pio_inq_dimid(ncid,"maxl_pot", id)
+ istat = pio_inquire_dimension(ncid, id, len=maxl_pot)
+ istat = pio_inq_dimid(ncid,"maxm_pot", id)
+ istat = pio_inquire_dimension(ncid, id, len=maxm_pot)
+
+ ! write(iulog,"('wei05sc: maxk_scha=',i3,' maxm_scha=',i3)") &
+ ! maxk_scha,maxm_scha
+ ! write(iulog,"('wei05sc: maxl_pot=',i3,' maxm_pot=',i3)") &
+ ! maxl_pot,maxm_pot
+ !
+ ! Read variables:
+ !
+ ! double bndya(na):
+ istat = pio_inq_varid(ncid, 'bndya', id)
+ istat = pio_get_var(ncid, id,bndya)
+ ! write(iulog,"('wei05sc: bndya=',/,(8f8.3))") bndya
+ !
+ ! double bndyb(nb):
+ istat = pio_inq_varid(ncid, 'bndyb', id)
+ istat = pio_get_var(ncid, id,bndyb)
+ ! write(iulog,"('wei05sc: bndyb=',/,(8f8.3))") bndyb
+ !
+ ! double ex_bndy(nex):
+ istat = pio_inq_varid(ncid, 'ex_bndy', id)
+ istat = pio_get_var(ncid, id,ex_bndy)
+ ! write(iulog,"('wei05sc: ex_bndy=',/,(8f8.3))") ex_bndy
+ !
+ ! double th0s(n3_scha):
+ istat = pio_inq_varid(ncid, 'th0s', id)
+ istat = pio_get_var(ncid, id,th0s)
+ ! write(iulog,"('wei05sc: th0s=',/,(8f8.3))") th0s
+ !
+ ! double allnkm(n1_scha,n2_scha,n3_scha):
+ istat = pio_inq_varid(ncid, 'allnkm', id)
+ istat = pio_get_var(ncid, id,allnkm)
+ ! write(iulog,"('wei05sc: allnkm min,max=',2e12.4)") minval(allnkm),maxval(allnkm)
+ !
+ ! int ab(csize):
+ istat = pio_inq_varid(ncid, 'ab', id)
+ istat = pio_get_var(ncid, id,ab)
+ ! write(iulog,"('wei05sc: ab=',/,(10i4))") ab
+ !
+ ! int ls(csize):
+ istat = pio_inq_varid(ncid, 'ls', id)
+ istat = pio_get_var(ncid, id,ls)
+ ! write(iulog,"('wei05sc: ls=',/,(10i4))") ls
+ !
+ ! int ms(csize):
+ istat = pio_inq_varid(ncid, 'ms', id)
+ istat = pio_get_var(ncid, id,ms)
+ ! write(iulog,"('wei05sc: ms=',/,(10i4))") ms
+ !
+ ! double ex_epot(nex):
+ istat = pio_inq_varid(ncid, 'ex_epot', id)
+ istat = pio_get_var(ncid, id,ex_epot)
+ ! write(iulog,"('wei05sc: ex_epot=',/,(8f8.3))") ex_epot
+ !
+ ! double ex_bpot(nex):
+ istat = pio_inq_varid(ncid, 'ex_bpot', id)
+ istat = pio_get_var(ncid, id,ex_bpot)
+ ! write(iulog,"('wei05sc: ex_bpot=',/,(8f8.3))") ex_bpot
+ !
+ ! double epot_schfits(csize,n_schfits):
+ istat = pio_inq_varid(ncid, 'epot_schfits', id)
+ istat = pio_get_var(ncid, id,epot_schfits)
+ ! write(iulog,"('wei05sc: epot_schfits min,max=',2e12.4)") &
+ ! minval(epot_schfits),maxval(epot_schfits)
+ !
+ ! double bpot_schfits(csize,n_schfits):
+ istat = pio_inq_varid(ncid, 'bpot_schfits', id)
+ istat = pio_get_var(ncid, id,bpot_schfits)
+ ! write(iulog,"('wei05sc: bpot_schfits min,max=',2e12.4)") &
+ ! minval(bpot_schfits),maxval(bpot_schfits)
+ !
+ ! double epot_alschfits(csize,n_alschfits):
+ istat = pio_inq_varid(ncid, 'epot_alschfits', id)
+ istat = pio_get_var(ncid, id,epot_alschfits)
+ ! write(iulog,"('wei05sc: epot_alschfits min,max=',2e12.4)") &
+ ! minval(epot_alschfits),maxval(epot_alschfits)
+ !
+ ! double bpot_alschfits(csize,n_alschfits):
+ istat = pio_inq_varid(ncid, 'bpot_alschfits', id)
+ istat = pio_get_var(ncid, id,bpot_alschfits)
+ ! write(iulog,"('wei05sc: bpot_alschfits min,max=',2e12.4)") &
+ ! minval(bpot_alschfits),maxval(bpot_alschfits)
+ !
+ ! Close file:
+ call cam_pio_closefile(ncid)
+ if(masterproc) then
+ write(iulog,"('wei05sc: completed read of file ',a)") trim(file)
+ end if
+
+ end subroutine read_wei05_ncfile
+
+ !-----------------------------------------------------------------------
+ subroutine setmodel(angle,bt,tilt,swvel,swden,model)
+ !
+  ! Calculate the complete set of the models' SCHA coefficients,
+  ! given an arbitrary IMF angle (degrees from northward toward +Y),
+ ! given byimf, bzimf, solar wind velocity (km/sec), and density.
+ !
+ ! Args:
+ real(r8), intent(in) :: angle, bt, tilt, swvel, swden
+ character(len=*), intent(in) :: model
+ !
+ ! Local:
+ integer :: i, j
+ real(r8) :: pi,stilt,stilt2,sw,swp,swe,c0,rang,cosa,sina,cos2a,sin2a
+ real(r8) :: a(n_schfits)
+ !
+ if (trim(model) /= 'epot'.and.trim(model) /= 'bpot') then
+ if (masterproc) then
+ write(iulog, "('>>> model=',a)") trim(model)
+ write(iulog, "(a)") &
+ '>>> setmodel: model must be either ''epot'' or ''bpot'''
+ end if
+ call endrun("setmodel: model must be either 'epot' or 'bpot'")
+ end if
+ !
+ pi = 4._r8 * atan(1._r8)
+ rad2deg = 180._r8 / pi
+ deg2rad = pi / 180._r8
+ !
+ ! write(iulog,"('setmodel call setboundary: model=',a,' swvel=',e12.4)") &
+ ! model, swvel
+
+ call setboundary(angle, bt, swvel, swden)
+ !
+ stilt = sin(tilt * deg2rad)
+ stilt2 = stilt**2
+ sw = bt * swvel/ 1000._r8
+ if (trim(model) == 'epot') then
+ swe = (1._r8-exp(-sw*ex_epot(2)))*sw**ex_epot(1)
+ else
+ swe = (1._r8-exp(-sw*ex_bpot(2)))*sw**ex_bpot(1)
+ end if
+ c0 = 1._r8
+ swp = swvel**2 * swden*1.6726e-6_r8
+ rang = angle*deg2rad
+ cosa = cos(rang)
+ sina = sin(rang)
+ cos2a = cos(2._r8*rang)
+ sin2a = sin(2._r8*rang)
+ if (bt < 1._r8) then ! remove angle dependency for IMF under 1 nT
+ cosa = -1._r8+bt*(cosa+1._r8)
+ cos2a = 1._r8+bt*(cos2a-1._r8)
+ sina = bt*sina
+ sin2a = bt*sin2a
+ end if
+ a = (/c0, swe, stilt, stilt2, swp, &
+ swe*cosa, stilt*cosa, stilt2*cosa, swp*cosa, &
+ swe*sina, stilt*sina, stilt2*sina, swp*sina, &
+ swe*cos2a, swe*sin2a/)
+ if (trim(model) == 'epot') then
+ esphc(:) = 0._r8
+ do j=1,csize
+ do i=1,n_schfits
+ esphc(j) = esphc(j)+epot_schfits(i,j)*a(i)
+ end do
+ end do
+ ! write(iulog,"('setmodel: esphc=',/,(6e12.4))") esphc
+ else
+ bsphc(:) = 0._r8
+ do j=1,csize
+ do i=1,n_schfits
+ bsphc(j) = bsphc(j)+bpot_schfits(i,j)*a(i)
+ end do
+ end do
+ ! write(iulog,"('setmodel: bsphc=',/,(6e12.4))") bsphc
+ end if
+ end subroutine setmodel
+
+ !-----------------------------------------------------------------------
+ !-----------------------------------------------------------------------
+ subroutine wei05loc (ih, byimf, power, sunlon)
! ih=1,2 for SH,NH called from weimer05
!
! (dimension 2 is for south, north hemispheres)
@@ -646,134 +674,139 @@ subroutine wei05loc (ih, byimf, power, sunlons)
! rrad(2), ! radius of auroral circle in radians
! offc(2), ! offset of convection towards 0 MLT relative to mag pole (rad)
! dskofc(2) ! offset of convection in radians towards 18 MLT (f(By))
-! sunlons(nlat): sun's longitude in dipole coordinates (see sub sunloc)
+! sunlon: sun's longitude in dipole coordinates (see sub sunloc)
!
-
-!
-! Args:
+ !
+ ! Args:
integer,intent(in) :: ih
real(r8),intent(in) :: byimf
real(r8),intent(in) :: power
- real(r8),intent(in) :: sunlons(:)
-!
-! Local:
- real(r8) :: rccp,racp,rahp,ramx,diffrac,plevel,tmltmin,tmltmax
+ real(r8),intent(in) :: sunlon
+ !
+ ! Local:
+ real(r8) :: rccp, racp, rahp, ramx, diffrac, plevel, tmltmin, tmltmax
real(r8) :: offcdegp(2)
- integer :: i,j,j1,j2
- real(r8) :: vnx(2,2),hem,mltd,mltn
+ integer :: i, j, j1, j2
+ real(r8) :: vnx(2,2), hem, mltd, mltn
integer :: inx(2,2)
- real(r8) :: offcdeg,dskof,arad,crad
+ real(r8) :: offcdeg, dskof, arad, crad
real(r8) :: byloc
-
-! Limit size of byimf in phin and phid calculations (as in aurora.F)
-! NOTE: This byloc is assymetric in hemisphere, which is probably not correct
+
+ ! Limit size of byimf in phin and phid calculations (as in aurora.F)
+ ! NOTE: This byloc is asymmetric in hemisphere, which is probably not correct
byloc = byimf
if (byloc .gt. 7._r8) byloc = 7._r8
if (byloc .lt. -11._r8) byloc = -11._r8
-!
-! ih=1 is SH, ih=2 is NH
- if (ih .eq. 1) then
- j1 = 1
- j2 = nmlat0
- hem = -1._r8
- else
- j1 = nmlat0 + 1
- j2 = nmlat
- hem = 1._r8
- endif
-! Print out un-revised values:
-! write (6,"(1x,'Original convection/oval params (hem,By,off,dsk',
-! | ',rad,phid,n=',10f9.4)") hem,byimf,offc(ih)*rtd,offa(ih)*rtd,
-! | dskofc(ih)*rtd,dskofa(ih)*rtd,theta0(ih)*rtd,rrad(ih)*rtd,
-! | phid(ih)*rtd/15.+12.,phin(ih)*rtd/15.+12.
-! Find min/max
- vnx(ih,1) = 0._r8
- vnx(ih,2) = 0._r8
- do j=j1,j2
- do i=1,nmlonp1-1
- if (phihm(i,j) .gt. vnx(ih,2)) then
- vnx(ih,2) = phihm(i,j)
- inx(ih,2) = i
- endif
- if (phihm(i,j) .lt. vnx(ih,1)) then
- vnx(ih,1) = phihm(i,j)
- inx(ih,1) = i
- endif
- enddo ! i=1,nmlonp1-1
- enddo ! j=j1,j2
-! 05/08: Calculate weictpoten in kV from Weimer model min/max in V
- weictpoten(ih) = 0.001_r8 * (vnx(ih,2) - vnx(ih,1))
- tmltmin = (ylonm(inx(ih,1))-sunlons(1)) * r2d/15._r8 + 12._r8
- if (tmltmin .gt. 24._r8) tmltmin = tmltmin - 24._r8
- tmltmax = (ylonm(inx(ih,2))-sunlons(1)) * r2d/15._r8 + 12._r8
- if (tmltmax .gt. 24._r8) tmltmax = tmltmax - 24._r8
-! write (6,"('ih Bz By Hp ctpoten,wei min/max potV,lat,mlt=',i2,
-! | 5f8.2,2x,e12.4,2f8.2,2x,e12.4,2f8.2))") ih,bzimf,byimf,power,
-! | ctpoten,weictpoten(ih),
-! | vnx(ih,1),ylatm(jnx(ih,1))*rtd,tmltmin,
-! | vnx(ih,2),ylatm(jnx(ih,2))*rtd,tmltmax
-! 05/08: From aurora_cons, calculate convection and aurora radii using IMF convection
-! and power (plevel); racp (DMSP/NOAA) - rccp (AMIE) = 5.32 (Bz>0) to 6.62 (Bz<0) deg
-! Heelis et al [1980, JGR, 85, pp 3315-3324] Fig 8: ra=rc+2deg, and is 2.5 deg to dusk
- rccp = -3.80_r8+8.48_r8*(weictpoten(ih)**0.1875_r8)
- racp = -0.43_r8+9.69_r8*(weictpoten(ih)**0.1875_r8)
- plevel = 0._r8
- if (power >=1.00_r8) plevel = 2.09_r8*log(power)
- rahp = 14.20_r8 + 0.96_r8*plevel
- ramx = max(racp,rahp)
- diffrac = ramx - rccp
-
-! Set default values
-! Use parameterization defaults for phid (phid(MLT)=9.39 +/- 0.21By - 12)
-! and phin (phin(MLT)=23.50 +/- 0.15By - 12)
- mltd = 9.39_r8 - hem*0.21_r8*byloc
- mltn = 23.50_r8 - hem*0.15_r8*byloc
- phid(ih) = (mltd-12._r8) * 15._r8 *d2r
- phin(ih) = (mltn-12._r8) * 15._r8 *d2r
-! 05/18/08: Note that phid,phin are only for Heelis and are irrelevant for Weimer
-! write (6,"(1x,'mltd mltn phid,n =',4f8.2)")
-! | mltd,mltn,phid(ih)*rtd/15.,phin(ih)*rtd/15.
-! Use default constant value of offcdegp from setboundary in Weimer 2005
- offcdeg = 4.2_r8
- offcdegp(ih) = offcdeg
- offc(ih) = offcdegp(ih) *d2r
- offa(ih) = offcdegp(ih) *d2r
-! write (6,"(1x,'offcdeg,rad =',2e12.4)") offcdeg,offc(ih)
- dskof = 0._r8
- dskofc(ih) = dskof *d2r
-! oval offset is 2.5 deg towards dawn (more neg dskof)
- dskofa(ih) = (dskof-2.5_r8) *d2r
-! write (6,"(1x,'dskof,c,a=',3f8.2)")
-! | dskof,dskofc(ih)*rtd,dskofa(ih)*rtd
-! Set crad from bndyfitr/2 of setboundary of Weimer 2005
- crad = bndyfitr/2._r8
-! write (6,"(1x,'wei05loc: ih,bz,y,crad =',i2,3f8.2)")
-! | ih,bzimf,byimf,crad
-! Fig 8 Heelis et al [1980]: ra=rc+2deg, and shifted 2.5 deg to dusk
- arad = crad + 2._r8
-! 05/08: Make ra=rc+diffrac(=ramx-rccp) - same difference as in aurora.F
-! Choose to have arad=crad(Weimer) + diffrac(same diff as in aurora.F)
- arad = crad + diffrac
-! 08/08: OR make ra=ramx=max(racp,rahp) so diffrac=arad-crad
-! diffrac2 = ramx - crad
-! Choose to have arad=ramx (same as in aurora.F as determined by P/CP)
-! arad = ramx
- theta0(ih) = crad *d2r
- rrad(ih) = arad *d2r
-! write (6,"(1x,'radius: crad,rccp,racp,rahp diffa-c',
-! | '(aurF,ramx-Weic) ramx,Weic+d,arad deg=',9f8.2)") crad,rccp,
-! | racp,rahp,diffrac,diffrac2,ramx,crad+diffrac,arad
-
-! Print out revised values (revised 05/08):
-! write (6,"(1x,'Revised convection/oval params (off,dsk,',
-! | 'rad,phid,n=',8f9.4)")offc(ih)*rtd,offa(ih)*rtd,
-! | dskofc(ih)*rtd,dskofa(ih)*rtd,theta0(ih)*rtd,rrad(ih)*rtd,
-! | phid(ih)*rtd/15.+12.,phin(ih)*rtd/15.+12.
-
- end subroutine wei05loc
+ !
+ ! ih=1 is SH, ih=2 is NH
+ if (ih .eq. 1) then
+ j1 = 1
+ j2 = nmlat0
+ hem = -1._r8
+ else
+ j1 = nmlat0 + 1
+ j2 = nmlat
+ hem = 1._r8
+ end if
+ ! Print out un-revised values:
+ ! write (6,"(1x,'Original convection/oval params (hem,By,off,dsk',
+ ! | ',rad,phid,n=',10f9.4)") hem,byimf,offc(ih)*rtd,offa(ih)*rtd,
+ ! | dskofc(ih)*rtd,dskofa(ih)*rtd,theta0(ih)*rtd,rrad(ih)*rtd,
+ ! | phid(ih)*rtd/15.+12.,phin(ih)*rtd/15.+12.
+ ! Find min/max
+ vnx(ih,1) = 0._r8
+ vnx(ih,2) = 0._r8
+ do j=j1,j2
+ do i=1,nmlonp1-1
+ if (phihm(i,j) .gt. vnx(ih,2)) then
+ vnx(ih,2) = phihm(i,j)
+ inx(ih,2) = i
+ end if
+ if (phihm(i,j) .lt. vnx(ih,1)) then
+ vnx(ih,1) = phihm(i,j)
+ inx(ih,1) = i
+ end if
+ end do ! i=1,nmlonp1-1
+ end do ! j=j1,j2
+ ! 05/08: Calculate weictpoten in kV from Weimer model min/max in V
+ weictpoten(ih) = 0.001_r8 * (vnx(ih,2) - vnx(ih,1))
+ tmltmin = (ylonm(inx(ih,1))-sunlon) * r2d/15._r8 + 12._r8
+ if (tmltmin > 24._r8) then
+ tmltmin = tmltmin - 24._r8
+ end if
+ tmltmax = (ylonm(inx(ih,2))-sunlon) * r2d/15._r8 + 12._r8
+ if (tmltmax > 24._r8) then
+ tmltmax = tmltmax - 24._r8
+ end if
+ ! write (6,"('ih Bz By Hp ctpoten,wei min/max potV,lat,mlt=',i2,
+ ! | 5f8.2,2x,e12.4,2f8.2,2x,e12.4,2f8.2))") ih,bzimf,byimf,power,
+ ! | ctpoten,weictpoten(ih),
+ ! | vnx(ih,1),ylatm(jnx(ih,1))*rtd,tmltmin,
+ ! | vnx(ih,2),ylatm(jnx(ih,2))*rtd,tmltmax
+ ! 05/08: From aurora_cons, calculate convection and aurora radii using IMF convection
+ ! and power (plevel); racp (DMSP/NOAA) - rccp (AMIE) = 5.32 (Bz>0) to 6.62 (Bz<0) deg
+ ! Heelis et al [1980, JGR, 85, pp 3315-3324] Fig 8: ra=rc+2deg, and is 2.5 deg to dusk
+ rccp = -3.80_r8 + (8.48_r8*(weictpoten(ih)**0.1875_r8))
+ racp = -0.43_r8 + (9.69_r8*(weictpoten(ih)**0.1875_r8))
+ plevel = 0._r8
+ if (power >= 1.00_r8) then
+ plevel = 2.09_r8*log(power)
+ end if
+ rahp = 14.20_r8 + 0.96_r8*plevel
+ ramx = max(racp, rahp)
+ diffrac = ramx - rccp
+
+ ! Set default values
+ ! Use parameterization defaults for phid (phid(MLT)=9.39 +/- 0.21By - 12)
+ ! and phin (phin(MLT)=23.50 +/- 0.15By - 12)
+ mltd = 9.39_r8 - hem*0.21_r8*byloc
+ mltn = 23.50_r8 - hem*0.15_r8*byloc
+ phid(ih) = (mltd-12._r8) * 15._r8 *d2r
+ phin(ih) = (mltn-12._r8) * 15._r8 *d2r
+ ! 05/18/08: Note that phid,phin are only for Heelis and are irrelevant for Weimer
+ ! write (6,"(1x,'mltd mltn phid,n =',4f8.2)")
+ ! | mltd,mltn,phid(ih)*rtd/15.,phin(ih)*rtd/15.
+ ! Use default constant value of offcdegp from setboundary in Weimer 2005
+ offcdeg = 4.2_r8
+ offcdegp(ih) = offcdeg
+ offc(ih) = offcdegp(ih) *d2r
+ offa(ih) = offcdegp(ih) *d2r
+ ! write (6,"(1x,'offcdeg,rad =',2e12.4)") offcdeg,offc(ih)
+ dskof = 0._r8
+ dskofc(ih) = dskof *d2r
+ ! oval offset is 2.5 deg towards dawn (more neg dskof)
+ dskofa(ih) = (dskof-2.5_r8) *d2r
+ ! write (6,"(1x,'dskof,c,a=',3f8.2)")
+ ! | dskof,dskofc(ih)*rtd,dskofa(ih)*rtd
+ ! Set crad from bndyfitr/2 of setboundary of Weimer 2005
+ crad = bndyfitr/2._r8
+ ! write (6,"(1x,'wei05loc: ih,bz,y,crad =',i2,3f8.2)")
+ ! | ih,bzimf,byimf,crad
+ ! Fig 8 Heelis et al [1980]: ra=rc+2deg, and shifted 2.5 deg to dusk
+ arad = crad + 2._r8
+ ! 05/08: Make ra=rc+diffrac(=ramx-rccp) - same difference as in aurora.F
+ ! Choose to have arad=crad(Weimer) + diffrac(same diff as in aurora.F)
+ arad = crad + diffrac
+ ! 08/08: OR make ra=ramx=max(racp,rahp) so diffrac=arad-crad
+ ! diffrac2 = ramx - crad
+ ! Choose to have arad=ramx (same as in aurora.F as determined by P/CP)
+ ! arad = ramx
+ theta0(ih) = crad *d2r
+ rrad(ih) = arad *d2r
+ ! write (6,"(1x,'radius: crad,rccp,racp,rahp diffa-c',
+ ! | '(aurF,ramx-Weic) ramx,Weic+d,arad deg=',9f8.2)") crad,rccp,
+ ! | racp,rahp,diffrac,diffrac2,ramx,crad+diffrac,arad
+
+ ! Print out revised values (revised 05/08):
+ ! write (6,"(1x,'Revised convection/oval params (off,dsk,',
+ ! | 'rad,phid,n=',8f9.4)")offc(ih)*rtd,offa(ih)*rtd,
+ ! | dskofc(ih)*rtd,dskofa(ih)*rtd,theta0(ih)*rtd,rrad(ih)*rtd,
+ ! | phid(ih)*rtd/15.+12.,phin(ih)*rtd/15.+12.
+
+ end subroutine wei05loc
!-----------------------------------------------------------------------
-! for now this is here ... might need to move to a gen util module
+! for now this is here ... might need to move to a gen util module
!-----------------------------------------------------------------------
function hp_from_bz_swvel(bz,swvel) result(hp)
!
@@ -792,7 +825,7 @@ function hp_from_bz_swvel(bz,swvel) result(hp)
hp = 6.0_r8 + 3.3_r8*abs(bz) + (0.05_r8 + 0.003_r8*abs(bz))* (min(swvel,700._r8)-300._r8)
else
hp = 5.0_r8 + 0.05_r8 * (min(swvel,700._r8)-300._r8)
- endif
+ end if
hp = max(2.5_r8,hp)*fac
end function hp_from_bz_swvel
@@ -820,8 +853,8 @@ subroutine setboundary(angle,bt,swvel,swden)
ct = cos(theta)
st = sin(theta)
!
- tmat(1,:) = (/ ct, 0._r8, st/)
- tmat(2,:) = (/ 0._r8, 1._r8, 0._r8/)
+ tmat(1,:) = (/ ct, 0._r8, st/)
+ tmat(2,:) = (/ 0._r8, 1._r8, 0._r8/)
tmat(3,:) = (/-st, 0._r8, ct/)
!
! ttmat(1,:) = (/ct, 0._r8,-st/)
@@ -835,7 +868,7 @@ subroutine setboundary(angle,bt,swvel,swden)
btx = btx*bt**ex_bndy(2)
else
cosa = 1._r8+bt*(cosa-1._r8) ! remove angle dependency for IMF under 1 nT
- endif
+ end if
x = (/1._r8, cosa, btx, btx*cosa, swvel, swp/)
c = bndya
bndyfitr = 0._r8
@@ -844,15 +877,15 @@ subroutine setboundary(angle,bt,swvel,swden)
! write(iulog,"('setboundry: i=',i3,' bndyfitr=',e12.4)") i,bndyfitr
- enddo
+ end do
end subroutine setboundary
!-----------------------------------------------------------------------
subroutine epotval(lat,mlt,fill,epot)
!
-! Return the Potential (in kV) at given combination of def. latitude
-! (lat) and MLT, in geomagnetic apex coordinates (practically identical
-! to AACGM).
-! If the location is outside of the model's low-latitude boundary, then
+! Return the Potential (in kV) at given combination of def. latitude
+! (lat) and MLT, in geomagnetic apex coordinates (practically identical
+! to AACGM).
+! If the location is outside of the model's low-latitude boundary, then
! the value "fill" is returned.
!
implicit none
@@ -873,14 +906,14 @@ subroutine epotval(lat,mlt,fill,epot)
if (inside == 0) then
epot = fill
return
- endif
+ end if
!
-! IDL code:
+! IDL code:
! phim=phir # replicate(1,maxm) * ((indgen(maxm)+1) ## replicate(1,n_elements(phir)))
! where the '#' operator multiplies columns of first array by rows of second array,
! and the '##' operator multiplies rows of first array by columns of second array.
-! Here, maxm == maxm_pot == 2, and phir is a scalar. The above IDL statement then
-! becomes: phim = ([phir] # [1,1]) * ([1,2] ## [phir]) where phim will be
+! Here, maxm == maxm_pot == 2, and phir is a scalar. The above IDL statement then
+! becomes: phim = ([phir] # [1,1]) * ([1,2] ## [phir]) where phim will be
! dimensioned [1,2]
!
phim(1) = phir
@@ -894,7 +927,7 @@ subroutine epotval(lat,mlt,fill,epot)
if (skip == 1) then
skip = 0
cycle
- endif
+ end if
m = ms(j)
if (ab(j)==1) then
plm = scplm(j,colat,nlm) ! scplm function is in this module
@@ -904,10 +937,10 @@ subroutine epotval(lat,mlt,fill,epot)
else
z = z+plm*(esphc(j)*cospm(m)+esphc(j+1)*sinpm(m))
skip = 1
- endif
- endif ! ab(j)
- enddo
- epot = z
+ end if
+ end if ! ab(j)
+ end do
+ epot = z
end subroutine epotval
!-----------------------------------------------------------------------
subroutine mpfac(lat,mlt,fill,fac)
@@ -931,7 +964,7 @@ subroutine mpfac(lat,mlt,fill,fac)
if (inside == 0) then
fac = fill
return
- endif
+ end if
!
phim(1) = phir
phim(2) = phir*2._r8
@@ -944,7 +977,7 @@ subroutine mpfac(lat,mlt,fill,fac)
if (skip == 1) then
skip = 0
cycle
- endif
+ end if
if (ls(j) >= 11) exit jloop
m = ms(j)
if (ab(j) == 1) then
@@ -957,9 +990,9 @@ subroutine mpfac(lat,mlt,fill,fac)
else
z = z-(plm*(bsphc(j)*cospm(m)+bsphc(j+1)*sinpm(m)))
skip = 1
- endif
- endif
- enddo jloop ! j=1,csize
+ end if
+ end if
+ end do jloop ! j=1,csize
pi = 4._r8*atan(1._r8)
cfactor = -1.e5_r8/(4._r8*pi*re**2) ! convert to uA/m2
z = z*cfactor
@@ -969,7 +1002,7 @@ end subroutine mpfac
!-----------------------------------------------------------------------
real(r8) function scplm(index,colat,nlm)
!
-! Return Spherical Cap Harmonic Associated Legendre values, given colat
+! Return Spherical Cap Harmonic Associated Legendre values, given colat
! values and index i into array of L and M values.
!
implicit none
@@ -985,30 +1018,30 @@ real(r8) function scplm(index,colat,nlm)
real(r8) :: cth(mxtablesize)
real(r8),save :: prevth0=1.e36_r8
integer,save :: tablesize
+ character(len=shr_kind_cl) :: errmsg
!
scplm = 0._r8
skip = 0 ! Added by B.Foster, 4/23/14
th0 = bndyfitr
if (prevth0 /= th0) then
tablesize = 3*nint(th0)
- if (tablesize > mxtablesize) then
- write(iulog,"('>>> tablesize > mxtablesize: tablesize=',i8,' mxtablesize=',i8,' th0=',e12.4)") &
- tablesize,mxtablesize,th0
- call endrun('tablesize')
- endif
+ if (tablesize > mxtablesize) then
+ write(errmsg,"('>>> tablesize > mxtablesize: tablesize=',i8,' mxtablesize=',i8,' th0=',e12.4)") &
+ tablesize,mxtablesize,th0
+ write(iulog,*) trim(errmsg)
+ call endrun(errmsg)
+ end if
do i=1,tablesize
- real8 = dble(i-1)
- real8a = dble(tablesize-1)
- colattable(i) = real8*(th0/real8a)
- cth(i) = cos(colattable(i)*deg2rad)
- enddo
+ colattable(i) = real(i-1, r8) * (th0 / real(tablesize-1, r8))
+ cth(i) = cos(colattable(i) * deg2rad)
+ end do
prevth0 = th0
- nlms = 0._r8 ! whole array init
+ nlms = 0._r8 ! whole array init
do j=1,csize
if (skip == 1) then
skip = 0
cycle
- endif
+ end if
l = ls(j)
m = ms(j)
nlms(j) = nkmlookup(l,m,th0) ! nkmlookup in this module
@@ -1020,9 +1053,9 @@ real(r8) function scplm(index,colat,nlm)
plmtable(1,j+1) = plmtable(1,j)
nlms(j+1) = nlms(j)
skip = 1
- endif
- enddo ! j=1,csize
- endif ! prevth0
+ end if
+ end do ! j=1,csize
+ end if ! prevth0
nlm = nlms(index)
colata(1) = colat
call interpol_quad(plmtable(1:tablesize,index), &
@@ -1031,147 +1064,138 @@ real(r8) function scplm(index,colat,nlm)
end function scplm
!-----------------------------------------------------------------------
subroutine pm_n(m,r,cth,plmtable,tablesize)
-!
-! Another SCHA function, returns the SCHA version of the associated
-! Legendre Polynomial, Pmn
-!
- implicit none
-!
-! Args:
- integer,intent(in) :: m,tablesize
- real(r8),intent(in) :: r
- real(r8),intent(in) :: cth(tablesize)
- real(r8),intent(out) :: plmtable(tablesize)
-!
-! Local:
- integer :: i,k
- real(r8) :: rm,rk,div,ans,xn
- real(r8),dimension(tablesize) :: a,x,tmp,table
-!
- if (m == 0) then
- a = 1._r8 ! whole array op
- else
- do i=1,tablesize
- a(i) = sqrt(1._r8-cth(i)**2)**m
- enddo
- endif
- xn = r*(r+1._r8)
- x(:) = (1._r8-cth(:))/2._r8
- table = a ! whole array init
- k = 1
- pmn_loop: do ! repeat-until loop in idl code
- do i=1,tablesize
- real8 = dble(m)
- rm = real8
- real8 = dble(k)
- rk = real8
- a(i) = a(i)*(x(i)*((rk+rm-1._r8)*(rk+rm)-xn)/(rk*(rk+rm)))
- table(i) = table(i)+a(i) ! "result" in idl code
- enddo
- k = k+1
- do i=1,tablesize
- div = abs(table(i))
- if (div <= 1.e-6_r8) div = 1.e-6_r8
- tmp(i) = abs(a(i)) / div
- enddo
- if (maxval(tmp) < 1.e-6_r8) exit pmn_loop
- enddo pmn_loop
- ans = km_n(m,r)
-
- plmtable(:) = table(:)*ans
+ !
+ ! Another SCHA function, returns the SCHA version of the associated
+ ! Legendre Polynomial, Pmn
+ !
+ ! Args:
+ integer,intent(in) :: m,tablesize
+ real(r8),intent(in) :: r
+ real(r8),intent(in) :: cth(tablesize)
+ real(r8),intent(out) :: plmtable(tablesize)
+ !
+ ! Local:
+ integer :: i,k
+ real(r8) :: rm,rk,div,ans,xn
+ real(r8),dimension(tablesize) :: a,x,tmp,table
+ !
+ if (m == 0) then
+ a = 1._r8 ! whole array op
+ else
+ do i=1,tablesize
+ a(i) = sqrt(1._r8-cth(i)**2)**m
+ end do
+ end if
+ xn = r*(r+1._r8)
+ x(:) = (1._r8-cth(:))/2._r8
+ table = a ! whole array init
+ k = 1
+ pmn_loop: do ! repeat-until loop in idl code
+ do i=1,tablesize
+ rm = real(m, r8)
+ rk = real(k, r8)
+ a(i) = a(i)*(x(i)*((rk+rm-1._r8)*(rk+rm)-xn)/(rk*(rk+rm)))
+ table(i) = table(i)+a(i) ! "result" in idl code
+ end do
+ k = k+1
+ do i=1,tablesize
+ div = abs(table(i))
+ if (div <= 1.e-6_r8) div = 1.e-6_r8
+ tmp(i) = abs(a(i)) / div
+ end do
+ if (maxval(tmp) < 1.e-6_r8) exit pmn_loop
+ end do pmn_loop
+ ans = km_n(m,r)
+
+ plmtable(:) = table(:)*ans
end subroutine pm_n
!-----------------------------------------------------------------------
real(r8) function km_n(m,rn)
-!
-! A normalization function used by the SCHA routines. See Haines.
-!
- implicit none
-!
-! Args:
- integer,intent(in) :: m
- real(r8),intent(in) :: rn
-!
-! Local:
- real(r8) :: rm
-!
- if (m == 0) then
- km_n = 1._r8
- return
- endif
- real8 = dble(m)
- rm = real8
- km_n = sqrt(2._r8*exp(lngamma(rn+rm+1._r8)-lngamma(rn-rm+1._r8))) / &
- (2._r8**m*factorial(m))
+ !
+ ! A normalization function used by the SCHA routines. See Haines.
+ !
+ ! Args:
+ integer,intent(in) :: m
+ real(r8),intent(in) :: rn
+ !
+ ! Local:
+ real(r8) :: rm
+ !
+ if (m == 0) then
+ km_n = 1._r8
+ return
+ end if
+ rm = real(m, r8)
+ km_n = sqrt(2._r8*exp(lngamma(rn+rm+1._r8)-lngamma(rn-rm+1._r8))) / &
+ (2._r8**m*factorial(m))
end function km_n
!-----------------------------------------------------------------------
- real(r8) function nkmlookup(k,m,th0)
-!
-! Given the size of a spherical cap, defined by the polar cap angle, th0,
-! and also the values of integers k and m, returns the value of n, a
-! real number (see Haines).
-! It uses interpolation from a lookup table that had been precomputed,
-! in order to reduce the computation time.
-!
- implicit none
-!
-! Args:
- integer,intent(in) :: k,m
- real(r8),intent(in) :: th0
-!
-! Local:
- integer :: kk,mm
- real(r8) :: th0a(1),out(1)
+ real(r8) function nkmlookup(k, m, th0)
+ !
+ ! Given the size of a spherical cap, defined by the polar cap angle, th0,
+ ! and also the values of integers k and m, returns the value of n, a
+ ! real number (see Haines).
+ ! It uses interpolation from a lookup table that had been precomputed,
+ ! in order to reduce the computation time.
+ !
+ ! Args:
+ integer,intent(in) :: k,m
+ real(r8),intent(in) :: th0
+ !
+ ! Local:
+ integer :: kk,mm
+ real(r8) :: th0a(1),out(1)
- if (th0 == 90._r8) then
- real8 = dble(k)
- nkmlookup = real8
- return
- endif
- th0a(1) = th0
- kk = k+1
- mm = m+1
- if (kk > maxk_scha) then
- call interpol_quad(allnkm(maxk_scha,mm,:),th0s,th0a,out)
- endif
- if (mm > maxm_scha) then
- call interpol_quad(allnkm(kk,maxm_scha,:),th0s,th0a,out)
- endif
- if (th0 < th0s(1)) then
- write(iulog,"('>>> nkmlookup: th0 < th0s(1): th0=',e12.4,' th0s(1)=',e12.4)") &
- th0,th0s(1)
- endif
- call interpol_quad(allnkm(kk,mm,:),th0s,th0a,out)
- nkmlookup = out(1)
+ if (th0 == 90._r8) then
+ nkmlookup = real(k, r8)
+ return
+ end if
+ th0a(1) = th0
+ kk = k+1
+ mm = m+1
+ if (kk > maxk_scha) then
+ call interpol_quad(allnkm(maxk_scha,mm,:),th0s,th0a,out)
+ end if
+ if (mm > maxm_scha) then
+ call interpol_quad(allnkm(kk,maxm_scha,:),th0s,th0a,out)
+ end if
+ if (th0 < th0s(1)) then
+ write(iulog,"(a,e12.4,', th0s(1) = ',e12.4)") &
+ '>>> nkmlookup: th0 < th0s(1): th0 = ', th0, th0s(1)
+ end if
+ call interpol_quad(allnkm(kk,mm,:), th0s, th0a, out)
+ nkmlookup = out(1)
end function nkmlookup
!-----------------------------------------------------------------------
subroutine checkinputs(lat,mlt,inside,phir,colat)
- implicit none
-!
-! Args:
- real(r8),intent(in) :: lat,mlt
- integer,intent(out) :: inside
- real(r8),intent(out) :: phir,colat
-!
-! Local:
- real(r8) :: lon,tlat,tlon,radii
-!
- lon = mlt*15._r8
- call dorotation(lat,lon,tlat,tlon)
- radii = 90._r8-tlat
- inside = 0
- if (radii <= bndyfitr) inside = 1 ! bndyfitr from setboundary
- phir = tlon*deg2rad
- colat = radii
+ !
+ ! Args:
+ real(r8), intent(in) :: lat,mlt
+ integer, intent(out) :: inside
+ real(r8), intent(out) :: phir,colat
+ !
+ ! Local:
+ real(r8) :: lon, tlat, tlon, radii
+ !
+ lon = mlt*15._r8
+ call dorotation(lat,lon,tlat,tlon)
+ radii = 90._r8-tlat
+ inside = 0
+ if (radii <= bndyfitr) then
+ inside = 1 ! bndyfitr from setboundary
+ end if
+ phir = tlon*deg2rad
+ colat = radii
end subroutine checkinputs
!-----------------------------------------------------------------------
subroutine dorotation(latin,lonin,latout,lonout)
!
! Uses transformation matrices tmat and ttmat, to convert between
-! the given geomagnetic latatud/longitude, and the coordinate
+! the given geomagnetic latitude/longitude, and the coordinate
! system that is used within the model,that is offset from the pole.
!
-! Rotate Lat/Lon spherical coordinates with the transformation given
-! by saved matrix. The coordinates are assumed to be on a sphere of
+! Rotate Lat/Lon spherical coordinates with the transformation given
+! by saved matrix. The coordinates are assumed to be on a sphere of
! Radius=1. Uses cartesian coordinates as an intermediate step.
!
implicit none
@@ -1201,124 +1225,119 @@ subroutine dorotation(latin,lonin,latout,lonout)
!
do i=1,3
pos(i) = tmat(1,i)*a + tmat(2,i)*b + tmat(3,i)*stc
- enddo
+ end do
latout = asin(pos(3))*rad2deg
lonout = atan2(pos(2),pos(1))*rad2deg
end subroutine dorotation
!-----------------------------------------------------------------------
subroutine interpol_quad(v,x,u,p)
-!
-! f90 translation of IDL function interpol(v,x,u,/quadratic)
-!
- implicit none
-!
-! Args:
- real(r8),intent(in) :: v(:),x(:),u(:)
- real(r8),intent(out) :: p(:)
-!
-! Local:
- integer :: nv,nx,nu,i,ix
- real(r8) :: x0,x1,x2
-!
- nv = size(v)
- nx = size(x)
- nu = size(u)
- if (nx /= nv) then
- p(:) = 0._r8
- return
- endif
- do i=1,nu
- ix = value_locate(x,u(i))
-! 01/14 bae: interpol_quad in wei05sc.F is called when inside=1 or radii=nx assures epot is non-zero near
-! the pole (85.8mlat,0MLT) and the boundary (bndryfit).
- if (ix <=1) ix = 2 ! bug fix by bae 01/28/14
- if (ix >=nx) ix = nx-1 ! bug fix by bae 01/29/14
-! if (ix <= 1.or.ix >= nx) then ! bug fix by btf 12/23/09
-! p(i) = 0._r8
-! cycle ! bug fix by btf 12/23/09
-! endif
- x1 = x(ix)
- x0 = x(ix-1)
- x2 = x(ix+1)
- p(i) = v(ix-1) * (u(i)-x1) * (u(i)-x2) / ((x0-x1) * (x0-x2)) + &
- v(ix) * (u(i)-x0) * (u(i)-x2) / ((x1-x0) * (x1-x2)) + &
- v(ix+1) * (u(i)-x0) * (u(i)-x1) / ((x2-x0) * (x2-x1))
- enddo
+ !
+ ! f90 translation of IDL function interpol(v,x,u,/quadratic)
+ !
+ ! Args:
+ real(r8),intent(in) :: v(:),x(:),u(:)
+ real(r8),intent(out) :: p(:)
+ !
+ ! Local:
+ integer :: nv,nx,nu,i,ix
+ real(r8) :: x0,x1,x2
+ !
+ nv = size(v)
+ nx = size(x)
+ nu = size(u)
+ if (nx /= nv) then
+ p(:) = 0._r8
+ return
+ end if
+ do i = 1, nu
+ ix = value_locate(x,u(i))
+ ! 01/14 bae: interpol_quad in wei05sc.F is called when inside=1 or
+ ! radii=nx
+ ! assures epot is non-zero near the pole (85.8mlat,0MLT) and
+      ! the boundary (bndyfitr).
+ if (ix <=1) ix = 2
+ if (ix >=nx) ix = nx-1
+ x1 = x(ix)
+ x0 = x(ix-1)
+ x2 = x(ix+1)
+ p(i) = v(ix-1) * (u(i)-x1) * (u(i)-x2) / ((x0-x1) * (x0-x2)) + &
+ v(ix) * (u(i)-x0) * (u(i)-x2) / ((x1-x0) * (x1-x2)) + &
+ v(ix+1) * (u(i)-x0) * (u(i)-x1) / ((x2-x0) * (x2-x1))
+ end do
end subroutine interpol_quad
!-----------------------------------------------------------------------
integer function value_locate(vec,val)
-!
-! f90 translation of IDL function value_locate
-! Return index i into vec for which vec(i) <= val >= vec(i+1)
-! Input vec must be monotonically increasing
-!
- implicit none
-!
-! Args:
- real(r8),intent(in) :: vec(:),val
-!
-! Local:
- integer :: n,i
-!
- value_locate = 0
- n = size(vec)
- if (val < vec(1)) return
- if (val > vec(n)) then
- value_locate = n
- return
- endif
- do i=1,n-1
- if (val >= vec(i) .and. val <= vec(i+1)) then
- value_locate = i
- return
- endif
- enddo
+ !
+ ! f90 translation of IDL function value_locate
+  ! Return index i into vec for which vec(i) <= val <= vec(i+1)
+ ! Input vec must be monotonically increasing
+ !
+ implicit none
+ !
+ ! Args:
+ real(r8),intent(in) :: vec(:),val
+ !
+ ! Local:
+ integer :: n,i
+ !
+ value_locate = 0
+ n = size(vec)
+ if (val < vec(1)) then
+ return
+ end if
+ if (val > vec(n)) then
+ value_locate = n
+ return
+ end if
+ do i = 1, n-1
+ if (val >= vec(i) .and. val <= vec(i+1)) then
+ value_locate = i
+ return
+ end if
+ end do
end function value_locate
!-----------------------------------------------------------------------
real(r8) function lngamma(xx)
-!
-! This is an f90 translation from C code copied from
-! www.fizyka.umk.pl/nrbook/c6-1.pdf (numerical recipes gammln)
-!
- implicit none
- real(r8),intent(in) :: xx
- real(r8) :: x,y,tmp,ser
- real(r8) :: cof(6) = (/76.18009172947146_r8, -86.50532032941677_r8, &
- 24.01409824083091_r8, -1.231739572450155_r8, 0.1208650973866179e-2_r8, &
- -0.5395239384953e-5_r8/)
- integer :: j
-!
- y = xx
- x = xx
- tmp = x+5.5_r8
- tmp = tmp-(x+0.5_r8)*log(tmp)
- ser = 1.000000000190015_r8
- do j=1,5
- y = y+1
- ser = ser+cof(j)/y
- enddo
- lngamma = -tmp+log(2.5066282746310005_r8*ser/x)
+ !
+ ! This is an f90 translation from C code copied from
+ ! gammln routine from "Numerical Recipes in C" Chapter 6.1.
+ ! see: http://numerical.recipes
+ !
+
+ real(r8), intent(in) :: xx
+ real(r8) :: x,y,tmp,ser
+ real(r8) :: cof(6) = (/76.18009172947146_r8, -86.50532032941677_r8, &
+ 24.01409824083091_r8, -1.231739572450155_r8, &
+ 0.1208650973866179e-2_r8, -0.5395239384953e-5_r8/)
+ integer :: j
+ !
+ y = xx
+ x = xx
+ tmp = x+5.5_r8
+ tmp = tmp-(x + 0.5_r8) * log(tmp)
+ ser = 1.000000000190015_r8
+ do j = 1, 5
+ y = y + 1
+ ser = ser + (cof(j) / y)
+ end do
+ lngamma = -tmp+log(2.5066282746310005_r8*ser/x)
end function lngamma
!-----------------------------------------------------------------------
real(r8) function factorial(n)
- implicit none
- integer,intent(in) :: n
- integer :: m
- if (n <= 0) then
- factorial = 0._r8
- return
- endif
- if (n == 1) then
- factorial = 1._r8
- return
- endif
- real8 = dble(n)
- factorial = real8
- do m = n-1,1,-1
- real8 = dble(m)
- factorial = factorial * real8
- enddo
+ integer,intent(in) :: n
+ integer :: m
+ if (n <= 0) then
+ factorial = 0._r8
+ return
+ end if
+ if (n == 1) then
+ factorial = 1._r8
+ return
+ end if
+ factorial = real(n, r8)
+ do m = n-1,1,-1
+ factorial = factorial * real(m, r8)
+ end do
end function factorial
!-----------------------------------------------------------------------
!*********************** Copyright 1996,2001 Dan Weimer/MRC ***********************
@@ -1336,7 +1355,7 @@ real(r8) FUNCTION GET_TILT (YEAR,MONTH,DAY,HOUR)
INTEGER YEAR,MONTH,DAY,IDBUG
real(r8) :: HOUR
-!
+!
! THIS SUBROUTINE DERIVES THE ROTATION MATRICES AM(I,J,K) FOR 11
! TRANSFORMATIONS, IDENTIFIED BY K.
! K=1 TRANSFORMS GSE to GEO
@@ -1347,13 +1366,13 @@ real(r8) FUNCTION GET_TILT (YEAR,MONTH,DAY,HOUR)
! K=6 " GSM to MAG
! K=7 " GSE to GEI
! K=8 " GEI to GEO
-! K=9 " GSM to SM
-! K=10 " GEO to SM
-! K=11 " MAG to SM
+! K=9 " GSM to SM
+! K=10 " GEO to SM
+! K=11 " MAG to SM
!
! IF IDBUG IS NOT 0, THEN OUTPUTS DIAGNOSTIC INFORMATION TO
! FILE UNIT=IDBUG
-!
+!
INTEGER GSEGEO,GEOGSE,GEOMAG,MAGGEO
INTEGER GSEMAG,MAGGSE,GSEGSM,GSMGSE
INTEGER GEOGSM,GSMGEO,GSMMAG,MAGGSM
@@ -1373,14 +1392,14 @@ real(r8) FUNCTION GET_TILT (YEAR,MONTH,DAY,HOUR)
! MAG - Geomagnetic
! GSM - Geocentric Solar Magnetospheric
! SM - Solar Magnetic
-!
+!
! THE ARRAY CX(I) ENCODES VARIOUS ANGLES, STORED IN DEGREES
-! ST(I) AND CT(I) ARE SINES & COSINES.
+! ST(I) AND CT(I) ARE SINES & COSINES.
!
! Program author: D. R. Weimer
!
! Some of this code has been copied from subroutines which had been
-! obtained from D. Stern, NASA/GSFC. Other formulas are from "Space
+! obtained from D. Stern, NASA/GSFC. Other formulas are from "Space
! Physics Coordinate Transformations: A User Guide" by M. Hapgood (1991).
!
! The formulas for the calculation of Greenwich mean sidereal time (GMST)
@@ -1397,10 +1416,10 @@ real(r8) FUNCTION GET_TILT (YEAR,MONTH,DAY,HOUR)
! the DATA statement for assignments (not block_data)
! COMMON/MFIELD/EPOCH,TH0,PH0,DIPOLE
! COMMON/TRANSDAT/CX(9),ST(6),CT(6),AM(3,3,11)
-!
+!
real(r8) TH0,PH0 !,DIPOLE
real(r8) CX(9),ST(6),CT(6),AM(3,3,11)
-!
+!
! TH0 = geog co-lat of NH magnetic pole
! PH0 = geog longitude of NH magnetic pole
! DIPOLE = magnitude of the B field in gauss at the equator
@@ -1418,12 +1437,11 @@ real(r8) FUNCTION GET_TILT (YEAR,MONTH,DAY,HOUR)
IYR=1900+YEAR
ELSE
IYR=YEAR
- ENDIF
+ END IF
UT=HOUR
JD=JULDAY(MONTH,DAY,IYR)
MJD=JD-2400001
- real8 = dble(MJD)
- T0=(real8-51544.5_r8)/36525.0_r8
+ T0=(real(MJD, r8) - 51544.5_r8) / 36525.0_r8
GMSTD=100.4606184_r8 + 36000.770_r8*T0 + 3.87933E-4_r8*T0*T0 + &
15.0410686_r8*UT
CALL ADJUST(GMSTD)
@@ -1444,7 +1462,7 @@ real(r8) FUNCTION GET_TILT (YEAR,MONTH,DAY,HOUR)
WRITE(IDBUG,*) 'MEAN ANOMALY=',MA
WRITE(IDBUG,*) 'MEAN LONGITUDE=',LAMD
WRITE(IDBUG,*) 'TRUE LONGITUDE=',SUNLON
- ENDIF
+ END IF
CX(1)= GMSTD
CX(2) = ECLIP
@@ -1452,7 +1470,7 @@ real(r8) FUNCTION GET_TILT (YEAR,MONTH,DAY,HOUR)
CX(4) = TH0
CX(5) = PH0
! Derived later:
-! CX(6) = Dipole tilt angle
+! CX(6) = Dipole tilt angle
! CX(7) = Angle between sun and magnetic pole
! CX(8) = Subsolar point latitude
! CX(9) = Subsolar point longitude
@@ -1460,8 +1478,8 @@ real(r8) FUNCTION GET_TILT (YEAR,MONTH,DAY,HOUR)
DO I=1,5
ST(I) = SIND(CX(I))
CT(I) = COSD(CX(I))
- ENDDO
-!
+ END DO
+!
AM(1,1,GSEGEI) = CT(3)
AM(1,2,GSEGEI) = -ST(3)
AM(1,3,GSEGEI) = 0._r8
@@ -1470,74 +1488,74 @@ real(r8) FUNCTION GET_TILT (YEAR,MONTH,DAY,HOUR)
AM(2,3,GSEGEI) = -ST(2)
AM(3,1,GSEGEI) = ST(3)*ST(2)
AM(3,2,GSEGEI) = CT(3)*ST(2)
- AM(3,3,GSEGEI) = CT(2)
-!
- AM(1,1,GEIGEO) = CT(1)
- AM(1,2,GEIGEO) = ST(1)
- AM(1,3,GEIGEO) = 0._r8
- AM(2,1,GEIGEO) = -ST(1)
- AM(2,2,GEIGEO) = CT(1)
- AM(2,3,GEIGEO) = 0._r8
- AM(3,1,GEIGEO) = 0._r8
- AM(3,2,GEIGEO) = 0._r8
- AM(3,3,GEIGEO) = 1._r8
-!
- DO I=1,3
- DO J=1,3
+ AM(3,3,GSEGEI) = CT(2)
+!
+ AM(1,1,GEIGEO) = CT(1)
+ AM(1,2,GEIGEO) = ST(1)
+ AM(1,3,GEIGEO) = 0._r8
+ AM(2,1,GEIGEO) = -ST(1)
+ AM(2,2,GEIGEO) = CT(1)
+ AM(2,3,GEIGEO) = 0._r8
+ AM(3,1,GEIGEO) = 0._r8
+ AM(3,2,GEIGEO) = 0._r8
+ AM(3,3,GEIGEO) = 1._r8
+!
+ DO I=1,3
+ DO J=1,3
AM(I,J,GSEGEO) = AM(I,1,GEIGEO)*AM(1,J,GSEGEI) + &
AM(I,2,GEIGEO)*AM(2,J,GSEGEI) + AM(I,3,GEIGEO)*AM(3,J,GSEGEI)
- ENDDO
- ENDDO
-!
- AM(1,1,GEOMAG) = CT(4)*CT(5)
- AM(1,2,GEOMAG) = CT(4)*ST(5)
- AM(1,3,GEOMAG) =-ST(4)
- AM(2,1,GEOMAG) =-ST(5)
- AM(2,2,GEOMAG) = CT(5)
+ END DO
+ END DO
+!
+ AM(1,1,GEOMAG) = CT(4)*CT(5)
+ AM(1,2,GEOMAG) = CT(4)*ST(5)
+ AM(1,3,GEOMAG) =-ST(4)
+ AM(2,1,GEOMAG) =-ST(5)
+ AM(2,2,GEOMAG) = CT(5)
AM(2,3,GEOMAG) = 0._r8
- AM(3,1,GEOMAG) = ST(4)*CT(5)
- AM(3,2,GEOMAG) = ST(4)*ST(5)
- AM(3,3,GEOMAG) = CT(4)
-!
- DO I=1,3
- DO J=1,3
+ AM(3,1,GEOMAG) = ST(4)*CT(5)
+ AM(3,2,GEOMAG) = ST(4)*ST(5)
+ AM(3,3,GEOMAG) = CT(4)
+!
+ DO I=1,3
+ DO J=1,3
AM(I,J,GSEMAG) = AM(I,1,GEOMAG)*AM(1,J,GSEGEO) + &
AM(I,2,GEOMAG)*AM(2,J,GSEGEO) + AM(I,3,GEOMAG)*AM(3,J,GSEGEO)
- ENDDO
- ENDDO
-!
- B32 = AM(3,2,GSEMAG)
- B33 = AM(3,3,GSEMAG)
- B3 = SQRT(B32*B32+B33*B33)
- IF (B33.LE.0._r8) B3 = -B3
-!
- AM(2,2,GSEGSM) = B33/B3
- AM(3,3,GSEGSM) = AM(2,2,GSEGSM)
- AM(3,2,GSEGSM) = B32/B3
- AM(2,3,GSEGSM) =-AM(3,2,GSEGSM)
+ END DO
+ END DO
+!
+ B32 = AM(3,2,GSEMAG)
+ B33 = AM(3,3,GSEMAG)
+ B3 = SQRT(B32*B32+B33*B33)
+ IF (B33.LE.0._r8) B3 = -B3
+!
+ AM(2,2,GSEGSM) = B33/B3
+ AM(3,3,GSEGSM) = AM(2,2,GSEGSM)
+ AM(3,2,GSEGSM) = B32/B3
+ AM(2,3,GSEGSM) =-AM(3,2,GSEGSM)
AM(1,1,GSEGSM) = 1._r8
AM(1,2,GSEGSM) = 0._r8
AM(1,3,GSEGSM) = 0._r8
AM(2,1,GSEGSM) = 0._r8
AM(3,1,GSEGSM) = 0._r8
-!
- DO I=1,3
- DO J=1,3
+!
+ DO I=1,3
+ DO J=1,3
AM(I,J,GEOGSM) = AM(I,1,GSEGSM)*AM(J,1,GSEGEO) + &
AM(I,2,GSEGSM)*AM(J,2,GSEGEO) + AM(I,3,GSEGSM)*AM(J,3,GSEGEO)
- ENDDO
- ENDDO
-!
- DO I=1,3
- DO J=1,3
+ END DO
+ END DO
+!
+ DO I=1,3
+ DO J=1,3
AM(I,J,GSMMAG) = AM(I,1,GEOMAG)*AM(J,1,GEOGSM) + &
AM(I,2,GEOMAG)*AM(J,2,GEOGSM) + AM(I,3,GEOMAG)*AM(J,3,GEOGSM)
- ENDDO
- ENDDO
+ END DO
+ END DO
!
- ST(6) = AM(3,1,GSEMAG)
- CT(6) = SQRT(1._r8-ST(6)*ST(6))
- CX(6) = ASIND(ST(6))
+ ST(6) = AM(3,1,GSEMAG)
+ CT(6) = SQRT(1._r8-ST(6)*ST(6))
+ CX(6) = ASIND(ST(6))
AM(1,1,GSMSM) = CT(6)
AM(1,2,GSMSM) = 0._r8
@@ -1548,20 +1566,20 @@ real(r8) FUNCTION GET_TILT (YEAR,MONTH,DAY,HOUR)
AM(3,1,GSMSM) = ST(6)
AM(3,2,GSMSM) = 0._r8
AM(3,3,GSMSM) = CT(6)
-!
- DO I=1,3
- DO J=1,3
+!
+ DO I=1,3
+ DO J=1,3
AM(I,J,GEOSM) = AM(I,1,GSMSM)*AM(1,J,GEOGSM) + &
AM(I,2,GSMSM)*AM(2,J,GEOGSM) + AM(I,3,GSMSM)*AM(3,J,GEOGSM)
- ENDDO
- ENDDO
-!
- DO I=1,3
- DO J=1,3
+ END DO
+ END DO
+!
+ DO I=1,3
+ DO J=1,3
AM(I,J,MAGSM) = AM(I,1,GSMSM)*AM(J,1,GSMMAG) + &
AM(I,2,GSMSM)*AM(J,2,GSMMAG) + AM(I,3,GSMSM)*AM(J,3,GSMMAG)
- ENDDO
- ENDDO
+ END DO
+ END DO
!
CX(7)=ATAN2D( AM(2,1,11) , AM(1,1,11) )
CX(8)=ASIND( AM(3,1,1) )
@@ -1577,11 +1595,11 @@ real(r8) FUNCTION GET_TILT (YEAR,MONTH,DAY,HOUR)
WRITE(IDBUG,1001) K
DO I=1,3
WRITE(IDBUG,1002) (AM(I,J,K),J=1,3)
- ENDDO
- ENDDO
+ END DO
+ END DO
1001 FORMAT(' ROTATION MATRIX ',I2)
1002 FORMAT(3F9.5)
- ENDIF
+ END IF
!NCAR Mar 96: return the dipole tilt from this function call.
GET_TILT = CX(6)
@@ -1602,12 +1620,12 @@ SUBROUTINE ADJUST(ANGLE)
IF(ANGLE.LT.0._r8)THEN
ANGLE=ANGLE+360._r8
GOTO 10
- ENDIF
+ END IF
20 CONTINUE
IF(ANGLE.GE.360._r8)THEN
ANGLE=ANGLE-360._r8
GOTO 20
- ENDIF
+ END IF
end subroutine adjust
!-----------------------------------------------------------------------
integer FUNCTION JULDAY(MM,ID,IYYY)
@@ -1622,12 +1640,12 @@ integer FUNCTION JULDAY(MM,ID,IYYY)
ELSE
JY=IYYY-1
JM=MM+13
- ENDIF
+ END IF
JULDAY=INT(365.25_r8*JY)+INT(30.6001_r8*JM)+ID+1720995
IF (ID+31*(MM+12*IYYY).GE.IGREG) THEN
JA=INT(0.01_r8*JY)
JULDAY=JULDAY+2-JA+INT(0.25_r8*JA)
- ENDIF
+ END IF
end function julday
!-----------------------------------------------------------------------
SUBROUTINE CVT2MD(iulog,IYEAR,NDA,MON,DAY)
@@ -1640,10 +1658,10 @@ SUBROUTINE CVT2MD(iulog,IYEAR,NDA,MON,DAY)
PARAMETER (MISS=-32767)
SAVE LMON
DATA LMON/31,28,31,30,31,30,31,31,30,31,30,31/
-
+
LMON(2)=28
IF(MOD(IYEAR,4) .EQ. 0)LMON(2)=29
-
+
NUMD=0
DO 100 I=1,12
IF(NDA.GT.NUMD .AND. NDA.LE.NUMD+LMON(I))GO TO 200
@@ -1694,6 +1712,5 @@ FUNCTION ATAN2D (RNUM1,RNUM2)
R2D = 57.2957795130823208767981548147_r8)
ATAN2D = R2D * ATAN2 (RNUM1,RNUM2)
end function atan2d
-#endif
!-----------------------------------------------------------------------
end module wei05sc
diff --git a/src/physics/cam/cam_diagnostics.F90 b/src/physics/cam/cam_diagnostics.F90
index 4ab35242c0..c1d11ae3f5 100644
--- a/src/physics/cam/cam_diagnostics.F90
+++ b/src/physics/cam/cam_diagnostics.F90
@@ -78,17 +78,17 @@ module cam_diagnostics
! Physics buffer indices
-integer :: psl_idx = 0
-integer :: relhum_idx = 0
-integer :: qcwat_idx = 0
-integer :: tcwat_idx = 0
-integer :: lcwat_idx = 0
-integer :: cld_idx = 0
-integer :: concld_idx = 0
-integer :: tke_idx = 0
-integer :: kvm_idx = 0
-integer :: kvh_idx = 0
-integer :: cush_idx = 0
+integer :: psl_idx = 0
+integer :: relhum_idx = 0
+integer :: qcwat_idx = 0
+integer :: tcwat_idx = 0
+integer :: lcwat_idx = 0
+integer :: cld_idx = 0
+integer :: concld_idx = 0
+integer :: tke_idx = 0
+integer :: kvm_idx = 0
+integer :: kvh_idx = 0
+integer :: cush_idx = 0
integer :: t_ttend_idx = 0
integer :: t_utend_idx = 0
integer :: t_vtend_idx = 0
@@ -170,7 +170,7 @@ subroutine diag_register()
end subroutine diag_register
!==============================================================================
-
+
subroutine diag_init_dry(pbuf2d)
! Declare the history fields for which this module contains outfld calls.
@@ -353,7 +353,7 @@ subroutine diag_init_dry(pbuf2d)
! State after physics (FV)
call add_default ('TAP ' , history_budget_histfile_num, ' ')
call add_default ('UAP ' , history_budget_histfile_num, ' ')
- call add_default ('VAP ' , history_budget_histfile_num, ' ')
+ call add_default ('VAP ' , history_budget_histfile_num, ' ')
call add_default (apcnst(1) , history_budget_histfile_num, ' ')
if ( dycore_is('LR') .or. dycore_is('SE') .or. dycore_is('FV3') ) then
call add_default ('TFIX ' , history_budget_histfile_num, ' ')
@@ -434,6 +434,9 @@ subroutine diag_init_dry(pbuf2d)
call addfld ('MO_pAM', horiz_only, 'A', 'kg*m2/s*rad2',&
'Total column mass axial angular momentum after dry mass correction')
+ call addfld( 'CPAIRV', (/ 'lev' /), 'I', 'J/K/kg', 'Variable specific heat cap air' )
+ call addfld( 'RAIRV', (/ 'lev' /), 'I', 'J/K/kg', 'Variable dry air gas constant' )
+
end subroutine diag_init_dry
subroutine diag_init_moist(pbuf2d)
@@ -935,6 +938,8 @@ subroutine diag_phys_writeout_dry(state, pbuf, p_surf_t)
use co2_cycle, only: c_i, co2_transport
use tidal_diag, only: tidal_diag_write
+ use physconst, only: cpairv,rairv
+
!-----------------------------------------------------------------------
!
! Arguments
@@ -981,6 +986,9 @@ subroutine diag_phys_writeout_dry(state, pbuf, p_surf_t)
call outfld('phis ',state%phis, pcols, lchnk )
#endif
+ call outfld( 'CPAIRV', cpairv(:ncol,:,lchnk), ncol, lchnk )
+ call outfld( 'RAIRV', rairv(:ncol,:,lchnk), ncol, lchnk )
+
do m = 1, pcnst
if (cnst_cam_outfld(m)) then
call outfld(cnst_name(m), state%q(1,1,m), pcols, lchnk)
diff --git a/src/physics/cam/phys_control.F90 b/src/physics/cam/phys_control.F90
index 6cb6ee527f..f6fad69911 100644
--- a/src/physics/cam/phys_control.F90
+++ b/src/physics/cam/phys_control.F90
@@ -13,7 +13,7 @@ module phys_control
use spmd_utils, only: masterproc
use cam_logfile, only: iulog
use cam_abortutils, only: endrun
-use shr_kind_mod, only: r8 => shr_kind_r8
+use shr_kind_mod, only: r8 => shr_kind_r8, cl=>shr_kind_cl
implicit none
private
@@ -44,6 +44,7 @@ module phys_control
character(len=16) :: microp_scheme = unset_str ! microphysics package
character(len=16) :: macrop_scheme = unset_str ! macrophysics package
character(len=16) :: radiation_scheme = unset_str ! radiation package
+character(len=cl) :: cam_physics_mesh = unset_str ! SCRIP file for phys
integer :: srf_flux_avg = unset_int ! 1 => smooth surface fluxes, 0 otherwise
logical :: use_subcol_microp = .false. ! if .true. then use sub-columns in microphysics
@@ -133,7 +134,7 @@ subroutine phys_ctl_readnl(nlfile)
do_clubb_sgs, state_debug_checks, use_hetfrz_classnuc, use_gw_oro, use_gw_front, &
use_gw_front_igw, use_gw_convect_dp, use_gw_convect_sh, cld_macmic_num_steps, &
offline_driver, convproc_do_aer, cam_snapshot_before_num, cam_snapshot_after_num, &
- cam_take_snapshot_before, cam_take_snapshot_after
+ cam_take_snapshot_before, cam_take_snapshot_after, cam_physics_mesh
!-----------------------------------------------------------------------------
if (masterproc) then
@@ -195,6 +196,7 @@ subroutine phys_ctl_readnl(nlfile)
call mpi_bcast(cam_snapshot_after_num, 1, mpi_integer, masterprocid, mpicom, ierr)
call mpi_bcast(cam_take_snapshot_before, len(cam_take_snapshot_before), mpi_character, masterprocid, mpicom, ierr)
call mpi_bcast(cam_take_snapshot_after, len(cam_take_snapshot_after), mpi_character, masterprocid, mpicom, ierr)
+ call mpi_bcast(cam_physics_mesh, len(cam_physics_mesh), mpi_character, masterprocid, mpicom, ierr)
use_spcam = ( cam_physpkg_is('spcam_sam1mom') &
.or. cam_physpkg_is('spcam_m2005'))
@@ -292,7 +294,7 @@ subroutine phys_getopts(deep_scheme_out, shallow_scheme_out, eddy_scheme_out, mi
cam_chempkg_out, prog_modal_aero_out, macrop_scheme_out, &
do_clubb_sgs_out, use_spcam_out, state_debug_checks_out, cld_macmic_num_steps_out, &
offline_driver_out, convproc_do_aer_out, cam_snapshot_before_num_out, cam_snapshot_after_num_out,&
- cam_take_snapshot_before_out, cam_take_snapshot_after_out)
+ cam_take_snapshot_before_out, cam_take_snapshot_after_out, physics_grid_out)
!-----------------------------------------------------------------------
! Purpose: Return runtime settings
! deep_scheme_out : deep convection scheme
@@ -339,6 +341,7 @@ subroutine phys_getopts(deep_scheme_out, shallow_scheme_out, eddy_scheme_out, mi
integer, intent(out), optional :: cam_snapshot_after_num_out
character(len=32), intent(out), optional :: cam_take_snapshot_before_out
character(len=32), intent(out), optional :: cam_take_snapshot_after_out
+ character(len=cl), intent(out), optional :: physics_grid_out
if ( present(deep_scheme_out ) ) deep_scheme_out = deep_scheme
if ( present(shallow_scheme_out ) ) shallow_scheme_out = shallow_scheme
@@ -377,6 +380,7 @@ subroutine phys_getopts(deep_scheme_out, shallow_scheme_out, eddy_scheme_out, mi
if ( present(cam_snapshot_after_num_out ) ) cam_snapshot_after_num_out = cam_snapshot_after_num
if ( present(cam_take_snapshot_before_out) ) cam_take_snapshot_before_out = cam_take_snapshot_before
if ( present(cam_take_snapshot_after_out ) ) cam_take_snapshot_after_out = cam_take_snapshot_after
+ if ( present(physics_grid_out ) ) physics_grid_out = cam_physics_mesh
end subroutine phys_getopts
diff --git a/src/physics/cam/physics_types.F90 b/src/physics/cam/physics_types.F90
index a36b8f4039..7b6e8811a6 100644
--- a/src/physics/cam/physics_types.F90
+++ b/src/physics/cam/physics_types.F90
@@ -200,7 +200,6 @@ subroutine physics_update(state, ptend, dt, tend)
!-----------------------------------------------------------------------
! Update the state and or tendency structure with the parameterization tendencies
!-----------------------------------------------------------------------
- use shr_sys_mod, only: shr_sys_flush
use constituents, only: cnst_get_ind
use scamMod, only: scm_crm_mode, single_column
use phys_control, only: phys_getopts
@@ -427,9 +426,6 @@ subroutine physics_update(state, ptend, dt, tend)
end do
end if
- ! Good idea to do this regularly.
- call shr_sys_flush(iulog)
-
if (state_debug_checks) call physics_state_check(state, ptend%name)
deallocate(cpairv_loc, rairv_loc)
diff --git a/src/physics/cam/rk_stratiform.F90 b/src/physics/cam/rk_stratiform.F90
index a951edd3fa..5d165acc40 100644
--- a/src/physics/cam/rk_stratiform.F90
+++ b/src/physics/cam/rk_stratiform.F90
@@ -628,7 +628,7 @@ subroutine rk_stratiform_tend( &
if ( do_psrhmin ) then
call tropopause_find(state, troplev, primary=TROP_ALG_TWMO, backup=TROP_ALG_CLIMATE)
call get_rlat_all_p(lchnk,ncol,rlat)
- dlat = rlat*rad2deg
+ dlat(:ncol) = rlat(:ncol)*rad2deg
endif
! ------------- !
diff --git a/src/physics/waccm/aurora_params.F90 b/src/physics/waccm/aurora_params.F90
index 2755184438..737ff608eb 100644
--- a/src/physics/waccm/aurora_params.F90
+++ b/src/physics/waccm/aurora_params.F90
@@ -20,6 +20,6 @@ module aurora_params
real(r8) :: dskofc(2) = -huge(1.0_r8)
real(r8) :: phin(2) = -huge(1.0_r8)
- logical :: amie_period = .false. ! true during a period of prescribed high-latitude electric potential
+ logical :: prescribed_period = .false. ! true during a period of prescribed high-latitude electric potential
end module aurora_params
diff --git a/src/physics/waccm/mo_aurora.F90 b/src/physics/waccm/mo_aurora.F90
index ac754294d1..e6fe8cd5c0 100644
--- a/src/physics/waccm/mo_aurora.F90
+++ b/src/physics/waccm/mo_aurora.F90
@@ -56,7 +56,7 @@ module mo_aurora
use spmd_utils, only: masterproc
use aurora_params, only: power=>hpower, plevel, aurora_params_set
use aurora_params, only: ctpoten, theta0, dskofa, offa, phid, rrad
- use aurora_params, only: amie_period
+ use aurora_params, only: prescribed_period
implicit none
@@ -136,8 +136,8 @@ module mo_aurora
logical :: aurora_active = .false.
integer :: indxAIPRS = -1
integer :: indxQTe = -1
- integer :: indxAMIEefxg = -1 ! am_amie_201712
- integer :: indxAMIEkevg = -1 ! am_amie_201712
+ integer :: indxEfx = -1
+ integer :: indxKev = -1
real(r8), parameter :: h2deg = 15._r8 ! hour to degree
@@ -152,8 +152,9 @@ subroutine aurora_register
! add ionization rates to phys buffer for waccmx ionosphere module
- call pbuf_add_field('AurIPRateSum', 'physpkg', dtype_r8, (/pcols,pver/), indxAIPRS) ! Sum of ion auroral production rates for O2
- call pbuf_add_field('QTeAur', 'physpkg', dtype_r8, (/pcols/), indxQTe) ! for electron temperature
+ ! Sum of ion auroral production rates for O2
+ call pbuf_add_field('AurIPRateSum', 'physpkg', dtype_r8, (/pcols,pver/), indxAIPRS)
+ call pbuf_add_field('QTeAur', 'physpkg', dtype_r8, (/pcols/), indxQTe) ! for electron temperature
endsubroutine aurora_register
@@ -187,13 +188,17 @@ subroutine aurora_inti(pbuf2d)
integer :: ierr
real(r8) :: x_nan
- indxAMIEefxg = pbuf_get_index('AMIE_efxg', errcode=ierr)
- indxAMIEkevg = pbuf_get_index('AMIE_kevg', errcode=ierr)
+ indxEfx = pbuf_get_index('AUREFX', errcode=ierr)
+ indxKev = pbuf_get_index('AURKEV', errcode=ierr)
- if (indxAMIEefxg>0 .and. indxAMIEkevg>0) then
+ if (indxEfx>0 .and. indxKev>0) then
x_nan = nan
- call pbuf_set_field(pbuf2d, indxAMIEefxg, x_nan)
- call pbuf_set_field(pbuf2d, indxAMIEkevg, x_nan)
+ call pbuf_set_field(pbuf2d, indxEfx, x_nan)
+ call pbuf_set_field(pbuf2d, indxKev, x_nan)
+ endif
+
+ if (indxAIPRS>0) then
+ call pbuf_set_field(pbuf2d, indxAIPRS, 0._r8)
endif
theta0(:) = nan
@@ -794,8 +799,8 @@ subroutine aurora_heat( flux, flux2, alfa, alfa2, &
wrk, & ! temp wrk array
dtheta ! latitudinal variation (Gaussian)
real(r8) :: ekev
- real(r8), pointer :: amie_efxg(:) ! Pointer to pbuf AMIE energy flux (mW m-2)
- real(r8), pointer :: amie_kevg(:) ! Pointer to pbuf AMIE mean energy (keV)
+ real(r8), pointer :: pr_efx(:) ! Pointer to pbuf prescribed energy flux (mW m-2)
+ real(r8), pointer :: pr_kev(:) ! Pointer to pbuf prescribed mean energy (keV)
real(r8), pointer :: qteaur(:) ! for electron temperature
integer :: n
@@ -856,16 +861,16 @@ subroutine aurora_heat( flux, flux2, alfa, alfa2, &
!----------------------------------------------------------------------------------------------
! ... If turned on, use amie energy flux and mean energy to replace flux(:) and alfa(:)
!----------------------------------------------------------------------------------------------
- if (amie_period .and. indxAMIEefxg>0 .and. indxAMIEkevg>0) then
+ if (prescribed_period .and. indxEfx>0 .and. indxKev>0) then
!---------------------------------------------------------------------------
- ! Overwrite with AMIE mean energy and energy flux in physics buffer
+ ! Overwrite with prescribed mean energy and energy flux in physics buffer
!---------------------------------------------------------------------------
- call pbuf_get_field(pbuf, indxAMIEefxg, amie_efxg)
- call pbuf_get_field(pbuf, indxAMIEkevg, amie_kevg)
+ call pbuf_get_field(pbuf, indxEfx, pr_efx)
+ call pbuf_get_field(pbuf, indxKev, pr_kev)
do n=1,ncol
- ekev = max(amie_kevg(n),1._r8)
+ ekev = max(pr_kev(n),1._r8)
alfa(n) = ekev/2._r8
- flux(n) = max(amie_efxg(n)/(ekev*1.602e-9_r8),1.e-20_r8)
+ flux(n) = max(pr_efx(n)/(ekev*1.602e-9_r8),1.e-20_r8)
enddo
endif
diff --git a/src/utils/physconst.F90 b/src/utils/physconst.F90
index 4da502f3eb..4e925955e2 100644
--- a/src/utils/physconst.F90
+++ b/src/utils/physconst.F90
@@ -142,7 +142,7 @@ module physconst
integer, protected, public :: thermodynamic_active_species_num
integer, allocatable, protected, public :: thermodynamic_active_species_idx(:)
-integer, allocatable, public :: thermodynamic_active_species_idx_dycore(:)
+integer, allocatable, public :: thermodynamic_active_species_idx_dycore(:)
real(r8), allocatable, protected, public :: thermodynamic_active_species_cp(:)
real(r8), allocatable, protected, public :: thermodynamic_active_species_cv(:)
real(r8), allocatable, protected, public :: thermodynamic_active_species_R(:)
@@ -273,8 +273,8 @@ subroutine physconst_readnl(nlfile)
! Read variable components of dry air and water species in air
dry_air_species = (/ (' ', i=1,num_names_max) /)
- water_species_in_air = (/ (' ', i=1,num_names_max) /)
-
+ water_species_in_air = (/ (' ', i=1,num_names_max) /)
+
if (masterproc) then
open( newunit=unitn, file=trim(nlfile), status='old' )
call find_group_name(unitn, 'air_composition_nl', status=ierr)
@@ -286,12 +286,12 @@ subroutine physconst_readnl(nlfile)
end if
close(unitn)
end if
-
+
call mpi_bcast(dry_air_species, len(dry_air_species)*num_names_max, mpi_character, &
masterprocid, mpicom, ierr)
call mpi_bcast(water_species_in_air, len(water_species_in_air)*num_names_max, mpi_character, &
masterprocid, mpicom, ierr)
-
+
dry_air_species_num = 0
water_species_in_air_num = 0
do i = 1, num_names_max
@@ -377,16 +377,16 @@ subroutine composition_init()
mmro2 = 0.235_r8
mmrn2 = 0.765_r8
mbar = 1._r8/(mmro2*o2_mwi + mmrn2*n2_mwi)
-
+
! init for variable composition dry air
- i = dry_air_species_num+water_species_in_air_num
+ i = dry_air_species_num+water_species_in_air_num
allocate(thermodynamic_active_species_idx(i))
allocate(thermodynamic_active_species_idx_dycore(i))
allocate(thermodynamic_active_species_cp(0:i))
allocate(thermodynamic_active_species_cv(0:i))
allocate(thermodynamic_active_species_R(0:i))
-
+
i = dry_air_species_num
allocate(thermodynamic_active_species_mwi(i))
allocate(thermodynamic_active_species_kv(i))
@@ -418,8 +418,8 @@ subroutine composition_init()
call cnst_get_ind('N' ,ix, abort=.false.)
if (ix<1) then
write(iulog, *) subname//' dry air component not found: ', dry_air_species(dry_air_species_num)
- call endrun(subname // ':: dry air component not found')
- else
+ call endrun(subname // ':: dry air component not found')
+ else
mw = 2.0_r8*cnst_mw(ix)
icnst = dry_air_species_num
thermodynamic_active_species_idx(icnst) = 1!note - this is not used since this tracer value is derived
@@ -463,7 +463,7 @@ subroutine composition_init()
call cnst_get_ind('O' ,ix, abort=.false.)
if (ix<1) then
write(iulog, *) subname//' dry air component not found: ', dry_air_species(i)
- call endrun(subname // ':: dry air component not found')
+ call endrun(subname // ':: dry air component not found')
else
mw = cnst_mw(ix)
thermodynamic_active_species_idx(icnst) = ix
@@ -482,7 +482,7 @@ subroutine composition_init()
call cnst_get_ind('O2' ,ix, abort=.false.)
if (ix<1) then
write(iulog, *) subname//' dry air component not found: ', dry_air_species(i)
- call endrun(subname // ':: dry air component not found')
+ call endrun(subname // ':: dry air component not found')
else
mw = cnst_mw(ix)
thermodynamic_active_species_idx(icnst) = ix
@@ -501,7 +501,7 @@ subroutine composition_init()
call cnst_get_ind('H' ,ix, abort=.false.)
if (ix<1) then
write(iulog, *) subname//' dry air component not found: ', dry_air_species(i)
- call endrun(subname // ':: dry air component not found')
+ call endrun(subname // ':: dry air component not found')
else
mw = cnst_mw(ix)
thermodynamic_active_species_idx(icnst) = ix
@@ -515,12 +515,12 @@ subroutine composition_init()
end if
!
! If support for more major species is to be included add code here
- !
+ !
case default
write(iulog, *) subname//' dry air component not found: ', dry_air_species(i)
- call endrun(subname // ':: dry air component not found')
+ call endrun(subname // ':: dry air component not found')
end select
-
+
if (masterproc) then
write(iulog, *) "Dry air composition ",TRIM(dry_air_species(i)),&
icnst-1,thermodynamic_active_species_idx(icnst-1),&
@@ -556,7 +556,7 @@ subroutine composition_init()
call cnst_get_ind('Q' ,ix, abort=.false.)
if (ix<1) then
write(iulog, *) subname//' moist air component not found: ', water_species_in_air(i)
- call endrun(subname // ':: moist air component not found')
+ call endrun(subname // ':: moist air component not found')
else
mw = cnst_mw(ix)
thermodynamic_active_species_idx(icnst) = ix
@@ -572,7 +572,7 @@ subroutine composition_init()
call cnst_get_ind('CLDLIQ' ,ix, abort=.false.)
if (ix<1) then
write(iulog, *) subname//' moist air component not found: ', water_species_in_air(i)
- call endrun(subname // ':: moist air component not found')
+ call endrun(subname // ':: moist air component not found')
else
thermodynamic_active_species_idx(icnst) = ix
thermodynamic_active_species_cp (icnst) = cpliq
@@ -586,7 +586,7 @@ subroutine composition_init()
call cnst_get_ind('CLDICE' ,ix, abort=.false.)
if (ix<1) then
write(iulog, *) subname//' moist air component not found: ', water_species_in_air(i)
- call endrun(subname // ':: moist air component not found')
+ call endrun(subname // ':: moist air component not found')
else
thermodynamic_active_species_idx(icnst) = ix
thermodynamic_active_species_cp (icnst) = cpice
@@ -600,7 +600,7 @@ subroutine composition_init()
call cnst_get_ind('RAINQM' ,ix, abort=.false.)
if (ix<1) then
write(iulog, *) subname//' moist air component not found: ', water_species_in_air(i)
- call endrun(subname // ':: moist air component not found')
+ call endrun(subname // ':: moist air component not found')
else
thermodynamic_active_species_idx(icnst) = ix
thermodynamic_active_species_cp (icnst) = cpliq
@@ -614,7 +614,7 @@ subroutine composition_init()
call cnst_get_ind('SNOWQM' ,ix, abort=.false.)
if (ix<1) then
write(iulog, *) subname//' moist air component not found: ', water_species_in_air(i)
- call endrun(subname // ':: moist air component not found')
+ call endrun(subname // ':: moist air component not found')
else
thermodynamic_active_species_idx(icnst) = ix
thermodynamic_active_species_cp (icnst) = cpice
@@ -628,7 +628,7 @@ subroutine composition_init()
call cnst_get_ind('GRAUQM' ,ix, abort=.false.)
if (ix<1) then
write(iulog, *) subname//' moist air component not found: ', water_species_in_air(i)
- call endrun(subname // ':: moist air component not found')
+ call endrun(subname // ':: moist air component not found')
else
mw = cnst_mw(ix)
thermodynamic_active_species_idx(icnst) = ix
@@ -638,10 +638,10 @@ subroutine composition_init()
end if
!
! If support for more major species is to be included add code here
- !
+ !
case default
write(iulog, *) subname//' moist air component not found: ', water_species_in_air(i)
- call endrun(subname // ':: moist air component not found')
+ call endrun(subname // ':: moist air component not found')
end select
!
!
@@ -653,23 +653,23 @@ subroutine composition_init()
thermodynamic_active_species_cv(icnst-1)
end if
end do
-
- end subroutine composition_init
+
+ end subroutine composition_init
!
!****************************************************************************************************************
!
! update species dependent constants for physics
!
!****************************************************************************************************************
- !
+ !
subroutine physconst_update(mmr, t, lchnk, ncol, to_moist_factor)
-
+
!-----------------------------------------------------------------------
! Update the physics "constants" that vary
!-----------------------------------------------------------------------
-
+
!------------------------------Arguments--------------------------------------------------------------
-
+
real(r8), intent(in) :: mmr(pcols,pver,pcnst) ! constituents q array from state structure
real(r8), intent(in) :: t(pcols,pver) ! temperature t array from state structure
integer, intent(in) :: lchnk ! Chunk number
@@ -679,13 +679,13 @@ subroutine physconst_update(mmr, t, lchnk, ncol, to_moist_factor)
!---------------------------Local storage-------------------------------------------------------------
real(r8):: to_moist_fact(ncol,pver)
real(r8):: sponge_factor(pver)
-
+
to_moist_fact(:,:) = 1._r8
-
+
if (present(to_moist_factor)) then
to_moist_fact(:ncol,:) = to_moist_factor(:ncol,:)
end if
-
+
!--------------------------------------------
! update cpairv, rairv, mbarv, and cappav
!--------------------------------------------
@@ -707,15 +707,15 @@ end subroutine physconst_update
! update species dependent kappa for FV dycore
!
!****************************************************************************************************************
- !
+ !
subroutine physconst_calc_kappav( i0,i1,j0,j1,k0,k1,ntotq, tracer, kappav, cpv )
! assumes moist MMRs
-
+
! args
integer, intent(in) :: i0,i1,j0,j1,k0,k1, ntotq
real(r8), intent(in) :: tracer(i0:i1,j0:j1,k0:k1,ntotq) ! Tracer array
real(r8), intent(out) :: kappav(i0:i1,j0:j1,k0:k1)
- real(r8), optional, intent(out) :: cpv(i0:i1,j0:j1,k0:k1)
+ real(r8), optional, intent(out) :: cpv(i0:i1,j0:j1,k0:k1)
! local vars
real(r8), dimension(i0:i1,j0:j1,k0:k1) :: rgas_var, cp_var
@@ -754,7 +754,7 @@ subroutine get_dp(i0,i1,j0,j1,k0,k1,ntrac,tracer,mixing_ratio,active_species_idx
real(r8), intent(in) :: tracer(i0:i1,j0:j1,k0:k1,1:ntrac) ! tracers; quantity specified by mixing_ratio arg
integer, intent(in) :: mixing_ratio ! 1 => tracer is dry mixing ratio
! 2 => tracer is mass (q*dp)
- integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array
+ integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array
real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness
real(r8), intent(out) :: dp(i0:i1,j0:j1,k0:k1) ! pressure level thickness
real(r8), optional,intent(out) :: ps(i0:i1,j0:j1) ! surface pressure (if ps present then ptop
@@ -762,11 +762,11 @@ subroutine get_dp(i0,i1,j0,j1,k0,k1,ntrac,tracer,mixing_ratio,active_species_idx
real(r8), optional,intent(in) :: ptop ! pressure at model top
integer :: i,j,k,m_cnst,nq
-
+
dp = dp_dry
if (mixing_ratio==1) then
- do nq=dry_air_species_num+1,thermodynamic_active_species_num
- m_cnst = active_species_idx(nq)
+ do nq=dry_air_species_num+1,thermodynamic_active_species_num
+ m_cnst = active_species_idx(nq)
do k=k0,k1
do j=j0,j1
do i = i0,i1
@@ -774,10 +774,10 @@ subroutine get_dp(i0,i1,j0,j1,k0,k1,ntrac,tracer,mixing_ratio,active_species_idx
end do
end do
end do
- end do
+ end do
else
- do nq=dry_air_species_num+1,thermodynamic_active_species_num
- m_cnst = active_species_idx(nq)
+ do nq=dry_air_species_num+1,thermodynamic_active_species_num
+ m_cnst = active_species_idx(nq)
do k=k0,k1
do j=j0,j1
do i = i0,i1
@@ -817,7 +817,7 @@ subroutine get_pmid_from_dpdry(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active
integer, intent(in) :: mixing_ratio ! 1 => tracer is mixing ratio
! 2 => tracer is mass (q*dp)
integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array
- real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) ! dry pressure level thickness
+ real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) ! dry pressure level thickness
real(r8), intent(in) :: ptop ! model top pressure
real(r8), intent(out) :: pmid(i0:i1,j0:j1,nlev) ! mid-level pressure
real(r8), optional, intent(out) :: pint(i0:i1,j0:j1,nlev+1) ! half-level pressure
@@ -834,7 +834,7 @@ subroutine get_pmid_from_dpdry(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active
end do
call get_pmid_from_dp(i0,i1,j0,j1,1,nlev,dp_local,ptop,pmid,pint_local)
-
+
if (present(pint)) pint=pint_local
if (present(dp)) dp=dp_local
end subroutine get_pmid_from_dpdry
@@ -846,14 +846,14 @@ end subroutine get_pmid_from_dpdry
!*************************************************************************************************************************
!
subroutine get_pmid_from_dp(i0,i1,j0,j1,k0,k1,dp,ptop,pmid,pint)
- use dycore, only: dycore_is
+ use dycore, only: dycore_is
integer, intent(in) :: i0,i1,j0,j1,k0,k1 ! array bounds
- real(r8), intent(in) :: dp(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness
+ real(r8), intent(in) :: dp(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness
real(r8), intent(in) :: ptop ! pressure at model top
real(r8), intent(out) :: pmid(i0:i1,j0:j1,k0:k1) ! mid (full) level pressure
real(r8), optional, intent(out) :: pint(i0:i1,j0:j1,k0:k1+1) ! pressure at interfaces (half levels)
- real(r8) :: pint_local(i0:i1,j0:j1,k0:k1+1)
+ real(r8) :: pint_local(i0:i1,j0:j1,k0:k1+1)
integer :: k
pint_local(:,:,k0) = ptop
@@ -868,16 +868,16 @@ subroutine get_pmid_from_dp(i0,i1,j0,j1,k0,k1,dp,ptop,pmid,pint)
else
do k=k0,k1
pmid(:,:,k) = 0.5_r8*(pint_local(:,:,k)+pint_local(:,:,k+1))
- end do
+ end do
end if
- if (present(pint)) pint=pint_local
- end subroutine get_pmid_from_dp
+ if (present(pint)) pint=pint_local
+ end subroutine get_pmid_from_dp
+ !
+ !****************************************************************************************************************
!
- !****************************************************************************************************************
- !
! Compute Exner pressure
!
- !****************************************************************************************************************
+ !****************************************************************************************************************
!
subroutine get_exner(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx,&
dp_dry,ptop,p00,inv_exner,exner,poverp0)
@@ -885,12 +885,12 @@ subroutine get_exner(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_i
real(r8), intent(in) :: tracer(i0:i1,j0:j1,nlev,1:ntrac) ! tracers; quantity specified by mixing_ratio arg
integer, intent(in) :: mixing_ratio ! 1 => tracer is mixing ratio
! 2 => tracer is mass (q*dp)
- integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array
- real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) ! dry pressure level thickness
+ integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array
+ real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) ! dry pressure level thickness
real(r8), intent(in) :: ptop ! pressure at model top
real(r8), intent(in) :: p00 ! reference pressure for Exner pressure (usually 1000hPa)
logical , intent(in) :: inv_exner ! logical for outputting inverse Exner or Exner pressure
- real(r8), intent(out) :: exner(i0:i1,j0:j1,nlev)
+ real(r8), intent(out) :: exner(i0:i1,j0:j1,nlev)
real(r8), optional, intent(out) :: poverp0(i0:i1,j0:j1,nlev)! for efficiency when a routine needs this variable
real(r8) :: pmid(i0:i1,j0:j1,nlev),kappa_dry(i0:i1,j0:j1,nlev)
@@ -907,19 +907,19 @@ subroutine get_exner(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_i
call get_kappa_dry(i0,i1,j0,j1,1,nlev,nlev,ntrac,tracer,active_species_idx,kappa_dry,1.0_r8/dp_dry)
end if
if (inv_exner) then
- exner(:,:,:) = (p00/pmid(:,:,:))**kappa_dry(:,:,:)
+ exner(:,:,:) = (p00/pmid(:,:,:))**kappa_dry(:,:,:)
else
exner(:,:,:) = (pmid(:,:,:)/p00)**kappa_dry(:,:,:)
end if
if (present(poverp0)) poverp0=pmid(:,:,:)/p00
end subroutine get_exner
!
- !****************************************************************************************************************
- !
+ !****************************************************************************************************************
+ !
! Compute virtual potential temperature from dp_dry, m, T and ptop.
!
- !****************************************************************************************************************
- !
+ !****************************************************************************************************************
+ !
subroutine get_virtual_theta(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx, &
dp_dry,ptop,p00,temp,theta_v)
integer, intent(in) :: i0,i1,j0,j1,nlev,ntrac
@@ -927,7 +927,7 @@ subroutine get_virtual_theta(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_s
integer, intent(in) :: mixing_ratio ! 1 => tracer is dry mixing ratio
! 2 => tracer is mass (q*dp)
integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array
- real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) ! dry pressure level thickness
+ real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) ! dry pressure level thickness
real(r8), intent(in) :: ptop ! pressure at model top
real(r8), intent(in) :: p00 ! reference pressure for Exner pressure (usually 1000hPa)
real(r8), intent(in) :: temp(i0:i1,j0:j1,nlev) ! temperature
@@ -937,17 +937,17 @@ subroutine get_virtual_theta(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_s
call get_exner(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx, &
dp_dry,ptop,p00,.true.,iexner)
-
+
theta_v(:,:,:) = temp(:,:,:)*iexner(:,:,:)
end subroutine get_virtual_theta
!
- !****************************************************************************************************************
- !
+ !****************************************************************************************************************
+ !
! Compute geopotential from dry pressure level thichkness, water tracers, model top pressure and temperature
!
- !****************************************************************************************************************
- !
+ !****************************************************************************************************************
+ !
subroutine get_gz(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx, &
dp_dry,ptop,temp,phis,gz,pmid,dp,T_v)
integer, intent(in) :: i0,i1,j0,j1,nlev,ntrac ! array bounds
@@ -955,7 +955,7 @@ subroutine get_gz(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx,
integer, intent(in) :: mixing_ratio ! 1 => tracer is dry mixing ratio
! 2 => tracer is mass (q*dp)
integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array
- real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) ! dry pressure level thickness
+ real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) ! dry pressure level thickness
real(r8), intent(in) :: ptop ! pressure at model top
real(r8), intent(in) :: temp(i0:i1,j0:j1,nlev) ! temperature
real(r8), intent(in) :: phis(i0:i1,j0:j1) ! surface geopotential
@@ -963,14 +963,14 @@ subroutine get_gz(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx,
real(r8), optional, intent(out) :: pmid(i0:i1,j0:j1,nlev) ! mid-level pressure
real(r8), optional, intent(out) :: dp(i0:i1,j0:j1,nlev) ! pressure level thickness
real(r8), optional, intent(out) :: t_v(i0:i1,j0:j1,nlev) ! virtual temperature
-
+
real(r8), dimension(i0:i1,j0:j1,nlev) :: pmid_local, t_v_local, dp_local, R_dry
real(r8), dimension(i0:i1,j0:j1,nlev+1) :: pint
call get_pmid_from_dpdry(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx, &
- dp_dry,ptop,pmid_local,pint=pint,dp=dp_local)
- if (mixing_ratio==1) then
+ dp_dry,ptop,pmid_local,pint=pint,dp=dp_local)
+ if (mixing_ratio==1) then
call get_virtual_temp(i0,i1,j0,j1,1,nlev,ntrac,tracer,t_v_local,temp=temp,&
active_species_idx_dycore=active_species_idx)
call get_R_dry(i0,i1,j0,j1,1,nlev,1,nlev,ntrac,tracer,active_species_idx,R_dry)
@@ -987,16 +987,16 @@ subroutine get_gz(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx,
if (present(dp)) dp=dp_local
end subroutine get_gz
!
- !****************************************************************************************************************
- !
+ !****************************************************************************************************************
+ !
! Compute geopotential from pressure level thickness and virtual temperature
!
- !****************************************************************************************************************
- !
+ !****************************************************************************************************************
+ !
subroutine get_gz_given_dp_Tv_Rdry(i0,i1,j0,j1,nlev,dp,T_v,R_dry,phis,ptop,gz,pmid)
- use dycore, only: dycore_is
+ use dycore, only: dycore_is
integer, intent(in) :: i0,i1,j0,j1,nlev ! array bounds
- real(r8), intent(in) :: dp (i0:i1,j0:j1,nlev) ! pressure level thickness
+ real(r8), intent(in) :: dp (i0:i1,j0:j1,nlev) ! pressure level thickness
real(r8), intent(in) :: T_v (i0:i1,j0:j1,nlev) ! virtual temperature
real(r8), intent(in) :: R_dry(i0:i1,j0:j1,nlev) ! R dry
real(r8), intent(in) :: phis (i0:i1,j0:j1) ! surface geopotential
@@ -1016,7 +1016,7 @@ subroutine get_gz_given_dp_Tv_Rdry(i0,i1,j0,j1,nlev,dp,T_v,R_dry,phis,ptop,gz,pm
! integrate hydrostatic eqn
!
gzh = phis
- if (dycore_is ('LR').or.dycore_is ('SE')) then
+ if (dycore_is ('LR').or.dycore_is ('SE')) then
do k=nlev,1,-1
Rdry_tv(:,:) = R_dry(:,:,k)*T_v(:,:,k)
gz(:,:,k) = gzh(:,:)+Rdry_tv(:,:)*(1.0_r8-pint(:,:,k)/pmid_local(:,:,k))
@@ -1027,17 +1027,17 @@ subroutine get_gz_given_dp_Tv_Rdry(i0,i1,j0,j1,nlev,dp,T_v,R_dry,phis,ptop,gz,pm
Rdry_tv(:,:) = R_dry(:,:,k)*T_v(:,:,k)
gz(:,:,k) = gzh(:,:)+Rdry_tv(:,:)*0.5_r8*dp(:,:,k)/pmid_local(:,:,k)
gzh(:,:) = gzh(:,:) + Rdry_tv(:,:)*dp(:,:,k)/pmid_local(:,:,k)
- end do
+ end do
end if
if (present(pmid)) pmid=pmid_local
end subroutine get_gz_given_dp_Tv_Rdry
!
- !****************************************************************************************************************
- !
+ !****************************************************************************************************************
+ !
! Compute Richardson number at cell interfaces (half levels)
!
- !****************************************************************************************************************
- !
+ !****************************************************************************************************************
+ !
subroutine get_Richardson_number(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx,&
dp_dry,ptop,p00,temp,v,Richardson_number,pmid,dp)
integer, intent(in) :: i0,i1,j0,j1,nlev,ntrac
@@ -1045,30 +1045,30 @@ subroutine get_Richardson_number(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,acti
integer, intent(in) :: mixing_ratio ! 1 => tracer is dry mixing ratio
! 2 => tracer is mass (q*dp)
integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array
- real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) ! dry pressure level thickness
+ real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) ! dry pressure level thickness
real(r8), intent(in) :: ptop ! pressure at model top
real(r8), intent(in) :: p00 ! reference pressure for Exner pressure (usually 1000hPa)
real(r8), intent(in) :: temp(i0:i1,j0:j1,nlev) ! temperature
real(r8), intent(in) :: v(i0:i1,j0:j1,2,nlev) ! velocity components
real(r8), intent(out) :: Richardson_number(i0:i1,j0:j1,nlev+1)!
real(r8), optional, intent(out) :: pmid(i0:i1,j0:j1,nlev) !
- real(r8), optional, intent(out) :: dp(i0:i1,j0:j1,nlev) !
+ real(r8), optional, intent(out) :: dp(i0:i1,j0:j1,nlev) !
real(r8), dimension(i0:i1,j0:j1,nlev):: gz,theta_v
real(r8), dimension(i0:i1,j0:j1) :: pt1, pt2, phis
integer :: k,km1
- real(r8), parameter:: ustar2 = 1.E-4_r8
+ real(r8), parameter:: ustar2 = 1.E-4_r8
phis = 0.0_r8
if (present(pmid).and.present(dp)) then
call get_gz(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx, &
- dp_dry,ptop,temp,phis,gz,pmid=pmid,dp=dp)
+ dp_dry,ptop,temp,phis,gz,pmid=pmid,dp=dp)
else if (present(pmid)) then
call get_gz(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx, &
- dp_dry,ptop,temp,phis,gz,pmid=pmid)
+ dp_dry,ptop,temp,phis,gz,pmid=pmid)
else if (present(dp)) then
call get_gz(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx, &
- dp_dry,ptop,temp,phis,gz,dp=dp)
+ dp_dry,ptop,temp,phis,gz,dp=dp)
else
call get_gz(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx, &
dp_dry,ptop,temp,phis,gz)
@@ -1080,7 +1080,7 @@ subroutine get_Richardson_number(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,acti
do k=nlev-1,2,-1
km1=k-1
pt1(:,:) = theta_v(:,:,km1)
- pt2(:,:) = theta_v(:,:,k)
+ pt2(:,:) = theta_v(:,:,k)
Richardson_number(:,:,k) = (gz(:,:,km1)-gz(:,:,k))*(pt1-pt2)/( 0.5_r8*(pt1+pt2)* &
((v(:,:,1,km1)-v(:,:,1,k))**2+(v(:,:,2,km1)-v(:,:,2,k))**2+ustar2) )
end do
@@ -1093,7 +1093,7 @@ subroutine get_hydrostatic_static_energy(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ra
integer, intent(in) :: mixing_ratio ! 1 => tracer is dry mixing ratio
! 2 => tracer is mass (q*dp)
integer, intent(in) :: active_species_idx(:) ! index for thermodynamic species in tracer array
- real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) ! dry pressure level thickness
+ real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,nlev) ! dry pressure level thickness
real(r8), intent(in) :: ptop ! pressure at model top
real(r8), intent(in) :: phis(i0:i1,j0:j1) ! surface geopotential
real(r8), intent(in) :: temp(i0:i1,j0:j1,nlev) ! temperature
@@ -1106,22 +1106,22 @@ subroutine get_hydrostatic_static_energy(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ra
call get_gz(i0,i1,j0,j1,nlev,ntrac,tracer,mixing_ratio,active_species_idx, &
dp_dry,ptop,temp,phis,gz,T_v=T_v)
if (mixing_ratio==1) then
- call get_cp_dry(i0,i1,j0,j1,1,nlev,1,nlev,ntrac,tracer,active_species_idx,cp_dry)
+ call get_cp_dry(i0,i1,j0,j1,1,nlev,1,nlev,ntrac,tracer,active_species_idx,cp_dry)
else
call get_cp_dry(i0,i1,j0,j1,1,nlev,1,nlev,ntrac,tracer,active_species_idx,cp_dry, &
- fact=1.0_r8/dp_dry)
+ fact=1.0_r8/dp_dry)
end if
-
+
thermalE(:,:,:) = cp_dry(:,:,:)*T_v(:,:,:)
KE(:,:,:) = 0.5_r8*(v(:,:,2,:)**2+v(:,:,1,:)**2)
end subroutine get_hydrostatic_static_energy
!
- !****************************************************************************************************************
+ !****************************************************************************************************************
!
! get pressure from dry pressure and thermodynamic active species (e.g., forms of water: water vapor, cldliq, etc.)
!
!****************************************************************************************************************
- !
+ !
subroutine get_ps(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,active_species_idx,dp_dry,ps,ptop)
integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac
real(r8), intent(in) :: tracer_mass(i0:i1,j0:j1,k0:k1,1:ntrac) ! Tracer array
@@ -1132,10 +1132,10 @@ subroutine get_ps(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,active_species_idx,dp_dry,
integer :: i,j,k,m_cnst,nq
real(r8) :: dp(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness
-
+
dp = dp_dry
- do nq=dry_air_species_num+1,thermodynamic_active_species_num
- m_cnst = active_species_idx(nq)
+ do nq=dry_air_species_num+1,thermodynamic_active_species_num
+ m_cnst = active_species_idx(nq)
do k=k0,k1
do j=j0,j1
do i = i0,i1
@@ -1155,20 +1155,20 @@ subroutine get_ps(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,active_species_idx,dp_dry,
end subroutine get_ps
!
!****************************************************************************************************************
- !
+ !
! Compute dry air heaet capacity under constant pressure
!
!****************************************************************************************************************
- !
+ !
subroutine get_cp_dry(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_species_idx,cp_dry,fact)
integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac,k0_trac,k1_trac
real(r8), intent(in) :: tracer(i0:i1,j0:j1,k0_trac:k1_trac,1:ntrac) ! Tracer array
integer, intent(in) :: active_species_idx(:)
real(r8), optional, intent(in) :: fact(i0:i1,j0:j1,k0_trac:k1_trac) ! dry pressure level thickness
- real(r8), intent(out) :: cp_dry(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness
-
+ real(r8), intent(out) :: cp_dry(i0:i1,j0:j1,k0:k1) ! dry pressure level thickness
+
integer :: i,j,k,m_cnst,nq
- real(r8) :: factor(i0:i1,j0:j1,k0_trac:k1_trac) ! dry pressure level thickness
+ real(r8) :: factor(i0:i1,j0:j1,k0_trac:k1_trac) ! dry pressure level thickness
real(r8) :: residual(i0:i1,j0:j1,k0:k1), mm
!
! dry air not species dependent
@@ -1180,12 +1180,12 @@ subroutine get_cp_dry(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_spec
factor = fact(:,:,:)
else
factor = 1.0_r8
- endif
+ endif
cp_dry = 0.0_r8
residual = 1.0_r8
do nq=1,dry_air_species_num-1
- m_cnst = active_species_idx(nq)
+ m_cnst = active_species_idx(nq)
do k=k0,k1
do j=j0,j1
do i = i0,i1
@@ -1208,18 +1208,18 @@ subroutine get_cp_dry(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_spec
end subroutine get_cp_dry
!
!****************************************************************************************************************
- !
+ !
! Compute generalized dry air gas constant R
!
!****************************************************************************************************************
- !
+ !
subroutine get_R_dry(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_species_idx_dycore,R_dry,fact)
integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac,k0_trac,k1_trac !array boundas
real(r8), intent(in) :: tracer(i0:i1,j0:j1,k0_trac:k1_trac,1:ntrac)!tracer array
integer, intent(in) :: active_species_idx_dycore(:) !index of active species in tracer
real(r8), intent(out) :: R_dry(i0:i1,j0:j1,k0:k1) !dry air R
real(r8), optional, intent(in) :: fact(i0:i1,j0:j1,k0_trac:k1_trac) !factor for converting tracer to dry mixing ratio
-
+
integer :: i,j,k,m_cnst,nq
real(r8):: factor(i0:i1,j0:j1,k0_trac:k1_trac), residual(i0:i1,j0:j1,k0:k1), mm
if (dry_air_species_num==0) then
@@ -1232,12 +1232,12 @@ subroutine get_R_dry(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_speci
factor = fact(:,:,:)
else
factor = 1.0_r8
- endif
+ endif
R_dry = 0.0_r8
residual = 1.0_r8
do nq=1,dry_air_species_num-1
- m_cnst = active_species_idx_dycore(nq)
+ m_cnst = active_species_idx_dycore(nq)
do k=k0,k1
do j=j0,j1
do i = i0,i1
@@ -1274,7 +1274,7 @@ subroutine get_R(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_species_i
integer, intent(in) :: active_species_idx(:) !index of active species in tracer
real(r8), intent(out) :: R(i0:i1,j0:j1,k0:k1) !generalized gas constant
real(r8), optional, intent(in) :: fact(i0:i1,j0:j1,k0_trac:k1_trac) !factor for converting tracer to dry mixing ratio
-
+
integer :: nq,itrac
real(r8):: factor(i0:i1,j0:j1,k0_trac:k1_trac)
real(r8), dimension(i0:i1,j0:j1,k0:k1) :: sum_species
@@ -1289,11 +1289,11 @@ subroutine get_R(i0,i1,j0,j1,k0,k1,k0_trac,k1_trac,ntrac,tracer,active_species_i
idx_local = active_species_idx
sum_species = 1.0_r8 !all dry air species sum to 1
do nq=dry_air_species_num+1,thermodynamic_active_species_num
- itrac = idx_local(nq)
+ itrac = idx_local(nq)
sum_species(:,:,:) = sum_species(:,:,:) + tracer(:,:,:,itrac)*factor(:,:,:)
end do
do nq=dry_air_species_num+1,thermodynamic_active_species_num
- itrac = idx_local(nq)
+ itrac = idx_local(nq)
R(:,:,:) = R(:,:,:)+thermodynamic_active_species_R(nq)*tracer(:,:,:,itrac)*factor(:,:,:)
end do
R=R/sum_species
@@ -1311,7 +1311,7 @@ subroutine get_mbarv(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx,mbar
integer, intent(in) :: active_species_idx(:) !index of active species in tracer
real(r8), intent(out) :: mbarv(i0:i1,j0:j1,k0:k1) !molecular weight of dry air
real(r8), optional, intent(in) :: fact(i0:i1,j0:j1,nlev) !factor for converting tracer to dry mixing ratio
-
+
integer :: i,j,k,m_cnst,nq
real(r8):: factor(i0:i1,j0:j1,k0:k1), residual(i0:i1,j0:j1,k0:k1), mm
!
@@ -1321,15 +1321,15 @@ subroutine get_mbarv(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx,mbar
mbarv = mwdry
else
if (present(fact)) then
- factor = fact(:,:,:)
+ factor(i0:i1,j0:j1,k0:k1) = fact(i0:i1,j0:j1,k0:k1)
else
- factor = 1.0_r8
- endif
+ factor(i0:i1,j0:j1,k0:k1) = 1.0_r8
+ endif
mbarv = 0.0_r8
residual = 1.0_r8
do nq=1,dry_air_species_num-1
- m_cnst = active_species_idx(nq)
+ m_cnst = active_species_idx(nq)
do k=k0,k1
do j=j0,j1
do i = i0,i1
@@ -1374,13 +1374,13 @@ subroutine get_kappa_dry(i0,i1,j0,j1,k0,k1,nlev,ntrac,tracer,active_species_idx,
allocate(R_dry(i0:i1,j0:j1,k0:k1))
allocate(cp_dry(i0:i1,j0:j1,k0:k1))
if (present(fact)) then
- call get_cp_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,tracer,active_species_idx,cp_dry,fact=fact)
- call get_R_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,tracer,active_species_idx,R_dry,fact=fact)
+ call get_cp_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,tracer,active_species_idx,cp_dry,fact=fact)
+ call get_R_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,tracer,active_species_idx,R_dry,fact=fact)
else
- call get_cp_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,tracer,active_species_idx,cp_dry)
- call get_R_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,tracer,active_species_idx,R_dry)
+ call get_cp_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,tracer,active_species_idx,cp_dry)
+ call get_R_dry(i0,i1,j0,j1,k0,k1,1,nlev,ntrac,tracer,active_species_idx,R_dry)
end if
- kappa_dry = R_dry/cp_dry
+ kappa_dry = R_dry/cp_dry
deallocate(R_dry,cp_dry)
end if
end subroutine get_kappa_dry
@@ -1404,7 +1404,7 @@ subroutine get_sum_species(i0,i1,j0,j1,k0,k1,ntrac,tracer,active_species_idx,sum
real(r8) :: factor(i0:i1,j0:j1,k0:k1)
integer :: nq,itrac
-
+
if (present(dp_dry)) then
factor = 1.0_r8/dp_dry(:,:,:)
else
@@ -1423,11 +1423,11 @@ end subroutine get_sum_species
!
! Note:tracer is in units of m*dp_dry ("mass")
!
- !****************************************************************************************************************
+ !****************************************************************************************************************
!
subroutine get_thermal_energy(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,temp,dp_dry,thermal_energy, &
active_species_idx_dycore)
- integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac
+ integer, intent(in) :: i0,i1,j0,j1,k0,k1,ntrac
real(r8), intent(in) :: tracer_mass(i0:i1,j0:j1,k0:k1,ntrac)!tracer array (mass weighted)
real(r8), intent(in) :: temp(i0:i1,j0:j1,k0:k1) !temperature
real(r8), intent(in) :: dp_dry(i0:i1,j0:j1,k0:k1) !dry presure level thickness
@@ -1437,7 +1437,7 @@ subroutine get_thermal_energy(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,temp,dp_dry,th
! (if different from physics index)
!
integer, optional, dimension(:) :: active_species_idx_dycore
-
+
! local vars
integer :: nq, itrac
integer, dimension(thermodynamic_active_species_num) :: idx_local
@@ -1448,7 +1448,7 @@ subroutine get_thermal_energy(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,temp,dp_dry,th
idx_local = active_species_idx_dycore
else
idx_local = thermodynamic_active_species_idx
- end if
+ end if
!
! "mass-weighted" cp (dp must be dry)
!
@@ -1460,7 +1460,7 @@ subroutine get_thermal_energy(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,temp,dp_dry,th
end if
!
! tracer is in units of m*dp ("mass"), where m is dry mixing ratio and dry pressure level thickness
- !
+ !
do nq=dry_air_species_num+1,thermodynamic_active_species_num
itrac = idx_local(nq)
thermal_energy(:,:,:) = thermal_energy(:,:,:)+thermodynamic_active_species_cp(nq)*tracer_mass(:,:,:,itrac)
@@ -1469,7 +1469,7 @@ subroutine get_thermal_energy(i0,i1,j0,j1,k0,k1,ntrac,tracer_mass,temp,dp_dry,th
end subroutine get_thermal_energy
!
!****************************************************************************************************************
- !
+ !
! Compute virtual temperature T_v
!
! tracer is in units of dry mixing ratio unless optional argument dp_dry is present in which case tracer is
@@ -1494,7 +1494,7 @@ subroutine get_virtual_temp(i0,i1,j0,j1,k0,k1,ntrac,tracer,T_v,temp,dp_dry,sum_q
! (if different from physics index)
!
integer, optional, intent(in) :: active_species_idx_dycore(:)
-
+
! local vars
integer :: itrac,nq
real(r8), dimension(i0:i1,j0:j1,k0:k1) :: sum_species, factor, Rd
@@ -1509,15 +1509,15 @@ subroutine get_virtual_temp(i0,i1,j0,j1,k0,k1,ntrac,tracer,T_v,temp,dp_dry,sum_q
if (present(dp_dry)) then
factor = 1.0_r8/dp_dry
else
- factor = 1.0_r8
+ factor = 1.0_r8
end if
sum_species = 1.0_r8 !all dry air species sum to 1
do nq=dry_air_species_num+1,thermodynamic_active_species_num
- itrac = idx_local(nq)
+ itrac = idx_local(nq)
sum_species(:,:,:) = sum_species(:,:,:) + tracer(:,:,:,itrac)*factor(:,:,:)
end do
-
+
call get_R_dry (i0,i1,j0,j1,k0,k1,k0,k1,ntrac,tracer,idx_local,Rd,fact=factor)
t_v(:,:,:) = Rd(:,:,:)
do nq=dry_air_species_num+1,thermodynamic_active_species_num
@@ -1527,7 +1527,7 @@ subroutine get_virtual_temp(i0,i1,j0,j1,k0,k1,ntrac,tracer,T_v,temp,dp_dry,sum_q
if (present(temp)) then
t_v(:,:,:) = t_v(:,:,:)*temp(:,:,:)/(Rd(:,:,:)*sum_species)
else
- t_v(:,:,:) = t_v(:,:,:)/(Rd(:,:,:)*sum_species)
+ t_v(:,:,:) = t_v(:,:,:)/(Rd(:,:,:)*sum_species)
end if
if (present(sum_q)) sum_q=sum_species
end subroutine get_virtual_temp
@@ -1551,7 +1551,7 @@ subroutine get_cp(i0,i1,j0,j1,k0,k1,ntrac,tracer,inv_cp,cp,dp_dry,active_species
! (if different from physics index)
!
integer, optional, intent(in) :: active_species_idx_dycore(:)
-
+
! local vars
integer :: nq,i,j,k, itrac
real(r8), dimension(i0:i1,j0:j1,k0:k1) :: sum_species, sum_cp, factor
@@ -1566,12 +1566,12 @@ subroutine get_cp(i0,i1,j0,j1,k0,k1,ntrac,tracer,inv_cp,cp,dp_dry,active_species
if (present(dp_dry)) then
factor = 1.0_r8/dp_dry
else
- factor = 1.0_r8
+ factor = 1.0_r8
end if
sum_species = 1.0_r8 !all dry air species sum to 1
do nq=dry_air_species_num+1,thermodynamic_active_species_num
- itrac = idx_local(nq)
+ itrac = idx_local(nq)
sum_species(:,:,:) = sum_species(:,:,:) + tracer(:,:,:,itrac)*factor(:,:,:)
end do
@@ -1581,7 +1581,7 @@ subroutine get_cp(i0,i1,j0,j1,k0,k1,ntrac,tracer,inv_cp,cp,dp_dry,active_species
call get_cp_dry(i0,i1,j0,j1,k0,k1,k0,k1,ntrac,tracer,idx_local,sum_cp,fact=factor)
end if
do nq=dry_air_species_num+1,thermodynamic_active_species_num
- itrac = idx_local(nq)
+ itrac = idx_local(nq)
sum_cp(:,:,:) = sum_cp(:,:,:)+thermodynamic_active_species_cp(nq)*tracer(:,:,:,itrac)*factor(:,:,:)
end do
if (inv_cp) then
@@ -1638,7 +1638,7 @@ subroutine get_rho_dry(i0,i1,j0,j1,k1,nlev,ntrac,tracer,temp,ptop,dp_dry,tracer_
integer, optional, intent(in) :: active_species_idx_dycore(:)
real(r8),optional,intent(out) :: pint_out(i0:i1,j0:j1,1:k1+1)
real(r8),optional,intent(out) :: pmid_out(i0:i1,j0:j1,1:k1)
-
+
! local vars
integer :: i,j,k
real(r8), dimension(i0:i1,j0:j1,1:k1) :: pmid
@@ -1659,7 +1659,7 @@ subroutine get_rho_dry(i0,i1,j0,j1,k1,nlev,ntrac,tracer,temp,ptop,dp_dry,tracer_
if (present(pint_out)) pmid_out=pmid
if (present(rhoi_dry)) then
allocate(R_dry(i0:i1,j0:j1,1:k1+1))
- if (tracer_mass) then
+ if (tracer_mass) then
call get_R_dry(i0,i1,j0,j1,1,k1+1,1,nlev,ntrac,tracer,idx_local,R_dry,fact=1.0_r8/dp_dry)
else
call get_R_dry(i0,i1,j0,j1,1,k1+1,1,nlev,ntrac,tracer,idx_local,R_dry)
@@ -1696,7 +1696,7 @@ end subroutine get_rho_dry
!*************************************************************************************************************************
!
! compute 3D molecular diffusion and thermal conductivity
- !
+ !
!*************************************************************************************************************************
!
subroutine get_molecular_diff_coef(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces,sponge_factor,kmvis,kmcnd, ntrac,&
@@ -1721,7 +1721,7 @@ subroutine get_molecular_diff_coef(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces,sp
integer :: i,j,k,icnst,ispecies
real(r8):: mbarvi,mm,residual ! Mean mass at mid level
real(r8):: cnst_vis, cnst_cnd, temp_local
- real(r8), dimension(i0:i1,j0:j1,1:k1) :: factor,mbarv
+ real(r8), dimension(i0:i1,j0:j1,1:k1) :: factor,mbarv
integer, dimension(thermodynamic_active_species_num):: idx_local
!--------------------------------------------
@@ -1737,7 +1737,7 @@ subroutine get_molecular_diff_coef(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces,sp
do j=j0,j1
do i=i0,i1
temp_local = 0.5_r8*(temp(i,j,k)+temp(i,j,k-1))
- kmvis(i,j,k) = sponge_factor(k)*cnst_vis*temp_local**kv4
+ kmvis(i,j,k) = sponge_factor(k)*cnst_vis*temp_local**kv4
kmcnd(i,j,k) = sponge_factor(k)*cnst_cnd*temp_local**kc4
end do
end do
@@ -1747,11 +1747,11 @@ subroutine get_molecular_diff_coef(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces,sp
!
kmvis(i0:i1,j0:j1,1) = 1.5_r8*kmvis(i0:i1,j0:j1,2)-0.5_r8*kmvis(i0:i1,j0:j1,3)
kmcnd(i0:i1,j0:j1,1) = 1.5_r8*kmcnd(i0:i1,j0:j1,2)-0.5_r8*kmcnd(i0:i1,j0:j1,3)
- else if (get_at_interfaces==0) then
+ else if (get_at_interfaces==0) then
do k=1,k1
do j=j0,j1
do i=i0,i1
- kmvis(i,j,k) = sponge_factor(k)*cnst_vis*temp(i,j,k)**kv4
+ kmvis(i,j,k) = sponge_factor(k)*cnst_vis*temp(i,j,k)**kv4
kmcnd(i,j,k) = sponge_factor(k)*cnst_cnd*temp(i,j,k)**kc4
end do
end do
@@ -1781,11 +1781,11 @@ subroutine get_molecular_diff_coef(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces,sp
if (get_at_interfaces==1) then
do k=2,k1
do j=j0,j1
- do i=i0,i1
+ do i=i0,i1
kmvis(i,j,k) = 0.0_r8
kmcnd(i,j,k) = 0.0_r8
residual = 1.0_r8
- do icnst=1,dry_air_species_num-1
+ do icnst=1,dry_air_species_num-1
ispecies = idx_local(icnst)
mm = 0.5_r8*(tracer(i,j,k,ispecies)*factor(i,j,k)+tracer(i,j,k-1,ispecies)*factor(i,j,k-1))
kmvis(i,j,k) = kmvis(i,j,k)+thermodynamic_active_species_kv(icnst)* &
@@ -1800,10 +1800,10 @@ subroutine get_molecular_diff_coef(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces,sp
thermodynamic_active_species_mwi(icnst)*residual
kmcnd(i,j,k) = kmcnd(i,j,k)+thermodynamic_active_species_kc(icnst)* &
thermodynamic_active_species_mwi(icnst)*residual
-
+
temp_local = .5_r8*(temp(i,j,k-1)+temp(i,j,k))
mbarvi = 0.5_r8*(mbarv(i,j,k-1)+mbarv(i,j,k))
- kmvis(i,j,k) = kmvis(i,j,k)*mbarvi*temp_local**kv4*1.e-7_r8
+ kmvis(i,j,k) = kmvis(i,j,k)*mbarvi*temp_local**kv4*1.e-7_r8
kmcnd(i,j,k) = kmcnd(i,j,k)*mbarvi*temp_local**kc4*1.e-5_r8
enddo
enddo
@@ -1816,7 +1816,7 @@ subroutine get_molecular_diff_coef(i0,i1,j0,j1,k1,nlev,temp,get_at_interfaces,sp
kmcnd(i,j,k1+1) = kmcnd(i,j,k1)
end do
end do
- else if (get_at_interfaces==0) then
+ else if (get_at_interfaces==0) then
else
call endrun('get_molecular_diff_coef: get_at_interfaces must be 0 or 1')
end if
@@ -1826,7 +1826,7 @@ end subroutine get_molecular_diff_coef
!*************************************************************************************************************************
!
! compute reference vertical profile of density, molecular diffusion and thermal conductivity
- !
+ !
!*************************************************************************************************************************
!
subroutine get_molecular_diff_coef_reference(k0,k1,tref,press,sponge_factor,kmvis_ref,kmcnd_ref,rho_ref)
@@ -1838,10 +1838,10 @@ subroutine get_molecular_diff_coef_reference(k0,k1,tref,press,sponge_factor,kmvi
real(r8), intent(out) :: kmvis_ref(k0:k1) !reference molecular diffusion coefficient
real(r8), intent(out) :: kmcnd_ref(k0:k1) !reference thermal conductivity coefficient
real(r8), intent(out) :: rho_ref(k0:k1) !reference density
-
+
! local vars
integer :: k
-
+
!--------------------------------------------
! Set constants needed for updates
!--------------------------------------------