Browse Source

Initial commit.

This commit has been prepared by the following steps:

git clone --depth 1 --branch 5.1.0 https://github.com/mom-ocean/MOM5.git .
rm -rf .git
rm -r LICENSE  README  README.md  annex.py .gitignore data test doc
git init
git add *

It is thus branching from the aforementioned repository at the following commit:

Autor:			Nicholas Hannah <nicholash@users.noreply.github.com>
Datum:			vor 7 Jahren (24.03.2014 04:17:14)
Commit Hash:	f406b4c5b4bbece3b0ae7f376a4ba90ea68ffb1e
Kind:			Commit Index

Merge pull request #33 from nicholash/mom-87-various-bugs

Mom 87 various bugs
Enthalten in folgenden Branches:
(no branch)
Enthalten in folgenden Tags:
5.1.0

Leitet vom Tag ab: 5.1.0
distribute-radiation-on-surface-types
Sven Karsten 2 years ago
commit
42bd2e512e
  1. BIN
      bin/.prepare_pubrel.csh.swp
  2. 46
      bin/combine_blobs.py
  3. BIN
      bin/combine_blobs.pyc
  4. 598
      bin/diag_table_chk
  5. 17
      bin/environs.gfdl_ws_32.intel
  6. 14
      bin/environs.gfdl_ws_64.gnu
  7. 16
      bin/environs.gfdl_ws_64.intel
  8. 7
      bin/environs.gfortran
  9. 16
      bin/environs.hpcs.intel
  10. 3
      bin/environs.ibm.xlf
  11. 8
      bin/environs.nci
  12. 21
      bin/environs.ncrc1.intel
  13. 15
      bin/environs.ncrc2.gnu
  14. 21
      bin/environs.ncrc2.intel
  15. 2
      bin/environs.workstation.gfort
  16. 17
      bin/environs.workstation.intel
  17. 73
      bin/list_files_with_tag
  18. 98
      bin/list_paths
  19. 472
      bin/mkmf
  20. 182
      bin/mkmf.debugtemplate.gfdl_ws_32.intel
  21. 106
      bin/mkmf.debugtemplate.ia64
  22. 106
      bin/mkmf.debugtemplate.ia64_hdf
  23. 96
      bin/mkmf.debugtemplate.sgi
  24. 476
      bin/mkmf.html
  25. 182
      bin/mkmf.template.gfdl_ws_32.intel
  26. 184
      bin/mkmf.template.gfdl_ws_64.gnu
  27. 184
      bin/mkmf.template.gfdl_ws_64.gnu.debug
  28. 181
      bin/mkmf.template.gfdl_ws_64.intel
  29. 187
      bin/mkmf.template.gfortran
  30. 167
      bin/mkmf.template.hpcs.intel
  31. 106
      bin/mkmf.template.ia64
  32. 106
      bin/mkmf.template.ia64_flt
  33. 106
      bin/mkmf.template.ia64_flt_hdf
  34. 13
      bin/mkmf.template.ibm
  35. 15
      bin/mkmf.template.ibm.xlf
  36. 9
      bin/mkmf.template.ifc
  37. 146
      bin/mkmf.template.nci
  38. 183
      bin/mkmf.template.ncrc1.intel
  39. 177
      bin/mkmf.template.ncrc2.gnu
  40. 183
      bin/mkmf.template.ncrc2.intel
  41. 87
      bin/mkmf.template.nec
  42. 110
      bin/mkmf.template.pgi
  43. 112
      bin/mkmf.template.pscale
  44. 112
      bin/mkmf.template.sgi
  45. 79
      bin/mkmf.template.t3e
  46. 86
      bin/mkmf.template.t90
  47. 18
      bin/mkmf.template.workstation.gfort
  48. 182
      bin/mkmf.template.workstation.intel
  49. BIN
      bin/mppnccombine.gfdl_ws_64.intel
  50. 119
      bin/prepare_pubrel.csh
  51. 122
      bin/time_stamp.csh
  52. 838
      exp/MOM_compile.csh
  53. 319
      exp/MOM_run.csh
  54. 423
      exp/mom5_ebm_compile.csh
  55. 33
      exp/preprocessing.csh
  56. BIN
      src/atmos_bgrid/documentation/decomposition.jpg
  57. BIN
      src/atmos_bgrid/documentation/flowchart.jpg
  58. BIN
      src/atmos_bgrid/documentation/global_temp_grid.jpg
  59. BIN
      src/atmos_bgrid/documentation/global_vel_grid.jpg
  60. BIN
      src/atmos_bgrid/documentation/north_pole.jpg
  61. BIN
      src/atmos_bgrid/documentation/south_pole.jpg
  62. BIN
      src/atmos_bgrid/documentation/text.xbm
  63. BIN
      src/atmos_bgrid/documentation/time_steps.jpg
  64. 514
      src/atmos_bgrid/driver/coupled/atmosphere.F90
  65. 778
      src/atmos_bgrid/driver/coupled/bgrid_physics.F90
  66. 363
      src/atmos_bgrid/driver/shallow/atmosphere.F90
  67. 424
      src/atmos_bgrid/driver/solo/atmosphere.F90
  68. 1662
      src/atmos_bgrid/model/bgrid_advection.F90
  69. 189
      src/atmos_bgrid/model/bgrid_conserve_energy.F90
  70. 655
      src/atmos_bgrid/model/bgrid_core.F90
  71. 475
      src/atmos_bgrid/model/bgrid_core_driver.F90
  72. 684
      src/atmos_bgrid/model/bgrid_horiz_adjust.F90
  73. 1028
      src/atmos_bgrid/model/bgrid_horiz_diff.F90
  74. 498
      src/atmos_bgrid/model/bgrid_sponge.F90
  75. 203
      src/atmos_bgrid/model/bgrid_vert_adjust.F90
  76. 716
      src/atmos_bgrid/tools/bgrid_change_grid.F90
  77. 272
      src/atmos_bgrid/tools/bgrid_cold_start.F90
  78. 833
      src/atmos_bgrid/tools/bgrid_diagnostics.F90
  79. 450
      src/atmos_bgrid/tools/bgrid_halo.F90
  80. 956
      src/atmos_bgrid/tools/bgrid_horiz.F90
  81. 920
      src/atmos_bgrid/tools/bgrid_integrals.F90
  82. 258
      src/atmos_bgrid/tools/bgrid_masks.F90
  83. 976
      src/atmos_bgrid/tools/bgrid_polar_filter.F90
  84. 802
      src/atmos_bgrid/tools/bgrid_prog_var.F90
  85. 786
      src/atmos_bgrid/tools/bgrid_vert.F90
  86. 1004
      src/atmos_coupled/atmos_model.F90
  87. 1470
      src/atmos_ebm/atmosphere.F90
  88. 275
      src/atmos_ebm/ebm_diagnostics.F90
  89. 593
      src/atmos_fv_dynamics/driver/coupled/atmosphere.F90
  90. 782
      src/atmos_fv_dynamics/driver/coupled/fv_physics.F90
  91. 282
      src/atmos_fv_dynamics/driver/solo/atmosphere.F90
  92. 119
      src/atmos_fv_dynamics/driver/solo/fv_phys.F90
  93. 294
      src/atmos_fv_dynamics/driver/solo/hswf.F90
  94. 1123
      src/atmos_fv_dynamics/model/dyn_core.F90
  95. 603
      src/atmos_fv_dynamics/model/ecmfft.F90
  96. 362
      src/atmos_fv_dynamics/model/fill_module.F90
  97. 628
      src/atmos_fv_dynamics/model/fv_arrays.F90
  98. 36
      src/atmos_fv_dynamics/model/fv_arrays.h
  99. 804
      src/atmos_fv_dynamics/model/fv_dynamics.F90
  100. 1523
      src/atmos_fv_dynamics/model/fv_pack.F90
  101. Some files were not shown because too many files have changed in this diff Show More

BIN
bin/.prepare_pubrel.csh.swp

Binary file not shown.

46
bin/combine_blobs.py

@ -0,0 +1,46 @@
import os
import numpy
#import netCDF4
import sys
sys.path.append('/net2/nnz/opt/python/netCDF4-0.9.9/build/lib.linux-i686-2.4/')
import netCDF4
class ncFile(object):
    """Thin read-only wrapper around an existing netCDF file.

    Exposes the underlying dataset handle (``root``) together with its
    dimensions, variables and global-attribute names, and records the
    current length of the unlimited (record) dimension in ``len``.
    """

    def __init__(self, path):
        # path: name of an existing netCDF file; opened read-only.
        self.path = path
        self.open()
        # Length of the unlimited (record) dimension.  Default to 0 when the
        # file has no unlimited dimension (previously the attribute was left
        # unset in that case, raising AttributeError on later access).
        self.len = 0
        # .items() instead of the Python-2-only .iteritems(): behaviour is
        # identical under Python 2 and also works under Python 3.
        for dimname, dim in self.dims.items():
            if dim.isunlimited():
                self.len = len(dim)

    def close(self):
        """Close the dataset and drop the cached handles."""
        self.root.close()
        del self.root, self.dims, self.vars, self.gatts

    def delete(self):
        """Close the dataset and remove the file from disk."""
        self.close()
        os.remove(self.path)

    def open(self):
        """(Re)open the file read-only and cache handles to its metadata."""
        self.root = netCDF4.Dataset(self.path, 'r')
        self.dims = self.root.dimensions
        self.vars = self.root.variables
        self.gatts = self.root.ncattrs()
class newFile(object):
    """Writable NETCDF3_CLASSIC file whose global attributes are copied
    from an already-open seed file (an ncFile instance)."""

    def __init__(self, path, seed):
        self.path = path
        self.root = netCDF4.Dataset(self.path, 'w', format='NETCDF3_CLASSIC')
        self.vars = self.root.variables
        self.dims = self.root.dimensions
        # Copy every global attribute of the seed file onto the new file.
        source = seed.root
        for name in source.ncattrs():
            self.root.setncattr(name, source.getncattr(name))

    def close(self):
        """Close the dataset and drop the cached handles."""
        self.root.close()
        del self.root, self.vars, self.dims

BIN
bin/combine_blobs.pyc

Binary file not shown.

598
bin/diag_table_chk

@ -0,0 +1,598 @@
#! /usr/bin/perl
# $Author: Niki.Zadeh $
# $Revision: 1.1.2.1 $
# $Date: 2013/12/18 17:47:53 $
# Perl script to parse the diag_table. Count the number of files to
# be used, and the max number of fields per file used.
use strict;
use Switch;
use List::Util qw/max/;
use XML::LibXML;
use Pod::Usage;
use Getopt::Long;
# Command line options (see the POD at the end of this file):
#   -h|--help     print usage information and exit
#   -v|--verbose  also list every file and field that was found
#   -x|--xml      locate the diag_table via an XML experiment description
my $help = 0;
my $verbose = 0;
my $xmlFile = '';
GetOptions ("help|h" => \$help,
"verbose|v" => \$verbose,
"xml|x=s" => \$xmlFile) or pod2usage(2);
pod2usage(1) if $help;
# Variable to hold the location of the diag_table file.
my $diag_table_file = '';
# diag_table_chk can be called one of two ways: either the diag_table file
# is given directly on the command line, or it is located via an XML
# experiment description (-x <xml_file> <experiment>).
if ( ! $xmlFile ) { # If no XML file specified.
    if ( $#ARGV < 0 ) {
        pod2usage( { -message => "$0: diag_table file must be given as an argument.",
                     -verbose => 0,
                   } );
    } else {
        $diag_table_file = $ARGV[0];
    }
} else { # We are using an XML file.
    if ( $#ARGV < 0 ) {
        pod2usage( { -message => "$0: experiment must be given as an argument.",
                     -verbose => 0,
                   } );
    } else {
        # Make sure the $xmlFile exists and is readable.
        die "File $xmlFile does not exist.\n" unless ( -e $xmlFile );
        die "File $xmlFile exists, but is unreadable.\n" unless ( -r $xmlFile );
        die "$xmlFile is not a file.\n" unless ( -f $xmlFile );
        # Parse the XML once; these are package globals ($::root) so the
        # lookup subroutines at the bottom of this script can see them.
        our $parser = XML::LibXML -> new();
        our $root = $parser -> parse_file($xmlFile) -> getDocumentElement;
        our $inputExperiment = $ARGV[0];
        die "$0: Experiment $inputExperiment does not exist in file $xmlFile.\n" unless ( experimentExistsInXML($inputExperiment) );
        $diag_table_file = getDiagTableFromXML($inputExperiment);
    }
}
# Check if the diag table file exists, is not a directory and is readable.
die "$0: File $diag_table_file does not exist.\n" unless ( -e $diag_table_file );
die "$0: File $diag_table_file exists, but is unreadable.\n" unless ( -r $diag_table_file );
die "$0: $diag_table_file is not a file.\n" unless ( -f $diag_table_file );
# Announce what file we are going to read.
print "Reading file $diag_table_file\n\n";
# Open the file handle for the diag table.
# BUG FIX: the original ignored open() failure and would silently parse an
# empty stream; fail loudly instead (e.g. on a permission race after the
# -r test above).
open(DIAG_TABLE, "<", $diag_table_file) or die "$0: Cannot open $diag_table_file: $!\n";
# Arrays to hold files and fields.  Each is seeded with one empty
# placeholder record, so the index of the last element ($#files, $#fields)
# equals the number of real records collected.
my @files = ( {
file_name => '',
output_frequency => 0,
output_frequency_units => 0,
output_format => 0,
time_units => 0,
long_name => '',
new_file_frequency => 0,
new_file_frequency_units => 0,
start_time_string => '',
file_duration => 0,
file_duration_units => 0,
} );
my @fields = ( {
file_name => '',
module_name => '',
field_name => '',
output_name => '',
time_sampling => '',
time_method => '',
spatial_ops => '',
packing => 0,
} );
# Other variables to hold useful information.
# Number of fields per output file, keyed by file name.
my %fields_per_file;
# Collected warnings; index 0 is a placeholder, so $#warnings is the count.
my @warnings = ( { line_number => 0,
message => '',
} );
# Table descriptor and global base date, taken from the table header lines.
my $tableName;
my @globalDate;
# Parse the data from the diag table file, and put each record into the
# appropriate array (@files or @fields), collecting warnings as we go.
while ( <DIAG_TABLE> ) {
my $line = sanitizeString($_);
# Skip comment and blank lines.
next if ( $line =~ /^#/ or $line =~ /^$/ );
# Limit the split to 11 fields so a trailing value containing commas is
# not broken up further.
my @line_data = split(/,/,$line,11);
my $num_warnings = 0;
if ( $#line_data == 0 ) { # No Commas in string
# Find the descriptor and base date. Neither should have a comma.
my @date = split(/\s+/, sanitizeString($line_data[0]));
if ( $#date >= 1 ) { # We have a date.
my $message = verifyDate(@date);
if ( $message ) {
push @warnings, ( {
line_number => $.,
message => "Invalid global date. $message",
} );
} else {
@globalDate = @date;
}
} else { # We have the descriptor / table name or the date may be set by the script
# A token like "$baseDate" is treated as a script-substituted date.
if ( $line_data[0] =~ /^\$.*[dD]ate$/ ) {
@globalDate[0] = $line_data[0];
} else {
$tableName = sanitizeString($line_data[0]);
}
}
} elsif ( $#line_data > 1 ) {
# Both header values must appear before any file/field records.
if ( $tableName =~ /^$/ or $globalDate[0] =~ /^$/ ) {
push @warnings, ( {
line_number => $.,
message => 'The table descriptor and the base date must be set before any files or fields.',
} );
$tableName = 'NOT SET' if ( $tableName =~ /^$/ );
$globalDate[0] = 'NOT SET' if ( $globalDate[0] =~ /^$/ ) ;
}
# A file-description line carries the time-axis name (e.g. "time") in
# column 6, which field lines do not -- that is the discriminator here.
if ( lc($line_data[5]) =~ /time/ ) { # This is a file.
# Check output_frequency :: Must be >= -1
if ( $line_data[1] < -1 ) {
$num_warnings++;
push @warnings, ( {
line_number => $.,
message => 'Invalid output frequency. Must be >= -1.',
} );
}
# check output_frequency units :: return from find_unit_ivalue() > 0
if ( find_unit_ivalue($line_data[2]) < 0 ) {
$num_warnings++;
$line_data[2] =~ s/"//g;
push @warnings, ( {
line_number => $.,
message => "Invalid output frequency unit. ($line_data[2]).",
} );
}
# check output_format :: Must be in the range 1 <= output_format <= 2
if ( $line_data[3] < 1 or $line_data[3] > 2 ) {
$num_warnings++;
push @ warnings, ( {
line_number => $.,
message => "Output_format out of range. Must be in the range [1,2].",
} );
}
# check time_units :: return from find_unit_ivalue() > 0
if ( find_unit_ivalue($line_data[4]) < 0 ) {
$num_warnings++;
$line_data[4] =~ s/"//g;
push @warnings, ( {
line_number => $.,
message => "Invalid time unit. ($line_data[4]).",
} );
}
# The following are optional. (There may be a slight problem if the line ends with a ','.)
if ( $#line_data > 6 ) {
# Check new_file_frequency :: Must be > 0
if ( $line_data[6] < 0 ) {
$num_warnings++;
push @warnings, ( {
line_number => $.,
message => "Invalid new file frequency. Must be > 0.",
} );
}
# Check new_file_frequency_units :: return from find_unit_ivalue() > 0
if ( find_unit_ivalue($line_data[7]) < 0 ) {
$num_warnings++;
$line_data[7] =~ s/"//g;
push @warnings, ( {
line_number => $.,
message => "Invalid new file frequency unit. ($line_data[7]).",
} );
}
# More optional inputs
if ( $#line_data >= 8 ) {
# NOTE(review): this increment is unconditional, so any file line that
# supplies a start_time (9 or more fields) gets $num_warnings > 0 and is
# never pushed onto @files below -- looks like a bug; confirm intent.
$num_warnings++;
# remove quotes, beginning and ending space.
$line_data[8] =~ s/"//g;
$line_data[8] =~ s/^\s+//;
$line_data[8] =~ s/\s+$//;
my @start_time = split(/\s+/,$line_data[8]);
# Check start_time_string :: Must be valid date string
my $message = verifyDate(@start_time);
if ( $message ) {
push @warnings, ( {
line_number => $.,
message => "Invalid start time format. $message",
} );
}
# The last two optional inputs
if ( $#line_data > 8 ) {
# Check file_duration :: Must be > 0
if ( $line_data[9] < 0 ) {
$num_warnings++;
push @warnings, ( {
line_number => $.,
message => "Invalid file duration. Must be > 0.",
} );
}
# Check file_duration_units :: return from find_unit_ivalue() > 0
if ( find_unit_ivalue($line_data[10]) < 0 ) {
$num_warnings++;
$line_data[10] =~ s/"//g;
push @ warnings, ( {
line_number => $.,
message => "Invalid file duration unit. ($line_data[10]).",
} );
}
}
}
}
# Record the file only if every check above passed.
if ( $num_warnings == 0 ) {
push @files, ( {
file_name => sanitizeString($line_data[0]),
output_frequency => sanitizeString($line_data[1]),
output_frequency_units => sanitizeString($line_data[2]),
output_format => sanitizeString($line_data[3]),
time_units => sanitizeString($line_data[4]),
long_name => sanitizeString($line_data[5]),
new_file_frequency => sanitizeString($line_data[6]),
new_file_frequency_units => sanitizeString($line_data[7]),
start_time_string => sanitizeString($line_data[8]),
file_duration => sanitizeString($line_data[9]),
file_duration_units => sanitizeString($line_data[10]),
} );
$fields_per_file{$files[$#files]{file_name}} = 0;
# print "File found (",$files[$#files]{file_name},"), line ",$.,".\n";
}
} else { # This is a field.
# Make sure there are enough fields on the description line :: must be = 8.
if ( $#line_data != 7 ) {
$num_warnings++;
my $message;
# Are there too many?
if ( $#line_data > 7 ) {
$message = "Too many fields on field description line.";
} else { # Nope, too few.
$message = "Not enough fields on field description line.";
}
push @warnings, ( {
line_number => $.,
message => $message,
} );
}
# Verify that file_name exists in the files array
$line_data[3] =~ s/"//g;
$line_data[3] =~ s/^\s+//;
$line_data[3] =~ s/\s+$//;
my $notfound = 1;
for (my $i=0; $i <= $#files; $i++) {
# NOTE(review): the stored file_name is used as a regex pattern here, so
# names containing regex metacharacters could match unexpectedly.
if ( $files[$i]{file_name} =~ $line_data[3] ) {
$notfound = 0;
last;
}
}
if ( $notfound ) {
$num_warnings++;
push @warnings, ( {
line_number => $.,
message => "File ($line_data[3]) not defined. It must be defined before any fields.",
} );
}
# Verify time_method / time_avg is valid
if ( invalid_timeSample(sanitizeString($line_data[5])) ) {
$ num_warnings++;
push @warnings, ( {
line_number => $.,
message => "Time sampling method must be one of (.true., mean, average, avg, .false., none, point, maximum, max, minimum, min, diurnal[#]).",
} );
}
# Verify packing is valid :: must be in range [1,8]
if ( $line_data[7] < 1 or $line_data[7] > 8 ) {
$num_warnings++;
push @warnings, ( {
line_number => $.,
message => "Packing is out of the valid range. Must be in the range [1,8]."
} );
}
# Record the field only if every check above passed.
if ( $num_warnings == 0 ) {
push @fields, ( {
file_name => sanitizeString($line_data[3]),
module_name => sanitizeString($line_data[0]),
field_name => sanitizeString($line_data[1]),
output_name => sanitizeString($line_data[2]),
time_sampling => sanitizeString($line_data[4]),
time_method => sanitizeString($line_data[5]),
spatial_ops => sanitizeString($line_data[6]),
packing => sanitizeString($line_data[7]),
} );
$fields_per_file{$fields[$#fields]{file_name}}++;
}
}
}
}
# Verbose mode: render the collected files and fields as tables using
# perlform.  The formats write into in-memory filehandles first so the
# two reports can be printed one after the other.
if ( $verbose ) {
my $files2output;
my $fields2output;
# Open filehandles backed by scalars (in-memory files).
open(FILES, '>', \$files2output);
open(FIELDS, '>', \$fields2output);
# Package-scope variables referenced by the format definitions below.
my $file_name;
my $output_frequency;
my $output_frequency_units;
my $output_format;
my $time_units;
my $module_name;
my $field_name;
my $output_name;
my $time_sampling;
my $time_method;
my $spatial_ops;
my $packing;
# NOTE: everything between "format ... =" and the lone "." is literal
# format text -- do not add comments inside those sections.
format FILES_TOP =
Files
Output Axis
File Name Frequency FMT Units
------------------------------------------------------------
.
format FILES =
@<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< @>>> @<<<<<<< @||| @<<<<
$file_name, $output_frequency, $output_frequency_units, $output_format, $time_units
.
# Start at index 1: index 0 is the placeholder record.
for ( my $file=1; $file <= $#files; $file++ ) {
$file_name = $files[$file]{file_name};
$output_frequency = $files[$file]{output_frequency};
$output_frequency_units = $files[$file]{output_frequency_units};
$output_format = $files[$file]{output_format};
$time_units = $files[$file]{time_units};
write FILES;
}
format FIELDS_TOP =
Fields
Output Sample Spatial
Field Name Module File Name Name Samples Method Ops Packing
-------------------------------------------------------------------------------------------------------------
.
format FIELDS =
@<<<<<<<<<<<<<<< @<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< @<<<<<<<<<<<<<<< @<<<<< @<<<<<< @<<< @<
$field_name, $module_name, $file_name, $output_name, $time_sampling, $time_method, $spatial_ops, $packing
.
for ( my $field=1; $field <=$#fields; $field++ ) {
$module_name = $fields[$field]{module_name};
$field_name = $fields[$field]{field_name};
$output_name = $fields[$field]{output_name};
$file_name = $fields[$field]{file_name};
$time_sampling = $fields[$field]{time_sampling};
$time_method = $fields[$field]{time_method};
$spatial_ops = $fields[$field]{spatial_ops};
$packing = $fields[$field]{packing};
write FIELDS;
}
# Output the files and fields
close(FILES);
close(FIELDS);
print $files2output;
print $fields2output;
print "\n";
}
# Final report.  The arrays were seeded with one placeholder entry, so
# $#files / $#warnings equal the number of real records collected.
print "Table Descriptor:\t",$tableName,"\n";
print "Base Date:\t\t",join(' ',@globalDate),"\n";
print "Number of files:\t",$#files,"\n";
print "Max fields per file:\t",max(values(%fields_per_file)),"\n";
print "Number of warnings:\t",$#warnings,"\n";
# Warnings start at index 1; index 0 is the placeholder.
if ( $#warnings ) {
for ( my $warning=1; $warning <= $#warnings; $warning++ ) {
print STDERR "WARNING($warnings[$warning]{line_number}): $warnings[$warning]{message}\n";
}
}
# Check whether a time-sampling method token from a field line is valid.
# Returns 0 for a recognized method, 1 for an invalid one.
# (The comment above this sub previously described find_unit_ivalue by
# mistake.)
sub invalid_timeSample {
    my $timeSample = $_[0];
    # BUG FIX: the original alternations were mis-anchored --
    # /^mean|average|avg$/ parses as (^mean)|(average)|(avg$), so e.g.
    # "meaningless" or "maxima" were accepted.  Group the alternatives so
    # the anchors apply to every branch.  Also replaced the deprecated
    # Switch.pm source filter (removed from core Perl in 5.14) with plain
    # conditionals.
    return 0 if $timeSample =~ /^\.true\.$/i;
    return 0 if $timeSample =~ /^\.false\.$/i;
    return 0 if $timeSample =~ /^(mean|average|avg)$/;
    return 0 if $timeSample =~ /^(none|point)$/;
    return 0 if $timeSample =~ /^(maximum|max)$/;
    return 0 if $timeSample =~ /^(minimum|min)$/;
    return 0 if $timeSample =~ /^diurnal\d+$/;
    return 1;
}
# Map a time-unit string to its integer code (seconds=1 ... years=6).
# A return value of -1 indicates an unknown unit.  Substring matching is
# kept deliberately loose (the callers pass tokens that may still carry
# quotes).
sub find_unit_ivalue {
    my $unit_string = $_[0];
    # Replaced the deprecated Switch.pm source filter (removed from core
    # Perl in 5.14) with an equivalent conditional chain; the match
    # semantics are unchanged.
    return 1 if $unit_string =~ /seconds/;
    return 2 if $unit_string =~ /minutes/;
    return 3 if $unit_string =~ /hours/;
    return 4 if $unit_string =~ /days/;
    return 5 if $unit_string =~ /months/;
    return 6 if $unit_string =~ /years/;
    return -1;
}
sub experimentExistsInXML {
    # Return the first <experiment> node whose label or name attribute
    # matches the requested experiment (undef when there is no match),
    # searching the parsed XML root held in the package global $::root.
    my ($experiment) = @_;
    my $xpath = "experiment[\@label='$experiment' or \@name='$experiment']";
    return $::root -> findnodes($xpath) -> get_node(1);
}
sub getDiagTableFromXML {
    # Return the diagTable file path for an experiment.  If the experiment
    # has no <input><diagTable> entry, recursively check the parent
    # experiment named by its "inherit" attribute.
    my ($experiment) = @_;
    my $diagTableNode = $::root -> findnodes("experiment[\@label='$experiment' or \@name='$experiment']/input/diagTable") -> get_node(1);
    if ( $diagTableNode ) {
        return $diagTableNode -> findvalue("\@file");
    } else {
        # BUG FIX: the XPath below was missing the closing quote after
        # $experiment, so the inherit lookup could never match.
        my $parent = $::root -> findvalue("experiment[\@label='$experiment' or \@name='$experiment']/\@inherit");
        if ( $parent ) {
            # BUG FIX: the recursion called a nonexistent sub
            # (&getDiagTable) and discarded its result; recurse into this
            # sub and return the value.
            return getDiagTableFromXML($parent);
        } else {
            # BUG FIX: $xmlFile is a file-scoped "my" variable, so the
            # original $::xmlFile was always empty in this message.
            die "$0: Cannot find diagTable tag in the XML file $xmlFile.\n";
        }
    }
}
sub verifyDate {
    # Validate a (year, month, day, hour, min, sec) date array.
    # Returns an empty string when the date is valid, otherwise a
    # human-readable description of the first problem found.
    my @date = @_;
    my $leapYear = 0;
    my @months = ( { month => 'January',
                     days => 31 },
                   { month => 'February',
                     days => 28 },
                   { month => 'March',
                     days => 31 },
                   { month => 'April',
                     days => 30 },
                   { month => 'May',
                     days => 31 },
                   { month => 'June',
                     days => 30 },
                   { month => 'July',
                     days => 31 },
                   { month => 'August',
                     days => 31 },
                   { month => 'September',
                     days => 30 },
                   { month => 'October',
                     days => 31 },
                   { month => 'November',
                     days => 30 },
                   { month => 'December',
                     days => 31 } );
    if ( scalar(@date) != 6 ) {
        # Wrong number of elements in date. Are we too big?
        return 'Too many elements in date string.' if ( scalar(@date) > 6 ) ;
        return 'Too few elements in date string.' if ( scalar(@date) < 6 );
    }
    # BUG FIX: the check accepts year 0 (valid for model base dates, e.g.
    # a "0 0 0 0 0 0" base date), but the original message claimed
    # "must be > 0"; the message now matches the check.
    return 'Year must be >= 0.' if ( $date[0] < 0 );
    # Correct number of days in February if this is a leap year.
    $months[1]{days} = $months[1]{days} + 1 if ( isLeapYear($date[0]) );
    return 'Month must be in the range [1,12].' if ( $date[1] < 1 or $date[1] > 12 ) ;
    return "Days must be in the range [1,$months[$date[1]-1]{days}] for $months[$date[1]-1]{month} in year $date[0]." if ( $date[2] < 1 or $date[2] > $months[$date[1]-1]{days} );
    # NOTE(review): the upper bounds below are inclusive (24/60/60),
    # presumably to tolerate end-of-interval stamps -- confirm intent.
    return 'Hours must be in the range [0,24].' if ( $date[3] < 0 or $date[3] > 24 );
    return 'Minutes must be in the range [0,60].' if ( $date[4] < 0 or $date[4] > 60 );
    return 'Seconds must be in the range [0,60].' if ( $date[5] < 0 or $date[5] > 60 );
    return '';
}
# Gregorian leap-year test: divisible by 4 and not by 100, or divisible
# by 400.  Returns 1 for a leap year, 0 otherwise.
sub isLeapYear {
    my $year = shift(@_);
    if ( ($year % 4 == 0) and ($year % 100 != 0) or ($year % 400 == 0) ) {
        return 1;
    } else {
        return 0;
    }
}
sub sanitizeString {
    # Strip double quotes and any leading/trailing whitespace from a
    # diag_table token, returning the cleaned copy.
    my ($string) = @_;
    $string =~ tr/"//d;
    $string =~ s/^\s+|\s+$//g;
    return $string;
}
__END__
=head1 NAME
diag_table_chk - Parse a diag_table, and report the number of files, max fields, and parse errors
=head1 SYNOPSIS
diag_table_chk [-h|--help]
diag_table_chk [-v|--verbose] I<diag_table>
diag_table_chk [-v|--verbose] -x I<xml_file> I<experiment>
=head1 DESCRIPTION
B<diag_table_chk> will parse a diag_table and report on the number of
files in the diag_table, the max fields used per file, and will give
warnings on any parse errors found in the format of 'WARNING(<line_number>)'.
=head1 OPTIONS
=over 8
=item B<-h>, B<--help>
Display usage information
=item B<-v>, B<--verbose>
Display the files and fields that were found.
=item B<-x>, B<--xml> <xml_file> <experiment>
Read the diagnostic table file from I<<experiment>> from the I<<xml_file>>.
=item <diag_table>
The file name of the diagnostic table to check
=back
=head1 EXAMPLE
> diag_table_chk -x SM2.1U-LM3V.xml SM2.1U_Control-1990_lm3v_pot_A1
Table Descriptor: NOT SET
Base Date: 0101 01 01 0 0 0
Number of files: 14
Max fields per file: 93
Number of warnings: 2
WARNING(3): The table descriptor and the base date must be set before any files or fields.
WARNING(206): Time sampling method must be one of (.true., mean, average, avg, .false., none, point, maximum, max, minimum, min, diurnal[#]).
=head1 AUTHOR
Seth Underwood <sunderwood@hpti.com>
=head1 BUGS
No known bugs at this time.
Report any bug to the author.
=cut

17
bin/environs.gfdl_ws_32.intel

@ -0,0 +1,17 @@
# Build/run environment for GFDL 32-bit Intel workstations (sourced from csh).
# Loads the Intel 11.0 toolchain plus HDF5/netCDF modules, then sets the
# variables consumed by the mkmf templates and the MOM run scripts.
source $MODULESHOME/init/csh
module use -a /home/fms/local/modulefiles
module purge
module load ifort.11.0.074
module load icc.11.0.074
module load idb.10.1.35
module load hdf5-1.8.3
module load netcdf-4.0.1
setenv OMP_NUM_THREADS 1
#
setenv NC_BLKSZ 64K
setenv FMS_ARCHIVE /archive/fms
setenv PATH ${PATH}:.
setenv netcdf3_inc_dir "/usr/local/netcdf-3.6.2/include"
setenv netcdf3_lib_dir "/usr/local/netcdf-3.6.2/lib"
setenv mpirunCommand "mpirun -np"

14
bin/environs.gfdl_ws_64.gnu

@ -0,0 +1,14 @@
# Build/run environment for GFDL 64-bit workstations, GNU toolchain
# (sourced from csh).
source $MODULESHOME/init/csh
module purge
module rm netcdf hdf5
module load mpich2/1.2.1p1
module use -a /home/fms/local/modulefiles
module load hdf5/1.8.5-patch1-gnu-4
module load netcdf/4.1.1-gnu-4
#
setenv PATH ${PATH}:.
# NOTE(review): this launcher lives in an ifort-built mpich2 tree even
# though this is the GNU environment file -- confirm it is intended.
setenv mpirunCommand "/net2/nnz/opt/mpich2-1.3_ifort11_x64/bin/mpirun -np"
setenv FMS_ARCHIVE /archive/fms
# BUG FIX: removed a second, duplicate `setenv PATH ${PATH}:.` that
# appended "." to PATH twice.

16
bin/environs.gfdl_ws_64.intel

@ -0,0 +1,16 @@
# Build/run environment for GFDL 64-bit workstations, Intel toolchain
# (sourced from csh).
source $MODULESHOME/init/csh
module use -a /home/fms/local/modulefiles
module purge
module rm netcdf hdf5
module load ifort/11.1.073
module load icc/11.1.073
module load hdf5/1.8.6
module load netcdf/4.1.2
module load mpich2/1.2.1p1
#
setenv PATH ${PATH}:.
setenv mpirunCommand "/net2/nnz/opt/mpich2-1.3_ifort11_x64/bin/mpirun -np"
setenv FMS_ARCHIVE /archive/fms
# BUG FIX: removed a second, duplicate `setenv PATH ${PATH}:.` that
# appended "." to PATH twice.

7
bin/environs.gfortran

@ -0,0 +1,7 @@
# Minimal gfortran build/run environment (sourced from csh): no modules,
# just the variables consumed by the mkmf templates and run scripts.
setenv NC_BLKSZ 64K
setenv FMS_ARCHIVE /archive/fms
setenv PATH ${PATH}:.
setenv netcdf3_inc_dir "/usr/local/netcdf-3.6.2/include"
setenv netcdf3_lib_dir "/usr/local/netcdf-3.6.2/lib"
setenv mpirunCommand "mpirun -np"

16
bin/environs.hpcs.intel

@ -0,0 +1,16 @@
# Build/run environment for the HPCS cluster, Intel toolchain (sourced
# from tcsh).  Loads Intel 11.0, SGI MPT and HDF5/netCDF, then sets the
# variables consumed by the mkmf templates and run scripts.
source /opt/modules/default/init/tcsh
module purge
module load ifort.11.0.074
module load icc.11.0.074
module load idb.11.0.034
module load scsl-1.5.1.0
module load mpt-1.18
module load hdf5-1.8.1
module load netcdf-4.0.1
setenv NC_BLKSZ 64K
setenv FMS_ARCHIVE /archive/fms
setenv PATH ${PATH}:.
setenv netcdf3_inc_dir "/usr/local/netcdf-3.6.2/include"
setenv netcdf3_lib_dir "/usr/local/netcdf-3.6.2/lib"
setenv mpirunCommand "mpirun -np"

3
bin/environs.ibm.xlf

@ -0,0 +1,3 @@
# Run environment for the IBM / XLF platform: single-threaded OpenMP.
# NOTE(review): uses Bourne-shell `export`, unlike the csh `setenv` in the
# other environs files -- presumably sourced from sh on this platform.
export OMP_NUM_THREADS=1
export BG_APPTHREADDEPTH=1

8
bin/environs.nci

@ -0,0 +1,8 @@
# Build/run environment for NCI systems (sourced from csh): Intel
# compilers, HDF5/netCDF and Open MPI via unversioned default modules.
source /etc/profile.d/nf_csh_modules
module purge
module load intel-fc
module load intel-cc
module load hdf5
module load netcdf
module load openmpi
setenv mpirunCommand "mpirun -np"

21
bin/environs.ncrc1.intel

@ -0,0 +1,21 @@
#This file should set all the necessary system environment variables
#and/or load all the necessary modules for the code to compile and run on the specific platform.
#Users need to find these for their systems.
#
source $MODULESHOME/init/csh
module rm PrgEnv-pgi PrgEnv-pathscale netcdf
module load PrgEnv-intel/3.1.29
module load hdf5/1.8.4.1
module load netcdf/4.0.1.3
module load nco
module list
setenv MPICH_MAX_SHORT_MSG_SIZE 8000
setenv NC_BLKSZ 1M
setenv F_UFMTENDIAN big
setenv OMP_NUM_THREADS 1
#
setenv FMS_ARCHIVE /lustre/fs/archive/fms
# Cray systems launch MPI jobs with aprun rather than mpirun.
setenv mpirunCommand "aprun -n"
setenv PATH ${PATH}:.
# netCDF-3 paths used by the preprocessing tools.
setenv netcdf3_inc_dir "/opt/cray/netcdf/3.6.2/netcdf-gnu/include"
setenv netcdf3_lib_dir "/opt/cray/netcdf/3.6.2/netcdf-gnu/lib"

15
bin/environs.ncrc2.gnu

@ -0,0 +1,15 @@
# Build/run environment for NCRC (Gaea) phase 2, GNU toolchain (sourced
# from csh).  Swaps out every other PrgEnv before loading PrgEnv-gnu.
source $MODULESHOME/init/csh
module use -a /ncrc/home2/fms/local/modulefiles
module unload PrgEnv-pgi PrgEnv-pathscale PrgEnv-intel PrgEnv-gnu PrgEnv-cray
module unload netcdf fre fre-commands
module load PrgEnv-gnu
module load hdf5/1.8.8
module load netcdf/4.2.0
module list
setenv MPICH_MAX_SHORT_MSG_SIZE 8000
setenv KMP_STACKSIZE 512m
setenv NC_BLKSZ 1M
setenv mpirunCommand "aprun -n"
setenv PATH ${PATH}:.

21
bin/environs.ncrc2.intel

@ -0,0 +1,21 @@
#This file should set all the necessary system environment variables
#and/or load all the necessary modules for the code to compile and run on the specific platform.
#Users need to find these for their systems.
#
source $MODULESHOME/init/csh
module rm PrgEnv-pgi PrgEnv-pathscale netcdf
module load PrgEnv-intel
module swap intel intel/12.0.5.220
module load hdf5/1.8.7
module load netcdf/4.1.3
module list
setenv MPICH_MAX_SHORT_MSG_SIZE 8000
setenv NC_BLKSZ 1M
setenv F_UFMTENDIAN big
# setenv OMP_NUM_THREADS 1
#
# setenv FMS_ARCHIVE /lustre/fs/archive/fms
# Cray systems launch MPI jobs with aprun rather than mpirun.
setenv mpirunCommand "aprun -n"
setenv PATH ${PATH}:.
# setenv netcdf3_inc_dir "/opt/cray/netcdf/3.6.2/netcdf-gnu/include"
# setenv netcdf3_lib_dir "/opt/cray/netcdf/3.6.2/netcdf-gnu/lib"

2
bin/environs.workstation.gfort

@ -0,0 +1,2 @@
# Minimal workstation gfortran environment: only the MPI launcher prefix.
setenv mpirunCommand "mpirun -np"

17
bin/environs.workstation.intel

@ -0,0 +1,17 @@
# Build/run environment for Intel workstations (sourced from csh).
# NOTE(review): identical content to environs.gfdl_ws_32.intel -- consider
# keeping the two in sync or consolidating.
source $MODULESHOME/init/csh
module use -a /home/fms/local/modulefiles
module purge
module load ifort.11.0.074
module load icc.11.0.074
module load idb.10.1.35
module load hdf5-1.8.3
module load netcdf-4.0.1
setenv OMP_NUM_THREADS 1
#
setenv NC_BLKSZ 64K
setenv FMS_ARCHIVE /archive/fms
setenv PATH ${PATH}:.
setenv netcdf3_inc_dir "/usr/local/netcdf-3.6.2/include"
setenv netcdf3_lib_dir "/usr/local/netcdf-3.6.2/lib"
setenv mpirunCommand "mpirun -np"

73
bin/list_files_with_tag

@ -0,0 +1,73 @@
#!/usr/bin/perl
#Contact: arl, pjk
#Usage: list_files_with_tag tagname
# Returns list of files under CVS control, relative to current
# directory, which can be updated or checked out with the tag tagname.
#Modified to also list files not currently checked out (arl, 4/2002)
#Version: $Id: list_files_with_tag,v 1.1.2.1 2013/12/18 17:47:54 Niki.Zadeh Exp $
#test that a tagname was given as an argument
if( "$#ARGV" ne "0" ) {
print "\nUsage: list_files_with_tag tagname\n\n";
print " Returns list of files under CVS control, relative to current\n";
print " directory, which can be updated or checked out with the tag tagname.\n\n";
exit;
}
$tag = @ARGV[0];
#use cvs status on files in the current directory to determine which
#have the given tag. This includes those files which have been deleted
#on the branch tag. Then parse the output into %files.
@cvsstatus = `cvs status -v 2>&1`;
$i = 0;
# Walk the "cvs status -v" output line by line, tracking the current
# directory and file as we go.
while($i <= $#cvsstatus) {
if( $cvsstatus[$i] =~ /cvs status: Examining (\S+)/ ) {
$thisdir = $1;
}
elsif( $cvsstatus[$i] =~ /^File: \w/ ) {
@thisline = split ' ',$cvsstatus[$i];
$currentfile = $thisline[1];
}
elsif( $cvsstatus[$i] =~ /Existing Tags/ ) {
#skip a line
$i++;
#Each tag line is of the form tag (branch/revision...)
# The substitution strips the parenthesized revision; it also serves as
# the loop condition -- the tag list ends at the first non-matching line.
while ($cvsstatus[$i] =~ s/\(.+\)//g) {
#remove blanks
$cvsstatus[$i] =~ s/\s+//g;
#create hash of files (unique list). Don't print "./" in front of files.
# NOTE(review): the requested tag is matched against the listed tag used
# as a regex pattern; tags containing regex metacharacters could
# misbehave -- confirm tags are always plain identifiers.
if ($tag =~ /^$cvsstatus[$i]$/) {
if( "$thisdir" eq "." ) {
$files{"$currentfile"} = 1;
}
else {
$files{"$thisdir/$currentfile"} = 1;
}
}
$i++;
}
}
$i++;
}
#Add list of files that would be checked out with this tag.
#This will include new files not currently checked out.
@cvsupdate = `cvs update -p -d -r $tag 2>&1`;
$i = 0;
while($i <= $#cvsupdate ) {
if( $cvsupdate[$i] =~ /Checking out/ ) {
@line = split(' ', $cvsupdate[$i]);
$files{"$line[2]"} = 1;
}
$i++;
}
# Print the unique, sorted file list, one per line.
@unique_files = sort(keys(%files));
if( @unique_files ) {
foreach $file (@unique_files) {
print "$file\n";
}
}

98
bin/list_paths

@ -0,0 +1,98 @@
#!/bin/csh -ef
# $Id: list_paths,v 1.1.2.1 2013/12/18 17:47:54 Niki.Zadeh Exp $
#-----------------------------------------------------------------------
# list_paths: CVS administrative script
#
# AUTHOR: V. Balaji (vb@gfdl.gov)
# SGI/GFDL Princeton University
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# For the full text of the GNU General Public License,
# write to: Free Software Foundation, Inc.,
# 675 Mass Ave, Cambridge, MA 02139, USA.
#-----------------------------------------------------------------------
# script to be run from the CVSROOT/modules file to create path lists
# $1 contains the name of the archive being checked out
# this script creates two files:
# path_names contains all the source files ( *.{c,C,f,F,fh,f90,F90,h,H,inc} )
# path_names.html contains all the doc files ( *.{html,ps,txt}, README, readme )
# NOTE: if these files exist, they are appended to.
# This is necessary, since for aliases that checkout multiple
# directories you need to keep the results from earlier checkouts.
# This could yield unexpected results if you use the same working
# directory for different experiments using different modules. You
# must remove these files if beginning a fresh experiment.
# normalize the argument list with getopt so "-o file" parsing below is simple
set argv = (`getopt o: $*`)
set out = "$cwd/path_names"
#---------------------------------------------------
# option parsing: -o <file> overrides the default output file name;
# getopt terminates the option list with "--"
while ("$argv[1]" != "--")
switch ($argv[1])
case -o:
set out = $argv[2]; shift argv; breaksw
endsw
shift argv
end
shift argv
#---------------------------------------------------
# allow the output files to be overwritten even if the user set noclobber
unset noclobber
if( $?DEBUG )echo Running $0 in $cwd, args $*
# temporary work files derived from the output name
set src = "$out.src.tmp"
set doc = "$out.doc.tmp"
set outdoc = "$out.html"
touch $out # create the file if it doesn't exist
# start from the existing list so repeated checkouts accumulate (see NOTE above)
cp $out $src
find $* -type f \
\( -name \*.c \
-o -name \*.C \
-o -name \*.f \
-o -name \*.fh \
-o -name \*.F \
-o -name \*.f90 \
-o -name \*.F90 \
-o -name \*.h \
-o -name \*.H \
-o -name \*.inc \
\) -print >> $src
# de-duplicate by basename while preserving first-seen order:
# append the basename as a sort key, number the lines, keep the first
# occurrence of each basename, restore original order, print the path.
# NOTE(review): "\0" as the whole-match reference is a GNU sed extension
# ("&" is the portable spelling) -- confirm on non-GNU platforms.
sed 's:.*/\(.*\):\0 \1:' $src | nl | sort --key 3 -u | sort -n | awk '{print $2}' > $out
echo "A list of the files you checked out is in the file $out ..."
touch $doc # create the file if it doesn't exist
# collect documentation files ($src is reused here as a scratch file)
find $* -type f \
\( -name \*.html \
-o -name \*.ps \
-o -name \*.txt \
-o -name \*.pdf \
-o -name \*.jpg \
-o -name readme \
-o -name read_me \
-o -name README \
\) -print > $src
# nothing found: clean up the temporaries and quit without writing html
if ( -z $src ) then
rm -f $doc $src
exit
endif
# $src has non-zero size (i.e some doc exists)
cat $src >> $doc
#write path_names.html file
echo "<title>Documentation in current working directory</title>" > $outdoc
echo "<h1>Documentation in current working directory</h1>" >> $outdoc
sort -u $doc | awk '{print "<p><a href=\"" $1 "\">" $1 "</a>"}' >> $outdoc
echo '<p><hr><small>This file was automatically generated by list_paths.' >> $outdoc
echo '$Revision: 1.1.2.1 $ $Date: 2013/12/18 17:47:54 $' >> $outdoc
rm -f $doc $src

472
bin/mkmf

@ -0,0 +1,472 @@
#!/usr/bin/perl
#-----------------------------------------------------------------------
# mkmf: Perl script for makefile construction
#
# AUTHOR: V. Balaji (v.balaji@noaa.gov)
# Princeton University/GFDL
#
# Full web documentation for mkmf:
# http://www.gfdl.noaa.gov/~vb/mkmf.html
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# For the full text of the GNU General Public License,
# write to: Free Software Foundation, Inc.,
# 675 Mass Ave, Cambridge, MA 02139, USA.
#-----------------------------------------------------------------------
$ENV{'LANG'} = 'C';
require 5;
use strict;
use File::Basename;
use Getopt::Std;
use Config; # use to put in platform-specific stuff
use vars qw( $opt_a $opt_c $opt_d $opt_f $opt_l $opt_m $opt_o $opt_p $opt_t $opt_v $opt_x $opt_I ); # declare these global to be shared with Getopt:Std
#subroutines
sub ensureTrailingSlash {
#ensure exactly one trailing slash on a directory name;
#modifies the caller's variable in place through the @_ alias
local $/ = '/'; # make chomp strip a trailing '/' instead of a newline
chomp $_[0]; $_[0] .= '/'; # $_[0] (scalar element), not the slice @_[0]
}
my $version = '$Id: mkmf,v 1.1.2.1 2013/12/18 17:47:54 Niki.Zadeh Exp $ ';
# initialize variables: use getopts for these
getopts( 'a:I:c:dfm:o:l:p:t:vx' ) || die "\aSyntax: $0 [-a abspath] [-c cppdefs] [-d] [-f] [-m makefile] [-o otherflags] ][-p program] [-t template] [-v] [-x] [-I \"space separated include dirs\"] [targets]\n";
$opt_v = 1 if $opt_d; # debug flag turns on verbose flag also
print "$0 $version\n" if $opt_v;
my $mkfile = $opt_m || 'Makefile';
print "Making makefile $mkfile ...\n" if $opt_v;
$opt_p = 'a.out' unless $opt_p; # set default program name
my @targets = '.'; # current working directory is always included in targets
push @targets, @ARGV; # then add remaining arguments on command line
ensureTrailingSlash($opt_a) if $opt_a;
#some generic declarations
# (file-scoped scratch variables shared by the loops and subs below)
my( $file, $include, $line, $module, $name, $object, $path, $source, $suffix, $target, $word );
my @list;
#some constants
my $endline = $/;
# suffixes recognized as compilable sources and as include files
my @src_suffixes = ( q/\.F/, q/\.F90/, q/\.c/, q/\.f/, q/\.f90/ );
my @inc_suffixes = ( q/\.H/, q/\.fh/, q/\.h/, q/\.inc/, q/\.h90/ );
# push @inc_suffixes, @src_suffixes; # sourcefiles can be includefiles too: DISALLOW, 6 May 2004
# suffixes for the target (mkmf -p): if isn't on the list below it's a program
my @tgt_suffixes = ( q/\.a/ );
my %compile_cmd = ( # command to create .o file from a given source file suffix
q/.F/ => q/$(FC) $(CPPDEFS) $(CPPFLAGS) $(FPPFLAGS) $(FFLAGS) $(OTHERFLAGS) -c/,
q/.F90/ => q/$(FC) $(CPPDEFS) $(CPPFLAGS) $(FPPFLAGS) $(FFLAGS) $(OTHERFLAGS) -c/,
q/.c/ => q/$(CC) $(CPPDEFS) $(CPPFLAGS) $(CFLAGS) $(OTHERFLAGS) -c/,
q/.f/ => q/$(FC) $(FFLAGS) $(OTHERFLAGS) -c/,
q/.f90/ => q/$(FC) $(FFLAGS) $(OTHERFLAGS) -c/ );
my %delim_match = ( q/'/ => q/'/, # hash to find includefile delimiter pair
q/"/ => q/"/,
q/</ => q/>/ );
#formatting command for MAKEFILE, keeps very long lines to 256 characters
# (the ^<<< field consumes the leading portion of $line on each write)
format MAKEFILE =
^<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< \~
$line
.
sub print_formatted_list{
#this routine, in conjunction with the format line above, can be used to break up long lines
# it is currently used to break up the potentially long defs of SRC, OBJ, CPPDEFS, etc.
# not used for the dependency lists
$line = "@_";
local $: = " \t\n"; # the default formatting word boundary includes the hyphen, but not here
# NOTE(review): each write consumes a chunk of $line through the ^<<< format
# field above, so this loop terminates; only active with -f (fixed-width lines)
while ( $opt_f && length $line > 254 ) {
write MAKEFILE, $line;
}
# whatever remains (or the whole line when -f is off) is printed verbatim
print MAKEFILE $line unless $line eq '';
print MAKEFILE "\n";
}
#begin writing makefile
open MAKEFILE, ">$mkfile" or die "\aERROR opening file $mkfile for writing: $!\n";
printf MAKEFILE "# Makefile created by %s $version\n\n", basename($0);
print MAKEFILE "SRCROOT = $opt_a\n\n" if $opt_a; # make abspath a variable
# emit the preprocessor macro list (-c), with AIX/xlf special-casing
if ( $opt_c ) {
$opt_c =~ s/\s+$//;
if ( $Config{osname} eq 'aix' ) {
$opt_c .= ' -D__aix';
#AIX fortran (xlf) requires -WF, in front, comma delimiter, no spaces
my $cppdefs_xlf = '-WF "' . $opt_c . '"';
$cppdefs_xlf =~ s/,/\\,/g; # escape any commas already there
$cppdefs_xlf =~ s/\s+/,/g; # replace whitespace with commas
&print_formatted_list("CPPDEFS_XLF = $cppdefs_xlf");
$compile_cmd{'.F'} = q/$(FC) $(CPPDEFS_XLF) $(FFLAGS) $(OTHERFLAGS) -c/;
$compile_cmd{'.F90'} = q/$(FC) $(CPPDEFS_XLF) $(FFLAGS) $(OTHERFLAGS) -c/;
}
&print_formatted_list("CPPDEFS = $opt_c") if $opt_c;
}
print MAKEFILE "\nOTHERFLAGS = $opt_o" if $opt_o;
#vb 2009-12-17, include now comes after vardefs, so that it can modify make variables
print MAKEFILE "\n\ninclude $opt_t\n\n" if $opt_t; #include template if supplied
print MAKEFILE "\n.DEFAULT:\n\t-echo \$@ does not exist.\n";
print MAKEFILE "all: $opt_p\n"; # first target should be program, so you can type just 'make'
#if cppdefs flag is present, look for changes in cppdefs
# %chgdefs collects macros added or removed since the last run, recorded
# in the .$opt_p.cppdefs state file; objects using them are removed later
my %chgdefs;
if ( $opt_c ) {
#split argument of -c into newdefs
my %newdefs;
foreach ( split /\s*-D/, $opt_c ) {
$newdefs{$_} = 1;
}
#get olddefs from file .cppdefs
my %olddefs;
my $cppdefsfile = ".$opt_p.cppdefs";
if ( -f $cppdefsfile ) {
open CPPFILE, $cppdefsfile or die "\aERROR opening cppdefsfile $cppdefsfile: $!\n";
while ( <CPPFILE> ) {
foreach $word ( split ) {
$olddefs{$word} = 1;
}
}
close CPPFILE;
#get words that are not in both newdefs and olddefs
#if you move this foreach{} outside the enclosing if{} then
# all cppdefs will be considered changed if there is no .cppdefs file.
foreach ( keys %newdefs, keys %olddefs ) {
$chgdefs{$_} = 1 unless( $newdefs{$_} && $olddefs{$_} );
}
}
#write current cppdefs list to file .cppdefs
open CPPFILE, ">$cppdefsfile";
my @newdefs = keys %newdefs;
print CPPFILE " @newdefs\n";
close CPPFILE;
if( $opt_d ) {
@list = keys %newdefs; print "newdefs= @list\n";
@list = keys %olddefs; print "olddefs= @list\n";
@list = keys %chgdefs; print "chgdefs= @list\n";
}
}
delete $chgdefs{''};
# get a list of sourcefiles to be treated from targets
# (a sourcefile is any regular file with a suffix matching src_suffixes)
# if target is a sourcefile, add to list
# if target is a directory, get all sourcefiles there
# if target is a regular file that is not a sourcefile, look for a
# sourcefile on last work of each line, rest of line (if present) is the
# compile command to apply to this file.
#@sources will contain a unique list of sourcefiles in targets
#@objects will contain corresponding objects
#separate targets into directories and files
my %scanned; # list of directories/files already scanned
my %actual_source_of; # hash returning sourcefile from object
my %source_of; # sourcefile from object, using SRCROOT variable if present
my @includepaths;
my $scanOrder = 0; # used to remember order of directory scan
foreach $target ( @targets ) {
print STDERR '.' unless $opt_v; # show progress on screen (STDERR is used because it is unbuffered)
if ( $opt_a and substr($target,0,1) ne '/' ) {
# if an abs_path exists, attach it to all relative paths
$target = $opt_a . $target;
}
ensureTrailingSlash($target) if( -d $target );
print "target=$target\n" if $opt_v;
#directory
if ( -d $target && !$scanned{$target} ) {
print "Processing directory $target\n" if $opt_v;
opendir DIR, $target;
my @files = readdir DIR;
#find all sourcefiles in directory DIR
foreach ( @files ) {
( $name, $path, $suffix ) = fileparse( "$target$_", @inc_suffixes );
push @includepaths, $target if $suffix; # is this line doing anything? looks like includepaths='' later...
( $name, $path, $suffix ) = fileparse( "$target$_", @src_suffixes );
$object = "$name.o";
# first occurrence of an object name wins; later duplicates are ignored
if( $suffix && !$actual_source_of{$object} ) {
if ( $opt_a and substr($path,0,1) ne '/' ) { # if an abs_path exists, attach it to all relative paths
ensureTrailingSlash($path);
$path = '' if $path eq './';
$source_of{$object} = '$(SRCROOT)' . "$path$name$suffix";
$path = $opt_a . $path;
}
$actual_source_of{$object} = "$path$name$suffix";
$source_of{$object} = $actual_source_of{$object} unless $source_of{$object};
}
}
closedir DIR;
$scanned{$target} = $scanOrder++;
} elsif ( -f $target ) {
#file: check if it is a sourcefile
( $name, $path, $suffix ) = fileparse( $target, @src_suffixes );
$object = "$name.o";
if ( !$actual_source_of{$object} ) {
if ( $suffix ) {
$path = '' if $path eq './';
if ( $opt_a and substr($path,0,1) ne '/' ) { # if an abs_path exists, attach it to all relative paths
ensureTrailingSlash($path);
$source_of{$object} = '$(SRCROOT)' . "$path$name$suffix";
$path = $opt_a . $path;
}
$actual_source_of{$object} = "$path$name$suffix";
$source_of{$object} = $actual_source_of{$object} unless $source_of{$object};
} else {
( $name, $path, $suffix ) = fileparse( $target, @inc_suffixes );
if ( ! $suffix ) {
#not a sourcefile: assume it contains list of sourcefiles
#specify files requiring special commands (e.g special compiler flags) thus:
# f90 -Oaggress a.f90
#if last word on line is not a valid sourcefile, line is ignored
open CMDFILE, $target;
print "Reading commands from $target...\n" if $opt_v;
while ( <CMDFILE> ) {
next if ( $_ eq "\n");
$line = $_;
my @wordlist = split;
# NOTE(review): one-element slice; $wordlist[-1] is the conventional form
$file = @wordlist[$#wordlist]; # last word on line
( $name, $path, $suffix ) = fileparse( $file, @src_suffixes );
print "file=$file suffix=$suffix in $target\n" if $opt_d;
$object = "$name.o";
if ( $suffix && !$actual_source_of{$object} ) {
$path = '' if $path eq './';
if ( $opt_a and ( substr($path,0,1) ne '/' ) ) { # if an abs_path exists, attach it to all relative paths
ensureTrailingSlash($path);
$source_of{$object} = '$(SRCROOT)' . "$path$name$suffix";
$path = $opt_a . $path;
}
$actual_source_of{$object} = "$path$name$suffix";
$source_of{$object} = $actual_source_of{$object} unless $source_of{$object};
$scanned{$path} = $scanOrder++ unless $scanned{$path};
#command for this file is all of line except the filename
# NOTE(review): $file is interpolated as a regex here; filenames with
# metacharacters (e.g. "+") could mis-split the line -- confirm intent
$line =~ /\s+$file/; $line=$`;
if ( $line ) {
$compile_cmd{"$name$suffix"} = $line;
print "Special command for file $name$suffix: ($line)\n" if $opt_v;
}
}
if ( ! $suffix ) { # look for include files
( $name, $path, $suffix ) = fileparse( $file, @inc_suffixes );
if ( $opt_a and ( substr($path,0,1) ne '/' ) ) { # if an abs_path exists, attach it to all relative paths
ensureTrailingSlash($path);
$path = $opt_a . $path;
}
print "file=$file path=$path suffix=$suffix order=$scanOrder in $target\n" if $opt_d;
# anything that's found here is an includefile but not a sourcefile...
# just include directory in scan
$scanned{$path} = $scanOrder++
if ( $suffix && !$scanned{$path} );
}
}
close CMDFILE;
}
}
}
}
}
delete $actual_source_of{''};
# sort scanned directories by scan order
sub ascendingScanOrder { $scanned{$a} <=> $scanned{$b}; }
my @dirs = sort ascendingScanOrder keys %scanned;
my @sources = values %source_of;
my @objects = keys %source_of;
if( $opt_d ) {
print "DEBUG: dirs= @dirs\n";
print "DEBUG: sources= @sources\n";
print "DEBUG: objects= @objects\n";
}
my %obj_of_module; # hash returning name of object file containing module
my %modules_used_by; # hash of modules used by a given source file (hashed against the corresponding object)
my %includes_in; # hash of includes in a given source file (hashed against the corresponding object)
my %has_chgdefs; # hash of files contains cppdefs that have been changed
#subroutine to scan file for use and module statements, and include files
# first argument is $object, second is $file
# side effects: fills %obj_of_module, %modules_used_by, %includes_in,
# %has_chgdefs, marks the file in %scanned, and unlinks stale .o files
sub scanfile_for_keywords {
my $object = shift;
my $file = shift;
local $/ = $endline;
#if file has already been scanned, return: but first check if any .o needs to be removed
if( $scanned{$file} ) {
if( $has_chgdefs{$file} and -f $object ) {
unlink $object or die "\aERROR unlinking $object: $!\n";
print " Object $object is out-of-date because of change to cppdefs, removed.\n" if $opt_v;
}
return;
}
print "Scanning file $file of object $object ...\n" if $opt_v;
open FILE, $file or die "\aERROR opening file $file of object $object: $!\n";
foreach $line ( <FILE> ) {
# Fortran "module <name>" statement: record which object provides it
if ( $line =~ /^\s*module\s+(\w*)/ix ) {
if ( $1 ) {
my $module = lc $1;
# the same module in two sourcefiles is a fatal ambiguity
# ("module procedure" lines are exempted)
if ( $obj_of_module{$module} && $module ne "procedure" ) {
die "\a\nAMBIGUOUS: Module $module is associated with $file as well as $actual_source_of{$obj_of_module{$module}}.\n";
}
$obj_of_module{$module} = $object;
}
}
# Fortran "use <name>" statement: record the dependency
if ( $line =~ /^\s*use\s*(\w*)/ix ) {
$modules_used_by{$object} .= ' ' . lc $1 if $1;
}
# cpp or Fortran include line, quoted with ', " or <>
# NOTE(review): $delim_match{\1} relies on interpolating the hash with the
# backreference as key to match the closing delimiter -- verify this still
# behaves as intended on modern perls before touching it
if ( $line =~ /^[\#\s]*include\s*(['""'<])([\w\.\/]*)$delim_match{\1}/ix ) {
$includes_in{$file} .= ' ' . $2 if $2;
}
# if the file mentions any macro whose definition changed, its object is stale
foreach ( keys %chgdefs ) {
$_ .= '='; /\s*=/; $word=$`; #cut string at =sign, else whole string
if ( $line =~ /\b$word\b/ ) {
$has_chgdefs{$file} = 1;
if ( -f $object ) {
unlink $object or die "\aERROR unlinking $object: $!\n";
print " Object $object is out-of-date because of change to cppdef $word, removed.\n" if $opt_v;
}
}
}
}
close FILE;
$scanned{$file} = 1;
print " uses modules=$modules_used_by{$object}, and includes=$includes_in{$file}.\n" if $opt_d;
}
# scan every known sourcefile once up front
foreach $object ( @objects ) {
&scanfile_for_keywords( $object, $actual_source_of{$object} );
}
my %off_sources; # list of source files not in current directory
my %includes; # global list of includes
my %used; # list of object files that are used by others
my @cmdline;
# for each file in sources, write down dependencies on includes and modules
foreach $object ( sort @objects ) {
print STDERR '.' unless $opt_v; # show progress on screen (STDERR is used because it is unbuffered)
my %is_used; # hash of objects containing modules used by current object
my %obj_of_include; # hash of includes for current object
$is_used{$object} = 1; # initialize with current object so as to avoid recursion
print "Collecting dependencies for $object ...\n" if $opt_v;
# @cmdline accumulates the makefile dependency line for this object
@cmdline = "$object: $source_of{$object}";
( $name, $path, $suffix ) = fileparse( $actual_source_of{$object}, @src_suffixes );
$off_sources{$source_of{$object}} = 1 unless( $path eq './' or $path eq '' );
#includes: done in subroutine since it must be recursively called to look for embedded includes
@includepaths = '';
&get_include_list( $object, $actual_source_of{$object} );
#modules
foreach $module ( split /\s+/, $modules_used_by{$object} ) {
$target = $obj_of_module{$module};
#we need to check target ne '' also below, since it is not mkmf's privilege
#to complain about modules not found. That should be left to the compiler.
if( $target and !$is_used{$target} ) {
$is_used{$target} = 1;
push @cmdline, $target;
$used{$target} = 1;
print " found module $module in object $target ...\n" if $opt_v;
}
}
#write the command line: if no file-specific command, use generic command for this suffix
&print_formatted_list(@cmdline);
$file = $actual_source_of{$object};
if ( $compile_cmd{$name.$suffix} ) {
print MAKEFILE "\t$compile_cmd{$name.$suffix}";
} else {
print MAKEFILE "\t$compile_cmd{$suffix}";
}
foreach ( @includepaths ) { # include files may be anywhere in directory array
print MAKEFILE " -I$_" if $_;
}
# user-supplied extra include directories (-I "dir1 dir2 ...")
if ( $opt_I ){
foreach ( split /\s+/, $opt_I ){
print MAKEFILE " -I$_";
}
}
print MAKEFILE "\t$source_of{$object}\n";
# subroutine to seek out includes recursively
# (closes over the file-scoped @cmdline, @includepaths, %includes, etc.)
sub get_include_list {
my( $incfile, $incname, $incpath, $incsuffix );
my @paths;
my $object = shift;
my $file = shift;
foreach ( split /\s+/, $includes_in{$file} ) {
print "object=$object, file=$file, include=$_.\n" if $opt_d;
( $incname, $incpath, $incsuffix ) = fileparse( $_, @inc_suffixes );
if( $incsuffix ) { # only check for files with proper suffix
undef $incpath if $incpath eq './';
if( $incpath =~ /^\// ) {
@paths = $incpath; # exact incpath specified, use it
} else {
@paths = @dirs;
}
foreach ( @paths ) {
local $/ = '/'; chomp; # remove trailing / if present
# NOTE(review): "my $x = ... if cond" has undefined behavior in perl
# (the variable may retain its previous value when cond is false);
# do not copy this pattern
my $newincpath = "$_/$incpath" if $_;
undef $newincpath if $newincpath eq './';
$incfile = "$newincpath$incname$incsuffix";
if ( $opt_a and ( substr($newincpath,0,1) ne '/' ) ) {
$newincpath = '$(SRCROOT)' . $newincpath;
}
print "DEBUG: checking for $incfile in $_ ...\n" if $opt_d;
# first directory (in scan order) containing the include wins
if ( -f $incfile and $obj_of_include{$incfile} ne $object ) {
print " found $incfile ...\n" if $opt_v;
push @cmdline, "$newincpath$incname$incsuffix";
$includes{$incfile} = 1;
chomp( $newincpath, $path );
$off_sources{$incfile} = 1 if $newincpath;
$newincpath = '.' if $newincpath eq '';
push @includepaths, $newincpath unless( grep $_ eq $newincpath, @includepaths );
&scanfile_for_keywords($object,$incfile);
$obj_of_include{$incfile} = $object;
&get_include_list($object,$incfile); # recursively look for includes
last;
}
}
}
}
}
}
#lines to facilitate creation of local copies of source from other directories
#commented out because it makes make default rules kick in
foreach ( keys %off_sources ) {
my $file = basename($_);
$file =~ s/\$\(SRCROOT\)//;
print MAKEFILE "./$file: $_\n\tcp $_ .\n";
}
#objects not used by other objects
#if every object is a module, then only the unused objects
#need to be passed to the linker (see commented OBJ = line below).
#if any f77 or C routines are present, we need complete list
my @unused_objects;
foreach $object ( @objects ) {
push @unused_objects, $object unless $used{$object};
}
# emit the variable lists (wrapped by print_formatted_list when -f is set)
&print_formatted_list( "SRC =", @sources, keys %includes );
&print_formatted_list( "OBJ =", @objects );
# &print_formatted_list( "OBJ =", @unused_objects );
my $noff = scalar keys %off_sources;
&print_formatted_list( "OFF =", keys %off_sources ) if $noff > 0;
#write targets: clean/neat housekeeping, localize, TAGS/tags for editors
print MAKEFILE "clean: neat\n\t-rm -f .$opt_p.cppdefs \$(OBJ) $opt_p\n";
print MAKEFILE "neat:\n\t-rm -f \$(TMPFILES)\n";
print MAKEFILE "localize: \$(OFF)\n\tcp \$(OFF) .\n" if $noff > 0;
print MAKEFILE "TAGS: \$(SRC)\n\tetags \$(SRC)\n";
print MAKEFILE "tags: \$(SRC)\n\tctags \$(SRC)\n";
# final link (or archive) rule, chosen by the -p target's suffix
( $name, $path, $suffix ) = fileparse( $opt_p, @tgt_suffixes );
if( $suffix eq '.a' ) {
print MAKEFILE "$opt_p: \$(OBJ)\n\t\$(AR) \$(ARFLAGS) $opt_p \$(OBJ)\n";
} else {
# opt_l is a new flag added to take care of libraries
print MAKEFILE "$opt_p: \$(OBJ) $opt_l\n\t\$(LD) \$(OBJ) -o $opt_p $opt_l \$(LDFLAGS)\n";
}
close MAKEFILE;
print " $mkfile is ready.\n";
# -x: immediately run make on the generated makefile (replaces this process)
exec 'make', '-f', $mkfile if $opt_x;

182
bin/mkmf.debugtemplate.gfdl_ws_32.intel

@ -0,0 +1,182 @@
# template for the Intel fortran compiler
# typical use with mkmf
# mkmf -t template.ifc -c"-Duse_libMPI -Duse_netCDF" path_names /usr/local/include
############
# commands #
############
FC = ifort
CC = icc
LD = ifort
#########
# flags #
#########
DEBUG = on
REPRO =
VERBOSE =
OPENMP =
##############################################
# Need to use at least GNU Make version 3.81 #
##############################################
need := 3.81
ok := $(filter $(need),$(firstword $(sort $(MAKE_VERSION) $(need))))
ifneq ($(need),$(ok))
$(error Need at least make version $(need). Load module gmake/3.81)
endif
MAKEFLAGS += --jobs=2
NETCDF_ROOT = /home/nnz/local/build/netcdf-4.1.1_ifort11_HDF
MPICH_ROOT = /home/nnz/local/build/mpich2-1.3_ifort11
#MPICH_ROOT = /usr/local/mpich
HDF5_ROOT = /home/nnz/local/build/hdf5-1.8.5-patch1_zlib-1.2.5_ifort11/lib
ZLIB_ROOT = /home/nnz/local/build/zlib-1.2.5
INCLUDE = -I$(NETCDF_ROOT)/include -I$(MPICH_ROOT)/include
FPPFLAGS := -fpp -Wp,-w $(INCLUDE)
FFLAGS := -fno-alias -automatic -safe-cray-ptr -ftz -assume byterecl -i4 -r8 -nowarn
FFLAGS_OPT = -O3 -debug minimal -fp-model precise -override-limits
FFLAGS_DEBUG = -g -O0 -check -check noarg_temp_created -check nopointer -warn -warn noerrors -fpe0 -traceback -ftrapuv
FFLAGS_REPRO = -O2 -debug minimal -no-vec -fp-model precise -override-limits
FFLAGS_OPENMP = -openmp
FFLAGS_VERBOSE = -v -V -what
CFLAGS := -D__IFC $(INCLUDE)
CFLAGS_OPT = -O2 -debug minimal -no-vec
CFLAGS_OPENMP = -openmp
CFLAGS_DEBUG = -O0 -g -ftrapuv -traceback
LDFLAGS :=
LDFLAGS_VERBOSE := -Wl,-V,--verbose,-cref,-M
ifneq ($(REPRO),)
CFLAGS += $(CFLAGS_REPRO)
FFLAGS += $(FFLAGS_REPRO)
endif
ifneq ($(DEBUG),)
CFLAGS += $(CFLAGS_DEBUG)
FFLAGS += $(FFLAGS_DEBUG)
#else
#CFLAGS += $(CFLAGS_OPT)
#FFLAGS += $(FFLAGS_OPT)
endif
ifneq ($(OPENMP),)
CFLAGS += $(CFLAGS_OPENMP)
FFLAGS += $(FFLAGS_OPENMP)
endif
ifneq ($(VERBOSE),)
CFLAGS += $(CFLAGS_VERBOSE)
FFLAGS += $(FFLAGS_VERBOSE)
LDFLAGS += $(LDFLAGS_VERBOSE)
endif
ifeq ($(NETCDF),3)
# add the use_LARGEFILE cppdef
ifneq ($(findstring -Duse_netCDF,$(CPPDEFS)),)
CPPDEFS += -Duse_LARGEFILE
endif
endif
ifneq ($(findstring netcdf-4.0.1,$(LOADEDMODULES)),)
LIBS := -L$(NETCDF_ROOT)/lib -lnetcdf -L$(HDF5_ROOT)/lib -lhdf5_hl -lhdf5 -lcurl -L$(ZLIB_ROOT)/lib -lz
else
LIBS := -L$(NETCDF_ROOT)/lib -lnetcdf
endif
LIBS += -L$(MPICH_ROOT)/lib -lmpich -lpthread -lmpl
LDFLAGS += $(LIBS)
#---------------------------------------------------------------------------
# you should never need to change any lines below.
# see the MIPSPro F90 manual for more details on some of the file extensions
# discussed here.
# this makefile template recognizes fortran sourcefiles with extensions
# .f, .f90, .F, .F90. Given a sourcefile <file>.<ext>, where <ext> is one of
# the above, this provides a number of default actions:
# make <file>.opt create an optimization report
# make <file>.o create an object file
# make <file>.s create an assembly listing
# make <file>.x create an executable file, assuming standalone
# source
# make <file>