diff --git a/demo/app/tensor-statistics.f90 b/demo/app/tensor-statistics.f90
index 8c5cf2d06..9a99d1e7d 100644
--- a/demo/app/tensor-statistics.f90
+++ b/demo/app/tensor-statistics.f90
@@ -1,5 +1,27 @@
 ! Copyright (c), The Regents of the University of California
 ! Terms of use are as specified in LICENSE.txt
+
+module ubounds_m
+  !! This module serves only to support array bounds checking in the main program below
+  implicit none
+
+  type ubounds_t
+    integer, allocatable :: ubounds_(:)
+  contains
+    procedure equals
+    generic :: operator(==) => equals
+  end type
+
+contains
+
+  elemental function equals(lhs, rhs) result(lhs_equals_rhs)
+    class(ubounds_t), intent(in) :: lhs, rhs
+    logical lhs_equals_rhs
+    lhs_equals_rhs = all(lhs%ubounds_ == rhs%ubounds_)
+  end function
+
+end module
+
 program tensor_statistics
   !! This program
   !! 1. Computes the ranges and histograms of input and output tensors saved by
@@ -9,7 +31,7 @@ program tensor_statistics
   ! External dependencies:
   use julienne_m, only : command_line_t, file_t, string_t
   use assert_m, only : assert, intrinsic_array_t
-  use fiats_m, only : ubounds_t
+  use ubounds_m, only : ubounds_t
   use ieee_arithmetic, only : ieee_is_nan
   use iso_fortran_env, only : int64, real64
diff --git a/demo/app/train-cloud-microphysics.F90 b/demo/app/train-cloud-microphysics.F90
index 1187b42e7..93ad7b79d 100644
--- a/demo/app/train-cloud-microphysics.F90
+++ b/demo/app/train-cloud-microphysics.F90
@@ -1,12 +1,11 @@
 ! Copyright (c), The Regents of the University of California
 ! Terms of use are as specified in LICENSE.txt
-program train_on_flat_distribution
+program train_cloud_microphysics
   !! Train a neural network to represent the simplest cloud microphysics model from
   !! the Intermediate Complexity Atmospheric Research Model (ICAR) at
   !! https://github.com/BerkeleyLab/icar.

   !! Intrinsic modules:
-  use ieee_arithmetic, only : ieee_is_nan
   use iso_fortran_env, only : int64, real64

   !! External dependencies:
@@ -19,21 +18,21 @@ program train_on_flat_distribution
   !! Internal dependencies:
   use phase_space_bin_m, only : phase_space_bin_t
   use NetCDF_file_m, only: NetCDF_file_t
-  use ubounds_m, only : ubounds_t
+  use NetCDF_variable_m, only: NetCDF_variable_t, tensors
+  use occupancy_m, only : occupancy_t
   implicit none

-  character(len=*), parameter :: usage = &
-    new_line('a') // new_line('a') // &
-    'Usage: ' // new_line('a') // new_line('a') // &
-    './build/run-fpm.sh run train-cloud-microphysics -- \' // new_line('a') // &
-    '  --base <base-file-name> --epochs <number-of-epochs> \' // new_line('a') // &
+  character(len=*), parameter :: usage = new_line('a') // new_line('a') // &
+    'Usage: ' // new_line('a') // new_line('a') // &
+    './build/run-fpm.sh run train-cloud-microphysics -- \' // new_line('a') // &
+    '  --base <base-file-name> --epochs <number-of-epochs> \' // new_line('a') // &
     '  [--start <first-step>] [--end <last-step>] [--stride <stride>] [--bins <num-bins>] [--report <interval>] [--tolerance <tolerance>]'// &
-    new_line('a') // new_line('a') // &
-    'where angular brackets denote user-provided values and square brackets denote optional arguments.' // new_line('a') // &
+    new_line('a') // new_line('a') // &
+    'where angular brackets denote user-provided values and square brackets denote optional arguments.' // new_line('a') // &
    'The presence of a file named "stop" halts execution gracefully.'
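Because `ubounds_t` wraps its bounds in an allocatable component and binds the elemental `equals` to `operator(==)`, whole arrays of bounds can be compared in a single expression and reduced with `all`, which is how the main program uses it. A minimal sketch of that pattern, assuming only the `ubounds_m` module added above (the array names are illustrative, not part of the patch):

  program bounds_check_sketch
    use ubounds_m, only : ubounds_t
    implicit none
    real qv(10,20,30,5), qc(10,20,30,5)
    type(ubounds_t) bounds(2)

    bounds = [ubounds_t(ubound(qv)), ubounds_t(ubound(qc))] ! wrap each array's upper bounds

    ! equals is elemental, so comparing the array against one element yields a logical array
    if (.not. all(bounds == bounds(1))) error stop "bounds mismatch"
  end program

The same reduction appears below as `all(ubounds == ubounds(1))` in the code this patch removes from read_train_write.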
 type command_line_arguments_t
-  integer num_epochs, start_step, stride, num_bins, report_interval
+  integer num_epochs, start_step, stride, num_bins, report_step
   integer, allocatable :: end_step
   character(len=:), allocatable :: base_name
   real cost_tolerance
@@ -45,28 +44,24 @@ program train_on_flat_distribution
 end type

 integer(int64) t_start, t_finish, clock_rate
+character(len=*), parameter :: config = "training_configuration.json"

 call system_clock(t_start, clock_rate)
-
-associate( &
-  command_line_arguments => get_command_line_arguments(), &
-  training_configuration => training_configuration_t(file_t(string_t("training_configuration.json"))) &
-)
+
#if defined(MULTI_IMAGE_SUPPORT)
-  if (this_image()==1) then
+if (this_image()==1) then
#endif
-    call read_train_write(training_configuration, command_line_arguments, create_or_append_to("cost.plt"))
+  call read_train_write(training_configuration_t(file_t(config)), get_command_line_arguments(), create_or_append_to("cost.plt"))
#if defined(MULTI_IMAGE_SUPPORT)
-  else
-    call read_train_write(training_configuration, command_line_arguments)
-  end if
+else
+  call read_train_write(training_configuration_t(file_t(config)), get_command_line_arguments())
+end if
#endif
-end associate

 call system_clock(t_finish)
 print *,"System clock time: ", real(t_finish - t_start, real64)/real(clock_rate, real64)
-print *,new_line('a') // "______training_cloud_microhpysics done _______"
+print *,new_line('a') // "______train_cloud_microphysics done _______"

contains

@@ -77,16 +72,16 @@ function create_or_append_to(plot_file_name) result(plot_file)
   logical preexisting_plot_file

   inquire(file=plot_file_name, exist=preexisting_plot_file)

-  open(newunit=plot_unit, file=plot_file_name, status="unknown", position="append")
   if (.not. preexisting_plot_file) then
-    write(plot_unit,*) " Epoch Cost (avg)"
+    open(newunit=plot_unit, file=plot_file_name, status="new", action="write")
+    write(plot_unit,'(a)') " Epoch Cost (avg)"
     previous_epoch = 0
   else
+    open(newunit=plot_unit, file=plot_file_name, status="old", position="append", action="write")
     associate(plot_file => file_t(string_t(plot_file_name)))
       associate(lines => plot_file%lines())
         associate(num_lines => size(lines))
-          if (num_lines == 0) then
+          if (num_lines == 0 .or. num_lines == 1) then
             previous_epoch = 0
           else
             block
@@ -99,7 +94,9 @@ function create_or_append_to(plot_file_name) result(plot_file)
       end associate
     end associate
   end if
+
+  plot_file = plot_file_t(plot_file_name, plot_unit, previous_epoch)
+
 end function

 function get_command_line_arguments() result(command_line_arguments)
@@ -109,7 +106,7 @@ function get_command_line_arguments() result(command_line_arguments)
     base_name, epochs_string, start_string, end_string, stride_string, bins_string, report_string, tolerance_string
   real cost_tolerance
   integer, allocatable :: end_step
-  integer num_epochs, num_bins, start_step, stride, report_interval
+  integer num_epochs, num_bins, start_step, stride, report_step

   base_name = command_line%flag_value("--base") ! gfortran 13 seg faults if this is an association
   epochs_string = command_line%flag_value("--epochs")
@@ -126,54 +123,24 @@ function get_command_line_arguments() result(command_line_arguments)
   read(epochs_string,*) num_epochs

-  if (len(stride_string)==0) then
-    stride = 1
-  else
-    read(stride_string,*) stride
-  end if
-
-  if (len(start_string)==0) then
-    start_step = 1
-  else
-    read(start_string,*) start_step
-  end if
-
-  if (len(report_string)==0) then
-    report_interval = 1
-  else
-    read(report_string,*) report_interval
-  end if
-
-  if (len(bins_string)/=0) then
-    read(bins_string,*) num_bins
-  else
-    num_bins = 1
-  end if
+  stride = default_integer_or_read(1, stride_string)
+  start_step = default_integer_or_read(1, start_string)
+  report_step = default_integer_or_read(1, report_string)
+  num_bins = default_integer_or_read(3, bins_string)
+  cost_tolerance = default_real_or_read(5E-8, tolerance_string)

   if (len(end_string)/=0) then
     allocate(end_step)
     read(end_string,*) end_step
   end if

-  if (len(start_string)==0) then
-    start_step = 1
-  else
-    read(start_string,*) start_step
-  end if
-
-  if (len(tolerance_string)==0) then
-    cost_tolerance = 5.0E-08
-  else
-    read(tolerance_string,*) cost_tolerance
-  end if
-
   if (allocated(end_step)) then
     command_line_arguments = command_line_arguments_t( &
-      num_epochs, start_step, stride, num_bins, report_interval, end_step, base_name, cost_tolerance &
+      num_epochs, start_step, stride, num_bins, report_step, end_step, base_name, cost_tolerance &
     )
   else
     command_line_arguments = command_line_arguments_t( &
-      num_epochs, start_step, stride, num_bins, report_interval, null(), base_name, cost_tolerance &
+      num_epochs, start_step, stride, num_bins, report_step, null(), base_name, cost_tolerance &
     )
   end if

@@ -183,299 +150,316 @@ subroutine read_train_write(training_configuration, args, plot_file)
   type(training_configuration_t), intent(in) :: training_configuration
   type(command_line_arguments_t), intent(in) :: args
   type(plot_file_t), intent(in), optional :: plot_file
+  type(NetCDF_variable_t), allocatable :: input_variable(:), output_variable(:), derivative(:)
+  type(NetCDF_variable_t) input_time, output_time

   ! local variables:
-  real, allocatable, dimension(:,:,:,:) :: &
-    pressure_in , potential_temperature_in , temperature_in , &
-    pressure_out, potential_temperature_out, temperature_out, &
-    qv_out, qc_out, qr_out, qs_out, &
-    qv_in , qc_in , qr_in , qs_in , &
-    dpt_dt, dqv_dt, dqc_dt, dqr_dt, dqs_dt
-  type(ubounds_t), allocatable :: ubounds(:)
-  double precision, allocatable, dimension(:) :: time_in, time_out
-  integer, allocatable :: lbounds(:)
-  integer t, b, t_end
+  type(trainable_network_t) trainable_network
+  type(mini_batch_t), allocatable :: mini_batches(:)
+  type(bin_t), allocatable :: bins(:)
+  type(input_output_pair_t), allocatable :: input_output_pairs(:)
+  type(tensor_t), allocatable, dimension(:) :: input_tensors, output_tensors
+  real, allocatable :: cost(:)
+  integer i, network_unit, io_status, epoch, end_step, t, b, t_end, v
+  integer(int64) start_training, finish_training
   logical stop_requested

-  associate( network_file => args%base_name // "_network.json")
-    associate(network_input => args%base_name // "_input.nc")
-      print *,"Reading network inputs from " // network_input
-      associate(network_input_file => netCDF_file_t(network_input))
-        ! Skipping the following unnecessary inputs that are in the current file format as of 14 Aug 2023:
-        ! precipitation, snowfall
-        call network_input_file%input("pressure", pressure_in)
-        call network_input_file%input("potential_temperature", potential_temperature_in)
-        call network_input_file%input("temperature", temperature_in)
-        call network_input_file%input("qv", qv_in)
-        call network_input_file%input("qc", qc_in)
-        call network_input_file%input("qr", qr_in)
-        call network_input_file%input("qs", qs_in)
-        call network_input_file%input("time", time_in)
-        t_end = size(time_in)
-        lbounds = [lbound(pressure_in), lbound(temperature_in), lbound(qv_in), lbound(qc_in), lbound(qr_in), lbound(qs_in)]
-        ubounds = &
-          [ubounds_t(ubound(qv_in)), ubounds_t(ubound(qc_in)), ubounds_t(ubound(qr_in)), ubounds_t(ubound(qs_in)), &
-           ubounds_t(ubound(pressure_in)), ubounds_t(ubound(temperature_in)) &
-          ]
-      end associate
-    end associate
+  input_names: &
+  associate(input_names => training_configuration%input_names())
+
+    allocate(input_variable(size(input_names)))

-    associate(network_output => args%base_name // "_output.nc")
-      print *,"Reading network outputs from " // network_output
-      associate(network_output_file => netCDF_file_t(network_output))
-        call network_output_file%input("potential_temperature", potential_temperature_out)
-        ! Skipping the following unnecessary outputs that are in the current file format as of 14 Aug 2023:
-        ! pressure, temperature, precipitation, snowfall
-        call network_output_file%input("qv", qv_out)
-        call network_output_file%input("qc", qc_out)
-        call network_output_file%input("qr", qr_out)
-        call network_output_file%input("qs", qs_out)
-        call network_output_file%input("time", time_out)
-        lbounds = [lbounds, lbound(qv_out), lbound(qc_out), lbound(qr_out), lbound(qs_out)]
-        ubounds = [ubounds, ubounds_t(ubound(qv_out)), ubounds_t(ubound(qc_out)), &
-          ubounds_t(ubound(qr_out)), ubounds_t(ubound(qs_out))]
-        call assert(all(lbounds == 1), "main: default input/output lower bounds", intrinsic_array_t(lbounds))
-        call assert(all(ubounds == ubounds(1)), "main: matching input/output upper bounds")
-        block
-          double precision, parameter :: time_tolerance = 1.E-07
-          associate(matching_time_stamps => all(abs(time_in(2:t_end) - time_out(1:t_end-1)) < time_tolerance))
-            call assert(matching_time_stamps, "main: matching time stamps")
-          end associate
-        end block
-        associate(dt => real(time_out - time_in))
-          do concurrent(t = 1:t_end)
-            dpt_dt(:,:,:,t) = (potential_temperature_out(:,:,:,t) - potential_temperature_in(:,:,:,t))/dt(t)
-            dqv_dt(:,:,:,t) = (qv_out(:,:,:,t)- qv_in(:,:,:,t))/dt(t)
-            dqc_dt(:,:,:,t) = (qc_out(:,:,:,t)- qc_in(:,:,:,t))/dt(t)
-            dqr_dt(:,:,:,t) = (qr_out(:,:,:,t)- qr_in(:,:,:,t))/dt(t)
-            dqs_dt(:,:,:,t) = (qs_out(:,:,:,t)- qs_in(:,:,:,t))/dt(t)
+    input_file_name: &
+    associate(input_file_name => args%base_name // "_input.nc")
+
+      print *,"Reading physics-based model inputs from " // input_file_name
+
+      input_file: &
+      associate(input_file => netCDF_file_t(input_file_name))
+
+        do v=1, size(input_variable)
+          print *,"- reading ", input_names(v)%string()
+          call input_variable(v)%input(input_names(v), input_file, rank=4)
+        end do
+
+        do v = 2, size(input_variable)
+          call assert(input_variable(v)%conformable_with(input_variable(1)), "train_cloud_microphysics: input variable conformance")
+        end do
+
+        print *,"- reading time"
+        call input_time%input("time", input_file, rank=1)
+
+      end associate input_file
+    end associate input_file_name
+  end associate input_names
+
+  output_names: &
+  associate(output_names => training_configuration%output_names())
+
+    allocate(output_variable(size(output_names)))
+
+    output_file_name: &
+    associate(output_file_name => args%base_name // "_output.nc")
+
+      print *,"Reading physics-based model outputs from " // output_file_name
+
+      output_file: &
+      associate(output_file => netCDF_file_t(output_file_name))
+
+        do v=1, size(output_variable)
+          print *,"- reading ", output_names(v)%string()
+          call output_variable(v)%input(output_names(v), output_file, rank=4)
+        end do
+
+        do v = 1, size(output_variable)
+          call assert(output_variable(v)%conformable_with(input_variable(1)), "train_cloud_microphysics: output variable conformance")
+        end do
+
+        print *,"- reading time"
+        call output_time%input("time", output_file, rank=1)
+
+        call assert(output_time%conformable_with(input_time), "train_cloud_microphysics: input/output time conformance")
+
+      end associate output_file
+    end associate output_file_name
+
+    print *,"Calculating desired neural-network model outputs"
+
+    allocate(derivative, mold=output_variable)
+
+    dt: &
+    associate(dt => NetCDF_variable_t(output_time - input_time, "dt"))
+      do v = 1, size(derivative)
+        derivative_name: &
+        associate(derivative_name => "d" // output_names(v)%string() // "/dt")
+          print *,"- " // derivative_name
+          derivative(v) = NetCDF_variable_t( (output_variable(v) - input_variable(v)) / dt, derivative_name)
+          call assert(.not. derivative(v)%any_nan(), "train_cloud_microphysics: no NaNs")
+        end associate derivative_name
       end do
-    end associate
+    end associate dt
+  end associate output_names

-  call assert(.not. any(ieee_is_nan(dpt_dt)), ".not. any(ieee_is_nan(dpt_dt)")
-  call assert(.not. any(ieee_is_nan(dqv_dt)), ".not. any(ieee_is_nan(dqv_dt)")
-  call assert(.not. any(ieee_is_nan(dqc_dt)), ".not. any(ieee_is_nan(dqc_dt)")
-  call assert(.not. any(ieee_is_nan(dqr_dt)), ".not. any(ieee_is_nan(dqr_dt)")
-  call assert(.not. any(ieee_is_nan(dqs_dt)), ".not. any(ieee_is_nan(dqs_dt)")
+  if (allocated(args%end_step)) then
+    end_step = args%end_step
+  else
+    end_step = input_variable(1)%end_step()
+  end if

-  train_network: &
+  print *,"Defining input tensors for time step", args%start_step, "through", end_step, "with strides of", args%stride
+  input_tensors = tensors(input_variable, step_start = args%start_step, step_end = end_step, step_stride = args%stride)
+
+  print *,"Defining output tensors for time step", args%start_step, "through", end_step, "with strides of", args%stride
+  output_tensors = tensors(derivative, step_start = args%start_step, step_end = end_step, step_stride = args%stride)
+
+  output_map_and_network_file: &
+  associate( &
+    output_map => tensor_map_t( &
+       layer = "outputs" &
+      ,minima = [( derivative(v)%minimum(), v=1, size(derivative) )] &
+      ,maxima = [( derivative(v)%maximum(), v=1, size(derivative) )] &
+    ), &
+    network_file => args%base_name // "_network.json" &
+  )
+  check_for_network_file: &
   block
-    type(trainable_network_t) trainable_network
-    type(mini_batch_t), allocatable :: mini_batches(:)
-    type(bin_t), allocatable :: bins(:)
-    type(input_output_pair_t), allocatable :: input_output_pairs(:)
-    type(tensor_t), allocatable, dimension(:) :: inputs, outputs
-    real, allocatable :: cost(:)
-    integer i, lon, lat, level, time, network_unit, io_status, epoch, end_step
-    integer(int64) start_training, finish_training
-
-    open(newunit=network_unit, file=network_file, form='formatted', status='old', iostat=io_status, action='read')
-
-    if (allocated(args%end_step)) then
-      end_step = args%end_step
+    logical preexisting_network_file
+
+    inquire(file=network_file, exist=preexisting_network_file)
+
+    read_or_initialize_network: &
+    if (preexisting_network_file) then
+      print *,"Reading network from file " // network_file
+      trainable_network = 
trainable_network_t(neural_network_t(file_t(string_t(network_file)))) + close(network_unit) else - end_step = t_end - end if - - print *,"Defining tensors from time step", args%start_step, "through", end_step, "with strides of", args%stride - - ! The following temporary copies are required by gfortran bug 100650 and possibly 49324 - ! See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100650 and https://gcc.gnu.org/bugzilla/show_bug.cgi?id=49324 - inputs = [( [( [( [( & - tensor_t( & - [ pressure_in(lon,lat,level,time), potential_temperature_in(lon,lat,level,time), temperature_in(lon,lat,level,time), & - qv_in(lon,lat,level,time), qc_in(lon,lat,level,time), qr_in(lon,lat,level,time), qs_in(lon,lat,level,time) & - ] & - ), lon = 1, size(qv_in,1))], lat = 1, size(qv_in,2))], level = 1, size(qv_in,3))], & - time = args%start_step, end_step, args%stride)] - - outputs = [( [( [( [( & - tensor_t( & - [dpt_dt(lon,lat,level,time), dqv_dt(lon,lat,level,time), dqc_dt(lon,lat,level,time), dqr_dt(lon,lat,level,time), & - dqs_dt(lon,lat,level,time) & - ] & - ), lon = 1, size(qv_in,1))], lat = 1, size(qv_in,2))], level = 1, size(qv_in,3))], & - time = args%start_step, end_step, args%stride)] - - print *,"Calculating output tensor component ranges." - output_extrema: & - associate( & - output_minima => [minval(dpt_dt), minval(dqv_dt), minval(dqc_dt), minval(dqr_dt), minval(dqs_dt)], & - output_maxima => [maxval(dpt_dt), maxval(dqv_dt), maxval(dqc_dt), maxval(dqr_dt), maxval(dqs_dt)] & - ) - output_map: & - associate( output_map => tensor_map_t(layer = "outputs", minima = output_minima, maxima = output_maxima)) - read_or_initialize_engine: & - if (io_status==0) then - print *,"Reading network from file " // network_file - trainable_network = trainable_network_t(neural_network_t(file_t(string_t(network_file)))) - close(network_unit) - else - close(network_unit) + close(network_unit) - initialize_network: & - block - character(len=len('YYYYMMDD')) date - - call date_and_time(date) - - print *,"Calculating input tensor component ranges." - associate( & - input_map => tensor_map_t( & - layer = "inputs", & - minima = [minval(pressure_in), minval(potential_temperature_in), minval(temperature_in), & - minval(qv_in), minval(qc_in), minval(qr_in), minval(qs_in)], & - maxima = [maxval(pressure_in), maxval(potential_temperature_in), maxval(temperature_in), & - maxval(qv_in), maxval(qc_in), maxval(qr_in), maxval(qs_in)] & - ) ) - associate(activation => training_configuration%differentiable_activation()) - associate(residual_network=> string_t(trim(merge("true ", "false", training_configuration%skip_connections())))) - trainable_network = trainable_network_t( & - training_configuration, & - perturbation_magnitude = 0.05, & - metadata = [ & - string_t("Simple microphysics"), string_t("train-on-flat-dist"), string_t(date), & - activation%function_name(), residual_network & - ], input_map = input_map, output_map = output_map & - ) - end associate - end associate - end associate ! input_map, date_string - end block initialize_network - end if read_or_initialize_engine - - print *, "Conditionally sampling for a flat distribution of output values" - block - integer i - logical occupied(args%num_bins, args%num_bins, args%num_bins, args%num_bins, args%num_bins) - logical keepers(size(outputs)) - type(phase_space_bin_t), allocatable :: bin(:) - occupied = .false. - keepers = .false. 
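Both versions of the sampling pass that this hunk rewrites implement the same idea: treat each output tensor as a point in a phase space discretized into num_bins bins per output component, and keep only the first tensor that lands in any given bin, so the retained training set approaches a flat distribution over outputs. The removed code above hard-wires a rank-5 occupied array; the occupancy_t introduced later in this patch generalizes the bookkeeping to any number of components. A minimal sketch of the pattern, assuming the occupancy_t interface from this patch (the two-component phase space and the binning arithmetic are illustrative, not the phase_space_bin_t computation):

  program flat_sampling_sketch
    use occupancy_m, only : occupancy_t
    implicit none
    integer, parameter :: num_bins = 3, num_points = 1000
    real points(2,num_points) ! two output components per point, in [0,1)
    logical keep(num_points)
    type(occupancy_t) occupancy
    integer i

    call random_number(points)
    call occupancy%vacate(dims = [num_bins, num_bins])
    keep = .false.

    do i = 1, num_points
      associate(loc => min(num_bins, 1 + int(points(:,i)*num_bins))) ! illustrative binning
        if (.not. occupancy%occupied(loc)) then
          call occupancy%occupy(loc)
          keep(i) = .true. ! the first arrival claims the bin
        end if
      end associate
    end do

    print *, count(keep), "of", num_points, "points span", occupancy%num_occupied(), "bins"
  end program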
- - bin = [(phase_space_bin_t(outputs(i), output_minima, output_maxima, args%num_bins), i=1,size(outputs))] - - do i = 1, size(outputs) - if (occupied(bin(i)%loc(1),bin(i)%loc(2),bin(i)%loc(3),bin(i)%loc(4),bin(i)%loc(5))) cycle - occupied(bin(i)%loc(1),bin(i)%loc(2),bin(i)%loc(3),bin(i)%loc(4),bin(i)%loc(5)) = .true. - keepers(i) = .true. - end do - input_output_pairs = input_output_pair_t(pack(inputs, keepers), pack(outputs, keepers)) - print *, "Kept ", size(input_output_pairs), " out of ", size(outputs, kind=int64), " input/output pairs " // & - " in ", count(occupied)," out of ", size(occupied, kind=int64), " bins." - end block - end associate output_map - end associate output_extrema - - print *,"Normalizing the remaining input and output tensors" - input_output_pairs = trainable_network%map_to_training_ranges(input_output_pairs) - - associate( & - num_pairs => size(input_output_pairs), & - n_bins => training_configuration%mini_batches(), & - adam => merge(.true., .false., training_configuration%optimizer_name() == "adam"), & - learning_rate => training_configuration%learning_rate() & - ) - bins = [(bin_t(num_items=num_pairs, num_bins=n_bins, bin_number=b), b = 1, n_bins)] - - print *,"Training network" - print *, " Epoch Cost (avg)" - - call system_clock(start_training) - - train_write_and_maybe_exit: & + initialize_network: & block - integer first_epoch - integer me + character(len=len('YYYYMMDD')) date + + call date_and_time(date) + + print *,"Defining a new network from training_configuration_t and tensor_map_t objects" + + activation: & + associate(activation => training_configuration%activation()) + trainable_network = trainable_network_t( & + training_configuration & + ,perturbation_magnitude = 0.05 & + ,metadata = [ & + string_t("ICAR microphysics" ) & + ,string_t("max-entropy-filter") & + ,string_t(date ) & + ,activation%function_name( ) & + ,string_t(trim(merge("true ", "false", training_configuration%skip_connections()))) & + ] & + ,input_map = tensor_map_t( & + layer = "inputs" & + ,minima = [( input_variable(v)%minimum(), v=1, size( input_variable) )] & + ,maxima = [( input_variable(v)%maximum(), v=1, size( input_variable) )] & + ) & + ,output_map = output_map & + ) + end associate activation + end block initialize_network + + end if read_or_initialize_network + + end block check_for_network_file + + print *, "Conditionally sampling for a flat distribution of output values" + + flatten_histogram: & + block + integer i + logical occupied(args%num_bins, args%num_bins) + logical keepers(size(output_tensors)) + type(phase_space_bin_t), allocatable :: bin(:) + type(occupancy_t) occupancy + + ! Determine the phase-space bin that holds each output tensor + associate(output_minima => output_map%minima(), output_maxima => output_map%maxima()) + bin = [(phase_space_bin_t(output_tensors(i), output_minima, output_maxima, args%num_bins), i = 1, size(output_tensors))] + end associate + call occupancy%vacate( dims = [( args%num_bins, i = 1, size(output_variable))] ) -#if defined(MULTI_IMAGE_SUPPORT) - me = this_image() -#else - me = 1 -#endif - if (me==1) first_epoch = plot_file%previous_epoch + 1 - -#if defined(MULTI_IMAGE_SUPPORT) - call co_broadcast(first_epoch, source_image=1) -#endif - associate(last_epoch => first_epoch + args%num_epochs - 1) - epochs: & - do epoch = first_epoch, last_epoch + keepers = .false. - if (size(bins)>1) call shuffle(input_output_pairs) ! 
set up for stochastic gradient descent - mini_batches = [(mini_batch_t(input_output_pairs(bins(b)%first():bins(b)%last())), b = 1, size(bins))] + do i = 1, size(output_tensors) + if (occupancy%occupied(bin(i)%loc)) cycle + call occupancy%occupy(bin(i)%loc) + keepers(i) = .true. + end do - call trainable_network%train(mini_batches, cost, adam, learning_rate) + input_output_pairs = input_output_pair_t(pack(input_tensors, keepers), pack(output_tensors, keepers)) - associate(average_cost => sum(cost)/size(cost)) - associate(converged => average_cost <= args%cost_tolerance) + print '(*(a,i))' & + ," Keeping " , size(input_output_pairs, kind=int64) & + ," out of " , size(output_tensors, kind=int64) & + ," input/output pairs in ", occupancy%num_occupied() & + ," out of " , occupancy%num_bins() & + ," bins." - image_1_maybe_writes: & - if (me==1 .and. any([converged, epoch==[first_epoch,last_epoch], mod(epoch,args%report_interval)==0])) then + end block flatten_histogram - print *, epoch, average_cost - write(plot_file%plot_unit,*) epoch, average_cost + print *,"Normalizing the remaining input and output tensors" + input_output_pairs = trainable_network%map_to_training_ranges(input_output_pairs) - associate(json_file => trainable_network%to_json()) - call json_file%write_lines(string_t(network_file)) - end associate + training_parameters: & + associate( & + num_pairs => size(input_output_pairs), & + n_bins => training_configuration%mini_batches(), & + adam => merge(.true., .false., training_configuration%optimizer_name() == "adam"), & + learning_rate => training_configuration%learning_rate() & + ) + bins = [(bin_t(num_items=num_pairs, num_bins=n_bins, bin_number=b), b = 1, n_bins)] - end if image_1_maybe_writes + print *,"Training network" + print *, " Epoch Cost (avg)" - signal_convergence: & - if (converged) then - block - integer unit - open(newunit=unit, file="converged", status="unknown") ! The train.sh script detects & removes this file. - close(unit) - exit epochs - end block - end if signal_convergence - end associate - end associate + call system_clock(start_training) + + train_write_and_maybe_exit: & + block + integer first_epoch + integer me +#if defined(MULTI_IMAGE_SUPPORT) + me = this_image() +#else + me = 1 +#endif + if (me==1) first_epoch = plot_file%previous_epoch + 1 +#if defined(MULTI_IMAGE_SUPPORT) + call co_broadcast(first_epoch, source_image=1) +#endif + last_epoch: & + associate(last_epoch => first_epoch + args%num_epochs - 1) + epochs: & + do epoch = first_epoch, last_epoch - inquire(file="stop", exist=stop_requested) + if (size(bins)>1) call shuffle(input_output_pairs) ! set up for stochastic gradient descent + mini_batches = [(mini_batch_t(input_output_pairs(bins(b)%first():bins(b)%last())), b = 1, size(bins))] - graceful_exit: & - if (stop_requested) then - print *,'Shutting down because a file named "stop" was found.' - return - end if graceful_exit + call trainable_network%train(mini_batches, cost, adam, learning_rate) - end do epochs - end associate - end block train_write_and_maybe_exit + average_cost: & + associate(average_cost => sum(cost)/size(cost)) + converged: & + associate(converged => average_cost <= args%cost_tolerance) - end associate + image_1_maybe_writes: & + if (me==1 .and. 
any([converged, epoch==[first_epoch,last_epoch], mod(epoch,args%report_step)==0])) then - call system_clock(finish_training) - print *,"Training time: ", real(finish_training - start_training, real64)/real(clock_rate, real64),"for", & - args%num_epochs,"epochs" + !print '(*(g0,4x))', epoch, average_cost + write(plot_file%plot_unit,'(*(g0,4x))') epoch, average_cost - end block train_network - end associate ! network_file + associate(json_file => trainable_network%to_json()) + call json_file%write_lines(string_t(network_file)) + end associate + end if image_1_maybe_writes + + signal_convergence: & + if (converged) then + block + integer unit + open(newunit=unit, file="converged", status="unknown") ! The train.sh script detects & removes this file. + close(unit) + exit epochs + end block + end if signal_convergence + end associate converged + end associate average_cost + + inquire(file="stop", exist=stop_requested) + + graceful_exit: & + if (stop_requested) then + print *,'Shutting down because a file named "stop" was found.' + return + end if graceful_exit + + end do epochs + end associate last_epoch + end block train_write_and_maybe_exit + + end associate training_parameters + end associate output_map_and_network_file + + call system_clock(finish_training) + print *,"Training time: ", real(finish_training - start_training, real64)/real(clock_rate, real64),"for", & + args%num_epochs,"epochs" close(plot_file%plot_unit) end subroutine read_train_write - pure function normalize(x, x_min, x_max) result(x_normalized) - real, intent(in) :: x(:,:,:,:), x_min, x_max - real, allocatable :: x_normalized(:,:,:,:) - call assert(x_min/=x_max, "train_cloud_microphysics(normaliz): x_min/=x_max") - x_normalized = (x - x_min)/(x_max - x_min) + pure function default_integer_or_read(default, string) result(set_value) + integer, intent(in) :: default + character(len=*), intent(in) :: string + integer set_value + + if (len(string)==0) then + set_value = default + else + read(string,*) set_value + end if + + end function + + pure function default_real_or_read(default, string) result(set_value) + real, intent(in) :: default + character(len=*), intent(in) :: string + real set_value + + if (len(string)==0) then + set_value = default + else + read(string,*) set_value + end if + end function -end program train_on_flat_distribution +end program train_cloud_microphysics diff --git a/demo/fpm.toml b/demo/fpm.toml index 9bed6f611..729eb03ac 100644 --- a/demo/fpm.toml +++ b/demo/fpm.toml @@ -5,6 +5,6 @@ maintainer = "(Please see fiats/fpm.toml.)" [dependencies] assert = {git = "https://github.com/sourceryinstitute/assert", tag = "1.7.0"} -julienne = {git = "https://github.com/berkeleylab/julienne", tag = "1.2.2"} +julienne = {git = "https://github.com/berkeleylab/julienne", tag = "1.5.3"} fiats = {path = "../"} netcdf-interfaces = {git = "https://github.com/LKedward/netcdf-interfaces.git", rev = "d2bbb71ac52b4e346b62572b1ca1620134481096"} diff --git a/demo/src/NetCDF_file_m.f90 b/demo/src/NetCDF_file_m.f90 index 7bba60ab0..91d1dac92 100644 --- a/demo/src/NetCDF_file_m.f90 +++ b/demo/src/NetCDF_file_m.f90 @@ -1,8 +1,5 @@ ! Copyright (c), The Regents of the University of California ! Terms of use are as specified in LICENSE.txt -#ifndef __INTEL_FORTRAN -!! Due to a suspected bug in the Intel ifx compiler, the above C preprocessor macro -!! effectively eliminates this file's source code when building with an Intel compiler. 
module NetCDF_file_m implicit none @@ -52,5 +49,4 @@ module subroutine input_double_precision(self, varname, values) end interface -end module NetCDF_file_m -#endif // __INTEL_FORTRAN \ No newline at end of file +end module NetCDF_file_m \ No newline at end of file diff --git a/demo/src/NetCDF_file_s.f90 b/demo/src/NetCDF_file_s.f90 index 0200cfc89..e1b40908a 100644 --- a/demo/src/NetCDF_file_s.f90 +++ b/demo/src/NetCDF_file_s.f90 @@ -1,8 +1,5 @@ ! Copyright (c), The Regents of the University of California ! Terms of use are as specified in LICENSE.txt -#ifndef __INTEL_FORTRAN -!! Due to a suspected bug in the Intel ifx compiler, the above C preprocessor macro -!! effectively eliminates this file's source code when building with an Intel compiler. submodule(netCDF_file_m) netCDF_file_s use netcdf, only : & nf90_create, nf90_def_dim, nf90_def_var, nf90_enddef, nf90_put_var, nf90_inquire_dimension, & ! functions @@ -226,5 +223,4 @@ function get_shape(ncid, varname) result(array_shape) end procedure -end submodule netCDF_file_s -#endif // __INTEL_FORTRAN \ No newline at end of file +end submodule netCDF_file_s \ No newline at end of file diff --git a/demo/src/NetCDF_variable_m.f90 b/demo/src/NetCDF_variable_m.f90 new file mode 100644 index 000000000..113145236 --- /dev/null +++ b/demo/src/NetCDF_variable_m.f90 @@ -0,0 +1,237 @@ +! Copyright (c), The Regents of the University of California +! Terms of use are as specified in LICENSE.txt +module NetCDF_variable_m + use NetCDF_file_m, only : NetCDF_file_t + use kind_parameters_m, only : default_real, double_precision + use julienne_m, only : string_t + use fiats_m, only : tensor_t + implicit none + + private + public :: NetCDF_variable_t + public :: tensors + + type NetCDF_variable_t(k) + integer, kind :: k = default_real + private + real(k), allocatable :: values_1D_(:), values_2D_(:,:), values_3D_(:,:,:), values_4D_(:,:,:,:) + character(len=:), allocatable :: name_ + contains + generic :: input => default_real_input, double_precision_input, default_real_input_character_name, double_precision_input_character_name + procedure, private, non_overridable :: default_real_input, double_precision_input, default_real_input_character_name, double_precision_input_character_name + generic :: conformable_with => default_real_conformable_with, double_precision_conformable_with + procedure, private, non_overridable :: default_real_conformable_with, double_precision_conformable_with + generic :: rank => default_real_rank , double_precision_rank + procedure, private, non_overridable :: default_real_rank , double_precision_rank + generic :: end_step => default_real_end_step , double_precision_end_step + procedure, private, non_overridable :: default_real_end_step , double_precision_end_step + generic :: any_nan => default_real_any_nan , double_precision_any_nan + procedure, private, non_overridable :: default_real_any_nan , double_precision_any_nan + generic :: minimum => default_real_minimum , double_precision_minimum + procedure, private, non_overridable :: default_real_minimum , double_precision_minimum + generic :: maximum => default_real_maximum , double_precision_maximum + procedure, private, non_overridable :: default_real_maximum , double_precision_maximum + generic :: operator(-) => default_real_subtract , double_precision_subtract + procedure, private, non_overridable :: default_real_subtract , double_precision_subtract + generic :: operator(/) => default_real_divide , double_precision_divide + procedure, private, non_overridable :: 
default_real_divide , double_precision_divide + generic :: assignment(=) => default_real_assign , double_precision_assign + procedure, private, non_overridable :: default_real_assign , double_precision_assign + end type + + interface NetCDF_variable_t + + elemental module function default_real_copy(source, rename) result(NetCDF_variable) + implicit none + type(NetCDF_variable_t), intent(in) :: source + type(string_t), intent(in), optional :: rename + type(NetCDF_variable_t) NetCDF_variable + end function + + elemental module function default_real_copy_character_name(source, rename) result(NetCDF_variable) + implicit none + type(NetCDF_variable_t), intent(in) :: source + character(len=*), intent(in), optional :: rename + type(NetCDF_variable_t) NetCDF_variable + end function + + elemental module function double_precision_copy(source, rename) result(NetCDF_variable) + implicit none + type(NetCDF_variable_t(double_precision)), intent(in) :: source + type(string_t), intent(in), optional :: rename + type(NetCDF_variable_t(double_precision)) NetCDF_variable + end function + + elemental module function double_precision_copy_character_name(source, rename) result(NetCDF_variable) + implicit none + type(NetCDF_variable_t(double_precision)), intent(in) :: source + character(len=*), intent(in), optional :: rename + type(NetCDF_variable_t(double_precision)) NetCDF_variable + end function + + end interface + + interface + + impure elemental module subroutine default_real_input(self, variable_name, file, rank) + implicit none + class(NetCDF_variable_t), intent(inout) :: self + type(string_t), intent(in) :: variable_name + type(NetCDF_file_t), intent(in) :: file + integer, intent(in) :: rank + end subroutine + + impure elemental module subroutine double_precision_input(self, variable_name, file, rank) + implicit none + class(NetCDF_variable_t(double_precision)), intent(inout) :: self + type(string_t), intent(in) :: variable_name + type(NetCDF_file_t), intent(in) :: file + integer, intent(in) :: rank + end subroutine + + impure elemental module subroutine default_real_input_character_name(self, variable_name, file, rank) + implicit none + class(NetCDF_variable_t), intent(inout) :: self + character(len=*), intent(in) :: variable_name + type(NetCDF_file_t), intent(in) :: file + integer, intent(in) :: rank + end subroutine + + impure elemental module subroutine double_precision_input_character_name(self, variable_name, file, rank) + implicit none + class(NetCDF_variable_t(double_precision)), intent(inout) :: self + character(len=*), intent(in) :: variable_name + type(NetCDF_file_t), intent(in) :: file + integer, intent(in) :: rank + end subroutine + + elemental module function default_real_conformable_with(self, NetCDF_variable) result(conformable) + implicit none + class(NetCDF_variable_t), intent(in) :: self, NetCDF_variable + logical conformable + end function + + elemental module function double_precision_conformable_with(self, NetCDF_variable) result(conformable) + implicit none + class(NetCDF_variable_t(double_precision)), intent(in) :: self, NetCDF_variable + logical conformable + end function + + elemental module function default_real_rank(self) result(my_rank) + implicit none + class(NetCDF_variable_t), intent(in) :: self + integer my_rank + end function + + elemental module function double_precision_rank(self) result(my_rank) + implicit none + class(NetCDF_variable_t(double_precision)), intent(in) :: self + integer my_rank + end function + + elemental module function default_real_end_step(self) 
result(end_step)
+      implicit none
+      class(NetCDF_variable_t), intent(in) :: self
+      integer end_step
+    end function
+
+    elemental module function double_precision_end_step(self) result(end_step)
+      implicit none
+      class(NetCDF_variable_t(double_precision)), intent(in) :: self
+      integer end_step
+    end function
+
+    elemental module function default_real_subtract(lhs, rhs) result(difference)
+      implicit none
+      class(NetCDF_variable_t), intent(in) :: lhs, rhs
+      type(NetCDF_variable_t) difference
+    end function
+
+    elemental module function double_precision_subtract(lhs, rhs) result(difference)
+      implicit none
+      class(NetCDF_variable_t(double_precision)), intent(in) :: lhs, rhs
+      type(NetCDF_variable_t(double_precision)) difference
+    end function
+
+    elemental module function default_real_divide(lhs, rhs) result(ratio)
+      implicit none
+      class(NetCDF_variable_t), intent(in) :: lhs, rhs
+      type(NetCDF_variable_t) ratio
+    end function
+
+    elemental module function double_precision_divide(lhs, rhs) result(ratio)
+      implicit none
+      class(NetCDF_variable_t(double_precision)), intent(in) :: lhs, rhs
+      type(NetCDF_variable_t(double_precision)) ratio
+    end function
+
+    elemental module subroutine default_real_assign(lhs, rhs)
+      implicit none
+      class(NetCDF_variable_t), intent(inout) :: lhs
+      type(NetCDF_variable_t), intent(in) :: rhs
+    end subroutine
+
+    elemental module subroutine double_precision_assign(lhs, rhs)
+      implicit none
+      class(NetCDF_variable_t(double_precision)), intent(inout) :: lhs
+      type(NetCDF_variable_t(double_precision)), intent(in) :: rhs
+    end subroutine
+
+    elemental module function default_real_any_nan(self) result(any_nan)
+      implicit none
+      class(NetCDF_variable_t), intent(in) :: self
+      logical any_nan
+    end function
+
+    elemental module function double_precision_any_nan(self) result(any_nan)
+      implicit none
+      class(NetCDF_variable_t(double_precision)), intent(in) :: self
+      logical any_nan
+    end function
+
+    elemental module function default_real_minimum(self) result(minimum)
+      implicit none
+      class(NetCDF_variable_t), intent(in) :: self
+      real minimum
+    end function
+
+    elemental module function double_precision_minimum(self) result(minimum)
+      implicit none
+      class(NetCDF_variable_t(double_precision)), intent(in) :: self
+      real minimum
+    end function
+
+    elemental module function default_real_maximum(self) result(maximum)
+      implicit none
+      class(NetCDF_variable_t), intent(in) :: self
+      real maximum
+    end function
+
+    elemental module function double_precision_maximum(self) result(maximum)
+      implicit none
+      class(NetCDF_variable_t(double_precision)), intent(in) :: self
+      real maximum
+    end function
+
+    module function tensors(NetCDF_variables, step_start, step_end, step_stride)
+      implicit none
+      type(NetCDF_variable_t), intent(in) :: NetCDF_variables(:)
+      type(tensor_t), allocatable :: tensors(:)
+      integer, intent(in), optional :: step_start, step_end, step_stride
+    end function
+
+    elemental module function default_real_end_time(self) result(end_time)
+      implicit none
+      class(NetCDF_variable_t), intent(in) :: self
+      integer end_time
+    end function
+
+    elemental module function double_precision_end_time(self) result(end_time)
+      implicit none
+      class(NetCDF_variable_t(double_precision)), intent(in) :: self
+      integer end_time
+    end function
+
+  end interface
+
+end module
\ No newline at end of file
diff --git a/demo/src/NetCDF_variable_s.f90 b/demo/src/NetCDF_variable_s.f90
new file mode 100644
index 000000000..6c18e0f98
--- /dev/null
+++ b/demo/src/NetCDF_variable_s.f90
@@ -0,0 +1,568 @@
+! 
Copyright (c), The Regents of the University of California +! Terms of use are as specified in LICENSE.txt +submodule(NetCDF_variable_m) NetCDF_variable_s + use ieee_arithmetic, only : ieee_is_nan + use kind_parameters_m, only : default_real + use assert_m, only : assert, intrinsic_array_t + implicit none + + interface components_allocated + module procedure default_real_components_allocated + module procedure double_precision_components_allocated + end interface + + interface lower_bounds + module procedure default_real_lower_bounds + module procedure double_precision_lower_bounds + end interface + + interface upper_bounds + module procedure default_real_upper_bounds + module procedure double_precision_upper_bounds + end interface + + interface default_if_not_present + module procedure default_integer_if_not_present + module procedure default_real_if_not_present + end interface + +contains + + module procedure default_real_copy + + if (present(rename)) then + NetCDF_variable%name_ = rename + else + NetCDF_variable%name_ = source%name_ + end if + + select case(source%rank()) + case (1) + NetCDF_variable%values_1D_ = source%values_1D_ + case (2) + NetCDF_variable%values_2D_ = source%values_2D_ + case (3) + NetCDF_variable%values_3D_ = source%values_3D_ + case (4) + NetCDF_variable%values_4D_ = source%values_4D_ + case default + error stop 'NetCDF_variable_s(default_real_copy): unsupported rank' + end select + + end procedure + + module procedure default_real_copy_character_name + NetCDF_variable = default_real_copy(source, string_t(rename)) + end procedure + + module procedure double_precision_copy + + if (present(rename)) then + NetCDF_variable%name_ = rename + else + NetCDF_variable%name_ = source%name_ + end if + + select case(source%rank()) + case (1) + NetCDF_variable%values_1D_ = source%values_1D_ + case (2) + NetCDF_variable%values_2D_ = source%values_2D_ + case (3) + NetCDF_variable%values_3D_ = source%values_3D_ + case (4) + NetCDF_variable%values_4D_ = source%values_4D_ + case default + error stop 'NetCDF_variable_s(double_precision_copy): unsupported rank' + end select + + end procedure + + module procedure double_precision_copy_character_name + NetCDF_variable = double_precision_copy(source, string_t(rename)) + end procedure + + module procedure default_real_input + self%name_ = variable_name + select case (rank) + case (1) + call file%input(variable_name%string(), self%values_1D_) + case (2) + call file%input(variable_name%string(), self%values_2D_) + case (3) + call file%input(variable_name%string(), self%values_3D_) + case (4) + call file%input(variable_name%string(), self%values_4D_) + case default + error stop 'NetCDF_variable_s(default_real_input): unsupported rank' + end select + end procedure + + module procedure default_real_input_character_name + call self%default_real_input(string_t(variable_name), file, rank) + end procedure + + module procedure double_precision_input + self%name_ = variable_name + select case (rank) + case (1) + call file%input(variable_name%string(), self%values_1D_) + case (2) + call file%input(variable_name%string(), self%values_2D_) + case (3) + call file%input(variable_name%string(), self%values_3D_) + case (4) + call file%input(variable_name%string(), self%values_4D_) + case default + error stop 'NetCDF_variable_s(double_precision_input): unsupported rank' + end select + end procedure + + module procedure double_precision_input_character_name + call self%double_precision_input(string_t(variable_name), file, rank) + end procedure + + pure 
function default_real_components_allocated(NetCDF_variable) result(allocation_vector) + type(NetCDF_variable_t), intent(in) :: NetCDF_variable + logical, allocatable :: allocation_vector(:) + allocation_vector = [ allocated(NetCDF_variable%values_1D_) & + ,allocated(NetCDF_variable%values_2D_) & + ,allocated(NetCDF_variable%values_3D_) & + ,allocated(NetCDF_variable%values_4D_) ] + end function + + pure function double_precision_components_allocated(NetCDF_variable) result(allocation_vector) + type(NetCDF_variable_t(double_precision)), intent(in) :: NetCDF_variable + logical, allocatable :: allocation_vector(:) + allocation_vector = [ allocated(NetCDF_variable%values_1D_) & + ,allocated(NetCDF_variable%values_2D_) & + ,allocated(NetCDF_variable%values_3D_) & + ,allocated(NetCDF_variable%values_4D_) ] + end function + + module procedure default_real_rank + associate(allocation_vector => components_allocated(self)) + call assert(count(allocation_vector) == 1, "NetCDF_variable_s(default_real_rank): allocation count") + my_rank = findloc(allocation_vector, .true., dim=1) + end associate + end procedure + + module procedure double_precision_rank + associate(allocation_vector => components_allocated(self)) + call assert(count(allocation_vector) == 1, "NetCDF_variable_s(double_precision_rank): allocation count") + my_rank = findloc(allocation_vector, .true., dim=1) + end associate + end procedure + + module procedure default_real_end_step + select case(self%rank()) + case(1) + end_step = ubound(self%values_1D_,1) + case(2) + end_step = ubound(self%values_2D_,2) + case(3) + end_step = ubound(self%values_3D_,3) + case(4) + end_step = ubound(self%values_4D_,4) + case default + error stop "NetCDF_variable_s(default_real_end_step): unsupported rank" + end select + end procedure + + module procedure double_precision_end_step + select case(self%rank()) + case(1) + end_step = ubound(self%values_1D_,1) + case(2) + end_step = ubound(self%values_2D_,2) + case(3) + end_step = ubound(self%values_3D_,3) + case(4) + end_step = ubound(self%values_4D_,4) + case default + error stop "NetCDF_variable_s(double_precision_end_step): unsupported rank" + end select + end procedure + + pure function default_real_lower_bounds(NetCDF_variable) result(lbounds) + type(NetCDF_variable_t), intent(in) :: NetCDF_variable + integer, allocatable :: lbounds(:) + select case(NetCDF_variable%rank()) + case(1) + lbounds = lbound(NetCDF_variable%values_1D_) + case(2) + lbounds = lbound(NetCDF_variable%values_2D_) + case(3) + lbounds = lbound(NetCDF_variable%values_3D_) + case(4) + lbounds = lbound(NetCDF_variable%values_4D_) + case default + error stop "NetCDF_variable_s(default_real_lower_bounds): unsupported rank" + end select + end function + + pure function double_precision_lower_bounds(NetCDF_variable) result(lbounds) + type(NetCDF_variable_t(double_precision)), intent(in) :: NetCDF_variable + integer, allocatable :: lbounds(:) + select case(NetCDF_variable%rank()) + case(1) + lbounds = lbound(NetCDF_variable%values_1D_) + case(2) + lbounds = lbound(NetCDF_variable%values_2D_) + case(3) + lbounds = lbound(NetCDF_variable%values_3D_) + case(4) + lbounds = lbound(NetCDF_variable%values_4D_) + case default + error stop "NetCDF_variable_s(double_precision_lower_bounds): unsupported rank" + end select + end function + + pure function default_real_upper_bounds(NetCDF_variable) result(ubounds) + type(NetCDF_variable_t), intent(in) :: NetCDF_variable + integer, allocatable :: ubounds(:) + select case(NetCDF_variable%rank()) + case(1) + 
ubounds = ubound(NetCDF_variable%values_1D_) + case(2) + ubounds = ubound(NetCDF_variable%values_2D_) + case(3) + ubounds = ubound(NetCDF_variable%values_3D_) + case(4) + ubounds = ubound(NetCDF_variable%values_4D_) + case default + error stop "NetCDF_variable_s(default_real_upper_bounds): unsupported rank" + end select + end function + + pure function double_precision_upper_bounds(NetCDF_variable) result(ubounds) + type(NetCDF_variable_t(double_precision)), intent(in) :: NetCDF_variable + integer, allocatable :: ubounds(:) + select case(NetCDF_variable%rank()) + case(1) + ubounds = ubound(NetCDF_variable%values_1D_) + case(2) + ubounds = ubound(NetCDF_variable%values_2D_) + case(3) + ubounds = ubound(NetCDF_variable%values_3D_) + case(4) + ubounds = ubound(NetCDF_variable%values_4D_) + case default + error stop "NetCDF_variable_s(double_precision_upper_bounds): unsupported rank" + end select + end function + + module procedure default_real_conformable_with + conformable = all([ self%rank() == NetCDF_variable%rank() & + ,lower_bounds(self) == lower_bounds(NetCDF_variable) & + ,upper_bounds(self) == upper_bounds(NetCDF_variable) ]) + end procedure + + module procedure double_precision_conformable_with + conformable = all([ self%rank() == NetCDF_variable%rank() & + ,lower_bounds(self) == lower_bounds(NetCDF_variable) & + ,upper_bounds(self) == upper_bounds(NetCDF_variable) ]) + end procedure + + module procedure default_real_subtract + + call assert(lhs%conformable_with(rhs), "NetCDF_variable_s(default_real_subtract): lhs%conformable_with(rhs)") + call assert(lhs%name_==rhs%name_, "NetCDF_variable_s(default_real_subtract): lhs%name_==rhs%name_", lhs%name_//"/="//rhs%name_) + + difference%name_ = lhs%name_ + + select case(lhs%rank()) + case(1) + difference%values_1D_ = lhs%values_1D_ - rhs%values_1D_ + case(2) + difference%values_2D_ = lhs%values_2D_ - rhs%values_2D_ + case(3) + difference%values_3D_ = lhs%values_3D_ - rhs%values_3D_ + case(4) + difference%values_4D_ = lhs%values_4D_ - rhs%values_4D_ + case default + error stop "NetCDF_variable_s(default_real_subtract): unsupported rank)" + end select + end procedure + + module procedure double_precision_subtract + + call assert(lhs%conformable_with(rhs), "NetCDF_variable_s(double_precision_subtract): lhs%conformable_with(rhs)") + call assert(lhs%name_ == rhs%name_, "NetCDF_variable_s(double_precision_subtract): lhs%name_==rhs%name_",lhs%name_//"/="//rhs%name_) + + difference%name_ = lhs%name_ + + select case(lhs%rank()) + case(1) + difference%values_1D_ = lhs%values_1D_ - rhs%values_1D_ + case(2) + difference%values_2D_ = lhs%values_2D_ - rhs%values_2D_ + case(3) + difference%values_3D_ = lhs%values_3D_ - rhs%values_3D_ + case(4) + difference%values_4D_ = lhs%values_4D_ - rhs%values_4D_ + case default + error stop "NetCDF_variable_s(double_precision_subtract): unsupported rank)" + end select + end procedure + + module procedure default_real_divide + + integer t + + call assert(rhs%rank()==1, "NetCDF_variable_s(default_real_divide): rhs%rank()==1") + + associate(t_end => size(rhs%values_1D_)) + + select case(lhs%rank()) + case(4) + + call assert(size(rhs%values_1D_) == size(lhs%values_4D_,4), "NetCDF_variable_s(default_real_divide): conformable numerator/denominator") + allocate(ratio%values_4D_, mold = lhs%values_4D_) + + do concurrent(t = 1:t_end) + ratio%values_4D_(:,:,:,t) = lhs%values_4D_(:,:,:,t) / rhs%values_1D_(t) + end do + + case default + error stop "NetCDF_variable_s(default_real_divide): unsupported lhs rank)" + end select + + 
end associate + + end procedure + + module procedure double_precision_divide + + integer t + + call assert(rhs%rank()==1, "NetCDF_variable_s(double_precision_divide): rhs%rank()==1") + + associate(t_end => size(rhs%values_1D_)) + + select case(lhs%rank()) + case(4) + + call assert(size(rhs%values_1D_) == size(lhs%values_4D_,4), "NetCDF_variable_s(double_precision_divide): conformable numerator/denominator") + allocate(ratio%values_4D_, mold = lhs%values_4D_) + + do concurrent(t = 1:t_end) + ratio%values_4D_(:,:,:,t) = lhs%values_4D_(:,:,:,t) / rhs%values_1D_(t) + end do + + case default + error stop "NetCDF_variable_s(double_precision_divide): unsupported lhs rank)" + end select + + end associate + + end procedure + + module procedure default_real_assign + select case(rhs%rank()) + case(1) + lhs%values_1D_ = rhs%values_1D_ + case(2) + lhs%values_2D_ = rhs%values_2D_ + case(3) + lhs%values_3D_ = rhs%values_3D_ + case(4) + lhs%values_4D_ = rhs%values_4D_ + case default + error stop "NetCDF_variable_s(default_real_assign): unsupported rank)" + end select + call assert(lhs%rank()==rhs%rank(), "NetCDF_variable_s(default_real_assign): ranks match)") + end procedure + + module procedure double_precision_assign + select case(rhs%rank()) + case(1) + lhs%values_1D_ = rhs%values_1D_ + case(2) + lhs%values_2D_ = rhs%values_2D_ + case(3) + lhs%values_3D_ = rhs%values_3D_ + case(4) + lhs%values_4D_ = rhs%values_4D_ + case default + error stop "NetCDF_variable_s(double_precision_assign): unsupported rank)" + end select + call assert(lhs%rank()==rhs%rank(), "NetCDF_variable_s(double_precision_assign): ranks match)") + end procedure + + module procedure default_real_any_nan + + select case(self%rank()) + case(1) + any_nan = any(ieee_is_nan(self%values_1D_)) + case(2) + any_nan = any(ieee_is_nan(self%values_2D_)) + case(3) + any_nan = any(ieee_is_nan(self%values_3D_)) + case(4) + any_nan = any(ieee_is_nan(self%values_4D_)) + case default + error stop "NetCDF_variable_s(default_real_any_nan): unsupported rank)" + end select + end procedure + + module procedure double_precision_any_nan + + select case(self%rank()) + case(1) + any_nan = any(ieee_is_nan(self%values_1D_)) + case(2) + any_nan = any(ieee_is_nan(self%values_2D_)) + case(3) + any_nan = any(ieee_is_nan(self%values_3D_)) + case(4) + any_nan = any(ieee_is_nan(self%values_4D_)) + case default + error stop "NetCDF_variable_s(double_precision_any_nan): unsupported rank)" + end select + end procedure + + module procedure tensors + + integer t_start, t_end, t_stride + + select case(NetCDF_variables(1)%rank()) + case(4) + + t_start = default_if_not_present(1, step_start ) + t_stride = default_if_not_present(1, step_stride) + t_end = default_if_not_present(size(NetCDF_variables(1)%values_4D_,4), step_end) + + associate( longitudes => size(NetCDF_variables(1)%values_4D_,1) & + ,latitudes => size(NetCDF_variables(1)%values_4D_,2) & + ,levels => size(NetCDF_variables(1)%values_4D_,3) & + ) + block + integer v, lon, lat, lev, time + + tensors = [( [( [( [( tensor_t( [( NetCDF_variables(v)%values_4D_(lon,lat,lev,time), v=1,size(NetCDF_variables) )] ), & + lon = 1, longitudes)], lat = 1, latitudes)], lev = 1, levels)], time = t_start, t_end, t_stride)] + end block + end associate + + case default + error stop "NetCDF_variable_s(tensors): unsupported rank)" + end select + + end procedure + + module procedure default_real_end_time + select case(self%rank()) + case (1) + end_time = size(self%values_1D_,1) + case (2) + end_time = size(self%values_2D_,2) + case (3) + 
end_time = size(self%values_3D_,3) + case (4) + end_time = size(self%values_4D_,4) + case default + error stop 'NetCDF_variable_s(default_real_end_time): unsupported rank' + end select + end procedure + + module procedure double_precision_end_time + select case(self%rank()) + case (1) + end_time = size(self%values_1D_,1) + case (2) + end_time = size(self%values_2D_,2) + case (3) + end_time = size(self%values_3D_,3) + case (4) + end_time = size(self%values_4D_,4) + case default + error stop 'NetCDF_variable_s(double_precision_end_time): unsupported rank' + end select + end procedure + + module procedure default_real_minimum + select case(self%rank()) + case (1) + minimum = minval(self%values_1D_) + case (2) + minimum = minval(self%values_2D_) + case (3) + minimum = minval(self%values_3D_) + case (4) + minimum = minval(self%values_4D_) + case default + error stop 'NetCDF_variable_s(default_real_minimum): unsupported rank' + end select + end procedure + + module procedure double_precision_minimum + select case(self%rank()) + case (1) + minimum = minval(self%values_1D_) + case (2) + minimum = minval(self%values_2D_) + case (3) + minimum = minval(self%values_3D_) + case (4) + minimum = minval(self%values_4D_) + case default + error stop 'NetCDF_variable_s(double_precision_minimum): unsupported rank' + end select + end procedure + + module procedure default_real_maximum + select case(self%rank()) + case (1) + maximum = maxval(self%values_1D_) + case (2) + maximum = maxval(self%values_2D_) + case (3) + maximum = maxval(self%values_3D_) + case (4) + maximum = maxval(self%values_4D_) + case default + error stop 'NetCDF_variable_s(default_real_maximum): unsupported rank' + end select + end procedure + + module procedure double_precision_maximum + select case(self%rank()) + case (1) + maximum = maxval(self%values_1D_) + case (2) + maximum = maxval(self%values_2D_) + case (3) + maximum = maxval(self%values_3D_) + case (4) + maximum = maxval(self%values_4D_) + case default + error stop 'NetCDF_variable_s(double_precision_maximum): unsupported rank' + end select + end procedure + + pure function default_integer_if_not_present(default_value, optional_argument) result(set_value) + integer, intent(in) :: default_value + integer, intent(in), optional :: optional_argument + integer set_value + + if (present(optional_argument)) then + set_value = optional_argument + else + set_value = default_value + end if + end function + + pure function default_real_if_not_present(default_value, optional_argument) result(set_value) + real, intent(in) :: default_value + real, intent(in), optional :: optional_argument + real set_value + + if (present(optional_argument)) then + set_value = optional_argument + else + set_value = default_value + end if + end function + +end submodule NetCDF_variable_s \ No newline at end of file diff --git a/demo/src/occupancy_m.f90 b/demo/src/occupancy_m.f90 new file mode 100644 index 000000000..15b2ef247 --- /dev/null +++ b/demo/src/occupancy_m.f90 @@ -0,0 +1,75 @@ +module occupancy_m + use iso_fortran_env, only : int64 + implicit none + + private + public :: occupancy_t + + type occupancy_t + private + logical, allocatable :: occupied_1D_(:) + logical, allocatable :: occupied_2D_(:,:) + logical, allocatable :: occupied_3D_(:,:,:) + logical, allocatable :: occupied_4D_(:,:,:,:) + logical, allocatable :: occupied_5D_(:,:,:,:,:) + logical, allocatable :: occupied_6D_(:,:,:,:,:,:) + logical, allocatable :: occupied_7D_(:,:,:,:,:,:,:) + logical, allocatable :: occupied_8D_(:,:,:,:,:,:,:,:) + 
logical, allocatable :: occupied_9D_(:,:,:,:,:,:,:,:,:) + logical, allocatable :: occupied_10D_(:,:,:,:,:,:,:,:,:,:) + logical, allocatable :: occupied_11D_(:,:,:,:,:,:,:,:,:,:,:) + logical, allocatable :: occupied_12D_(:,:,:,:,:,:,:,:,:,:,:,:) + logical, allocatable :: occupied_13D_(:,:,:,:,:,:,:,:,:,:,:,:,:) + logical, allocatable :: occupied_14D_(:,:,:,:,:,:,:,:,:,:,:,:,:,:) + logical, allocatable :: occupied_15D_(:,:,:,:,:,:,:,:,:,:,:,:,:,:,:) + contains + procedure, non_overridable :: vacate + procedure, non_overridable :: occupy + procedure, non_overridable :: occupied + procedure, non_overridable :: num_occupied + procedure, non_overridable :: num_bins + procedure, non_overridable :: allocated_dim + end type + + interface + + pure module subroutine vacate(self, dims) + implicit none + class(occupancy_t), intent(inout) :: self + integer, intent(in) :: dims(:) + end subroutine + + pure module subroutine occupy(self, loc) + implicit none + class(occupancy_t), intent(inout) :: self + integer, intent(in) :: loc(:) + end subroutine + + pure module function occupied(self, loc) result(bin_occupied) + implicit none + class(occupancy_t), intent(in) :: self + integer, intent(in) :: loc(:) + logical bin_occupied + end function + + pure module function num_occupied(self) result(bins_occupied) + implicit none + class(occupancy_t), intent(in) :: self + integer(int64) bins_occupied + end function + + pure module function num_bins(self) result(bins_total) + implicit none + class(occupancy_t), intent(in) :: self + integer(int64) bins_total + end function + + pure module function allocated_dim(self) result(my_dim) + implicit none + class(occupancy_t), intent(in) :: self + integer my_dim + end function + + end interface + +end module occupancy_m diff --git a/demo/src/occupancy_s.f90 b/demo/src/occupancy_s.f90 new file mode 100644 index 000000000..3b4e4a44e --- /dev/null +++ b/demo/src/occupancy_s.f90 @@ -0,0 +1,261 @@ +submodule(occupancy_m) occupancy_s + use assert_m, only : assert, intrinsic_array_t + implicit none + +contains + + pure function allocations(occupancy) result(components_allocated) + + type(occupancy_t), intent(in) :: occupancy + logical, allocatable :: components_allocated(:) + + components_allocated = [ & + allocated(occupancy%occupied_1D_ ) & + ,allocated(occupancy%occupied_2D_ ) & + ,allocated(occupancy%occupied_3D_ ) & + ,allocated(occupancy%occupied_4D_ ) & + ,allocated(occupancy%occupied_5D_ ) & + ,allocated(occupancy%occupied_6D_ ) & + ,allocated(occupancy%occupied_7D_ ) & + ,allocated(occupancy%occupied_8D_ ) & + ,allocated(occupancy%occupied_9D_ ) & + ,allocated(occupancy%occupied_10D_) & + ,allocated(occupancy%occupied_11D_) & + ,allocated(occupancy%occupied_12D_) & + ,allocated(occupancy%occupied_13D_) & + ,allocated(occupancy%occupied_14D_) & + ,allocated(occupancy%occupied_15D_) & + ] + + end function allocations + + module procedure vacate + + select case(size(dims)) + case(1) + if (allocated(self%occupied_1D_)) deallocate(self%occupied_1D_) + allocate(self%occupied_1D_(dims(1)), source = .false.) + case(2) + if (allocated(self%occupied_2D_)) deallocate(self%occupied_2D_) + allocate(self%occupied_2D_(dims(1),dims(2)), source = .false.) + case(3) + if (allocated(self%occupied_3D_)) deallocate(self%occupied_3D_) + allocate(self%occupied_3D_(dims(1),dims(2),dims(3)), source = .false.) + case(4) + if (allocated(self%occupied_4D_)) deallocate(self%occupied_4D_) + allocate(self%occupied_4D_(dims(1),dims(2),dims(3),dims(4)), source = .false.) 
+ case(5) + if (allocated(self%occupied_5D_)) deallocate(self%occupied_5D_) + allocate(self%occupied_5D_(dims(1),dims(2),dims(3),dims(4),dims(5)), source = .false.) + case(6) + if (allocated(self%occupied_6D_)) deallocate(self%occupied_6D_) + allocate(self%occupied_6D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6)), source = .false.) + case(7) + if (allocated(self%occupied_7D_)) deallocate(self%occupied_7D_) + allocate(self%occupied_7D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7)), source = .false.) + case(8) + if (allocated(self%occupied_8D_)) deallocate(self%occupied_8D_) + allocate(self%occupied_8D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8)), source = .false.) + case(9) + if (allocated(self%occupied_9D_)) deallocate(self%occupied_9D_) + allocate(self%occupied_9D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9)), source = .false.) + case(10) + if (allocated(self%occupied_10D_)) deallocate(self%occupied_10D_) + allocate(self%occupied_10D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9),dims(10)), source = .false.) + case(11) + if (allocated(self%occupied_11D_)) deallocate(self%occupied_11D_) + allocate(self%occupied_11D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9),dims(10),dims(11)), source = .false.) + case(12) + if (allocated(self%occupied_12D_)) deallocate(self%occupied_12D_) + allocate(self%occupied_12D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9),dims(10),dims(11),dims(12)), source = .false.) + case(13) + if (allocated(self%occupied_13D_)) deallocate(self%occupied_13D_) + allocate(self%occupied_13D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9),dims(10),dims(11),dims(12),dims(13)), source = .false.) + case(14) + if (allocated(self%occupied_14D_)) deallocate(self%occupied_14D_) + allocate(self%occupied_14D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9),dims(10),dims(11),dims(12),dims(13),dims(14)), source = .false.) + case(15) + if (allocated(self%occupied_15D_)) deallocate(self%occupied_15D_) + allocate(self%occupied_15D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9),dims(10),dims(11),dims(12),dims(13),dims(14),dims(15)), source = .false.) + case default + error stop "occupancy_s(vacate): unsupported rank" + end select + + call assert(self%allocated_dim()==size(dims), "occupancy_s(vacate): self%allocated_dim() == size(dims)") + + end procedure vacate + + module procedure occupy + + associate_o: & + associate(o => (loc)) + select case(size(loc)) + case(1) + self%occupied_1D_(o(1)) = .true. + case(2) + self%occupied_2D_(o(1),o(2)) = .true. + case(3) + self%occupied_3D_(o(1),o(2),o(3)) = .true. + case(4) + self%occupied_4D_(o(1),o(2),o(3),o(4)) = .true. + case(5) + self%occupied_5D_(o(1),o(2),o(3),o(4),o(5)) = .true. + case(6) + self%occupied_6D_(o(1),o(2),o(3),o(4),o(5),o(6)) = .true. + case(7) + self%occupied_7D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7)) = .true. + case(8) + self%occupied_8D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8)) = .true. + case(9) + self%occupied_9D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9)) = .true. + case(10) + self%occupied_10D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9),o(10)) = .true. + case(11) + self%occupied_11D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9),o(10),o(11)) = .true. + case(12) + self%occupied_12D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9),o(10),o(11),o(12)) = .true.
+ case(13) + self%occupied_13D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9),o(10),o(11),o(12),o(13)) = .true. + case(14) + self%occupied_14D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9),o(10),o(11),o(12),o(13),o(14)) = .true. + case(15) + self%occupied_15D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9),o(10),o(11),o(12),o(13),o(14),o(15)) = .true. + case default + error stop "occupancy_s(occupy): unsupported rank" + end select + end associate associate_o + + end procedure occupy + + module procedure occupied + + nickname_loc: & + associate(b => (loc)) + select case(size(loc)) + case(1) + bin_occupied = self%occupied_1D_(b(1)) + case(2) + bin_occupied = self%occupied_2D_(b(1),b(2)) + case(3) + bin_occupied = self%occupied_3D_(b(1),b(2),b(3)) + case(4) + bin_occupied = self%occupied_4D_(b(1),b(2),b(3),b(4)) + case(5) + bin_occupied = self%occupied_5D_(b(1),b(2),b(3),b(4),b(5)) + case(6) + bin_occupied = self%occupied_6D_(b(1),b(2),b(3),b(4),b(5),b(6)) + case(7) + bin_occupied = self%occupied_7D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7)) + case(8) + bin_occupied = self%occupied_8D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8)) + case(9) + bin_occupied = self%occupied_9D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9)) + case(10) + bin_occupied = self%occupied_10D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9),b(10)) + case(11) + bin_occupied = self%occupied_11D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9),b(10),b(11)) + case(12) + bin_occupied = self%occupied_12D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9),b(10),b(11),b(12)) + case(13) + bin_occupied = self%occupied_13D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9),b(10),b(11),b(12),b(13)) + case(14) + bin_occupied = self%occupied_14D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9),b(10),b(11),b(12),b(13),b(14)) + case(15) + bin_occupied = self%occupied_15D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9),b(10),b(11),b(12),b(13),b(14),b(15)) + case default + error stop "occupancy_s(occupied): unsupported rank" + end select + end associate nickname_loc + + end procedure occupied + + module procedure num_occupied + + call assert(count(allocations(self))==1, "occupancy_s(num_occupied): count(allocations(self)) == 1") + + select case(self%allocated_dim()) + case(1) + bins_occupied = count(self%occupied_1D_, kind=int64) + case(2) + bins_occupied = count(self%occupied_2D_, kind=int64) + case(3) + bins_occupied = count(self%occupied_3D_, kind=int64) + case(4) + bins_occupied = count(self%occupied_4D_, kind=int64) + case(5) + bins_occupied = count(self%occupied_5D_, kind=int64) + case(6) + bins_occupied = count(self%occupied_6D_, kind=int64) + case(7) + bins_occupied = count(self%occupied_7D_, kind=int64) + case(8) + bins_occupied = count(self%occupied_8D_, kind=int64) + case(9) + bins_occupied = count(self%occupied_9D_, kind=int64) + case(10) + bins_occupied = count(self%occupied_10D_, kind=int64) + case(11) + bins_occupied = count(self%occupied_11D_, kind=int64) + case(12) + bins_occupied = count(self%occupied_12D_, kind=int64) + case(13) + bins_occupied = count(self%occupied_13D_, kind=int64) + case(14) + bins_occupied = count(self%occupied_14D_, kind=int64) + case(15) + bins_occupied = count(self%occupied_15D_, kind=int64) + case default + error stop "occupancy_s(num_occupied): unsupported rank" + end select + + end procedure num_occupied + + module procedure num_bins + + select case(self%allocated_dim()) + case(1) + bins_total = size(self%occupied_1D_, kind=int64) + case(2) + bins_total = size(self%occupied_2D_, kind=int64) + case(3) + bins_total = size(self%occupied_3D_, kind=int64) + case(4)
+ bins_total = size(self%occupied_4D_, kind=int64) + case(5) + bins_total = size(self%occupied_5D_, kind=int64) + case(6) + bins_total = size(self%occupied_6D_, kind=int64) + case(7) + bins_total = size(self%occupied_7D_, kind=int64) + case(8) + bins_total = size(self%occupied_8D_, kind=int64) + case(9) + bins_total = size(self%occupied_9D_, kind=int64) + case(10) + bins_total = size(self%occupied_10D_, kind=int64) + case(11) + bins_total = size(self%occupied_11D_, kind=int64) + case(12) + bins_total = size(self%occupied_12D_, kind=int64) + case(13) + bins_total = size(self%occupied_13D_, kind=int64) + case(14) + bins_total = size(self%occupied_14D_, kind=int64) + case(15) + bins_total = size(self%occupied_15D_, kind=int64) + case default + error stop "occupancy_s(num_bins): unsupported rank" + end select + + end procedure num_bins + + module procedure allocated_dim + + associate(my_allocations => allocations(self)) + call assert(count(my_allocations)==1, "occupancy_s(allocated_dim): count(allocations(self)) == 1") + my_dim = findloc(my_allocations, .true., dim=1) + end associate + + end procedure + +end submodule occupancy_s diff --git a/demo/src/run-fpm.sh-header b/demo/src/run-fpm.sh-header index e09df076b..b0b3e167a 100644 --- a/demo/src/run-fpm.sh-header +++ b/demo/src/run-fpm.sh-header @@ -1,5 +1,5 @@ #!/bin/sh -#-- DO NOT EDIT -- created by inference-engine/setup.sh +#-- DO NOT EDIT -- created by fiats/demo/setup.sh export PKG_CONFIG_PATH fpm_arguments="" diff --git a/demo/training_configuration.json b/demo/training_configuration.json index 94cd9c49c..a01576ee5 100644 --- a/demo/training_configuration.json +++ b/demo/training_configuration.json @@ -9,5 +9,9 @@ "skip connections" : false, "nodes per layer" : [7,32,32,32,5], "activation function" : "sigmoid" + }, + "tensor names": { + "inputs" : ["potential_temperature","qv", "qc", "qr", "qs", "pressure", "temperature"], + "outputs" : ["potential_temperature","qv", "qc", "qr", "qs"] + } } diff --git a/example/print-training-configuration.F90 b/example/print-training-configuration.F90 index 36ed0983f..300d2030c 100644 --- a/example/print-training-configuration.F90 +++ b/example/print-training-configuration.F90 @@ -2,25 +2,30 @@ ! Terms of use are as specified in LICENSE.txt program print_training_configuration !!
Demonstrate how to construct and print a training_configuration_t object - use fiats_m, only : training_configuration_t, hyperparameters_t, network_configuration_t - use julienne_m, only : file_t + use fiats_m, only : training_configuration_t, hyperparameters_t, network_configuration_t, tensor_names_t + use julienne_m, only : file_t, string_t implicit none -#ifdef _CRAYFTN - type(training_configuration_t) :: training_configuration - type(file_t) :: json_file - training_configuration = training_configuration_t( & - hyperparameters_t(mini_batches=10, learning_rate=1.5, optimizer = "adam"), & - network_configuration_t(skip_connections=.false., nodes_per_layer=[2,72,2], activation_name="sigmoid")) - json_file = file_t(training_configuration%to_json()) - call json_file%write_lines() -#else +#ifndef _CRAYFTN associate(training_configuration => training_configuration_t( & - hyperparameters_t(mini_batches=10, learning_rate=1.5, optimizer = "adam"), & - network_configuration_t(skip_connections=.false., nodes_per_layer=[2,72,2], activation_name="sigmoid") & + hyperparameters_t(mini_batches=10, learning_rate=1.5, optimizer = "adam") & + ,network_configuration_t(skip_connections=.false., nodes_per_layer=[2,72,2], activation_name="sigmoid") & + ,tensor_names_t(inputs = [string_t("pressure"), string_t("temperature")], outputs = [string_t("saturated mixing ratio")]) & )) associate(json_file => file_t(training_configuration%to_json())) call json_file%write_lines() end associate end associate +#else + block + type(training_configuration_t) :: training_configuration + type(file_t) :: json_file + training_configuration = training_configuration_t( & + hyperparameters_t(mini_batches=10, learning_rate=1.5, optimizer = "adam") & + ,network_configuration_t(skip_connections=.false., nodes_per_layer=[2,72,2], activation_name="sigmoid") & + ,tensor_names_t(inputs=[string_t("pressure"), string_t("temperature")], outputs=[string_t("saturated mixing ratio")]) & + ) + json_file = file_t(training_configuration%to_json()) + call json_file%write_lines() + end block #endif end program diff --git a/example/write-read-infer.F90 b/example/write-read-infer.F90 index 1f0cbed31..704014733 100644 --- a/example/write-read-infer.F90 +++ b/example/write-read-infer.F90 @@ -57,16 +57,16 @@ subroutine write_read_query_infer(output_file_name) type(file_t) json_output_file, json_input_file type(tensor_t) inputs, outputs - print *, "Constructing an neural_network_t neural-network object from scratch." + print *, "Constructing a neural_network_t neural-network object from scratch." network = identity_network() - print *, "Converting an neural_network_t object to a file_t object." + print *, "Converting a neural_network_t object to a file_t object." json_output_file = network%to_json() - print *, "Writing an neural_network_t object to the file '"//output_file_name%string()//"' in JSON format." + print *, "Writing a neural_network_t object to the file '"//output_file_name%string()//"' in JSON format." call json_output_file%write_lines(output_file_name) - print *, "Reading an neural_network_t object from the same JSON file '"//output_file_name%string()//"'." + print *, "Reading a neural_network_t object from the same JSON file '"//output_file_name%string()//"'." json_input_file = file_t(output_file_name) print *, "Constructing a new neural_network_t object from the parameters read."
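The examples above construct a training configuration and print its JSON; the short sketch below is not part of this patch (the program name demo_tensor_names is hypothetical) and shows the complementary query path, using only entities this patch introduces: the three-component training_configuration_t constructor and the new input_names/output_names accessors.

program demo_tensor_names
  !! Usage sketch: build a training configuration with named tensors,
  !! then read the names back through the accessors added in this patch.
  use fiats_m, only : training_configuration_t, hyperparameters_t, network_configuration_t, tensor_names_t
  use julienne_m, only : string_t
  implicit none
  integer i

  associate(configuration => training_configuration_t( &
     hyperparameters_t(mini_batches=10, learning_rate=1.5, optimizer = "adam") &
    ,network_configuration_t(skip_connections=.false., nodes_per_layer=[2,72,2], activation_name="sigmoid") &
    ,tensor_names_t(inputs = [string_t("pressure"), string_t("temperature")], outputs = [string_t("saturated mixing ratio")]) &
  ))
    associate(inputs => configuration%input_names(), outputs => configuration%output_names())
      print *, "input tensors:"
      do i = 1, size(inputs)
        print *, "  " // inputs(i)%string()
      end do
      print *, "output tensors:"
      do i = 1, size(outputs)
        print *, "  " // outputs(i)%string()
      end do
    end associate
  end associate
end program

The same names round-trip through to_json and the from_json constructor, which is what the new tensor_names_test exercises further below.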
diff --git a/fpm.toml b/fpm.toml index 815ffa443..379178226 100644 --- a/fpm.toml +++ b/fpm.toml @@ -6,4 +6,4 @@ maintainer = "rouson@lbl.gov" [dependencies] assert = {git = "https://github.com/sourceryinstitute/assert", tag = "1.7.0"} -julienne = {git = "https://github.com/berkeleylab/julienne", tag = "1.3.1"} +julienne = {git = "https://github.com/berkeleylab/julienne", tag = "1.5.3"} diff --git a/src/fiats/tensor_map_m.f90 b/src/fiats/tensor_map_m.f90 index b105f4613..1343343fb 100644 --- a/src/fiats/tensor_map_m.f90 +++ b/src/fiats/tensor_map_m.f90 @@ -15,14 +15,18 @@ module tensor_map_m character(len=:), allocatable, private :: layer_ real(k), dimension(:), allocatable, private :: intercept_, slope_ contains - generic :: map_to_training_range => default_real_map_to_training_range, double_precision_map_to_training_range - procedure, private, non_overridable :: default_real_map_to_training_range, double_precision_map_to_training_range - generic :: map_from_training_range => default_real_map_from_training_range, double_precision_map_from_training_range - procedure, private, non_overridable :: default_real_map_from_training_range, double_precision_map_from_training_range - generic :: to_json => default_real_to_json, double_precision_to_json - procedure, private :: default_real_to_json, double_precision_to_json - generic :: operator(==) => default_real_equals, double_precision_equals - procedure, private :: default_real_equals, double_precision_equals + generic :: map_to_training_range => default_real_map_to_training_range , double_precision_map_to_training_range + procedure, private, non_overridable :: default_real_map_to_training_range , double_precision_map_to_training_range + generic :: map_from_training_range => default_real_map_from_training_range, double_precision_map_from_training_range + procedure, private, non_overridable :: default_real_map_from_training_range, double_precision_map_from_training_range + generic :: minima => default_real_minima , double_precision_minima + procedure, private, non_overridable :: default_real_minima , double_precision_minima + generic :: maxima => default_real_maxima , double_precision_maxima + procedure, private, non_overridable :: default_real_maxima , double_precision_maxima + generic :: to_json => default_real_to_json , double_precision_to_json + procedure, private :: default_real_to_json , double_precision_to_json + generic :: operator(==) => default_real_equals , double_precision_equals + procedure, private :: default_real_equals , double_precision_equals end type @@ -58,6 +62,30 @@ module function double_precision_from_json(lines) result(tensor_map) interface + pure module function default_real_minima(self) result(minima) + implicit none + class(tensor_map_t), intent(in) :: self + real, allocatable :: minima(:) + end function + + pure module function double_precision_minima(self) result(minima) + implicit none + class(tensor_map_t(double_precision)), intent(in) :: self + double precision, allocatable :: minima(:) + end function + + pure module function default_real_maxima(self) result(maxima) + implicit none + class(tensor_map_t), intent(in) :: self + real, allocatable :: maxima(:) + end function + + pure module function double_precision_maxima(self) result(maxima) + implicit none + class(tensor_map_t(double_precision)), intent(in) :: self + double precision, allocatable :: maxima(:) + end function + elemental module function default_real_map_to_training_range(self, tensor) result(normalized_tensor) implicit none class(tensor_map_t), 
intent(in) :: self diff --git a/src/fiats/tensor_map_s.f90 b/src/fiats/tensor_map_s.f90 index c0e504d81..4651078f2 100644 --- a/src/fiats/tensor_map_s.f90 +++ b/src/fiats/tensor_map_s.f90 @@ -15,6 +15,22 @@ tensor_map%slope_ = maxima - minima end procedure + module procedure default_real_minima + minima = self%intercept_ + end procedure + + module procedure default_real_maxima + maxima = self%intercept_ + self%slope_ + end procedure + + module procedure double_precision_minima + minima = self%intercept_ + end procedure + + module procedure double_precision_maxima + maxima = self%intercept_ + self%slope_ + end procedure + module procedure construct_double_precision call assert(size(minima)==size(maxima),"tensor_map_s(construct_double_precision): size(minima)==size(maxima)") tensor_map%layer_ = layer diff --git a/src/fiats/tensor_names_m.f90 b/src/fiats/tensor_names_m.f90 new file mode 100644 index 000000000..b1c1a9723 --- /dev/null +++ b/src/fiats/tensor_names_m.f90 @@ -0,0 +1,65 @@ +! Copyright (c), The Regents of the University of California +! Terms of use are as specified in LICENSE.txt +module tensor_names_m + use julienne_string_m, only : string_t + implicit none + + private + public :: tensor_names_t + + type tensor_names_t + private + type(string_t), allocatable :: inputs_(:), outputs_(:) + contains + procedure :: to_json + procedure :: equals + procedure :: input_names + procedure :: output_names + generic :: operator(==) => equals + end type + + interface tensor_names_t + + pure module function from_json(lines) result(tensor_names) + implicit none + class(string_t), intent(in) :: lines(:) + type(tensor_names_t) tensor_names + end function + + pure module function from_components(inputs, outputs) result(tensor_names) + implicit none + type(string_t), intent(in) :: inputs(:), outputs(:) + type(tensor_names_t) tensor_names + end function + + end interface + + interface + + pure module function to_json(self) result(lines) + implicit none + class(tensor_names_t), intent(in) :: self + type(string_t), allocatable :: lines(:) + end function + + elemental module function equals(lhs, rhs) result(lhs_equals_rhs) + implicit none + class(tensor_names_t), intent(in) :: lhs, rhs + logical lhs_equals_rhs + end function + + pure module function input_names(self) result(names) + implicit none + class(tensor_names_t), intent(in) :: self + type(string_t), allocatable :: names(:) + end function + + pure module function output_names(self) result(names) + implicit none + class(tensor_names_t), intent(in) :: self + type(string_t), allocatable :: names(:) + end function + + end interface + +end module diff --git a/src/fiats/tensor_names_s.f90 b/src/fiats/tensor_names_s.f90 new file mode 100644 index 000000000..defb9cead --- /dev/null +++ b/src/fiats/tensor_names_s.f90 @@ -0,0 +1,62 @@ +! Copyright (c), The Regents of the University of California +! Terms of use are as specified in LICENSE.txt +submodule(tensor_names_m) tensor_names_s + use assert_m, only : assert + use julienne_m, only : operator(.csv.), operator(.cat.) 
+ implicit none + + character(len=*), parameter :: inputs_key = "inputs" + character(len=*), parameter :: outputs_key = "outputs" + +contains + + module procedure from_components + tensor_names%inputs_ = inputs + tensor_names%outputs_ = outputs + end procedure + + module procedure equals + call assert( all([allocated(lhs%inputs_), allocated(rhs%inputs_), allocated(lhs%outputs_), allocated(rhs%outputs_)]) & + ,"tensor_names_s(equals): all components allocated") + lhs_equals_rhs = all(lhs%inputs_ == rhs%inputs_) .and. all(lhs%outputs_ == rhs%outputs_) + end procedure + + module procedure from_json + integer l + logical tensor_names_key_found + + tensor_names_key_found = .false. + + do l=1,size(lines) + if (lines(l)%get_json_key() == "tensor names") then + tensor_names_key_found = .true. + tensor_names%inputs_ = lines(l+1)%get_json_value(string_t(inputs_key) , mold=[string_t("")]) + tensor_names%outputs_ = lines(l+2)%get_json_value(string_t(outputs_key), mold=[string_t("")]) + return + end if + end do + + call assert(tensor_names_key_found, "tensor_names_s(from_json): tensor_names_key_found") + end procedure + + module procedure to_json + character(len=*), parameter :: indent = repeat(" ",ncopies=4) + + lines = [ & + string_t(indent // '"tensor names": {' ) & + ,indent // indent // '"inputs" : [' // .csv. self%inputs_%bracket('"') // '],' & + ,indent // indent // '"outputs" : [' // .csv. self%outputs_%bracket('"') // ']' & + ,string_t(indent // '}' ) & + ] + + end procedure + + module procedure input_names + names = self%inputs_ + end procedure + + module procedure output_names + names = self%outputs_ + end procedure + +end submodule tensor_names_s diff --git a/src/fiats/tmp b/src/fiats/tmp new file mode 100644 index 000000000..2c56ec4c1 --- /dev/null +++ b/src/fiats/tmp @@ -0,0 +1,14 @@ ++operator(==) ++infer() ++to_json() ++map_to_input_range() ++map_from_output_range() ++num_hidden_layers() ++num_inputs() ++num_outputs() ++nodes_per_layer() ++assert_conformable_with () ++skip() ++activation_function_name() ++learn() ++assert_consistency() diff --git a/src/fiats/trainable_network_m.f90 b/src/fiats/trainable_network_m.f90 index 0cb2cfc50..a085168fa 100644 --- a/src/fiats/trainable_network_m.f90 +++ b/src/fiats/trainable_network_m.f90 @@ -34,8 +34,8 @@ module function perturbed_identity_network(training_configuration, perturbation_ result(trainable_network) implicit none type(training_configuration_t), intent(in) :: training_configuration - type(string_t), intent(in) :: metadata(:) real, intent(in) :: perturbation_magnitude + type(string_t), intent(in) :: metadata(:) type(tensor_map_t) input_map, output_map type(trainable_network_t) trainable_network end function diff --git a/src/fiats/training_configuration_m.f90 b/src/fiats/training_configuration_m.f90 index eb20a30f0..ae2ca8837 100644 --- a/src/fiats/training_configuration_m.f90 +++ b/src/fiats/training_configuration_m.f90 @@ -8,6 +8,7 @@ module training_configuration_m use network_configuration_m, only : network_configuration_t use kind_parameters_m, only : default_real, double_precision use double_precision_file_m, only : double_precision_file_t + use tensor_names_m, only : tensor_names_t implicit none private @@ -17,38 +18,45 @@ integer, kind :: k = default_real type(hyperparameters_t(k)), private :: hyperparameters_ type(network_configuration_t), private :: network_configuration_ + type(tensor_names_t), private :: tensor_names_ contains - generic :: operator(==) => default_real_equals, double_precision_equals -
procedure, private :: default_real_equals, double_precision_equals - generic :: to_json => default_real_to_json, double_precision_to_json - procedure, private :: default_real_to_json, double_precision_to_json - generic :: mini_batches => default_real_mini_batches, double_precision_mini_batches - procedure, private :: default_real_mini_batches, double_precision_mini_batches - generic :: optimizer_name => default_real_optimizer_name, double_precision_optimizer_name - procedure, private :: default_real_optimizer_name, double_precision_optimizer_name - generic :: learning_rate => default_real_learning_rate, double_precision_learning_rate - procedure, private :: default_real_learning_rate, double_precision_learning_rate - generic :: differentiable_activation => default_real_differentiable_activation, double_precision_differentiable_activation - procedure, private :: default_real_differentiable_activation, double_precision_differentiable_activation - generic :: nodes_per_layer => default_real_nodes_per_layer, double_precision_nodes_per_layer - procedure, private :: default_real_nodes_per_layer, double_precision_nodes_per_layer + generic :: operator(==) => default_real_equals , double_precision_equals + procedure, private :: default_real_equals , double_precision_equals + generic :: to_json => default_real_to_json , double_precision_to_json + procedure, private :: default_real_to_json , double_precision_to_json + generic :: mini_batches => default_real_mini_batches , double_precision_mini_batches + procedure, private :: default_real_mini_batches , double_precision_mini_batches + generic :: optimizer_name => default_real_optimizer_name , double_precision_optimizer_name + procedure, private :: default_real_optimizer_name , double_precision_optimizer_name + generic :: learning_rate => default_real_learning_rate , double_precision_learning_rate + procedure, private :: default_real_learning_rate , double_precision_learning_rate + generic :: activation => default_real_activation , double_precision_activation + procedure, private :: default_real_activation , double_precision_activation + generic :: nodes_per_layer => default_real_nodes_per_layer , double_precision_nodes_per_layer + procedure, private :: default_real_nodes_per_layer , double_precision_nodes_per_layer generic :: skip_connections => default_real_skip_connections, double_precision_skip_connections procedure, private :: default_real_skip_connections, double_precision_skip_connections + generic :: input_names => default_real_input_names , double_precision_input_names + procedure, private :: default_real_input_names , double_precision_input_names + generic :: output_names => default_real_output_names , double_precision_output_names + procedure, private :: default_real_output_names , double_precision_output_names end type interface training_configuration_t - module function default_real_from_components(hyperparameters, network_configuration) result(training_configuration) + module function default_real_from_components(hyperparameters, network_configuration, tensor_names) result(training_configuration) implicit none type(hyperparameters_t), intent(in) :: hyperparameters type(network_configuration_t), intent(in) :: network_configuration type(training_configuration_t) training_configuration + type(tensor_names_t), intent(in) :: tensor_names end function - module function double_precision_from_components(hyperparameters, network_configuration) result(training_configuration) + module function double_precision_from_components(hyperparameters, 
network_configuration, tensor_names) result(training_configuration) implicit none type(hyperparameters_t(double_precision)), intent(in) :: hyperparameters type(network_configuration_t), intent(in) :: network_configuration + type(tensor_names_t), intent(in) :: tensor_names type(training_configuration_t(double_precision)) training_configuration end function @@ -128,13 +136,13 @@ elemental module function double_precision_learning_rate(self) result(rate) double precision rate end function - module function default_real_differentiable_activation(self) result(activation) + module function default_real_activation(self) result(activation) implicit none class(training_configuration_t), intent(in) :: self type(activation_t) activation end function - module function double_precision_differentiable_activation(self) result(activation) + module function double_precision_activation(self) result(activation) implicit none class(training_configuration_t(double_precision)), intent(in) :: self type(activation_t) activation @@ -164,6 +172,30 @@ elemental module function double_precision_skip_connections(self) result(using_s logical using_skip end function + pure module function default_real_input_names(self) result(input_names) + implicit none + class(training_configuration_t), intent(in) :: self + type(string_t), allocatable :: input_names(:) + end function + + pure module function double_precision_input_names(self) result(input_names) + implicit none + class(training_configuration_t(double_precision)), intent(in) :: self + type(string_t), allocatable :: input_names(:) + end function + + pure module function default_real_output_names(self) result(output_names) + implicit none + class(training_configuration_t), intent(in) :: self + type(string_t), allocatable :: output_names(:) + end function + + pure module function double_precision_output_names(self) result(output_names) + implicit none + class(training_configuration_t(double_precision)), intent(in) :: self + type(string_t), allocatable :: output_names(:) + end function + end interface end module diff --git a/src/fiats/training_configuration_s.F90 b/src/fiats/training_configuration_s.F90 index 3b982a202..f72e1a659 100644 --- a/src/fiats/training_configuration_s.F90 +++ b/src/fiats/training_configuration_s.F90 @@ -14,32 +14,38 @@ training_configuration%hyperparameters_ = hyperparameters training_configuration%network_configuration_ = network_configuration + training_configuration%tensor_names_ = tensor_names + training_configuration%file_t = file_t([ & string_t(header), & training_configuration%hyperparameters_%to_json(), & string_t(separator), & training_configuration%network_configuration_%to_json(), & + string_t(separator), & + training_configuration%tensor_names_%to_json(), & string_t(footer) & ]) + end procedure module procedure double_precision_from_components training_configuration%hyperparameters_ = hyperparameters training_configuration%network_configuration_ = network_configuration + training_configuration%tensor_names_ = tensor_names + training_configuration%file_t = file_t([ & string_t(header), & training_configuration%hyperparameters_%to_json(), & string_t(separator), & training_configuration%network_configuration_%to_json(), & + string_t(separator), & + training_configuration%tensor_names_%to_json(), & string_t(footer) & ]) end procedure module procedure default_real_from_file - integer, parameter :: hyperparameters_start=2, hyperparameters_end=6, separator_line=7 ! line numbers - integer, parameter :: net_config_start=8, net_config_end=12 ! 
line numbers - integer, parameter :: file_start=hyperparameters_start-1, file_end=net_config_end+1 ! line numbers #if defined __INTEL_COMPILER || _CRAYFTN type(string_t), allocatable :: lines(:) #endif @@ -48,35 +54,21 @@ #if defined __INTEL_COMPILER || _CRAYFTN lines = training_configuration%file_t%lines() - call assert(trim(adjustl(lines(file_start)%string()))==header, & - "training_configuration_s(default_precision_from_file): header",lines(file_start)) - training_configuration%hyperparameters_ = hyperparameters_t(lines(hyperparameters_start:hyperparameters_end)) - call assert(trim(adjustl(lines(separator_line)%string()))==separator, & - "training_configuration_s(default_precision_from_file): separator", & - lines(file_start)) - training_configuration%network_configuration_= network_configuration_t(lines(net_config_start:net_config_end)) - call assert(trim(adjustl(lines(file_end)%string()))==footer, & - "training_configuration_s(default_precision_from_file): footer", lines(file_end)) #else associate(lines => training_configuration%file_t%lines()) - call assert(trim(adjustl(lines(file_start)%string()))==header, & - "training_configuration_s(default_precision_from_file): header",lines(file_start)) - training_configuration%hyperparameters_ = hyperparameters_t(lines(hyperparameters_start:hyperparameters_end)) - call assert(trim(adjustl(lines(separator_line)%string()))==separator, & - "training_configuration_s(default_precision_from_file): separator", & - lines(file_start)) - training_configuration%network_configuration_= network_configuration_t(lines(net_config_start:net_config_end)) - call assert(trim(adjustl(lines(file_end)%string()))==footer, & - "training_configuration_s(default_precision_from_file): footer", lines(file_end)) +#endif + + training_configuration%hyperparameters_ = hyperparameters_t(lines) + training_configuration%network_configuration_= network_configuration_t(lines) + training_configuration%tensor_names_ = tensor_names_t(lines) + +#if ! (defined __INTEL_COMPILER || _CRAYFTN) end associate #endif end procedure module procedure double_precision_from_file - integer, parameter :: hyperparameters_start=2, hyperparameters_end=6, separator_line=7 ! line numbers - integer, parameter :: net_config_start=8, net_config_end=12 !
line numbers #if defined __INTEL_COMPILER || _CRAYFTN type(double_precision_string_t), allocatable :: lines(:) #endif @@ -85,40 +77,18 @@ #if defined __INTEL_COMPILER || _CRAYFTN lines = training_configuration%double_precision_file_t%double_precision_lines() - - call assert(adjustl(lines(file_start)%string()) == header, & - "training_configuration_s(double_precision_from_file): header",lines(file_start)) - - training_configuration%hyperparameters_ = hyperparameters_t(lines(hyperparameters_start:hyperparameters_end)) - - call assert(adjustl(lines(separator_line)%string()) == separator, & - "training_configuration_s(double_precision_from_file): separator", lines(file_start)) - - training_configuration%network_configuration_= network_configuration_t(lines(net_config_start:net_config_end)) - - call assert(adjustl(lines(file_end)%string()) == footer, & - "training_configuration_s(double_precision_from_file): footer", lines(file_end)) #else associate(lines => training_configuration%double_precision_file_t%double_precision_lines()) +#endif - - call assert(adjustl(lines(file_start)%string()) == header, & - "training_configuration_s(double_precision_from_file): header", lines(file_start)) - - training_configuration%hyperparameters_ = hyperparameters_t(lines(hyperparameters_start:hyperparameters_end)) - - call assert(adjustl(lines(separator_line)%string()) == separator, & - "training_configuration_s(double_precision_from_file): separator", lines(file_start)) - - training_configuration%network_configuration_= network_configuration_t(lines(net_config_start:net_config_end)) - - call assert(adjustl(lines(file_end)%string()) == footer, & - "training_configuration_s(double_precision_from_file): footer", lines(file_end)) + training_configuration%hyperparameters_ = hyperparameters_t(lines) + training_configuration%network_configuration_= network_configuration_t(lines) + training_configuration%tensor_names_ = tensor_names_t(lines) +#if ! (defined __INTEL_COMPILER || _CRAYFTN) end associate - #endif - end procedure module procedure default_real_to_json @@ -132,13 +101,15 @@ module procedure default_real_equals lhs_eq_rhs = & lhs%hyperparameters_ == rhs%hyperparameters_ .and. & - lhs%network_configuration_ == rhs%network_configuration_ + lhs%network_configuration_ == rhs%network_configuration_ .and. & + lhs%tensor_names_ == rhs%tensor_names_ end procedure module procedure double_precision_equals lhs_eq_rhs = & lhs%hyperparameters_ == rhs%hyperparameters_ .and. & - lhs%network_configuration_ == rhs%network_configuration_ + lhs%network_configuration_ == rhs%network_configuration_ .and. & + lhs%tensor_names_ == rhs%tensor_names_ end procedure module procedure default_real_mini_batches @@ -181,7 +152,7 @@ using_skip = self%network_configuration_%skip_connections() end procedure - module procedure default_real_differentiable_activation + module procedure default_real_activation #if defined __INTEL_COMPILER || _CRAYFTN type(string_t) :: activation_name activation_name = self%network_configuration_%activation_name() @@ -200,13 +171,12 @@ case default error stop 'activation_factory_s(factory): unrecognized activation name "' // activation_name%string() // '"' end select -#if defined __INTEL_COMPILER || _CRAYFTN -#else +#if !
(defined __INTEL_COMPILER || _CRAYFTN) end associate #endif end procedure - module procedure double_precision_differentiable_activation + module procedure double_precision_activation #if defined __INTEL_COMPILER || _CRAYFTN type(string_t) :: activation_name activation_name = self%network_configuration_%activation_name() @@ -225,10 +195,25 @@ case default error stop 'activation_factory_s(factory): unrecognized activation name "' // activation_name%string() // '"' end select -#if defined __INTEL_COMPILER || _CRAYFTN -#else +#if ! (defined __INTEL_COMPILER || _CRAYFTN) end associate #endif end procedure + module procedure default_real_input_names + input_names = self%tensor_names_%input_names() + end procedure + + module procedure double_precision_input_names + input_names = self%tensor_names_%input_names() + end procedure + + module procedure default_real_output_names + output_names = self%tensor_names_%output_names() + end procedure + + module procedure double_precision_output_names + output_names = self%tensor_names_%output_names() + end procedure + end submodule training_configuration_s diff --git a/src/fiats/ubounds_m.f90 b/src/fiats/ubounds_m.f90 deleted file mode 100644 index d3d9762ea..000000000 --- a/src/fiats/ubounds_m.f90 +++ /dev/null @@ -1,22 +0,0 @@ -! Copyright (c), The Regents of the University of California -! Terms of use are as specified in LICENSE.txt -module ubounds_m - !! This module serves only to support array bounds checking in the main program below - implicit none - - type ubounds_t - integer, allocatable :: ubounds_(:) - contains - procedure equals - generic :: operator(==) => equals - end type - -contains - - elemental function equals(lhs, rhs) result(lhs_equals_rhs) - class(ubounds_t), intent(in) :: lhs, rhs - logical lhs_equals_rhs - lhs_equals_rhs = all(lhs%ubounds_ == rhs%ubounds_) - end function - -end module diff --git a/src/fiats_m.f90 b/src/fiats_m.f90 index 4d09e40c7..b8ce8d625 100644 --- a/src/fiats_m.f90 +++ b/src/fiats_m.f90 @@ -13,8 +13,8 @@ module fiats_m use network_configuration_m, only : network_configuration_t use tensor_m, only : tensor_t use tensor_map_m, only : tensor_map_t + use tensor_names_m, only : tensor_names_t use trainable_network_m, only : trainable_network_t use training_configuration_m, only : training_configuration_t - use ubounds_m, only : ubounds_t implicit none end module fiats_m diff --git a/test/main.F90 b/test/main.F90 index 40026b524..b4a97fa98 100644 --- a/test/main.F90 +++ b/test/main.F90 @@ -9,6 +9,7 @@ program main use network_configuration_test_m, only : network_configuration_test_t use training_configuration_test_m, only : training_configuration_test_t use tensor_map_test_m, only : tensor_map_test_t + use tensor_names_test_m, only : tensor_names_test_t use tensor_test_m, only : tensor_test_t use julienne_m, only : command_line_t implicit none @@ -21,6 +22,7 @@ program main type(network_configuration_test_t) network_configuration_test type(training_configuration_test_t) training_configuration_test type(tensor_map_test_t) tensor_map_test + type(tensor_names_test_t) tensor_names_test type(tensor_test_t) tensor_test real t_start, t_finish @@ -46,6 +48,7 @@ program main call metadata_test%report(passes, tests) call training_configuration_test%report(passes, tests) call tensor_map_test%report(passes, tests) + call tensor_names_test%report(passes, tests) call tensor_test%report(passes, tests) call asymmetric_network_test%report(passes, tests) call neural_network_test%report(passes, tests) diff --git 
a/test/tensor_names_test.f90 b/test/tensor_names_test.f90 new file mode 100644 index 000000000..edac2869e --- /dev/null +++ b/test/tensor_names_test.f90 @@ -0,0 +1,76 @@ +! Copyright (c), The Regents of the University of California +! Terms of use are as specified in LICENSE.txt +module tensor_names_test_m + !! Test tensor_names_t object I/O and construction + + ! External dependencies + use fiats_m, only : tensor_names_t + use julienne_m, only : test_t, test_result_t, test_description_t, test_description_substring, string_t +#ifdef __GFORTRAN__ + use julienne_m, only : test_function_i +#endif + + ! Internal dependencies + use tensor_names_m, only : tensor_names_t + implicit none + + private + public :: tensor_names_test_t + + type, extends(test_t) :: tensor_names_test_t + contains + procedure, nopass :: subject + procedure, nopass :: results + end type + +contains + + pure function subject() result(specimen) + character(len=:), allocatable :: specimen + specimen = "A tensor_names_t object" + end function + + function results() result(test_results) + type(test_result_t), allocatable :: test_results(:) + type(test_description_t), allocatable :: test_descriptions(:) + +#ifndef __GFORTRAN__ + test_descriptions = [ & + test_description_t( & + string_t("component-wise construction followed by conversion to and from JSON"), & + write_then_read_tensor_names) & + ] +#else + procedure(test_function_i), pointer :: check_write_then_read_ptr + check_write_then_read_ptr => write_then_read_tensor_names + + test_descriptions = [ & + test_description_t( & + string_t("component-wise construction followed by conversion to and from JSON"), & + check_write_then_read_ptr) & + ] +#endif + associate( & + substring_in_subject => index(subject(), test_description_substring) /= 0, & + substring_in_description => test_descriptions%contains_text(string_t(test_description_substring)) & + ) + test_descriptions = pack(test_descriptions, substring_in_subject .or. substring_in_description) + end associate + test_results = test_descriptions%run() + end function + + function write_then_read_tensor_names() result(test_passes) + logical test_passes + + associate( & + from_components => tensor_names_t( & + inputs = [string_t("qc"), string_t("qv"), string_t("pressure")], & + outputs = [string_t("qc"), string_t("qv")] & + ) ) + associate(from_json => tensor_names_t(from_components%to_json())) + test_passes = from_components == from_json + end associate + end associate + end function + +end module tensor_names_test_m diff --git a/test/training_configuration_test_m.F90 b/test/training_configuration_test_m.F90 index a09dc0c09..6bf1db85c 100644 --- a/test/training_configuration_test_m.F90 +++ b/test/training_configuration_test_m.F90 @@ -4,7 +4,7 @@ module training_configuration_test_m !! Test training_configuration_t object I/O and construction ! 
External dependencies - use fiats_m, only : training_configuration_t, hyperparameters_t, network_configuration_t + use fiats_m, only : training_configuration_t, hyperparameters_t, network_configuration_t, tensor_names_t use julienne_m, only : test_t, test_result_t, test_description_t, test_description_substring, string_t, file_t #ifdef __GFORTRAN__ use julienne_m, only : test_function_i @@ -64,13 +64,16 @@ function construct_and_convert_to_and_from_json() result(test_passes) #ifdef _CRAYFTN type(training_configuration_t) :: training_configuration, from_json training_configuration = training_configuration_t( & - hyperparameters_t(mini_batches=5, learning_rate=1., optimizer = "adam"), & - network_configuration_t(skip_connections=.false., nodes_per_layer=[2,72,2], activation_name="sigmoid")) + hyperparameters_t(mini_batches=5, learning_rate=1., optimizer = "adam") & + ,network_configuration_t(skip_connections=.false., nodes_per_layer=[2,72,2], activation_name="sigmoid") & + ,tensor_names_t(inputs=[string_t("pressure"), string_t("temperature")], outputs=[string_t("saturated mixing ratio")]) & + ) from_json = training_configuration_t(file_t(training_configuration%to_json())) #else associate(training_configuration => training_configuration_t( & - hyperparameters_t(mini_batches=5, learning_rate=1., optimizer = "adam"), & - network_configuration_t(skip_connections=.false., nodes_per_layer=[2,72,2], activation_name="sigmoid") & + hyperparameters_t(mini_batches=5, learning_rate=1., optimizer = "adam") & + ,network_configuration_t(skip_connections=.false., nodes_per_layer=[2,72,2], activation_name="sigmoid") & + ,tensor_names_t(inputs=[string_t("pressure"), string_t("temperature")], outputs=[string_t("saturated mixing ratio")]) & )) associate(from_json => training_configuration_t(file_t(training_configuration%to_json()))) #endif