diff --git a/tests/warmth3d/test_3d.py b/tests/warmth3d/test_3d.py
index 5781200..3753375 100644
--- a/tests/warmth3d/test_3d.py
+++ b/tests/warmth3d/test_3d.py
@@ -5,10 +5,15 @@ import numpy as np
 from mpi4py import MPI
 import pickle
+import time
 
 def test_3d_compare():
     comm = MPI.COMM_WORLD
-    if comm.rank == 0:
+    inc = 1000
+    model_pickled = f"model-out-inc_{inc}.p"
+    if comm.rank == 0 and not os.path.isfile(model_pickled):
+        global runtime_1D_sim
+        st = time.time()
         maps_dir = Path("./docs/notebooks/data/")
         model = warmth.Model()
@@ -22,8 +27,6 @@ def test_3d_compare():
         inputs.loc[6]=[182,"182.gri",None,"Erosive"]
         model.builder.input_horizons=inputs
-
-        inc = 1000
         model.builder.define_geometry(maps_dir/"0.gri",xinc=inc,yinc=inc,fformat="irap_binary")
 
         model.builder.extract_nodes(4,maps_dir)
@@ -50,12 +53,13 @@ def test_3d_compare():
         model.parameters.tetha = 0
         model.parameters.alphav = 0
 
-        model.simulator.run(save=True,purge=True, parallel=False)
+        model.simulator.run(save=True, purge=True, parallel=True, use_mpi=(comm.size>1))
 
-        import pickle
-        pickle.dump( model, open( "model-out.p", "wb" ) )
-        model = pickle.load( open( "model-out.p", "rb" ) )
+        runtime_1D_sim = time.time() - st
+        print("Total time 1D simulations:", runtime_1D_sim)
+        pickle.dump( model, open( model_pickled, "wb" ) )
+        model = pickle.load( open( model_pickled, "rb" ) )
     try:
         os.mkdir('mesh')
     except FileExistsError:
@@ -66,41 +70,43 @@ def test_3d_compare():
         pass
     comm.Barrier()
 
-    import pickle
-    model = pickle.load( open( "model-out.p", "rb" ) )
+    model = pickle.load( open( model_pickled, "rb" ) )
     comm.Barrier()
 
-    # if comm.rank == 0:
-    #     mm2 =
-    mm2, posarr, Tarr = run_3d(model.builder,model.parameters,start_time=model.parameters.time_start,end_time=0, pad_num_nodes=2,writeout=False, base_flux=None)
-
+    mm2 = run_3d(model.builder,model.parameters,start_time=model.parameters.time_start,end_time=0, pad_num_nodes=2,writeout=False, base_flux=None)
 
-    nnx = (model.builder.grid.num_nodes_x+2*mm2.padX)
-    nny = (model.builder.grid.num_nodes_y+2*mm2.padX)
-    # hx = model.builder.grid.num_nodes_x // 2
-    # hy = model.builder.grid.num_nodes_y // 2
-    hx = nnx // 2
-    hy = nny // 2
+    comm.Barrier()
+    if comm.rank == 0:
+        nnx = (model.builder.grid.num_nodes_x+2*mm2.padX)
+        nny = (model.builder.grid.num_nodes_y+2*mm2.padX)
+        hx = nnx // 2
+        hy = nny // 2
+
+        nn = model.builder.nodes[hy-mm2.padX][hx-mm2.padX]
+        dd = nn._depth_out[:,0]
+
+        mm2_pos, mm2_temp = mm2.get_node_pos_and_temp(-1)
 
-    nn = model.builder.nodes[hy-mm2.padX][hx-mm2.padX]
-    dd = nn._depth_out[:,0]
+        node_ind = hy*nnx + hx
+        v_per_n = int(mm2_pos.shape[0]/(mm2.num_nodes))
 
-    node_ind = hy*nnx + hx
-    # v_per_n = int(mm2.mesh_vertices.shape[0]/(model.builder.grid.num_nodes_y*model.builder.grid.num_nodes_x))
-    v_per_n = int(mm2.mesh_vertices.shape[0]/(mm2.num_nodes))
+        temp_1d = np.nan_to_num(nn.temperature_out[:,0], nan=5.0)
+
+        # temp_3d_ind = np.array([ np.where([mm2.mesh_reindex==i])[1][0] for i in range(node_ind*v_per_n, (node_ind+1)*v_per_n) ] )
+        temp_3d_ind = np.array([ i for i in range(node_ind*v_per_n, (node_ind+1)*v_per_n) ] )
 
-    temp_1d = np.nan_to_num(nn.temperature_out[:,0], nan=5.0)
-    temp_3d_ind = np.array([ np.where([mm2.mesh_reindex==i])[1][0] for i in range(node_ind*v_per_n, (node_ind+1)*v_per_n) ] )
-    dd_mesh = mm2.mesh.geometry.x[temp_3d_ind,2] - mm2.sed_diff_z[range(node_ind*v_per_n, (node_ind+1)*v_per_n)]
-    temp_3d_mesh = mm2.u_n.x.array[temp_3d_ind]
+        # dd_mesh = mm2.mesh.geometry.x[temp_3d_ind,2] - mm2.sed_diff_z[range(node_ind*v_per_n, (node_ind+1)*v_per_n)]
+        dd_mesh = mm2_pos[temp_3d_ind,2] - mm2.sed_diff_z[range(node_ind*v_per_n, (node_ind+1)*v_per_n)]
+        # temp_3d_mesh = mm2.u_n.x.array[temp_3d_ind]
+        temp_3d_mesh = mm2_temp[temp_3d_ind]
 
-    temp_1d_at_mesh_pos = np.interp(dd_mesh, dd, temp_1d)
-    dd_subset = np.where(dd_mesh<5000)
+        temp_1d_at_mesh_pos = np.interp(dd_mesh, dd, temp_1d)
+        dd_subset = np.where(dd_mesh<5000)
 
-    max_abs_error = np.amax(np.abs(temp_1d_at_mesh_pos-temp_3d_mesh))
-    max_abs_error_shallow = np.amax(np.abs(temp_1d_at_mesh_pos[dd_subset]-temp_3d_mesh[dd_subset]))
+        max_abs_error = np.amax(np.abs(temp_1d_at_mesh_pos-temp_3d_mesh))
+        max_abs_error_shallow = np.amax(np.abs(temp_1d_at_mesh_pos[dd_subset]-temp_3d_mesh[dd_subset]))
 
-    print("Max error overall:", max_abs_error)
-    print("Max error <5000m:", max_abs_error_shallow)
+        print("Max error overall:", max_abs_error)
+        print("Max error <5000m:", max_abs_error_shallow)
 
     # assert (max_abs_error<25.0), "Temperature difference between 3D and 1D simulations is >25"
     # assert (max_abs_error_shallow<5.0), "Temperature difference between 3D and 1D simulations is >5 in the sediments."
@@ -109,4 +115,6 @@ def test_3d_compare():
 
 
 if __name__ == "__main__":
-    test_3d_compare()
\ No newline at end of file
+    test_3d_compare()
+    if 'runtime_1D_sim' in globals():
+        print("Total time 1D simulations:", runtime_1D_sim)
\ No newline at end of file
diff --git a/warmth/mesh_model.py b/warmth/mesh_model.py
index 1e80db0..258214c 100644
--- a/warmth/mesh_model.py
+++ b/warmth/mesh_model.py
@@ -52,6 +52,9 @@ def __init__(self, builder:Builder,parameters:Parameters, sedimentsOnly = False,
         self.averageLABdepth = 260000
         self.runSedimentsOnly = sedimentsOnly
+        self.posarr = []
+        self.Tarr = []
+        self.time_indices = []
 
         # 2 6 6 6
         self.numElemPerSediment = 2
@@ -168,8 +171,64 @@ def boundary(x):
                 cond_per_cell, rhp_per_cell, lid_per_cell)
         return filename
 
+    def send_mpi_messages(self):
+        comm = MPI.COMM_WORLD
+        comm.send(self.mesh.topology.index_map(0).local_to_global(list(range(self.mesh.geometry.x.shape[0]))) , dest=0, tag=((comm.rank-1)*10)+21)
+        comm.send(self.mesh_reindex, dest=0, tag=((comm.rank-1)*10)+23)
+        comm.send(self.mesh_vertices_age, dest=0, tag=((comm.rank-1)*10)+25)
+        comm.send(self.posarr, dest=0, tag=( (comm.rank-1)*10)+20)
+        comm.send(self.Tarr, dest=0, tag=( (comm.rank-1)*10)+24)
+
+    def receive_mpi_messages(self):
+        comm = MPI.COMM_WORLD
+        import time
+        st = time.time()
+
+        self.sub_posarr_s = [self.posarr]
+        self.sub_Tarr_s = [self.Tarr]
+
+        self.index_map_s = [self.mesh.topology.index_map(0).local_to_global(list(range(self.mesh.geometry.x.shape[0])))]
+        self.mesh_reindex_s = [self.mesh_reindex]
+        self.mesh_vertices_age_s = [self.mesh_vertices_age]
+
+        for i in range(1,comm.size):
+            self.index_map_s.append(comm.recv(source=i, tag=((i-1)*10)+21))
+            self.mesh_reindex_s.append(comm.recv(source=i, tag=((i-1)*10)+23))
+            self.mesh_vertices_age_s.append(comm.recv(source=i, tag=((i-1)*10)+25))
+            self.sub_posarr_s.append(comm.recv(source=i, tag=((i-1)*10)+20))
+            self.sub_Tarr_s.append(comm.recv(source=i, tag=((i-1)*10)+24))
+
+        nv = np.amax(np.array( [np.amax(index_map) for index_map in self.index_map_s ] )) + 1   # no. vertices/nodes
+        mri = np.arange( nv, dtype=np.int32)
 
-    def write_hexa_mesh_resqml( self, out_path):
+        self.x_original_order = [ (np.ones( [nv,3], dtype= np.float32) * -1) for _ in range(len(self.posarr)) ]
+        self.T_per_vertex = [ (np.ones( nv, dtype= np.float32) * -1) for _ in range(len(self.Tarr)) ]
+        self.age_per_vertex = np.ones( nv, dtype= np.int32) * -1
+
+        for k in range(len(self.mesh_reindex_s)):
+            for ind,val in enumerate(self.mesh_reindex_s[k]):
+                for i in range(len(self.posarr)):
+                    self.x_original_order[i][val,:] = self.sub_posarr_s[k][i][ind,:]
+                    self.T_per_vertex[i][val] = self.sub_Tarr_s[k][i][ind]
+                self.age_per_vertex[val] = self.mesh_vertices_age_s[k][ind]
+
+        delta = time.time() - st
+        print("receive_mpi_messages delta", delta)
+
+    def get_node_pos_and_temp(self, tti=-1):
+        #
+        # This function can only be called after all MPI compute processes have finished and receive_mpi_messages has been called.
+        #
+        start_time = self.time_indices[0]
+        end_time = self.time_indices[-1]
+        ind = start_time - ( tti if (tti>=0) else end_time)
+        if (ind >= len(self.x_original_order)):
+            return None,None
+        if (ind < 0):
+            return None,None
+        return self.x_original_order[ind], self.T_per_vertex[ind]
+
+    def write_hexa_mesh_resqml( self, out_path, tti):
         """Prepares arrays and calls the RESQML output helper function for hexa meshes: the lith and aesth are removed, and the remaining
            vertices and cells are renumbered; the sediment properties are prepared for output.
@@ -177,11 +236,8 @@ def write_hexa_mesh_resqml( self, out_path):
 
            returns the filename (of the .epc file) that was written
         """
-        x_original_order = self.mesh.geometry.x[:].copy()
-        reverse_reindex_order = np.arange( self.mesh_vertices.shape[0] )
-        for ind,val in enumerate(self.mesh_reindex):
-            x_original_order[val,:] = self.mesh.geometry.x[ind,:]
-            reverse_reindex_order[val] = ind
+        comm = MPI.COMM_WORLD
+        nv = np.amax(np.array([np.amax(index_map) for index_map in self.index_map_s])) +1   # no. vertices/nodes
 
         hexaHedra, hex_data_layerID, hex_data_nodeID = self.buildHexahedra(keep_padding=False)
         hexa_to_keep = []
@@ -189,6 +245,7 @@
         lid_to_keep = []
         cond_per_cell = []
         cell_id_to_keep = []
+        x_original_order, T_per_vertex = self.get_node_pos_and_temp(tti)
         for i,h in enumerate(hexaHedra):
             lid0 = hex_data_layerID[i]
             #
@@ -208,11 +265,11 @@
                 # if (pp<0.7) and lid0>=0:
                 #     print("weird phi: ", pp, minY, self.node1D[hex_data_nodeID[i]].Y, abs( self.node1D[hex_data_nodeID[i]].Y - minY), i, hex_data_nodeID[i])
                 #     breakpoint()
-                k_cond_mean = []
+                # k_cond_mean = []
                 for hi in h:
                     p_to_keep.add(hi)
-                    k_cond_mean.append(self.thermalCond.x.array[hi])
-                cond_per_cell.append( np.mean(np.array(k_cond_mean)))
+                    # k_cond_mean.append(self.thermalCond.x.array[hi])   # the actual, Sekiguchi-derived conductivities
+                # cond_per_cell.append( np.mean(np.array(k_cond_mean)))
 
         poro0_per_cell = np.array( [ self.getSedimentPropForLayerID('phi', lid,cid) for lid,cid in zip(lid_to_keep,cell_id_to_keep) ] )
         decay_per_cell = np.array( [ self.getSedimentPropForLayerID('decay', lid,cid) for lid,cid in zip(lid_to_keep,cell_id_to_keep) ])
@@ -222,29 +279,30 @@
         lid_per_cell = np.array(lid_to_keep)
 
         points_cached = []
-        point_original_to_cached = np.ones(self.mesh.geometry.x.shape[0], dtype = np.int32) * (-1)
-        for i in range(self.mesh.geometry.x.shape[0]):
+        point_original_to_cached = np.ones(nv, dtype = np.int32) * (-1)
+        for i in range(nv):
             if (i in p_to_keep):
                 point_original_to_cached[i] = len(points_cached)
                 points_cached.append(x_original_order[i,:])
 
         hexa_renumbered = [ [point_original_to_cached[i] for i in hexa] for hexa in hexa_to_keep ]
 
-        for i,h in enumerate(hexa_renumbered):
-            minY = np.amin(np.array ( [np.array(points_cached)[hi,1] for hi in h] ))
-            poro0 = poro0_per_cell[i]
-            lid0 = lid_to_keep[i]
+        # for i,h in enumerate(hexa_renumbered):
+        #     minY = np.amin(np.array ( [np.array(points_cached)[hi][1] for hi in h] ))
+        #     poro0 = poro0_per_cell[i]
+        #     lid0 = lid_to_keep[i]
 
-        T_per_vertex = [ self.uh.x.array[reverse_reindex_order[i]] for i in range(self.mesh.geometry.x.shape[0]) if i in p_to_keep ]
-        age_per_vertex = [ self.mesh_vertices_age[reverse_reindex_order[i]] for i in range(self.mesh.geometry.x.shape[0]) if i in p_to_keep ]
+        T_per_vertex_keep = [ T_per_vertex[i] for i in range(nv) if i in p_to_keep ]
+        age_per_vertex_keep = [ self.age_per_vertex[i] for i in range(nv) if i in p_to_keep ]
 
         from os import path
-        filename_hex = path.join(out_path, self.modelName+'_hexa_'+str(self.tti)+'.epc')
+        filename_hex = path.join(out_path, self.modelName+'_hexa_'+str(tti)+'.epc')
         write_hexa_grid_with_properties(filename_hex, np.array(points_cached), hexa_renumbered, "hexamesh",
-            np.array(T_per_vertex), np.array(age_per_vertex), poro0_per_cell, decay_per_cell, density_per_cell,
+            np.array(T_per_vertex_keep), np.array(age_per_vertex_keep), poro0_per_cell, decay_per_cell, density_per_cell,
             cond_per_cell, rhp_per_cell, lid_per_cell)
         return filename_hex
 
-    def write_hexa_mesh_timeseries( self, out_path, posarr, Tarr):
+
+    def write_hexa_mesh_timeseries( self, out_path):
         """Prepares arrays and calls the RESQML output helper function for hexa meshes: the lith and aesth are removed, and the remaining
            vertices and cells are renumbered; the sediment properties are prepared for output.
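Note on the new gather step above: rather than every rank writing output, each worker rank ships its mesh partition (global index map, reindex array, vertex ages, and the per-timestep posarr/Tarr snapshots) to rank 0 over tagged point-to-point messages, and rank 0 reassembles full-mesh arrays that get_node_pos_and_temp then indexes by time step. A minimal sketch of that pattern outside the warmth API (the names here are illustrative, not from the codebase):

```python
# Sketch of the rank-0 gather used by send_mpi_messages/receive_mpi_messages.
# Tag layout mirrors the diff: worker rank r sends with tag (r-1)*10 + k.
from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
local_temps = np.full(4, float(comm.rank))   # stand-in for a rank's Tarr slice

if comm.rank == 0:
    pieces = [local_temps]                    # rank 0 keeps its own slice
    for r in range(1, comm.size):
        pieces.append(comm.recv(source=r, tag=(r - 1) * 10 + 24))
    print(np.concatenate(pieces))             # full, reassembled array
else:
    comm.send(local_temps, dest=0, tag=(comm.rank - 1) * 10 + 24)
```

With a single array per rank, `comm.gather(local_temps, root=0)` would do the same in one call; the explicit tags only pay off because five differently-typed payloads are interleaved per rank.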
@@ -252,11 +310,6 @@ def write_hexa_mesh_timeseries( self, out_path, posarr, Tarr):
 
            returns the filename (of the .epc file) that was written
         """
-        x_original_order = self.mesh.geometry.x[:].copy()
-        reverse_reindex_order = np.arange( self.mesh_vertices.shape[0] )
-        for ind,val in enumerate(self.mesh_reindex):
-            x_original_order[val,:] = self.mesh.geometry.x[ind,:]
-            reverse_reindex_order[val] = ind
 
         hexaHedra, hex_data_layerID, hex_data_nodeID = self.buildHexahedra(keep_padding=False)
         hexa_to_keep = []
@@ -272,12 +325,7 @@ def write_hexa_mesh_timeseries( self, out_path, posarr, Tarr):
             if (lid0>=-1) and (lid0<100):
                 hexa_to_keep.append(h)
                 lid_to_keep.append(lid0)
-                # cell_id_to_keep.append(self.node_index[i])
                 cell_id_to_keep.append(hex_data_nodeID[i])
-                # minY = np.amin(np.array ( [x_original_order[hi,1] for hi in h] ))
-                # if abs( self.node1D[hex_data_nodeID[i]].Y - minY)>1:
-                #     print("weird Y:", minY, self.node1D[hex_data_nodeID[i]].Y, abs( self.node1D[hex_data_nodeID[i]].Y - minY), i, hex_data_nodeID[i])
-                #     breakpoint()
                 # k_cond_mean = []
                 for hi in h:
                     p_to_keep.add(hi)
@@ -292,31 +340,20 @@ def write_hexa_mesh_timeseries( self, out_path, posarr, Tarr):
         # lid_per_cell = np.array(lid_to_keep)
 
         points_cached_series = []
-        for j in range(len(posarr)):
-            x_original_order = posarr[j].copy()
-            # reverse_reindex_order = np.arange( self.mesh_vertices.shape[0] )
-            for ind,val in enumerate(self.mesh_reindex):
-                x_original_order[val,:] = posarr[j][ind,:]
+        Temp_per_vertex_series = []
+
+        for tti in self.time_indices:
+            x_original_order, T_per_vertex = self.get_node_pos_and_temp(tti)
+            T_per_vertex_filt = [ T_per_vertex[i] for i in range(x_original_order.shape[0]) if i in p_to_keep ]
+            Temp_per_vertex_series.append(np.array(T_per_vertex_filt))
             points_cached = []
-            point_original_to_cached = np.ones(self.mesh.geometry.x.shape[0], dtype = np.int32) * (-1)
-            for i in range(self.mesh.geometry.x.shape[0]):
+            point_original_to_cached = np.ones(x_original_order.shape[0], dtype = np.int32) * (-1)
+            for i in range(x_original_order.shape[0]):
                 if (i in p_to_keep):
                     point_original_to_cached[i] = len(points_cached)
                     points_cached.append(x_original_order[i,:])
             points_cached_series.append(np.array(points_cached))
 
         hexa_renumbered = [ [point_original_to_cached[i] for i in hexa] for hexa in hexa_to_keep ]
-
-        # for i,h in enumerate(hexa_renumbered):
-        #     minY = np.amin(np.array ( [np.array(points_cached)[hi,1] for hi in h] ))
-        #     poro0 = poro0_per_cell[i]
-        #     lid0 = lid_to_keep[i]
-
-        Temp_per_vertex_series = []
-        for j in range(len(Tarr)):
-            # T_per_vertex = [ self.uh.x.array[reverse_reindex_order[i]] for i in range(self.mesh.geometry.x.shape[0]) if i in p_to_keep ]
-            T_per_vertex = [ Tarr[j][reverse_reindex_order[i]] for i in range(self.mesh.geometry.x.shape[0]) if i in p_to_keep ]
-            Temp_per_vertex_series.append(np.array(T_per_vertex))
-        # age_per_vertex = [ self.mesh_vertices_age[reverse_reindex_order[i]] for i in range(self.mesh.geometry.x.shape[0]) if i in p_to_keep ]
 
         from os import path
         filename_hex = path.join(out_path, self.modelName+'_hexa_ts_'+str(self.tti)+'.epc')
@@ -333,7 +370,6 @@ def heatflow_at_crust_sed_boundary(self):
                 # first_ind_in_crust = v_per_n - self.numElemInAsth - self.numElemInLith - self.numElemInCrust
                 node_ind = hy*self.num_nodes_x + hx
-                # nn = model.builder.nodes[hy][hx]
                 nn = self.node1D[node_ind]
                 temp_3d_ind = np.array([ np.where([self.mesh_reindex==i])[1][0] for i in range(node_ind*v_per_n+ind_base_of_sed, node_ind*v_per_n+ind_base_of_sed+2 ) ] )
                 dd = self.mesh.geometry.x[temp_3d_ind,2]
@@ -492,12 +528,7 @@ def buildVertices(self, time_index=0, useFakeEncodedZ=False, optimized=False):
         import time
         compare = False
-        # if (self.mesh_vertices is not None):
         if (optimized) and hasattr(self, 'mesh_vertices'):
-            # self.numElemPerSediment = 1
-            # self.numElemInCrust = 0 if self.runSedimentsOnly else 1   # split crust hexahedron into pieces
-            # self.numElemInLith = 0 if self.runSedimentsOnly else 1   # split lith hexahedron into pieces
-            # self.numElemInAsth = 0 if self.runSedimentsOnly else 1   # split asth hexahedron into pieces
             compare = True
             xxT = self.top_sed_at_nodes[:,tti]
             bc = self.base_crust_at_nodes[:,tti]
@@ -517,12 +548,6 @@ def buildVertices(self, time_index=0, useFakeEncodedZ=False, optimized=False):
                 zpos = xxT + base_of_current_sediments
                 aa = np.concatenate([aa,np.array([zpos])])
-
-            # for k in range(self.numberOfSediments):
-            #     aa = np.concatenate([aa,xxT+np.array([self.bottom_sed_id_at_nodes[k][:,tti]])])
-
-            # aa = np.concatenate([aa,np.array([bc])])
-            # zp = base_of_last_sediments+ (base_crust-base_of_last_sediments)*(i/self.numElemInCrust)
 
             base_of_last_sediments = (xxT+self.bottom_sed_id_at_nodes[-1][:,tti]) if (len(self.bottom_sed_id_at_nodes)>0) else xxT
@@ -540,8 +565,6 @@ def buildVertices(self, time_index=0, useFakeEncodedZ=False, optimized=False):
                 zp = bl+ (ba-bl)*(i/self.numElemInLith)
                 aa = np.concatenate([aa,np.array([zp])])
-            # aa = np.concatenate([aa,np.array([bl])])
-            # aa = np.concatenate([aa,np.array([ba])])
             new_z_pos_0 = np.transpose(aa).flatten()
             mm0 = self.mesh_vertices.copy()
             mm0[:,2] = new_z_pos_0
@@ -580,46 +603,20 @@ def buildVertices(self, time_index=0, useFakeEncodedZ=False, optimized=False):
             st = time.time()
             if not self.runSedimentsOnly:
                 base_of_last_sediments = bottom_sed_id(node, self.numberOfSediments-1, tti) if (self.numberOfSediments>0) else top_of_sediments
-                # base_crust = mean_top_of_lith
-                # base_crust = self.getTopOfLithAtNode(tti, node)
-                # top_crust(nn,tti) + thick_crust(nn,tti)
-                # def top_crust(nn, tti):
-                #     if (tti > nn.subsidence.shape[0]-1):
-                #         return 0.0
-                #     return nn.subsidence[tti] + nn.sed_thickness_ls[tti]
-                # def top_sed(nn:single_node, tti):
-                #     if (tti > nn.subsidence.shape[0]-1):
-                #         return 0.0
-                #     return nn.subsidence[tti]
-                # def thick_crust(nn, tti):
-                #     if (tti > nn.crust_ls.shape[0]-1):
-                #         return 0.0
-                #     return nn.crust_ls[tti]
                 base_crust = node.subsidence[tti] + node.sed_thickness_ls[tti] + node.crust_ls[tti]
-                # base_crust = 30000
-                # if (ind==0):
-                #     print("0", type(node.subsidence))
-                #     print("1", type(node.sed_thickness_ls))
-                #     print("2", type(node.crust_ls))
-                #     print("3", type(node.lith_ls))
                 for i in range(1,self.numElemInCrust+1):
                     self.mesh_vertices_0.append( [ node.X, node.Y, base_of_last_sediments+ (base_crust-base_of_last_sediments)*(i/self.numElemInCrust) ] )
                     self.sed_diff_z.append(0.0)
                     self.mesh_vertices_age_unsorted.append(1000)
-                # base_lith = mean_top_of_asth
-                # base_lith = self.getTopOfAsthAtNode(tti, node)
                 base_lith = node.crust_ls[tti]+node.lith_ls[tti]+node.subsidence[tti]+node.sed_thickness_ls[tti]
-                # base_lith = 100000
                 for i in range(1,self.numElemInLith+1):
                     self.mesh_vertices_0.append( [ node.X, node.Y, base_crust+ (base_lith-base_crust)*(i/self.numElemInLith) ] )
                     self.sed_diff_z.append(0.0)
                     self.mesh_vertices_age_unsorted.append(1000)
-                base_aest = base_lith+130000   # 260000
+                base_aest = base_lith+130000
                 for i in range(1,self.numElemInAsth+1):
                     self.mesh_vertices_0.append( [ node.X, node.Y, base_lith+(base_aest-base_lith)*(i/self.numElemInAsth) ] )
                     self.sed_diff_z.append(0.0)
@@ -630,17 +627,11 @@ def buildVertices(self, time_index=0, useFakeEncodedZ=False, optimized=False):
         assert len(self.mesh_vertices_0) % self.num_nodes ==0
         self.mesh_vertices_0 = np.array(self.mesh_vertices_0)
-        # if (compare):
-        #     print("diff", np.amax(np.abs(self.mesh_vertices_0-mm0)) )
-        #     breakpoint()
         self.sed_diff_z = np.array(self.sed_diff_z)
         self.mesh_vertices = self.mesh_vertices_0.copy()
         self.mesh_vertices[:,2] = self.mesh_vertices_0[:,2] + self.sed_diff_z
         if (useFakeEncodedZ):
             self.mesh_vertices[:,2] = np.ceil(self.mesh_vertices[:,2])*1000 + np.array(list(range(self.mesh_vertices.shape[0])))*0.01
-            # zz = self.mesh_vertices[:,2].copy()
-            # zz2=np.mod(zz,1000)
-            # # mesh_reindex = (1e-4+zz2*10).astype(np.int32)
 
     def updateVertices(self):
         """Update the mesh vertex positions using the values in self.mesh_vertices, and using the known dolfinx-induced reindexing
@@ -649,7 +640,9 @@ def updateVertices(self):
         self.mesh_vertices_age = np.array(self.mesh_vertices_age_unsorted)[self.mesh_reindex].copy()
         self.mesh0_geometry_x = self.mesh.geometry.x.copy()
         self.mesh_vertices[:,2] = self.mesh_vertices_0[:,2] - self.sed_diff_z
-        self.updateTopVertexMap()
+
+        if hasattr(self, 'top_sed_at_nodes'):
+            self.updateTopVertexMap()
         if self.runSedimentsOnly or (self.useBaseFlux):
             self.updateBottomVertexMap()
         self.mesh_vertices[:,2] = self.mesh_vertices_0[:,2] + self.sed_diff_z
@@ -664,7 +657,6 @@ def buildMesh(self,tti:int):
         logger.info("Built mesh")
         self.updateMesh(tti)
         logger.info("Updated vertices")
-
 
     def updateMesh(self,tti:int, optimized=False):
@@ -673,14 +665,10 @@
         import time
         assert self.mesh is not None
         self.tti = tti
-        st = time.time()
         self.buildVertices(time_index=tti, useFakeEncodedZ=False, optimized=optimized)
-        delta = time.time() - st
-        print("updatemesh delta 1", delta)
-        st = time.time()
         self.updateVertices()
-        delta = time.time() - st
-        print("updatemesh delta 2", delta)
+        self.posarr.append(self.mesh.geometry.x.copy())
+        self.time_indices.append(self.tti)
 
     def buildHexahedra(self, keep_padding=True):
         xpnum = self.num_nodes_x
@@ -756,8 +744,6 @@ def constructMesh(self):
             lid_per_node.append(-3)
         assert len(lid_per_node) == v_per_n
 
-        # breakpoint()
-
         cells = []
         cell_data_layerID = []
         node_index = []
@@ -765,7 +751,6 @@
         for h,lid,nid in zip(hexaHedra, hex_data_layerID, hex_data_nodeID):
             for t in tetsplit0:
                 candidate_tet_ind = [ h[k] for k in t ]
-                # candidate_tet_pos = np.array(self.mesh_vertices[candidate_tet_ind,:] )
                 cells.append(candidate_tet_ind)
                 cell_data_layerID.append(lid)
                 node_index.append(nid)
@@ -781,7 +766,7 @@
         )
 
         def mpi_print(s):
-            print(f"Rank {comm.rank}: {s}")
+            logger.info(f"Rank {comm.rank}: {s}")
 
         fn = self.modelName+"_mesh.xdmf"
         if comm.rank==0:
             mesh.write( fn )
@@ -790,7 +775,7 @@ def mpi_print(s):
         enc = dolfinx.io.XDMFFile.Encoding.HDF5
         with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fn, "r", encoding=enc) as file:   # MPI.COMM_SELF
             self.mesh = file.read_mesh(name="Grid", ghost_mode=dolfinx.cpp.mesh.GhostMode.shared_facet)
-            aa=file.read_meshtags(self.mesh, name="Grid")
+            aa = file.read_meshtags(self.mesh, name="Grid")
             self.cell_data_layerID = np.floor(aa.values.copy()*1e-7)-3
             self.node_index = np.mod(aa.values.copy(),1e7).astype(np.int32)
             self.mesh.topology.create_connectivity(3,0)   # create_connectivity_all()
@@ -799,38 +784,34 @@ def mpi_print(s):
         mpi_print(f"Number of local vertices: {self.mesh.topology.index_map(0).size_local}")
         mpi_print(f"Ghost cells (global numbering): {self.mesh.topology.index_map(3).ghosts}")
         mpi_print(f"Ghost nodes (global numbering): {self.mesh.topology.index_map(0).ghosts}")
-        # mpi_print("Cell (dim = 2) to vertex (dim = 0) connectivity")
-        # mpi_print(self.mesh.topology.connectivity(3, 0))
-
-        # obtain original vertex order as encoded in z-pos digits
+        mpi_print(f"Dir local cells: {type(self.mesh.topology.index_map(3))} {dir(self.mesh.topology.index_map(3))}")
+        mpi_print(f"Dir local verts: {dir(self.mesh.topology.index_map(0))}")   # local_to_global ! ?
+        mpi_print(f"Type local verts: {type(self.mesh.topology.index_map(0))}")
+
+        # obtain original vertex order as encoded in z-pos digits
         zz = self.mesh.geometry.x[:,2].copy()
         zz2 = np.mod(zz,1000)
        self.mesh_reindex = (1e-4+zz2*100).astype(np.int32)
         self.mesh0_geometry_x = self.mesh.geometry.x.copy()
-        # breakpoint()
-
     def TemperatureGradient(self, x):
         self.averageLABdepth = np.mean(np.array([ top_asth(n, self.tti) for n in self.node1D]))
-        Zmin, Zmax = np.amin(x[2,:]), np.amax(x[2,:])
-        nz = (x[2,:] - Zmin) / (self.averageLABdepth - Zmin)
+
+        nz = (x[2,:] - self.Zmin) / (self.averageLABdepth - self.Zmin)
         nz[nz>1.0] = 1.0
         res = nz * (self.TempBase-self.Temp0) + self.Temp0
         for i in range(x.shape[1]):
             p = x[:,i]
             fkey = self.floatKey2D(p+[2e-2, 2e-2,0.0])
             dz = UniformNodeGridFixedSizeMeshModel.point_top_vertex_map.get(fkey, 1e10)
-            Zmin0 = dz if (dz<1e9) else np.amin(x[2,:])
+            Zmin0 = dz if (dz<1e9) else self.Zmin
             nz0 = (p[2] - Zmin0) / (self.averageLABdepth - Zmin0)
             nz0 = min(nz0, 1.0)
             res[i] = nz0 * (self.TempBase-self.Temp0) + self.Temp0
-            if (p[2]>130000):
+            if (p[2] > self.averageLABdepth):
                 res[i] = 1330 + (p[2]-self.averageLABdepth)*0.0003   # 1369
-
-        # Zmax = np.amax(x[2,:])
-        # res[x[2,:] ...
@@ ... @@
-        #error_cond = np.amax( np.abs(self.thermalCond.x.array[self.layerIDsFcn.x.array[:]>=0]-conductivity_effective[self.layerIDsFcn.x.array[:]>=0]) )
-        #error_poro = np.amax( np.abs(self.mean_porosity.x.array[:]-mean_porosity) )
-        # print("error cond.", np.amax( np.abs(self.thermalCond.x.array[self.layerIDsFcn.x.array[:]>=0]-conductivity_effective[self.layerIDsFcn.x.array[:]>=0]) ))
-        # print("error poro ", np.amax( np.abs(self.mean_porosity.x.array[:]-mean_porosity) ))
-        # if (np.isnan(error_cond)):
-        #     breakpoint()
-        # if (np.isnan(error_poro)):
-        #     breakpoint()
-        # if (error_cond>0.1):
-        #     breakpoint()
         self.thermalCond.x.array[self.layerIDsFcn.x.array[:]>=0] = conductivity_effective[self.layerIDsFcn.x.array[:]>=0]
         self.mean_porosity.x.array[self.layerIDsFcn.x.array[:]>=0] = mean_porosity[self.layerIDsFcn.x.array[:]>=0]
         newrho = self._parameters.cp * np.multiply( \
             ((self.c_rho0.x.array[:]/self._parameters.cp)), (1-mean_porosity)) + mean_porosity*self._parameters.rhowater
-        #self.c_rho.x.array[:] = self._parameters.cp * np.multiply( \
-        #    ((self.c_rho0.x.array[:]/self._parameters.cp)), (1-mean_porosity)) + mean_porosity*self._parameters.rhowater
         self.c_rho.x.array[self.layerIDsFcn.x.array[:]>=0] = newrho[self.layerIDsFcn.x.array[:]>=0]
-        # self.rhpFcn.x.array[:] = np.multiply( self.rhp0.x.array[:], (1.0-self.mean_porosity.x.array[:]) )
-        # self.rhpFcn.x.array[:] = self.rhp0.x.array
-        # self.rhpFcn.x.array[:] = np.multiply( self.rhp0.x.array[:], 1.0 )
 
     def getCellMidpoints(self):
         def boundary(x):
@@ -1009,7 +926,6 @@ def boundary(x):
         c_rho.x.array[:] = np.array(crs, dtype=PETSc.ScalarType).flatten()
         self.c_rho0.x.array[:] = np.array(crs, dtype=PETSc.ScalarType).flatten()
-        # self.node_index[i]
         poro = [ self.porosity0ForLayerID(lid, self.node_index[i])[0] for i,lid in enumerate(ls)]
         self.porosity0.x.array[:] = np.array(poro, dtype=PETSc.ScalarType).flatten()
         self.porosityAtDepth.x.array[:] = np.array(poro, dtype=PETSc.ScalarType).flatten()
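The mesh_reindex recovery above leans on the z-digit trick from buildVertices(useFakeEncodedZ=True): each vertex's original index is hidden in the fractional digits of a fake z coordinate, survives dolfinx's partitioning and reordering, and is decoded with mod-1000 arithmetic in constructMesh. A self-contained round trip (illustrative values; it assumes fewer than 100000 vertices so index*0.01 stays below the modulus):

```python
# Encode: z -> ceil(z)*1000 + index*0.01      (as in buildVertices)
# Decode: index = (1e-4 + mod(z,1000)*100)    (as in constructMesh)
import numpy as np

z = np.array([-1234.5, 0.0, 871.25, 15000.0])   # true vertex depths
idx = np.arange(z.shape[0], dtype=np.int32)

encoded = np.ceil(z) * 1000 + idx * 0.01        # fake z carrying index digits
perm = np.random.permutation(idx)               # dolfinx-style reshuffle

decoded = (1e-4 + np.mod(encoded[perm], 1000) * 100).astype(np.int32)
assert np.array_equal(decoded, perm)            # original order recovered
```

The 1e-4 offset guards against the float rounding that the mod-1000 step introduces at large |z|.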
@@ -1047,20 +963,17 @@ def boundary(x):
 
     def updateTopVertexMap(self):
         """ Updates the point_top_vertex_map, used for fast lookup of subsidence values.
-            (to be re-designed?)
         """
         UniformNodeGridFixedSizeMeshModel.point_top_vertex_map = {}
         v_per_n = int(len(self.mesh_vertices) / self.num_nodes)
-        if not self.runSedimentsOnly:
-            indices = np.where( np.mod(self.mesh_reindex,v_per_n)==0)[0]
-        else:
-            indices = range(self.mesh.geometry.x.shape[0])
-        for i in indices:
-            p = self.mesh.geometry.x[i,:]
-            fkey = self.floatKey2D(p+[2e-2, 2e-2,0.0])
-            dz = UniformNodeGridFixedSizeMeshModel.point_top_vertex_map.get(fkey, 1e10)
-            if p[2] ...
@@ ... @@
-            print("subs0", self.tti, subs0.shape, subs0[np.abs(subs0)>1].shape, subs0[np.abs(subs0)>1] )
-            #print("subs0", self.tti, subs0.shape, subs0[np.abs(subs0)>1].shape, subs0[np.abs(subs0)>1] )
             xx = np.logical_or( np.abs(x[2]-subs0) < 0.9*self.minimumCellThick, np.isclose(x[2], 1e6*self.Zmax) )
             return xx
 
         if (self.useBaseFlux):
             st = time.time()
             dofs_D = dolfinx.fem.locate_dofs_geometrical(self.V, boundary_D_top)
-            print("dofs_D", self.tti, dofs_D.shape)
-            print("delta BC 2 ", time.time()-st)
+            print("buildDirichletBC delta 2 ", time.time()-st)
         else:
             st = time.time()
             dofs_D = dolfinx.fem.locate_dofs_geometrical(self.V, boundary_D_top_bottom)
-            print("delta BC 3 ", time.time()-st)
+            dofs_D2 = dolfinx.fem.locate_dofs_geometrical(self.V, boundary_D_bottom)
+            dofs_D3 = dolfinx.fem.locate_dofs_geometrical(self.V, boundary_D_top)
+            print("buildDirichletBC delta 3 ", time.time()-st)
         u_bc = dolfinx.fem.Function(self.V)
         st = time.time()
         u_bc.interpolate(self.TemperatureGradient)
-        print("delta BC 4 ", time.time()-st)
+        print("buildDirichletBC delta 4 ", time.time()-st)
         bc = dolfinx.fem.dirichletbc(u_bc, dofs_D)
         return bc
@@ -1160,121 +1077,112 @@ def writeOutputFunctions(self, outfilename, tti=0):
             xdmf.write_function(self.layerIDsFcn, tti)
             xdmf.write_function(self.u_n, tti)
 
-    def setupSolverAndSolve(self, n_steps:int=100, time_step:int=-1, skip_setup:bool = False, initial_state_model = None):
-        """ Sets up the function spaces, output functions, input function (kappa values), boundary conditions, initial conditions.
-            Sets up the heat equation in dolfinx, and solves the system in time for the given number of steps.
-
-            Use skip_setup = True to continue a computation (e.g. after deforming the mesh), instead of starting one from scratch
-        """
+    def setupSolver(self, initial_state_model = None):
         comm = MPI.COMM_WORLD
         def mpi_print(s):
             print(f"Rank {comm.rank}: {s}")
-        if (not skip_setup):
-            mpi_print(f"Before resetmesh Number of local cells: {self.mesh.topology.index_map(3).size_local}")
-            mpi_print(f"Number of global cells: {self.mesh.topology.index_map(3).size_global}")
-            mpi_print(f"Number of local vertices: {self.mesh.topology.index_map(0).size_local}")
-            self.resetMesh()
-            self.Zmin = np.min(self.mesh_vertices, axis=0)[2]
-            self.Zmax = np.max(self.mesh_vertices, axis=0)[2]
-            mpi_print(f"After resetmesh Number of local cells: {self.mesh.topology.index_map(3).size_local}")
-            mpi_print(f"Number of global cells: {self.mesh.topology.index_map(3).size_global}")
-            mpi_print(f"Number of local vertices: {self.mesh.topology.index_map(0).size_local}")
-
-        # Time-dependent heat problem:
-        #   time-discretized variational form with backwards Euler,
-        #   see: https://fenicsproject.org/pub/tutorial/html/._ftut1006.html
-
-        if (not skip_setup):
-            #
-            # define function space
-            self.FE = ufl.FiniteElement("CG", self.mesh.ufl_cell(), self.CGorder)
-            self.V = dolfinx.fem.FunctionSpace(self.mesh, self.FE)
-
-            # Define solution variable uh
-            self.uh = dolfinx.fem.Function(self.V)
-            self.uh.name = "uh"
-
-            # u_n: solution at previous time step
-            self.u_n = dolfinx.fem.Function(self.V)
-            self.u_n.name = "u_n"
-
-            # initialise both with initial condition: either a step function, or the solution from another Model instance
-            if (initial_state_model is None):
-                # self.u_n.interpolate(self.TemperatureStep)
-                self.u_n.interpolate(self.TemperatureGradient)
-                # self.u_n.interpolate(self.TemperatureFromNode)
-            else:
-                self.u_n.x.array[:] = initial_state_model.uh.x.array[:].copy()
-            self.uh.x.array[:] = self.u_n.x.array[:].copy()
+        self.resetMesh()
+        self.Zmin = np.min(self.mesh_vertices, axis=0)[2]
+        self.Zmax = np.max(self.mesh_vertices, axis=0)[2]
+        # mpi_print(f"Number of local cells: {self.mesh.topology.index_map(3).size_local}")
+        # mpi_print(f"Number of global cells: {self.mesh.topology.index_map(3).size_global}")
+        # mpi_print(f"Number of local vertices: {self.mesh.topology.index_map(0).size_local}")
+        #
+        # define function space
+        self.FE = ufl.FiniteElement("CG", self.mesh.ufl_cell(), self.CGorder)
+        self.V = dolfinx.fem.FunctionSpace(self.mesh, self.FE)
 
-        import time
+        # Define solution variable uh
+        self.uh = dolfinx.fem.Function(self.V)
+        self.uh.name = "uh"
 
-        if (not skip_setup):
-            st = time.time()
-            self.thermalCond, self.c_rho, self.layerIDsFcn, self.rhpFcn = self.buildKappaAndLayerIDs()
-            assert not np.any(np.isnan(self.thermalCond.x.array))
-            print("solve delay 1", time.time()-st)
+        # u_n: solution at previous time step
+        self.u_n = dolfinx.fem.Function(self.V)
+        self.u_n.name = "u_n"
 
-        if (not skip_setup):
-            st = time.time()
-            nn = self.node1D[self.node_index[0]]
-            self.subsidence_at_nodes = np.zeros([self.thermalCond0.shape[0], nn.subsidence.shape[0] ])
-            for i in range(len(self.node1D)):
-                nn = self.node1D[i]
-                iix = np.where(self.node_index==i)
-                self.subsidence_at_nodes[iix,:] = nn.subsidence
-            print("solve delay 1.3", time.time()-st)
+        import time
+        st = time.time()
+        nn = self.node1D[self.node_index[0]]
+
+        self.subsidence_at_nodes = np.zeros([ len(self.cell_data_layerID), nn.subsidence.shape[0] ])
+
+        for i in range(len(self.node1D)):
+            nn = self.node1D[i]
+            iix = np.where(self.node_index==i)
+            self.subsidence_at_nodes[iix,:] = nn.subsidence
+        print("setup delay 1.3", time.time()-st)
 
-        if (not skip_setup):
-            st = time.time()
-            # self.averageLABdepth = np.mean(np.array([ top_asth(n, self.tti) for n in self.node1D]))
-            top_of_sediments = self.subsidence_at_nodes
-            self.bottom_sed_id_at_nodes = []
-            self.top_sed_at_nodes = np.zeros([self.num_nodes, nn.subsidence.shape[0] ])
-            self.base_crust_at_nodes = np.zeros([self.num_nodes, nn.subsidence.shape[0] ])
-            self.base_lith_at_nodes = np.zeros([self.num_nodes, nn.subsidence.shape[0] ])
-            for k in range(0,self.numberOfSediments):
-                bottom_sed_id_at_nodes = np.zeros([self.num_nodes, nn.subsidence.shape[0] ])
-                for i in range(len(self.node1D)):
-                    nn = self.node1D[i]
-                    bottom_sed_id_at_nodes[i, :] = nn.sed[k,1,:]
-                self.bottom_sed_id_at_nodes.append(bottom_sed_id_at_nodes)
+        st = time.time()
+        top_of_sediments = self.subsidence_at_nodes
+        self.bottom_sed_id_at_nodes = []
+        self.top_sed_at_nodes = np.zeros([self.num_nodes, nn.subsidence.shape[0] ])
+        self.base_crust_at_nodes = np.zeros([self.num_nodes, nn.subsidence.shape[0] ])
+        self.base_lith_at_nodes = np.zeros([self.num_nodes, nn.subsidence.shape[0] ])
+        for k in range(0,self.numberOfSediments):
+            bottom_sed_id_at_nodes = np.zeros([self.num_nodes, nn.subsidence.shape[0] ])
             for i in range(len(self.node1D)):
                 nn = self.node1D[i]
-                self.top_sed_at_nodes[i,:] = nn.subsidence
-                self.base_crust_at_nodes[i,:] = nn.subsidence + nn.sed_thickness_ls + nn.crust_ls
-                self.base_lith_at_nodes[i,:] = self.base_crust_at_nodes[i,:] + nn.lith_ls
-            print("solve delay 1.4", time.time()-st)
+                bottom_sed_id_at_nodes[i, :] = nn.sed[k,1,:]
+            self.bottom_sed_id_at_nodes.append(bottom_sed_id_at_nodes)
+        for i in range(len(self.node1D)):
+            nn = self.node1D[i]
+            self.top_sed_at_nodes[i,:] = nn.subsidence
+            self.base_crust_at_nodes[i,:] = nn.subsidence + nn.sed_thickness_ls + nn.crust_ls
+            self.base_lith_at_nodes[i,:] = self.base_crust_at_nodes[i,:] + nn.lith_ls
+        print("setup delay 1.4", time.time()-st)
 
+        st = time.time()
+        self.thermalCond, self.c_rho, self.layerIDsFcn, self.rhpFcn = self.buildKappaAndLayerIDs()
+        assert not np.any(np.isnan(self.thermalCond.x.array))
+        print("setup delay 1", time.time()-st)
 
         st = time.time()
-        self.sedimentsConductivitySekiguchi()
-        print("solve delay 2", time.time()-st)
+        # initialise both with initial condition: either a step function, or the solution from another Model instance
+        if (initial_state_model is None):
+            self.u_n.interpolate(self.TemperatureGradient)
+        else:
+            self.u_n.x.array[:] = initial_state_model.uh.x.array[:].copy()
+        self.uh.x.array[:] = self.u_n.x.array[:].copy()
+        print("setup delay 1.5", time.time()-st)
+
+    def setupSolverAndSolve(self, n_steps:int=100, time_step:int=-1, skip_setup:bool = False, initial_state_model = None, update_bc=False):
+        """ Sets up the function spaces, output functions, input function (kappa values), boundary conditions, initial conditions.
+            Sets up the heat equation in dolfinx, and solves the system in time for the given number of steps.
+
+            Use skip_setup = True to continue a computation (e.g. after deforming the mesh), instead of starting one from scratch
+        """
+        comm = MPI.COMM_WORLD
+        def mpi_print(s):
+            print(f"Rank {comm.rank}: {s}")
 
         if (not skip_setup):
-            st = time.time()
+            self.setupSolver(initial_state_model)
+
+        if (not skip_setup) or update_bc:
             self.bc = self.buildDirichletBC()
-            print("delta C", time.time()-st)
-        else:
-            pass
-            # st = time.time()
-            # self.updateDBC()
-            # print("delta C.2", time.time()-st)
+
+        import time
+        st = time.time()
+        self.sedimentsConductivitySekiguchi()
+        # print("solve delay 2", time.time()-st)
 
         t=0
         dt = time_step if (time_step>0) else 3600*24*365 * 5000000
         num_steps = n_steps
+        #
+        # Time-dependent heat problem:
+        #   time-discretized variational form with backwards Euler,
+        #   see: https://fenicsproject.org/pub/tutorial/html/._ftut1006.html
 
         #
         # solver setup, see:
         #    https://jorgensd.github.io/dolfinx-tutorial/chapter2/diffusion_code.html
         #
-        st = time.time()
-
         u = ufl.TrialFunction(self.V)
         v = ufl.TestFunction(self.V)
@@ -1283,11 +1191,9 @@ def mpi_print(s):
         logger.info(f"mean RHP {np.mean(self.rhpFcn.x.array[:])}")
 
         if ( self.useBaseFlux ):
-            # baseFlux = 0.03 if (self.tti>50) else 0.03
             baseFlux = self.baseFluxMagnitude
             # define Neumann condition: constant flux at base
             # expression g defines values of Neumann BC (heat flux at base)
-            #x = ufl.SpatialCoordinate(self.mesh)
             domain_c = dolfinx.fem.Function(self.V)
             domain_c.x.array[ : ] = 0.0
             if (self.CGorder>1):
@@ -1336,9 +1242,7 @@ def marker(x):
         A = dolfinx.fem.petsc.assemble_matrix(bilinear_form, bcs=[self.bc])
         A.assemble()
         b = dolfinx.fem.petsc.create_vector(linear_form)
-        print("delta 2.F", time.time()-st)
 
-        st = time.time()
         comm = MPI.COMM_WORLD
         solver = PETSc.KSP().create(self.mesh.comm)
@@ -1346,12 +1250,9 @@ def marker(x):
         solver.setType(PETSc.KSP.Type.PREONLY)
         solver.getPC().setType(PETSc.PC.Type.LU)
-        print("delta F", time.time()-st)
 
-        import time
         for i in range(num_steps):
             t += dt
-            st = time.time()
 
             # Update the right hand side reusing the initial vector
             with b.localForm() as loc_b:
                 loc_b.set(0)
@@ -1374,9 +1275,9 @@ def marker(x):
             # Update solution at previous time step (u_n)
             # diffnorm = np.sum(np.abs(self.u_n.x.array - self.uh.x.array)) / self.u_n.x.array.shape[0]
             self.u_n.x.array[:] = self.uh.x.array
-            print("delay compute ", time.time()-st)
             # comm.Barrier()
-
+            self.Tarr.append(self.uh.x.array[:].copy())
+            # print("latest Tarr", self.Tarr[-1], np.mean(self.Tarr[-1]))
@@ -1673,20 +1574,6 @@ def boundary(x):
             assert res.flatten().shape[0] == x.shape[0]
             return res.flatten()
 
-    # def nodeIsOnDomainEdge(self, node0):
-    #     return any([ e[0]==node0 or e[1]==node0 for e in self.convexHullEdges])
-
-    # def pointIsOnDomainEdge(self, pt, node0, node1, weight):
-    #     if (abs(weight)<0.01):
-    #         return self.nodeIsOnDomainEdge(node0)
-    #     if (abs(weight-1.0)<0.01):
-    #         return self.nodeIsOnDomainEdge(node1)
-    #     b0 = [node0, node1] in self.convexHullEdges
-    #     b1 = [node1, node0] in self.convexHullEdges
-    #     if b0 or b1:
-    #         return True
-    #     return False
-
 
 def run_3d( builder:Builder, parameters:Parameters, start_time=182, end_time=0, pad_num_nodes=0, out_dir = "out-mapA/",sedimentsOnly=False, writeout=True, base_flux=None):
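For readers less familiar with the solver being refactored above: each pass through the time loop performs one backward Euler step, i.e. it solves (I - dt·∇·(k∇)) u_new = u_old with Dirichlet rows pinned, assembling the matrix once and re-solving with PETSc LU every step. A toy 1D version of the same implicit update in plain NumPy (unit material constants; this is an illustration, not the dolfinx assembly):

```python
import numpy as np

n, steps = 50, 100
dt, dx = 0.1, 1.0 / (n - 1)
r = dt / dx**2

u = np.linspace(0.0, 1330.0, n)          # initial temperature profile
A = np.eye(n)                            # boundary rows stay identity (Dirichlet)
for i in range(1, n - 1):
    A[i, i - 1:i + 2] = [-r, 1 + 2 * r, -r]

for _ in range(steps):                   # one linear solve per backward Euler step
    u = np.linalg.solve(A, u)            # (I - dt*Laplacian) u_new = u_old
```

Backward Euler is unconditionally stable, which is what lets the code take the very large dt (5 Myr in seconds) seen above without the step-size restriction an explicit scheme would impose.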
logger.info(f"Rebuild/reload mesh at {tti}") mm2 = UniformNodeGridFixedSizeMeshModel(builder, parameters,sedimentsOnly, padding_num_nodes=pad_num_nodes) - comm.Barrier() - # if comm.rank == 0: mm2.buildMesh(tti) - comm.Barrier() if (base_flux is not None): mm2.baseFluxMagnitude = base_flux else: @@ -1720,7 +1602,6 @@ def run_3d( builder:Builder, parameters:Parameters, start_time=182, end_time=0, mm2.updateMesh(tti, optimized=True) toc(msg="update mesh") logger.info(f"Solving {tti}") - posarr.append( mm2.mesh.geometry.x.copy() ) mm2.useBaseFlux = (base_flux is not None) mm2.baseFluxMagnitude = base_flux @@ -1732,7 +1613,7 @@ def run_3d( builder:Builder, parameters:Parameters, start_time=182, end_time=0, else: mm2.useBaseFlux = (base_flux is not None) tic() - mm2.setupSolverAndSolve( n_steps=nums, time_step=dt, skip_setup=(not rebuild_mesh)) + mm2.setupSolverAndSolve( n_steps=nums, time_step=dt, skip_setup=(not rebuild_mesh), update_bc = mm2.useBaseFlux and (len(mms2) == 1)) time_solve = time_solve + toc(msg="setup solver and solve") if (writeout): tic() @@ -1741,16 +1622,24 @@ def run_3d( builder:Builder, parameters:Parameters, start_time=182, end_time=0, # mm2.writeOutputFunctions(out_dir+"test4-"+str(tti)+".xdmf", tti=tti) toc(msg="write function") - Tarr.append(mm2.u_n.x.array.copy()) mms2.append(mm2) mms_tti.append(tti) logger.info(f"Simulated time step {tti}") bar.next() - print("total time solve: " , time_solve) + print("total time solve 3D: " , time_solve) if (writeout_final): - EPCfilename = mm2.write_hexa_mesh_resqml("temp/") - print("RESQML model written to: " , EPCfilename) - EPCfilename_ts = mm2.write_hexa_mesh_timeseries("temp/", posarr, Tarr) - print("RESQML partial model with timeseries written to: ", EPCfilename_ts) - read_mesh_resqml_hexa(EPCfilename) # test reading of the .epc file - return mm2,posarr,Tarr + comm.Barrier() + if comm.rank>=1: + comm.send(mm2.mesh.topology.index_map(0).local_to_global(list(range(mm2.mesh.geometry.x.shape[0]))) , dest=0, tag=((comm.rank-1)*10)+21) + comm.send(mm2.mesh_reindex, dest=0, tag=((comm.rank-1)*10)+23) + comm.send(mm2.mesh_vertices_age, dest=0, tag=((comm.rank-1)*10)+25) + comm.send(mm2.posarr, dest=0, tag=((comm.rank-1)*10)+20) + comm.send(mm2.Tarr, dest=0, tag=((comm.rank-1)*10)+24) + if comm.rank==0: + mm2.receive_mpi_messages() + EPCfilename = mm2.write_hexa_mesh_resqml("temp/", end_time) + print("RESQML model written to: " , EPCfilename) + EPCfilename_ts = mm2.write_hexa_mesh_timeseries("temp/") + print("RESQML partial model with timeseries written to: ", EPCfilename_ts) + read_mesh_resqml_hexa(EPCfilename) # test reading of the .epc file + return mm2 diff --git a/warmth/simulator.py b/warmth/simulator.py index de9c2aa..9066479 100644 --- a/warmth/simulator.py +++ b/warmth/simulator.py @@ -111,17 +111,29 @@ def dump_input_nodes(self, node: single_node): node._dump(p) return p - def dump_input_data(self): + def dump_input_data(self, use_mpi=False): p = [] parameter_data_path = self._parameters_path - self._builder.parameters.dump(self._parameters_path) - if isinstance(self._builder.grid,type(None)) is False: - self._builder.grid.dump(self._grid_path) - with concurrent.futures.ThreadPoolExecutor(max_workers=10) as th: - futures = [th.submit(self.dump_input_nodes, i) - for i in self._builder.iter_node() if i is not False] - for future in concurrent.futures.as_completed(futures): - p.append([parameter_data_path, future.result()]) + + from mpi4py import MPI + comm = MPI.COMM_WORLD + if (comm.rank==0): + 
self._builder.parameters.dump(self._parameters_path) + if isinstance(self._builder.grid,type(None)) is False: + self._builder.grid.dump(self._grid_path) + if use_mpi: + from mpi4py.futures import MPIPoolExecutor + with MPIPoolExecutor(max_workers=10) as th: + futures = [th.submit(self.dump_input_nodes, i) + for i in self._builder.iter_node() if i is not False] + for future in concurrent.futures.as_completed(futures): + p.append([parameter_data_path, future.result()]) + else: + with concurrent.futures.ThreadPoolExecutor(max_workers=10) as th: + futures = [th.submit(self.dump_input_nodes, i) + for i in self._builder.iter_node() if i is not False] + for future in concurrent.futures.as_completed(futures): + p.append([parameter_data_path, future.result()]) return p def setup_directory(self, purge=False): @@ -135,9 +147,9 @@ def setup_directory(self, purge=False): self._nodes_path.mkdir(parents=True, exist_ok=True) return - def run(self, save=False,purge=False,parallel=True): + def run(self, save=False,purge=False,parallel=True,use_mpi=True): if parallel: - self._parellel_run(save,purge) + self._parellel_run(save,purge,use_mpi=use_mpi) else: if self.simulate_every != 1: logger.warning("Serial simulation will run full simulation on all nodes") @@ -177,40 +189,62 @@ def _filter_full_sim(self)->int: if count >0: logger.info(f"Setting {count} nodes to partial simulation") return count - def _parellel_run(self, save,purge): + + + def _parellel_run(self, save, purge, use_mpi=False): filtered = self._filter_full_sim() - self.setup_directory(purge) - p = self.dump_input_data() - #self._builder.nodes=self._builder.grid.make_grid_arr() - with concurrent.futures.ProcessPoolExecutor(mp_context=get_context('spawn')) as executor: - results = [executor.submit(runWorker, i) for i in p] - with Bar('Processing...',check_tty=False, max=len(p)) as bar: - for future in concurrent.futures.as_completed(results): - bar.next() - try: - path_result = future.result() - n= load_node(path_result) # numerical model error should still resovle - if save==False: - path_result.unlink() - self.put_node_to_grid(n) - except Exception as e: - logger.error(e) + from mpi4py import MPI + comm = MPI.COMM_WORLD + if (comm.rank==0): + self.setup_directory(purge) + p = self.dump_input_data(use_mpi=use_mpi) + + if use_mpi: + from mpi4py.futures import MPIPoolExecutor + with MPIPoolExecutor(max_workers=20) as executor: + results = [executor.submit(runWorker, i) for i in p] + with Bar('Processing...',check_tty=False, max=len(p)) as bar: + for future in concurrent.futures.as_completed(results): + bar.next() + try: + path_result = future.result() + n= load_node(path_result) # numerical model error should still resovle + if save==False: + path_result.unlink() + self.put_node_to_grid(n) + except Exception as e: + logger.error(e) + else: + with concurrent.futures.ProcessPoolExecutor(mp_context=get_context('spawn')) as executor: + results = [executor.submit(runWorker, i) for i in p] + with Bar('Processing...',check_tty=False, max=len(p)) as bar: + for future in concurrent.futures.as_completed(results): + bar.next() + try: + path_result = future.result() + n= load_node(path_result) # numerical model error should still resovle + if save==False: + path_result.unlink() + self.put_node_to_grid(n) + except Exception as e: + logger.error(e) # pick up node with no results (failed) - for node_path in self._nodes_path.iterdir(): - str_f = str(node_path) - if str_f.endswith(".pickle"): - n=load_node(node_path) - if save==False: - node_path.unlink() - 
self.put_node_to_grid(n) - logger.warning(f"No result file for node X:{n.X}, Y:{n.Y}") - if save==False: - from shutil import rmtree - rmtree(self._builder.parameters.output_path) - if filtered >0: - logger.info(f"Interpolating results back to {filtered} partial simulated nodes") - interp_res= Results_interpolator(self._builder,len(p)-filtered) - interp_res.run() + if comm.rank==0: + for node_path in self._nodes_path.iterdir(): + str_f = str(node_path) + if str_f.endswith(".pickle"): + n=load_node(node_path) + if save==False: + node_path.unlink() + self.put_node_to_grid(n) + logger.warning(f"No result file for node X:{n.X}, Y:{n.Y}") + if save==False: + from shutil import rmtree + rmtree(self._builder.parameters.output_path) + if filtered >0: + logger.info(f"Interpolating results back to {filtered} partial simulated nodes") + interp_res= Results_interpolator(self._builder,len(p)-filtered) + interp_res.run() return def put_node_to_grid(self,node:single_node): self._builder.nodes[node.indexer[0]][node.indexer[1]]=node
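One operational note on the MPIPoolExecutor paths added in simulator.py: mpi4py.futures expects the program to be started under MPI for the worker pool to exist, typically as `mpiexec -n <N> python -m mpi4py.futures script.py`; launched with plain `python`, MPIPoolExecutor falls back to spawning workers dynamically, which not every MPI build permits. A minimal standalone sketch of the submit/as_completed pattern that _parellel_run now uses (`simulate` is a hypothetical stand-in for runWorker):

```python
import concurrent.futures
from mpi4py.futures import MPIPoolExecutor

def simulate(node_id: int) -> int:        # stand-in for runWorker
    return node_id * node_id

if __name__ == "__main__":                # only the master executes this section
    with MPIPoolExecutor(max_workers=4) as executor:
        futures = [executor.submit(simulate, i) for i in range(16)]
        for future in concurrent.futures.as_completed(futures):
            print(future.result())
```

Unlike the ProcessPoolExecutor branch, the callable and its arguments must be picklable and importable on the worker ranks, which is why the node payloads are dumped to disk first and only paths are submitted.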