diff --git a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/cudacpp.mk b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/cudacpp.mk index a052631aa9..78512a5eeb 100644 --- a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/cudacpp.mk +++ b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %%/bin/nvcc,%%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %%/bin/hipcc,%%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) -ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? + override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? 
(see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/ee_mumu.mad/CODEGEN_mad_ee_mumu_log.txt b/epochX/cudacpp/ee_mumu.mad/CODEGEN_mad_ee_mumu_log.txt index 5982c61ae8..f059e68f5e 100644 --- a/epochX/cudacpp/ee_mumu.mad/CODEGEN_mad_ee_mumu_log.txt +++ b/epochX/cudacpp/ee_mumu.mad/CODEGEN_mad_ee_mumu_log.txt @@ -62,7 +62,7 @@ generate e+ e- > mu+ mu- No model currently active, so we import the Standard Model INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.005379676818847656  +DEBUG: model prefixing takes 0.005307912826538086  INFO: Restrict model sm with file models/sm/restrict_default.dat . DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -177,7 +177,7 @@ INFO: Generating Helas calls for process: e+ e- > mu+ mu- WEIGHTED<=4 @1 INFO: Processing color information for process: e+ e- > mu+ mu- @1 INFO: Creating files in directory P1_epem_mupmum DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -198,18 +198,18 @@ INFO: Finding symmetric diagrams for subprocess group epem_mupmum DEBUG: iconfig_to_diag =  {1: 1, 2: 2} [model_handling.py at line 1544]  DEBUG: diag_to_iconfig =  {1: 1, 2: 2} [model_handling.py at line 1545]  Generated helas calls for 1 subprocesses (2 diagrams) in 0.004 s -Wrote files for 8 helas calls in 0.114 s +Wrote files for 8 helas calls in 0.112 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates FFV1 routines ALOHA: aloha creates FFV2 routines ALOHA: aloha creates FFV4 routines -ALOHA: aloha creates 3 routines in 0.201 s +ALOHA: aloha creates 3 routines in 0.198 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates FFV1 routines ALOHA: aloha creates FFV2 routines ALOHA: aloha creates FFV4 routines ALOHA: aloha creates FFV2_4 routines -ALOHA: aloha creates 7 routines in 0.255 s +ALOHA: aloha creates 7 routines in 0.253 s FFV1 FFV1 FFV2 @@ -252,9 +252,9 @@ Type "launch" to generate events from this process, or see Run "open index.html" to see more information about this process. quit -real 0m2.118s -user 0m1.862s -sys 0m0.242s +real 0m2.067s +user 0m1.807s +sys 0m0.251s Code generation completed in 2 seconds ************************************************************ * * diff --git a/epochX/cudacpp/ee_mumu.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/ee_mumu.mad/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/ee_mumu.mad/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/ee_mumu.mad/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? 
What should be configured if both CUDA and HIP are installed?) -ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? + override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/ee_mumu.sa/CODEGEN_cudacpp_ee_mumu_log.txt b/epochX/cudacpp/ee_mumu.sa/CODEGEN_cudacpp_ee_mumu_log.txt index ae0e225418..a96bc91d5b 100644 --- a/epochX/cudacpp/ee_mumu.sa/CODEGEN_cudacpp_ee_mumu_log.txt +++ b/epochX/cudacpp/ee_mumu.sa/CODEGEN_cudacpp_ee_mumu_log.txt @@ -62,7 +62,7 @@ generate e+ e- > mu+ mu- No model currently active, so we import the Standard Model INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.00564265251159668  +DEBUG: model prefixing takes 0.005346059799194336  INFO: Restrict model sm with file models/sm/restrict_default.dat . DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -177,13 +177,13 @@ INFO: Creating files in directory /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TM FileWriter for /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_ee_mumu/SubProcesses/P1_Sigma_sm_epem_mupmum/./CPPProcess.h FileWriter for /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_ee_mumu/SubProcesses/P1_Sigma_sm_epem_mupmum/./CPPProcess.cc INFO: Created files CPPProcess.h and CPPProcess.cc in directory /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_ee_mumu/SubProcesses/P1_Sigma_sm_epem_mupmum/. 
-Generated helas calls for 1 subprocesses (2 diagrams) in 0.004 s +Generated helas calls for 1 subprocesses (2 diagrams) in 0.003 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates FFV1 routines ALOHA: aloha creates FFV2 routines ALOHA: aloha creates FFV4 routines ALOHA: aloha creates FFV2_4 routines -ALOHA: aloha creates 4 routines in 0.271 s +ALOHA: aloha creates 4 routines in 0.264 s FFV1 FFV1 FFV2 @@ -202,7 +202,7 @@ INFO: Created files Parameters_sm.h and Parameters_sm.cc in directory INFO: /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_ee_mumu/src/. and /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_ee_mumu/src/. quit -real 0m0.674s -user 0m0.596s -sys 0m0.058s +real 0m0.647s +user 0m0.592s +sys 0m0.048s Code generation completed in 1 seconds diff --git a/epochX/cudacpp/ee_mumu.sa/SubProcesses/cudacpp.mk b/epochX/cudacpp/ee_mumu.sa/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/ee_mumu.sa/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/ee_mumu.sa/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) -ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? + override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? 
(see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/gg_tt.mad/CODEGEN_mad_gg_tt_log.txt b/epochX/cudacpp/gg_tt.mad/CODEGEN_mad_gg_tt_log.txt index d3614c325f..b7616fe096 100644 --- a/epochX/cudacpp/gg_tt.mad/CODEGEN_mad_gg_tt_log.txt +++ b/epochX/cudacpp/gg_tt.mad/CODEGEN_mad_gg_tt_log.txt @@ -62,7 +62,7 @@ generate g g > t t~ No model currently active, so we import the Standard Model INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.005547761917114258  +DEBUG: model prefixing takes 0.005777120590209961  INFO: Restrict model sm with file models/sm/restrict_default.dat . DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -178,7 +178,7 @@ INFO: Generating Helas calls for process: g g > t t~ WEIGHTED<=2 @1 INFO: Processing color information for process: g g > t t~ @1 INFO: Creating files in directory P1_gg_ttx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -198,15 +198,15 @@ INFO: Finding symmetric diagrams for subprocess group gg_ttx DEBUG: iconfig_to_diag =  {1: 1, 2: 2, 3: 3} [model_handling.py at line 1544]  DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3} [model_handling.py at line 1545]  Generated helas calls for 1 subprocesses (3 diagrams) in 0.006 s -Wrote files for 10 helas calls in 0.117 s +Wrote files for 10 helas calls in 0.115 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 set of routines with options: P0 ALOHA: aloha creates FFV1 routines -ALOHA: aloha creates 2 routines in 0.145 s +ALOHA: aloha creates 2 routines in 0.146 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 set of routines with options: P0 ALOHA: aloha creates FFV1 routines -ALOHA: aloha creates 4 routines in 0.139 s +ALOHA: aloha creates 4 routines in 0.132 s VVV1 FFV1 FFV1 @@ -241,9 +241,9 @@ Type "launch" to generate events from this process, or see Run "open index.html" to see more information about this process. quit -real 0m2.050s -user 0m1.662s -sys 0m0.268s +real 0m1.927s +user 0m1.671s +sys 0m0.252s Code generation completed in 2 seconds ************************************************************ * * diff --git a/epochX/cudacpp/gg_tt.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_tt.mad/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/gg_tt.mad/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/gg_tt.mad/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) 
-ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? + override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/gg_tt.sa/CODEGEN_cudacpp_gg_tt_log.txt b/epochX/cudacpp/gg_tt.sa/CODEGEN_cudacpp_gg_tt_log.txt index 5f921c39c6..b84f753a35 100644 --- a/epochX/cudacpp/gg_tt.sa/CODEGEN_cudacpp_gg_tt_log.txt +++ b/epochX/cudacpp/gg_tt.sa/CODEGEN_cudacpp_gg_tt_log.txt @@ -62,7 +62,7 @@ generate g g > t t~ No model currently active, so we import the Standard Model INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.005394935607910156  +DEBUG: model prefixing takes 0.005595207214355469  INFO: Restrict model sm with file models/sm/restrict_default.dat . DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -155,7 +155,7 @@ INFO: Please specify coupling orders to bypass this step. 
INFO: Trying coupling order WEIGHTED<=2: WEIGTHED IS QCD+2*QED INFO: Trying process: g g > t t~ WEIGHTED<=2 @1 INFO: Process has 3 diagrams -1 processes with 3 diagrams generated in 0.009 s +1 processes with 3 diagrams generated in 0.008 s Total: 1 processes with 3 diagrams output standalone_cudacpp ../TMPOUT/CODEGEN_cudacpp_gg_tt Load PLUGIN.CUDACPP_OUTPUT @@ -182,7 +182,7 @@ Generated helas calls for 1 subprocesses (3 diagrams) in 0.006 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 set of routines with options: P0 ALOHA: aloha creates FFV1 routines -ALOHA: aloha creates 2 routines in 0.145 s +ALOHA: aloha creates 2 routines in 0.144 s VVV1 FFV1 FFV1 @@ -197,7 +197,7 @@ INFO: Created files Parameters_sm.h and Parameters_sm.cc in directory INFO: /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gg_tt/src/. and /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gg_tt/src/. quit -real 0m0.549s -user 0m0.474s -sys 0m0.058s -Code generation completed in 0 seconds +real 0m0.556s +user 0m0.475s +sys 0m0.048s +Code generation completed in 1 seconds diff --git a/epochX/cudacpp/gg_tt.sa/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_tt.sa/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/gg_tt.sa/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/gg_tt.sa/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) -ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? + override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? 
(see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/gg_tt01g.mad/CODEGEN_mad_gg_tt01g_log.txt b/epochX/cudacpp/gg_tt01g.mad/CODEGEN_mad_gg_tt01g_log.txt index 2ea2a5346a..7fabd11d28 100644 --- a/epochX/cudacpp/gg_tt01g.mad/CODEGEN_mad_gg_tt01g_log.txt +++ b/epochX/cudacpp/gg_tt01g.mad/CODEGEN_mad_gg_tt01g_log.txt @@ -62,7 +62,7 @@ generate g g > t t~ No model currently active, so we import the Standard Model INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.0055332183837890625  +DEBUG: model prefixing takes 0.005646228790283203  INFO: Restrict model sm with file models/sm/restrict_default.dat . DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -155,7 +155,7 @@ INFO: Please specify coupling orders to bypass this step. INFO: Trying coupling order WEIGHTED<=2: WEIGTHED IS QCD+2*QED INFO: Trying process: g g > t t~ WEIGHTED<=2 @1 INFO: Process has 3 diagrams -1 processes with 3 diagrams generated in 0.008 s +1 processes with 3 diagrams generated in 0.009 s Total: 1 processes with 3 diagrams add process g g > t t~ g INFO: Checking for minimal orders which gives processes. @@ -163,7 +163,7 @@ INFO: Please specify coupling orders to bypass this step. INFO: Trying coupling order WEIGHTED<=3: WEIGTHED IS QCD+2*QED INFO: Trying process: g g > t t~ g WEIGHTED<=3 @2 INFO: Process has 16 diagrams -1 processes with 16 diagrams generated in 0.020 s +1 processes with 16 diagrams generated in 0.019 s Total: 2 processes with 19 diagrams output madevent_simd ../TMPOUT/CODEGEN_mad_gg_tt01g --hel_recycling=False --vector_size=32 Load PLUGIN.CUDACPP_OUTPUT @@ -188,7 +188,7 @@ INFO: Generating Helas calls for process: g g > t t~ WEIGHTED<=2 @1 INFO: Processing color information for process: g g > t t~ @1 INFO: Creating files in directory P2_gg_ttxg DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -209,7 +209,7 @@ INFO: Finding symmetric diagrams for subprocess group gg_ttxg DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15} [model_handling.py at line 1545]  INFO: Creating files in directory P1_gg_ttx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . 
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -228,22 +228,22 @@ INFO: Finding symmetric diagrams for subprocess group gg_ttx DEBUG: len(subproc_diagrams_for_config) =  3 [model_handling.py at line 1520]  DEBUG: iconfig_to_diag =  {1: 1, 2: 2, 3: 3} [model_handling.py at line 1544]  DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3} [model_handling.py at line 1545]  -Generated helas calls for 2 subprocesses (19 diagrams) in 0.042 s -Wrote files for 46 helas calls in 0.281 s +Generated helas calls for 2 subprocesses (19 diagrams) in 0.043 s +Wrote files for 46 helas calls in 0.275 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 set of routines with options: P0 ALOHA: aloha creates VVVV3 set of routines with options: P0 ALOHA: aloha creates VVVV4 set of routines with options: P0 -ALOHA: aloha creates 5 routines in 0.336 s +ALOHA: aloha creates 5 routines in 0.331 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 set of routines with options: P0 ALOHA: aloha creates VVVV3 set of routines with options: P0 ALOHA: aloha creates VVVV4 set of routines with options: P0 -ALOHA: aloha creates 10 routines in 0.317 s +ALOHA: aloha creates 10 routines in 0.315 s VVV1 VVV1 FFV1 @@ -291,10 +291,10 @@ Type "launch" to generate events from this process, or see Run "open index.html" to see more information about this process. quit -real 0m3.722s -user 0m2.407s -sys 0m0.292s -Code generation completed in 4 seconds +real 0m2.676s +user 0m2.362s +sys 0m0.310s +Code generation completed in 3 seconds ************************************************************ * * * W E L C O M E to * diff --git a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) -ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? 
+ override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/gg_ttg.mad/CODEGEN_mad_gg_ttg_log.txt b/epochX/cudacpp/gg_ttg.mad/CODEGEN_mad_gg_ttg_log.txt index dc2276a50d..18b1d80415 100644 --- a/epochX/cudacpp/gg_ttg.mad/CODEGEN_mad_gg_ttg_log.txt +++ b/epochX/cudacpp/gg_ttg.mad/CODEGEN_mad_gg_ttg_log.txt @@ -62,7 +62,7 @@ generate g g > t t~ g No model currently active, so we import the Standard Model INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.005425453186035156  +DEBUG: model prefixing takes 0.005260467529296875  INFO: Restrict model sm with file models/sm/restrict_default.dat . DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -155,7 +155,7 @@ INFO: Please specify coupling orders to bypass this step. INFO: Trying coupling order WEIGHTED<=3: WEIGTHED IS QCD+2*QED INFO: Trying process: g g > t t~ g WEIGHTED<=3 @1 INFO: Process has 16 diagrams -1 processes with 16 diagrams generated in 0.022 s +1 processes with 16 diagrams generated in 0.021 s Total: 1 processes with 16 diagrams output madevent_simd ../TMPOUT/CODEGEN_mad_gg_ttg --hel_recycling=False --vector_size=32 Load PLUGIN.CUDACPP_OUTPUT @@ -178,7 +178,7 @@ INFO: Generating Helas calls for process: g g > t t~ g WEIGHTED<=3 @1 INFO: Processing color information for process: g g > t t~ g @1 INFO: Creating files in directory P1_gg_ttxg DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . 
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -198,21 +198,21 @@ INFO: Finding symmetric diagrams for subprocess group gg_ttxg DEBUG: iconfig_to_diag =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15} [model_handling.py at line 1544]  DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15} [model_handling.py at line 1545]  Generated helas calls for 1 subprocesses (16 diagrams) in 0.039 s -Wrote files for 36 helas calls in 0.165 s +Wrote files for 36 helas calls in 0.162 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 set of routines with options: P0 ALOHA: aloha creates VVVV3 set of routines with options: P0 ALOHA: aloha creates VVVV4 set of routines with options: P0 -ALOHA: aloha creates 5 routines in 0.330 s +ALOHA: aloha creates 5 routines in 0.322 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 set of routines with options: P0 ALOHA: aloha creates VVVV3 set of routines with options: P0 ALOHA: aloha creates VVVV4 set of routines with options: P0 -ALOHA: aloha creates 10 routines in 0.317 s +ALOHA: aloha creates 10 routines in 0.308 s VVV1 VVV1 FFV1 @@ -256,9 +256,9 @@ Type "launch" to generate events from this process, or see Run "open index.html" to see more information about this process. quit -real 0m2.506s -user 0m2.207s -sys 0m0.271s +real 0m2.483s +user 0m2.197s +sys 0m0.283s Code generation completed in 3 seconds ************************************************************ * * diff --git a/epochX/cudacpp/gg_ttg.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_ttg.mad/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/gg_ttg.mad/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/gg_ttg.mad/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) -ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? 
+ override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/gg_ttg.sa/CODEGEN_cudacpp_gg_ttg_log.txt b/epochX/cudacpp/gg_ttg.sa/CODEGEN_cudacpp_gg_ttg_log.txt index 433938fa3c..a103152d0f 100644 --- a/epochX/cudacpp/gg_ttg.sa/CODEGEN_cudacpp_gg_ttg_log.txt +++ b/epochX/cudacpp/gg_ttg.sa/CODEGEN_cudacpp_gg_ttg_log.txt @@ -62,7 +62,7 @@ generate g g > t t~ g No model currently active, so we import the Standard Model INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.005648612976074219  +DEBUG: model prefixing takes 0.00570988655090332  INFO: Restrict model sm with file models/sm/restrict_default.dat . DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -155,7 +155,7 @@ INFO: Please specify coupling orders to bypass this step. INFO: Trying coupling order WEIGHTED<=3: WEIGTHED IS QCD+2*QED INFO: Trying process: g g > t t~ g WEIGHTED<=3 @1 INFO: Process has 16 diagrams -1 processes with 16 diagrams generated in 0.022 s +1 processes with 16 diagrams generated in 0.021 s Total: 1 processes with 16 diagrams output standalone_cudacpp ../TMPOUT/CODEGEN_cudacpp_gg_ttg Load PLUGIN.CUDACPP_OUTPUT @@ -178,14 +178,14 @@ INFO: Creating files in directory /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TM FileWriter for /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gg_ttg/SubProcesses/P1_Sigma_sm_gg_ttxg/./CPPProcess.h FileWriter for /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gg_ttg/SubProcesses/P1_Sigma_sm_gg_ttxg/./CPPProcess.cc INFO: Created files CPPProcess.h and CPPProcess.cc in directory /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gg_ttg/SubProcesses/P1_Sigma_sm_gg_ttxg/. 
-Generated helas calls for 1 subprocesses (16 diagrams) in 0.038 s +Generated helas calls for 1 subprocesses (16 diagrams) in 0.037 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 set of routines with options: P0 ALOHA: aloha creates VVVV3 set of routines with options: P0 ALOHA: aloha creates VVVV4 set of routines with options: P0 -ALOHA: aloha creates 5 routines in 0.327 s +ALOHA: aloha creates 5 routines in 0.323 s VVV1 VVV1 FFV1 @@ -205,7 +205,7 @@ INFO: Created files Parameters_sm.h and Parameters_sm.cc in directory INFO: /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gg_ttg/src/. and /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gg_ttg/src/. quit -real 0m0.818s -user 0m0.725s -sys 0m0.053s +real 0m0.774s +user 0m0.711s +sys 0m0.055s Code generation completed in 1 seconds diff --git a/epochX/cudacpp/gg_ttg.sa/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_ttg.sa/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/gg_ttg.sa/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/gg_ttg.sa/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) -ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? + override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? 
(see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/gg_ttgg.mad/CODEGEN_mad_gg_ttgg_log.txt b/epochX/cudacpp/gg_ttgg.mad/CODEGEN_mad_gg_ttgg_log.txt index 8412f20e64..816c1d75f7 100644 --- a/epochX/cudacpp/gg_ttgg.mad/CODEGEN_mad_gg_ttgg_log.txt +++ b/epochX/cudacpp/gg_ttgg.mad/CODEGEN_mad_gg_ttgg_log.txt @@ -62,7 +62,7 @@ generate g g > t t~ g g No model currently active, so we import the Standard Model INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.0055027008056640625  +DEBUG: model prefixing takes 0.0055654048919677734  INFO: Restrict model sm with file models/sm/restrict_default.dat . DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -155,7 +155,7 @@ INFO: Please specify coupling orders to bypass this step. INFO: Trying coupling order WEIGHTED<=4: WEIGTHED IS QCD+2*QED INFO: Trying process: g g > t t~ g g WEIGHTED<=4 @1 INFO: Process has 123 diagrams -1 processes with 123 diagrams generated in 0.160 s +1 processes with 123 diagrams generated in 0.156 s Total: 1 processes with 123 diagrams output madevent_simd ../TMPOUT/CODEGEN_mad_gg_ttgg --hel_recycling=False --vector_size=32 Load PLUGIN.CUDACPP_OUTPUT @@ -178,7 +178,7 @@ INFO: Generating Helas calls for process: g g > t t~ g g WEIGHTED<=4 @1 INFO: Processing color information for process: g g > t t~ g g @1 INFO: Creating files in directory P1_gg_ttxgg DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . 
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -197,22 +197,22 @@ INFO: Finding symmetric diagrams for subprocess group gg_ttxgg DEBUG: len(subproc_diagrams_for_config) =  105 [model_handling.py at line 1520]  DEBUG: iconfig_to_diag =  {1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 7, 7: 8, 8: 9, 9: 10, 10: 11, 11: 12, 12: 13, 13: 14, 14: 15, 15: 16, 16: 17, 17: 18, 18: 19, 19: 20, 20: 21, 21: 22, 22: 23, 23: 24, 24: 25, 25: 26, 26: 27, 27: 28, 28: 29, 29: 30, 30: 31, 31: 33, 32: 34, 33: 35, 34: 36, 35: 37, 36: 38, 37: 39, 38: 40, 39: 41, 40: 42, 41: 43, 42: 44, 43: 45, 44: 46, 45: 47, 46: 49, 47: 50, 48: 51, 49: 52, 50: 53, 51: 54, 52: 55, 53: 56, 54: 57, 55: 59, 56: 60, 57: 61, 58: 62, 59: 63, 60: 64, 61: 65, 62: 66, 63: 67, 64: 68, 65: 69, 66: 70, 67: 71, 68: 72, 69: 73, 70: 75, 71: 76, 72: 77, 73: 78, 74: 79, 75: 80, 76: 81, 77: 82, 78: 83, 79: 84, 80: 85, 81: 86, 82: 87, 83: 88, 84: 89, 85: 90, 86: 91, 87: 92, 88: 94, 89: 95, 90: 96, 91: 97, 92: 98, 93: 99, 94: 101, 95: 102, 96: 103, 97: 104, 98: 105, 99: 106, 100: 108, 101: 109, 102: 110, 103: 111, 104: 112, 105: 113} [model_handling.py at line 1544]  DEBUG: diag_to_iconfig =  {2: 1, 3: 2, 4: 3, 5: 4, 6: 5, 7: 6, 8: 7, 9: 8, 10: 9, 11: 10, 12: 11, 13: 12, 14: 13, 15: 14, 16: 15, 17: 16, 18: 17, 19: 18, 20: 19, 21: 20, 22: 21, 23: 22, 24: 23, 25: 24, 26: 25, 27: 26, 28: 27, 29: 28, 30: 29, 31: 30, 33: 31, 34: 32, 35: 33, 36: 34, 37: 35, 38: 36, 39: 37, 40: 38, 41: 39, 42: 40, 43: 41, 44: 42, 45: 43, 46: 44, 47: 45, 49: 46, 50: 47, 51: 48, 52: 49, 53: 50, 54: 51, 55: 52, 56: 53, 57: 54, 59: 55, 60: 56, 61: 57, 62: 58, 63: 59, 64: 60, 65: 61, 66: 62, 67: 63, 68: 64, 69: 65, 70: 66, 71: 67, 72: 68, 73: 69, 75: 70, 76: 71, 77: 72, 78: 73, 79: 74, 80: 75, 81: 76, 82: 77, 83: 78, 84: 79, 85: 80, 86: 81, 87: 82, 88: 83, 89: 84, 90: 85, 91: 86, 92: 87, 94: 88, 95: 89, 96: 90, 97: 91, 98: 92, 99: 93, 101: 94, 102: 95, 103: 96, 104: 97, 105: 98, 106: 99, 108: 100, 109: 101, 110: 102, 111: 103, 112: 104, 113: 105} [model_handling.py at line 1545]  -Generated helas calls for 1 subprocesses (123 diagrams) in 0.430 s -Wrote files for 222 helas calls in 0.712 s +Generated helas calls for 1 subprocesses (123 diagrams) in 0.428 s +Wrote files for 222 helas calls in 0.706 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 routines ALOHA: aloha creates VVVV3 routines ALOHA: aloha creates VVVV4 routines -ALOHA: aloha creates 5 routines in 0.334 s +ALOHA: aloha creates 5 routines in 0.333 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 routines ALOHA: aloha creates VVVV3 routines ALOHA: aloha creates VVVV4 routines -ALOHA: aloha creates 10 routines in 0.319 s +ALOHA: aloha creates 10 routines in 0.317 s VVV1 VVV1 FFV1 @@ -259,9 +259,9 @@ Type "launch" to generate events from this process, or see Run "open index.html" to see more information about this process. 
quit -real 0m3.929s -user 0m3.539s -sys 0m0.294s +real 0m3.822s +user 0m3.543s +sys 0m0.260s Code generation completed in 4 seconds ************************************************************ * * diff --git a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) -ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? + override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? 
(see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/gg_ttgg.sa/CODEGEN_cudacpp_gg_ttgg_log.txt b/epochX/cudacpp/gg_ttgg.sa/CODEGEN_cudacpp_gg_ttgg_log.txt index ec446c348d..5c8b6b0535 100644 --- a/epochX/cudacpp/gg_ttgg.sa/CODEGEN_cudacpp_gg_ttgg_log.txt +++ b/epochX/cudacpp/gg_ttgg.sa/CODEGEN_cudacpp_gg_ttgg_log.txt @@ -62,7 +62,7 @@ generate g g > t t~ g g No model currently active, so we import the Standard Model INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.005393266677856445  +DEBUG: model prefixing takes 0.0053234100341796875  INFO: Restrict model sm with file models/sm/restrict_default.dat . DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -155,7 +155,7 @@ INFO: Please specify coupling orders to bypass this step. INFO: Trying coupling order WEIGHTED<=4: WEIGTHED IS QCD+2*QED INFO: Trying process: g g > t t~ g g WEIGHTED<=4 @1 INFO: Process has 123 diagrams -1 processes with 123 diagrams generated in 0.158 s +1 processes with 123 diagrams generated in 0.157 s Total: 1 processes with 123 diagrams output standalone_cudacpp ../TMPOUT/CODEGEN_cudacpp_gg_ttgg Load PLUGIN.CUDACPP_OUTPUT @@ -178,14 +178,14 @@ INFO: Creating files in directory /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TM FileWriter for /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gg_ttgg/SubProcesses/P1_Sigma_sm_gg_ttxgg/./CPPProcess.h FileWriter for /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gg_ttgg/SubProcesses/P1_Sigma_sm_gg_ttxgg/./CPPProcess.cc INFO: Created files CPPProcess.h and CPPProcess.cc in directory /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gg_ttgg/SubProcesses/P1_Sigma_sm_gg_ttxgg/. -Generated helas calls for 1 subprocesses (123 diagrams) in 0.429 s +Generated helas calls for 1 subprocesses (123 diagrams) in 0.430 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 routines ALOHA: aloha creates VVVV3 routines ALOHA: aloha creates VVVV4 routines -ALOHA: aloha creates 5 routines in 0.321 s +ALOHA: aloha creates 5 routines in 0.322 s VVV1 VVV1 FFV1 @@ -208,7 +208,7 @@ INFO: Created files Parameters_sm.h and Parameters_sm.cc in directory INFO: /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gg_ttgg/src/. and /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gg_ttgg/src/. 
quit -real 0m1.690s -user 0m1.392s -sys 0m0.051s -Code generation completed in 2 seconds +real 0m1.496s +user 0m1.376s +sys 0m0.058s +Code generation completed in 1 seconds diff --git a/epochX/cudacpp/gg_ttgg.sa/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_ttgg.sa/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/gg_ttgg.sa/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/gg_ttgg.sa/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) -ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? + override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? 
(see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/gg_ttggg.mad/CODEGEN_mad_gg_ttggg_log.txt b/epochX/cudacpp/gg_ttggg.mad/CODEGEN_mad_gg_ttggg_log.txt index 80b849a95d..cf81051351 100644 --- a/epochX/cudacpp/gg_ttggg.mad/CODEGEN_mad_gg_ttggg_log.txt +++ b/epochX/cudacpp/gg_ttggg.mad/CODEGEN_mad_gg_ttggg_log.txt @@ -62,7 +62,7 @@ generate g g > t t~ g g g No model currently active, so we import the Standard Model INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.005666971206665039  +DEBUG: model prefixing takes 0.005418062210083008  INFO: Restrict model sm with file models/sm/restrict_default.dat . DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -155,7 +155,7 @@ INFO: Please specify coupling orders to bypass this step. INFO: Trying coupling order WEIGHTED<=5: WEIGTHED IS QCD+2*QED INFO: Trying process: g g > t t~ g g g WEIGHTED<=5 @1 INFO: Process has 1240 diagrams -1 processes with 1240 diagrams generated in 1.906 s +1 processes with 1240 diagrams generated in 1.889 s Total: 1 processes with 1240 diagrams output madevent_simd ../TMPOUT/CODEGEN_mad_gg_ttggg --hel_recycling=False --vector_size=32 Load PLUGIN.CUDACPP_OUTPUT @@ -180,7 +180,7 @@ INFO: Creating files in directory P1_gg_ttxggg INFO: Computing Color-Flow optimization [15120 term] INFO: Color-Flow passed to 1630 term in 8s. Introduce 3030 contraction DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . 
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -199,22 +199,22 @@ INFO: Finding symmetric diagrams for subprocess group gg_ttxggg DEBUG: len(subproc_diagrams_for_config) =  945 [model_handling.py at line 1520]  DEBUG: iconfig_to_diag =  {1: 1, 2: 2, 3: 4, 4: 5, 5: 7, 6: 8, 7: 14, 8: 15, 9: 16, 10: 18, 11: 19, 12: 20, 13: 22, 14: 23, 15: 24, 16: 26, 17: 27, 18: 28, 19: 29, 20: 30, 21: 31, 22: 33, 23: 34, 24: 35, 25: 36, 26: 37, 27: 38, 28: 39, 29: 40, 30: 41, 31: 42, 32: 43, 33: 44, 34: 45, 35: 46, 36: 47, 37: 49, 38: 50, 39: 51, 40: 52, 41: 53, 42: 54, 43: 55, 44: 56, 45: 57, 46: 58, 47: 59, 48: 60, 49: 61, 50: 62, 51: 63, 52: 65, 53: 66, 54: 67, 55: 68, 56: 69, 57: 70, 58: 71, 59: 72, 60: 73, 61: 74, 62: 75, 63: 76, 64: 77, 65: 78, 66: 79, 67: 81, 68: 82, 69: 83, 70: 84, 71: 85, 72: 86, 73: 87, 74: 88, 75: 89, 76: 91, 77: 92, 78: 93, 79: 94, 80: 95, 81: 96, 82: 97, 83: 98, 84: 99, 85: 101, 86: 102, 87: 103, 88: 104, 89: 105, 90: 106, 91: 107, 92: 108, 93: 109, 94: 110, 95: 111, 96: 112, 97: 113, 98: 114, 99: 115, 100: 116, 101: 117, 102: 118, 103: 119, 104: 120, 105: 121, 106: 124, 107: 125, 108: 126, 109: 127, 110: 128, 111: 129, 112: 130, 113: 131, 114: 132, 115: 133, 116: 134, 117: 135, 118: 136, 119: 137, 120: 138, 121: 140, 122: 141, 123: 143, 124: 144, 125: 145, 126: 146, 127: 147, 128: 148, 129: 149, 130: 150, 131: 151, 132: 152, 133: 153, 134: 154, 135: 155, 136: 156, 137: 157, 138: 159, 139: 160, 140: 161, 141: 162, 142: 163, 143: 164, 144: 165, 145: 166, 146: 167, 147: 168, 148: 169, 149: 170, 150: 171, 151: 172, 152: 173, 153: 175, 154: 176, 155: 177, 156: 178, 157: 179, 158: 180, 159: 181, 160: 182, 161: 183, 162: 184, 163: 185, 164: 186, 165: 187, 166: 188, 167: 189, 168: 190, 169: 191, 170: 192, 171: 193, 172: 194, 173: 195, 174: 196, 175: 197, 176: 198, 177: 199, 178: 200, 179: 201, 180: 202, 181: 203, 182: 204, 183: 205, 184: 206, 185: 207, 186: 208, 187: 209, 188: 210, 189: 211, 190: 212, 191: 213, 192: 214, 193: 215, 194: 216, 195: 217, 196: 218, 197: 220, 198: 221, 199: 222, 200: 223, 201: 224, 202: 225, 203: 227, 204: 228, 205: 229, 206: 230, 207: 231, 208: 232, 209: 234, 210: 235, 211: 247, 212: 248, 213: 249, 214: 250, 215: 251, 216: 252, 217: 253, 218: 254, 219: 255, 220: 256, 221: 257, 222: 258, 223: 259, 224: 260, 225: 261, 226: 263, 227: 264, 228: 266, 229: 267, 230: 268, 231: 269, 232: 270, 233: 271, 234: 272, 235: 273, 236: 274, 237: 275, 238: 276, 239: 277, 240: 278, 241: 279, 242: 280, 243: 282, 244: 283, 245: 284, 246: 285, 247: 286, 248: 287, 249: 288, 250: 289, 251: 290, 252: 291, 253: 292, 254: 293, 255: 294, 256: 295, 257: 296, 258: 298, 259: 299, 260: 300, 261: 301, 262: 302, 263: 303, 264: 304, 265: 305, 266: 306, 267: 307, 268: 308, 269: 309, 270: 310, 271: 311, 272: 312, 273: 313, 274: 314, 275: 315, 276: 316, 277: 317, 278: 318, 279: 319, 280: 320, 281: 321, 282: 322, 283: 323, 284: 324, 285: 325, 286: 326, 287: 327, 288: 328, 289: 329, 290: 330, 291: 331, 292: 332, 293: 333, 294: 334, 295: 335, 296: 336, 297: 337, 298: 338, 299: 339, 300: 340, 301: 341, 302: 343, 303: 344, 304: 345, 305: 346, 306: 347, 307: 348, 308: 350, 309: 351, 310: 352, 311: 353, 312: 354, 313: 355, 314: 357, 315: 358, 316: 370, 317: 371, 318: 372, 319: 373, 320: 374, 321: 375, 322: 377, 323: 378, 324: 379, 325: 380, 326: 381, 327: 382, 328: 383, 329: 384, 330: 385, 331: 386, 332: 387, 333: 388, 334: 389, 335: 390, 336: 391, 337: 393, 338: 394, 339: 395, 340: 396, 341: 397, 342: 398, 343: 399, 344: 400, 345: 401, 346: 402, 347: 403, 348: 404, 349: 405, 
350: 406, 351: 407, 352: 409, 353: 410, 354: 411, 355: 412, 356: 413, 357: 414, 358: 415, 359: 416, 360: 417, 361: 418, 362: 419, 363: 420, 364: 421, 365: 422, 366: 423, 367: 425, 368: 426, 369: 427, 370: 428, 371: 429, 372: 430, 373: 431, 374: 432, 375: 433, 376: 434, 377: 435, 378: 437, 379: 438, 380: 440, 381: 441, 382: 447, 383: 448, 384: 449, 385: 450, 386: 451, 387: 452, 388: 453, 389: 454, 390: 455, 391: 457, 392: 458, 393: 459, 394: 460, 395: 461, 396: 462, 397: 463, 398: 464, 399: 465, 400: 467, 401: 468, 402: 469, 403: 470, 404: 471, 405: 472, 406: 473, 407: 474, 408: 475, 409: 477, 410: 478, 411: 479, 412: 480, 413: 481, 414: 482, 415: 484, 416: 485, 417: 486, 418: 487, 419: 488, 420: 489, 421: 493, 422: 494, 423: 495, 424: 496, 425: 497, 426: 498, 427: 500, 428: 501, 429: 502, 430: 503, 431: 504, 432: 505, 433: 506, 434: 507, 435: 508, 436: 509, 437: 510, 438: 511, 439: 512, 440: 513, 441: 514, 442: 516, 443: 517, 444: 518, 445: 519, 446: 520, 447: 521, 448: 522, 449: 523, 450: 524, 451: 525, 452: 526, 453: 527, 454: 528, 455: 529, 456: 530, 457: 532, 458: 533, 459: 534, 460: 535, 461: 536, 462: 537, 463: 538, 464: 539, 465: 540, 466: 541, 467: 542, 468: 543, 469: 544, 470: 545, 471: 546, 472: 548, 473: 549, 474: 550, 475: 551, 476: 552, 477: 553, 478: 554, 479: 555, 480: 556, 481: 557, 482: 558, 483: 560, 484: 561, 485: 563, 486: 564, 487: 570, 488: 571, 489: 572, 490: 573, 491: 574, 492: 575, 493: 576, 494: 577, 495: 578, 496: 580, 497: 581, 498: 582, 499: 583, 500: 584, 501: 585, 502: 586, 503: 587, 504: 588, 505: 590, 506: 591, 507: 592, 508: 593, 509: 594, 510: 595, 511: 596, 512: 597, 513: 598, 514: 600, 515: 601, 516: 602, 517: 603, 518: 604, 519: 605, 520: 607, 521: 608, 522: 609, 523: 610, 524: 611, 525: 612, 526: 616, 527: 617, 528: 618, 529: 619, 530: 620, 531: 621, 532: 623, 533: 624, 534: 625, 535: 626, 536: 627, 537: 628, 538: 629, 539: 630, 540: 631, 541: 632, 542: 633, 543: 634, 544: 635, 545: 636, 546: 637, 547: 639, 548: 640, 549: 641, 550: 642, 551: 643, 552: 644, 553: 645, 554: 646, 555: 647, 556: 648, 557: 649, 558: 650, 559: 651, 560: 652, 561: 653, 562: 655, 563: 656, 564: 657, 565: 658, 566: 659, 567: 660, 568: 661, 569: 662, 570: 663, 571: 664, 572: 665, 573: 666, 574: 667, 575: 668, 576: 669, 577: 671, 578: 672, 579: 673, 580: 674, 581: 675, 582: 676, 583: 677, 584: 678, 585: 679, 586: 680, 587: 681, 588: 683, 589: 684, 590: 686, 591: 687, 592: 693, 593: 694, 594: 695, 595: 696, 596: 697, 597: 698, 598: 699, 599: 700, 600: 701, 601: 703, 602: 704, 603: 705, 604: 706, 605: 707, 606: 708, 607: 709, 608: 710, 609: 711, 610: 713, 611: 714, 612: 715, 613: 716, 614: 717, 615: 718, 616: 719, 617: 720, 618: 721, 619: 723, 620: 724, 621: 725, 622: 726, 623: 727, 624: 728, 625: 730, 626: 731, 627: 732, 628: 733, 629: 734, 630: 735, 631: 739, 632: 740, 633: 741, 634: 742, 635: 743, 636: 744, 637: 745, 638: 746, 639: 747, 640: 748, 641: 749, 642: 750, 643: 751, 644: 752, 645: 753, 646: 754, 647: 755, 648: 756, 649: 757, 650: 758, 651: 759, 652: 760, 653: 761, 654: 762, 655: 763, 656: 764, 657: 765, 658: 766, 659: 767, 660: 768, 661: 769, 662: 770, 663: 771, 664: 773, 665: 774, 666: 775, 667: 776, 668: 777, 669: 778, 670: 780, 671: 781, 672: 782, 673: 783, 674: 784, 675: 785, 676: 789, 677: 790, 678: 791, 679: 792, 680: 793, 681: 794, 682: 795, 683: 796, 684: 797, 685: 798, 686: 799, 687: 800, 688: 801, 689: 802, 690: 803, 691: 804, 692: 805, 693: 806, 694: 807, 695: 808, 696: 809, 697: 810, 698: 811, 699: 812, 700: 813, 701: 814, 702: 815, 703: 816, 704: 817, 705: 
818, 706: 819, 707: 820, 708: 821, 709: 823, 710: 824, 711: 825, 712: 826, 713: 827, 714: 828, 715: 830, 716: 831, 717: 832, 718: 833, 719: 834, 720: 835, 721: 839, 722: 840, 723: 842, 724: 843, 725: 845, 726: 846, 727: 852, 728: 853, 729: 854, 730: 855, 731: 856, 732: 857, 733: 858, 734: 859, 735: 860, 736: 862, 737: 863, 738: 864, 739: 865, 740: 866, 741: 867, 742: 868, 743: 869, 744: 870, 745: 872, 746: 873, 747: 874, 748: 875, 749: 876, 750: 877, 751: 878, 752: 879, 753: 880, 754: 882, 755: 883, 756: 884, 757: 885, 758: 886, 759: 887, 760: 889, 761: 890, 762: 891, 763: 892, 764: 893, 765: 894, 766: 895, 767: 896, 768: 898, 769: 899, 770: 901, 771: 902, 772: 908, 773: 909, 774: 910, 775: 911, 776: 912, 777: 913, 778: 914, 779: 915, 780: 916, 781: 918, 782: 919, 783: 920, 784: 921, 785: 922, 786: 923, 787: 924, 788: 925, 789: 926, 790: 928, 791: 929, 792: 930, 793: 931, 794: 932, 795: 933, 796: 934, 797: 935, 798: 936, 799: 938, 800: 939, 801: 940, 802: 941, 803: 942, 804: 943, 805: 945, 806: 946, 807: 947, 808: 948, 809: 949, 810: 950, 811: 951, 812: 952, 813: 954, 814: 955, 815: 957, 816: 958, 817: 964, 818: 965, 819: 966, 820: 967, 821: 968, 822: 969, 823: 970, 824: 971, 825: 972, 826: 974, 827: 975, 828: 976, 829: 977, 830: 978, 831: 979, 832: 980, 833: 981, 834: 982, 835: 984, 836: 985, 837: 986, 838: 987, 839: 988, 840: 989, 841: 990, 842: 991, 843: 992, 844: 994, 845: 995, 846: 996, 847: 997, 848: 998, 849: 999, 850: 1001, 851: 1002, 852: 1003, 853: 1004, 854: 1005, 855: 1006, 856: 1007, 857: 1008, 858: 1010, 859: 1011, 860: 1013, 861: 1014, 862: 1019, 863: 1020, 864: 1022, 865: 1023, 866: 1025, 867: 1026, 868: 1031, 869: 1032, 870: 1034, 871: 1035, 872: 1037, 873: 1038, 874: 1046, 875: 1047, 876: 1048, 877: 1049, 878: 1050, 879: 1051, 880: 1052, 881: 1053, 882: 1054, 883: 1055, 884: 1056, 885: 1057, 886: 1058, 887: 1059, 888: 1060, 889: 1061, 890: 1062, 891: 1063, 892: 1065, 893: 1066, 894: 1067, 895: 1068, 896: 1069, 897: 1070, 898: 1071, 899: 1072, 900: 1073, 901: 1074, 902: 1075, 903: 1076, 904: 1077, 905: 1078, 906: 1079, 907: 1080, 908: 1081, 909: 1082, 910: 1084, 911: 1085, 912: 1086, 913: 1087, 914: 1088, 915: 1089, 916: 1090, 917: 1091, 918: 1092, 919: 1093, 920: 1094, 921: 1095, 922: 1096, 923: 1097, 924: 1098, 925: 1099, 926: 1100, 927: 1101, 928: 1103, 929: 1104, 930: 1105, 931: 1106, 932: 1107, 933: 1108, 934: 1110, 935: 1111, 936: 1112, 937: 1113, 938: 1114, 939: 1115, 940: 1117, 941: 1118, 942: 1119, 943: 1120, 944: 1121, 945: 1122} [model_handling.py at line 1544]  DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 4: 3, 5: 4, 7: 5, 8: 6, 14: 7, 15: 8, 16: 9, 18: 10, 19: 11, 20: 12, 22: 13, 23: 14, 24: 15, 26: 16, 27: 17, 28: 18, 29: 19, 30: 20, 31: 21, 33: 22, 34: 23, 35: 24, 36: 25, 37: 26, 38: 27, 39: 28, 40: 29, 41: 30, 42: 31, 43: 32, 44: 33, 45: 34, 46: 35, 47: 36, 49: 37, 50: 38, 51: 39, 52: 40, 53: 41, 54: 42, 55: 43, 56: 44, 57: 45, 58: 46, 59: 47, 60: 48, 61: 49, 62: 50, 63: 51, 65: 52, 66: 53, 67: 54, 68: 55, 69: 56, 70: 57, 71: 58, 72: 59, 73: 60, 74: 61, 75: 62, 76: 63, 77: 64, 78: 65, 79: 66, 81: 67, 82: 68, 83: 69, 84: 70, 85: 71, 86: 72, 87: 73, 88: 74, 89: 75, 91: 76, 92: 77, 93: 78, 94: 79, 95: 80, 96: 81, 97: 82, 98: 83, 99: 84, 101: 85, 102: 86, 103: 87, 104: 88, 105: 89, 106: 90, 107: 91, 108: 92, 109: 93, 110: 94, 111: 95, 112: 96, 113: 97, 114: 98, 115: 99, 116: 100, 117: 101, 118: 102, 119: 103, 120: 104, 121: 105, 124: 106, 125: 107, 126: 108, 127: 109, 128: 110, 129: 111, 130: 112, 131: 113, 132: 114, 133: 115, 134: 116, 135: 117, 136: 118, 137: 119, 
138: 120, 140: 121, 141: 122, 143: 123, 144: 124, 145: 125, 146: 126, 147: 127, 148: 128, 149: 129, 150: 130, 151: 131, 152: 132, 153: 133, 154: 134, 155: 135, 156: 136, 157: 137, 159: 138, 160: 139, 161: 140, 162: 141, 163: 142, 164: 143, 165: 144, 166: 145, 167: 146, 168: 147, 169: 148, 170: 149, 171: 150, 172: 151, 173: 152, 175: 153, 176: 154, 177: 155, 178: 156, 179: 157, 180: 158, 181: 159, 182: 160, 183: 161, 184: 162, 185: 163, 186: 164, 187: 165, 188: 166, 189: 167, 190: 168, 191: 169, 192: 170, 193: 171, 194: 172, 195: 173, 196: 174, 197: 175, 198: 176, 199: 177, 200: 178, 201: 179, 202: 180, 203: 181, 204: 182, 205: 183, 206: 184, 207: 185, 208: 186, 209: 187, 210: 188, 211: 189, 212: 190, 213: 191, 214: 192, 215: 193, 216: 194, 217: 195, 218: 196, 220: 197, 221: 198, 222: 199, 223: 200, 224: 201, 225: 202, 227: 203, 228: 204, 229: 205, 230: 206, 231: 207, 232: 208, 234: 209, 235: 210, 247: 211, 248: 212, 249: 213, 250: 214, 251: 215, 252: 216, 253: 217, 254: 218, 255: 219, 256: 220, 257: 221, 258: 222, 259: 223, 260: 224, 261: 225, 263: 226, 264: 227, 266: 228, 267: 229, 268: 230, 269: 231, 270: 232, 271: 233, 272: 234, 273: 235, 274: 236, 275: 237, 276: 238, 277: 239, 278: 240, 279: 241, 280: 242, 282: 243, 283: 244, 284: 245, 285: 246, 286: 247, 287: 248, 288: 249, 289: 250, 290: 251, 291: 252, 292: 253, 293: 254, 294: 255, 295: 256, 296: 257, 298: 258, 299: 259, 300: 260, 301: 261, 302: 262, 303: 263, 304: 264, 305: 265, 306: 266, 307: 267, 308: 268, 309: 269, 310: 270, 311: 271, 312: 272, 313: 273, 314: 274, 315: 275, 316: 276, 317: 277, 318: 278, 319: 279, 320: 280, 321: 281, 322: 282, 323: 283, 324: 284, 325: 285, 326: 286, 327: 287, 328: 288, 329: 289, 330: 290, 331: 291, 332: 292, 333: 293, 334: 294, 335: 295, 336: 296, 337: 297, 338: 298, 339: 299, 340: 300, 341: 301, 343: 302, 344: 303, 345: 304, 346: 305, 347: 306, 348: 307, 350: 308, 351: 309, 352: 310, 353: 311, 354: 312, 355: 313, 357: 314, 358: 315, 370: 316, 371: 317, 372: 318, 373: 319, 374: 320, 375: 321, 377: 322, 378: 323, 379: 324, 380: 325, 381: 326, 382: 327, 383: 328, 384: 329, 385: 330, 386: 331, 387: 332, 388: 333, 389: 334, 390: 335, 391: 336, 393: 337, 394: 338, 395: 339, 396: 340, 397: 341, 398: 342, 399: 343, 400: 344, 401: 345, 402: 346, 403: 347, 404: 348, 405: 349, 406: 350, 407: 351, 409: 352, 410: 353, 411: 354, 412: 355, 413: 356, 414: 357, 415: 358, 416: 359, 417: 360, 418: 361, 419: 362, 420: 363, 421: 364, 422: 365, 423: 366, 425: 367, 426: 368, 427: 369, 428: 370, 429: 371, 430: 372, 431: 373, 432: 374, 433: 375, 434: 376, 435: 377, 437: 378, 438: 379, 440: 380, 441: 381, 447: 382, 448: 383, 449: 384, 450: 385, 451: 386, 452: 387, 453: 388, 454: 389, 455: 390, 457: 391, 458: 392, 459: 393, 460: 394, 461: 395, 462: 396, 463: 397, 464: 398, 465: 399, 467: 400, 468: 401, 469: 402, 470: 403, 471: 404, 472: 405, 473: 406, 474: 407, 475: 408, 477: 409, 478: 410, 479: 411, 480: 412, 481: 413, 482: 414, 484: 415, 485: 416, 486: 417, 487: 418, 488: 419, 489: 420, 493: 421, 494: 422, 495: 423, 496: 424, 497: 425, 498: 426, 500: 427, 501: 428, 502: 429, 503: 430, 504: 431, 505: 432, 506: 433, 507: 434, 508: 435, 509: 436, 510: 437, 511: 438, 512: 439, 513: 440, 514: 441, 516: 442, 517: 443, 518: 444, 519: 445, 520: 446, 521: 447, 522: 448, 523: 449, 524: 450, 525: 451, 526: 452, 527: 453, 528: 454, 529: 455, 530: 456, 532: 457, 533: 458, 534: 459, 535: 460, 536: 461, 537: 462, 538: 463, 539: 464, 540: 465, 541: 466, 542: 467, 543: 468, 544: 469, 545: 470, 546: 471, 548: 472, 549: 473, 550: 474, 551: 
475, 552: 476, 553: 477, 554: 478, 555: 479, 556: 480, 557: 481, 558: 482, 560: 483, 561: 484, 563: 485, 564: 486, 570: 487, 571: 488, 572: 489, 573: 490, 574: 491, 575: 492, 576: 493, 577: 494, 578: 495, 580: 496, 581: 497, 582: 498, 583: 499, 584: 500, 585: 501, 586: 502, 587: 503, 588: 504, 590: 505, 591: 506, 592: 507, 593: 508, 594: 509, 595: 510, 596: 511, 597: 512, 598: 513, 600: 514, 601: 515, 602: 516, 603: 517, 604: 518, 605: 519, 607: 520, 608: 521, 609: 522, 610: 523, 611: 524, 612: 525, 616: 526, 617: 527, 618: 528, 619: 529, 620: 530, 621: 531, 623: 532, 624: 533, 625: 534, 626: 535, 627: 536, 628: 537, 629: 538, 630: 539, 631: 540, 632: 541, 633: 542, 634: 543, 635: 544, 636: 545, 637: 546, 639: 547, 640: 548, 641: 549, 642: 550, 643: 551, 644: 552, 645: 553, 646: 554, 647: 555, 648: 556, 649: 557, 650: 558, 651: 559, 652: 560, 653: 561, 655: 562, 656: 563, 657: 564, 658: 565, 659: 566, 660: 567, 661: 568, 662: 569, 663: 570, 664: 571, 665: 572, 666: 573, 667: 574, 668: 575, 669: 576, 671: 577, 672: 578, 673: 579, 674: 580, 675: 581, 676: 582, 677: 583, 678: 584, 679: 585, 680: 586, 681: 587, 683: 588, 684: 589, 686: 590, 687: 591, 693: 592, 694: 593, 695: 594, 696: 595, 697: 596, 698: 597, 699: 598, 700: 599, 701: 600, 703: 601, 704: 602, 705: 603, 706: 604, 707: 605, 708: 606, 709: 607, 710: 608, 711: 609, 713: 610, 714: 611, 715: 612, 716: 613, 717: 614, 718: 615, 719: 616, 720: 617, 721: 618, 723: 619, 724: 620, 725: 621, 726: 622, 727: 623, 728: 624, 730: 625, 731: 626, 732: 627, 733: 628, 734: 629, 735: 630, 739: 631, 740: 632, 741: 633, 742: 634, 743: 635, 744: 636, 745: 637, 746: 638, 747: 639, 748: 640, 749: 641, 750: 642, 751: 643, 752: 644, 753: 645, 754: 646, 755: 647, 756: 648, 757: 649, 758: 650, 759: 651, 760: 652, 761: 653, 762: 654, 763: 655, 764: 656, 765: 657, 766: 658, 767: 659, 768: 660, 769: 661, 770: 662, 771: 663, 773: 664, 774: 665, 775: 666, 776: 667, 777: 668, 778: 669, 780: 670, 781: 671, 782: 672, 783: 673, 784: 674, 785: 675, 789: 676, 790: 677, 791: 678, 792: 679, 793: 680, 794: 681, 795: 682, 796: 683, 797: 684, 798: 685, 799: 686, 800: 687, 801: 688, 802: 689, 803: 690, 804: 691, 805: 692, 806: 693, 807: 694, 808: 695, 809: 696, 810: 697, 811: 698, 812: 699, 813: 700, 814: 701, 815: 702, 816: 703, 817: 704, 818: 705, 819: 706, 820: 707, 821: 708, 823: 709, 824: 710, 825: 711, 826: 712, 827: 713, 828: 714, 830: 715, 831: 716, 832: 717, 833: 718, 834: 719, 835: 720, 839: 721, 840: 722, 842: 723, 843: 724, 845: 725, 846: 726, 852: 727, 853: 728, 854: 729, 855: 730, 856: 731, 857: 732, 858: 733, 859: 734, 860: 735, 862: 736, 863: 737, 864: 738, 865: 739, 866: 740, 867: 741, 868: 742, 869: 743, 870: 744, 872: 745, 873: 746, 874: 747, 875: 748, 876: 749, 877: 750, 878: 751, 879: 752, 880: 753, 882: 754, 883: 755, 884: 756, 885: 757, 886: 758, 887: 759, 889: 760, 890: 761, 891: 762, 892: 763, 893: 764, 894: 765, 895: 766, 896: 767, 898: 768, 899: 769, 901: 770, 902: 771, 908: 772, 909: 773, 910: 774, 911: 775, 912: 776, 913: 777, 914: 778, 915: 779, 916: 780, 918: 781, 919: 782, 920: 783, 921: 784, 922: 785, 923: 786, 924: 787, 925: 788, 926: 789, 928: 790, 929: 791, 930: 792, 931: 793, 932: 794, 933: 795, 934: 796, 935: 797, 936: 798, 938: 799, 939: 800, 940: 801, 941: 802, 942: 803, 943: 804, 945: 805, 946: 806, 947: 807, 948: 808, 949: 809, 950: 810, 951: 811, 952: 812, 954: 813, 955: 814, 957: 815, 958: 816, 964: 817, 965: 818, 966: 819, 967: 820, 968: 821, 969: 822, 970: 823, 971: 824, 972: 825, 974: 826, 975: 827, 976: 828, 977: 829, 978: 830, 
979: 831, 980: 832, 981: 833, 982: 834, 984: 835, 985: 836, 986: 837, 987: 838, 988: 839, 989: 840, 990: 841, 991: 842, 992: 843, 994: 844, 995: 845, 996: 846, 997: 847, 998: 848, 999: 849, 1001: 850, 1002: 851, 1003: 852, 1004: 853, 1005: 854, 1006: 855, 1007: 856, 1008: 857, 1010: 858, 1011: 859, 1013: 860, 1014: 861, 1019: 862, 1020: 863, 1022: 864, 1023: 865, 1025: 866, 1026: 867, 1031: 868, 1032: 869, 1034: 870, 1035: 871, 1037: 872, 1038: 873, 1046: 874, 1047: 875, 1048: 876, 1049: 877, 1050: 878, 1051: 879, 1052: 880, 1053: 881, 1054: 882, 1055: 883, 1056: 884, 1057: 885, 1058: 886, 1059: 887, 1060: 888, 1061: 889, 1062: 890, 1063: 891, 1065: 892, 1066: 893, 1067: 894, 1068: 895, 1069: 896, 1070: 897, 1071: 898, 1072: 899, 1073: 900, 1074: 901, 1075: 902, 1076: 903, 1077: 904, 1078: 905, 1079: 906, 1080: 907, 1081: 908, 1082: 909, 1084: 910, 1085: 911, 1086: 912, 1087: 913, 1088: 914, 1089: 915, 1090: 916, 1091: 917, 1092: 918, 1093: 919, 1094: 920, 1095: 921, 1096: 922, 1097: 923, 1098: 924, 1099: 925, 1100: 926, 1101: 927, 1103: 928, 1104: 929, 1105: 930, 1106: 931, 1107: 932, 1108: 933, 1110: 934, 1111: 935, 1112: 936, 1113: 937, 1114: 938, 1115: 939, 1117: 940, 1118: 941, 1119: 942, 1120: 943, 1121: 944, 1122: 945} [model_handling.py at line 1545]  -Generated helas calls for 1 subprocesses (1240 diagrams) in 6.619 s -Wrote files for 2281 helas calls in 18.549 s +Generated helas calls for 1 subprocesses (1240 diagrams) in 6.527 s +Wrote files for 2281 helas calls in 18.453 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 routines ALOHA: aloha creates VVVV3 routines ALOHA: aloha creates VVVV4 routines -ALOHA: aloha creates 5 routines in 0.325 s +ALOHA: aloha creates 5 routines in 0.318 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 routines ALOHA: aloha creates VVVV3 routines ALOHA: aloha creates VVVV4 routines -ALOHA: aloha creates 10 routines in 0.369 s +ALOHA: aloha creates 10 routines in 0.355 s VVV1 VVV1 FFV1 @@ -261,9 +261,9 @@ Type "launch" to generate events from this process, or see Run "open index.html" to see more information about this process. quit -real 0m32.883s -user 0m32.292s -sys 0m0.459s +real 0m32.580s +user 0m32.015s +sys 0m0.455s Code generation completed in 33 seconds ************************************************************ * * diff --git a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) 
-ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? + override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/gg_ttggg.sa/CODEGEN_cudacpp_gg_ttggg_log.txt b/epochX/cudacpp/gg_ttggg.sa/CODEGEN_cudacpp_gg_ttggg_log.txt index 9fa53f086d..70ece972f5 100644 --- a/epochX/cudacpp/gg_ttggg.sa/CODEGEN_cudacpp_gg_ttggg_log.txt +++ b/epochX/cudacpp/gg_ttggg.sa/CODEGEN_cudacpp_gg_ttggg_log.txt @@ -62,7 +62,7 @@ generate g g > t t~ g g g No model currently active, so we import the Standard Model INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.005432844161987305  +DEBUG: model prefixing takes 0.005778312683105469  INFO: Restrict model sm with file models/sm/restrict_default.dat . DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -155,7 +155,7 @@ INFO: Please specify coupling orders to bypass this step. 
INFO: Trying coupling order WEIGHTED<=5: WEIGTHED IS QCD+2*QED INFO: Trying process: g g > t t~ g g g WEIGHTED<=5 @1 INFO: Process has 1240 diagrams -1 processes with 1240 diagrams generated in 1.902 s +1 processes with 1240 diagrams generated in 1.872 s Total: 1 processes with 1240 diagrams output standalone_cudacpp ../TMPOUT/CODEGEN_cudacpp_gg_ttggg Load PLUGIN.CUDACPP_OUTPUT @@ -178,14 +178,14 @@ INFO: Creating files in directory /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TM FileWriter for /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gg_ttggg/SubProcesses/P1_Sigma_sm_gg_ttxggg/./CPPProcess.h FileWriter for /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gg_ttggg/SubProcesses/P1_Sigma_sm_gg_ttxggg/./CPPProcess.cc INFO: Created files CPPProcess.h and CPPProcess.cc in directory /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gg_ttggg/SubProcesses/P1_Sigma_sm_gg_ttxggg/. -Generated helas calls for 1 subprocesses (1240 diagrams) in 6.640 s +Generated helas calls for 1 subprocesses (1240 diagrams) in 6.585 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 routines ALOHA: aloha creates VVVV3 routines ALOHA: aloha creates VVVV4 routines -ALOHA: aloha creates 5 routines in 0.354 s +ALOHA: aloha creates 5 routines in 0.348 s VVV1 VVV1 FFV1 @@ -208,7 +208,7 @@ INFO: Created files Parameters_sm.h and Parameters_sm.cc in directory INFO: /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gg_ttggg/src/. and /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gg_ttggg/src/. quit -real 0m13.132s -user 0m12.955s -sys 0m0.111s -Code generation completed in 13 seconds +real 0m13.103s +user 0m12.928s +sys 0m0.109s +Code generation completed in 14 seconds diff --git a/epochX/cudacpp/gg_ttggg.sa/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_ttggg.sa/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/gg_ttggg.sa/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/gg_ttggg.sa/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) -ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? 
+ override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/gq_ttq.mad/CODEGEN_mad_gq_ttq_log.txt b/epochX/cudacpp/gq_ttq.mad/CODEGEN_mad_gq_ttq_log.txt index f5c94e00cd..cb97eb9e35 100644 --- a/epochX/cudacpp/gq_ttq.mad/CODEGEN_mad_gq_ttq_log.txt +++ b/epochX/cudacpp/gq_ttq.mad/CODEGEN_mad_gq_ttq_log.txt @@ -61,7 +61,7 @@ set zerowidth_tchannel F define q = u c d s u~ c~ d~ s~ INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.005633831024169922  +DEBUG: model prefixing takes 0.005686521530151367  INFO: Restrict model sm with file models/sm/restrict_default.dat . DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -170,7 +170,7 @@ INFO: Crossed process found for g u~ > t t~ u~, reuse diagrams. INFO: Crossed process found for g c~ > t t~ c~, reuse diagrams. INFO: Crossed process found for g d~ > t t~ d~, reuse diagrams. INFO: Crossed process found for g s~ > t t~ s~, reuse diagrams. -8 processes with 40 diagrams generated in 0.080 s +8 processes with 40 diagrams generated in 0.076 s Total: 8 processes with 40 diagrams output madevent_simd ../TMPOUT/CODEGEN_mad_gq_ttq --hel_recycling=False --vector_size=32 Load PLUGIN.CUDACPP_OUTPUT @@ -201,7 +201,7 @@ INFO: Combined process g d~ > t t~ d~ WEIGHTED<=3 @1 with process g u~ > t t~ u~ INFO: Combined process g s~ > t t~ s~ WEIGHTED<=3 @1 with process g u~ > t t~ u~ WEIGHTED<=3 @1 INFO: Creating files in directory P1_gu_ttxu DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -222,7 +222,7 @@ INFO: Finding symmetric diagrams for subprocess group gu_ttxu DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5} [model_handling.py at line 1545]  INFO: Creating files in directory P1_gux_ttxux DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . 
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -241,7 +241,7 @@ INFO: Finding symmetric diagrams for subprocess group gux_ttxux DEBUG: len(subproc_diagrams_for_config) =  5 [model_handling.py at line 1520]  DEBUG: iconfig_to_diag =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5} [model_handling.py at line 1544]  DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5} [model_handling.py at line 1545]  -Generated helas calls for 2 subprocesses (10 diagrams) in 0.031 s +Generated helas calls for 2 subprocesses (10 diagrams) in 0.030 s Wrote files for 32 helas calls in 0.249 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates FFV1 routines @@ -250,7 +250,7 @@ ALOHA: aloha creates 2 routines in 0.146 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVV1 routines -ALOHA: aloha creates 4 routines in 0.134 s +ALOHA: aloha creates 4 routines in 0.133 s FFV1 FFV1 FFV1 @@ -302,10 +302,10 @@ Type "launch" to generate events from this process, or see Run "open index.html" to see more information about this process. quit -real 0m2.313s -user 0m1.990s -sys 0m0.293s -Code generation completed in 2 seconds +real 0m3.389s +user 0m1.964s +sys 0m0.295s +Code generation completed in 3 seconds ************************************************************ * * * W E L C O M E to * diff --git a/epochX/cudacpp/gq_ttq.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/gq_ttq.mad/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/gq_ttq.mad/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/gq_ttq.mad/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) -ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? + override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) 
ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/gq_ttq.sa/CODEGEN_cudacpp_gq_ttq_log.txt b/epochX/cudacpp/gq_ttq.sa/CODEGEN_cudacpp_gq_ttq_log.txt index 96ced9fbc8..1548b0cef5 100644 --- a/epochX/cudacpp/gq_ttq.sa/CODEGEN_cudacpp_gq_ttq_log.txt +++ b/epochX/cudacpp/gq_ttq.sa/CODEGEN_cudacpp_gq_ttq_log.txt @@ -61,7 +61,7 @@ set zerowidth_tchannel F define q = u c d s u~ c~ d~ s~ INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.0056612491607666016  +DEBUG: model prefixing takes 0.005625486373901367  INFO: Restrict model sm with file models/sm/restrict_default.dat . DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -170,7 +170,7 @@ INFO: Crossed process found for g u~ > t t~ u~, reuse diagrams. INFO: Crossed process found for g c~ > t t~ c~, reuse diagrams. INFO: Crossed process found for g d~ > t t~ d~, reuse diagrams. INFO: Crossed process found for g s~ > t t~ s~, reuse diagrams. -8 processes with 40 diagrams generated in 0.080 s +8 processes with 40 diagrams generated in 0.077 s Total: 8 processes with 40 diagrams output standalone_cudacpp ../TMPOUT/CODEGEN_cudacpp_gq_ttq Load PLUGIN.CUDACPP_OUTPUT @@ -210,11 +210,11 @@ INFO: Creating files in directory /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TM FileWriter for /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gq_ttq/SubProcesses/P1_Sigma_sm_gux_ttxux/./CPPProcess.h FileWriter for /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gq_ttq/SubProcesses/P1_Sigma_sm_gux_ttxux/./CPPProcess.cc INFO: Created files CPPProcess.h and CPPProcess.cc in directory /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gq_ttq/SubProcesses/P1_Sigma_sm_gux_ttxux/. -Generated helas calls for 2 subprocesses (10 diagrams) in 0.031 s +Generated helas calls for 2 subprocesses (10 diagrams) in 0.032 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVV1 routines -ALOHA: aloha creates 2 routines in 0.145 s +ALOHA: aloha creates 2 routines in 0.144 s FFV1 FFV1 FFV1 @@ -230,7 +230,7 @@ INFO: Created files Parameters_sm.h and Parameters_sm.cc in directory INFO: /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gq_ttq/src/. and /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_gq_ttq/src/. 
quit -real 0m0.656s -user 0m0.589s -sys 0m0.057s +real 0m0.659s +user 0m0.597s +sys 0m0.049s Code generation completed in 1 seconds diff --git a/epochX/cudacpp/gq_ttq.sa/SubProcesses/cudacpp.mk b/epochX/cudacpp/gq_ttq.sa/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/gq_ttq.sa/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/gq_ttq.sa/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) -ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? + override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? 
(see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/heft_gg_bb.mad/CODEGEN_mad_heft_gg_bb_log.txt b/epochX/cudacpp/heft_gg_bb.mad/CODEGEN_mad_heft_gg_bb_log.txt index 71b6f32fa3..d530a89960 100644 --- a/epochX/cudacpp/heft_gg_bb.mad/CODEGEN_mad_heft_gg_bb_log.txt +++ b/epochX/cudacpp/heft_gg_bb.mad/CODEGEN_mad_heft_gg_bb_log.txt @@ -150,7 +150,7 @@ INFO: Generating Helas calls for process: g g > b b~ HIG<=1 HIW<=1 @1 INFO: Processing color information for process: g g > b b~ HIG<=1 HIW<=1 @1 INFO: Creating files in directory P1_gg_bbx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -170,19 +170,19 @@ INFO: Finding symmetric diagrams for subprocess group gg_bbx DEBUG: iconfig_to_diag =  {1: 1, 2: 2, 3: 3, 4: 4} [model_handling.py at line 1544]  DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4} [model_handling.py at line 1545]  Generated helas calls for 1 subprocesses (4 diagrams) in 0.009 s -Wrote files for 12 helas calls in 0.118 s +Wrote files for 12 helas calls in 0.119 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVS3 routines ALOHA: aloha creates VVV1 set of routines with options: P0 ALOHA: aloha creates FFV1 routines ALOHA: aloha creates FFS2 routines -ALOHA: aloha creates 4 routines in 0.269 s +ALOHA: aloha creates 4 routines in 0.262 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVS3 routines ALOHA: aloha creates VVV1 set of routines with options: P0 ALOHA: aloha creates FFV1 routines ALOHA: aloha creates FFS2 routines -ALOHA: aloha creates 8 routines in 0.251 s +ALOHA: aloha creates 8 routines in 0.249 s VVS3 VVV1 FFV1 @@ -219,9 +219,9 @@ Type "launch" to generate events from this process, or see Run "open index.html" to see more information about this process. quit -real 0m2.198s -user 0m1.905s -sys 0m0.268s +real 0m3.154s +user 0m1.883s +sys 0m0.276s Code generation completed in 3 seconds ************************************************************ * * diff --git a/epochX/cudacpp/heft_gg_bb.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/heft_gg_bb.mad/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/heft_gg_bb.mad/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/heft_gg_bb.mad/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) 
-ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? + override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/heft_gg_bb.sa/CODEGEN_cudacpp_heft_gg_bb_log.txt b/epochX/cudacpp/heft_gg_bb.sa/CODEGEN_cudacpp_heft_gg_bb_log.txt index b38ca5ac91..14cb5a6988 100644 --- a/epochX/cudacpp/heft_gg_bb.sa/CODEGEN_cudacpp_heft_gg_bb_log.txt +++ b/epochX/cudacpp/heft_gg_bb.sa/CODEGEN_cudacpp_heft_gg_bb_log.txt @@ -156,7 +156,7 @@ ALOHA: aloha creates VVS3 routines ALOHA: aloha creates VVV1 set of routines with options: P0 ALOHA: aloha creates FFV1 routines ALOHA: aloha creates FFS2 routines -ALOHA: aloha creates 4 routines in 0.264 s +ALOHA: aloha creates 4 routines in 0.278 s VVS3 VVV1 FFV1 @@ -173,7 +173,7 @@ INFO: Created files Parameters_heft.h and Parameters_heft.cc in directory INFO: /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_heft_gg_bb/src/. and /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_heft_gg_bb/src/. 
quit -real 0m0.666s -user 0m0.585s -sys 0m0.056s +real 0m0.756s +user 0m0.610s +sys 0m0.064s Code generation completed in 1 seconds diff --git a/epochX/cudacpp/heft_gg_bb.sa/SubProcesses/cudacpp.mk b/epochX/cudacpp/heft_gg_bb.sa/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/heft_gg_bb.sa/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/heft_gg_bb.sa/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) -ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? + override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/pp_tt012j.mad/CODEGEN_mad_pp_tt012j_log.txt b/epochX/cudacpp/pp_tt012j.mad/CODEGEN_mad_pp_tt012j_log.txt index 439cf73e6a..c6b7a90b66 100644 --- a/epochX/cudacpp/pp_tt012j.mad/CODEGEN_mad_pp_tt012j_log.txt +++ b/epochX/cudacpp/pp_tt012j.mad/CODEGEN_mad_pp_tt012j_log.txt @@ -61,7 +61,7 @@ set zerowidth_tchannel F define j = p INFO: load particles INFO: load vertices -DEBUG: model prefixing takes 0.005627632141113281  +DEBUG: model prefixing takes 0.00522923469543457  INFO: Restrict model sm with file models/sm/restrict_default.dat . 
DEBUG: Simplifying conditional expressions  DEBUG: remove interactions: u s w+ at order: QED=1  @@ -212,7 +212,7 @@ INFO: Process d~ g > t t~ d~ added to mirror process g d~ > t t~ d~ INFO: Process d~ d > t t~ g added to mirror process d d~ > t t~ g INFO: Process s~ g > t t~ s~ added to mirror process g s~ > t t~ s~ INFO: Process s~ s > t t~ g added to mirror process s s~ > t t~ g -13 processes with 76 diagrams generated in 0.137 s +13 processes with 76 diagrams generated in 0.135 s Total: 18 processes with 83 diagrams add process p p > t t~ j j @2 INFO: Checking for minimal orders which gives processes. @@ -378,7 +378,7 @@ INFO: Process s~ u~ > t t~ u~ s~ added to mirror process u~ s~ > t t~ u~ s~ INFO: Process s~ c~ > t t~ c~ s~ added to mirror process c~ s~ > t t~ c~ s~ INFO: Process s~ d~ > t t~ d~ s~ added to mirror process d~ s~ > t t~ d~ s~ INFO: Crossed process found for s~ s~ > t t~ s~ s~, reuse diagrams. -65 processes with 1119 diagrams generated in 1.816 s +65 processes with 1119 diagrams generated in 1.855 s Total: 83 processes with 1202 diagrams output madevent_simd ../TMPOUT/CODEGEN_mad_pp_tt012j --hel_recycling=False --vector_size=32 Load PLUGIN.CUDACPP_OUTPUT @@ -500,7 +500,7 @@ INFO: Combined process d d~ > t t~ WEIGHTED<=2 with process u u~ > t t~ WEIGHTED INFO: Combined process s s~ > t t~ WEIGHTED<=2 with process u u~ > t t~ WEIGHTED<=2 INFO: Creating files in directory P2_gg_ttxgg DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -521,7 +521,7 @@ INFO: Finding symmetric diagrams for subprocess group gg_ttxgg DEBUG: diag_to_iconfig =  {2: 1, 3: 2, 4: 3, 5: 4, 6: 5, 7: 6, 8: 7, 9: 8, 10: 9, 11: 10, 12: 11, 13: 12, 14: 13, 15: 14, 16: 15, 17: 16, 18: 17, 19: 18, 20: 19, 21: 20, 22: 21, 23: 22, 24: 23, 25: 24, 26: 25, 27: 26, 28: 27, 29: 28, 30: 29, 31: 30, 33: 31, 34: 32, 35: 33, 36: 34, 37: 35, 38: 36, 39: 37, 40: 38, 41: 39, 42: 40, 43: 41, 44: 42, 45: 43, 46: 44, 47: 45, 49: 46, 50: 47, 51: 48, 52: 49, 53: 50, 54: 51, 55: 52, 56: 53, 57: 54, 59: 55, 60: 56, 61: 57, 62: 58, 63: 59, 64: 60, 65: 61, 66: 62, 67: 63, 68: 64, 69: 65, 70: 66, 71: 67, 72: 68, 73: 69, 75: 70, 76: 71, 77: 72, 78: 73, 79: 74, 80: 75, 81: 76, 82: 77, 83: 78, 84: 79, 85: 80, 86: 81, 87: 82, 88: 83, 89: 84, 90: 85, 91: 86, 92: 87, 94: 88, 95: 89, 96: 90, 97: 91, 98: 92, 99: 93, 101: 94, 102: 95, 103: 96, 104: 97, 105: 98, 106: 99, 108: 100, 109: 101, 110: 102, 111: 103, 112: 104, 113: 105} [model_handling.py at line 1545]  INFO: Creating files in directory P2_gg_ttxuux DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . 
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -542,7 +542,7 @@ INFO: Finding symmetric diagrams for subprocess group gg_ttxuux DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15, 16: 16, 17: 17, 18: 18, 19: 19, 20: 20, 21: 21, 22: 22, 23: 23, 24: 24, 25: 25, 26: 26, 27: 27, 28: 28, 29: 29, 30: 30, 31: 31, 32: 32, 33: 33, 35: 34, 36: 35} [model_handling.py at line 1545]  INFO: Creating files in directory P2_gu_ttxgu DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -563,7 +563,7 @@ INFO: Finding symmetric diagrams for subprocess group gu_ttxgu DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15, 16: 16, 17: 17, 18: 18, 19: 19, 20: 20, 21: 21, 22: 22, 23: 23, 24: 24, 25: 25, 26: 26, 27: 27, 28: 28, 29: 29, 30: 30, 31: 31, 32: 32, 33: 33, 35: 34, 36: 35} [model_handling.py at line 1545]  INFO: Creating files in directory P2_gux_ttxgux DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -584,7 +584,7 @@ INFO: Finding symmetric diagrams for subprocess group gux_ttxgux DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15, 16: 16, 17: 17, 18: 18, 19: 19, 20: 20, 21: 21, 22: 22, 23: 23, 24: 24, 25: 25, 26: 26, 27: 27, 28: 28, 29: 29, 30: 30, 31: 31, 32: 32, 33: 33, 35: 34, 36: 35} [model_handling.py at line 1545]  INFO: Creating files in directory P2_uux_ttxgg DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -605,7 +605,7 @@ INFO: Finding symmetric diagrams for subprocess group uux_ttxgg DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15, 16: 16, 17: 17, 18: 18, 19: 19, 20: 20, 21: 21, 22: 22, 23: 23, 24: 24, 25: 25, 26: 26, 27: 27, 28: 28, 29: 29, 30: 30, 31: 31, 32: 32, 33: 33, 35: 34, 36: 35} [model_handling.py at line 1545]  INFO: Creating files in directory P1_gg_ttxg DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -626,7 +626,7 @@ INFO: Finding symmetric diagrams for subprocess group gg_ttxg DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15} [model_handling.py at line 1545]  INFO: Creating files in directory P2_uu_ttxuu DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . 
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -647,7 +647,7 @@ INFO: Finding symmetric diagrams for subprocess group uu_ttxuu DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14} [model_handling.py at line 1545]  INFO: Creating files in directory P2_uux_ttxuux DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -668,7 +668,7 @@ INFO: Finding symmetric diagrams for subprocess group uux_ttxuux DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14} [model_handling.py at line 1545]  INFO: Creating files in directory P2_uxux_ttxuxux DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -689,7 +689,7 @@ INFO: Finding symmetric diagrams for subprocess group uxux_ttxuxux DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14} [model_handling.py at line 1545]  INFO: Creating files in directory P2_uc_ttxuc DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -710,7 +710,7 @@ INFO: Finding symmetric diagrams for subprocess group uc_ttxuc DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7} [model_handling.py at line 1545]  INFO: Creating files in directory P2_uux_ttxccx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -731,7 +731,7 @@ INFO: Finding symmetric diagrams for subprocess group uux_ttxccx DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7} [model_handling.py at line 1545]  INFO: Creating files in directory P2_ucx_ttxucx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -752,7 +752,7 @@ INFO: Finding symmetric diagrams for subprocess group ucx_ttxucx DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7} [model_handling.py at line 1545]  INFO: Creating files in directory P2_uxcx_ttxuxcx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . 
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -773,7 +773,7 @@ INFO: Finding symmetric diagrams for subprocess group uxcx_ttxuxcx DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7} [model_handling.py at line 1545]  INFO: Creating files in directory P1_gu_ttxu DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -794,7 +794,7 @@ INFO: Finding symmetric diagrams for subprocess group gu_ttxu DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5} [model_handling.py at line 1545]  INFO: Creating files in directory P1_gux_ttxux DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -815,7 +815,7 @@ INFO: Finding symmetric diagrams for subprocess group gux_ttxux DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5} [model_handling.py at line 1545]  INFO: Creating files in directory P1_uux_ttxg DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -836,7 +836,7 @@ INFO: Finding symmetric diagrams for subprocess group uux_ttxg DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5} [model_handling.py at line 1545]  INFO: Creating files in directory P0_gg_ttx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -857,7 +857,7 @@ INFO: Finding symmetric diagrams for subprocess group gg_ttx DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3} [model_handling.py at line 1545]  INFO: Creating files in directory P0_uux_ttx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . 
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -876,22 +876,22 @@ INFO: Finding symmetric diagrams for subprocess group uux_ttx DEBUG: len(subproc_diagrams_for_config) =  1 [model_handling.py at line 1520]  DEBUG: iconfig_to_diag =  {1: 1} [model_handling.py at line 1544]  DEBUG: diag_to_iconfig =  {1: 1} [model_handling.py at line 1545]  -Generated helas calls for 18 subprocesses (372 diagrams) in 1.297 s -Wrote files for 810 helas calls in 4.490 s +Generated helas calls for 18 subprocesses (372 diagrams) in 1.293 s +Wrote files for 810 helas calls in 3.534 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 routines ALOHA: aloha creates VVVV3 routines ALOHA: aloha creates VVVV4 routines -ALOHA: aloha creates 5 routines in 0.341 s +ALOHA: aloha creates 5 routines in 0.335 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 routines ALOHA: aloha creates VVVV3 routines ALOHA: aloha creates VVVV4 routines -ALOHA: aloha creates 10 routines in 0.318 s +ALOHA: aloha creates 10 routines in 0.315 s VVV1 VVV1 FFV1 @@ -1100,10 +1100,10 @@ Type "launch" to generate events from this process, or see Run "open index.html" to see more information about this process. quit -real 0m12.229s -user 0m10.360s -sys 0m0.958s -Code generation completed in 13 seconds +real 0m11.245s +user 0m10.299s +sys 0m0.899s +Code generation completed in 12 seconds ************************************************************ * * * W E L C O M E to * diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) -ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? 
+ override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/smeft_gg_tttt.mad/CODEGEN_mad_smeft_gg_tttt_log.txt b/epochX/cudacpp/smeft_gg_tttt.mad/CODEGEN_mad_smeft_gg_tttt_log.txt index fe284c1cc5..d55f30f145 100644 --- a/epochX/cudacpp/smeft_gg_tttt.mad/CODEGEN_mad_smeft_gg_tttt_log.txt +++ b/epochX/cudacpp/smeft_gg_tttt.mad/CODEGEN_mad_smeft_gg_tttt_log.txt @@ -77,7 +77,7 @@ INFO: load vertices DEBUG: MG5 converter defines FFFF26 to Gamma(-2,-4,-3)*Gamma(-2,2,-6)*Gamma(-1,-6,-5)*Gamma(-1,4,-4)*ProjP(-5,1)*ProjP(-3,3) + Gamma(-2,-4,-3)*Gamma(-2,4,-6)*Gamma(-1,-6,-5)*Gamma(-1,2,-4)*ProjP(-5,3)*ProjP(-3,1) + Gamma(-2,-4,-3)*Gamma(-2,2,-6)*Gamma(-1,-6,-5)*Gamma(-1,4,-4)*ProjM(-5,1)*ProjM(-3,3) + Gamma(-2,-4,-3)*Gamma(-2,4,-6)*Gamma(-1,-6,-5)*Gamma(-1,2,-4)*ProjM(-5,3)*ProjM(-3,1)  DEBUG: MG5 converter defines FFFF27 to ProjP(2,1)*ProjP(4,3) + ProjM(2,1)*ProjM(4,3)  DEBUG: MG5 converter defines FFFF112 to ProjM(2,3)*ProjM(4,1) + ProjP(2,3)*ProjP(4,1)  -DEBUG: model prefixing takes 0.13704657554626465  +DEBUG: model prefixing takes 0.13804030418395996  INFO: Change particles name to pass to MG5 convention Defined multiparticle p = g u c d s u~ c~ d~ s~ Defined multiparticle j = g u c d s u~ c~ d~ s~ @@ -92,7 +92,7 @@ INFO: Please specify coupling orders to bypass this step. 
INFO: Trying coupling order WEIGHTED<=4: WEIGTHED IS QCD+2*QED+99*SMHLOOP+99*NP+99*NPshifts+99*NPprop+99*NPcpv+NPcbb+NPcbB+NPcbBB+NPcbd1+NPcbd8+NPcbe+NPcbG+NPcbH+NPcbj1+NPcbj8+NPcbl+NPcbu1+NPcbu8+NPcbW+NPcdB+NPcdd1+NPcdd8+NPcdG+NPcdH+NPcdW+NPceB+NPced+NPcee+NPceH+NPceu+NPceW+NPcG+NPcGtil+NPcH+NPcHB+NPcHbox+NPcHbq+NPcHBtil+NPcHd+NPcHDD+NPcHe+NPcHG+NPcHGtil+NPcHj1+NPcHj3+NPcHl1+NPcHl3+NPcHQ1+NPcHQ3+NPcHt+NPcHtb+NPcHu+NPcHud+NPcHW+NPcHWB+NPcHWBtil+NPcHWtil+NPcjd1+NPcjd8+NPcje+NPcjj11+NPcjj18+NPcjj31+NPcjj38+NPcjQbd1+NPcjQbd8+NPcjQtu1+NPcjQtu8+NPcjtQd1+NPcjtQd8+NPcju1+NPcju8+NPcjujd1+NPcjujd11+NPcjujd8+NPcjujd81+NPcjuQb1+NPcjuQb8+NPcld+NPcle+NPclebQ+NPcledj+NPcleju1+NPcleju3+NPcleQt1+NPcleQt3+NPclj1+NPclj3+NPcll+NPcll1+NPclu+NPcQb1+NPcQb8+NPcQd1+NPcQd8+NPcQe+NPcQj11+NPcQj18+NPcQj31+NPcQj38+NPcQl1+NPcQl3+NPcQQ1+NPcQQ8+NPcQt1+NPcQt8+NPcQtjd1+NPcQtjd8+NPcQtQb1+NPcQtQb8+NPcQu1+NPcQu8+NPcQujb1+NPcQujb8+NPctB+NPctb1+NPctb8+NPctd1+NPctd8+NPcte+NPctG+NPctH+NPctj1+NPctj8+NPctl+NPctt+NPctu1+NPctu8+NPctW+NPcuB+NPcud1+NPcud8+NPcuG+NPcuH+NPcutbd1+NPcutbd8+NPcuu1+NPcuu8+NPcuW+NPcW+NPcWtil+NPQjujb8 INFO: Trying process: g g > t t~ t t~ WEIGHTED<=4 @1 INFO: Process has 72 diagrams -1 processes with 72 diagrams generated in 3.722 s +1 processes with 72 diagrams generated in 3.673 s Total: 1 processes with 72 diagrams output madevent_simd ../TMPOUT/CODEGEN_mad_smeft_gg_tttt --hel_recycling=False --vector_size=32 Load PLUGIN.CUDACPP_OUTPUT @@ -115,7 +115,7 @@ INFO: Generating Helas calls for process: g g > t t~ t t~ WEIGHTED<=4 @1 INFO: Processing color information for process: g g > t t~ t t~ @1 INFO: Creating files in directory P1_gg_ttxttx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . 
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -134,22 +134,22 @@ INFO: Finding symmetric diagrams for subprocess group gg_ttxttx DEBUG: len(subproc_diagrams_for_config) =  70 [model_handling.py at line 1520]  DEBUG: iconfig_to_diag =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15, 16: 16, 17: 17, 18: 18, 19: 19, 20: 20, 21: 21, 22: 22, 23: 23, 24: 24, 25: 25, 26: 26, 27: 27, 28: 28, 29: 29, 30: 30, 31: 31, 32: 32, 33: 33, 34: 34, 35: 35, 36: 36, 37: 37, 38: 38, 39: 39, 40: 40, 41: 41, 42: 42, 43: 43, 44: 44, 45: 45, 46: 46, 47: 47, 48: 48, 49: 49, 50: 50, 51: 51, 52: 52, 53: 53, 54: 54, 55: 55, 56: 56, 57: 57, 58: 58, 59: 59, 60: 60, 61: 61, 62: 62, 63: 63, 64: 64, 65: 65, 66: 66, 67: 68, 68: 69, 69: 71, 70: 72} [model_handling.py at line 1544]  DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15, 16: 16, 17: 17, 18: 18, 19: 19, 20: 20, 21: 21, 22: 22, 23: 23, 24: 24, 25: 25, 26: 26, 27: 27, 28: 28, 29: 29, 30: 30, 31: 31, 32: 32, 33: 33, 34: 34, 35: 35, 36: 36, 37: 37, 38: 38, 39: 39, 40: 40, 41: 41, 42: 42, 43: 43, 44: 44, 45: 45, 46: 46, 47: 47, 48: 48, 49: 49, 50: 50, 51: 51, 52: 52, 53: 53, 54: 54, 55: 55, 56: 56, 57: 57, 58: 58, 59: 59, 60: 60, 61: 61, 62: 62, 63: 63, 64: 64, 65: 65, 66: 66, 68: 67, 69: 68, 71: 69, 72: 70} [model_handling.py at line 1545]  -Generated helas calls for 1 subprocesses (72 diagrams) in 0.187 s -Wrote files for 119 helas calls in 0.437 s +Generated helas calls for 1 subprocesses (72 diagrams) in 0.185 s +Wrote files for 119 helas calls in 0.432 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV5 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 routines ALOHA: aloha creates VVVV9 routines ALOHA: aloha creates VVVV10 routines -ALOHA: aloha creates 5 routines in 0.318 s +ALOHA: aloha creates 5 routines in 0.317 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV5 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 routines ALOHA: aloha creates VVVV9 routines ALOHA: aloha creates VVVV10 routines -ALOHA: aloha creates 10 routines in 0.329 s +ALOHA: aloha creates 10 routines in 0.333 s VVV5 VVV5 FFV1 @@ -193,10 +193,10 @@ Type "launch" to generate events from this process, or see Run "open index.html" to see more information about this process. quit -real 0m7.228s -user 0m6.896s -sys 0m0.299s -Code generation completed in 8 seconds +real 0m7.220s +user 0m6.848s +sys 0m0.283s +Code generation completed in 7 seconds ************************************************************ * * * W E L C O M E to * diff --git a/epochX/cudacpp/smeft_gg_tttt.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/smeft_gg_tttt.mad/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/smeft_gg_tttt.mad/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/smeft_gg_tttt.mad/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) 
-ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? + override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/smeft_gg_tttt.sa/CODEGEN_cudacpp_smeft_gg_tttt_log.txt b/epochX/cudacpp/smeft_gg_tttt.sa/CODEGEN_cudacpp_smeft_gg_tttt_log.txt index 62d7042d00..4fb7228286 100644 --- a/epochX/cudacpp/smeft_gg_tttt.sa/CODEGEN_cudacpp_smeft_gg_tttt_log.txt +++ b/epochX/cudacpp/smeft_gg_tttt.sa/CODEGEN_cudacpp_smeft_gg_tttt_log.txt @@ -77,7 +77,7 @@ INFO: load vertices DEBUG: MG5 converter defines FFFF26 to Gamma(-2,-4,-3)*Gamma(-2,2,-6)*Gamma(-1,-6,-5)*Gamma(-1,4,-4)*ProjP(-5,1)*ProjP(-3,3) + Gamma(-2,-4,-3)*Gamma(-2,4,-6)*Gamma(-1,-6,-5)*Gamma(-1,2,-4)*ProjP(-5,3)*ProjP(-3,1) + Gamma(-2,-4,-3)*Gamma(-2,2,-6)*Gamma(-1,-6,-5)*Gamma(-1,4,-4)*ProjM(-5,1)*ProjM(-3,3) + Gamma(-2,-4,-3)*Gamma(-2,4,-6)*Gamma(-1,-6,-5)*Gamma(-1,2,-4)*ProjM(-5,3)*ProjM(-3,1)  DEBUG: MG5 converter defines FFFF27 to ProjP(2,1)*ProjP(4,3) + ProjM(2,1)*ProjM(4,3)  DEBUG: MG5 converter defines FFFF112 to ProjM(2,3)*ProjM(4,1) + ProjP(2,3)*ProjP(4,1)  -DEBUG: model prefixing takes 0.13954997062683105  +DEBUG: model prefixing takes 0.13859224319458008  INFO: Change particles name to pass to MG5 convention Defined multiparticle p = g u c d s u~ c~ d~ s~ Defined multiparticle j = g u c d s u~ c~ d~ s~ @@ -92,7 +92,7 @@ INFO: Please specify coupling orders to bypass this step. 
INFO: Trying coupling order WEIGHTED<=4: WEIGTHED IS QCD+2*QED+99*SMHLOOP+99*NP+99*NPshifts+99*NPprop+99*NPcpv+NPcbb+NPcbB+NPcbBB+NPcbd1+NPcbd8+NPcbe+NPcbG+NPcbH+NPcbj1+NPcbj8+NPcbl+NPcbu1+NPcbu8+NPcbW+NPcdB+NPcdd1+NPcdd8+NPcdG+NPcdH+NPcdW+NPceB+NPced+NPcee+NPceH+NPceu+NPceW+NPcG+NPcGtil+NPcH+NPcHB+NPcHbox+NPcHbq+NPcHBtil+NPcHd+NPcHDD+NPcHe+NPcHG+NPcHGtil+NPcHj1+NPcHj3+NPcHl1+NPcHl3+NPcHQ1+NPcHQ3+NPcHt+NPcHtb+NPcHu+NPcHud+NPcHW+NPcHWB+NPcHWBtil+NPcHWtil+NPcjd1+NPcjd8+NPcje+NPcjj11+NPcjj18+NPcjj31+NPcjj38+NPcjQbd1+NPcjQbd8+NPcjQtu1+NPcjQtu8+NPcjtQd1+NPcjtQd8+NPcju1+NPcju8+NPcjujd1+NPcjujd11+NPcjujd8+NPcjujd81+NPcjuQb1+NPcjuQb8+NPcld+NPcle+NPclebQ+NPcledj+NPcleju1+NPcleju3+NPcleQt1+NPcleQt3+NPclj1+NPclj3+NPcll+NPcll1+NPclu+NPcQb1+NPcQb8+NPcQd1+NPcQd8+NPcQe+NPcQj11+NPcQj18+NPcQj31+NPcQj38+NPcQl1+NPcQl3+NPcQQ1+NPcQQ8+NPcQt1+NPcQt8+NPcQtjd1+NPcQtjd8+NPcQtQb1+NPcQtQb8+NPcQu1+NPcQu8+NPcQujb1+NPcQujb8+NPctB+NPctb1+NPctb8+NPctd1+NPctd8+NPcte+NPctG+NPctH+NPctj1+NPctj8+NPctl+NPctt+NPctu1+NPctu8+NPctW+NPcuB+NPcud1+NPcud8+NPcuG+NPcuH+NPcutbd1+NPcutbd8+NPcuu1+NPcuu8+NPcuW+NPcW+NPcWtil+NPQjujb8 INFO: Trying process: g g > t t~ t t~ WEIGHTED<=4 @1 INFO: Process has 72 diagrams -1 processes with 72 diagrams generated in 3.727 s +1 processes with 72 diagrams generated in 3.821 s Total: 1 processes with 72 diagrams output standalone_cudacpp ../TMPOUT/CODEGEN_cudacpp_smeft_gg_tttt Load PLUGIN.CUDACPP_OUTPUT @@ -122,7 +122,7 @@ ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 routines ALOHA: aloha creates VVVV9 routines ALOHA: aloha creates VVVV10 routines -ALOHA: aloha creates 5 routines in 0.322 s +ALOHA: aloha creates 5 routines in 0.316 s VVV5 VVV5 FFV1 @@ -142,7 +142,7 @@ INFO: Created files Parameters_SMEFTsim_topU3l_MwScheme_UFO.h and Parameters_SME INFO: /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_smeft_gg_tttt/src/. and /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_smeft_gg_tttt/src/. quit -real 0m5.137s -user 0m5.030s -sys 0m0.072s +real 0m5.206s +user 0m5.107s +sys 0m0.076s Code generation completed in 5 seconds diff --git a/epochX/cudacpp/smeft_gg_tttt.sa/SubProcesses/cudacpp.mk b/epochX/cudacpp/smeft_gg_tttt.sa/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/smeft_gg_tttt.sa/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/smeft_gg_tttt.sa/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) -ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? 
+ override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? + override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/susy_gg_t1t1.mad/CODEGEN_mad_susy_gg_t1t1_log.txt b/epochX/cudacpp/susy_gg_t1t1.mad/CODEGEN_mad_susy_gg_t1t1_log.txt index 37089500b4..49e61427c5 100644 --- a/epochX/cudacpp/susy_gg_t1t1.mad/CODEGEN_mad_susy_gg_t1t1_log.txt +++ b/epochX/cudacpp/susy_gg_t1t1.mad/CODEGEN_mad_susy_gg_t1t1_log.txt @@ -554,7 +554,7 @@ INFO: Please specify coupling orders to bypass this step. INFO: Trying coupling order WEIGHTED<=2: WEIGTHED IS QCD+2*QED INFO: Trying process: g g > t1 t1~ WEIGHTED<=2 @1 INFO: Process has 6 diagrams -1 processes with 6 diagrams generated in 0.125 s +1 processes with 6 diagrams generated in 0.130 s Total: 1 processes with 6 diagrams output madevent_simd ../TMPOUT/CODEGEN_mad_susy_gg_t1t1 --hel_recycling=False --vector_size=32 Load PLUGIN.CUDACPP_OUTPUT @@ -577,7 +577,7 @@ INFO: Generating Helas calls for process: g g > t1 t1~ WEIGHTED<=2 @1 INFO: Processing color information for process: g g > t1 t1~ @1 INFO: Creating files in directory P1_gg_t1t1x DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . 
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -597,7 +597,7 @@ INFO: Finding symmetric diagrams for subprocess group gg_t1t1x DEBUG: iconfig_to_diag =  {1: 2, 2: 3, 3: 4, 4: 5, 5: 6} [model_handling.py at line 1544]  DEBUG: diag_to_iconfig =  {2: 1, 3: 2, 4: 3, 5: 4, 6: 5} [model_handling.py at line 1545]  Generated helas calls for 1 subprocesses (6 diagrams) in 0.008 s -Wrote files for 16 helas calls in 0.125 s +Wrote files for 16 helas calls in 0.126 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 set of routines with options: P0 ALOHA: aloha creates VSS1 routines @@ -607,7 +607,7 @@ ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 set of routines with options: P0 ALOHA: aloha creates VSS1 routines ALOHA: aloha creates VVSS1 routines -ALOHA: aloha creates 6 routines in 0.180 s +ALOHA: aloha creates 6 routines in 0.182 s VVV1 VSS1 VSS1 @@ -647,10 +647,10 @@ Type "launch" to generate events from this process, or see Run "open index.html" to see more information about this process. quit -real 0m3.044s -user 0m2.726s -sys 0m0.300s -Code generation completed in 3 seconds +real 0m3.278s +user 0m2.733s +sys 0m0.284s +Code generation completed in 4 seconds ************************************************************ * * * W E L C O M E to * diff --git a/epochX/cudacpp/susy_gg_t1t1.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/susy_gg_t1t1.mad/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/susy_gg_t1t1.mad/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/susy_gg_t1t1.mad/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) -ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? + override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) 
ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/susy_gg_t1t1.sa/CODEGEN_cudacpp_susy_gg_t1t1_log.txt b/epochX/cudacpp/susy_gg_t1t1.sa/CODEGEN_cudacpp_susy_gg_t1t1_log.txt index c2f899fe3e..1085728e17 100644 --- a/epochX/cudacpp/susy_gg_t1t1.sa/CODEGEN_cudacpp_susy_gg_t1t1_log.txt +++ b/epochX/cudacpp/susy_gg_t1t1.sa/CODEGEN_cudacpp_susy_gg_t1t1_log.txt @@ -582,7 +582,7 @@ ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 set of routines with options: P0 ALOHA: aloha creates VSS1 routines ALOHA: aloha creates VVSS1 routines -ALOHA: aloha creates 3 routines in 0.185 s +ALOHA: aloha creates 3 routines in 0.183 s VVV1 VSS1 VSS1 @@ -598,7 +598,7 @@ INFO: Created files Parameters_MSSM_SLHA2.h and Parameters_MSSM_SLHA2.cc in dire INFO: /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_susy_gg_t1t1/src/. and /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_susy_gg_t1t1/src/. quit -real 0m1.342s -user 0m1.248s -sys 0m0.080s +real 0m1.401s +user 0m1.286s +sys 0m0.057s Code generation completed in 1 seconds diff --git a/epochX/cudacpp/susy_gg_t1t1.sa/SubProcesses/cudacpp.mk b/epochX/cudacpp/susy_gg_t1t1.sa/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/susy_gg_t1t1.sa/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/susy_gg_t1t1.sa/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) -ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? 
+ override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/susy_gg_tt.mad/CODEGEN_mad_susy_gg_tt_log.txt b/epochX/cudacpp/susy_gg_tt.mad/CODEGEN_mad_susy_gg_tt_log.txt index 4f86b653e0..a1082c61f1 100644 --- a/epochX/cudacpp/susy_gg_tt.mad/CODEGEN_mad_susy_gg_tt_log.txt +++ b/epochX/cudacpp/susy_gg_tt.mad/CODEGEN_mad_susy_gg_tt_log.txt @@ -577,7 +577,7 @@ INFO: Generating Helas calls for process: g g > t t~ WEIGHTED<=2 @1 INFO: Processing color information for process: g g > t t~ @1 INFO: Creating files in directory P1_gg_ttx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1152]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -597,15 +597,15 @@ INFO: Finding symmetric diagrams for subprocess group gg_ttx DEBUG: iconfig_to_diag =  {1: 1, 2: 2, 3: 3} [model_handling.py at line 1544]  DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3} [model_handling.py at line 1545]  Generated helas calls for 1 subprocesses (3 diagrams) in 0.006 s -Wrote files for 10 helas calls in 0.119 s +Wrote files for 10 helas calls in 0.116 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 set of routines with options: P0 ALOHA: aloha creates FFV1 routines -ALOHA: aloha creates 2 routines in 0.137 s +ALOHA: aloha creates 2 routines in 0.139 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 set of routines with options: P0 ALOHA: aloha creates FFV1 routines -ALOHA: aloha creates 4 routines in 0.136 s +ALOHA: aloha creates 4 routines in 0.135 s VVV1 FFV1 FFV1 @@ -640,10 +640,10 @@ Type "launch" to generate events from this process, or see Run "open index.html" to see more information about this process. 
 quit
-real 0m3.851s
-user 0m2.590s
-sys 0m0.316s
-Code generation completed in 4 seconds
+real 0m2.872s
+user 0m2.564s
+sys 0m0.301s
+Code generation completed in 2 seconds
 ************************************************************
 * *
 * W E L C O M E to *
diff --git a/epochX/cudacpp/susy_gg_tt.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/susy_gg_tt.mad/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/susy_gg_tt.mad/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/susy_gg_tt.mad/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
diff --git a/epochX/cudacpp/susy_gg_tt.sa/CODEGEN_cudacpp_susy_gg_tt_log.txt b/epochX/cudacpp/susy_gg_tt.sa/CODEGEN_cudacpp_susy_gg_tt_log.txt
index 45e10ca3ac..8479028997 100644
--- a/epochX/cudacpp/susy_gg_tt.sa/CODEGEN_cudacpp_susy_gg_tt_log.txt
+++ b/epochX/cudacpp/susy_gg_tt.sa/CODEGEN_cudacpp_susy_gg_tt_log.txt
@@ -554,7 +554,7 @@ INFO: Please specify coupling orders to bypass this step.
 INFO: Trying coupling order WEIGHTED<=2: WEIGTHED IS QCD+2*QED
 INFO: Trying process: g g > t t~ WEIGHTED<=2 @1
 INFO: Process has 3 diagrams
-1 processes with 3 diagrams generated in 0.122 s
+1 processes with 3 diagrams generated in 0.121 s
 Total: 1 processes with 3 diagrams
 output standalone_cudacpp ../TMPOUT/CODEGEN_cudacpp_susy_gg_tt
 Load PLUGIN.CUDACPP_OUTPUT
@@ -581,7 +581,7 @@ Generated helas calls for 1 subprocesses (3 diagrams) in 0.006 s
 ALOHA: aloha starts to compute helicity amplitudes
 ALOHA: aloha creates VVV1 set of routines with options: P0
 ALOHA: aloha creates FFV1 routines
-ALOHA: aloha creates 2 routines in 0.137 s
+ALOHA: aloha creates 2 routines in 0.136 s
 VVV1
 FFV1
 FFV1
@@ -596,7 +596,7 @@ INFO: Created files Parameters_MSSM_SLHA2.h and Parameters_MSSM_SLHA2.cc in dire
 INFO: /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_susy_gg_tt/src/. and /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_cudacpp_susy_gg_tt/src/.
 quit
-real 0m1.308s
-user 0m1.215s
-sys 0m0.063s
+real 0m1.278s
+user 0m1.188s
+sys 0m0.072s
 Code generation completed in 1 seconds
diff --git a/epochX/cudacpp/susy_gg_tt.sa/SubProcesses/cudacpp.mk b/epochX/cudacpp/susy_gg_tt.sa/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/susy_gg_tt.sa/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/susy_gg_tt.sa/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
      # By default, assume that curand is installed if a CUDA installation exists
      override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
    endif
  else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
    override HASCURAND = hasCurand
  else # non-Nvidia GPU build
    override HASCURAND = hasNoCurand
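
The same three-way detection pattern (tool home found, include directory present, specific header present) recurs in every regenerated cudacpp.mk above. As a minimal standalone sketch of that pattern, assuming only GNU Make and optionally an nvcc in PATH (the file name probe_curand.mk and the echo target are illustrative, not part of the repository), the following makefile probes for curand.h with $(wildcard) in the same way and reports the result:

# probe_curand.mk - minimal sketch of the existence-check pattern, not part of the generated cudacpp.mk
# Derive CUDA_HOME from the nvcc found in PATH (empty if nvcc is not installed)
CUDA_HOME := $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))

ifeq ($(CUDA_HOME),)
  # nvcc not found: there is no CUDA installation to probe
  HASCURAND := hasNoCurand
else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
  # nvcc was found, but curand.h is missing from $(CUDA_HOME)/include/ (the #965 scenario)
  HASCURAND := hasNoCurand
else
  # the header exists: assume cuRAND is usable
  HASCURAND := hasCurand
endif

all: ; @echo "CUDA_HOME='$(CUDA_HOME)' HASCURAND=$(HASCURAND)"

Running 'make -f probe_curand.mk' on a host with a full CUDA toolkit should print hasCurand, while a host with nvcc but no headers (or no nvcc at all) should print hasNoCurand; the regenerated makefiles apply the same branching to CUDA_INC and USE_NVTX before deciding whether to build the CURAND and NVTX code paths.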