Fix model inference tests (#12)
koparasy committed Oct 19, 2023
1 parent cfc0db5 · commit 3e25d60
Showing 3 changed files with 10 additions and 7 deletions.
src/ml/surrogate.hpp: 6 changes (3 additions, 3 deletions)
@@ -145,7 +145,7 @@ PERFFASPECT()
   // -------------------------------------------------------------------------
   PERFFASPECT()
   inline void _evaluate(long num_elements,
-                        long num_in,
+                        size_t num_in,
                         size_t num_out,
                         const TypeInValue** inputs,
                         TypeInValue** outputs)
@@ -196,9 +196,9 @@ PERFFASPECT()

   PERFFASPECT()
   inline void evaluate(long num_elements,
-                       long num_in,
+                       size_t num_in,
                        size_t num_out,
-                       TypeInValue** inputs,
+                       const TypeInValue** inputs,
                        TypeInValue** outputs)
   {
     _evaluate(num_elements, num_in, num_out, inputs, outputs);
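
Note on the const TypeInValue** change (background, not stated in the commit): in C++ a T** does not implicitly convert to const T**, because that conversion would let code modify a const object through an alias. Callers of the const-correct signature therefore have to hold their inputs as const T* up front, which is exactly what the test change below does. A minimal standalone sketch, using a stand-in evaluate rather than the AMS SurrogateModel API:

#include <vector>

// Stand-in for a const-correct evaluate signature; not the actual AMS API.
void evaluate(long num_elements, const double** inputs) {
  (void)num_elements;
  (void)inputs;
}

int main() {
  std::vector<double*> raw(2, nullptr);
  // evaluate(2, raw.data());                    // ill-formed: double** does not convert to const double**
  std::vector<const double*> views(raw.begin(), raw.end());
  evaluate(2, views.data());                     // OK: the element type is already const double*
  return 0;
}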
tests/CMakeLists.txt: 2 changes (2 additions, 0 deletions)
@@ -10,6 +10,8 @@ function (ADDTEST binary_name src_file test_name)
   target_link_libraries(${binary_name} PRIVATE AMS umpire MPI::MPI_CXX)

   add_test(NAME "${test_name}::HOST" COMMAND ${binary_name} 0 ${ARGN})
+
+  target_compile_definitions(${binary_name} PRIVATE ${AMS_APP_DEFINES})
   if (WITH_CUDA)
     set_target_properties(${binary_name} PROPERTIES CUDA_ARCHITECTURES "${AMS_CUDA_ARCH}")
     set_property(TARGET ${binary_name} PROPERTY CUDA_SEPARABLE_COMPILATION ON)
tests/torch_model.cpp: 9 changes (5 additions, 4 deletions)
@@ -15,15 +15,15 @@
 #include <wf/data_handler.hpp>
 #include <wf/resource_manager.hpp>

-#define SIZE (32 * 1024 + 3)
+#define SIZE (32L * 1024L + 3L)

 template <typename T>
 void inference(char *path, int device, AMSResourceType resource)
 {
   using namespace ams;
   SurrogateModel<T> model(path, !device);

-  std::vector<T *> inputs;
+  std::vector<const T *> inputs;
   std::vector<T *> outputs;

   for (int i = 0; i < 2; i++)
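
Note on the SIZE macro (a likely motivation, not stated in the commit): with plain literals the constant has type int, while the L suffixes make the whole expression long, matching the long num_elements parameter of evaluate rather than relying on implicit promotion. A small sketch of the type difference:

#include <type_traits>

// With int literals the arithmetic stays int; with the L suffix it is long.
// (Illustrative only; 32 * 1024 + 3 is nowhere near overflowing either type.)
static_assert(std::is_same_v<decltype(32 * 1024 + 3), int>, "int arithmetic");
static_assert(std::is_same_v<decltype(32L * 1024L + 3L), long>, "long arithmetic");

int main() { return 0; }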
@@ -35,8 +35,9 @@ void inference(char *path, int device, AMSResourceType resource)
   model.evaluate(
       SIZE, inputs.size(), outputs.size(), inputs.data(), outputs.data());

+
   for (int i = 0; i < 2; i++)
-    ResourceManager::deallocate(inputs[i], resource);
+    ResourceManager::deallocate(const_cast<T*>(inputs[i]), resource);

   for (int i = 0; i < 4; i++)
     ResourceManager::deallocate(outputs[i], resource);
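
The const_cast in the deallocation loop is the flip side of storing const T* in inputs: assuming ResourceManager::deallocate takes a non-const pointer (as the cast suggests), the constness added for evaluate has to be stripped before freeing. A minimal sketch with a stand-in deallocator, not the AMS ResourceManager:

#include <cstdlib>
#include <vector>

// Stand-in for a deallocator that takes a mutable pointer.
void deallocate(double* ptr) { std::free(ptr); }

int main() {
  std::vector<const double*> inputs;
  inputs.push_back(static_cast<double*>(std::malloc(8 * sizeof(double))));
  // deallocate(inputs[0]);                      // ill-formed: const double* does not convert to double*
  deallocate(const_cast<double*>(inputs[0]));    // OK: the buffer is owned here and was never truly const
  return 0;
}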
@@ -56,7 +57,7 @@ int main(int argc, char *argv[])
   if (use_device == 1) {
     AMSSetupAllocator(AMSResourceType::DEVICE);
     AMSSetDefaultAllocator(AMSResourceType::DEVICE);
-    AMSResourceType resource = AMSResourceType::DEVICE;
+    resource = AMSResourceType::DEVICE;
   }

   inference<double>(model_path, use_device, resource);
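
The change in main fixes what appears to be a shadowing bug: the old line declared a new resource variable scoped to the if block, so the outer variable later passed to inference() stayed at its host default even when use_device was set. A standalone sketch of the pattern, with a hypothetical enum standing in for AMSResourceType:

#include <cstdio>

enum class Resource { HOST, DEVICE };   // hypothetical stand-in for AMSResourceType

int main() {
  int use_device = 1;
  Resource resource = Resource::HOST;
  if (use_device == 1) {
    // Before the fix, the equivalent of "Resource resource = Resource::DEVICE;" here
    // declared a new variable that shadowed the outer one and vanished at the brace.
    resource = Resource::DEVICE;        // after the fix: assign to the outer variable
  }
  std::printf("%s\n", resource == Resource::DEVICE ? "DEVICE" : "HOST");
  return 0;
}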
