
HPS TensorRT Plugin Demo for PyTorch Trained Model

Overview

This notebook demonstrates how to build and deploy the HPS-integrated TensorRT engine for the model trained with PyTorch.

For more details about HPS, please refer to HugeCTR Hierarchical Parameter Server (HPS).

Installation

Use NGC

The HPS TensorRT plugin is preinstalled in the Merlin PyTorch container, version 23.01 and later: nvcr.io/nvidia/merlin/merlin-pytorch:23.01.

You can verify that the required library is present by running the following Python code after launching the container.

import ctypes
plugin_lib_name = "/usr/local/hps_trt/lib/libhps_plugin.so"
plugin_handle = ctypes.CDLL(plugin_lib_name, mode=ctypes.RTLD_GLOBAL)

Configurations

First of all, we specify the required configurations, e.g., the arguments needed for generating the dataset, the model parameters, and the paths to save the model. We will use a DLRM model, which has one embedding table, bottom MLP layers, an interaction layer, and top MLP layers. Please note that the input to the embedding layer will be a dense key tensor of int32.

import os
import numpy as np
import torch
from torch.utils.data import DataLoader
import struct

args = dict()

args["gpu_num"] = 1                               # the number of available GPUs
args["iter_num"] = 50                             # the number of training iteration
args["slot_num"] = 26                             # the number of feature fields in this embedding layer
args["embed_vec_size"] = 128                      # the dimension of embedding vectors
args["dense_dim"] = 13                            # the dimension of dense features
args["global_batch_size"] = 1024                  # the globally batchsize for all GPUs
args["max_vocabulary_size"] = 260000
args["vocabulary_range_per_slot"] = [[i*10000, (i+1)*10000] for i in range(26)]
args["combiner"] = "mean"

args["ps_config_file"] = "dlrm_pytorch.json"
args["embedding_table_path"] = "dlrm_pytorch_sparse.model"
args["onnx_path"] = "dlrm_pytorch.onnx"
args["modified_onnx_path"] = "dlrm_pytorch_with_hps.onnx"
args["np_key_type"] = np.int32
args["np_vector_type"] = np.float32

os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, range(args["gpu_num"])))
def generate_random_samples(num_samples, vocabulary_range_per_slot, dense_dim, key_dtype = args["np_key_type"]):
    keys = list()
    for vocab_range in vocabulary_range_per_slot:
        keys_per_slot = np.random.randint(low=vocab_range[0], high=vocab_range[1], size=(num_samples, 1), dtype=key_dtype)
        keys.append(keys_per_slot)
    keys = np.concatenate(keys, axis=1)
    numerical_features = np.random.random((num_samples, dense_dim)).astype(np.float32)
    labels = np.random.randint(low=0, high=2, size=(num_samples, 1))
    return keys, numerical_features, labels
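
As a quick sanity check (purely illustrative), the generator returns sample-aligned arrays of keys, dense features, and labels:

keys, dense, labels = generate_random_samples(4, args["vocabulary_range_per_slot"], args["dense_dim"])
print(keys.shape, dense.shape, labels.shape)  # expected: (4, 26) (4, 13) (4, 1)
print(keys.dtype)                             # expected: int32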

Train with PyTorch

We define the model graph for training with native PyTorch layers, i.e., torch.nn.Embedding, torch.nn.Linear and so on. We can then train the model and extract the trained weights of the embedding table.

import torch

class MLP(torch.nn.Module):
    def __init__(self,
                arch,
                name,
                out_activation,
                **kwargs):
        super(MLP, self).__init__(**kwargs)
        self.mlp = torch.nn.Sequential()
        
        for idx in range(1, len(arch)-1):
            self.mlp.add_module(name + "_linear_layer_%d" % idx, torch.nn.Linear(arch[idx-1], arch[idx]))
            self.mlp.add_module(name + "_relu_layer_%d" % idx, torch.nn.ReLU(inplace=True))
            
        idx = len(arch) - 1
        if out_activation == "relu":
            self.mlp.add_module(name + "_linear_layer_%d" % idx, torch.nn.Linear(arch[idx-1], arch[idx]))
            self.mlp.add_module(name + "_relu_layer_%d" % idx, torch.nn.ReLU(inplace=True))
        elif out_activation == "sigmoid":
            self.mlp.add_module(name + "_linear_layer_%d" % idx, torch.nn.Linear(arch[idx-1], arch[idx]))
            self.mlp.add_module(name + "_relu_layer_%d" % idx, torch.nn.Sigmoid())

    def forward(self, x):
        y = self.mlp(x)
        return y

    
class SecondOrderFeatureInteraction(torch.nn.Module):
    def __init__(self):
        super(SecondOrderFeatureInteraction, self).__init__()

    def forward(self, inputs, num_feas):
        dot_products = torch.reshape(torch.matmul(inputs, torch.transpose(inputs, 1, 2)), (-1, num_feas * num_feas))
        indices = torch.tensor([i * num_feas + j for j in range(1, num_feas) for i in range(j)])
        flat_interactions = torch.index_select(dot_products, 1, indices)
        return flat_interactions    

class DLRM(torch.nn.Module):
    def __init__(self,
                 init_tensors,
                 embed_vec_size,
                 slot_num,
                 dense_dim,
                 arch_bot,
                 arch_top,
                 **kwargs):
        
        super(DLRM, self).__init__()
        self.embedding = torch.nn.Embedding.from_pretrained(init_tensors, freeze=False)
        
        self.embed_vec_size = embed_vec_size
        self.slot_num = slot_num
        self.dense_dim = dense_dim
        self.arch_bot = arch_bot
        self.arch_top = arch_top

        self.bot_mlp = MLP([self.dense_dim] + arch_bot, name = "bottom", out_activation='relu')
        self.interaction_layer = SecondOrderFeatureInteraction()
        self.interaction_out_dim = self.slot_num * (self.slot_num+1) // 2
        self.top_mlp = MLP([self.interaction_out_dim + self.arch_bot[-1]] + arch_top, name = "top", out_activation='sigmoid')
    
    def forward(self, inputs):
        categorical_features = inputs[0]
        numerical_features = inputs[1]
        
        embedding_vector = self.embedding(categorical_features)
        dense_x = self.bot_mlp(numerical_features)
        
        concat_features = torch.concat([embedding_vector, torch.reshape(dense_x, (-1, 1, self.arch_bot[-1]))], 1)
        
        Z = self.interaction_layer(concat_features, self.slot_num+1)
        z = torch.concat([dense_x, Z], 1)
        logit = self.top_mlp(z)
        return logit
def train(args):
    init_tensors = torch.Tensor(np.ones(shape=[args["max_vocabulary_size"], args["embed_vec_size"]], dtype=args["np_vector_type"]))
    
    model = DLRM(init_tensors, args["embed_vec_size"], args["slot_num"], args["dense_dim"],
                arch_bot = [512, 256, args["embed_vec_size"]],
                arch_top = [1024, 1024, 512, 256, 1])

    print(model)

    criterion = torch.nn.BCELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
    
    keys, numerical_features, labels = generate_random_samples(args["global_batch_size"] * args["iter_num"], args["vocabulary_range_per_slot"], args["dense_dim"], args["np_key_type"])
    # shuffle=False keeps keys, numerical features, and labels aligned sample-wise across the three loaders
    x0_iterator = iter(DataLoader(torch.from_numpy(keys), batch_size=args["global_batch_size"], shuffle=False, num_workers=0, pin_memory=False, drop_last=False))
    x1_iterator = iter(DataLoader(torch.from_numpy(numerical_features), batch_size=args["global_batch_size"], shuffle=False, num_workers=0, pin_memory=False, drop_last=False))
    y_iterator = iter(DataLoader(torch.from_numpy(labels).float(), batch_size=args["global_batch_size"], shuffle=False, num_workers=0, pin_memory=False, drop_last=False))
    
    
    for i in range(args["iter_num"]):
        inputs = [next(x0_iterator), next(x1_iterator)]
        labels = next(y_iterator)
        preds = model(inputs)
        loss = criterion(preds.squeeze(), labels.squeeze())
        
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print("-"*20, "Step {}, loss: {}".format(i, loss),  "-"*20)
    return model
trained_model = train(args)
embedding_weights = trained_model.state_dict()["embedding.weight"]
print(embedding_weights)
DLRM(
  (embedding): Embedding(260000, 128)
  (bot_mlp): MLP(
    (mlp): Sequential(
      (bottom_linear_layer_1): Linear(in_features=13, out_features=512, bias=True)
      (bottom_relu_layer_1): ReLU(inplace=True)
      (bottom_linear_layer_2): Linear(in_features=512, out_features=256, bias=True)
      (bottom_relu_layer_2): ReLU(inplace=True)
      (bottom_linear_layer_3): Linear(in_features=256, out_features=128, bias=True)
      (bottom_relu_layer_3): ReLU(inplace=True)
    )
  )
  (interaction_layer): SecondOrderFeatureInteraction()
  (top_mlp): MLP(
    (mlp): Sequential(
      (top_linear_layer_1): Linear(in_features=479, out_features=1024, bias=True)
      (top_relu_layer_1): ReLU(inplace=True)
      (top_linear_layer_2): Linear(in_features=1024, out_features=1024, bias=True)
      (top_relu_layer_2): ReLU(inplace=True)
      (top_linear_layer_3): Linear(in_features=1024, out_features=512, bias=True)
      (top_relu_layer_3): ReLU(inplace=True)
      (top_linear_layer_4): Linear(in_features=512, out_features=256, bias=True)
      (top_relu_layer_4): ReLU(inplace=True)
      (top_linear_layer_5): Linear(in_features=256, out_features=1, bias=True)
      (top_relu_layer_5): Sigmoid()
    )
  )
)
-------------------- Step 0, loss: 1.1652954816818237 --------------------
-------------------- Step 1, loss: 1.7626148462295532 --------------------
-------------------- Step 2, loss: 1.1845550537109375 --------------------
-------------------- Step 3, loss: 0.7347715497016907 --------------------
-------------------- Step 4, loss: 1.0786197185516357 --------------------
-------------------- Step 5, loss: 0.9271171689033508 --------------------
-------------------- Step 6, loss: 0.7060756683349609 --------------------
-------------------- Step 7, loss: 0.7490934133529663 --------------------
-------------------- Step 8, loss: 0.8274499773979187 --------------------
-------------------- Step 9, loss: 0.7962949275970459 --------------------
-------------------- Step 10, loss: 0.6947690844535828 --------------------
-------------------- Step 11, loss: 0.7241608500480652 --------------------
-------------------- Step 12, loss: 0.7649394869804382 --------------------
-------------------- Step 13, loss: 0.7043794393539429 --------------------
-------------------- Step 14, loss: 0.6948238611221313 --------------------
-------------------- Step 15, loss: 0.7003152370452881 --------------------
-------------------- Step 16, loss: 0.7330600619316101 --------------------
-------------------- Step 17, loss: 0.711887001991272 --------------------
-------------------- Step 18, loss: 0.6917610168457031 --------------------
-------------------- Step 19, loss: 0.7227296233177185 --------------------
-------------------- Step 20, loss: 0.7232402563095093 --------------------
-------------------- Step 21, loss: 0.7025701999664307 --------------------
-------------------- Step 22, loss: 0.6962350010871887 --------------------
-------------------- Step 23, loss: 0.7100769281387329 --------------------
-------------------- Step 24, loss: 0.7159318923950195 --------------------
-------------------- Step 25, loss: 0.6963521242141724 --------------------
-------------------- Step 26, loss: 0.7058508396148682 --------------------
-------------------- Step 27, loss: 0.7144895792007446 --------------------
-------------------- Step 28, loss: 0.7082542181015015 --------------------
-------------------- Step 29, loss: 0.6955724954605103 --------------------
-------------------- Step 30, loss: 0.6997341513633728 --------------------
-------------------- Step 31, loss: 0.7167338132858276 --------------------
-------------------- Step 32, loss: 0.6962475776672363 --------------------
-------------------- Step 33, loss: 0.6955674290657043 --------------------
-------------------- Step 34, loss: 0.7098587155342102 --------------------
-------------------- Step 35, loss: 0.6992183327674866 --------------------
-------------------- Step 36, loss: 0.6928209066390991 --------------------
-------------------- Step 37, loss: 0.6933107972145081 --------------------
-------------------- Step 38, loss: 0.697549045085907 --------------------
-------------------- Step 39, loss: 0.6969214677810669 --------------------
-------------------- Step 40, loss: 0.6935250163078308 --------------------
-------------------- Step 41, loss: 0.6948344111442566 --------------------
-------------------- Step 42, loss: 0.7015650868415833 --------------------
-------------------- Step 43, loss: 0.6928752660751343 --------------------
-------------------- Step 44, loss: 0.6936203837394714 --------------------
-------------------- Step 45, loss: 0.6962599158287048 --------------------
-------------------- Step 46, loss: 0.6941655278205872 --------------------
-------------------- Step 47, loss: 0.6939643025398254 --------------------
-------------------- Step 48, loss: 0.6933950185775757 --------------------
-------------------- Step 49, loss: 0.6970551013946533 --------------------
tensor([[1.0014, 1.0014, 1.0014,  ..., 1.0014, 1.0014, 1.0014],
        [0.9997, 0.9997, 0.9997,  ..., 0.9997, 0.9997, 0.9997],
        [0.9991, 0.9991, 0.9991,  ..., 0.9991, 0.9991, 0.9991],
        ...,
        [1.0004, 1.0004, 1.0005,  ..., 1.0004, 1.0004, 1.0004],
        [1.0001, 1.0001, 1.0001,  ..., 1.0001, 1.0001, 1.0001],
        [1.0002, 1.0002, 1.0002,  ..., 1.0002, 1.0002, 1.0002]])

Build the HPS-integrated TensorRT engine

In order to use HPS in the inference stage, we first need to convert the embedding weights to the format required by HPS and create a JSON configuration file for HPS.

Then we convert the PyTorch model to ONNX and employ the ONNX GraphSurgeon tool to replace the native PyTorch embedding lookup layer with a placeholder for the HPS TensorRT plugin layer.

After that, we can build the TensorRT engine, which is composed of the HPS TensorRT plugin layer and the dense network.

Step 1: Prepare the sparse model and the JSON configuration file for HPS

Please note that the keys in the dlrm_pytorch_sparse.model/key file are stored as int64, while the HPS TensorRT plugin currently only supports int32 when loading the keys into memory. There is no overflow here because the key values range from 0 to 260000.

def convert_to_sparse_model(embeddings_weights, embedding_table_path, embedding_vec_size):
    os.system("mkdir -p {}".format(embedding_table_path))
    with open("{}/key".format(embedding_table_path), 'wb') as key_file, \
         open("{}/emb_vector".format(embedding_table_path), 'wb') as vec_file:
        for key in range(embeddings_weights.shape[0]):
            vec = embeddings_weights[key]
            key_struct = struct.pack('q', key)  # keys are written as int64 ('q')
            vec_struct = struct.pack(str(embedding_vec_size) + "f", *vec)
            key_file.write(key_struct)
            vec_file.write(vec_struct)
convert_to_sparse_model(embedding_weights.numpy(), args["embedding_table_path"], args["embed_vec_size"])
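
To confirm that the files were written in the expected layout (a minimal check, assuming the int64-key/float32-vector binary format described above), you can read them back with NumPy:

# Optional check: keys are stored as int64 on disk but must fit in int32 for the plugin.
keys_on_disk = np.fromfile(os.path.join(args["embedding_table_path"], "key"), dtype=np.int64)
vecs_on_disk = np.fromfile(os.path.join(args["embedding_table_path"], "emb_vector"), dtype=np.float32)
assert keys_on_disk.max() <= np.iinfo(np.int32).max
assert vecs_on_disk.size == keys_on_disk.size * args["embed_vec_size"]
print(keys_on_disk.shape, vecs_on_disk.reshape(-1, args["embed_vec_size"]).shape)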
%%writefile dlrm_pytorch.json
{
    "supportlonglong": false,
    "models": [{
        "model": "dlrm",
        "sparse_files": ["dlrm_pytorch_sparse.model"],
        "num_of_worker_buffer_in_pool": 3,
        "embedding_table_names":["sparse_embedding0"],
        "embedding_vecsize_per_table": [128],
        "maxnum_catfeature_query_per_table_per_sample": [26],
        "default_value_for_each_table": [1.0],
        "deployed_device_list": [0],
        "max_batch_size": 1024,
        "cache_refresh_percentage_per_iteration": 0.2,
        "hit_rate_threshold": 1.0,
        "gpucacheper": 1.0,
        "gpucache": true
        }
    ]
}
Writing dlrm_pytorch.json
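
Since a malformed configuration only surfaces when the plugin parses it at engine build time, it can be worth sanity-checking the JSON up front (a minimal syntax check; HPS applies its own semantic validation later):

import json

with open(args["ps_config_file"]) as f:
    ps_config = json.load(f)
print(ps_config["models"][0]["model"])  # expected: dlrm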

Step 2: Convert to ONNX and do ONNX graph surgery

dummy_keys = torch.randint(0, args["max_vocabulary_size"], (args["global_batch_size"], args["slot_num"]), dtype=torch.int32)
dummy_numerical_features = torch.randn(args["global_batch_size"], args["dense_dim"])
torch.onnx.export(trained_model, 
                  [dummy_keys, dummy_numerical_features],
                  args["onnx_path"], 
                  verbose = True, 
                  input_names = ["keys", "numerical_features"], 
                  output_names = ["output"], 
                  dynamic_axes = {'keys' : {0 : 'batch_size'}, 'numerical_features' : {0 : 'batch_size'}}
                 )
/tmp/ipykernel_52545/1281679600.py:35: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.
  indices = torch.tensor([i * num_feas + j for j in range(1, num_feas) for i in range(j)])
Exported graph: graph(%keys : Int(*, 26, strides=[26, 1], requires_grad=0, device=cpu),
      %numerical_features : Float(*, 13, strides=[13, 1], requires_grad=0, device=cpu),
      %embedding.weight : Float(260000, 128, strides=[128, 1], requires_grad=1, device=cpu),
      %bot_mlp.mlp.bottom_linear_layer_1.weight : Float(512, 13, strides=[13, 1], requires_grad=1, device=cpu),
      %bot_mlp.mlp.bottom_linear_layer_1.bias : Float(512, strides=[1], requires_grad=1, device=cpu),
      %bot_mlp.mlp.bottom_linear_layer_2.weight : Float(256, 512, strides=[512, 1], requires_grad=1, device=cpu),
      %bot_mlp.mlp.bottom_linear_layer_2.bias : Float(256, strides=[1], requires_grad=1, device=cpu),
      %bot_mlp.mlp.bottom_linear_layer_3.weight : Float(128, 256, strides=[256, 1], requires_grad=1, device=cpu),
      %bot_mlp.mlp.bottom_linear_layer_3.bias : Float(128, strides=[1], requires_grad=1, device=cpu),
      %top_mlp.mlp.top_linear_layer_1.weight : Float(1024, 479, strides=[479, 1], requires_grad=1, device=cpu),
      %top_mlp.mlp.top_linear_layer_1.bias : Float(1024, strides=[1], requires_grad=1, device=cpu),
      %top_mlp.mlp.top_linear_layer_2.weight : Float(1024, 1024, strides=[1024, 1], requires_grad=1, device=cpu),
      %top_mlp.mlp.top_linear_layer_2.bias : Float(1024, strides=[1], requires_grad=1, device=cpu),
      %top_mlp.mlp.top_linear_layer_3.weight : Float(512, 1024, strides=[1024, 1], requires_grad=1, device=cpu),
      %top_mlp.mlp.top_linear_layer_3.bias : Float(512, strides=[1], requires_grad=1, device=cpu),
      %top_mlp.mlp.top_linear_layer_4.weight : Float(256, 512, strides=[512, 1], requires_grad=1, device=cpu),
      %top_mlp.mlp.top_linear_layer_4.bias : Float(256, strides=[1], requires_grad=1, device=cpu),
      %top_mlp.mlp.top_linear_layer_5.weight : Float(1, 256, strides=[256, 1], requires_grad=1, device=cpu),
      %top_mlp.mlp.top_linear_layer_5.bias : Float(1, strides=[1], requires_grad=1, device=cpu)):
  %/embedding/Gather_output_0 : Float(*, 26, 128, strides=[3328, 128, 1], requires_grad=1, device=cpu) = onnx::Gather[onnx_name="/embedding/Gather"](%embedding.weight, %keys), scope: __main__.DLRM::/torch.nn.modules.sparse.Embedding::embedding # /usr/local/lib/python3.8/dist-packages/torch/nn/functional.py:2206:0
  %/bot_mlp/mlp/bottom_linear_layer_1/Gemm_output_0 : Float(*, 512, strides=[512, 1], requires_grad=1, device=cpu) = onnx::Gemm[alpha=1., beta=1., transB=1, onnx_name="/bot_mlp/mlp/bottom_linear_layer_1/Gemm"](%numerical_features, %bot_mlp.mlp.bottom_linear_layer_1.weight, %bot_mlp.mlp.bottom_linear_layer_1.bias), scope: __main__.DLRM::/__main__.MLP::bot_mlp/torch.nn.modules.container.Sequential::mlp/torch.nn.modules.linear.Linear::bottom_linear_layer_1 # /usr/local/lib/python3.8/dist-packages/torch/nn/modules/linear.py:114:0
  %/bot_mlp/mlp/bottom_relu_layer_1/Relu_output_0 : Float(*, 512, strides=[512, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name="/bot_mlp/mlp/bottom_relu_layer_1/Relu"](%/bot_mlp/mlp/bottom_linear_layer_1/Gemm_output_0), scope: __main__.DLRM::/__main__.MLP::bot_mlp/torch.nn.modules.container.Sequential::mlp/torch.nn.modules.activation.ReLU::bottom_relu_layer_1 # /usr/local/lib/python3.8/dist-packages/torch/nn/functional.py:1455:0
  %/bot_mlp/mlp/bottom_linear_layer_2/Gemm_output_0 : Float(*, 256, strides=[256, 1], requires_grad=1, device=cpu) = onnx::Gemm[alpha=1., beta=1., transB=1, onnx_name="/bot_mlp/mlp/bottom_linear_layer_2/Gemm"](%/bot_mlp/mlp/bottom_relu_layer_1/Relu_output_0, %bot_mlp.mlp.bottom_linear_layer_2.weight, %bot_mlp.mlp.bottom_linear_layer_2.bias), scope: __main__.DLRM::/__main__.MLP::bot_mlp/torch.nn.modules.container.Sequential::mlp/torch.nn.modules.linear.Linear::bottom_linear_layer_2 # /usr/local/lib/python3.8/dist-packages/torch/nn/modules/linear.py:114:0
  %/bot_mlp/mlp/bottom_relu_layer_2/Relu_output_0 : Float(*, 256, strides=[256, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name="/bot_mlp/mlp/bottom_relu_layer_2/Relu"](%/bot_mlp/mlp/bottom_linear_layer_2/Gemm_output_0), scope: __main__.DLRM::/__main__.MLP::bot_mlp/torch.nn.modules.container.Sequential::mlp/torch.nn.modules.activation.ReLU::bottom_relu_layer_2 # /usr/local/lib/python3.8/dist-packages/torch/nn/functional.py:1455:0
  %/bot_mlp/mlp/bottom_linear_layer_3/Gemm_output_0 : Float(*, 128, strides=[128, 1], requires_grad=1, device=cpu) = onnx::Gemm[alpha=1., beta=1., transB=1, onnx_name="/bot_mlp/mlp/bottom_linear_layer_3/Gemm"](%/bot_mlp/mlp/bottom_relu_layer_2/Relu_output_0, %bot_mlp.mlp.bottom_linear_layer_3.weight, %bot_mlp.mlp.bottom_linear_layer_3.bias), scope: __main__.DLRM::/__main__.MLP::bot_mlp/torch.nn.modules.container.Sequential::mlp/torch.nn.modules.linear.Linear::bottom_linear_layer_3 # /usr/local/lib/python3.8/dist-packages/torch/nn/modules/linear.py:114:0
  %/bot_mlp/mlp/bottom_relu_layer_3/Relu_output_0 : Float(*, 128, strides=[128, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name="/bot_mlp/mlp/bottom_relu_layer_3/Relu"](%/bot_mlp/mlp/bottom_linear_layer_3/Gemm_output_0), scope: __main__.DLRM::/__main__.MLP::bot_mlp/torch.nn.modules.container.Sequential::mlp/torch.nn.modules.activation.ReLU::bottom_relu_layer_3 # /usr/local/lib/python3.8/dist-packages/torch/nn/functional.py:1455:0
  %/Constant_output_0 : Long(3, strides=[1], device=cpu) = onnx::Constant[value=  -1    1  128 [ CPULongType{3} ], onnx_name="/Constant"](), scope: __main__.DLRM:: # /tmp/ipykernel_52545/1281679600.py:70:0
  %/Reshape_output_0 : Float(*, *, *, strides=[128, 128, 1], requires_grad=1, device=cpu) = onnx::Reshape[allowzero=0, onnx_name="/Reshape"](%/bot_mlp/mlp/bottom_relu_layer_3/Relu_output_0, %/Constant_output_0), scope: __main__.DLRM:: # /tmp/ipykernel_52545/1281679600.py:70:0
  %/Concat_output_0 : Float(*, *, 128, strides=[3456, 128, 1], requires_grad=1, device=cpu) = onnx::Concat[axis=1, onnx_name="/Concat"](%/embedding/Gather_output_0, %/Reshape_output_0), scope: __main__.DLRM:: # /tmp/ipykernel_52545/1281679600.py:70:0
  %/interaction_layer/Transpose_output_0 : Float(*, 128, *, strides=[3456, 1, 128], requires_grad=1, device=cpu) = onnx::Transpose[perm=[0, 2, 1], onnx_name="/interaction_layer/Transpose"](%/Concat_output_0), scope: __main__.DLRM::/__main__.SecondOrderFeatureInteraction::interaction_layer # /tmp/ipykernel_52545/1281679600.py:34:0
  %/interaction_layer/MatMul_output_0 : Float(*, *, *, strides=[729, 27, 1], requires_grad=1, device=cpu) = onnx::MatMul[onnx_name="/interaction_layer/MatMul"](%/Concat_output_0, %/interaction_layer/Transpose_output_0), scope: __main__.DLRM::/__main__.SecondOrderFeatureInteraction::interaction_layer # /tmp/ipykernel_52545/1281679600.py:34:0
  %/interaction_layer/Constant_output_0 : Long(2, strides=[1], device=cpu) = onnx::Constant[value=  -1  729 [ CPULongType{2} ], onnx_name="/interaction_layer/Constant"](), scope: __main__.DLRM::/__main__.SecondOrderFeatureInteraction::interaction_layer # /tmp/ipykernel_52545/1281679600.py:34:0
  %/interaction_layer/Reshape_output_0 : Float(*, *, strides=[729, 1], requires_grad=1, device=cpu) = onnx::Reshape[allowzero=0, onnx_name="/interaction_layer/Reshape"](%/interaction_layer/MatMul_output_0, %/interaction_layer/Constant_output_0), scope: __main__.DLRM::/__main__.SecondOrderFeatureInteraction::interaction_layer # /tmp/ipykernel_52545/1281679600.py:34:0
  %onnx::Gather_33 : Long(351, strides=[1], requires_grad=0, device=cpu) = onnx::Constant[value=<Tensor>]()
  %/interaction_layer/Gather_output_0 : Float(*, 351, strides=[351, 1], requires_grad=1, device=cpu) = onnx::Gather[axis=1, onnx_name="/interaction_layer/Gather"](%/interaction_layer/Reshape_output_0, %onnx::Gather_33), scope: __main__.DLRM::/__main__.SecondOrderFeatureInteraction::interaction_layer # /tmp/ipykernel_52545/1281679600.py:36:0
  %/Concat_1_output_0 : Float(*, 479, strides=[479, 1], requires_grad=1, device=cpu) = onnx::Concat[axis=1, onnx_name="/Concat_1"](%/bot_mlp/mlp/bottom_relu_layer_3/Relu_output_0, %/interaction_layer/Gather_output_0), scope: __main__.DLRM:: # /tmp/ipykernel_52545/1281679600.py:73:0
  %/top_mlp/mlp/top_linear_layer_1/Gemm_output_0 : Float(*, 1024, strides=[1024, 1], requires_grad=1, device=cpu) = onnx::Gemm[alpha=1., beta=1., transB=1, onnx_name="/top_mlp/mlp/top_linear_layer_1/Gemm"](%/Concat_1_output_0, %top_mlp.mlp.top_linear_layer_1.weight, %top_mlp.mlp.top_linear_layer_1.bias), scope: __main__.DLRM::/__main__.MLP::top_mlp/torch.nn.modules.container.Sequential::mlp/torch.nn.modules.linear.Linear::top_linear_layer_1 # /usr/local/lib/python3.8/dist-packages/torch/nn/modules/linear.py:114:0
  %/top_mlp/mlp/top_relu_layer_1/Relu_output_0 : Float(*, 1024, strides=[1024, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name="/top_mlp/mlp/top_relu_layer_1/Relu"](%/top_mlp/mlp/top_linear_layer_1/Gemm_output_0), scope: __main__.DLRM::/__main__.MLP::top_mlp/torch.nn.modules.container.Sequential::mlp/torch.nn.modules.activation.ReLU::top_relu_layer_1 # /usr/local/lib/python3.8/dist-packages/torch/nn/functional.py:1455:0
  %/top_mlp/mlp/top_linear_layer_2/Gemm_output_0 : Float(*, 1024, strides=[1024, 1], requires_grad=1, device=cpu) = onnx::Gemm[alpha=1., beta=1., transB=1, onnx_name="/top_mlp/mlp/top_linear_layer_2/Gemm"](%/top_mlp/mlp/top_relu_layer_1/Relu_output_0, %top_mlp.mlp.top_linear_layer_2.weight, %top_mlp.mlp.top_linear_layer_2.bias), scope: __main__.DLRM::/__main__.MLP::top_mlp/torch.nn.modules.container.Sequential::mlp/torch.nn.modules.linear.Linear::top_linear_layer_2 # /usr/local/lib/python3.8/dist-packages/torch/nn/modules/linear.py:114:0
  %/top_mlp/mlp/top_relu_layer_2/Relu_output_0 : Float(*, 1024, strides=[1024, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name="/top_mlp/mlp/top_relu_layer_2/Relu"](%/top_mlp/mlp/top_linear_layer_2/Gemm_output_0), scope: __main__.DLRM::/__main__.MLP::top_mlp/torch.nn.modules.container.Sequential::mlp/torch.nn.modules.activation.ReLU::top_relu_layer_2 # /usr/local/lib/python3.8/dist-packages/torch/nn/functional.py:1455:0
  %/top_mlp/mlp/top_linear_layer_3/Gemm_output_0 : Float(*, 512, strides=[512, 1], requires_grad=1, device=cpu) = onnx::Gemm[alpha=1., beta=1., transB=1, onnx_name="/top_mlp/mlp/top_linear_layer_3/Gemm"](%/top_mlp/mlp/top_relu_layer_2/Relu_output_0, %top_mlp.mlp.top_linear_layer_3.weight, %top_mlp.mlp.top_linear_layer_3.bias), scope: __main__.DLRM::/__main__.MLP::top_mlp/torch.nn.modules.container.Sequential::mlp/torch.nn.modules.linear.Linear::top_linear_layer_3 # /usr/local/lib/python3.8/dist-packages/torch/nn/modules/linear.py:114:0
  %/top_mlp/mlp/top_relu_layer_3/Relu_output_0 : Float(*, 512, strides=[512, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name="/top_mlp/mlp/top_relu_layer_3/Relu"](%/top_mlp/mlp/top_linear_layer_3/Gemm_output_0), scope: __main__.DLRM::/__main__.MLP::top_mlp/torch.nn.modules.container.Sequential::mlp/torch.nn.modules.activation.ReLU::top_relu_layer_3 # /usr/local/lib/python3.8/dist-packages/torch/nn/functional.py:1455:0
  %/top_mlp/mlp/top_linear_layer_4/Gemm_output_0 : Float(*, 256, strides=[256, 1], requires_grad=1, device=cpu) = onnx::Gemm[alpha=1., beta=1., transB=1, onnx_name="/top_mlp/mlp/top_linear_layer_4/Gemm"](%/top_mlp/mlp/top_relu_layer_3/Relu_output_0, %top_mlp.mlp.top_linear_layer_4.weight, %top_mlp.mlp.top_linear_layer_4.bias), scope: __main__.DLRM::/__main__.MLP::top_mlp/torch.nn.modules.container.Sequential::mlp/torch.nn.modules.linear.Linear::top_linear_layer_4 # /usr/local/lib/python3.8/dist-packages/torch/nn/modules/linear.py:114:0
  %/top_mlp/mlp/top_relu_layer_4/Relu_output_0 : Float(*, 256, strides=[256, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name="/top_mlp/mlp/top_relu_layer_4/Relu"](%/top_mlp/mlp/top_linear_layer_4/Gemm_output_0), scope: __main__.DLRM::/__main__.MLP::top_mlp/torch.nn.modules.container.Sequential::mlp/torch.nn.modules.activation.ReLU::top_relu_layer_4 # /usr/local/lib/python3.8/dist-packages/torch/nn/functional.py:1455:0
  %/top_mlp/mlp/top_linear_layer_5/Gemm_output_0 : Float(*, 1, strides=[1, 1], requires_grad=1, device=cpu) = onnx::Gemm[alpha=1., beta=1., transB=1, onnx_name="/top_mlp/mlp/top_linear_layer_5/Gemm"](%/top_mlp/mlp/top_relu_layer_4/Relu_output_0, %top_mlp.mlp.top_linear_layer_5.weight, %top_mlp.mlp.top_linear_layer_5.bias), scope: __main__.DLRM::/__main__.MLP::top_mlp/torch.nn.modules.container.Sequential::mlp/torch.nn.modules.linear.Linear::top_linear_layer_5 # /usr/local/lib/python3.8/dist-packages/torch/nn/modules/linear.py:114:0
  %output : Float(*, 1, strides=[1, 1], requires_grad=1, device=cpu) = onnx::Sigmoid[onnx_name="/top_mlp/mlp/top_relu_layer_5/Sigmoid"](%/top_mlp/mlp/top_linear_layer_5/Gemm_output_0), scope: __main__.DLRM::/__main__.MLP::top_mlp/torch.nn.modules.container.Sequential::mlp/torch.nn.modules.activation.Sigmoid::top_relu_layer_5 # /usr/local/lib/python3.8/dist-packages/torch/nn/modules/activation.py:294:0
  return (%output)
# ONNX graph surgery to insert the HPS TensorRT plugin placeholder
import onnx_graphsurgeon as gs
from onnx import shape_inference
import numpy as np
import onnx

graph = gs.import_onnx(onnx.load("dlrm_pytorch.onnx"))
saved = []

for node in graph.nodes:
    if node.name == "/embedding/Gather":
        categorical_features = gs.Variable(name="categorical_features", dtype=np.int32, shape=("unknown", 26))
        hps_node = gs.Node(op="HPS_TRT", attrs={"ps_config_file": "dlrm_pytorch.json\0", "model_name": "dlrm\0", "table_id": 0, "emb_vec_size": 128}, 
                           inputs=[categorical_features], outputs=[node.outputs[0]])
        graph.nodes.append(hps_node)
        saved.append(categorical_features)
        node.outputs.clear()
for i in graph.inputs:
    if i.name == "numerical_features":
        saved.append(i)
graph.inputs = saved

graph.cleanup().toposort()
onnx.save(gs.export_onnx(graph), "dlrm_pytorch_with_hps.onnx")
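
To verify that the surgery took effect, you can reload the modified model and check that the graph inputs were renamed and the embedding Gather was replaced by the HPS_TRT placeholder (a quick, optional check):

# Inspect the modified graph: expect an HPS_TRT node and the new input names.
modified = gs.import_onnx(onnx.load(args["modified_onnx_path"]))
print([inp.name for inp in modified.inputs])                         # ['categorical_features', 'numerical_features']
print([node.op for node in modified.nodes if node.op == "HPS_TRT"])  # ['HPS_TRT']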

Step 3: Build the TensorRT engine

# build the TensorRT engine based on dlrm_pytorch_with_hps.onnx
import tensorrt as trt
import ctypes

plugin_lib_name = "/usr/local/hps_trt/lib/libhps_plugin.so"
handle = ctypes.CDLL(plugin_lib_name, mode=ctypes.RTLD_GLOBAL)

TRT_LOGGER = trt.Logger(trt.Logger.INFO)
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)

def build_engine_from_onnx(onnx_model_path):
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(network, TRT_LOGGER) as parser, builder.create_builder_config() as builder_config:
        with open(onnx_model_path, 'rb') as model:
            parser.parse(model.read())

        # min/opt/max shapes for the dynamic batch dimension
        profile = builder.create_optimization_profile()
        profile.set_shape("categorical_features", (1, 26), (1024, 26), (1024, 26))
        profile.set_shape("numerical_features", (1, 13), (1024, 13), (1024, 13))
        builder_config.add_optimization_profile(profile)
        engine = builder.build_serialized_network(network, builder_config)
        return engine

serialized_engine = build_engine_from_onnx("dlrm_pytorch_with_hps.onnx")
with open("dlrm_pytorch_with_hps.trt", "wb") as fout:
    fout.write(serialized_engine)
print("Succesfully build the TensorRT engine")
[01/03/2023-07:25:18] [TRT] [I] [MemUsageChange] Init CUDA: CPU +268, GPU +0, now: CPU 1035, GPU 497 (MiB)
[01/03/2023-07:25:20] [TRT] [I] [MemUsageChange] Init builder kernel library: CPU +170, GPU +46, now: CPU 1259, GPU 543 (MiB)
[01/03/2023-07:25:20] [TRT] [W] CUDA lazy loading is not enabled. Enabling it can significantly reduce device memory usage. See `CUDA_MODULE_LOADING` in https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars
[01/03/2023-07:25:20] [TRT] [W] onnx2trt_utils.cpp:377: Your ONNX model has been generated with INT64 weights, while TensorRT does not natively support INT64. Attempting to cast down to INT32.
[01/03/2023-07:25:20] [TRT] [I] No importer registered for op: HPS_TRT. Attempting to import as plugin.
[01/03/2023-07:25:20] [TRT] [I] Searching for plugin: HPS_TRT, plugin_version: 1, plugin_namespace: 
=====================================================HPS Parse====================================================
[HCTR][07:25:20.652][INFO][RK0][main]: dense_file is not specified using default: 
[HCTR][07:25:20.652][INFO][RK0][main]: num_of_refresher_buffer_in_pool is not specified using default: 1
[HCTR][07:25:20.652][INFO][RK0][main]: maxnum_des_feature_per_sample is not specified using default: 26
[HCTR][07:25:20.652][INFO][RK0][main]: refresh_delay is not specified using default: 0
[HCTR][07:25:20.652][INFO][RK0][main]: refresh_interval is not specified using default: 0
[HCTR][07:25:20.652][INFO][RK0][main]: use_static_table is not specified using default: 0
====================================================HPS Create====================================================
[HCTR][07:25:20.653][INFO][RK0][main]: Creating HashMap CPU database backend...
[HCTR][07:25:20.653][DEBUG][RK0][main]: Created blank database backend in local memory!
[HCTR][07:25:20.653][INFO][RK0][main]: Volatile DB: initial cache rate = 1
[HCTR][07:25:20.653][INFO][RK0][main]: Volatile DB: cache missed embeddings = 0
[HCTR][07:25:20.653][DEBUG][RK0][main]: Created raw model loader in local memory!
[HCTR][07:25:20.653][INFO][RK0][main]: Using Local file system backend.
[HCTR][07:25:22.209][INFO][RK0][main]: Table: hps_et.dlrm.sparse_embedding0; cached 260000 / 260000 embeddings in volatile database (HashMapBackend); load: 260000 / 18446744073709551615 (0.00%).
[HCTR][07:25:22.220][DEBUG][RK0][main]: Real-time subscribers created!
[HCTR][07:25:22.220][INFO][RK0][main]: Creating embedding cache in device 0.
[HCTR][07:25:22.227][INFO][RK0][main]: Model name: dlrm
[HCTR][07:25:22.227][INFO][RK0][main]: Max batch size: 1024
[HCTR][07:25:22.227][INFO][RK0][main]: Number of embedding tables: 1
[HCTR][07:25:22.227][INFO][RK0][main]: Use GPU embedding cache: True, cache size percentage: 1.000000
[HCTR][07:25:22.227][INFO][RK0][main]: Use static table: False
[HCTR][07:25:22.227][INFO][RK0][main]: Use I64 input key: False
[HCTR][07:25:22.227][INFO][RK0][main]: Configured cache hit rate threshold: 1.000000
[HCTR][07:25:22.227][INFO][RK0][main]: The size of thread pool: 80
[HCTR][07:25:22.227][INFO][RK0][main]: The size of worker memory pool: 3
[HCTR][07:25:22.227][INFO][RK0][main]: The size of refresh memory pool: 1
[HCTR][07:25:22.227][INFO][RK0][main]: The refresh percentage : 0.200000
[HCTR][07:25:22.280][DEBUG][RK0][main]: Created raw model loader in local memory!
[HCTR][07:25:22.280][INFO][RK0][main]: Using Local file system backend.
[HCTR][07:25:22.408][INFO][RK0][main]: EC initialization for model: "dlrm", num_tables: 1
[HCTR][07:25:22.408][INFO][RK0][main]: EC initialization on device: 0
[HCTR][07:25:22.433][INFO][RK0][main]: Creating lookup session for dlrm on device: 0
[01/03/2023-07:25:22] [TRT] [I] Successfully created plugin: HPS_TRT
[01/03/2023-07:25:22] [TRT] [I] [MemUsageChange] Init cuBLAS/cuBLASLt: CPU +331, GPU +144, now: CPU 5771, GPU 933 (MiB)
[01/03/2023-07:25:23] [TRT] [I] [MemUsageChange] Init cuDNN: CPU +115, GPU +54, now: CPU 5886, GPU 987 (MiB)
[01/03/2023-07:25:23] [TRT] [I] Local timing cache in use. Profiling results in this builder pass will not be stored.
[01/03/2023-07:26:27] [TRT] [I] Total Activation Memory: 34103362048
[01/03/2023-07:26:27] [TRT] [I] Detected 2 inputs and 1 output network tensors.
[01/03/2023-07:26:27] [TRT] [I] Total Host Persistent Memory: 416
[01/03/2023-07:26:27] [TRT] [I] Total Device Persistent Memory: 0
[01/03/2023-07:26:27] [TRT] [I] Total Scratch Memory: 45142016
[01/03/2023-07:26:27] [TRT] [I] [MemUsageStats] Peak memory usage of TRT CPU/GPU memory allocators: CPU 0 MiB, GPU 75 MiB
[01/03/2023-07:26:27] [TRT] [I] [BlockAssignment] Started assigning block shifts. This will take 3 steps to complete.
[01/03/2023-07:26:27] [TRT] [I] [BlockAssignment] Algorithm ShiftNTopDown took 0.011619ms to assign 3 blocks to 3 nodes requiring 58774016 bytes.
[01/03/2023-07:26:27] [TRT] [I] Total Activation Memory: 58774016
[01/03/2023-07:26:27] [TRT] [I] [MemUsageChange] Init cuBLAS/cuBLASLt: CPU +0, GPU +8, now: CPU 5933, GPU 1035 (MiB)
[01/03/2023-07:26:27] [TRT] [I] [MemUsageChange] Init cuDNN: CPU +0, GPU +8, now: CPU 5933, GPU 1043 (MiB)
[01/03/2023-07:26:27] [TRT] [I] [MemUsageChange] TensorRT-managed allocation in building engine: CPU +0, GPU +32, now: CPU 0, GPU 32 (MiB)
Successfully built the TensorRT engine
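
Before moving on to Triton, you can optionally deserialize the plan and inspect its I/O bindings. This is a sketch that assumes the TensorRT 8.x binding API shipped in the 23.01 container; the plugin library loaded above is required so that the HPS_TRT op can be resolved during deserialization:

# Deserialize the engine and list its bindings (TensorRT 8.x API).
runtime = trt.Runtime(TRT_LOGGER)
with open("dlrm_pytorch_with_hps.trt", "rb") as f:
    engine = runtime.deserialize_cuda_engine(f.read())
for i in range(engine.num_bindings):
    kind = "input" if engine.binding_is_input(i) else "output"
    print(engine.get_binding_name(i), engine.get_binding_shape(i), kind)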

Deploy HPS-integrated TensorRT engine on Triton

In order to deploy the TensorRT engine with the Triton TensorRT backend, we need to create the model repository and define the config.pbtxt first.

!mkdir -p model_repo/dlrm_pytorch_with_hps/1
!mv dlrm_pytorch_with_hps.trt model_repo/dlrm_pytorch_with_hps/1
%%writefile model_repo/dlrm_pytorch_with_hps/config.pbtxt

platform: "tensorrt_plan"
default_model_filename: "dlrm_pytorch_with_hps.trt"
backend: "tensorrt"
max_batch_size: 0
input [
  {
    name: "categorical_features"
    data_type: TYPE_INT32
    dims: [-1,26]
  },
  {
    name: "numerical_features"
    data_type: TYPE_FP32
    dims: [-1,13]
  }
]
output [
  {
      name: "output"
      data_type: TYPE_FP32
      dims: [-1,1]
  }
]
instance_group [
  {
    count: 1
    kind: KIND_GPU
    gpus: [0]
  }
]
Writing model_repo/dlrm_pytorch_with_hps/config.pbtxt
!tree model_repo/dlrm_pytorch_with_hps
model_repo/dlrm_pytorch_with_hps
├── 1
│   └── dlrm_pytorch_with_hps.trt
└── config.pbtxt

1 directory, 2 files

We can then launch the Triton inference server using the TensorRT backend. Please note that LD_PRELOAD is used to load the custom TensorRT plugin (i.e., the HPS TensorRT plugin) into Triton.

Note: Since background processes are not supported by Jupyter, please launch the Triton server by running the following command separately in the background.

LD_PRELOAD=/usr/local/hps_trt/lib/libhps_plugin.so tritonserver --model-repository=/hugectr/hps_trt/notebooks/model_repo/ --load-model=dlrm_pytorch_with_hps --model-control-mode=explicit

If you started tritonserver successfully, you should see a log similar to the following:

+----------+--------------------------------+--------------------------------+
| Backend  | Path                           | Config                         |
+----------+--------------------------------+--------------------------------+
| tensorrt | /opt/tritonserver/backends/ten | {"cmdline":{"auto-complete-con |
|          | sorrt/libtriton_tensorrt.so    | fig":"true","min-compute-capab |
|          |                                | ility":"6.000000","backend-dir |
|          |                                | ectory":"/opt/tritonserver/bac |
|          |                                | kends","default-max-batch-size |
|          |                                | ":"4"}}                        |
|          |                                |                                |
+----------+--------------------------------+--------------------------------+


+-----------------------+---------+--------+
| Model                 | Version | Status |
+-----------------------+---------+--------+
| dlrm_pytorch_with_hps | 1       | READY  |
+-----------------------+---------+--------+
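
Before sending inference requests, it can help to confirm that the server is live and the model is ready (a minimal health check with the Triton HTTP client):

import tritonclient.http as httpclient

client = httpclient.InferenceServerClient("localhost:8000")
print(client.is_server_live())                         # expected: True
print(client.is_model_ready("dlrm_pytorch_with_hps"))  # expected: True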

We can then send requests to the Triton inference server using the HTTP client.

import os
import shutil
import numpy as np
import tritonclient.http as httpclient
from tritonclient.utils import *

BATCH_SIZE = 1024

categorical_feature = np.random.randint(0,260000,size=(BATCH_SIZE,26)).astype(np.int32)
numerical_feature = np.random.random((BATCH_SIZE, 13)).astype(np.float32)

inputs = [
    httpclient.InferInput("categorical_features", 
                          categorical_feature.shape,
                          np_to_triton_dtype(np.int32)),
    httpclient.InferInput("numerical_features", 
                          numerical_feature.shape,
                          np_to_triton_dtype(np.float32)),                          
]
inputs[0].set_data_from_numpy(categorical_feature)
inputs[1].set_data_from_numpy(numerical_feature)


outputs = [
    httpclient.InferRequestedOutput("output")
]

model_name = "dlrm_pytorch_with_hps"

with httpclient.InferenceServerClient("localhost:8000") as client:
    response = client.infer(model_name,
                            inputs,
                            outputs=outputs)
    result = response.get_response()
    
    print("Prediction result is \n{}".format(response.as_numpy("output")))
    print("Response details:\n{}".format(result))
Prediction result is 
[[0.5128022 ]
 [0.51312006]
 [0.51246136]
 ...
 [0.5129204 ]
 [0.51302147]
 [0.513144  ]]
Response details:
{'model_name': 'dlrm_pytorch_with_hps', 'model_version': '1', 'outputs': [{'name': 'output', 'datatype': 'FP32', 'shape': [1024, 1], 'parameters': {'binary_data_size': 4096}}]}