Inferencing applications can be achieved using any of the methods described below.
Hydrosphere UI
To send a sample request using the Hydrosphere UI, open the desired application and press the Test button in the upper-right corner. Hydrosphere will generate dummy inputs based on your model's contract and send an HTTP request to the model's endpoint.
HTTP Inference
POST /gateway/application/<application_name>
To send an HTTP request, send a POST request to the /gateway/application/<application_name> endpoint with a JSON body containing your request data, composed according to the model's contract.
Path Parameters
Name
Type
Description
Request Body
Name
Type
Description
gRPC
To send a gRPC request, you need to create a dedicated gRPC client, as shown in the examples below.
import grpc
import hydro_serving_grpc as hs  # pip install hydro-serving-grpc

# Connect to your Hydrosphere (ML Lambda) instance.
channel = grpc.insecure_channel("<host>")
stub = hs.PredictionServiceStub(channel)

# 1. Define the model that you'll use.
model_spec = hs.ModelSpec(name="model")

# 2. Define the tensor_shape for the Tensor instance:
#    size=-1 is a variable batch dimension, each row has 2 features.
tensor_shape = hs.TensorShapeProto(
    dim=[
        hs.TensorShapeProto.Dim(size=-1),
        hs.TensorShapeProto.Dim(size=2),
    ]
)

# 3. Define a tensor with the needed data.
tensor = hs.TensorProto(
    dtype=hs.DT_DOUBLE,
    tensor_shape=tensor_shape,
    double_val=[1, 1, 1, 1],
)

# 4. Create the PredictRequest instance.
request = hs.PredictRequest(model_spec=model_spec, inputs={"x": tensor})

# Call the Predict method.
result = stub.Predict(request)
importcom.google.protobuf.Int64Value;importio.grpc.ManagedChannel;importio.grpc.ManagedChannelBuilder;importio.hydrosphere.serving.tensorflow.DataType;importio.hydrosphere.serving.tensorflow.TensorProto;importio.hydrosphere.serving.tensorflow.TensorShapeProto;importio.hydrosphere.serving.tensorflow.api.Model;importio.hydrosphere.serving.tensorflow.api.Predict;importio.hydrosphere.serving.tensorflow.api.PredictionServiceGrpc;importjava.util.Random;publicclassHydrosphereClient {privatefinalString modelName; // Actual model name, registered within Hydrosphere platformprivatefinalInt64Value modelVersion; // Model version of the registered model within Hydrosphere platformprivatefinalManagedChannel channel;privatefinalPredictionServiceGrpc.PredictionServiceBlockingStub blockingStub;publicHydrosphereClient2(String target,String modelName,long modelVersion) {this(ManagedChannelBuilder.forTarget(target).build(), modelName, modelVersion); }HydrosphereClient2(ManagedChannel channel,String modelName,long modelVersion) {this.channel= channel;this.modelName= modelName;this.modelVersion=Int64Value.newBuilder().setValue(modelVersion).build();this.blockingStub=PredictionServiceGrpc.newBlockingStub(this.channel); }privateModel.ModelSpecgetModelSpec() {/* Helper method to generate ModelSpec. */returnModel.ModelSpec.newBuilder().setName(this.modelName).setVersion(this.modelVersion).build(); }privateTensorProtogenerateDoubleTensorProto() {/* Helper method generating random TensorProto object for double values. */returnTensorProto.newBuilder().addDoubleVal(newRandom().nextDouble()).setDtype(DataType.DT_DOUBLE).setTensorShape(TensorShapeProto.newBuilder().build()) // Empty TensorShape indicates scalar shape.build(); }publicPredict.PredictRequestgeneratePredictRequest() {/* PredictRequest is used to define the data passed to the model for inference. 
*/returnPredict.PredictRequest.newBuilder().putInputs("in",this.generateDoubleTensorProto()).setModelSpec(this.getModelSpec()).build(); }publicPredict.PredictResponsepredict(Predict.PredictRequest request) {/* The actual use of RPC method Predict of the PredictionService to invoke prediction. */returnthis.blockingStub.predict(request); }publicstaticvoidmain(String[] args) throwsException {HydrosphereClient client =newHydrosphereClient("<host>","example",2);Predict.PredictRequest request =client.generatePredictRequest();Predict.PredictResponse response =client.predict(request);System.out.println(response); }}