Skip to content

SingaAuto Predictor API

naili-xing edited this page Jul 25, 2020 · 8 revisions

Prediction Service APIs

Request Format

1. If uploading files such as images, use multipart/form-data. Example code:

    // Build a multipart/form-data payload and POST it to the predictor,
    // reporting upload progress through component state.
    const formData = new FormData();
    // append(<field name>, value, <optional filename>)
    console.log("selectedFiles[0]: ", this.state.selectedFiles[0]);

    formData.append("img", this.state.selectedFiles[0]);
    try {
      const res = await axios.post(
        `http://${this.state.predictorHost}/predict`,
        formData,
        {
          headers: {
            'Content-Type': 'multipart/form-data',
            //"Authorization": `Bearer ${this.props.reduxToken}`
          },
          onUploadProgress: progressEvent => {
            // progressEvent carries `loaded` and `total` byte counts.
            // Math.round already returns an integer, so no parseInt is needed.
            const percentCompleted = Math.round(
              (progressEvent.loaded * 100) / progressEvent.total
            );
            console.log("From EventEmiiter, file Uploaded: ", percentCompleted);
            this.setState({
              uploadPercentage: percentCompleted
            });
          }
        }
      );
    } catch (err) {
      // The original snippet opened `try` without ever closing it (a syntax
      // error); report the failure instead of swallowing it.
      console.error("Upload failed: ", err);
    }

PandaVgg model

        # Send a chest X-ray image to the deployed PyPandaVgg model and print
        # the JSON response (Grad-CAM/LIME explanations + MC-dropout stats).
        predictor_host = "ncrs.d2.comp.nus.edu.sg:3005/PyPandaVgg_xray_10epoch"
        query_path = "./IM-0164-0001.jpeg"
        # Use a context manager so the image file handle is closed after the
        # request (the original left it open, leaking the descriptor).
        with open(query_path, 'rb') as image_file:
            res = requests.post('http://{}'.format(predictor_host),
                                files={'img': image_file})
        print(res.text)

Parameters:

  • File:
    • 'img': bytes
  • Responses:
[{
  explanation: {
    gradcam_img: base64,
    lime_img: base64
  },
  mc_dropout: [
    {
      label: string,
      mean: num,
      std: num
    },
    {
      label: string,
      mean: num,
      std: num
    },
    ...
  ]
}]

QA model

        # 1. Register the question-answering model and its preloaded corpus.
        qa_dependencies = {
            "torch": "1.0.1",
            "torchvision": "0.2.2",
            "semanticscholar": "0.1.4",
            "sentence_transformers": "0.2.6.1",
            "tqdm": "4.27",
        }
        client.create_model(
            name='questionAnswer',
            task='question_answer',
            model_file_path='./Question_Answering.py',
            model_class='QuestionAnswering',
            model_preload_file_path="./covid19data.zip",
            dependencies=qa_dependencies,
        )

        # 2. Spin up an inference service from the latest checkpoint.
        print(client.create_inference_job_by_checkpoint(model_name='questionAnswer'))

        # 3. Query the predictor with a task area and its question list.
        predictor_host = "ncrs.d2.comp.nus.edu.sg:3005/questionAnswer"
        query = {
            "Task1": {
                'area': 'What is known about transmission, incubation, and environmental stability?',
                'questions': [
                    'What is the range of the incubation period in humans?',
                ],
            },
        }
        res = requests.post('http://{}'.format(predictor_host),
                            data=json.dumps(query))
        print(res.text)

Responses:

  HTML string

Food Detection Models

        # 1. Register the food-detection model. The zip archive must include
        #    yolov3-food.cfg, yolov3-food_final.weights, food.names, the
        #    Xception weights, and food101.npy — see the model code for details.
        client.create_model( 
               name='food101_v3',
               task='image_detection',   
               model_file_path='./food101.py',
               model_class='FoodDetection101',
               model_preload_file_path="./food101.zip",
               dependencies={"keras": "2.2.4", "tensorflow": "1.12.0"}
               )

        # 2. Create an inference service from the latest checkpoint.
        print(client.create_inference_job_by_checkpoint(model_name='food101_v3'))

        # 3. POST an image to the predictor. Use a context manager so the file
        #    handle is closed after the request (the original leaked it).
        predictor_host = "ncrs.d2.comp.nus.edu.sg:3005/food101_v3"
        query_path = "/examples/data/object_detection/000002.jpg"
        with open(query_path, 'rb') as image_file:
            res = requests.post('http://{}'.format(predictor_host),
                                files={'img': image_file})
        print(res.text)
  • Responses: returns a JSON string
[
     {
      predictions: [
          {
           detection_box: [float],
           label: string,
           label_id: string,
           probability: float
         }],
      status: string
      }
]

example: '[{"predictions":[{"detection_box":[0.46903259085810833,0.11065910062142081,0.9265644759812615,0.7505181238737905],"label":"omelette","label_id":"67","probability":0.9999932646751404}],"status":"ok"}]'
  
Clone this wiki locally