# Example: run OpenVLA inference on a frame captured from the AirGen simulator.
# NOTE(review): AirGenCar is used below but never imported in this snippet —
# presumably it comes from grid's AirGen robot module; confirm the import path.
from grid.model.perception.vla.openvla import OpenVLA

car = AirGenCar()

# We will be capturing an image from the AirGen simulator
# and run model inference on it.
img = car.getImage("front_center", "rgb").data

# Load the OpenVLA model locally and query it with the image plus a
# natural-language instruction; `result` is the predicted action.
model = OpenVLA(use_local=True)
result = model.run(rgbimage=img, prompt="Close the drawer")
print(result)
The OpenVLA class is the entry point of this module: it loads the OpenVLA vision-language-action model and, via its run() method, maps an RGB image and a natural-language prompt to a predicted robot action.
Returns: the predicted action for the given prompt and image, represented as a 7-DoF vector.
# Usage example: OpenVLA action prediction on an AirGen camera frame.
from grid.model.perception.vla.openvla import OpenVLA

car = AirGenCar()

# We will be capturing an image from the AirGen simulator
# and run model inference on it.
img = car.getImage("front_center", "rgb").data

model = OpenVLA(use_local=True)
result = model.run(rgbimage=img, prompt="Close the drawer")
print(result)
# Demonstrates querying the OpenVLA model with a simulator image and a prompt.
from grid.model.perception.vla.openvla import OpenVLA

car = AirGenCar()

# We will be capturing an image from the AirGen simulator
# and run model inference on it.
img = car.getImage("front_center", "rgb").data

model = OpenVLA(use_local=True)
result = model.run(rgbimage=img, prompt="Close the drawer")
print(result)