ROS Resources: Documentation | Support | Discussion Forum | Index | Service Status | ros @ Robotics Stack Exchange
Ask Your Question

Revision history [back]

Here is what I came up with- I haven't yet checked to make sure the depth values when decompressed are correct, they just seemed reasonable as seen in rqt:

import cv2
import numpy as np
from sensor_msgs.msg import CompressedImage

def encode_compressed_depth_image_msg(depth_image: np.ndarray,
                                      depth_min=1.0, depth_max=10.0,
                                      depth_quantization=100.0, verbose=False):
    """Encode a 32FC1 depth image into a ROS CompressedImage message.

    Follows the compressed_depth_image_transport "compressedDepth png"
    payload convention: 12 header bytes (format field plus the two
    inverse-depth quantization parameters as float32) followed by a
    16-bit grayscale PNG.

    Args:
        depth_image: 2-D float array of depths in meters. Not modified.
        depth_min: depths below this are encoded as invalid (0).
        depth_max: depths above this are encoded as invalid (0).
        depth_quantization: z0 parameter controlling quantization granularity.
        verbose: unused; kept for interface compatibility.

    Returns:
        sensor_msgs.msg.CompressedImage with format
        "32FC1; compressedDepth png".
    """
    # Inverse-depth quantization parameters.
    depth_z0 = depth_quantization
    depth_quant_a = depth_z0 * (depth_z0 + 1.0)
    depth_quant_b = 1.0 - depth_quant_a / depth_max

    # Use a validity mask instead of writing NaNs into the caller's array:
    # the original mutated its input and relied on an undefined NaN->uint16
    # cast. Out-of-range and non-finite depths encode as 0 (invalid).
    valid = (np.isfinite(depth_image)
             & (depth_image >= depth_min)
             & (depth_image <= depth_max))
    inv_depth = np.zeros(depth_image.shape, dtype=np.uint16)
    inv_depth[valid] = (depth_quant_a / depth_image[valid]
                        + depth_quant_b).astype(np.uint16)

    depth_encoded = cv2.imencode(".png", inv_depth)[1]
    # 12-byte header: format field (0), then quant a and b. float32 0.0 has
    # the same byte pattern as int32 0, so this matches the C++ struct layout.
    header = np.array([0.0, depth_quant_a, depth_quant_b], np.float32)

    compressed_depth_msg = CompressedImage()
    compressed_depth_msg.format = "32FC1; compressedDepth png"
    compressed_depth_msg.data = header.tobytes() + depth_encoded.tobytes()
    return compressed_depth_msg

The payload of the CompressedDepth "32FC1; compressedDepth png" is 12 header bytes which contain the conversion scaling numbers quant a & b, then an encoded 16-bit grayscale png follows (if you lop off those first 12 bytes and save the rest of the bytes to a file you can display it in a png viewer and see the depth values). The scaling numbers are fixed above but could be changed per-frame if there was a reason to, by manipulating depth_max and depth_quantization (or refactor so depth_quant_a and b are set through some other means).

Here is what I came up with- I haven't yet checked to make sure the depth values when decompressed are correct, they just seemed reasonable as seen in rqt:

import cv2
import numpy as np
from sensor_msgs.msg import CompressedImage

def encode_compressed_depth_image_msg(depth_image: np.ndarray,
                                      depth_min=1.0, depth_max=10.0,
                                      depth_quantization=100.0, verbose=False):
    """Encode a 32FC1 depth image into a ROS CompressedImage message.

    Follows the compressed_depth_image_transport "compressedDepth png"
    payload convention: 12 header bytes (format field plus the two
    inverse-depth quantization parameters as float32) followed by a
    16-bit grayscale PNG.

    Args:
        depth_image: 2-D float array of depths in meters. Not modified.
        depth_min: depths below this are encoded as invalid (0).
        depth_max: depths above this are encoded as invalid (0).
        depth_quantization: z0 parameter controlling quantization granularity.
        verbose: unused; kept for interface compatibility.

    Returns:
        sensor_msgs.msg.CompressedImage with format
        "32FC1; compressedDepth png".
    """
    # Inverse-depth quantization parameters.
    depth_z0 = depth_quantization
    depth_quant_a = depth_z0 * (depth_z0 + 1.0)
    depth_quant_b = 1.0 - depth_quant_a / depth_max

    # Use a validity mask instead of writing NaNs into the caller's array:
    # the original mutated its input and relied on an undefined NaN->uint16
    # cast. Out-of-range and non-finite depths encode as 0 (invalid).
    valid = (np.isfinite(depth_image)
             & (depth_image >= depth_min)
             & (depth_image <= depth_max))
    inv_depth = np.zeros(depth_image.shape, dtype=np.uint16)
    inv_depth[valid] = (depth_quant_a / depth_image[valid]
                        + depth_quant_b).astype(np.uint16)

    depth_encoded = cv2.imencode(".png", inv_depth)[1]
    # 12-byte header: format field (0), then quant a and b. float32 0.0 has
    # the same byte pattern as int32 0, so this matches the C++ struct layout.
    header = np.array([0.0, depth_quant_a, depth_quant_b], np.float32)

    compressed_depth_msg = CompressedImage()
    compressed_depth_msg.format = "32FC1; compressedDepth png"
    compressed_depth_msg.data = header.tobytes() + depth_encoded.tobytes()
    return compressed_depth_msg

The payload of the CompressedDepth "32FC1; compressedDepth png" is 12 header bytes which contain the conversion scaling numbers quant a & b, then an encoded 16-bit grayscale png follows (if you lop off those first 12 bytes and save the rest of the bytes to a file you can display it in a png viewer and see the depth values). The scaling numbers are fixed above but could be changed per-frame if there was a reason to, by manipulating depth_max and depth_quantization (or refactor so depth_quant_a and b are set through some other means).

Here is what I came up with- I haven't yet checked to make sure the depth values when decompressed are correct, they just seemed reasonable as seen in rqt:

import cv2
import numpy as np
from sensor_msgs.msg import CompressedImage
 
def encode_compressed_depth_image_msg(depth_image: np.ndarray,
                                      depth_min=1.0, depth_max=10.0,
                                      depth_quantization=100.0, verbose=False):
    """Encode a 32FC1 depth image into a ROS CompressedImage message.

    Follows the compressed_depth_image_transport "compressedDepth png"
    payload convention: 12 header bytes (format field plus the two
    inverse-depth quantization parameters as float32) followed by a
    16-bit grayscale PNG.

    Args:
        depth_image: 2-D float array of depths in meters. Not modified.
        depth_min: depths below this are encoded as invalid (0).
        depth_max: depths above this are encoded as invalid (0).
        depth_quantization: z0 parameter controlling quantization granularity.
        verbose: unused; kept for interface compatibility.

    Returns:
        sensor_msgs.msg.CompressedImage with format
        "32FC1; compressedDepth png".
    """
    # Inverse-depth quantization parameters.
    depth_z0 = depth_quantization
    depth_quant_a = depth_z0 * (depth_z0 + 1.0)
    depth_quant_b = 1.0 - depth_quant_a / depth_max

    # Use a validity mask instead of writing NaNs into the caller's array:
    # the original mutated its input and relied on an undefined NaN->uint16
    # cast. Out-of-range and non-finite depths encode as 0 (invalid).
    valid = (np.isfinite(depth_image)
             & (depth_image >= depth_min)
             & (depth_image <= depth_max))
    inv_depth = np.zeros(depth_image.shape, dtype=np.uint16)
    inv_depth[valid] = (depth_quant_a / depth_image[valid]
                        + depth_quant_b).astype(np.uint16)

    depth_encoded = cv2.imencode(".png", inv_depth)[1]
    # 12-byte header: format field (0), then quant a and b. float32 0.0 has
    # the same byte pattern as int32 0, so this matches the C++ struct layout.
    header = np.array([0.0, depth_quant_a, depth_quant_b], np.float32)

    compressed_depth_msg = CompressedImage()
    compressed_depth_msg.format = "32FC1; compressedDepth png"
    compressed_depth_msg.data = header.tobytes() + depth_encoded.tobytes()
    return compressed_depth_msg

The payload of the CompressedDepth "32FC1; compressedDepth png" is 12 header bytes which contain the conversion scaling numbers quant a & b, then an encoded 16-bit grayscale png follows (if you lop off those first 12 bytes and save the rest of the bytes to a file you can display it in a png viewer and see the depth values). The scaling numbers are fixed above but could be changed per-frame if there was a reason to, by manipulating depth_max and depth_quantization (or refactor so depth_quant_a and b are set through some other means).

Here is what I came up with- I haven't yet checked to make sure the depth values when decompressed are correct, they just seemed reasonable as seen in rqt:

import cv2
import numpy as np
from sensor_msgs.msg import CompressedImage


def encode_compressed_depth_image_msg(depth_image: np.ndarray,
                                      depth_min=1.0, depth_max=10.0,
                                      depth_quantization=100.0, verbose=False):
    """Encode a 32FC1 depth image into a ROS CompressedImage message.

    Follows the compressed_depth_image_transport "compressedDepth png"
    payload convention: 12 header bytes (format field plus the two
    inverse-depth quantization parameters as float32) followed by a
    16-bit grayscale PNG.

    Args:
        depth_image: 2-D float array of depths in meters. Not modified.
        depth_min: depths below this are encoded as invalid (0).
        depth_max: depths above this are encoded as invalid (0).
        depth_quantization: z0 parameter controlling quantization granularity.
        verbose: unused; kept for interface compatibility.

    Returns:
        sensor_msgs.msg.CompressedImage with format
        "32FC1; compressedDepth png".
    """
    # Inverse-depth quantization parameters.
    depth_z0 = depth_quantization
    depth_quant_a = depth_z0 * (depth_z0 + 1.0)
    depth_quant_b = 1.0 - depth_quant_a / depth_max

    # Use a validity mask instead of writing NaNs into the caller's array:
    # the original mutated its input and relied on an undefined NaN->uint16
    # cast. Out-of-range and non-finite depths encode as 0 (invalid).
    valid = (np.isfinite(depth_image)
             & (depth_image >= depth_min)
             & (depth_image <= depth_max))
    inv_depth = np.zeros(depth_image.shape, dtype=np.uint16)
    inv_depth[valid] = (depth_quant_a / depth_image[valid]
                        + depth_quant_b).astype(np.uint16)

    depth_encoded = cv2.imencode(".png", inv_depth)[1]
    # 12-byte header: format field (0), then quant a and b. float32 0.0 has
    # the same byte pattern as int32 0, so this matches the C++ struct layout.
    header = np.array([0.0, depth_quant_a, depth_quant_b], np.float32)

    compressed_depth_msg = CompressedImage()
    compressed_depth_msg.format = "32FC1; compressedDepth png"
    compressed_depth_msg.data = header.tobytes() + depth_encoded.tobytes()
    return compressed_depth_msg

The payload of the CompressedDepth "32FC1; compressedDepth png" is 12 header bytes which contain the conversion scaling numbers quant a & b, then an encoded 16-bit grayscale png follows (if you lop off those first 12 bytes and save the rest of the bytes to a file you can display it in a png viewer and see the depth values). The scaling numbers are fixed above but could be changed per-frame if there was a reason to, by manipulating depth_max and depth_quantization (or refactor so depth_quant_a and b are set through some other means).

Here is what I came up with- I haven't yet checked to make sure the depth values when decompressed are correct, they just seemed reasonable as seen in rqt:

import cv2
import numpy as np
from sensor_msgs.msg import CompressedImage


def encode_compressed_depth_image_msg(depth_image: np.ndarray,
                                      depth_min=1.0, depth_max=10.0,
                                      depth_quantization=100.0, verbose=False):
    """Encode a 32FC1 depth image into a ROS CompressedImage message.

    Follows the compressed_depth_image_transport "compressedDepth png"
    payload convention: 12 header bytes (format field plus the two
    inverse-depth quantization parameters as float32) followed by a
    16-bit grayscale PNG.

    Args:
        depth_image: 2-D float array of depths in meters. Not modified.
        depth_min: depths below this are encoded as invalid (0).
        depth_max: depths above this are encoded as invalid (0).
        depth_quantization: z0 parameter controlling quantization granularity.
        verbose: unused; kept for interface compatibility.

    Returns:
        sensor_msgs.msg.CompressedImage with format
        "32FC1; compressedDepth png".
    """
    # Inverse-depth quantization parameters.
    depth_z0 = depth_quantization
    depth_quant_a = depth_z0 * (depth_z0 + 1.0)
    depth_quant_b = 1.0 - depth_quant_a / depth_max

    # Use a validity mask instead of writing NaNs into the caller's array:
    # the original mutated its input and relied on an undefined NaN->uint16
    # cast. Out-of-range and non-finite depths encode as 0 (invalid).
    valid = (np.isfinite(depth_image)
             & (depth_image >= depth_min)
             & (depth_image <= depth_max))
    inv_depth = np.zeros(depth_image.shape, dtype=np.uint16)
    inv_depth[valid] = (depth_quant_a / depth_image[valid]
                        + depth_quant_b).astype(np.uint16)

    depth_encoded = cv2.imencode(".png", inv_depth)[1]
    # 12-byte header: format field (0), then quant a and b. float32 0.0 has
    # the same byte pattern as int32 0, so this matches the C++ struct layout.
    header = np.array([0.0, depth_quant_a, depth_quant_b], np.float32)

    compressed_depth_msg = CompressedImage()
    compressed_depth_msg.format = "32FC1; compressedDepth png"
    compressed_depth_msg.data = header.tobytes() + depth_encoded.tobytes()
    return compressed_depth_msg

The payload of the CompressedDepth "32FC1; compressedDepth png" is 12 header bytes which contain the conversion scaling numbers quant a & b, then an encoded 16-bit grayscale png follows (if you lop off those first 12 bytes and save the rest of the bytes to a file you can display it in a png viewer and see the depth values). The scaling numbers are fixed above but could be changed per-frame if there was a reason to, by manipulating depth_max and depth_quantization (or refactor so depth_quant_a and b are set through some other means).

Here is what I came up with- I haven't yet checked to make sure the depth values when decompressed are correct, they just seemed reasonable as seen in rqt:

import cv2
import numpy as np
from sensor_msgs.msg import CompressedImage


def encode_compressed_depth_image_msg(depth_image: np.ndarray,
                                      depth_min=1.0, depth_max=10.0,
                                      depth_quantization=100.0, verbose=False):
    """Encode a 32FC1 depth image into a ROS CompressedImage message.

    Follows the compressed_depth_image_transport "compressedDepth png"
    payload convention: 12 header bytes (format field plus the two
    inverse-depth quantization parameters as float32) followed by a
    16-bit grayscale PNG.

    Args:
        depth_image: 2-D float array of depths in meters. Not modified.
        depth_min: depths below this are encoded as invalid (0).
        depth_max: depths above this are encoded as invalid (0).
        depth_quantization: z0 parameter controlling quantization granularity.
        verbose: unused; kept for interface compatibility.

    Returns:
        sensor_msgs.msg.CompressedImage with format
        "32FC1; compressedDepth png".
    """
    # Inverse-depth quantization parameters.
    depth_z0 = depth_quantization
    depth_quant_a = depth_z0 * (depth_z0 + 1.0)
    depth_quant_b = 1.0 - depth_quant_a / depth_max

    # Use a validity mask instead of writing NaNs into the caller's array:
    # the original mutated its input and relied on an undefined NaN->uint16
    # cast. Out-of-range and non-finite depths encode as 0 (invalid).
    valid = (np.isfinite(depth_image)
             & (depth_image >= depth_min)
             & (depth_image <= depth_max))
    inv_depth = np.zeros(depth_image.shape, dtype=np.uint16)
    inv_depth[valid] = (depth_quant_a / depth_image[valid]
                        + depth_quant_b).astype(np.uint16)

    depth_encoded = cv2.imencode(".png", inv_depth)[1]
    # 12-byte header: format field (0), then quant a and b. float32 0.0 has
    # the same byte pattern as int32 0, so this matches the C++ struct layout.
    header = np.array([0.0, depth_quant_a, depth_quant_b], np.float32)

    compressed_depth_msg = CompressedImage()
    compressed_depth_msg.format = "32FC1; compressedDepth png"
    compressed_depth_msg.data = header.tobytes() + depth_encoded.tobytes()
    return compressed_depth_msg

The payload of the CompressedDepth "32FC1; compressedDepth png" is 12 header bytes which contain the conversion scaling numbers quant a & b, then an encoded 16-bit grayscale png follows (if you lop off those first 12 bytes and save the rest of the bytes to a file you can display it in a png viewer and see the depth values). The scaling numbers are fixed above but could be changed per-frame if there was a reason to, by manipulating depth_max and depth_quantization (or refactor so depth_quant_a and b are set through some other means).