카메라/카메라 테스트

[Depth camera] - Intel realsense L515

dohyeon2 2021. 7. 9. 16:04

목차

    프로젝트 진행중 애를 먹었던 문제입니다. 혹시 필요하신분은 이 글을 참고하면 도움이 될것같아 공유합니다. 

    https://github.com/dohyeonYoon/Depth_Camera

     

    GitHub - dohyeonYoon/Depth_Camera: Intel Realsense L515 depth camera code

    Intel Realsense L515 depth camera code. Contribute to dohyeonYoon/Depth_Camera development by creating an account on GitHub.

    github.com


    이번에 Intel realsense depthcamera L515를 이용하여 RGB frame과 Depth frame을 동시에 받아오면서 저장해주는 코드를 작성해보려 하였습니다. OpenCV에서 창을 여러개 띄우고, 동시에 해당 프레임을 디렉토리에 이미지 파일로 저장해주는 키보드 이벤트를 처리하기 위해서 cv2.waitKey를 사용하는 경우 몇가지 문제가 발생합니다.

     

    저의 경우에는 2개의 창을 띄우고 키보드 이벤트 발생 시, 현재 프레임이 1부터 순서대로 

    해당 디렉토리에 이미지 파일로 저장되도록 코드를 작성하였는데

    다음의 문제들이 발생하였습니다. 

     

    문제점 1.  for문을 사용했을 때 현재 프레임이 디렉토리에 저장이 되지만 순서대로 저장이 되지 않음 + 프레임 버벅거림 (실시간 x )  

     

    import pyrealsense2 as rs
    import numpy as np
    import cv2
    from datetime import datetime

    # Configure the RealSense pipeline: one depth and one color stream.
    pipeline = rs.pipeline()
    config = rs.config()

    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    device = pipeline_profile.get_device()
    device_product_line = str(device.get_info(rs.camera_info.product_line))

    # Make sure the attached device actually has an RGB sensor before streaming.
    found_rgb = False
    for s in device.sensors:
        if s.get_info(rs.camera_info.name) == 'RGB Camera':
            found_rgb = True
            break
    if not found_rgb:
        print("The demo requires Depth camera with Color sensor")
        exit(0)

    config.enable_stream(rs.stream.depth, 1024, 768, rs.format.z16, 30)

    if device_product_line == 'L500':
        config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)
    else:
        config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)

    # Start streaming.
    pipeline.start(config)

    try:
        while True:

            # Wait for a coherent pair of frames: depth and color.
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue

            # Convert images to numpy arrays.
            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())

            # Apply colormap on depth image (image must be converted to 8-bit per pixel first).
            depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

            depth_colormap_dim = depth_colormap.shape
            color_colormap_dim = color_image.shape

            # If depth and color resolutions are different, resize color image
            # to match depth image for display.
            if depth_colormap_dim != color_colormap_dim:
                resized_color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]), interpolation=cv2.INTER_AREA)
                images1 = resized_color_image
                images2 = depth_colormap
                images = np.hstack((resized_color_image, depth_colormap))
            else:
                images1 = color_image
                images2 = depth_colormap
                images = np.hstack((color_image, depth_colormap))

            # NOTE(review): the original post indented the two lines below with
            # tabs, which raises TabError; re-indented with spaces so the
            # snippet is at least runnable.
            cv2.namedWindow('RealSense1', cv2.WINDOW_AUTOSIZE)
            cv2.namedWindow('RealSense2', cv2.WINDOW_AUTOSIZE)

            cv2.imshow('RealSense1', images1)
            cv2.imshow('RealSense2', images2)

            # Demonstrated problem 1 (kept on purpose): the counter restarts at
            # 0 on every outer iteration, so files are overwritten out of
            # order, and the manual `i = i + 1` is clobbered by the for-loop's
            # own iteration. The 150-iteration polling loop also blocks the
            # outer loop, which makes the preview stutter (not real-time).
            i = 0
            for i in range(150):
                if cv2.waitKey(1) != -1:  # if any key is pressed
                    cv2.imwrite(f'C:/Users/user/Desktop/test/c{i}.png', images1)  # save to this directory
                    cv2.imwrite(f'C:/Users/user/Desktop/test/d{i}.png', images2)  # save to this directory
                    i = i + 1

    finally:

        # Stop streaming.
        pipeline.stop()

     

     

     

    문제점 2.  while문을 사용했을 때 현재 프레임이 순서대로 디렉토리에 저장이 되지만 영상이 멈춤

     

    import pyrealsense2 as rs
    import numpy as np
    import cv2
    from datetime import datetime

    # Configure the RealSense pipeline: one depth and one color stream.
    pipeline = rs.pipeline()
    config = rs.config()

    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    device = pipeline_profile.get_device()
    device_product_line = str(device.get_info(rs.camera_info.product_line))

    # Make sure the attached device actually has an RGB sensor before streaming.
    found_rgb = False
    for s in device.sensors:
        if s.get_info(rs.camera_info.name) == 'RGB Camera':
            found_rgb = True
            break
    if not found_rgb:
        print("The demo requires Depth camera with Color sensor")
        exit(0)

    config.enable_stream(rs.stream.depth, 1024, 768, rs.format.z16, 30)

    if device_product_line == 'L500':
        config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)
    else:
        config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)

    # Start streaming.
    pipeline.start(config)

    try:
        while True:

            # Wait for a coherent pair of frames: depth and color.
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue

            # Convert images to numpy arrays.
            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())

            # Apply colormap on depth image (image must be converted to 8-bit per pixel first).
            depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

            depth_colormap_dim = depth_colormap.shape
            color_colormap_dim = color_image.shape

            # If depth and color resolutions are different, resize color image
            # to match depth image for display.
            if depth_colormap_dim != color_colormap_dim:
                resized_color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]), interpolation=cv2.INTER_AREA)
                images1 = resized_color_image
                images2 = depth_colormap
                images = np.hstack((resized_color_image, depth_colormap))
            else:
                images1 = color_image
                images2 = depth_colormap
                images = np.hstack((color_image, depth_colormap))

            # NOTE(review): the original post indented the lines below with
            # tabs, which raises TabError; re-indented with spaces so the
            # snippet is at least runnable.
            cv2.namedWindow('RealSense1', cv2.WINDOW_AUTOSIZE)
            cv2.namedWindow('RealSense2', cv2.WINDOW_AUTOSIZE)

            cv2.imshow('RealSense1', images1)
            cv2.imshow('RealSense2', images2)

            # Demonstrated problem 2 (kept on purpose): this inner while-loop
            # keeps polling the keyboard until 150 pairs have been saved, so
            # the outer loop never advances and the preview freezes on a
            # single frame. Files are saved in order, but the stream stalls.
            i = 0
            while i < 150:
                if cv2.waitKey(1) != -1:  # if any key is pressed
                    cv2.imwrite(f'C:/Users/user/Desktop/test/c{i}.png', images1)  # save to this directory
                    cv2.imwrite(f'C:/Users/user/Desktop/test/d{i}.png', images2)  # save to this directory
                    i = i + 1

    finally:

        # Stop streaming.
        pipeline.stop()

    해결책 1. 2개의 창을 하나로 합치고 키보드 이벤트 발생 시, 각각의 프레임을 디렉토리에 저장

     

    # Merge the color and depth views into a single window; on any key press,
    # write each frame separately to disk, numbered from 0 upward (max 150).
    if depth_colormap_dim != color_colormap_dim:
        # Match the color frame to the depth frame's size before stacking.
        resized_color_image = cv2.resize(
            color_image,
            dsize=(depth_colormap_dim[1], depth_colormap_dim[0]),
            interpolation=cv2.INTER_AREA)
        images1 = resized_color_image
    else:
        images1 = color_image
    images2 = depth_colormap
    images = np.hstack((images1, images2))

    # print frame image
    cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('RealSense', images)

    count = 0
    while count < 150:
        if cv2.waitKey(1) != -1:
            cv2.imwrite(f'C:/Users/user/Desktop/test/c{count}.png', images1)
            cv2.imwrite(f'C:/Users/user/Desktop/test/d{count}.png', images2)
            count = count + 1

    이런식으로 np.hstack 함수를 통해 2개의 창을 하나로 합쳐주고 

    실행해보았지만 프레임 이미지는 순서대로 저장되지만 프레임이 멈춰버리는 문제가 발생하였습니다.

     

    이는 아무래도 cv2.imshow 호출 이후 cv2.waitKey가 호출되지 않으면 창이 갱신되지 않기 때문에, 프레임이 갱신되지 못하는 문제로 추측됩니다. 

     

     

     

    해결책 2. cv2.waitKey 호출마다 키 값을 따로 저장해 두었다가 나중에 한꺼번에 처리하는 방법 

     

    # Same merged-window display, but here the counter advances on every
    # waitKey poll (not only when a key is pressed), so the loop always
    # finishes after 150 polls and saves land on whatever index was current.
    if depth_colormap_dim != color_colormap_dim:
        # Match the color frame to the depth frame's size before stacking.
        resized_color_image = cv2.resize(
            color_image,
            dsize=(depth_colormap_dim[1], depth_colormap_dim[0]),
            interpolation=cv2.INTER_AREA)
        images1 = resized_color_image
    else:
        images1 = color_image
    images2 = depth_colormap
    images = np.hstack((images1, images2))

    # print frame image
    cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('RealSense', images)

    idx = 0
    while idx < 150:
        if cv2.waitKey(1) != -1:
            cv2.imwrite(f'C:/Users/user/Desktop/test/c{idx}.png', images1)
            cv2.imwrite(f'C:/Users/user/Desktop/test/d{idx}.png', images2)
        idx = idx + 1

    이 방법 또한 frame이 정지하는 문제 발생 

     

    해결책 3. datetime 라이브러리를 통해 현재시간을 변수로 받아주는 방법 (반복문 불필요)  

     

    import pyrealsense2 as rs
    import numpy as np
    import cv2
    from datetime import datetime

    # Configure the RealSense pipeline: one depth and one color stream.
    pipeline = rs.pipeline()
    config = rs.config()

    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    device = pipeline_profile.get_device()
    device_product_line = str(device.get_info(rs.camera_info.product_line))

    # Make sure the attached device actually has an RGB sensor before streaming.
    found_rgb = False
    for s in device.sensors:
        if s.get_info(rs.camera_info.name) == 'RGB Camera':
            found_rgb = True
            break
    if not found_rgb:
        print("The demo requires Depth camera with Color sensor")
        exit(0)

    config.enable_stream(rs.stream.depth, 1024, 768, rs.format.z16, 30)
    # Both branches of the original `if device_product_line == 'L500'` asked
    # for exactly the same color mode, so one unconditional call is equivalent.
    config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)

    # Start streaming.
    pipeline.start(config)

    # Create the display windows once, before the loop, instead of on every frame.
    cv2.namedWindow('RealSense1', cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow('RealSense2', cv2.WINDOW_AUTOSIZE)

    try:
        while True:

            # Wait for a coherent pair of frames: depth and color.
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue

            # Convert images to numpy arrays.
            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())

            # Apply colormap on depth image (image must be converted to 8-bit
            # per pixel first).
            depth_colormap = cv2.applyColorMap(
                cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

            # If depth and color resolutions are different, resize color image
            # to match depth image for display. (The original also built an
            # np.hstack of both frames that was never displayed or saved in
            # this version; that dead computation is removed.)
            if depth_colormap.shape != color_image.shape:
                images1 = cv2.resize(
                    color_image,
                    dsize=(depth_colormap.shape[1], depth_colormap.shape[0]),
                    interpolation=cv2.INTER_AREA)
            else:
                images1 = color_image
            images2 = depth_colormap

            cv2.imshow('RealSense1', images1)
            cv2.imshow('RealSense2', images2)

            # waitKey(1) both refreshes the windows and reports key presses,
            # so the preview stays live. On any key press, save the current
            # pair with a timestamped name — no counter loop needed, and the
            # timestamps keep the saved frames in chronological order.
            # (Presses within the same second overwrite each other — a finer
            # format such as '%Y_%m_%d_%H_%M_%S_%f' would avoid that.)
            if cv2.waitKey(1) != -1:
                now = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
                cv2.imwrite(f'C:/Users/user/Desktop/test/c_{now}.png', images1)
                cv2.imwrite(f'C:/Users/user/Desktop/test/d_{now}.png', images2)

    finally:

        # Stop streaming.
        pipeline.stop()

     

    2021.07.14 수정