Module to process video with hough transform¶

This module belongs to the manuscript "Burchardt, L., van der Sande, Y., Kehy, M., Gamba, M., Ravignani, A., Pouw, W.". This code takes in an example video and tracks any semi-circular objects in the video using the hough transform. The results folder will contain a video and time series containing the estimate radii and their locations.

In [8]:
# import the necessary packages
import cv2                       #image/video processing
import pandas as pd              #data wranlging/csv
from skimage import io, feature, color, measure, draw, img_as_float #image processing
import numpy as np               #data wrangling
import os                        #folder structuring
from os.path import isfile, join #for basic file operations
from tqdm import tqdm            #for a process bar
from IPython.display import Video #for showing a video
# set videofolder
videofolder = '../input/'
outputfolder = './results/'
#version 2, using videos #################### loading in the videos
vids = [f for f in os.listdir(videofolder) if isfile(join(videofolder, f))]
vidlist = []

for i in vids: #add the image folder name to get the full path
    vidlist.append(videofolder +i) 
In [7]:
# Example video
Video('https://tsg-131-174-75-200.hosting.ru.nl/samples_airsactoolkit/example8_tracked_rec.mp4', width=600, height=400)
Out[7]:
Your browser does not support the video element.

Main presets¶

In [6]:
# preset settings preprocessing (thresh 1 and 2 are also weighted and then passed to hough transform)
medianblur_preset = 27
dilation_preset = 5
alpha_preset = 2
beta_preset = 30
thresh_div_1_preset = 5
thresh_div_2_preset = 14

# hough presets
dp_preset = 1
minDist_preset = 10000
maxRadius_preset = 270

Main Functions¶

In [14]:
def preprocessing(image, medianblur = medianblur_preset,
                              dilation = dilation_preset,
                              alpha = alpha_preset,
                              beta= beta_preset,
                              thresh_div_1= thresh_div_1_preset,
                              thresh_div_2= thresh_div_2_preset):
    #image0 = hougdraw(image)
    #convert to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    #brightness change
    gray = cv2.convertScaleAbs(gray, alpha = alpha, beta = beta)
    #set dynamic tresholds for canny (we will also pass this to the hough)
    mean_intensity = np.median(gray)
    threshold1 = int(max(0, (1.0 - 0.33) * mean_intensity/thresh_div_1))
    threshold2 = int(min(255, (1.0 + 0.33) * mean_intensity/thresh_div_2))    
    #blur
    image2 = cv2.medianBlur(gray, medianblur)
    #dynamic thresholds for canny edge detection based on intensity of image
    #Thresholds one standard deviation above and below median intensity
    #edge detection
    image3 = cv2.Canny(image2, threshold1, threshold2)
    #dilation and second blur
    submitted = cv2.dilate(image3, None, iterations= dilation)  
    image4 = cv2.medianBlur(submitted, medianblur) 
    #add hough
    image4 = np.float32(image4)
    return image4, threshold1, threshold2

def preprocess_hough_apply_to_frame(image, mindist=10000, maxradius=250):
    image, param1, param2 = preprocessing(image=image)
    image = cv2.normalize(src= image, dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U) 
    circles = cv2.HoughCircles(image, cv2.HOUGH_GRADIENT,param1 = param1, param2 = param2, dp = dp_preset, minDist = minDist_preset, maxRadius = maxRadius_preset)   
    return(circles)

Loop throuh video folder and process each video¶

In [15]:
#######################################
for video in vidlist:
    name = os.path.basename(video)[0:-4]
    #set up empty output dataframe
    column_names = ['frame', # info on region of interest for repetability
                            'x','y', 'r', 'namefr', 'sample_rate']# parameters of hough circle transform 
    df = pd.DataFrame(columns = column_names)  
    ####################set up video settings
    cap = cv2.VideoCapture(video)                       #set video to capture
    frameWidth = cap.get(cv2.CAP_PROP_FRAME_WIDTH)      #frame width
    frameHeight = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)    #frame height
    fps = cap.get(cv2.CAP_PROP_FPS)                     #fps = frames per second
    num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) #number of frames
            #set up video writer
    fourcc = cv2.VideoWriter_fourcc(*'MP4V') #for different video formats you could use e.g., *'XVID'
    out = cv2.VideoWriter(outputfolder + name + '_tracked.mp4', fourcc, 
                          fps = fps, frameSize = (int(frameWidth), int(frameHeight)))
    ###################loop over frames of the original video
    j = 0   #fame counter
    
    #set up progress bar
    with tqdm(total=num_frames, desc="Processing " + name, bar_format="{l_bar}{bar:50}{r_bar}") as pbar:
        #the loop over frames (will close when no more frames are left to process)
        while(cap.isOpened()):
            ret, frame = cap.read()
            if ret == False:
                break
            j=j+1 #add to the frame counter
            namefr = 'framenr_' + str(j) + '_framevid_' + os.path.basename(video[0:-4])
            ############################detect circles   
            to_be_processed_frame=frame.copy() #we keep the original frame
            #apply hough
            circles = preprocess_hough_apply_to_frame(to_be_processed_frame)        
            #draw the circles
            if circles is not None:
                circles = np.round(circles[0, 0:1]).astype("int")
                x = circles[0,0] #x  + plus the shift from the roi
                y = circles[0,1] #y  + plus the shift from the roi
                r = circles[0,2]
                cv2.circle(frame, (x, y), r, (255, 255, 0), 2) #version without drawing roi back on whole image 
                #save it to a row
            if circles is None:
                x = "NA"
                y = "NA"
                r = "NA"
            #write frame
            out.write(frame) #save the frame to the new masked video
            #write x,y,r data
            new_row = [j, x, y, r, namefr, fps]
            df.loc[len(df)] = new_row
            #now update the progress bar
            pbar.update(1)
        #release video writer
        out.release()
        cap.release()
        #save csv file with the timeseries results
        df.to_csv(outputfolder+name+'.csv', sep = ',') 
    print('done with processing video ' + name)

    
OpenCV: FFMPEG: tag 0x5634504d/'MP4V' is not supported with codec id 12 and format 'mp4 / MP4 (MPEG-4 Part 14)'
OpenCV: FFMPEG: fallback to use tag 0x7634706d/'mp4v'
Processing example8: 100%|██████████████████████████████████████████████████| 1195/1195 [01:44<00:00, 11.47it/s]
done with processing video example8