#!/usr/bin/python3
# Should be run inside a Docker container that has the jetson-inference packages installed.
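#
# Example invocation (the script filename here is hypothetical; the stream URI and
# the flags are the ones defined by the argument parser below):
#   python3 area_intrude_detect.py /dev/video0 --network=ssd-mobilenet-v2 \
#       --threshold=0.65 --fcc_Ip=192.168.1.100:8384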

import cv2
import jetson.inference
import jetson.utils
import argparse
import sys
import http.client, urllib.parse
import json
import numpy as np
from tkinter import *
from tkinter import simpledialog
import threading, queue

# parse the command line
parser = argparse.ArgumentParser(description="Draw rectangle areas and set names for them, then use a DNN to recognize intruders and send the results to a remote peer",
                                 formatter_class=argparse.RawTextHelpFormatter, epilog=jetson.inference.detectNet.Usage() +
                                 jetson.utils.videoSource.Usage() + jetson.utils.videoOutput.Usage() + jetson.utils.logUsage())

parser.add_argument("input_URI", type=str, default="/dev/video0", nargs='?', help="URI of the input stream")
parser.add_argument("output_URI", type=str, default="", nargs='?', help="URI of the output stream")
parser.add_argument("--network", type=str, default="ssd-mobilenet-v2", help="pre-trained model to load (see below for options)")
parser.add_argument("--overlay", type=str, default="box,labels,conf", help="detection overlay flags (e.g. --overlay=box,labels,conf)\nvalid combinations are: 'box', 'labels', 'conf', 'none'")
parser.add_argument("--threshold", type=float, default=0.65, help="minimum detection threshold to use")
parser.add_argument("--fcc_Ip", type=str, default="192.168.1.100:8384", help="IP address and port of the FCC (remote app); set to empty to disable reporting")

is_headless = ["--headless"] if sys.argv[0].find('console.py') != -1 else [""]

try:
    opt = parser.parse_known_args()[0]
except:
    parser.print_help()
    sys.exit(0)

is_process_exiting = False

def TimelyReportToRemoteApp():
    # periodic worker: drain the pending-detection queue, post one batch to the
    # remote app, then re-arm the timer
    if is_process_exiting:
        return
    timer_ReportToRemoteApp = threading.Timer(0.5, TimelyReportToRemoteApp)
    batch = []
    while True:
        try:
            nxtData = queuePendingToSendToRemote.get(block=False)
            batch.append(nxtData)
            if len(batch) >= 30:
                # don't let the batch size grow too large
                break
        except queue.Empty:
            if len(batch) == 0:
                # nothing to report, just reschedule and return
                timer_ReportToRemoteApp.start()
                return
            else:
                break
    try:
        if len(reportServicePath) > 1:
            conn = http.client.HTTPConnection(opt.fcc_Ip, timeout=15)
            headers = {'Content-type': 'application/json'}
            json_data = json.dumps([batch])
            print('posting detections with length ' + str(len(batch)) + ' to remoteApp')
            conn.request('POST', reportServicePath, json_data, headers)
            response = conn.getresponse()
            print('Accessed Report service with responseCode: ' + str(response.status), response.reason)
    except Exception as e:
        print('Fatal error while accessing the remote app.')
        print(e)
    timer_ReportToRemoteApp.start()

timer_ReportToRemoteApp = threading.Timer(3.0, TimelyReportToRemoteApp)
queuePendingToSendToRemote = queue.Queue()
reportServicePath = ""

if len(opt.fcc_Ip) >= 7:
    print('connecting to FCC at ' + opt.fcc_Ip + ' (set --fcc_Ip to empty to disable)')
    # look up the Report api exposed by the remote AreaIntrudeDetecterServer.App
    conn = http.client.HTTPConnection(opt.fcc_Ip, timeout=15)
    headers = {'Content-type': 'application/json'}
    serviceTag = ["webapi", ["Camera"]]
    json_data = json.dumps(serviceTag)
    conn.request('POST', '/u/?apitype=service&an=ShowMeApi&pn=ProcessorsDispatcher&en=Edge.Core.Processor.Dispatcher.DefaultDispatcher',
                 json_data, headers)
    response = conn.getresponse()
    print('accessing service: ShowMeApi with responseCode: ' + str(response.status), response.reason)
    if response.status != 200:
        print("Failed to access the ShowMeApi service, fatal error.")
        sys.exit(0)
    # convert the response bytes to a string and parse the JSON
    showmeapidata_raw = response.read().decode('utf-8')
    showmeapidata = json.loads(showmeapidata_raw)
    for api in showmeapidata:
        if api['ProviderType'] == 'AreaIntrudeDetecterServer.App' and api['ApiName'] == 'Report':
            print('Found AreaIntrudeDetecterServer.App and its Report api at: ' + api['Path'])
            reportServicePath = api['Path']
            timer_ReportToRemoteApp.start()
else:
    print('Skipped connecting to AreaIntrudeDetecterServer.App')
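
# Note: the exact response schema of the ShowMeApi service is not documented in this
# script; judging from the fields read above, each entry is assumed to look roughly like
#   {"ProviderType": "AreaIntrudeDetecterServer.App", "ApiName": "Report", "Path": "/..."}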

def AreTwoRectAreaIntersect(rectA, rectB):
    # each rect is {'x0','y0','x1','y1'} with (x0, y0) the top-left and (x1, y1)
    # the bottom-right corner; standard axis-aligned overlap test: the rects
    # intersect unless one lies entirely to the left/right of, or above/below, the other
    if rectA['x1'] < rectB['x0'] or rectB['x1'] < rectA['x0']:
        return False
    if rectA['y1'] < rectB['y0'] or rectB['y1'] < rectA['y0']:
        return False
    return True
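
# Quick sanity check with hypothetical coordinates (e.g. in a `python3 -i` session):
#   AreTwoRectAreaIntersect({'x0': 0, 'y0': 0, 'x1': 10, 'y1': 10},
#                           {'x0': 5, 'y0': 5, 'x1': 20, 'y1': 20})    # -> True (overlap)
#   AreTwoRectAreaIntersect({'x0': 0, 'y0': 0, 'x1': 10, 'y1': 10},
#                           {'x0': 30, 'y0': 30, 'x1': 40, 'y1': 40})  # -> False (disjoint)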

root = Tk()
# hide this base window
root.withdraw()

onDrawingRect = False
rectangles = {'': []}
ref_point = []

def shape_selection(event, x, y, flags, param):
    # grab references to the global variables
    global ref_point, onDrawingRect

    # left mouse button pressed: record the starting (x, y) corner of the area
    if event == cv2.EVENT_LBUTTONDOWN:
        onDrawingRect = True
        ref_point = [(x, y)]
    # left mouse button released: the rectangle is finished, ask for its name
    elif event == cv2.EVENT_LBUTTONUP:
        onDrawingRect = False
        if x - ref_point[0][0] <= 20:
            print('area is too small')
            ref_point.clear()
            return
        print('mouse is up, waiting for the area name')
        areaName = simpledialog.askstring("Input", "Please input the area name", parent=root)
        if not areaName:
            print('area name input was cancelled')
            ref_point.clear()
            return
        ref_point.append((x, y))
        rectangles[areaName] = ref_point
        print('area name collected: ' + str(areaName))
    # mouse moved while the button is held down: track the current corner
    elif event == cv2.EVENT_MOUSEMOVE:
        if onDrawingRect:
            ref_point.append((x, y))

# load the object detection network
net = jetson.inference.detectNet(opt.network, sys.argv, opt.threshold)

# create the video source & output streams
input = jetson.utils.videoSource(opt.input_URI, argv=sys.argv)
output = jetson.utils.videoOutput(opt.output_URI, argv=sys.argv + is_headless)

cv2WindowName = 'Camera: ' + opt.input_URI + ' | ' + opt.network
cv2.namedWindow(cv2WindowName)

# register the mouse callback once, then process frames until the user exits
cv2.setMouseCallback(cv2WindowName, shape_selection)
while True:
    cuda_img = input.Capture()

    # detect objects in the image (with overlay)
    detections = net.Detect(cuda_img, overlay=opt.overlay)
    jetson.utils.cudaDeviceSynchronize()

    # convert the CUDA image to a numpy array so OpenCV can draw on it
    cv_img_rgb = jetson.utils.cudaToNumpy(cuda_img)
    cv_img_bgr = cv2.cvtColor(cv_img_rgb, cv2.COLOR_RGB2BGR)

    # check every detection against every named area
    for det in detections:
        for rec_name, rec_points in rectangles.items():
            if rec_name == '':
                continue
            rectA = {'x0': det.Left, 'y0': det.Top, 'x1': det.Left + det.Width, 'y1': det.Top + det.Height}
            rectB = {'x0': rec_points[0][0], 'y0': rec_points[0][1], 'x1': rec_points[-1][0], 'y1': rec_points[-1][1]}
            if AreTwoRectAreaIntersect(rectA, rectB):
                print('Area: ' + rec_name + ' has intruder: ' + net.GetClassDesc(det.ClassID) + ' with confidence: ' + str(det.Confidence))
                if len(reportServicePath) > 1:
                    detectionData = {"AreaName": rec_name, "ClassID": det.ClassID, "ClassName": net.GetClassDesc(det.ClassID), "Confidence": det.Confidence}
                    queuePendingToSendToRemote.put(detectionData)

    # draw the rectangle that is currently being dragged
    if len(ref_point) >= 2:
        cv2.rectangle(cv_img_bgr, ref_point[0], ref_point[-1], (255, 0, 255), 2)

    # draw the named areas and their labels
    for rec_name, rec_points in rectangles.items():
        if rec_name == '':
            continue
        cv2.rectangle(cv_img_bgr, rec_points[0], rec_points[-1], (255, 0, 0), 2)
        cv2.putText(cv_img_bgr, rec_name, (rec_points[0][0] + 5, rec_points[0][1] + 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 155, 155), 2)

    try:
        # show the network FPS counter
        cv2.putText(cv_img_bgr, "{:.0f} FPS".format(net.GetNetworkFPS()), (5, 18), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (51, 255, 255), 1)
        cv2.imshow(cv2WindowName, cv_img_bgr)
    except Exception:
        print('cv2.imshow got an error.')

    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        print('user pressed q, terminating the whole process...')
        break

    # exit on input/output EOS
    if not input.IsStreaming() or not output.IsStreaming():
        break

is_process_exiting = True
cv2.destroyAllWindows()
timer_ReportToRemoteApp.cancel()