Before getting the Tinker Board I was using a Raspberry Pi 3B+.
The Tinker Board is not widely used in China, so there is relatively little documentation for it; if you do use one, it is best to search for information with Google.
The OS installation steps are much the same as for the Raspberry Pi, so please refer to a Raspberry Pi guide.
Next, set up the environment:
OpenCV in python (Face Detection)
#Install
$ sudo apt-get update
$ sudo apt-get upgrade

#Install a few developer tools
$ sudo apt-get install -y build-essential git cmake pkg-config

#Install image I/O packages which allow us to load image file formats such as JPEG, PNG, TIFF, etc.
$ sudo apt-get install -y libjpeg-dev libtiff5-dev libpng-dev

#Install video I/O packages
$ sudo apt-get install -y libavcodec-dev libavformat-dev libswscale-dev libv4l-dev libxvidcore-dev libx264-dev libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev

#Install the GTK development library
$ sudo apt-get install -y libgtk2.0-dev

#Various operations inside of OpenCV (such as matrix operations) can be optimized using added dependencies
$ sudo apt-get install -y libatlas-base-dev gfortran

#Install the Python 2.7 and Python 3 header files
$ sudo apt-get install -y python2.7-dev python3-dev python-opencv

$ wget https://github.com/opencv/opencv/archive/3.3.0.zip
$ unzip 3.3.0.zip
$ cd opencv-3.3.0
$ mkdir build
$ cd build
$ cmake -D CMAKE_BUILD_TYPE=Release -D WITH_LIBV4L=ON -D CMAKE_INSTALL_PREFIX=/usr/local ..
$ sudo make install
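After the build finishes, a quick sanity check from Python confirms that the installed cv2 module is the freshly built 3.3.0 and that GStreamer and V4L support were compiled in. This is only a minimal sketch and assumes the build above completed without errors:

import cv2

print(cv2.__version__)            # should report 3.3.0
# look for "GStreamer: YES" and "V4L/V4L2: YES" in the build summary
print(cv2.getBuildInformation())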
Test the CSI camera
$ gst-launch-1.0 v4l2src ! video/x-raw,format=NV12,width=640,height=480 ! videoconvert ! autovideosink
After running this command you should see the live image from the camera.
If the Camera Module isn't working correctly, use the command below to check whether it is detected by the Tinker Board.
$ ls /dev/video*
You should see /dev/video0, /dev/video1 and /dev/video2 listed. If they do not show up, the camera is not being detected; re-seat the camera ribbon cable and reboot, then check again.
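As a quick way to see which of those nodes actually delivers frames, the small Python sketch below probes every /dev/video* device with OpenCV. It is an illustrative helper, not part of the original guide, and the CSI camera may still require the full GStreamer pipeline shown later rather than a plain device index:

import glob
import cv2

for dev in sorted(glob.glob("/dev/video*")):
    index = int(dev.replace("/dev/video", ""))   # /dev/video0 -> 0
    cap = cv2.VideoCapture(index)
    ok, _ = cap.read()
    print(dev, "frame grabbed" if ok else "no frame")
    cap.release()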
Gstreamer
#preview
$ gst-launch-1.0 v4l2src ! videoconvert ! autovideosink
$ gst-launch-1.0 v4l2src device=/dev/video0 ! video/x-raw,format=NV12,width=640,height=480 ! videoconvert ! autovideosink

#capture
$ gst-launch-1.0 v4l2src num-buffers=10 ! video/x-raw,format=NV12,width=640,height=480 ! jpegenc ! multifilesink location=image_%02d.jpg
$ gst-launch-1.0 v4l2src num-buffers=10 ! video/x-raw,format=NV12,width=640,height=480 ! jpegenc ! multifilesink location=image.jpg

#recording
$ gst-launch-1.0 v4l2src num-buffers=512 ! video/x-raw,format=NV12,width=640,height=480,framerate=30/1 ! queue ! mpph264enc ! queue ! h264parse ! mpegtsmux ! filesink location=/home/linaro/vga.ts

#show picture
$ gst-launch-1.0 playbin uri=file:///home/linaro/image.jpg
$ gst-launch-1.0 filesrc location=image.jpg ! decodebin ! imagefreeze ! autovideosink

#play video
$ gst-launch-1.0 playbin video-sink=rkximagesink uri=file:///home/linaro/vga.ts
$ gst-launch-1.0 uridecodebin uri=file:///home/linaro/vga.ts ! rkximagesink
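The #capture step can also be done programmatically. The sketch below grabs ten frames through the same v4l2src pipeline and writes them out as JPEGs; it assumes OpenCV was built with GStreamer support as described above:

import cv2

pipeline = ("v4l2src ! video/x-raw,format=NV12,width=640,height=480 "
            "! videoconvert ! appsink")
cap = cv2.VideoCapture(pipeline)

for i in range(10):                      # roughly equivalent to num-buffers=10
    ok, frame = cap.read()
    if not ok:
        break
    cv2.imwrite("image_%02d.jpg" % i, frame)

cap.release()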
MJPG-streamer
#Install
$ sudo apt-get update
$ sudo apt-get upgrade
$ sudo apt-get install subversion libjpeg62-turbo-dev imagemagick
$ svn co https://svn.code.sf.net/p/mjpg-streamer/code/
$ cd code/mjpg-streamer
$ make
$ sudo make install

#Start MJPG-streamer
$ cd ~/code/mjpg-streamer
$ ./mjpg_streamer -i "./input_uvc.so -y" -o "./output_http.so -w ./www"
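Once MJPG-streamer is running, the stream can be viewed in a browser on the board's default port 8080, and a single frame can be pulled from the built-in snapshot endpoint. The Python 3 sketch below does exactly that; the address 192.168.1.10 is only a placeholder, replace it with your board's IP:

import urllib.request

# output_http.so serves ?action=stream (MJPEG) and ?action=snapshot (single JPEG)
url = "http://192.168.1.10:8080/?action=snapshot"
with urllib.request.urlopen(url) as resp, open("snapshot.jpg", "wb") as f:
    f.write(resp.read())
print("saved snapshot.jpg")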
Sample code in Python
import sys, os, cv2

print(cv2.__version__)
cv2.setNumThreads(4)

#please copy ~/opencv-3.3.0/data/haarcascades_cuda/haarcascade_frontalface_alt.xml to /home/linaro/
faceCascade = cv2.CascadeClassifier("/home/linaro/haarcascade_frontalface_alt.xml")

cap = cv2.VideoCapture("v4l2src ! video/x-raw,format=NV12,width=640,height=480 ! videoconvert ! appsink")

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
Sample code in C++
Sample code - test.cpp
#include <opencv2/highgui.hpp>
#include <iostream>

using namespace cv;
using namespace std;

const String keys =
    "{ help          | false | print usage }"
    "{ camera_width  |  640  | camera device width }"
    "{ camera_height |  480  | camera device height }";

int main(int argc, char** argv)
{
    CommandLineParser parser(argc, argv, keys);
    if (parser.get<bool>("help"))
    {
        parser.printMessage();
        return 0;
    }

    VideoCapture cap;
    cap = VideoCapture("v4l2src ! video/x-raw,format=NV12,width=640,height=480 ! videoconvert ! appsink");
    if (!cap.isOpened())
    {
        cout << "Couldn't open the camera pipeline" << endl;
        return -1;
    }
    cap.set(CAP_PROP_FRAME_WIDTH, parser.get<int>("camera_width"));
    cap.set(CAP_PROP_FRAME_HEIGHT, parser.get<int>("camera_height"));

    for (;;)
    {
        Mat frame;
        cap >> frame; // get a new frame from camera/video or read image
        if (frame.empty())
        {
            waitKey();
            break;
        }
        imshow("image", frame);
        if (waitKey(1) >= 0)
            break;
    }
    return 0;
}
Build test.cpp
$ g++ test.cpp -o test `pkg-config --cflags --libs opencv`
Gstreamer in C code
Sample code - test.c
#include <gst/gst.h>

int main(int argc, char *argv[])
{
    GstElement *pipeline;
    GstBus *bus;
    GstMessage *msg;

    /* Initialize GStreamer */
    gst_init (&argc, &argv);

    /* Build the pipeline */
    pipeline = gst_parse_launch ("v4l2src num-buffers=512 ! video/x-raw,format=NV12,width=640,height=480 ! videoconvert ! autovideosink", NULL);

    /* Start playing */
    gst_element_set_state (pipeline, GST_STATE_PLAYING);

    /* Wait until error or EOS */
    bus = gst_element_get_bus (pipeline);
    msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_ERROR | GST_MESSAGE_EOS);

    /* Free resources */
    if (msg != NULL)
        gst_message_unref (msg);
    gst_object_unref (bus);
    gst_element_set_state (pipeline, GST_STATE_NULL);
    gst_object_unref (pipeline);
    return 0;
}
Build test.c
$ sudo apt-get install libgstreamer1.0-dev
$ gcc test.c -o test1 `pkg-config --cflags --libs gstreamer-1.0`