The program is written in Qt, taking live video from a webcam through the QCamera class and passing it to a subclass of QAbstractVideoSurface, which hands the incoming QVideoFrame objects off to a QOpenGLWidget subclass. To start the process, we ask the user to choose an available webcam and then create our QCamera object:

QStringList strings;
QList<QCameraInfo> cameras = QCameraInfo::availableCameras();   // GRAB THE LIST OF ATTACHED CAMERAS
for (int n = 0; n < cameras.count(); n++) {
    strings << cameras.at(n).description();                     // BUILD A HUMAN-READABLE LIST OF DEVICE NAMES
}

if (strings.count() > 1) {
    // MORE THAN ONE CAMERA AVAILABLE, SO LET THE USER PICK ONE FROM A DIALOG
    bool okay = false;
    QString string = QInputDialog::getItem(this, QString("Select Camera"),
                     QString("Select input device"), strings, 0, false, &okay);
    if (okay) {
        int n = strings.indexOf(string);
        camera = new QCamera(cameras.at(n));
    }
} else if (strings.count() == 1) {
    camera = new QCamera(cameras.first());                      // ONLY ONE CAMERA, SO SKIP THE DIALOG
}
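One case the code above does not cover is a machine with no webcam at all, in which case camera is never created. A guard along the following lines, placed just after the call to availableCameras(), would handle it; this is only a suggestion, and it assumes the surrounding code lives in a QWidget so that "this" can serve as the dialog's parent:

if (cameras.isEmpty()) {
    // NO WEBCAMS DETECTED, SO WARN THE USER AND BAIL OUT BEFORE TOUCHING THE CAMERA POINTER
    QMessageBox::warning(this, QString("Select Camera"), QString("No video input devices were found."));
    return;
}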

From here, we create our custom OpenGL widget along with our flavor of the viewfinder, point the viewfinder at the widget, and assign it to the camera using the following code:

label = new LAURandomizePixelsGLWidget();       // CREATE AN OPENGLWIDGET FOR SHUFFLING PIXELS
this->layout()->addWidget(label);               // ADD THIS OPENGLWIDGET TO OUR MAIN WIDGET

surface = new LAUVideoSurface();                // CREATE OUR CUSTOM VIEWFINDER OBJECT
surface->setLabel(label);                       // GIVE IT A COPY OF OUR OPENGLWIDGET'S POINTER

camera->setViewfinder(surface);                 // HAND OUR VIEWFINDER OFF TO THE CAMERA
camera->setCaptureMode(QCamera::CaptureVideo);  // TELL THE CAMERA TO RUN IN VIDEO MODE
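The one step not shown above is actually starting the capture once everything is wired together. Assuming the setup code above has run, that boils down to a call to QCamera::start(); the error check below is just a suggested precaution:

camera->start();                                // START STREAMING FRAMES INTO OUR VIEWFINDER

if (camera->error() != QCamera::NoError) {      // REPORT ANY DRIVER OR PERMISSION PROBLEMS
    qDebug() << "Camera error:" << camera->errorString();
}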

The role of our viewfinder object is simply to pass the video frames from the camera to our OpenGL widget, which does all the work of shuffling pixels. So we define our custom viewfinder with the following header:

class LAUVideoSurface : public QAbstractVideoSurface
{
    Q_OBJECT

public:
    explicit LAUVideoSurface(QObject *parent = NULL) : QAbstractVideoSurface(parent), 
                                                       labelWidget(NULL) { ; }

    LAUVideoGLWidget *label() const
    {
        return (labelWidget);
    }

    void setLabel(LAUVideoGLWidget *lbl)
    {
        labelWidget = lbl;
    }

    QVideoSurfaceFormat nearestFormat(const QVideoSurfaceFormat &format) const;
    bool isFormatSupported(const QVideoSurfaceFormat &format) const;
    bool present(const QVideoFrame &frame);
    bool start(const QVideoSurfaceFormat &format);
    void stop();

    QList<QVideoFrame::PixelFormat> supportedPixelFormats(QAbstractVideoBuffer::HandleType type = 
                                                          QAbstractVideoBuffer::NoHandle) const;

private:
    LAUVideoGLWidget *labelWidget;
};

The setLabel() method takes the QOpenGLWidget supplied by the user and keeps a local copy of its pointer. While the nearestFormat(), isFormatSupported(), start(), and stop() methods are boilerplate, the one of interest is the present() method. This method receives each incoming video frame from the QCamera and passes it along to our OpenGL widget as follows:

bool LAUVideoSurface::present(const QVideoFrame &frame)
{
    // SEND THE IN-COMING VIDEO TO THE LABEL WIDGET, IF IT EXISTS
    if (labelWidget) {
        labelWidget->setFrame(frame);
    }
    return (true);
}
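For reference, the boilerplate methods mentioned above could be filled in along the following lines. This is only a sketch, and it assumes the camera can deliver plain CPU-side RGB frames; the actual implementation may advertise a different set of pixel formats:

QList<QVideoFrame::PixelFormat> LAUVideoSurface::supportedPixelFormats(QAbstractVideoBuffer::HandleType type) const
{
    // ADVERTISE A FEW COMMON CPU-SIDE RGB FORMATS
    if (type == QAbstractVideoBuffer::NoHandle) {
        return (QList<QVideoFrame::PixelFormat>() << QVideoFrame::Format_ARGB32
                                                  << QVideoFrame::Format_RGB32
                                                  << QVideoFrame::Format_RGB24);
    }
    return (QList<QVideoFrame::PixelFormat>());
}

bool LAUVideoSurface::isFormatSupported(const QVideoSurfaceFormat &format) const
{
    // ACCEPT ANY VALID FRAME SIZE WHOSE PIXEL FORMAT APPEARS IN THE LIST ABOVE
    return (supportedPixelFormats(format.handleType()).contains(format.pixelFormat()) && format.frameSize().isValid());
}

QVideoSurfaceFormat LAUVideoSurface::nearestFormat(const QVideoSurfaceFormat &format) const
{
    // WE DO NO FORMAT CONVERSION OF OUR OWN, SO JUST ECHO BACK ANY FORMAT WE SUPPORT
    if (isFormatSupported(format)) {
        return (format);
    }
    return (QVideoSurfaceFormat());
}

bool LAUVideoSurface::start(const QVideoSurfaceFormat &format)
{
    // LET THE BASE CLASS RECORD THE FORMAT AND MARK THE SURFACE AS ACTIVE
    if (isFormatSupported(format)) {
        return (QAbstractVideoSurface::start(format));
    }
    return (false);
}

void LAUVideoSurface::stop()
{
    // MARK THE SURFACE AS INACTIVE
    QAbstractVideoSurface::stop();
}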

To display the video on screen, we derive the following low-level subclass of QOpenGLWidget:

class LAUVideoGLWidget : public QOpenGLWidget, protected QOpenGLFunctions
{
    Q_OBJECT

public:
    explicit LAUVideoGLWidget(QWidget *parent = NULL) : QOpenGLWidget(parent), 
                                                        videoTexture(NULL), counter(0) { ; }
    ~LAUVideoGLWidget();

    virtual bool isValid() const
    {
        return (wasInitialized());
    }

    bool wasInitialized() const
    {
        return (vertexArrayObject.isCreated());
    }
...
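The class continues beyond this excerpt, but since present() hands each frame to setFrame(), a rough idea of what that method might do is sketched below. This is purely illustrative rather than the actual implementation; it assumes the signature shown, that videoTexture is a QOpenGLTexture pointer, and that the camera delivers a pixel format QImage can represent:

void LAUVideoGLWidget::setFrame(const QVideoFrame &frame)
{
    // SKIP FRAMES THAT ARRIVE BEFORE THE GL CONTEXT IS READY (ASSUMING THE VAO IS CREATED IN initializeGL())
    if (wasInitialized() == false) {
        return;
    }

    // MAP THE INCOMING FRAME INTO CPU MEMORY SO WE CAN READ ITS PIXELS
    QVideoFrame localFrame(frame);
    if (localFrame.map(QAbstractVideoBuffer::ReadOnly)) {
        // WRAP THE MAPPED BYTES IN A QIMAGE AND MAKE A DEEP, RGBA-ORDERED COPY
        QImage image(localFrame.bits(), localFrame.width(), localFrame.height(), localFrame.bytesPerLine(),
                     QVideoFrame::imageFormatFromPixelFormat(localFrame.pixelFormat()));
        QImage copy = image.convertToFormat(QImage::Format_RGBA8888);
        localFrame.unmap();

        // UPLOAD THE COPY TO OUR TEXTURE INSIDE THIS WIDGET'S GL CONTEXT
        makeCurrent();
        if (videoTexture) {
            delete videoTexture;                // ASSUMES videoTexture IS A QOpenGLTexture POINTER
        }
        videoTexture = new QOpenGLTexture(copy, QOpenGLTexture::DontGenerateMipMaps);
        doneCurrent();

        // SCHEDULE A REPAINT SO paintGL() CAN DRAW (AND SHUFFLE) THE NEW FRAME
        update();
    }
}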