From eef88f6c2d5726bed04c114e37e86c0fce114207 Mon Sep 17 00:00:00 2001
From: Corey Frang
Date: Tue, 3 Apr 2018 11:44:44 -0400
Subject: [PATCH] First draft of video IO device

---
 src/engine/runtime.js                    |   4 +-
 .../scratch3_video_sensing/index.js      | 175 +--------
 src/io/video.js                          | 332 ++++++++++++++++++
 3 files changed, 354 insertions(+), 157 deletions(-)
 create mode 100644 src/io/video.js

diff --git a/src/engine/runtime.js b/src/engine/runtime.js
index aa73c9690..7552c2d7e 100644
--- a/src/engine/runtime.js
+++ b/src/engine/runtime.js
@@ -15,6 +15,7 @@ const DeviceManager = require('../io/deviceManager');
 const Keyboard = require('../io/keyboard');
 const Mouse = require('../io/mouse');
 const MouseWheel = require('../io/mouseWheel');
+const Video = require('../io/video');
 
 const defaultBlockPackages = {
     scratch3_control: require('../blocks/scratch3_control'),
@@ -260,7 +261,8 @@ class Runtime extends EventEmitter {
             deviceManager: new DeviceManager(),
             keyboard: new Keyboard(this),
             mouse: new Mouse(this),
-            mouseWheel: new MouseWheel(this)
+            mouseWheel: new MouseWheel(this),
+            video: new Video(this)
         };
 
         /**
diff --git a/src/extensions/scratch3_video_sensing/index.js b/src/extensions/scratch3_video_sensing/index.js
index 9b70c498f..3a276768a 100644
--- a/src/extensions/scratch3_video_sensing/index.js
+++ b/src/extensions/scratch3_video_sensing/index.js
@@ -3,7 +3,7 @@ const Runtime = require('../../engine/runtime');
 const ArgumentType = require('../../extension-support/argument-type');
 const BlockType = require('../../extension-support/block-type');
 const Clone = require('../../util/clone');
-const log = require('../../util/log');
+const Video = require('../../io/video');
 
 const VideoMotion = require('./library');
 
@@ -34,39 +34,6 @@ class Scratch3VideoSensingBlocks {
          */
         this._lastUpdate = null;
 
-        /**
-         * Id representing a Scratch Renderer skin the video is rendered to for
-         * previewing.
-         * @type {number}
-         */
-        this._skinId = -1;
-
-        /**
-         * The Scratch Renderer Skin object.
-         * @type {Skin}
-         */
-        this._skin = null;
-
-        /**
-         * Id for a drawable using the video's skin that will render as a video
-         * preview.
-         * @type {Drawable}
-         */
-        this._drawable = -1;
-
-        /**
-         * Canvas DOM element video is rendered to down or up sample to the
-         * expected resolution.
-         * @type {HTMLCanvasElement}
-         */
-        this._sampleCanvas = null;
-
-        /**
-         * Canvas 2D Context to render to the _sampleCanvas member.
-         * @type {CanvasRenderingContext2D}
-         */
-        this._sampleContext = null;
-
         if (this.runtime.ioDevices) {
             // Clear target motion state values when the project starts.
             this.runtime.on(Runtime.PROJECT_RUN_START, this.reset.bind(this));
@@ -74,10 +41,11 @@ class Scratch3VideoSensingBlocks {
             // Boot up the video, canvas to down/up sample the video stream, the
             // preview skin and drawable, and kick off looping the analysis
             // logic.
-            this._setupVideo();
-            this._setupSampleCanvas();
-            this._setupPreview();
-            this._loop();
+            this.runtime.ioDevices.video.requestVideo()
+                .then(({release}) => {
+                    this.releaseVideo = release;
+                    this._loop();
+                });
         }
     }
 
@@ -99,14 +67,6 @@ class Scratch3VideoSensingBlocks {
         return [480, 360];
     }
 
-    /**
-     * Order preview drawable is inserted at in the renderer.
-     * @type {number}
-     */
-    static get ORDER () {
-        return 1;
-    }
-
     /**
      * The key to load & store a target's motion-related state.
      * @type {string}
@@ -145,127 +105,30 @@ class Scratch3VideoSensingBlocks {
         }
     }
 
-    /**
-     * Setup a video element connected to a user media stream.
-     * @private
-     */
-    _setupVideo () {
-        this._video = document.createElement('video');
-        navigator.getUserMedia({
-            audio: false,
-            video: {
-                width: {min: 480, ideal: 640},
-                height: {min: 360, ideal: 480}
-            }
-        }, stream => {
-            this._video.src = window.URL.createObjectURL(stream);
-            // Hint to the stream that it should load. A standard way to do this
-            // is add the video tag to the DOM. Since this extension wants to
-            // hide the video tag and instead render a sample of the stream into
-            // the webgl rendered Scratch canvas, another hint like this one is
-            // needed.
-            this._track = stream.getTracks()[0];
-        }, err => {
-            // @todo Properly handle errors
-            log(err);
-        });
-    }
-
-    /**
-     * Create a campus to render the user media video to down/up sample to the
-     * needed resolution.
-     * @private
-     */
-    _setupSampleCanvas () {
-        // Create low-resolution image to sample video for analysis and preview
-        const canvas = this._sampleCanvas = document.createElement('canvas');
-        canvas.width = Scratch3VideoSensingBlocks.DIMENSIONS[0];
-        canvas.height = Scratch3VideoSensingBlocks.DIMENSIONS[1];
-        this._sampleContext = canvas.getContext('2d');
-    }
-
-    /**
-     * Create a Scratch Renderer Skin and Drawable to preview the user media
-     * video stream.
-     * @private
-     */
-    _setupPreview () {
-        if (this._skinId !== -1) return;
-        if (this._skin !== null) return;
-        if (this._drawable !== -1) return;
-        if (!this.runtime.renderer) return;
-
-        this._skinId = this.runtime.renderer.createPenSkin();
-        this._skin = this.runtime.renderer._allSkins[this._skinId];
-        this._drawable = this.runtime.renderer.createDrawable();
-        this.runtime.renderer.setDrawableOrder(
-            this._drawable,
-            Scratch3VideoSensingBlocks.ORDER
-        );
-        this.runtime.renderer.updateDrawableProperties(this._drawable, {
-            skinId: this._skinId
-        });
-    }
-
     /**
      * Occasionally step a loop to sample the video, stamp it to the preview
      * skin, and add a TypedArray copy of the canvas's pixel data.
      * @private
      */
     _loop () {
-        setTimeout(this._loop.bind(this), this.runtime.currentStepTime);
-
-        // Ensure video stream is established
-        if (!this._video) return;
-        if (!this._track) return;
-        if (typeof this._video.videoWidth !== 'number') return;
-        if (typeof this._video.videoHeight !== 'number') return;
-
-        // Bail if the camera is *still* not ready
-        const nativeWidth = this._video.videoWidth;
-        const nativeHeight = this._video.videoHeight;
-        if (nativeWidth === 0) return;
-        if (nativeHeight === 0) return;
-
-        const ctx = this._sampleContext;
-
-        // Mirror
-        ctx.scale(-1, 1);
-
-        // Generate video thumbnail for analysis
-        ctx.drawImage(
-            this._video,
-            0,
-            0,
-            nativeWidth,
-            nativeHeight,
-            Scratch3VideoSensingBlocks.DIMENSIONS[0] * -1,
-            0,
-            Scratch3VideoSensingBlocks.DIMENSIONS[0],
-            Scratch3VideoSensingBlocks.DIMENSIONS[1]
-        );
-
-        // Restore the canvas transform
-        ctx.resetTransform();
-
-        // Render to preview layer
-        if (this._skin !== null) {
-            const xOffset = Scratch3VideoSensingBlocks.DIMENSIONS[0] / 2 * -1;
-            const yOffset = Scratch3VideoSensingBlocks.DIMENSIONS[1] / 2;
-            this._skin.drawStamp(this._sampleCanvas, xOffset, yOffset);
-            this.runtime.requestRedraw();
-        }
+        setTimeout(this._loop.bind(this), Math.max(this.runtime.currentStepTime, Scratch3VideoSensingBlocks.INTERVAL));
 
         // Add frame to detector
         const time = Date.now();
-        if (this._lastUpdate === null) this._lastUpdate = time;
+        if (this._lastUpdate === null) {
+            this._lastUpdate = time;
+        }
         const offset = time - this._lastUpdate;
         if (offset > Scratch3VideoSensingBlocks.INTERVAL) {
-            this._lastUpdate = time;
-            const data = ctx.getImageData(
-                0, 0, Scratch3VideoSensingBlocks.DIMENSIONS[0], Scratch3VideoSensingBlocks.DIMENSIONS[1]
-            );
-            this.detect.addFrame(data.data);
+            const frame = this.runtime.ioDevices.video.getFrame({
+                mirror: true,
+                format: Video.FORMAT_IMAGE_DATA,
+                dimensions: Scratch3VideoSensingBlocks.DIMENSIONS
+            });
+            if (frame) {
+                this._lastUpdate = time;
+                this.detect.addFrame(frame.data);
+            }
         }
     }
 
diff --git a/src/io/video.js b/src/io/video.js
new file mode 100644
index 000000000..59b04d6f0
--- /dev/null
+++ b/src/io/video.js
@@ -0,0 +1,332 @@
+const log = require('../util/log');
+
+class Video {
+    constructor (runtime) {
+        /**
+         * Reference to the owning Runtime.
+         * @type {!Runtime}
+         */
+        this.runtime = runtime;
+
+        /**
+         * Cache frames for this many ms.
+         * @type {number}
+         */
+        this._frameCacheTimeout = 16;
+
+        /**
+         * Store each request for video, so when all requests have been released
+         * we can disable the preview and video feed.
+         * @type {Array.<object>}
+         */
+        this._requests = [];
+
+        /**
+         * DOM Video element
+         * @private
+         */
+        this._video = null;
+
+        /**
+         * Usermedia stream track
+         * @private
+         */
+        this._track = null;
+
+        /**
+         * Stores some canvas/frame data per resolution/mirror state.
+         */
+        this._workspace = [];
+
+        /**
+         * Id representing a Scratch Renderer skin the video is rendered to for
+         * previewing.
+         * @type {number}
+         */
+        this._skinId = -1;
+
+        /**
+         * The Scratch Renderer Skin object.
+         * @type {Skin}
+         */
+        this._skin = null;
+
+        /**
+         * Id for a drawable using the video's skin that will render as a video
+         * preview.
+         * @type {Drawable}
+         */
+        this._drawable = -1;
+    }
+
+    static get FORMAT_IMAGE_DATA () {
+        return 'image-data';
+    }
+
+    static get FORMAT_CANVAS () {
+        return 'canvas';
+    }
+
+    /**
+     * Dimensions the video stream is analyzed at after it's rendered to the
+     * sample canvas.
+     * @type {Array.<number>}
+     */
+    static get DIMENSIONS () {
+        return [480, 360];
+    }
+
+    /**
+     * Order preview drawable is inserted at in the renderer.
+     * @type {number}
+     */
+    static get ORDER () {
+        return 1;
+    }
+
+    /**
+     * Request video be enabled. Sets up video, creates video skin and enables preview.
+     *
+     * ioDevices.video.requestVideo()
+     *   .then(({ release }) => {
+     *       this.releaseVideo = release;
+     *   })
+     *
+     * @return {Promise.<object>} A request object with a "release" property that
+     *     should be called when you are done with the video.
+     */
+    requestVideo () {
+        const io = this;
+        const request = {
+            release () {
+                const index = io._requests.indexOf(request);
+                if (index > -1) {
+                    io._requests.splice(index, 1);
+                }
+                if (io._requests.length === 0) {
+                    io._disablePreview();
+                    // By clearing refs to video and track, we should lose our hold over the camera.
+                    io._video = null;
+                    io._track = null;
+                }
+            }
+        };
+
+        if (this.videoReady) {
+            this._requests.push(request);
+            return Promise.resolve(request);
+        }
+
+        if (this._lastSetup) {
+            return this._lastSetup.then(() => {
+                this._requests.push(request);
+                return request;
+            });
+        }
+
+        this._lastSetup = this._setupVideo()
+            .then(() => {
+                this._setupPreview();
+                this._requests.push(request);
+                this._lastSetup = null;
+                return request;
+            }, err => {
+                this._lastSetup = null;
+                throw err;
+            });
+        return this._lastSetup;
+    }
+
+    /**
+     * Create a video stream.
+     * Should probably be moved to -render or somewhere similar later.
+     * @private
+     * @return {Promise} Resolves when video has been received, rejects if video is not received.
+     */
+    _setupVideo () {
+        this._video = document.createElement('video');
+        return new Promise((resolve, reject) => {
+            navigator.getUserMedia({
+                audio: false,
+                video: {
+                    width: {min: 480, ideal: 640},
+                    height: {min: 360, ideal: 480}
+                }
+            }, stream => {
+                this._video.src = window.URL.createObjectURL(stream);
+                // Hint to the stream that it should load. A standard way to do this
+                // is add the video tag to the DOM. Since this extension wants to
+                // hide the video tag and instead render a sample of the stream into
+                // the webgl rendered Scratch canvas, another hint like this one is
+                // needed.
+                this._track = stream.getTracks()[0];
+                resolve(this._video);
+            }, err => {
+                // There are probably some error types we could handle gracefully here.
+                reject(err);
+            });
+        });
+    }
+
+    _disablePreview () {
+        if (this._skin) {
+            this._skin.clear();
+        }
+        this._renderPreviewFrame = null;
+    }
+
+    _setupPreview () {
+        const {renderer} = this.runtime;
+        if (!renderer) return;
+
+        if (this._skinId === -1 && this._skin === null && this._drawable === -1) {
+            this._skinId = renderer.createPenSkin();
+            this._skin = renderer._allSkins[this._skinId];
+            this._drawable = renderer.createDrawable();
+            renderer.setDrawableOrder(
+                this._drawable,
+                Video.ORDER
+            );
+            renderer.updateDrawableProperties(this._drawable, {
+                skinId: this._skinId
+            });
+        }
+
+        // If we haven't already created and started a preview frame render loop, do so.
+        if (!this._renderPreviewFrame) {
+            this._renderPreviewFrame = () => {
+                if (!this._renderPreviewFrame) {
+                    return;
+                }
+
+                setTimeout(this._renderPreviewFrame, this.runtime.currentStepTime);
+
+                const canvas = this.getFrame({format: Video.FORMAT_CANVAS});
+
+                if (!canvas) {
+                    return;
+                }
+
+                const xOffset = Video.DIMENSIONS[0] / -2;
+                const yOffset = Video.DIMENSIONS[1] / 2;
+                this._skin.drawStamp(canvas, xOffset, yOffset);
+                this.runtime.requestRedraw();
+            };
+
+            this._renderPreviewFrame();
+        }
+
+    }
+
+    get videoReady () {
+        if (!this._video) {
+            return false;
+        }
+        if (!this._track) {
+            return false;
+        }
+        const {videoWidth, videoHeight} = this._video;
+        if (typeof videoWidth !== 'number' || typeof videoHeight !== 'number') {
+            return false;
+        }
+        if (videoWidth === 0 || videoHeight === 0) {
+            return false;
+        }
+        return true;
+    }
+
+    /**
+     * Get an internal workspace for canvas/context/caches.
+     * This uses some document stuff to create a canvas and what not; it probably needs
+     * abstraction into the renderer layer?
+     * @private
+     * @return {object} A workspace for canvas/data storage. Internal format not documented intentionally.
+     */
+    _getWorkspace ({dimensions, mirror}) {
+        let workspace = this._workspace.find(space => (
+            space.dimensions.join('-') === dimensions.join('-') &&
+            space.mirror === mirror
+        ));
+        if (!workspace) {
+            workspace = {
+                dimensions,
+                mirror,
+                canvas: document.createElement('canvas'),
+                lastUpdate: 0,
+                cacheData: {}
+            };
+            workspace.canvas.width = dimensions[0];
+            workspace.canvas.height = dimensions[1];
+            workspace.context = workspace.canvas.getContext('2d');
+            this._workspace.push(workspace);
+        }
+        return workspace;
+    }
+
+    /**
+     * Return frame data from the video feed at the specified dimensions, format, and mirroring.
+     * @return {ImageData|HTMLCanvasElement|null} Frame data in the requested format, or null on error.
+     */
+    getFrame ({
+        dimensions = Video.DIMENSIONS,
+        mirror = true,
+        format = Video.FORMAT_IMAGE_DATA,
+        cacheTimeout = this._frameCacheTimeout
+    }) {
+        if (!this.videoReady) {
+            return null;
+        }
+        const [width, height] = dimensions;
+        const workspace = this._getWorkspace({dimensions, mirror: Boolean(mirror)});
+        const {videoWidth, videoHeight} = this._video;
+        const {canvas, context, lastUpdate, cacheData} = workspace;
+        const now = Date.now();
+
+        // If the cached canvas is stale, redraw it from the video element.
+        if (lastUpdate + cacheTimeout < now) {
+
+            if (mirror) {
+                context.scale(-1, 1);
+                context.translate(width * -1, 0);
+            }
+
+            context.drawImage(this._video,
+                // source x, y, width, height
+                0, 0, videoWidth, videoHeight,
+                // dest x, y, width, height
+                0, 0, width, height
+            );
+
+            context.resetTransform();
+            workspace.lastUpdate = now;
+        }
+
+        // Each data format has its own data cache, but they share the same canvas.
+        if (!cacheData[format]) {
+            cacheData[format] = {lastUpdate: 0};
+        }
+        const formatCache = cacheData[format];
+
+        if (formatCache.lastUpdate + cacheTimeout < now) {
+            if (format === Video.FORMAT_IMAGE_DATA) {
+                formatCache.lastData = context.getImageData(0, 0, width, height);
+            } else if (format === Video.FORMAT_CANVAS) {
+                // This will never change; cache it forever.
+                formatCache.lastUpdate = Infinity;
+                formatCache.lastData = canvas;
+            } else {
+                log.error(`video io error - unimplemented format ${format}`);
+                // Cache the null result forever; don't log about it again.
+                formatCache.lastUpdate = Infinity;
+                formatCache.lastData = null;
+            }
+
+            // Rather than setting this to now, the format cache is only as stale as its canvas.
+            formatCache.lastUpdate = Math.max(workspace.lastUpdate, formatCache.lastUpdate);
+        }
+
+        return formatCache.lastData;
+    }
+}
+
+
+module.exports = Video;
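
Usage sketch (not part of the patch): a minimal example of how another extension might consume the new Video io device introduced above. Only requestVideo, getFrame, the release handle, and the Video constants come from this patch; the class name, method names, and the require path are hypothetical placeholders that assume a layout like the video sensing extension's.

    const Video = require('../../io/video'); // hypothetical path, mirrors scratch3_video_sensing

    class ExampleVideoConsumer {
        constructor (runtime) {
            this.runtime = runtime;
            // Ask the io device to start the camera and the stage preview.
            this.runtime.ioDevices.video.requestVideo()
                .then(({release}) => {
                    // Keep the release handle; calling it lets the io device shut the
                    // camera down once no other consumer still needs it.
                    this._releaseVideo = release;
                });
        }

        sample () {
            // Pull a mirrored 480x360 frame; returns null until the camera is ready.
            const frame = this.runtime.ioDevices.video.getFrame({
                mirror: true,
                format: Video.FORMAT_IMAGE_DATA,
                dimensions: Video.DIMENSIONS
            });
            if (frame) {
                // frame is an ImageData; frame.data holds the RGBA bytes.
            }
            return frame;
        }

        dispose () {
            if (this._releaseVideo) this._releaseVideo();
        }
    }

    module.exports = ExampleVideoConsumer;

Because getFrame caches per dimensions/mirror/format workspace for _frameCacheTimeout ms, several consumers polling at step rate share one draw of the video element rather than each resampling it.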