Using HTML5/Canvas/JavaScript to take in-browser screenshots

Google's "Report a Bug" or "Feedback Tool" lets you select an area of your browser window to create a screenshot that is submitted with your feedback about a bug.

[Screenshot of the Google feedback tool, posted by Jason Small in a duplicate question]

How are they doing this? Google's JavaScript feedback API is loaded from here, and their overview of the feedback module demonstrates the screenshot capability.


JavaScript can read the DOM and render a fairly accurate representation of it using canvas. I have been working on a script which converts HTML into a canvas image, and today decided to turn it into an implementation for sending feedback like you described.

The script allows you to create feedback forms which include a screenshot, created in the client's browser, along with the form. The screenshot is based on the DOM, so it may not be 100% accurate to the real representation: it does not take an actual screenshot, but builds one from the information available on the page.

It does not require any rendering from the server, as the whole image is created in the client's browser. The html2canvas script itself is still in a very experimental state, as it does not parse nearly as many CSS3 properties as I would like it to, nor does it support loading CORS images even when a proxy is available.

Browser compatibility is still quite limited (not because more couldn't be supported, I just haven't had time to make it more cross-browser).
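As a rough illustration of the approach, here is a minimal sketch that renders the DOM to a canvas and exports a PNG data URL. It assumes the current promise-based html2canvas API; the original examples further down use the older 0.x onrendered callback instead.

// Render the visible DOM into a canvas and turn it into a PNG data URL.
// Note: this is a DOM re-rendering, not a pixel-perfect native screenshot.
html2canvas(document.body).then(canvas => {
    const pngDataUrl = canvas.toDataURL("image/png");
    // e.g. preview it, or attach it to the feedback form before uploading
    console.log(pngDataUrl.slice(0, 40) + "…");
});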

For more information, have a look at the examples here:

http://hertzen.com/experiments/jsfeedback/

Edit: The html2canvas script is now available separately here, along with some examples here.

Edit 2: Another confirmation that Google uses a very similar method (in fact, judging by the documentation, the only major difference is their asynchronous traversal/drawing approach) can be found in this presentation by Elliott Sprehn from the Google Feedback team: http://www.elliottsprehn.com/preso/fluentconf/

Your web app can now take a "native" screenshot of the client's entire desktop using getUserMedia():

Have a look at this example:

https://www.webrtc-experiment.com/Pluginfree-Screen-Sharing/

The client has to be using Chrome (for now), and screen capture support needs to be enabled under chrome://flags.
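For reference, a rough sketch of how that flag-gated capture was requested at the time; the constraint names come from the old prefixed, chromeMediaSource-based API, so treat this as an assumption rather than a current recipe. Modern browsers expose navigator.mediaDevices.getDisplayMedia() instead, as shown in later answers.

// Legacy, flag-gated screen capture in old Chrome builds (prefixed API).
navigator.webkitGetUserMedia(
    {
        audio: false,
        video: { mandatory: { chromeMediaSource: 'screen' } }
    },
    stream => {
        // attach the stream to a <video> element, or draw a frame to a canvas
        const video = document.querySelector('video');
        video.srcObject = stream;
        video.play();
    },
    err => console.error('Screen capture failed:', err)
);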

PoC

As Niklas mentioned, you can use the html2canvas library to take a screenshot with JS in the browser. I will extend his answer here by providing an example of taking a screenshot with that library (a proof of concept):

function report() {
    let region = document.querySelector("body"); // whole screen
    html2canvas(region, {
        onrendered: function(canvas) {
            let pngUrl = canvas.toDataURL(); // png in dataURL format
            let img = document.querySelector(".screen");
            img.src = pngUrl;

            // here you can allow user to set bug-region
            // and send it with 'pngUrl' to server
        },
    });
}
.container {
    margin-top: 10px;
    border: solid 1px black;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/html2canvas/0.4.1/html2canvas.min.js"></script>
<div>Screenshot tester</div>
<button onclick="report()">Take screenshot</button>

<div class="container">
    <img width="75%" class="screen">
</div>

In the report() function, inside the onrendered callback, after getting the image as a data URI you can show it to the user and let them draw a "bug region" with the mouse, then send the screenshot and the region coordinates to the server.

This example also has an async/await version, with a nice makeScreenshot() function.

Update

A simple example that allows you to take a screenshot, select a region, describe the bug and send a POST request (jsfiddle here); the main function is report().

async function report() {
    let screenshot = await makeScreenshot(); // png dataUrl
    let img = q(".screen");
    img.src = screenshot;

    let c = q(".bug-container");
    c.classList.remove('hide');

    let box = await getBox();
    c.classList.add('hide');

    send(screenshot, box); // send post request with bug image, region and description
    alert('To see POST request with image go to: chrome console > network tab');
}

// ----- Helper functions

let q = s => document.querySelector(s); // query selector helper
window.report = report; // bind report to be visible in fiddle html

async function makeScreenshot(selector = "body") {
    return new Promise((resolve, reject) => {
        let node = document.querySelector(selector);

        html2canvas(node, { onrendered: (canvas) => {
            let pngUrl = canvas.toDataURL();
            resolve(pngUrl);
        }});
    });
}

async function getBox() {
    return new Promise((resolve, reject) => {
        let b = q(".bug");
        let r = q(".region");
        let scr = q(".screen");
        let send = q(".send");
        let start = 0;
        let sx, sy, ex, ey = -1;
        r.style.width = 0;
        r.style.height = 0;

        let drawBox = () => {
            r.style.left   = (ex > 0 ? sx : sx + ex) + 'px';
            r.style.top    = (ey > 0 ? sy : sy + ey) + 'px';
            r.style.width  = Math.abs(ex) + 'px';
            r.style.height = Math.abs(ey) + 'px';
        }

        //console.log({b, r, scr});
        b.addEventListener("click", e => {
            if (start == 0) {
                sx = e.pageX;
                sy = e.pageY;
                ex = 0;
                ey = 0;
                drawBox();
            }
            start = (start + 1) % 3;
        });

        b.addEventListener("mousemove", e => {
            //console.log(e)
            if (start == 1) {
                ex = e.pageX - sx;
                ey = e.pageY - sy;
                drawBox();
            }
        });

        send.addEventListener("click", e => {
            start = 0;
            let a = 100 / 75; // compensate for the 75%-scaled preview image
            resolve({
                x: Math.floor(((ex > 0 ? sx : sx + ex) - scr.offsetLeft) * a),
                y: Math.floor(((ey > 0 ? sy : sy + ey) - b.offsetTop) * a),
                width: Math.floor(Math.abs(ex) * a),
                height: Math.floor(Math.abs(ey) * a), // use ey (not ex) for the box height
                desc: q('.bug-desc').value
            });
        });
    });
}

function send(image, box) {
    let formData = new FormData();
    let req = new XMLHttpRequest();

    formData.append("box", JSON.stringify(box));
    formData.append("screenshot", image);

    req.open("POST", '/upload/screenshot');
    req.send(formData);
}
.bug-container { background: rgba(255, 0, 0, 0.1); margin-top: 20px; text-align: center; }
.send { border-radius: 5px; padding: 10px; background: green; cursor: pointer; }
.region { position: absolute; background: rgba(255, 0, 0, 0.4); }
.example { height: 100px; background: yellow; }
.bug { margin-top: 10px; cursor: crosshair; }
.hide { display: none; }
.screen { pointer-events: none }
<script src="https://cdnjs.cloudflare.com/ajax/libs/html2canvas/0.4.1/html2canvas.min.js"></script>
<body>
<div>Screenshot tester</div>
<button onclick="report()">Report bug</button>

<div class="example">Lorem ipsum</div>

<div class="bug-container hide">
    <div>Select bug region: click once - move mouse - click again</div>
    <div class="bug">
        <img width="75%" class="screen">
        <div class="region"></div>
    </div>
    <div><textarea class="bug-desc">Describe bug here...</textarea></div>
    <div class="send">SEND BUG</div>
</div>
</body>

Here is an example using getDisplayMedia:

document.body.innerHTML = '<video style="width: 100%; height: 100%; border: 1px black solid;"/>';
navigator.mediaDevices.getDisplayMedia()
    .then(mediaStream => {
        const video = document.querySelector('video');
        video.srcObject = mediaStream;
        video.onloadedmetadata = e => {
            video.play();
            video.pause();
        };
    })
    .catch(err => console.log(`${err.name}: ${err.message}`));
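The snippet above only previews the captured stream in a <video> element. To get an actual still image, you can draw the current frame onto a canvas once playback has started; a minimal sketch along the same lines (this variant of the handler and the PNG export are an illustration, not part of the original answer):

navigator.mediaDevices.getDisplayMedia()
    .then(mediaStream => {
        const video = document.querySelector('video');
        video.srcObject = mediaStream;
        video.onloadedmetadata = async () => {
            await video.play();   // make sure a frame has been decoded
            video.pause();

            // copy the current frame into a canvas and export it as a PNG data URL
            const canvas = document.createElement('canvas');
            canvas.width = video.videoWidth;
            canvas.height = video.videoHeight;
            canvas.getContext('2d').drawImage(video, 0, 0);
            const dataUrl = canvas.toDataURL('image/png');

            // stop capturing once the frame has been taken
            mediaStream.getTracks().forEach(t => t.stop());
            console.log(dataUrl.slice(0, 40) + '…');
        };
    })
    .catch(err => console.log(`${err.name}: ${err.message}`));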

Also worth a mention is the Screen Capture API documentation.

Using the getDisplayMedia API, you can capture the screenshot as a canvas or as a JPEG Blob/ArrayBuffer:

FIX 1: use getUserMedia with chromeMediaSource only for Electron.js
FIX 2: throw an error instead of returning an empty object
FIX 3: fix the demo to prevent the error getDisplayMedia must be called from a user gesture handler

// docs: https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getDisplayMedia
// see: https://www.webrtc-experiment.com/Pluginfree-Screen-Sharing/#20893521368186473
// see: https://github.com/muaz-khan/WebRTC-Experiment/blob/master/Pluginfree-Screen-Sharing/conference.js

function getDisplayMedia(options) {
    if (navigator.mediaDevices && navigator.mediaDevices.getDisplayMedia) {
        return navigator.mediaDevices.getDisplayMedia(options)
    }
    if (navigator.getDisplayMedia) {
        return navigator.getDisplayMedia(options)
    }
    if (navigator.webkitGetDisplayMedia) {
        return navigator.webkitGetDisplayMedia(options)
    }
    if (navigator.mozGetDisplayMedia) {
        return navigator.mozGetDisplayMedia(options)
    }
    throw new Error('getDisplayMedia is not defined')
}

function getUserMedia(options) {
    if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
        return navigator.mediaDevices.getUserMedia(options)
    }
    if (navigator.getUserMedia) {
        return navigator.getUserMedia(options)
    }
    if (navigator.webkitGetUserMedia) {
        return navigator.webkitGetUserMedia(options)
    }
    if (navigator.mozGetUserMedia) {
        return navigator.mozGetUserMedia(options)
    }
    throw new Error('getUserMedia is not defined')
}
async function takeScreenshotStream() {
    // see: https://developer.mozilla.org/en-US/docs/Web/API/Window/screen
    const width = screen.width * (window.devicePixelRatio || 1)
    const height = screen.height * (window.devicePixelRatio || 1)

    const errors = []
    let stream
    try {
        stream = await getDisplayMedia({
            audio: false,
            // see: https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamConstraints/video
            video: {
                width,
                height,
                frameRate: 1,
            },
        })
    } catch (ex) {
        errors.push(ex)
    }

    // for electron js
    if (navigator.userAgent.indexOf('Electron') >= 0) {
        try {
            stream = await getUserMedia({
                audio: false,
                video: {
                    mandatory: {
                        chromeMediaSource: 'desktop',
                        // chromeMediaSourceId: source.id,
                        minWidth : width,
                        maxWidth : width,
                        minHeight: height,
                        maxHeight: height,
                    },
                },
            })
        } catch (ex) {
            errors.push(ex)
        }
    }

    if (errors.length) {
        console.debug(...errors)
        if (!stream) {
            throw errors[errors.length - 1]
        }
    }

    return stream
}
async function takeScreenshotCanvas() {
    const stream = await takeScreenshotStream()

    // from: https://stackoverflow.com/a/57665309/5221762
    const video = document.createElement('video')
    const result = await new Promise((resolve, reject) => {
        video.onloadedmetadata = () => {
            video.play()
            video.pause()

            // from: https://github.com/kasprownik/electron-screencapture/blob/master/index.js
            const canvas = document.createElement('canvas')
            canvas.width = video.videoWidth
            canvas.height = video.videoHeight
            const context = canvas.getContext('2d')
            // see: https://developer.mozilla.org/en-US/docs/Web/API/HTMLVideoElement
            context.drawImage(video, 0, 0, video.videoWidth, video.videoHeight)
            resolve(canvas)
        }
        video.srcObject = stream
    })

    stream.getTracks().forEach(function (track) {
        track.stop()
    })

    if (result == null) {
        throw new Error('Cannot take canvas screenshot')
    }

    return result
}
// from: https://stackoverflow.com/a/46182044/5221762
function getJpegBlob(canvas) {
    return new Promise((resolve, reject) => {
        // docs: https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement/toBlob
        canvas.toBlob(blob => resolve(blob), 'image/jpeg', 0.95)
    })
}

async function getJpegBytes(canvas) {
    const blob = await getJpegBlob(canvas)
    return new Promise((resolve, reject) => {
        const fileReader = new FileReader()

        fileReader.addEventListener('loadend', function () {
            if (this.error) {
                reject(this.error)
                return
            }
            resolve(this.result)
        })

        fileReader.readAsArrayBuffer(blob)
    })
}

async function takeScreenshotJpegBlob() {
    const canvas = await takeScreenshotCanvas()
    return getJpegBlob(canvas)
}

async function takeScreenshotJpegBytes() {
    const canvas = await takeScreenshotCanvas()
    return getJpegBytes(canvas)
}

function blobToCanvas(blob, maxWidth, maxHeight) {
    return new Promise((resolve, reject) => {
        const img = new Image()
        img.onload = function () {
            const canvas = document.createElement('canvas')
            const scale = Math.min(
                1,
                maxWidth ? maxWidth / img.width : 1,
                maxHeight ? maxHeight / img.height : 1,
            )
            canvas.width = img.width * scale
            canvas.height = img.height * scale
            const ctx = canvas.getContext('2d')
            ctx.drawImage(img, 0, 0, img.width, img.height, 0, 0, canvas.width, canvas.height)
            resolve(canvas)
        }
        img.onerror = () => {
            reject(new Error('Error loading blob into Image'))
        }
        img.src = URL.createObjectURL(blob)
    })
}

Demo:

document.body.onclick = async () => {
    // take the screenshot
    var screenshotJpegBlob = await takeScreenshotJpegBlob()

    // show preview with max size 300 x 300 px
    var previewCanvas = await blobToCanvas(screenshotJpegBlob, 300, 300)
    previewCanvas.style.position = 'fixed'
    document.body.appendChild(previewCanvas)

    // send it to the server
    var formdata = new FormData()
    formdata.append("screenshot", screenshotJpegBlob)
    await fetch('https://your-web-site.com/', {
        method: 'POST',
        body: formdata,
        // note: do not set the Content-Type header manually for FormData;
        // the browser adds "multipart/form-data" with the correct boundary itself
    })
}

// and click on the page

Here is a complete screenshot example that works with Chrome in 2021. The end result is a blob ready to be transmitted. The flow is: request media > grab frame > draw to canvas > transfer to blob. If you want to do it in a more memory-efficient way, explore OffscreenCanvas or possibly ImageBitmapRenderingContext (see the sketch after the example below).

https://jsfiddle.net/v24hyd3q/1/

// Request media
navigator.mediaDevices.getDisplayMedia().then(stream => {
    // Grab frame from stream
    let track = stream.getVideoTracks()[0];
    let capture = new ImageCapture(track);
    capture.grabFrame().then(bitmap => {
        // Stop sharing
        track.stop();

        // Draw the bitmap to canvas
        // ('canvas' refers to an existing <canvas> element on the page, as in the jsfiddle)
        canvas.width = bitmap.width;
        canvas.height = bitmap.height;
        canvas.getContext('2d').drawImage(bitmap, 0, 0);

        // Grab blob from canvas
        canvas.toBlob(blob => {
            // Do things with blob here
            console.log('output blob:', blob);
        });
    });
}).catch(e => console.log(e));
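For the memory-efficiency option mentioned above, one possibility is to hand the grabbed frame to an ImageBitmapRenderingContext instead of re-rasterising it through a 2D context. A small sketch under that assumption, reusing the bitmap from grabFrame() in the example:

// Alternative drawing step: transfer the ImageBitmap directly to the canvas.
// transferFromImageBitmap() takes ownership of the bitmap (it cannot be reused afterwards).
const bitmapCanvas = document.createElement('canvas');
bitmapCanvas.width = bitmap.width;
bitmapCanvas.height = bitmap.height;
bitmapCanvas.getContext('bitmaprenderer').transferFromImageBitmap(bitmap);
bitmapCanvas.toBlob(blob => console.log('output blob:', blob));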

You can try my new JS library: screenshot.js.

It is able to take real screenshots.

You load the script:

<script src="https://raw.githubusercontent.com/amiad/screenshot.js/master/screenshot.js"></script>

and take a screenshot:

new Screenshot({
    success: img => {
        // callback function
        myimage = img;
    }
});

You can read about more options on the project page.