<script setup lang="tsx">
import { ref, onMounted, inject, onActivated } from 'vue'
import { ElMessage } from 'element-plus'
import { uploadFile } from '@/api/materials'
import { ContentWrap } from '@/components/ContentWrap'
import { uploadReportFile } from '@/api/report'
const updateImgList = inject('updateImgList') as (() => void) | undefined
const setIsVideo = inject('setIsVideo') as ((yesno: boolean) => void) | undefined
const props = defineProps({
edittype: {
type: String,
default: ''
},
reportinfo: {
type: Object,
default: () => ({})
},
patientInfo: {
type: Object,
default: () => ({})
}
})
const video = ref<HTMLVideoElement | null>(null)
const canvasVideo = ref<HTMLCanvasElement | null>(null)
const context = ref<CanvasRenderingContext2D | null>(null)
let vstream: MediaStream | null = null
let deviceVideos: string[] = []
let deviceVideosIndex = 0
let chunks: Blob[] = []
let recording = false
let mediaRecorder: MediaRecorder | null = null
let recordedVideoUrl: string | null = null
const getUserMedia = async (constraints: MediaStreamConstraints): Promise<MediaStream> => {
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
return await navigator.mediaDevices.getUserMedia(constraints)
} else {
throw new Error('不支持访问用户媒体')
}
}
const closeCamera = () => {
  return new Promise((resolve) => {
    if (video.value && video.value.srcObject) {
      try {
        // Stop every track on the current stream before detaching it
        const tracks = (video.value.srcObject as MediaStream).getTracks()
        tracks.forEach((track) => {
          track.stop()
        })
        video.value.srcObject = null
      } catch (e) {
        console.error(e)
      }
    }
    // Resolve in all cases so callers awaiting closeCamera() never hang
    resolve('ok')
  })
}
const openCamera = async () => {
await start()
try {
await closeCamera()
if (deviceVideos.length === 0) {
ElMessage.error('没有找到摄像头设备')
return
}
    console.log('摄像头数量:' + deviceVideos.length)
const stream = await getUserMedia({
video: {
width: 1920,
height: 1080,
deviceId: deviceVideos[deviceVideosIndex++ % deviceVideos.length]
},
audio: true
})
vstream = stream
if (video.value) {
video.value.srcObject = stream
video.value.play()
}
setIsVideo?.(true)
  } catch (e) {
    console.error(e)
    ElMessage.error(String(e))
  }
}
const keyDown = (event: KeyboardEvent) => {
  // Capture and save when Enter is pressed while the button has focus
  if (event.key === 'Enter') {
    captureSavePhoto()
  }
}
const captureSavePhoto = () => {
  capturePhoto()
  savePhotox()
}
const capturePhoto = () => {
  // Draw the current frame into the hidden preview canvas, scaled to its size
  if (context.value && video.value && canvasVideo.value) {
    context.value.drawImage(
      video.value,
      0,
      0,
      video.value.videoWidth,
      video.value.videoHeight,
      0,
      0,
      canvasVideo.value.width,
      canvasVideo.value.height
    )
  }
}
const saveAs = (data: string, filename: string) => {
const link = document.createElement('a')
link.href = data
link.download = filename
document.body.appendChild(link)
link.click()
document.body.removeChild(link)
}
const savePhotox = async () => {
  const canvas1 = document.createElement('canvas')
  if (video.value) {
    canvas1.width = video.value.videoWidth
    canvas1.height = video.value.videoHeight
    const context1 = canvas1.getContext('2d')
    if (context1) {
      // Draw the full frame at the video's native resolution
      context1.drawImage(video.value, 0, 0, canvas1.width, canvas1.height)
      const fd = new FormData()
      fd.append('file', blobtoFile(base64toBlob(canvas1.toDataURL()), 'photo.png'))
      if (props.edittype === 'reg') {
        // 'reg' (grossing) view: upload to the materials library
        fd.append('RegID', props.patientInfo.RegID)
        const res = await uploadFile(fd)
        if (res) {
          ElMessage.success('保存成功')
          updateImgList?.()
        } else {
          ElMessage.error('保存失败')
        }
      }
      if (props.edittype === 'report') {
        // 'report' view: upload as a report attachment
        fd.append('RegID', props.reportinfo.RegID)
        const res = await uploadReportFile(fd)
        if (res) {
          ElMessage.success('保存成功')
          updateImgList?.()
        } else {
          ElMessage.error('保存失败')
        }
      }
    }
  }
}
const blobtoFile = (blob: Blob, fileName: string): File => {
  return new File([blob], fileName, { type: blob.type })
}
const base64toBlob = (dataurl: string): Blob => {
  const arr = dataurl.split(',')
  // Fall back to PNG if the data URL carries no explicit MIME type
  const mime = arr[0].match(/:(.*?);/)?.[1] ?? 'image/png'
  const bstr = atob(arr[1])
  let n = bstr.length
  const u8arr = new Uint8Array(n)
  while (n--) {
    u8arr[n] = bstr.charCodeAt(n)
  }
  return new Blob([u8arr], { type: mime })
}
const savePhoto = () => {
  const canvas1 = document.createElement('canvas')
  if (video.value) {
    canvas1.width = video.value.videoWidth
    canvas1.height = video.value.videoHeight
    const context1 = canvas1.getContext('2d')
    if (context1) {
      context1.drawImage(video.value, 0, 0, canvas1.width, canvas1.height)
      // toDataURL() produces a PNG by default, so save with a matching extension
      saveAs(canvas1.toDataURL(), 'photo.png')
    }
  }
}
const startRecording = () => {
if (vstream && MediaRecorder.isTypeSupported('video/webm')) {
chunks = []
mediaRecorder = new MediaRecorder(vstream, { mimeType: 'video/webm' })
mediaRecorder.ondataavailable = (e) => {
if (e.data && e.data.size > 0) {
chunks.push(e.data)
}
}
    mediaRecorder.onstop = () => {
      const recordedBlob = new Blob(chunks, { type: 'video/webm' })
      chunks = []
      // Revoke any previous object URL before creating a new one to avoid leaks
      if (recordedVideoUrl) {
        URL.revokeObjectURL(recordedVideoUrl)
      }
      recordedVideoUrl = URL.createObjectURL(recordedBlob)
    }
mediaRecorder.start()
recording = true
} else {
console.error('不支持录制视频')
}
}
const stopRecording = () => {
if (recording && mediaRecorder) {
mediaRecorder.stop()
mediaRecorder = null
recording = false
}
}
const saveRecording = () => {
  if (recordedVideoUrl) {
const a = document.createElement('a')
a.href = recordedVideoUrl
a.download = '录像.webm'
a.click()
}
}
const start = async () => {
  if (canvasVideo.value) {
    context.value = canvasVideo.value.getContext('2d')
  }
  if (navigator.mediaDevices) {
    try {
      // Await enumeration so callers (openCamera) see a populated device list,
      // and rebuild the list so repeated activations do not accumulate duplicates
      const devices = await navigator.mediaDevices.enumerateDevices()
      deviceVideos = devices
        .filter((device) => device.kind === 'videoinput')
        .map((device) => device.deviceId)
    } catch (error) {
      console.error('Failed to enumerate devices:', error)
    }
  }
}
onMounted(async () => {
await start()
})
onActivated(async () => {
await start()
})
defineExpose({
openCamera,
closeCamera,
capturePhoto,
savePhoto,
startRecording,
stopRecording,
saveRecording,
captureSavePhoto
})
</script>
<template>
<ContentWrap style="border: 0px; position: relative">
<div class="flex justify-center mb-10px">
<BaseButton size="large" @keydown="keyDown" @click="captureSavePhoto">拍照并保存</BaseButton>
<!--
<BaseButton @click="openCamera">打开摄像头</BaseButton>
<BaseButton @click="closeCamera">关闭摄像头</BaseButton>
<BaseButton @click="capturePhoto">拍照</BaseButton>
<BaseButton @click="savePhoto">拍照另存为</BaseButton>
<BaseButton @click="startRecording">开始录像</BaseButton>
<BaseButton @click="stopRecording">停止录像</BaseButton>
<BaseButton @click="saveRecording">录像另存为</BaseButton>
-->
</div>
<div class="flex justify-center">
<video ref="video" style="width: 80%; height: 80%" controls></video>
</div>
<canvas id="canvasVideo" ref="canvasVideo" width="400" height="300"></canvas>
</ContentWrap>
</template>
<style scoped lang="less">
#canvasVideo {
background-color: #1c2518;
position: absolute;
bottom: 0px;
left: 0px;
display: none;
}
</style>
In the Vue 3 camera code above, the main logic and functionality look reasonable, but there are a few potential issues and points for improvement worth noting:

1. Keyboard event handling (`keydown`): the `@keydown="keyDown"` listener on the `BaseButton` component only fires while that button has focus. If the intent is to trigger the capture from the keyboard anywhere on the page, listen for `keydown` on the whole document instead, or make sure the button is focused first (see the first sketch after these notes).
2. Duplication between `savePhotox` and `savePhoto`: the two functions overlap in functionality but handle the result differently (`savePhotox` uploads the image via `FormData`, while `savePhoto` downloads it with `saveAs`). Consider extracting the shared frame-capture logic and keeping only the upload/download step separate (see the second sketch).
3. Use of `canvasVideo`: the template declares a `canvas` element with `ref="canvasVideo"`, but it is only used to obtain a drawing context (`context.value = canvasVideo.value.getContext('2d')`). `capturePhoto` draws into it, yet the result is never read back or shown. If `canvasVideo` is not actually needed, consider removing it to simplify the code.
4. Exception handling: `getUserMedia` and `closeCamera` use `try...catch` to handle possible errors, which is good practice. Still, make sure every asynchronous operation that can fail is caught and handled appropriately.
5. Code organization and maintainability: the two photo-saving paths share most of their frame-capture logic and could be factored into a common helper.
6. Styles and display: `canvasVideo` is set to `display: none;`, so it never appears on the page. If you intend to use it at some point (for example, as an image-processing preview), remember to change its display property when needed.
7. Device permissions and user prompts: `getUserMedia` triggers a browser permission prompt; denial or a missing device should be reported to the user clearly (see the third sketch).
8. Performance: each recording creates a new object URL; revoking the previous one with `URL.revokeObjectURL` avoids leaking memory across recordings.

These are some basic review points and suggestions that should help you improve and optimize the Vue 3 camera code.
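For point 1, a minimal sketch of a document-level listener, assuming it lives in the same `<script setup>` block so `captureSavePhoto` is in scope:

```ts
import { onMounted, onBeforeUnmount } from 'vue'

// Global shortcut: Enter anywhere on the page captures and saves a photo.
const onGlobalKeydown = (event: KeyboardEvent) => {
  if (event.key === 'Enter') {
    captureSavePhoto()
  }
}

onMounted(() => {
  document.addEventListener('keydown', onGlobalKeydown)
})

onBeforeUnmount(() => {
  // Detach the listener so it does not outlive the component.
  document.removeEventListener('keydown', onGlobalKeydown)
})
```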
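For point 2, one way to factor out the shared logic; `captureFrame` is a hypothetical helper name, and the rest reuses identifiers from the component above:

```ts
// Hypothetical helper: draw the current video frame at its native resolution.
const captureFrame = (videoEl: HTMLVideoElement): HTMLCanvasElement => {
  const canvas = document.createElement('canvas')
  canvas.width = videoEl.videoWidth
  canvas.height = videoEl.videoHeight
  canvas.getContext('2d')?.drawImage(videoEl, 0, 0, canvas.width, canvas.height)
  return canvas
}

// Download path (what savePhoto does today):
const downloadPhoto = () => {
  if (video.value) {
    saveAs(captureFrame(video.value).toDataURL(), 'photo.png')
  }
}

// Upload path (what savePhotox does today) can reuse the same helper:
//   const dataUrl = captureFrame(video.value).toDataURL()
//   fd.append('file', blobtoFile(base64toBlob(dataUrl), 'photo.png'))
```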
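For point 7, a sketch that distinguishes the common `getUserMedia` failure modes by `DOMException` name; the messages are placeholders:

```ts
const openCameraSafely = async () => {
  try {
    vstream = await getUserMedia({ video: true, audio: true })
  } catch (e) {
    if (e instanceof DOMException && e.name === 'NotAllowedError') {
      ElMessage.error('摄像头权限被拒绝') // permission denied by user or policy
    } else if (e instanceof DOMException && e.name === 'NotFoundError') {
      ElMessage.error('没有找到摄像头设备') // no camera device available
    } else {
      ElMessage.error(String(e))
    }
  }
}
```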