Made by the Pipe Video Recording Platform
These examples demonstrate the powerful getUserMedia
method, which gives web applications secure access to device cameras and microphones. To successfully use the getUserMedia
method, your web page needs to be in a secure context and you need to have user permissions at the browser and operating system levels. When first accessed, the browser will prompt for permissions, and you can manage these settings through browser preferences or OS privacy controls.
Originally part of WebRTC, getUserMedia
has become essential for media capture tasks such as video recording, audio recording, taking pictures, and enumerating available devices. These examples showcase how getUserMedia
enables a range of key scenarios, making it a vital tool for modern web applications.
This example demonstrates the basic use of the getUserMedia
method to access both the user's camera and microphone simultaneously. The code requests permission for both video and audio streams using { video: true, audio: true }
, then creates a video element that displays the live camera feed with audio in real-time.
<video id="video" autoplay playsinline> </video>
<script>
// Request simultaneous access to the camera and microphone.
navigator.mediaDevices
  .getUserMedia({ video: true, audio: true })
  .then((stream) => {
    // Build a <video> element on the fly and attach the live stream.
    const preview = document.createElement('video');
    preview.autoplay = true;
    preview.srcObject = stream;
    document.body.appendChild(preview);
  })
  .catch((error) => {
    // Fires on permission denial or missing/busy devices.
    console.error('Error accessing camera and microphone:', error);
  });
</script>
Key points:
{ video: true, audio: true }
to request both camera and microphone access; use .catch()
to handle permission denials or device issues.

Demonstrates how to properly stop media streams by calling getTracks().forEach(track => track.stop())
on the stream object. This is essential for releasing camera and microphone resources when they're no longer needed.
<script>
// Release the camera and microphone: stopping every track ends capture
// and turns off the browser's recording indicator. `stream` is the
// MediaStream previously returned by getUserMedia.
stream.getTracks().forEach(track => track.stop());
</script>
Key points:
Shows how to request access to only the microphone using { audio: true }
without requesting video.
<script>
// Microphone-only request: the video constraint is simply omitted.
navigator.mediaDevices
  .getUserMedia({ audio: true })
  .then((stream) => {
    console.log('Microphone access granted');
  })
  .catch((error) => {
    console.error('Error accessing microphone:', error);
  });
</script>
Key points:
Demonstrates requesting access to only the camera using { video: true }
without audio.
<script>
// Camera-only request: audio is not part of the constraints.
navigator.mediaDevices
  .getUserMedia({ video: true })
  .then((stream) => {
    // Route the live stream into the existing <video id="video"> element.
    document.getElementById('video').srcObject = stream;
  })
  .catch((error) => {
    console.error('Error accessing camera:', error);
  });
</script>
Key points:
Uses navigator.mediaDevices.enumerateDevices()
to list all available media input devices (cameras and microphones) on the user's system.
<script>
// List every media device known to the browser.
// NOTE: device labels are empty strings until the user has granted
// camera/microphone permission at least once.
navigator.mediaDevices.enumerateDevices()
  .then(devices => {
    devices.forEach(device => {
      console.log(device.kind + ": " + device.label + " id = " + device.deviceId);
    });
  })
  .catch(error => {
    // The original chain had no rejection handler; enumerateDevices can
    // reject (e.g. in insecure contexts), so surface the failure.
    console.error('Error enumerating devices:', error);
  });
</script>
Key points:
Shows how to record video and audio using the MediaRecorder API
.
<video id="preview" autoplay playsinline width="320" height="240"></video>
<button id="record">Start Recording</button>
<button id="stop" disabled>Stop & Download</button>
<script>
// Cache UI references.
const videoEl = document.getElementById('preview'),
  recordBtn = document.getElementById('record'),
  stopBtn = document.getElementById('stop');
let recorder, recordedChunks = [];
// Keep "Start Recording" inert until the stream and recorder exist:
// clicking before getUserMedia resolved used to throw on `recorder.start()`.
recordBtn.disabled = true;
// 1. Get both video and audio
navigator.mediaDevices.getUserMedia({ video: true, audio: true })
  .then(stream => {
    videoEl.srcObject = stream;
    // 2. Setup MediaRecorder
    recorder = new MediaRecorder(stream);
    recorder.ondataavailable = e => {
      // Keep only non-empty chunks (some browsers emit empty ones).
      if (e.data && e.data.size > 0) {
        recordedChunks.push(e.data);
      }
    };
    recorder.onstop = () => {
      // Create a Blob from recorded chunks
      const blob = new Blob(recordedChunks, { type: 'video/webm' });
      const url = URL.createObjectURL(blob);
      // Create download link
      const a = document.createElement('a');
      a.style.display = 'block';
      a.href = url;
      a.download = 'capture.webm';
      a.textContent = 'Download recording';
      document.body.appendChild(a);
    };
    // Stream is ready — allow recording now.
    recordBtn.disabled = false;
  })
  .catch(console.error);
// 3. Wire up buttons
recordBtn.addEventListener('click', () => {
  if (!recorder) return; // stream not ready yet
  recordedChunks = [];
  recorder.start();
  recordBtn.disabled = true;
  stopBtn.disabled = false;
});
stopBtn.addEventListener('click', () => {
  // Guard: stop() on an inactive recorder throws InvalidStateError.
  if (!recorder || recorder.state !== 'recording') return;
  recorder.stop();
  recordBtn.disabled = false;
  stopBtn.disabled = true;
});
</script>
Key points:
Uses MediaRecorder
to capture chunks; saves the recording as .webm
and offers a download link.

Demonstrates accessing the back camera with facingMode: 'environment'
and controlling the device's flashlight/torch feature.
<div class="controls">
<button id="startBtn">Start Camera</button>
<button id="stopBtn" disabled>Stop Camera</button>
<button id="torchBtn" class="torch-btn" disabled>🔦 Toggle Torch</button>
</div>
<video id="video" autoplay playsinline muted></video>
<div id="status" class="status info">
Ready to start camera. Make sure you're using Chrome on Android for torch functionality.
</div>
<script>
// Shared state for the torch demo.
let stream = null; // active MediaStream, or null when the camera is off
let track = null; // the stream's video track, used for torch control
let torchOn = false; // current torch state
// Cached DOM references.
const video = document.getElementById('video');
const startBtn = document.getElementById('startBtn');
const torchBtn = document.getElementById('torchBtn');
const stopBtn = document.getElementById('stopBtn');
const status = document.getElementById('status');
// Show `text` in the status area; `cssClass` selects its styling ('' = none).
function updateStatus(text, cssClass = '') {
  status.textContent = text;
  status.className = cssClass;
}
// Open the rear camera, attach it to the page, and probe torch support.
async function startCamera() {
  try {
    updateStatus('Starting camera...');
    // Request camera with back camera
    stream = await navigator.mediaDevices.getUserMedia({
      video: { facingMode: 'environment' }
    });
    track = stream.getVideoTracks()[0];
    video.srcObject = stream;
    video.style.display = 'block';
    // Check if torch is supported.
    // getCapabilities() itself is missing in some browsers (e.g. older
    // Firefox), so guard the call instead of letting it throw.
    const capabilities = track.getCapabilities?.() ?? {};
    const hasTorch = capabilities.torch === true;
    // Update buttons
    startBtn.disabled = true;
    stopBtn.disabled = false;
    torchBtn.disabled = !hasTorch;
    if (hasTorch) {
      // Fixed mis-encoded flashlight emoji in the user-facing message.
      updateStatus('Camera started! Torch available 🔦', 'success');
    } else {
      updateStatus('Camera started, but torch not supported on this device');
    }
  } catch (error) {
    updateStatus('Failed to start camera: ' + error.message, 'error');
  }
}
// Flip the torch on/off via a constraint on the active video track.
async function toggleTorch() {
  // No active video track means the camera is off — nothing to toggle.
  if (!track) return;
  try {
    torchOn = !torchOn;
    // Torch is a non-standard constraint; it must be passed in `advanced`.
    await track.applyConstraints({
      advanced: [{ torch: torchOn }]
    });
    // Reflect the new state in the UI.
    // Fixed mis-encoded flashlight emoji in the button labels.
    torchBtn.classList.toggle('active', torchOn);
    torchBtn.textContent = torchOn ? '🔦 Torch ON' : '🔦 Torch OFF';
    updateStatus(`Torch turned ${torchOn ? 'ON' : 'OFF'}`, 'success');
  } catch (error) {
    updateStatus('Torch control failed: ' + error.message, 'error');
    torchOn = !torchOn; // Reset state
  }
}
// Release the camera and restore the demo's initial UI state.
function stopCamera() {
  if (stream) {
    // Stop every track to release the camera hardware.
    stream.getTracks().forEach(track => track.stop());
    stream = null;
    track = null;
  }
  video.style.display = 'none';
  startBtn.disabled = false;
  stopBtn.disabled = true;
  torchBtn.disabled = true;
  torchBtn.classList.remove('active');
  // Fixed mis-encoded flashlight emoji in the button label.
  torchBtn.textContent = '🔦 Toggle Torch';
  torchOn = false;
  updateStatus('Camera stopped');
}
// Event listeners
startBtn.addEventListener('click', startCamera);
torchBtn.addEventListener('click', toggleTorch);
stopBtn.addEventListener('click', stopCamera);
// Check browser support
// mediaDevices may be undefined outside a secure context (plain HTTP)
// or in very old browsers, so disable the demo up front.
if (!navigator.mediaDevices?.getUserMedia) {
  updateStatus('Camera not supported in this browser', 'error');
  startBtn.disabled = true;
}
</script>
Key points:
track.getCapabilities()
to check if torch/flashlight is supported; track.applyConstraints({ advanced: [{ torch: torchOn }] })
toggles it.

Shows how to request a specific aspect ratio (16:9) and resolution (1280×720) from the camera using constraints like aspectRatio: { ideal: 16/9 }
and displays the actual achieved dimensions.
<script>
const videoElem = document.getElementById('preview');
const infoElem = document.getElementById('info');
// Request a 16:9, 1280x720 stream and report what the camera actually gave us.
async function startCamera() {
  try {
    // Desired aspect ratio: 16:9
    const desiredRatio = 16 / 9;
    const stream = await navigator.mediaDevices.getUserMedia({
      video: {
        aspectRatio: { ideal: desiredRatio },
        width: { ideal: 1280 },
        height: { ideal: 720 }
      }
    });
    videoElem.srcObject = stream;
    // Inspect actual settings
    const track = stream.getVideoTracks()[0];
    const settings = track.getSettings();
    // settings.aspectRatio is optional per spec (Firefox omits it);
    // derive it from width/height when absent so .toFixed can't throw.
    const ratio = settings.aspectRatio ?? settings.width / settings.height;
    // Fixed mis-encoded multiplication sign in the displayed text.
    infoElem.textContent =
      `Got ${settings.width}×${settings.height} ` +
      `(aspectRatio: ${ratio.toFixed(2)})`;
  } catch (err) {
    console.error('Error opening camera:', err);
    infoElem.textContent = 'Unable to access camera with desired aspect ratio.';
  }
}
startCamera();
</script>
Key points:
aspectRatio: { ideal: 16/9 }
; verify the achieved values with track.getSettings()
.

Demonstrates capturing video in portrait orientation (720×1280) using the front-facing camera by specifying facingMode: "user"
and swapped width/height dimensions.
<script>
// Portrait-orientation capture from the front camera: the width/height
// ideals are deliberately swapped (720 wide, 1280 tall).
const constraints = {
  video: {
    width: { ideal: 720 },
    height: { ideal: 1280 },
    facingMode: "user"
  },
  audio: false
};
navigator.mediaDevices.getUserMedia(constraints)
  .then(stream => {
    // Pipe the stream into the page's <video> element.
    document.getElementById('video').srcObject = stream;
  })
  .catch(error => {
    console.error('Error accessing camera:', error);
  });
</script>
Key points:
audio: false
for video-only capture.

A simple example showing how to display a live video preview from the camera using promise-based getUserMedia
with basic error handling.
<video id="preview" autoplay playsinline width="640" height="480"></video>
<script>
// Minimal live preview: one video-only request, with the stream routed
// straight into the <video id="preview"> element.
const videoEl = document.getElementById('preview');
navigator.mediaDevices
  .getUserMedia({ video: true })
  .then((stream) => {
    videoEl.srcObject = stream;
  })
  .catch((err) => {
    console.error('Error accessing camera:', err);
  });
</script>
What it does:
Shows modern async/await syntax for accessing the microphone, with start/stop buttons to control audio capture
<button id="start">Start Mic</button>
<button id="stop" disabled>Stop Mic</button>
<audio id="player" controls></audio>
<script>
// Microphone capture with async/await plus explicit start/stop controls.
const startBtn = document.getElementById('start');
const stopBtn = document.getElementById('stop');
const player = document.getElementById('player');
let mediaStream;
// Start: request the microphone and route it into the <audio> element.
startBtn.addEventListener('click', async () => {
  try {
    mediaStream = await navigator.mediaDevices.getUserMedia({ audio: true });
    player.srcObject = mediaStream;
    startBtn.disabled = true;
    stopBtn.disabled = false;
  } catch (err) {
    console.error('Microphone access error:', err);
  }
});
// Stop: end every track to release the microphone.
stopBtn.addEventListener('click', () => {
  for (const track of mediaStream.getTracks()) {
    track.stop();
  }
  startBtn.disabled = false;
  stopBtn.disabled = true;
});
</script>
Key points:
async/await
for cleaner control flow.

Combines live video preview with snapshot functionality using HTML5 canvas.
<video id="cam" autoplay playsinline width="320" height="240"></video>
<button id="snap">Take Snapshot</button>
<canvas id="photo" width="320" height="240"></canvas>
<script>
// Live preview plus still capture: a button click copies the current
// video frame onto the canvas.
const video = document.getElementById('cam');
const canvas = document.getElementById('photo');
const snapBtn = document.getElementById('snap');
const ctx = canvas.getContext('2d');
// Prefer the front-facing camera where available.
navigator.mediaDevices
  .getUserMedia({ video: { facingMode: "user" } })
  .then((stream) => {
    video.srcObject = stream;
  })
  .catch(console.error);
snapBtn.addEventListener('click', () => {
  // Freeze the current frame by painting it at canvas dimensions.
  ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
});
</script>
Highlights:
facingMode: "user"
requests the front-facing camera on devices that support it;