# Voice-Controlled Carousel
# Code
import * as speechCommands from '@tensorflow-models/speech-commands';

const MODEL_PATH = 'http://127.0.0.1:8080'; // local static server hosting the model and example files
let transferRecognizer; // transfer-learning recognizer
let curIndex = 0; // index of the currently visible slide

window.onload = async () => {
  // Load the pretrained browser-FFT speech-commands model from the local server
  const recognizer = speechCommands.create(
    'BROWSER_FFT',
    null,
    MODEL_PATH + '/speech/model.json',
    MODEL_PATH + '/speech/metadata.json'
  );
  await recognizer.ensureModelLoaded(); // make sure the base model has finished loading
  transferRecognizer = recognizer.createTransfer('轮播图'); // create the transfer-learning recognizer ('轮播图' = "carousel")
  const res = await fetch(MODEL_PATH + '/slider/data.bin'); // fetch the pre-recorded examples (see the recording sketch below)
  const arrayBuffer = await res.arrayBuffer(); // read the response body as an ArrayBuffer
  transferRecognizer.loadExamples(arrayBuffer); // load the serialized examples into the transfer recognizer
  console.log(
    'transferRecognizer.countExamples',
    transferRecognizer.countExamples()
  ); // confirm the examples were loaded
  await transferRecognizer.train({ epochs: 30 }); // train the transfer model for 30 epochs
  console.log('done');
};
window.toggle = async (checked) => {
  // Checkbox handler: start or stop listening for voice commands
  if (checked) {
    await transferRecognizer.listen(
      (result) => {
        const { scores } = result;
        const labels = transferRecognizer.wordLabels();
        // Pick the label with the highest score and trigger the matching slide action
        const index = scores.indexOf(Math.max(...scores));
        window.play(labels[index]);
      },
      {
        overlapFactor: 0, // do not overlap consecutive recognition windows
        probabilityThreshold: 0.5, // only fire the callback when the top score is at least 0.5
      }
    );
  } else {
    transferRecognizer.stopListening();
  }
};
window.play = (label) => {
  console.log('label', label);
  const div = document.querySelector('.slider>div');
  if (label === '上一张') {
    // '上一张' ("previous slide"): stop at the first image
    if (curIndex === 0) {
      return;
    }
    curIndex -= 1;
  } else {
    // any other label advances; stop at the last image
    if (curIndex === document.querySelectorAll('img').length - 1) {
      return;
    }
    curIndex += 1;
  }
  div.style.transition = 'transform 1s';
  div.style.transform = `translateX(-${100 * curIndex}%)`;
};
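For reference, the data.bin fetched in window.onload has to be recorded ahead of time. The speech-commands transfer recognizer provides collectExample() and serializeExamples() for this; below is a minimal sketch of how such a file could be produced. The label words and the download helper are assumptions for illustration, not part of this project's code.

```js
// Sketch: record a few examples per label, then serialize them into a binary
// blob that can be saved as data.bin and served next to the model files.
async function recordExamples(transferRecognizer) {
  const words = ['上一张', '下一张']; // assumed label words ("previous" / "next")
  for (const word of words) {
    for (let i = 0; i < 10; i += 1) {
      await transferRecognizer.collectExample(word); // records ~1s of audio per call
    }
  }
  const serialized = transferRecognizer.serializeExamples(); // ArrayBuffer of all examples
  // Hypothetical helper: trigger a browser download so the buffer can be
  // copied to the static server as /slider/data.bin.
  const blob = new Blob([serialized], { type: 'application/octet-stream' });
  const link = document.createElement('a');
  link.href = URL.createObjectURL(blob);
  link.download = 'data.bin';
  link.click();
}
```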
<script src="index.js"></script>
Toggle listening: <input type="checkbox" onchange="toggle(this.checked)" />
<style>
  .slider {
    width: 600px;
    overflow: hidden;
    margin: 10px auto;
  }
  .slider > div {
    display: flex;
    align-items: center;
  }
</style>
<div class="slider">
<div>
<img
src="https://cdn.pixabay.com/photo/2019/10/29/15/57/vancouver-4587302__480.jpg"
alt=""
width="600"
/>
<img
src="https://cdn.pixabay.com/photo/2019/10/31/07/14/coffee-4591159__480.jpg"
alt=""
width="600"
/>
<img
src="https://cdn.pixabay.com/photo/2019/11/01/11/08/landscape-4593909__480.jpg"
alt=""
width="600"
/>
<img
src="https://cdn.pixabay.com/photo/2019/11/02/21/45/maple-leaf-4597501__480.jpg"
alt=""
width="600"
/>
<img
src="https://cdn.pixabay.com/photo/2019/11/02/03/13/in-xinjiang-4595560__480.jpg"
alt=""
width="600"
/>
<img
src="https://cdn.pixabay.com/photo/2019/11/01/22/45/reschensee-4595385__480.jpg"
alt=""
width="600"
/>
</div>
</div>
# Key Notes
- transferRecognizer.loadExamples(arrayBuffer) loads externally recorded example data into the transfer recognizer, as in the snippet below:
const recognizer = speechCommands.create(
  'BROWSER_FFT',
  null,
  MODEL_PATH + '/speech/model.json',
  MODEL_PATH + '/speech/metadata.json'
);
await recognizer.ensureModelLoaded(); // make sure the base model has finished loading
transferRecognizer = recognizer.createTransfer('轮播图'); // create the transfer-learning recognizer
const res = await fetch(MODEL_PATH + '/slider/data.bin'); // fetch the pre-recorded examples over HTTP
const arrayBuffer = await res.arrayBuffer(); // read the response body as an ArrayBuffer
transferRecognizer.loadExamples(arrayBuffer); // load the serialized examples into the transfer recognizer
console.log(
  'transferRecognizer.countExamples',
  transferRecognizer.countExamples()
); // confirm the examples were loaded
await transferRecognizer.train({ epochs: 30 }); // train for 30 epochs
console.log('done');
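The code above retrains the transfer model on every page load. If the installed speech-commands version supports it, the trained transfer model can instead be persisted with save() and restored with load() (by default in the browser's IndexedDB); a hedged sketch under that assumption:

```js
// Sketch: restore a previously saved transfer model, or train and save it once.
// Assumes the speech-commands version in use exposes save()/load() on the
// transfer recognizer; otherwise fall back to retraining as in window.onload.
async function getTrainedRecognizer(baseRecognizer) {
  const transfer = baseRecognizer.createTransfer('轮播图');
  try {
    await transfer.load(); // restore weights saved in a previous session
  } catch (e) {
    // Nothing saved yet: load the examples, train, then persist the result.
    const res = await fetch(MODEL_PATH + '/slider/data.bin');
    transfer.loadExamples(await res.arrayBuffer());
    await transfer.train({ epochs: 30 });
    await transfer.save();
  }
  return transfer;
}
```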