Let me show you the end result first. Personally, I think the effect is acceptable. The recognition is not very accurate because the model had too little time to learn from the images (the computer is too slow). (The author is using Windows 10.) Installation and runtime environment: Node.js and npm.
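As a rough sketch of the training-side setup (based on what train/index.js and data.js actually import; exact package versions are not given in the original article):

```bash
# In the train folder: the training scripts only require @tensorflow/tfjs-node
npm init -y
npm install @tensorflow/tfjs-node
```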
The project directory is as follows: the garbage classification/train dataset folder (one subfolder of images per class), the outputDir folder that will hold the exported model, the train folder containing the training scripts, and the app folder containing the React front end.

train folder

index.js (entry file):

```js
const tf = require('@tensorflow/tfjs-node')
const getData = require('./data')

const TRAIN_DIR = '../garbage classification/train'
const OUTPUT_DIR = '../outputDir'
const MOBILENET_URL = 'http://ai-sample.oss-cn-hangzhou.aliyuncs.com/pipcook/models/mobilenet/web_model/model.json'

const main = async () => {
  // Load data
  const { ds, classes } = await getData(TRAIN_DIR, OUTPUT_DIR)

  // Define the model: reuse the first 87 MobileNet layers as a frozen feature extractor
  const mobilenet = await tf.loadLayersModel(MOBILENET_URL)
  mobilenet.summary()
  // console.log(mobilenet.layers.map((l, i) => [l.name, i]))
  const model = tf.sequential()
  for (let i = 0; i <= 86; i += 1) {
    const layer = mobilenet.layers[i]
    layer.trainable = false
    model.add(layer)
  }
  model.add(tf.layers.flatten())
  model.add(tf.layers.dense({
    units: 10,
    activation: 'relu'
  }))
  model.add(tf.layers.dense({
    units: classes.length,
    activation: 'softmax'
  }))

  // Train the model
  model.compile({
    loss: 'sparseCategoricalCrossentropy',
    optimizer: tf.train.adam(),
    metrics: ['acc']
  })
  await model.fitDataset(ds, { epochs: 20 })
  await model.save(`file://${process.cwd()}/${OUTPUT_DIR}`)
}

main()
```

data.js (processing data):

```js
const fs = require('fs')
const tf = require('@tensorflow/tfjs-node')

// Read an image file and turn it into a normalized [1, 224, 224, 3] tensor
const img2x = (imgPath) => {
  const buffer = fs.readFileSync(imgPath)
  return tf.tidy(() => {
    const imgTs = tf.node.decodeImage(new Uint8Array(buffer))
    const imgTsResized = tf.image.resizeBilinear(imgTs, [224, 224])
    // Normalize pixel values from [0, 255] to [-1, 1]
    return imgTsResized.toFloat()
      .sub(255 / 2)
      .div(255 / 2)
      .reshape([1, 224, 224, 3])
  })
}

const getData = async (trainDir, outputDir) => {
  // Each subfolder of the training directory is one class
  const classes = fs.readdirSync(trainDir)
  fs.writeFileSync(`${outputDir}/classes.json`, JSON.stringify(classes))

  const data = []
  classes.forEach((dir, dirIndex) => {
    fs.readdirSync(`${trainDir}/${dir}`)
      .filter(n => n.match(/jpg$/))
      .slice(0, 10) // only use the first 10 images per class to keep training fast
      .forEach(filename => {
        console.log('read', dir, filename)
        const imgPath = `${trainDir}/${dir}/${filename}`
        data.push({ imgPath, dirIndex })
      })
  })

  tf.util.shuffle(data)

  // Yield batches of 32 images as { xs, ys } pairs
  const ds = tf.data.generator(function* () {
    const count = data.length
    const batchSize = 32
    for (let start = 0; start < count; start += batchSize) {
      const end = Math.min(start + batchSize, count)
      yield tf.tidy(() => {
        const inputs = []
        const labels = []
        for (let j = start; j < end; j += 1) {
          const { imgPath, dirIndex } = data[j]
          const x = img2x(imgPath)
          inputs.push(x)
          labels.push(dirIndex)
        }
        const xs = tf.concat(inputs)
        const ys = tf.tensor(labels)
        return { xs, ys }
      })
    }
  })

  return { ds, classes }
}

module.exports = getData
```

Install some plugins needed to run the project.
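The article does not list these plugins, so the following is an educated guess based on what the app code imports (React, ReactDOM, Ant Design and the browser build of TensorFlow.js), plus http-server, whose `hs` binary is used later to serve the model; the bundler behind `npm start` is not shown in the original, so Parcel here is only an assumption:

```bash
# In the app folder: packages inferred from the imports in the code below
npm install react react-dom antd @tensorflow/tfjs
# http-server provides the `hs` command used later to serve outputDir with CORS
npm install -g http-server
# Bundler for `npm start` — an assumption; the original article does not say which one it uses
npm install -D parcel-bundler
```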
app folder

App.js:

```js
import React, { PureComponent } from 'react'
import { Button, Progress, Spin, Empty } from 'antd'
import 'antd/dist/antd.css'
import * as tf from '@tensorflow/tfjs'
import { file2img, img2x } from './utils'
import intro from './intro'

const DATA_URL = 'http://127.0.0.1:8080/'

class App extends PureComponent {
  state = {}

  async componentDidMount() {
    // Load the trained model and the class list served by http-server
    this.model = await tf.loadLayersModel(DATA_URL + '/model.json')
    // this.model.summary()
    this.CLASSES = await fetch(DATA_URL + '/classes.json').then(res => res.json())
  }

  predict = async (file) => {
    const img = await file2img(file)
    this.setState({
      imgSrc: img.src,
      isLoading: true
    })
    setTimeout(() => {
      const pred = tf.tidy(() => {
        const x = img2x(img)
        return this.model.predict(x)
      })
      // Sort class scores from highest to lowest
      const results = pred.arraySync()[0]
        .map((score, i) => ({ score, label: this.CLASSES[i] }))
        .sort((a, b) => b.score - a.score)
      this.setState({
        results,
        isLoading: false
      })
    }, 0)
  }

  renderResult = (item) => {
    const finalScore = Math.round(item.score * 100)
    return (
      <tr key={item.label}>
        <td style={{ width: 80, padding: '5px 0' }}>{item.label}</td>
        <td>
          <Progress
            percent={finalScore}
            status={finalScore === 100 ? 'success' : 'normal'}
          />
        </td>
      </tr>
    )
  }

  render() {
    const { imgSrc, results, isLoading } = this.state
    const finalItem = results && { ...results[0], ...intro[results[0].label] }
    return (
      <div style={{ padding: 20 }}>
        <span
          style={{
            color: '#cccccc',
            textAlign: 'center',
            fontSize: 12,
            display: 'block'
          }}
        >Recognition may not be accurate</span>
        <Button
          type="primary"
          size="large"
          style={{ width: '100%' }}
          onClick={() => this.upload.click()}
        >
          Select Image Recognition
        </Button>
        <input
          type="file"
          onChange={e => this.predict(e.target.files[0])}
          ref={el => { this.upload = el }}
          style={{ display: 'none' }}
        />
        {!results && !imgSrc && <Empty style={{ marginTop: 40 }} />}
        {imgSrc && <div style={{ marginTop: 20, textAlign: 'center' }}>
          <img src={imgSrc} style={{ maxWidth: '100%' }} />
        </div>}
        {finalItem && <div style={{ marginTop: 20 }}>Recognition result: </div>}
        {finalItem && <div style={{ display: 'flex', alignItems: 'flex-start', marginTop: 20 }}>
          <img src={finalItem.icon} width={120} />
          <div>
            <h2 style={{ color: finalItem.color }}>
              {finalItem.label}
            </h2>
            <div style={{ color: finalItem.color }}>
              {finalItem.intro}
            </div>
          </div>
        </div>}
        {isLoading && <Spin
          size="large"
          style={{
            display: 'flex',
            justifyContent: 'center',
            alignItems: 'center',
            marginTop: 40
          }}
        />}
        {results && <div style={{ marginTop: 20 }}>
          <table style={{ width: '100%' }}>
            <tbody>
              <tr>
                <td>Category</td>
                <td>Matching degree</td>
              </tr>
              {results.map(this.renderResult)}
            </tbody>
          </table>
        </div>}
      </div>
    )
  }
}

export default App
```

index.html:

```html
<!DOCTYPE html>
<html>
<head>
  <title>Garbage Classification</title>
  <meta name="viewport" content="width=device-width, initial-scale=1">
</head>
<body>
  <div id="app"></div>
  <script src="./index.js"></script>
</body>
</html>
```

index.js:

```js
import React from 'react'
import ReactDOM from 'react-dom'
import App from './App'

ReactDOM.render(<App />, document.querySelector('#app'))
```

intro.js:

```js
export default {
  'Recyclables': {
    icon: 'https://lajifenleiapp.com/static/svg/1_3F6BA8.svg',
    color: '#3f6ba8',
    intro: 'Refers to items that are generated in daily life or in activities that provide services for daily life, have lost all or part of their original use value, and can be recycled and processed into production raw materials or can be sorted and reused, including waste paper, plastics, glass, metals, fabrics, etc.'
  },
  'Hazardous waste': {
    icon: 'https://lajifenleiapp.com/static/svg/2v_B43953.svg',
    color: '#b43953',
    intro: 'Refers to substances in domestic waste that cause direct or potential harm to human health or the natural environment, including waste rechargeable batteries, waste button batteries, waste light bulbs, discarded medicines, waste pesticides (containers), waste paints (containers), waste daily chemicals, waste mercury products, waste electrical appliances and electronic products, etc.'
  },
  'Kitchen waste': {
    icon: 'https://lajifenleiapp.com/static/svg/3v_48925B.svg',
    color: '#48925b',
    intro: 'Refers to the organic and perishable waste generated in residents\' daily lives, including vegetable leaves, leftovers, fruit peels, egg shells, tea dregs, bones, etc.'
  },
  'Other garbage': {
    icon: 'https://lajifenleiapp.com/static/svg/4_89918B.svg',
    color: '#89918b',
    intro: 'Refers to other household waste that is mixed, contaminated and difficult to classify, except for recyclables, hazardous waste and kitchen waste.'
  }
}
```
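Note that the keys in intro.js must match the class folder names that data.js writes to classes.json during training; if a class name differs, `intro[results[0].label]` is undefined and the app shows only the score table for it. As an optional sanity check (not part of the original project; the ../outputDir path is an assumption matching the training script), a small Node script could compare the two:

```js
// check-classes.js — optional helper, run from the app folder with: node check-classes.js
const fs = require('fs')

// The labels defined in app/intro.js (kept in sync by hand)
const introLabels = ['Recyclables', 'Hazardous waste', 'Kitchen waste', 'Other garbage']

// classes.json is written by train/data.js; the relative path is an assumption
const classes = JSON.parse(fs.readFileSync('../outputDir/classes.json', 'utf8'))

classes.forEach(label => {
  if (!introLabels.includes(label)) {
    console.warn(`No intro.js entry for class "${label}"; the app will show its score but no icon or description.`)
  }
})
```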
utils.js:

```js
import * as tf from '@tensorflow/tfjs'

// Turn the File chosen in the <input type="file"> into a 224x224 <img> element
export const file2img = async (f) => {
  return new Promise(resolve => {
    const reader = new FileReader()
    reader.readAsDataURL(f)
    reader.onload = (e) => {
      const img = document.createElement('img')
      img.src = e.target.result
      img.width = 224
      img.height = 224
      img.onload = () => { resolve(img) }
    }
  })
}

// Convert the <img> element into a [1, 224, 224, 3] tensor,
// using the same [-1, 1] normalization as the training code
export function img2x(imgEl) {
  return tf.tidy(() => {
    return tf.browser.fromPixels(imgEl)
      .toFloat().sub(255 / 2).div(255 / 2)
      .reshape([1, 224, 224, 3])
  })
}
```

The main code is all shown above. As mentioned before, the author does not know this area deeply and cannot explain the code in detail; if you are interested, please research it yourself. The code address: gitee.com/suiboyu/gar…

Before running the project, run `node index.js` in the train directory to generate the model.json that the recognition app uses. After that, run `hs outputDir --cors` in the root directory so that the generated model.json is served over HTTP. Only then can you run `npm start`; otherwise the project will report an error.
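Putting those steps together, the command sequence might look roughly like this (assuming the `hs` command comes from the globally installed http-server package and that `npm start` is wired up in the app's package.json, which the original article does not show):

```bash
# 1. Train and export the model; this writes model.json, the weight files and classes.json into ../outputDir
cd train
node index.js

# 2. From the project root, serve outputDir over HTTP with CORS enabled
cd ..
hs outputDir --cors

# 3. In another terminal, start the React app (assumed to live in the app folder)
cd app
npm start
```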
Summary

This concludes this article on implementing an image recognition app with React. For more content on React image recognition apps, please search 123WORDPRESS.COM's previous articles. I hope everyone will continue to support 123WORDPRESS.COM!