The following code (working), train a model to recognize cats and make a prediction on the selected picture. (Code TensorFlowJS but the question is generally TensorFlow)
So far it is only predicting one class ("cat"), so that a car or a dog would be for example 80% a cat.
Question:
How do I add other classes (like "dog")?
Should it look like that (abstracted): model.fit([img1, img2, img3], [label1, label2, label3] ...) ?
I don't get it:
What is the relation between the labels and the training set.
Here is the code (please ignore the "Predict" part for now):
<head>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<!-- Scoped npm package names start with '@'; the original paste mangled '@' into '#',
     which makes both CDN URLs resolve to nothing. -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@1.2.7"> </script>
<script src="https://unpkg.com/@tensorflow-models/mobilenet"></script>
</head>
<body>
<!-- File picker, prediction output list, and preview canvas -->
<div class="container mt-5">
<div class="row">
<input id ="image-selector" class="form-control border-0" type="file"/>
</div>
<div class="row">
<div class="col">
<h2>Prediction</h2>
<ol id="prediction-list"></ol>
</div>
</div>
<div class="row">
<div class="col-12">
<h2 class="ml-3">Image</h2>
<canvas id="canvas" width="400" height="300" style="border:1px solid #000000;"></canvas>
</div>
</div>
</div>
<!-- Training set: the script derives each image's label from its "cat"/"dog" CSS class -->
<div id="training-images">
<img width="400" height="300" class="train-image cat" src="training-images/cat.jpg" />
<img width="400" height="300" class="train-image cat" src="training-images/cat2.jpeg" />
<img width="400" height="300" class="train-image cat" src="training-images/cat3.jpeg" />
<img width="400" height="300" class="train-image cat" src="training-images/cat4.jpeg" />
<img width="400" height="300" class="train-image dog" src="training-images/dog.jpeg" />
<img width="400" height="300" class="train-image dog" src="training-images/dog2.jpeg" />
<img width="400" height="300" class="train-image dog" src="training-images/dog3.jpeg" />
<img width="400" height="300" class="train-image dog" src="training-images/dog4.jpeg" />
</div>
</body>
<script>
// Shared state for the training and prediction sections below.
const modelType = "mobilenet";
const model = tf.sequential();
const label = ['cat'];
let ys;       // one-hot label tensor, built in the load handler
let setLabel; // de-duplicated label list used to decode predictions
const input = document.getElementById("image-selector");
const canvas = document.getElementById("canvas");
const context = canvas.getContext('2d');
//-------------------------- Training: --------------------------------
// On load: build labels, assemble the model, preprocess the training
// images, and kick off training.
window.addEventListener('load', (event) => {
    // ---- Labels ------------------------------------------------------
    // One label per training image, derived from its CSS class, so the
    // sample count of `ys` matches the number of training images.
    const trainImages = document.getElementsByClassName("train-image");
    const classNames = Array.from(trainImages).map(
        (el) => el.classList.contains("cat") ? "cat" : "dog");
    setLabel = Array.from(new Set(classNames));
    // Depth 10 matches the 10-unit softmax output layer below.
    ys = tf.oneHot(
        tf.tensor1d(classNames.map((c) => setLabel.indexOf(c)), 'int32'),
        10);
    console.log('ys:::' + ys);
    // ---- Model -------------------------------------------------------
    model.add(tf.layers.conv2d({
        inputShape: [224, 224, 3],
        kernelSize: 5,
        filters: 8,
        strides: 2,
        activation: 'relu',
        kernelInitializer: 'VarianceScaling'
    }));
    model.add(tf.layers.maxPooling2d({poolSize: 2, strides: 2}));
    model.add(tf.layers.maxPooling2d({poolSize: 2, strides: 2}));
    model.add(tf.layers.flatten({}));
    model.add(tf.layers.dense({units: 64, activation: 'relu'}));
    model.add(tf.layers.dense({units: 10, activation: 'softmax'}));
    model.compile({
        loss: 'meanSquaredError',
        optimizer: 'sgd'
    });
    // ---- Training images ---------------------------------------------
    // Iterate the real element count: the original hard-coded 40 and
    // indexed past the end of the 4-element "cat" collection, which
    // made preprocessImage throw on an undefined element.
    const images = [];
    for (let i = 0; i < trainImages.length; i++) {
        const img = preprocessImage(trainImages[i], modelType);
        images.push(tf.reshape(img, [1, 224, 224, 3], 'resize'));
    }
    console.log("processed images : ");
    console.log(images);
    trainModel(images);
});
/**
 * Fits the model on each preprocessed image, then logs a sanity-check
 * prediction for it.
 * NOTE(review): `images[i]` holds 1 sample while `ys` holds one row per
 * training image, so fit() will report a sample-count mismatch unless
 * the inputs are concatenated into one batch — confirm with the caller.
 * @param {tf.Tensor[]} images - tensors of shape [1, 224, 224, 3].
 */
async function trainModel(images) {
    for (let i = 0; i < images.length; i++) {
        // Use a plain try/catch instead of mixing `await` with
        // .then()/.catch() on the same promise.
        try {
            await model.fit(images[i], ys, {epochs: 100, batchSize: 32});
            const t = model.predict(images[i]);
            console.log('Prediction:::' + t);
            // argMax over axis 1 = class of highest probability per sample.
            // `const` avoids the implicit global the original created.
            const pred = t.argMax(1).dataSync();
            const labelsPred = Array.from(pred).map((e) => setLabel[e]);
            console.log('labelsPred:::' + labelsPred);
        } catch (e) {
            console.log(e.message);
        }
    }
    console.log("Training done!");
}
//-------------------------- Predict: --------------------------------
// Loads the chosen file, draws it on the preview canvas, then runs the
// model on it.
input.addEventListener("change", function () {
    const reader = new FileReader();
    reader.addEventListener("loadend", function () {
        const src_image = new Image();
        src_image.onload = function () {
            canvas.height = src_image.height;
            canvas.width = src_image.width;
            context.drawImage(src_image, 0, 0);
            // The original also built an unused data-URL copy of the
            // canvas here; prediction works on the Image element itself.
            runPrediction(src_image);
        };
        src_image.src = this.result;
    });
    // readAsDataURL returns undefined, so its result is not captured.
    reader.readAsDataURL(this.files[0]);
});
/**
 * Runs the trained model on one loaded image and renders the single top
 * class into the #prediction-list element.
 * @param {HTMLImageElement} imageData - already-decoded image to classify.
 */
async function runPrediction(imageData) {
    // preprocessImage already expandDims()'d to [1, 224, 224, 3], so no
    // extra reshape is needed (the original computed one and ignored it).
    const tensor = preprocessImage(imageData, "mobilenet");
    const prediction = await model.predict(tensor).data();
    console.log('prediction:::' + prediction);
    const top = Array.from(prediction)
        .map(function (p, i) {
            return {
                probability: p,
                // Map the class index to its label; the original stored
                // the raw probability here, printing it twice.
                className: setLabel ? setLabel[i] : i
            };
        })
        .sort(function (a, b) {
            return b.probability - a.probability;
        })
        .slice(0, 1);
    $("#prediction-list").empty();
    top.forEach(function (p) {
        $("#prediction-list").append(`<li>${p.className}:${p.probability.toFixed(6)}</li>`);
    });
}
//-------------------------- Helpers: --------------------------------
/**
 * Converts a DOM image into a normalized 4-D tensor of shape
 * [1, 224, 224, 3], scaling pixel values from [0, 255] into [-1, 1].
 * @param {HTMLImageElement} image - source pixels.
 * @param {string} modelName - currently unused; kept for call-site compatibility.
 * @returns {tf.Tensor4D}
 */
function preprocessImage(image, modelName) {
    const half = tf.scalar(127.5);
    const pixels = tf.browser.fromPixels(image)
        .resizeNearestNeighbor([224, 224])
        .toFloat();
    return pixels.sub(half).div(half).expandDims();
}
</script>
The code is based on the TFJS documentation and a comment on the github : https://github.com/tensorflow/tfjs/issues/1288
UPDATE :
So I need X and Y to be the same length for X:images and Y:labels, with Y1 being the label for X1 and so on...
I tried:
ys:::Tensor (with only 2 classes represented in the training data set) :
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0]]
One image + all labels -> with "model.fit(images[i], ys, {epochs: 100})...", I get:
Error: "Input Tensors should have the same number of samples as target Tensors. Found 1 input sample(s) and 10 target sample(s)."
One image + one label -> with "model.fit(images[i], ys[i], {epochs: 100})...", I get:
Error: "Cannot read property 'shape' of null" — I guess ys is a tensor but ys[i] is not.
All images + all labels -> with "model.fit(images, ys, {epochs: 100})...", I get:
Error: "when checking model input: the Array of Tensors that you are passing to your model is not the size the model expected.
Expected to see 1 Tensor(s), but instead got the following list of Tensor(s): Tensor ..."
Guess: I need to put all images in one tensor with the same structure as ys.
SOLVED :
After solving the problem with the labels thanks to Rishabh Sahrawat, I had to merge all tensor(images) in to one with the help of tf.concat(...).
[tensorImg1, tensorImg2, tensorImg3, tensorImg4, ...] x tensor[label1, label2, label3, label4, ...]
->
tensor[dataImg1, dataImg2, dataImg3, dataImg4, ...] x tensor[label1, label2, label3, label4, ...]
Updated code :
<head>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<!-- Scoped npm package names start with '@'; the original paste mangled '@' into '#',
     which makes both CDN URLs resolve to nothing. -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@1.2.7"> </script>
<script src="https://unpkg.com/@tensorflow-models/mobilenet"></script>
</head>
<body>
<!-- File picker, prediction output list, and preview canvas -->
<div class="container mt-5">
<div class="row">
<input id ="image-selector" class="form-control border-0" type="file"/>
</div>
<div class="row">
<div class="col">
<h2>Prediction</h2>
<ol id="prediction-list"></ol>
</div>
</div>
<div class="row">
<div class="col-12">
<h2 class="ml-3">Image</h2>
<canvas id="canvas" width="400" height="300" style="border:1px solid #000000;"></canvas>
</div>
</div>
</div>
<!-- Training set: the script labels each image from its "cat"/"dog" CSS class -->
<div id="training-images">
<img width="400" height="300" class="train-image cat" src="training-images/cat.jpg" />
<img width="400" height="300" class="train-image cat" src="training-images/cat2.jpeg" />
<img width="400" height="300" class="train-image cat" src="training-images/cat3.jpeg" />
<img width="400" height="300" class="train-image dog" src="training-images/dog.jpeg" />
<img width="400" height="300" class="train-image dog" src="training-images/dog2.jpeg" />
<img width="400" height="300" class="train-image dog" src="training-images/dog3.jpeg" />
<img width="400" height="300" class="train-image dog" src="training-images/dog4.jpeg" />
</div>
</body>
<script>
// Shared state for the training and prediction sections below.
const modelType = "mobilenet";
const model = tf.sequential();
const labels = ['cat', 'dog']; // index 0 = cat, index 1 = dog
let ys;       // one-hot label tensor, built in the load handler
let setLabel; // label lookup used when decoding predictions
const input = document.getElementById("image-selector");
const canvas = document.getElementById("canvas");
const context = canvas.getContext('2d');
//-------------------------- Training: --------------------------------
// On load: assemble the model, preprocess the training images with one
// numeric label each, one-hot encode the labels, and start training.
window.addEventListener('load', () => {
    prepareModel();
    // Query the DOM collection once; the original re-ran
    // getElementsByClassName three times per loop iteration.
    const trainImages = document.getElementsByClassName('train-image');
    const images = [];
    const trainLabels = [];
    for (let i = 0; i < trainImages.length; i++) {
        images.push(preprocessImage(trainImages[i], modelType));
        // 0 = cat, 1 = dog — must match the order of the `labels` array.
        trainLabels.push(trainImages[i].classList.contains("cat") ? 0 : 1);
    }
    console.log(labels);
    setLabel = Array.from(labels);
    ys = tf.oneHot(trainLabels, 2); // depth 2 = number of classes
    console.log('ys:::' + ys);
    console.log(images);
    trainModel(images);
});
/**
 * Trains once on the whole batch (all image tensors concatenated along
 * the sample axis), then logs a prediction per training image.
 * The original re-ran the full 100-epoch fit on the concatenated batch
 * once per image — N redundant trainings — and mixed `await` with
 * .then()/.catch() on the same promise.
 * @param {tf.Tensor[]} images - tensors of shape [1, 224, 224, 3].
 */
async function trainModel(images) {
    try {
        const xs = tf.concat(images, 0); // -> [N, 224, 224, 3]
        await model.fit(xs, ys, {epochs: 100});
        for (let i = 0; i < images.length; i++) {
            const t = model.predict(images[i]);
            console.log('Prediction:::' + t);
            // argMax over axis 1 = per-sample class of highest probability;
            // `const` avoids the implicit global the original created.
            const pred = t.argMax(1).dataSync();
            console.log('labelsPred:::' + Array.from(pred).map((e) => setLabel[e]));
        }
    } catch (e) {
        console.log(e.message);
    }
    console.log("Training done!");
}
//-------------------------- Predict: --------------------------------
// Loads the chosen file, draws it on the preview canvas, then runs the
// model on it.
input.addEventListener("change", function () {
    const reader = new FileReader();
    reader.addEventListener("loadend", function () {
        const src_image = new Image();
        src_image.onload = function () {
            canvas.height = src_image.height;
            canvas.width = src_image.width;
            context.drawImage(src_image, 0, 0);
            // The original also built an unused data-URL copy of the
            // canvas here; prediction works on the Image element itself.
            runPrediction(src_image);
        };
        src_image.src = this.result;
    });
    // readAsDataURL returns undefined, so its result is not captured.
    reader.readAsDataURL(this.files[0]);
});
/**
 * Runs the trained model on one loaded image and renders the single top
 * class into the #prediction-list element.
 * @param {HTMLImageElement} imageData - already-decoded image to classify.
 */
async function runPrediction(imageData) {
    // preprocessImage already expandDims()'d to [1, 224, 224, 3], so no
    // extra reshape is needed (the original computed one and ignored it).
    const tensor = preprocessImage(imageData, "mobilenet");
    const prediction = await model.predict(tensor).data();
    console.log('prediction:::' + prediction);
    const top = Array.from(prediction)
        .map(function (p, i) {
            return {
                probability: p,
                // Map the class index to its label ("cat"/"dog"); the
                // original stored the raw probability here, printing it twice.
                className: labels[i]
            };
        })
        .sort(function (a, b) {
            return b.probability - a.probability;
        })
        .slice(0, 1);
    $("#prediction-list").empty();
    top.forEach(function (p) {
        $("#prediction-list").append(`<li>${p.className}:${p.probability.toFixed(6)}</li>`);
    });
}
//-------------------------- Helpers: --------------------------------
/**
 * Builds and compiles a small CNN classifier with a 2-class softmax head.
 * Expects inputs of shape [224, 224, 3] normalized to [-1, 1]
 * (see preprocessImage).
 */
function prepareModel() {
    model.add(tf.layers.conv2d({
        inputShape: [224, 224, 3],
        kernelSize: 5,
        filters: 8,
        strides: 2,
        activation: 'relu',
        kernelInitializer: 'VarianceScaling'
    }));
    model.add(tf.layers.maxPooling2d({poolSize: 2, strides: 2}));
    model.add(tf.layers.maxPooling2d({poolSize: 2, strides: 2}));
    model.add(tf.layers.flatten({}));
    model.add(tf.layers.dense({units: 64, activation: 'relu'}));
    // Output units must equal the number of classes (cat, dog).
    model.add(tf.layers.dense({units: 2, activation: 'softmax'}));
    model.compile({
        // Cross-entropy is the standard loss for a softmax output trained
        // against one-hot labels; MSE converges poorly in this setup.
        loss: 'categoricalCrossentropy',
        optimizer: 'sgd'
    });
    model.summary();
}
/**
 * Turns a DOM image into a model-ready tensor: resize to 224x224,
 * cast to float, rescale [0, 255] -> [-1, 1], and add a batch axis
 * so the result has shape [1, 224, 224, 3].
 * @param {HTMLImageElement} image - source pixels.
 * @param {string} modelName - currently unused; kept for call-site compatibility.
 * @returns {tf.Tensor4D}
 */
function preprocessImage(image, modelName) {
    const mid = tf.scalar(127.5);
    return tf.browser.fromPixels(image)
        .resizeNearestNeighbor([224, 224])
        .toFloat()
        .sub(mid)
        .div(mid)
        .expandDims();
}
</script>
How do i add other classes (like "dog") ?
You can make the model predict another class by adding that class to your training dataset. Let's say you added a Dog class, so now your dataset consists of Cat and Dog pictures.
Should it look like that (abstracted): model.fit([img1, img2, img3], [label1, label2, label3] ...)
Yes, images x = [img1, img2, img3] and labels to corresponding images, y = [label1, label2, label3]. In x, img1 or img2 or any other image can be a cat image or dog image. For simplicity, you can feed images represented as numpy arrays. Here is how the input training data must look like.
What is the relation between the labels and the training set.
Labels are a part of training set. If you are performing supervised classification then labels have to be fed along with your input features (images).
UPDATE for updated question
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0]]
In this you have a shape mismatch. The shape here is (10,10) but the model expects label input with shape (10,).
If you have two classes, you don't need to represent one class with [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0]] and the other with [0, 1, 0, 0, 0, 0, 0, 0, 0, 0] in Y (Label). What would the rest of the zeroes do? Just keep it simple and define it as follows.
If you have a cat you label it 0, and for dog image, you label it 1 or vice-versa.
and then you feed it like [0,1,0], here first 0 is the label for img1, 1 for img2 and 0 for img3.
So I am building my own custom bingo table that is 4x4 with Vue. I have everything down except how to detect whether a user has formed a line that is horizontal, diagonal, or vertical.
Inside my data function I have an array that represents the 4x4
[9, 13, 28, 24],
[11, 22, 15, 43],
[54, 5, 37, 4],
[27, 40, 12, 36]
My question is how can I check to see if a user has clicked a 4x4 line? No code is needed as an answer I just want to know how I can approach this problem.
Given an n x n matrix
Horizontal:
Are there n selected elements that have the same rowIndex?
Vertical:
Are there n selected elements that have the same columnIndex?
Diagonal (Top Left to Bottom Right)
Are there n elements that have the same rowIndex as their columnIndex?
Diagonal (Top Right to Bottom Left)
Are there n elements that satisfy (length(row) - 1) - rowIndex == columnIndex?
// Bingo card component: tracks selected cells and exposes computed
// flags for a completed horizontal, vertical, or diagonal line.
const Card = Vue.component('card', {
    template: '#card',
    props: {
        playerCard: Array
    },
    data() {
        return {
            selectedVals: [], // clicked cells as [rowIndex, colIndex] pairs
            rowCounts: {},    // rowIndex -> number of selected cells in that row
            colCounts: {}     // colIndex -> number of selected cells in that column
        };
    },
    computed: {
        horizontalNumberToWin() {
            return this.playerCard[0].length;
        },
        verticalNumberToWin() {
            return this.playerCard.length;
        },
        diagonalNumberToWin() {
            return this.playerCard.length;
        },
        // True when either full diagonal is selected.
        isDiagonal() {
            if (this.selectedVals.length < this.diagonalNumberToWin) return false;
            // top-left to bottom-right: [0, 0], [1, 1], [2, 2], ...
            const isTLtoBR = this.selectedVals.filter(val => val[0] === val[1]);
            if (isTLtoBR.length >= this.diagonalNumberToWin) return true;
            // top-right to bottom-left: [0, 3], [1, 2], [2, 1], ...
            const rowLen = this.playerCard[0].length;
            const isTRtoBL = this.selectedVals.filter(val => {
                return (rowLen - 1) - val[0] === val[1];
            });
            return isTRtoBL.length >= this.diagonalNumberToWin;
        },
        isHorizontal() {
            if (this.selectedVals.length < this.horizontalNumberToWin) return false;
            return Object.values(this.rowCounts).some(row => row >= this.horizontalNumberToWin);
        },
        isVertical() {
            if (this.selectedVals.length < this.verticalNumberToWin) return false;
            return Object.values(this.colCounts).some(col => col >= this.verticalNumberToWin);
        },
    },
    methods: {
        onCardClicked(coord) {
            this.selectedVals.push(coord);
            this.updateCounts(coord);
        },
        // A cell is disabled once it has been selected.
        cardDisabled(coord) {
            return this.selectedVals.some(vals => vals[0] === coord[0] && vals[1] === coord[1]);
        },
        updateCounts(coord) {
            const [rowIndex, colIndex] = coord;
            // Vue 2 cannot observe property ADDITION on plain objects, so
            // assigning a brand-new counter key directly would not trigger
            // the computed flags to re-evaluate; route it through $set.
            this.$set(this.rowCounts, rowIndex, (this.rowCounts[rowIndex] || 0) + 1);
            this.$set(this.colCounts, colIndex, (this.colCounts[colIndex] || 0) + 1);
        }
    }
});
// Root instance: mounts the bingo card with a fixed 4x4 board of numbers.
new Vue({
el: '#app',
components: {
Card,
},
data: {
playerCard: [
[9, 13, 28, 24],
[11, 22, 15, 43],
[54, 5, 37, 4],
[27, 40, 12, 36]
],
},
})
/* Center the app horizontally. */
#app {
display: flex;
flex-direction: row;
justify-content: center;
}
/* 4x4 grid of equally-sized bingo cells. */
.board {
max-width: 500px;
display: grid;
grid-template-columns: 1fr 1fr 1fr 1fr;
grid-template-rows: 1fr 1fr 1fr 1fr;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/vue/2.5.17/vue.min.js"></script>
<div id="app">
<card :player-card="playerCard" />
</div>
<template id="card">
<div>
<p>Horizontal: {{ isHorizontal }}</p>
<p>Vertical: {{ isVertical }}</p>
<p>Diagonal: {{ isDiagonal }}</p>
<div class="board">
<template v-for="(row, rowIndex) in playerCard">
<button
v-for="(col, colIndex) in row"
:key="col"
:disabled="cardDisabled([rowIndex, colIndex])"
@click="onCardClicked([rowIndex, colIndex])">
<!-- '@' is the v-on shorthand; the original paste mangled it to '#',
     which Vue parses as v-slot shorthand and never binds the click. -->
{{ col }}
</button>
</template>
</div>
</div>
</template>
First, I think you should add isClick to check whether the user has clicked the card or not.
[
[
{ Number: 9 , isClick: false },
{ Number: 13 , isClick: false },
{ Number: 28 , isClick: false },
{ Number: 24 , isClick: false }
],
// other array ...
]
Second, bind your data into the HTML (you will probably need two nested v-for loops to achieve it). When the user clicks a card, set isClick = true.
Third, write your own logic to check if a user has clicked a 4x4 line.
I see animations for Chartist line graphs (http://gionkunz.github.io/chartist-js/examples.html#example-line-path-animation), but none where the line literally draws itself out from left to right. Is that possible?
Not my solution, but it seems to do what you want.
HTML
<!-- Chartist renders its SVG into this container (selector '.ct-chart') -->
<div class="ct-chart ct-golden-section"></div>
CSS
// Sass '@' directives (@mixin/@include/@keyframes) were mangled to '#'
// in the original paste; restored below so the stylesheet compiles.
$color1: #ada8b6; //rgba(173, 168, 182, 100)
$color2: #ffeedb; //rgba(255, 238, 219, 100)
$color3: #4c3b4d; //rgba(76, 59, 77, 100)
$color4: #a53860; //rgba(165, 56, 96, 100)
$color5: #61c9a8; //rgba(97, 201, 168, 100)
body {
  width: 100vw;
  height: 100vh;
  background: #111;
}
.ct-chart {
  width: 90vw;
  max-width: 1100px;
  height: 375px;
  margin: 5vh 6.5vw;
  svg {
    width: 100%;
  }
}
.ct-grids line {
  stroke: $color3;
  opacity: 0.4;
}
.ct-labels span {
  color: $color3;
}
// Draw-on effect: dash the stroke with a single dash the length of the
// whole path, offset it fully out of view, then animate the offset back
// to zero so the line appears to draw itself left to right.
@mixin pathseries($length, $delay, $strokecolor) {
  stroke-dasharray: $length;
  stroke-dashoffset: $length;
  animation: draw 1s $delay ease both;
  fill: none;
  stroke: $strokecolor;
  opacity: 0.8;
}
// $length values come from path.getTotalLength() (see the JS below).
.ct-series-a {
  @include pathseries(1093, 0s, $color1);
}
.ct-series-b {
  @include pathseries(1665, 0.25s, $color5);
}
.ct-series-c {
  @include pathseries(1644, 0.5s, $color2);
}
.ct-series-d {
  @include pathseries(1540, 0.75s, $color4);
}
@keyframes draw {
  to {
    stroke-dashoffset: 0;
  }
}
JS
// Build the line chart: 8 x-axis labels, four data series, and a fixed
// y-range of [-3, 20].
const chartData = {
  labels: [1, 2, 3, 4, 5, 6, 7, 8],
  series: [
    [11, 12, 13, 11, 12, 10, 11, 10],
    [12, 11, 17, -1, 0, 18, -2, 8],
    [0, 8, 12, 1, 15, 3, 18, 1],
    [3, 2, 12, 15, 16, 3, 18, -3]
  ]
};
const chartOptions = {
  high: 20,
  low: -3,
  fullWidth: true,
  // As this is axis specific we need to tell Chartist to use whole numbers only on the concerned axis
  axisY: {
    onlyInteger: true,
    offset: 20
  }
};
new Chartist.Line('.ct-chart', chartData, chartOptions);
// After the chart has had time to render, log the first series' total
// path length — the value used for stroke-dasharray in the CSS above.
setTimeout(() => {
  const seriesPath = document.querySelector('.ct-series-a path');
  const pathLength = seriesPath.getTotalLength();
  console.log(pathLength);
}, 3000);