First we will use a NodeJS console app and the Azure Cognitive Services Bing Search API to scrape an image dataset in minutes. Then we will use the Azure Cognitive Services Custom Vision SDK to automagically create the Custom Vision project, upload and tag the images, and train and test the model from the Python Custom Vision SDK in a Jupyter notebook.
Prerequisites
- VS Code
- NodeJS
- Anaconda
- Create a free Azure account
1. Create Azure Resources
Create a Bing Search API Resource in Azure
- Go to portal.azure.com
- Select "Create Resource"
- Select "AI + Machine Learning"
- Select "Bing Search"
- Complete the required fields
- Select "Create"
- When the deployment succeeds you will get a notification in the top right corner.
- Select "Go to resource"
- Select "Keys" from the left hand navigation for the resource.
- We will come back to the
Keys
when we are ready to call it from our App.
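If you want to sanity-check the key before wiring up the console app, you can hit the same endpoint the Node script uses from any HTTP client. Here is a minimal, optional sketch in Python using the requests library (the actual scraping happens in the Node script in step 3):
import requests
subscription_key = "add your key here"  # the key from the Keys blade above
response = requests.get(
    "https://api.cognitive.microsoft.com/bing/v7.0/images/search",
    headers={"Ocp-Apim-Subscription-Key": subscription_key},
    params={"q": "cardinal"},
)
# A 200 status plus a non-empty "value" list means the key works
print(response.status_code, len(response.json().get("value", [])))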
Create Custom Vision Resource in Azure
- Go to portal.azure.com
- Select "Create Resource"
- Search "Custom Vision"
- Select "Create"
- Complete the required fields
- Select "Create"
- When the deployment succeeds you will get a notification in the top right corner.
- Select "Go to resource"
2. Clone the Repo
git clone https://github.com/cassieview/ImageScraperAndCustomVisionMagic.git
3. Get the Data with Bing Search and the NodeJS Console App
We are now ready to run the app and scrape our dataset using the Bing Search API.
- Update the key at the top of the script to the key from the Bing Search Resource you created
const subscriptionKey = "add your key here";
- Update the searchTermList in the script to scrape the type of images you need for the custom vision model
var searchTermList = ["cardinal", "bluejay"];
- To run the app, open the VS Code terminal (CTRL+SHIFT+`) and use the following command
node index.js
- Here is the full index.js script file:
"use strict";
var https = require("https");
var fs = require("fs");
var download = require("image-downloader");
// Replace the subscriptionKey string value with your valid subscription key.
const subscriptionKey = "add your key here";
// Verify the endpoint URI. At this writing, only one endpoint is used for Bing
// search APIs. In the future, regional endpoints may be available. If you
// encounter unexpected authorization errors, double-check this host against
// the endpoint for your Bing Search instance in your Azure dashboard.
const host = "api.cognitive.microsoft.com";
const path = "/bing/v7.0/images/search";
//filter by license
const filter = "&qft=+filterui:license-L2_L3_L4&FORM=IRFLTR";
var searchAndSaveImages = (search) => {
console.log("Searching images for: " + search);
//set global to current search term
let request_params = {
method: "GET",
hostname: host,
path: path + "?q=" + encodeURIComponent(`${search}`) + filter,
headers: {
"Ocp-Apim-Subscription-Key": subscriptionKey,
},
};
var req = https.request(request_params, (response) => {
console.log("statusCode:", response.statusCode);
let body = "";
response.on("data", (d) => {
body += d;
});
response.on("end", () => {
let imageResults = JSON.parse(body);
console.log(`Image result count: ${imageResults.value.length}`);
if (imageResults.value.length > 0) {
//create directory folder for current search term
let rootDir = `./birds`;
let searchDir = `${rootDir}/${search}`;
let testDir = `${searchDir}/test`;
let trainDir = `${searchDir}/train`;
if (!fs.existsSync(rootDir)) {
fs.mkdirSync(rootDir);
}
if (!fs.existsSync(searchDir)) {
fs.mkdirSync(searchDir);
fs.mkdirSync(testDir);
fs.mkdirSync(trainDir);
}
//create count so we can split the results between test and train folder
let count = 0;
//save image results
imageResults.value.forEach((imageResult) => {
let destDir = count % 3 == 0 ? testDir : trainDir;
let options = {
url: imageResult.contentUrl,
dest: `./${destDir}/${imageResult.imageId}.png`,
};
download
.image(options)
.then(({ filename, image }) => {
console.log("File saved to", filename);
})
.catch((err) => {
console.error(err);
});
count++;
});
} else {
console.log("Couldn't find image results!");
}
});
response.on("error", function (e) {
console.log("Error: " + e.message);
});
});
req.end();
};
let main = () => {
//bird species search term list
var searchTermList = ["cardinal", "bluejay"];
//loop search terms
searchTermList.forEach((term) => {
searchAndSaveImages(term);
});
};
//check to make sure the subscription key was updated and kick off main func
if (subscriptionKey.length === 32) {
main();
} else {
console.log("Invalid Bing Search API subscription key!");
console.log("Please paste yours into the source code.");
}
- Now we have a dataset split out by tag name with a train and test folder for each.
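Based on the directories the script creates (a birds root folder, one folder per search term, and a test and train subfolder for each), the layout on disk looks roughly like this:
birds/
  cardinal/
    test/
    train/
  bluejay/
    test/
    train/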
4. Create the Custom Vision Model
Let's run the Python Jupyter notebook to create a project, create tags, upload the data, and train and test the model. NOTE: The Custom Vision SDK is available in the following languages: Python, .NET, Node, Go and Java.
- Open the Anaconda Prompt, navigate to the notebook directory you cloned from GitHub, and run the jupyter notebook command. Open the CustomVisionSdkUpload.ipynb notebook and run the following command to install the package.
!pip install azure-cognitiveservices-vision-customvision
- Import the SDK classes, then update the endpoint and key with the values from the resource you created in Azure.
from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateEntry
ENDPOINT = "https://<resource-name>.cognitiveservices.azure.com/"
# Replace with a valid key
training_key = "<training-key>"
publish_iteration_name = "classifyBirdModel"
prediction_resource_id ="/subscriptions/<sub-id>/resourceGroups/<resource-group-name>/providers/Microsoft.CognitiveServices/accounts/<resource-name-Prediction>"
trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)
# Create a new project
print ("Creating project...")
# Create the project with either a default or a compact domain. Only compact models can be exported; default models can only be deployed within Custom Vision.
#project = trainer.create_project("Bird Classification")
project = trainer.create_project(name="Bird Classification Compact", domain_id="General (compact)", classification_type="Multiclass", target_export_platforms="Basic platforms")
print("Project created!")
Go to customvision.ai if you would like to validate in the UI that the project was created.
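Depending on your SDK version, create_project may expect a domain GUID rather than a display name for domain_id. If the call above complains about the domain, here is a minimal sketch of looking up the compact classification domain with get_domains() first, reusing the same trainer client:
# Look up the "General (compact)" classification domain and pass its GUID
compact_domain = next(
    d for d in trainer.get_domains()
    if d.type == "Classification" and d.name == "General (compact)"
)
project = trainer.create_project(
    name="Bird Classification Compact",
    domain_id=compact_domain.id,
    classification_type="Multiclass",
)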
- Next, create a tag list from the folder names. You will need to update the directory path for your project.
#Create Tag List from folders in bird directory
import os
os.chdir('./bird_photos/train')
tags = [name for name in os.listdir('.') if os.path.isdir(name)]
print(tags)
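With the two search terms used in the Node script, and assuming your train directory contains one subfolder per tag, the printed list will look something like this (the order depends on your file system):
['bluejay', 'cardinal']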
- We will create three methods that we will loop through for each tag. The first one creates the tag in Custom Vision.
def createTag(tag):
    result = trainer.create_tag(project.id, tag)
    print(f'{tag} created with id: {result}')
    return result.id
- Next, we take the tag and the returned tag id to create an image upload list.
def createImageList(tag, tag_id):
    #set directory to current tag
    base_image_url = f"./{tag}/"
    photo_name_list = os.listdir(base_image_url)
    image_list = []
    for file_name in photo_name_list:
        with open(base_image_url+file_name, "rb") as image_contents:
            image_list.append(ImageFileCreateEntry(name=base_image_url+file_name, contents=image_contents.read(), tag_ids=[tag_id]))
    return image_list
- Then we take the created image list and upload it to Custom Vision.
def uploadImageList(image_list):
    upload_result = trainer.create_images_from_files(project.id, images=image_list)
    if not upload_result.is_batch_successful:
        print("Image batch upload failed.")
        for image in upload_result.images:
            print("Image status: ", image.status)
        exit(-1)
- Now that we have created our methods, let's loop through each tag and upload in batches. This could take a little while depending on the number of images and tags you are using.
#loop through the tags: create each tag, build its image list, and upload in batches of 25
for tag in tags:
    tag_id = createTag(tag)
    print(f"tag creation done with tag id {tag_id}")
    image_list = createImageList(tag, tag_id)
    print("image_list created with length " + str(len(image_list)))
    #break list into lists of 25 and upload in batches
    for i in range(0, len(image_list), 25):
        batch = image_list[i:i + 25]
        print(f'Upload started for batch {i} total items {len(batch)} for tag {tag}...')
        uploadImageList(batch)
        print(f"Batch {i} Image upload completed. Total uploaded {len(batch)} for tag {tag}")
- Train the model
import time
print ("Training...")
iteration = trainer.train_project(project.id)
while (iteration.status != "Completed"):
    iteration = trainer.get_iteration(project.id, iteration.id)
    print ("Training status: " + iteration.status)
    time.sleep(5)
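Before publishing, it can be handy to check how the iteration performed. A minimal sketch, assuming your SDK version exposes get_iteration_performance on the same trainer client (the 0.5 threshold is just an example value):
# Print overall precision and recall for the trained iteration
performance = trainer.get_iteration_performance(project.id, iteration.id, threshold=0.5)
print("Precision: {0:.2f}%".format(performance.precision * 100))
print("Recall: {0:.2f}%".format(performance.recall * 100))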
- Publish the endpoint
# The iteration is now trained. Publish it to the project endpoint
trainer.publish_iteration(project.id, iteration.id, publish_iteration_name, prediction_resource_id)
print ("Done!")
- Now we can test the endpoint. You will need to update the key and endpoint with the values from the prediction resource that was created.
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
prediction_key = "<prediction-key>"
prediction_endpoint ="https://<resource-name-prediction>.cognitiveservices.azure.com/"
# Now there is a trained endpoint that can be used to make a prediction
predictor = CustomVisionPredictionClient(prediction_key, endpoint=prediction_endpoint)
- Update the directory to the test/validation image
#get current directory
os.getcwd()
#move back
os.chdir("../")
val_image_url = f"./test/American Crow/img.jpg"
- Test the endpoint and print the results
with open(val_image_url, "rb") as image_contents:
    results = predictor.classify_image(project.id, publish_iteration_name, image_contents.read())
    # Display the results.
    for prediction in results.predictions:
        print("\t" + prediction.tag_name +
              ": {0:.2f}%".format(prediction.probability * 100))
... and DONE! We scraped an image dataset with Bing Search and created a model with the Azure Custom Vision SDK!
Check out the other Cognitive Services and Azure Machine Learning to learn more about building AI/ML on Azure!