Azure Machine Learning (Azure ML)

Azure Machine Learning is Microsoft's managed ML platform for training, tracking, deploying, and monitoring models. It covers the full classical-ML lifecycle — data preparation, AutoML, hyperparameter sweeps, distributed training, managed endpoints, and MLOps pipelines — and underpins Azure AI Foundry's generative-AI tooling.


Core Concepts:

- Workspace — the top-level Azure resource that groups assets, compute, and jobs.
- Compute — managed clusters and instances that run training and inference.
- Data assets & Environments — versioned datasets and reproducible runtime images.
- Jobs & Pipelines — units of execution, from a single script to a multi-step DAG.
- Endpoints — managed online/batch deployment targets for trained models.


Examples

1. Submit a Training Job (Python SDK v2)


from azure.ai.ml import MLClient, command, Input
from azure.ai.ml.constants import AssetTypes
from azure.identity import DefaultAzureCredential

# Connect to the workspace (fill in your subscription GUID and resource group).
ml = MLClient(
    DefaultAzureCredential(),
    subscription_id="",
    resource_group_name="",
    workspace_name="my-aml-ws",
)

# A command job: runs train.py from ./src on the named compute cluster.
job = command(
    code="./src",                                   # folder with train.py
    # ${{inputs.data}} is an Azure ML expression, resolved at runtime to the
    # mounted path of the "data" input below.  (A bare "$" is not valid.)
    command="python train.py --data ${{inputs.data}} --epochs 10",
    environment="azureml://registries/azureml/environments/sklearn-1.5/versions/latest",
    compute="cpu-cluster",
    inputs={"data": Input(type=AssetTypes.URI_FOLDER, path="azureml:titanic:1")},
    experiment_name="titanic-gbm",
    display_name="gbm-sweep-base",
)
returned = ml.jobs.create_or_update(job)
print(returned.studio_url)  # link to the run in Azure ML studio
  


2. Hyperparameter Sweep


from azure.ai.ml.sweep import Choice, Uniform

# SDK v2 binds the search space by calling the command job with distribution
# objects as input values, then converting it to a sweep job with .sweep().
# NOTE(review): train.py must accept these as inputs referenced in the job's
# command (e.g. --lr ${{inputs.learning_rate}}) for the values to take effect.
job_for_sweep = job(
    learning_rate=Uniform(min_value=0.01, max_value=0.3),
    max_depth=Choice(values=[3, 5, 7, 9]),
)

sweep_job = job_for_sweep.sweep(
    compute="cpu-cluster",
    sampling_algorithm="bayesian",
    primary_metric="val_auc",
    goal="Maximize",
)
# Trial budgets live on the limits object, not in sweep() itself.
sweep_job.set_limits(max_total_trials=20, max_concurrent_trials=4)

ml.jobs.create_or_update(sweep_job)
  


3. AutoML for Tabular Classification


from azure.ai.ml import automl, Input
from azure.ai.ml.constants import AssetTypes

# MLTable asset holding the labelled training rows.
train_table = Input(type=AssetTypes.MLTABLE, path="azureml:churn-train:1")

# Configure an AutoML tabular-classification experiment: AutoML tries many
# model/featurization combinations and ranks them by the primary metric.
automl_job = automl.classification(
    experiment_name="churn-automl",
    compute="cpu-cluster",
    training_data=train_table,
    target_column_name="churned",
    primary_metric="AUC_weighted",
    n_cross_validations=5,
    enable_model_explainability=True,
)

# Budget: stop after 60 minutes or 30 trials, running at most 4 at a time.
automl_job.set_limits(timeout_minutes=60, max_trials=30, max_concurrent_trials=4)

ml.jobs.create_or_update(automl_job)
  


4. Deploy to a Managed Online Endpoint


from azure.ai.ml.entities import (
    CodeConfiguration,
    ManagedOnlineDeployment,
    ManagedOnlineEndpoint,
)

# Create (or update) the endpoint shell first; deployments attach to it.
endpoint = ManagedOnlineEndpoint(name="churn-endpoint", auth_mode="key")
ml.online_endpoints.begin_create_or_update(endpoint).result()

deployment = ManagedOnlineDeployment(
    name="blue",
    endpoint_name="churn-endpoint",
    # Registered assets are referenced by "azureml:<name>:<version>" strings;
    # Model(path=...) is for pointing at local/cloud files, not registry names.
    model="azureml:churn-gbm:3",
    environment="azureml:churn-inference-env:1",
    # A custom (non-MLflow) deployment needs a scoring entry script.
    code_configuration=CodeConfiguration(code="./score", scoring_script="score.py"),
    instance_type="Standard_DS3_v2",
    instance_count=1,
)
ml.online_deployments.begin_create_or_update(deployment).result()

# Route 100% of traffic to "blue" by updating the endpoint object and
# re-submitting it (there is no separate update operation for traffic).
endpoint.traffic = {"blue": 100}
ml.online_endpoints.begin_create_or_update(endpoint).result()
  


5. Pipeline Job — Prep → Train → Register


from azure.ai.ml import dsl, Input, Output

# Step definitions.  ${{inputs.*}} / ${{outputs.*}} are Azure ML expressions,
# resolved at runtime to the mounted paths of the declared inputs/outputs
# (a bare "$" is not valid command syntax).
prep = command(
    code="./prep",
    command="python prep.py --in ${{inputs.raw}} --out ${{outputs.clean}}",
    environment="...",
    inputs={"raw": Input(type="uri_folder")},
    outputs={"clean": Output(type="uri_folder")},
)
train = command(
    code="./train",
    # The declared "model" output must appear in the command for the step
    # to know where to write the trained model.
    command="python train.py --data ${{inputs.data}} --model ${{outputs.model}}",
    environment="...",
    inputs={"data": Input(type="uri_folder")},
    outputs={"model": Output(type="uri_folder")},
)

@dsl.pipeline(compute="cpu-cluster")
def churn_pipeline(raw_data):
    """Prep -> train; expose the trained-model folder as a pipeline output."""
    prep_step = prep(raw=raw_data)
    train_step = train(data=prep_step.outputs.clean)
    return {"model": train_step.outputs.model}

pipeline_job = churn_pipeline(raw_data=Input(type="uri_folder", path="azureml:raw-churn:1"))
ml.jobs.create_or_update(pipeline_job)
  


When to Use Azure ML vs. Foundry vs. OpenAI Service: