import mlflow
import mlflow.sklearn
from sklearn.metrics import roc_auc_score
# Train a churn model and record the run in MLflow: hyperparameters,
# validation ROC-AUC, and the fitted model artifact (also registered
# under a stable name in the model registry).
mlflow.set_experiment('customer-churn')

with mlflow.start_run(run_name='hist_gbm_v3'):
    # Log hyperparameters plus the feature-set version so the run is
    # reproducible and searchable later.
    mlflow.log_params({
        'learning_rate': 0.05,
        'max_depth': 6,
        'features_version': '2026_04_07',
    })

    # NOTE(review): `model`, `X_train`, `y_train`, `X_valid`, `y_valid`
    # must be defined earlier in the notebook/script — not visible in
    # this chunk; confirm they exist before this point.
    model.fit(X_train, y_train)

    # Probability of the positive (churn) class; roc_auc_score expects
    # scores for the positive label, hence column index 1.
    probabilities = model.predict_proba(X_valid)[:, 1]
    auc = roc_auc_score(y_valid, probabilities)
    mlflow.log_metric('roc_auc', auc)

    # Persist the fitted estimator as a run artifact and register it in
    # the MLflow model registry.
    mlflow.sklearn.log_model(
        model,
        artifact_path='model',
        registered_model_name='customer_churn_model',
    )
If experiments matter, they should be searchable after the notebook is closed. MLflow gives me parameter tracking, metric history, artifact storage, and a lightweight model registry without much ceremony. It is one of the fastest ways to make a small ML team more disciplined.