#!/usr/bin/env python # # file: 09_main.py # # revision history: # # 20260203 (SP): initial version for PCA/LDA/QDA demo on set_13 #------------------------------------------------------------------------------ # import system modules # import os import sys import urllib.request import numpy as np import matplotlib.pyplot as plt #------------------------------------------------------------------------------ # # global variables are listed here # #------------------------------------------------------------------------------ # set the filename using basename # __FILE__ = os.path.basename(__file__) # dataset location (set_13) # DEF_BASE_URL = "https://isip.piconepress.com/courses/temple/ece_8527/resources/data/set_13/" DEF_TRAIN_FILE = "train_v01.csv" DEF_EVAL_FILE = "eval_v01.csv" # local cache directory # DEF_DATA_DIR = "./set13_data" # numerical stability # DEF_EPS = 1.0e-6 # grid settings for decision boundary plots # DEF_GRID_N = 400 # output plot # DEF_OUT_PLOT = "set13_pca_lda_qda_demo.png" #------------------------------------------------------------------------------ # model names (alphabetical) #------------------------------------------------------------------------------ MDL_PCA = "PCA" MDL_LDA = "LDA" MDL_QDA = "QDA" #------------------------------------------------------------------------------ # model dictionary key constants (alphabetical) #------------------------------------------------------------------------------ MDL_KEY_B = "b" MDL_KEY_CLS = "classes" MDL_KEY_COV = "cov" MDL_KEY_INV_COV = "inv_cov" MDL_KEY_INV_SIG = "inv_sigma" MDL_KEY_LOGDET = "logdet" MDL_KEY_MU = "mu" MDL_KEY_MUS = "mus" MDL_KEY_PARAMS = "params" MDL_KEY_PRIOR = "prior" MDL_KEY_PRIORS = "priors" MDL_KEY_SIG = "sigma" MDL_KEY_W = "w" #------------------------------------------------------------------------------ # # functions are listed here # #------------------------------------------------------------------------------ def download_file(url, outpath): """method: download_file arguments: url: remote URL (str) outpath: local file path (str) return: status: True on success (bool) description: Download url -> outpath if outpath does not already exist. """ os.makedirs(os.path.dirname(outpath), exist_ok=True) # skip download if file exists and is non-empty # if os.path.exists(outpath) and os.path.getsize(outpath) > 0: return True try: print("Downloading: %s" % url) # use urlretrieve for simplicity # urllib.request.urlretrieve(url, outpath) except Exception as e: print("Error: download failed (%s): %s" % (url, str(e))) return False # exit gracefully # return True def load_set13_csv(csv_path): """method: load_set13_csv arguments: csv_path: path to csv file (str) return: X: features, shape (N,2) (np.ndarray) y: labels, shape (N,) (np.ndarray) description: Loads set_13 CSV format: y, x1, x2 with comment lines starting with '#'. """ # load data from CSV, skip comment lines # data = np.loadtxt(csv_path, delimiter=",", comments="#") # if unexpected shape, raise error # if data.ndim != 2 or data.shape[1] < 3: raise ValueError("Unexpected data shape from %s" % csv_path) # y is first column denoted as class label (0 or 1), # rest are features X[x1,x2]) # y = data[:, 0].astype(int) X = data[:, 1:3].astype(float) # exit gracefully # return X, y def pca_fit(X): """method: pca_fit arguments: X: data matrix (N,D) return: mu: mean (D,) eigvals: sorted eigenvalues (D,) eigvecs: sorted eigenvectors (D,D) columns are components description: Standard PCA via covariance eigendecomposition. """ # calculate mean, axis=0 for column-wise mean # mu = np.mean(X, axis=0) # center data # Xc = X - mu # calculate covariance matrix # cov = np.cov(Xc, rowvar=False, bias=False) # get eigenvalues/vectors # eigvals, eigvecs = np.linalg.eigh(cov) # sort eigenvalues/vectors in descending order # idx = np.argsort(eigvals)[::-1] # get sorted results # eigvals = eigvals[idx] eigvecs = eigvecs[:, idx] # exit gracefully # return mu, eigvals, eigvecs def pca_project(X, mu, v): """method: pca_project arguments: X: data matrix (N,D) mu: mean (D,) v: unit vector (D,) return: z: projected data (N,) """ # project data onto v using centered data # z = (X - mu) @ v # exit gracefully # return z def train_lda(X, y, eps=DEF_EPS): """method: train_lda arguments: X: features (N,D) y: labels (N,) eps: diagonal regularization (float) return: model: dict containing LDA parameters description: LDA: Gaussian class-conditional with shared covariance. Uses log discriminant: δ_k(x) = x^T Σ^{-1} μ_k - 0.5 μ_k^T Σ^{-1} μ_k + log π_k """ # get unique classes # classes = np.unique(y) # number of samples (N) and dimensions (D) # N, D = X.shape mus = {} priors = {} # pooled within-class covariance # Sw = np.zeros((D, D), dtype=float) # for each class, calculate the shared covariance # for c in classes: # get class-specific data # Xc = X[y == c] # compute prior # priors[int(c)] = float(Xc.shape[0]) / float(N) # compute class mean of axis=0 (column-wise) # mu = np.mean(Xc, axis=0) # store class mean # mus[int(c)] = mu # accumulate scatter # if Xc.shape[0] > 1: # unbiased covariance scaled by (n-1) # Sw += np.cov(Xc, rowvar=False, bias=False) * (Xc.shape[0] - 1) # compute shared covariance by pooling # sigma = Sw / float(N - classes.size) # regularize covariance by adding eps to diagonal because without # regularization covariance may be singular (non-invertible) # sigma = sigma + eps * np.eye(D) # compute inverse covariance # invSigma = np.linalg.inv(sigma) # precompute linear boundary parameters w, b for 2-class case # c0, c1 = int(classes[0]), int(classes[1]) # get class means # mu0, mu1 = mus[c0], mus[c1] # compute w which is direction vector # w = invSigma @ (mu1 - mu0) # compute b which is bias term # b = (-0.5 * (mu1 @ invSigma @ mu1 - mu0 @ invSigma @ mu0) + np.log(priors[c1] / priors[c0])) # assemble model dictionary # model = { MDL_KEY_B: b, MDL_KEY_CLS: (c0, c1), MDL_KEY_INV_SIG: invSigma, MDL_KEY_MUS: mus, MDL_KEY_PRIORS: priors, MDL_KEY_SIG: sigma, MDL_KEY_W: w } # exit gracefully # return model def lda_predict(model, X): """method: lda_predict arguments: model: LDA model dict X: features (N,D) return: yhat: predicted labels (N,) """ # get model parameters # c0, c1: class labels # c0, c1 = model[MDL_KEY_CLS] # invS : inverse shared covariance # invS = model[MDL_KEY_INV_SIG] # mu0, mu1: class means # mu0 = model[MDL_KEY_MUS][c0] mu1 = model[MDL_KEY_MUS][c1] # p0, p1: class priors # p0 = model[MDL_KEY_PRIORS][c0] p1 = model[MDL_KEY_PRIORS][c1] # compute discriminants for each class # d0 = X @ invS @ mu0 - 0.5 * (mu0 @ invS @ mu0) + np.log(p0) d1 = X @ invS @ mu1 - 0.5 * (mu1 @ invS @ mu1) + np.log(p1) # assign class based on larger discriminant # yhat = np.where(d1 > d0, c1, c0).astype(int) # exit gracefully # return yhat def train_qda(X, y, eps=DEF_EPS): """method: train_qda arguments: X: features (N,D) y: labels (N,) eps: diagonal regularization (float) return: model: dict containing QDA parameters description: QDA: Gaussian class-conditional with class-specific covariance. Uses log discriminant: δ_k(x) = -0.5 log|Σ_k| -0.5 (x-μ_k)^T Σ_k^{-1} (x-μ_k) + log π_k """ # get unique classes # classes = np.unique(y) # number of samples (N) and dimensions (D) # N, D = X.shape # initialize parameter dict # params = {} # for each class, calculate class-specific parameters (mean, cov, prior) # for c in classes: # get class-specific data # Xc = X[y == c] # compute class prior # prior = float(Xc.shape[0]) / float(N) # compute class mean of axis=0 (column-wise) # mu = np.mean(Xc, axis=0) # compute class covariance with regularization # cov = np.cov(Xc, rowvar=False, bias=False) + eps * np.eye(D) # compute inverse covariance # invcov = np.linalg.inv(cov) # compute log-determinant of covariance using slogdet for # numerical stability sign, logdet = np.linalg.slogdet(cov) # if sign is non-positive, raise error # if sign <= 0: raise ValueError("Non-positive definite covariance for class %d" % int(c)) # assemble class parameters for class c # params[int(c)] = { MDL_KEY_COV: cov, MDL_KEY_INV_COV: invcov, MDL_KEY_LOGDET: float(logdet), MDL_KEY_MU: mu, MDL_KEY_PRIOR: prior } # assemble model dictionary # model = { MDL_KEY_CLS: (int(classes[0]), int(classes[1])), MDL_KEY_PARAMS: params } # exit gracefully # return model def qda_score(p, X): """method: score arguments: p: class parameters dict X: features (N,D) return: s: log discriminant scores for all samples (N,) description: Compute log discriminant score for QDA class parameters. """ # calculate difference from mean # diff = X - p[MDL_KEY_MU] # calculate quadratic term # quad = np.sum((diff @ p[MDL_KEY_INV_COV]) * diff, axis=1) # calculate log discriminant score for all samples # s = -0.5 * (p[MDL_KEY_LOGDET] + quad) + np.log(p[MDL_KEY_PRIOR]) # exit gracefully # return s def qda_predict(model, X): """method: qda_predict arguments: model: QDA model dict X: features (N,D) return: yhat: predicted labels (N,) """ # get model parameters # c0, c1: class labels # p0, p1: class parameters # c0, c1 = model[MDL_KEY_CLS] p0 = model[MDL_KEY_PARAMS][c0] p1 = model[MDL_KEY_PARAMS][c1] # compute discriminant scores for each class # s0 = qda_score(p0, X) s1 = qda_score(p1, X) # assign class based on larger discriminant score # yhat = np.where(s1 > s0, c1, c0).astype(int) # exit gracefully # return yhat def confusion_matrix(y_true, y_pred, classes=(0, 1)): """method: confusion_matrix arguments: y_true: ground truth labels (N,) y_pred: predicted labels (N,) classes: tuple of class labels (2,) return: cm: 2x2 confusion matrix """ # initialize confusion matrix # c0, c1 = classes cm = np.zeros((2, 2), dtype=int) # calculate confusion matrix entries # cm[0, 0] = np.sum((y_true == c0) & (y_pred == c0)) cm[0, 1] = np.sum((y_true == c0) & (y_pred == c1)) cm[1, 0] = np.sum((y_true == c1) & (y_pred == c0)) cm[1, 1] = np.sum((y_true == c1) & (y_pred == c1)) # exit gracefully # return cm def pca_1d_classify(z_train, y_train, z_eval, mode="lda", eps=DEF_EPS): """method: pca_1d_classify arguments: z_train: 1D projections (N,) y_train: labels (N,) z_eval: 1D projections (M,) mode: "lda" (shared var) or "qda" (per-class var) eps: numerical stability return: yhat: predicted labels (M,) """ # get unique classes # classes = np.unique(y_train) # class labels # c0, c1 = int(classes[0]), int(classes[1]) # separate projected data by class # z0 = z_train[y_train == c0] z1 = z_train[y_train == c1] # compute class means (mu0, mu1) mu0, mu1 = float(np.mean(z0)), float(np.mean(z1)) # compute class variances (v0, v1) with regularization # v0 = float(np.var(z0, ddof=1)) + eps v1 = float(np.var(z1, ddof=1)) + eps # compute class priors (p0, p1) # p0 = float(z0.size) / float(z_train.size) p1 = float(z1.size) / float(z_train.size) # if LDA, use pooled variance # if mode == "lda": # pooled variance # pooled = ((z0.size - 1) * v0 + (z1.size - 1) * v1) / float(z0.size + z1.size - 2) pooled += eps v0 = pooled v1 = pooled # calculate score using log Gaussian likelihood and log prior # s0 = -0.5 * np.log(2.0 * np.pi * v0) - 0.5 * ((z_eval - mu0) ** 2) / v0 + np.log(p0) s1 = -0.5 * np.log(2.0 * np.pi * v1) - 0.5 * ((z_eval - mu1) ** 2) / v1 + np.log(p1) # assign class based on larger score # yhat = np.where(s1 > s0, c1, c0).astype(int) # exit gracefully # return yhat def plot_demo(Xtr, ytr, Xev, yev, pca_mu, pc1, explained, lda_model, qda_model, pca_acc_lda, lda_acc, qda_acc, outfile): """method: plot_demo description: Side-by-side decision surface plots (Eval) for: (1) PCA (2) LDA (shared covariance) (3) QDA (class-specific covariance) """ # set plot limits, pad by 10% for aesthetics # Xall = np.vstack([Xtr, Xev]) pad_x = 0.10 * (np.max(Xall[:, 0]) - np.min(Xall[:, 0]) + 1e-12) pad_y = 0.10 * (np.max(Xall[:, 1]) - np.min(Xall[:, 1]) + 1e-12) xmin, xmax = np.min(Xall[:, 0]) - pad_x, np.max(Xall[:, 0]) + pad_x ymin, ymax = np.min(Xall[:, 1]) - pad_y, np.max(Xall[:, 1]) + pad_y # get the classes # classes = np.unique(ytr) c0, c1 = int(classes[0]), int(classes[1]) # point colors # pt0 = "tab:blue" pt1 = "tab:orange" # region colors # region0 = "#cfe8f3" # light blue region1 = "#f9d6b5" # light orange # create grid # gx = np.linspace(xmin, xmax, DEF_GRID_N) gy = np.linspace(ymin, ymax, DEF_GRID_N) XX, YY = np.meshgrid(gx, gy) grid = np.stack([XX.ravel(), YY.ravel()], axis=1) # split training points # T0 = Xtr[ytr == c0] T1 = Xtr[ytr == c1] # split eval points # E0 = Xev[yev == c0] E1 = Xev[yev == c1] # common scatter style # scat_kw = dict(s=4, alpha=0.8, linewidths=0) # compute PCA decision surface # pc1u = pc1 / (np.linalg.norm(pc1) + 1e-12) # project training data onto first PC # ztr = (Xtr - pca_mu) @ pc1u z0 = ztr[ytr == c0] z1 = ztr[ytr == c1] # compute class statistics (mean and variance) # mu0 = float(np.mean(z0)) mu1 = float(np.mean(z1)) v0 = float(np.var(z0, ddof=1)) + DEF_EPS v1 = float(np.var(z1, ddof=1)) + DEF_EPS # compute class priors # p0 = float(z0.size) / float(ztr.size) p1 = float(z1.size) / float(ztr.size) # shared variance # v = ((z0.size - 1) * v0 + (z1.size - 1) * v1) / float(z0.size + z1.size - 2) v = float(v) + DEF_EPS # project grid points onto first PC # zg = (grid - pca_mu) @ pc1u # log p(z|class) + log prior # lp0 = -0.5 * np.log(2.0 * np.pi * v) - 0.5 * ((zg - mu0) ** 2) / v + np.log(p0) lp1 = -0.5 * np.log(2.0 * np.pi * v) - 0.5 * ((zg - mu1) ** 2) / v + np.log(p1) # compute posteriors using log-sum-exp for numerical stability # lse = np.logaddexp(lp0, lp1) pca_post1 = np.exp(lp1 - lse).reshape(YY.shape) # get LDA model parameters # invS = lda_model[MDL_KEY_INV_SIG] m0 = lda_model[MDL_KEY_MUS][c0] m1 = lda_model[MDL_KEY_MUS][c1] pr0 = lda_model[MDL_KEY_PRIORS][c0] pr1 = lda_model[MDL_KEY_PRIORS][c1] # compute LDA decision surface (posterior) # d0 = grid @ invS @ m0 - 0.5 * (m0 @ invS @ m0) + np.log(pr0) d1 = grid @ invS @ m1 - 0.5 * (m1 @ invS @ m1) + np.log(pr1) # compute posteriors using log-sum-exp for numerical stability # lse = np.logaddexp(d0, d1) lda_post1 = np.exp(d1 - lse).reshape(YY.shape) # get the QDA model parameters # qc0, qc1 = qda_model[MDL_KEY_CLS] q0 = qda_model[MDL_KEY_PARAMS][qc0] q1 = qda_model[MDL_KEY_PARAMS][qc1] # calculate qda scores on grid # s0 = qda_score(q0, grid) s1 = qda_score(q1, grid) # compute posteriors using log-sum-exp for numerical stability # lse = np.logaddexp(s0, s1) qda_post1 = np.exp(s1 - lse).reshape(YY.shape) # create subplots # fig, axes = plt.subplots(2, 3, figsize=(14, 8), sharex=True, sharey=True) fig.suptitle("Top: Train Set Bottom: Eval Set", y=1.02) def _panel(ax, post1, Xp, yp, subtitle): ax.contourf(XX, YY, post1, levels=[0.0, 0.5, 1.0], colors=[region0, region1], alpha=1.0) ax.contour(XX, YY, post1, levels=[0.5], colors="black", linewidths=2.5) P0 = Xp[yp == c0] P1 = Xp[yp == c1] ax.scatter(P0[:, 0], P0[:, 1], c=pt0, label="Class %d" % c0, **scat_kw) ax.scatter(P1[:, 0], P1[:, 1], c=pt1, label="Class %d" % c1, **scat_kw) ax.set_title(subtitle) ax.set_xlim(xmin, xmax) ax.set_ylim(ymin, ymax) ax.grid(False) # top row: train # _panel(axes[0, 0], pca_post1, Xtr, ytr, MDL_PCA) _panel(axes[0, 1], lda_post1, Xtr, ytr, MDL_LDA) _panel(axes[0, 2], qda_post1, Xtr, ytr, MDL_QDA) # bottom row: eval # _panel(axes[1, 0], pca_post1, Xev, yev, MDL_PCA) _panel(axes[1, 1], lda_post1, Xev, yev, MDL_LDA) _panel(axes[1, 2], qda_post1, Xev, yev, MDL_QDA) # add x1 annotations # for ax in axes[1, :]: ax.set_xlabel("x1") # add x2 annotations # for ax in axes[:, 0]: ax.set_ylabel("x2") # add subplot titles with accuracies # handles, labels = axes[0, 0].get_legend_handles_labels() fig.legend(handles[:2], labels[:2]) plt.tight_layout() plt.savefig(outfile, dpi=200, bbox_inches="tight") print("\nVisualization saved to: %s" % outfile) # exit gracefully # return True # function: main # def main(): """method: main arguments: none return: status: True on success (bool) description: (1) Download set_13 train/eval CSV files. (2) Fit PCA (PC1), LDA, and QDA on training. (3) Evaluate on eval. (4) Plot PCA axis, LDA boundary, QDA boundary. """ print("Running: %s" % __FILE__) # download data # train_url = DEF_BASE_URL + DEF_TRAIN_FILE eval_url = DEF_BASE_URL + DEF_EVAL_FILE # local paths # train_path = os.path.join(DEF_DATA_DIR, DEF_TRAIN_FILE) eval_path = os.path.join(DEF_DATA_DIR, DEF_EVAL_FILE) # check downloads for train/eval # if not download_file(train_url, train_path): return False if not download_file(eval_url, eval_path): return False # load data # Xtr, ytr = load_set13_csv(train_path) Xev, yev = load_set13_csv(eval_path) print("\nLoaded:") print(" train: X=%s y=%s" % (str(Xtr.shape), str(ytr.shape))) print(" eval : X=%s y=%s" % (str(Xev.shape), str(yev.shape))) # PCA (unsupervised dimensionality reduction) # pca_mu, eigvals, eigvecs = pca_fit(Xtr) pc1 = eigvecs[:, 0] explained = float(eigvals[0] / np.sum(eigvals)) # show PCA classification # ztr = pca_project(Xtr, pca_mu, pc1 / np.linalg.norm(pc1)) zev = pca_project(Xev, pca_mu, pc1 / np.linalg.norm(pc1)) yhat_pca_train = pca_1d_classify(ztr, ytr, ztr, mode="lda", eps=DEF_EPS) pca_acc_train = float(np.mean(yhat_pca_train == ytr)) yhat_pca_lda_eval = pca_1d_classify(ztr, ytr, zev, mode="lda", eps=DEF_EPS) pca_acc_lda = float(np.mean(yhat_pca_lda_eval == yev)) # LDA / QDA (supervised classifiers) # train on full 2D data, evaluate on full 2D data, and report accuracies # lda_model = train_lda(Xtr, ytr, eps=DEF_EPS) qda_model = train_qda(Xtr, ytr, eps=DEF_EPS) yhat_lda_train = lda_predict(lda_model, Xtr) yhat_qda_train = qda_predict(qda_model, Xtr) yhat_lda_eval = lda_predict(lda_model, Xev) yhat_qda_eval = qda_predict(qda_model, Xev) lda_acc_train = float(np.mean(yhat_lda_train == ytr)) qda_acc_train = float(np.mean(yhat_qda_train == ytr)) lda_acc = float(np.mean(yhat_lda_eval == yev)) qda_acc = float(np.mean(yhat_qda_eval == yev)) # get confusion matrix for train/eval sets for each method # c0, c1 = lda_model[MDL_KEY_CLS] cm_pca_train = confusion_matrix(ytr, yhat_pca_train, classes=(c0, c1)) cm_lda_train = confusion_matrix(ytr, yhat_lda_train, classes=(c0, c1)) cm_qda_train = confusion_matrix(ytr, yhat_qda_train, classes=(c0, c1)) cm_lda_eval = confusion_matrix(yev, yhat_lda_eval, classes=(c0, c1)) cm_qda_eval = confusion_matrix(yev, yhat_qda_eval, classes=(c0, c1)) cm_pca_eval = confusion_matrix(yev, yhat_pca_lda_eval, classes=(c0, c1)) # calculate angle between PCA PC1 and LDA direction # w = lda_model[MDL_KEY_W] pc1u = pc1 / np.linalg.norm(pc1) wu = w / np.linalg.norm(w) cosang = abs(float(np.dot(pc1u, wu))) cosang = np.clip(cosang, -1.0, 1.0) angle_deg = float(np.degrees(np.arccos(cosang))) print("\nPCA:") print(" explained variance (PC1): %.2f" % explained) print(" angle between PC1 and LDA direction: %.2f deg\n" % angle_deg) print(" PCA->1D then LDA train error rate: %.2f%%\n" % (100*(1-pca_acc_train))) print(" PCA->1D then LDA confusion matrix (rows=true, cols=pred):") print(" pred 0 1") print("true 0 {:4d} {:4d}".format(cm_pca_train[0, 0], cm_pca_train[0, 1])) print(" 1 {:4d} {:4d}".format(cm_pca_train[1, 0], cm_pca_train[1, 1])) print(" \n") print(" PCA->1D then LDA eval error rate: %.2f%%\n" % (100*(1-pca_acc_lda))) print(" PCA->1D then LDA confusion matrix (rows=true, cols=pred):") print(" pred 0 1") print("true 0 {:4d} {:4d}".format(cm_pca_eval[0, 0], cm_pca_eval[0, 1])) print(" 1 {:4d} {:4d}".format(cm_pca_eval[1, 0], cm_pca_eval[1, 1])) print(f"{'-'*40}") print("\nLDA:") print(" train error rate: %.2f%%\n" % (100*(1-lda_acc_train))) print(" confusion matrix (train, rows=true, cols=pred):") print(" pred 0 1") print("true 0 {:4d} {:4d}".format(cm_lda_train[0, 0], cm_lda_train[0, 1])) print(" 1 {:4d} {:4d}".format(cm_lda_train[1, 0], cm_lda_train[1, 1])) print(" \n") print(" eval error rate: %.2f%%\n" % (100*(1-lda_acc))) print(" confusion matrix (eval, rows=true, cols=pred):") print(" pred 0 1") print("true 0 {:4d} {:4d}".format(cm_lda_eval[0, 0], cm_lda_eval[0, 1])) print(" 1 {:4d} {:4d}".format(cm_lda_eval[1, 0], cm_lda_eval[1, 1])) print(f"{'-'*40}") print("\nQDA:") print(" train error rate: %.2f%%\n" % (100*(1-qda_acc_train))) print(" confusion matrix (train, rows=true, cols=pred):") print(" pred 0 1") print("true 0 {:4d} {:4d}".format(cm_qda_train[0, 0], cm_qda_train[0, 1])) print(" 1 {:4d} {:4d}".format(cm_qda_train[1, 0], cm_qda_train[1, 1])) print(" \n") print(" eval error rate: %.2f%%\n" % (100*(1-qda_acc))) print(" confusion matrix (eval, rows=true, cols=pred):") print(" pred 0 1") print("true 0 {:4d} {:4d}".format(cm_qda_eval[0, 0], cm_qda_eval[0, 1])) print(" 1 {:4d} {:4d}".format(cm_qda_eval[1, 0], cm_qda_eval[1, 1])) print(f"{'-'*40}") # plot results # plot_demo(Xtr, ytr, Xev, yev, pca_mu, pc1, explained, lda_model, qda_model, pca_acc_lda, lda_acc, qda_acc, DEF_OUT_PLOT) # exit gracefully # return True if __name__ == '__main__': main()