# Databricks notebook source
import json
import os
from enum import Enum
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import *
from pyspark.dbutils import DBUtils

# COMMAND ----------

# Create a DataFrame by reading the SQL Server table over JDBC.
# The server, database, credentials, and table name are left blank
# as placeholders to be filled in before running.
pipeline_df = (
    spark.read.format("jdbc")
    .option("driver", "com.microsoft.sqlserver.jdbc.SQLServerDriver")
    .option("url", "jdbc:sqlserver://:;database=")
    .option("user", "")
    .option("password", "")
    .option("dbtable", "")
    .load()
)

# COMMAND ----------

# Write the data to a Databricks Delta table; the table is created
# automatically if it does not exist on the first run.
pipeline_df.write.mode("overwrite").option("path", "").saveAsTable("")

# COMMAND ----------

# MAGIC %sql
# MAGIC -- View the loaded data (fill in the table name before running)
# MAGIC select * from
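
# COMMAND ----------

# Hardcoding the JDBC user and password in the notebook is fragile; a common
# alternative is to fetch them from a Databricks secret scope at runtime via
# dbutils.secrets.get. This is a minimal sketch, not this pipeline's actual
# configuration: the scope name "sql-server-scope" and the key names
# "jdbc-user" / "jdbc-password" are hypothetical and must match a secret
# scope you have created yourself.
jdbc_user = dbutils.secrets.get(scope="sql-server-scope", key="jdbc-user")
jdbc_password = dbutils.secrets.get(scope="sql-server-scope", key="jdbc-password")

# Same read as above, but with credentials resolved from the secret scope
# instead of inline literals. The URL and table placeholders still need to
# be filled in before running.
secure_df = (
    spark.read.format("jdbc")
    .option("driver", "com.microsoft.sqlserver.jdbc.SQLServerDriver")
    .option("url", "jdbc:sqlserver://:;database=")
    .option("user", jdbc_user)
    .option("password", jdbc_password)
    .option("dbtable", "")
    .load()
)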