# Databricks notebook source
# `spark` and `dbutils` are provided automatically in a Databricks notebook,
# so no imports or session setup are needed for the steps below.

# Read the source SQL Server table into a DataFrame over JDBC

pipeline_df = (spark.read
  .format("jdbc")
  .option("driver", "com.microsoft.sqlserver.jdbc.SQLServerDriver")
  .option("url", "jdbc:sqlserver://<host>:<port>;database=<databasename>")
  .option("user", "<username>")
  .option("password", "<password>")
  .option("dbtable", "<schema>.<tablename>")
  .load()
)
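
# Credentials should not be hardcoded in notebooks. A minimal sketch using
# Databricks secrets instead, assuming a secret scope "jdbc-scope" with keys
# "sql-user" and "sql-password" already exists (all three names hypothetical):

jdbc_user = dbutils.secrets.get(scope="jdbc-scope", key="sql-user")
jdbc_password = dbutils.secrets.get(scope="jdbc-scope", key="sql-password")
# These values would replace the literal "<username>" and "<password>"
# options in the JDBC read above.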

# Write the data to a Databricks Delta table; the table is created automatically on the first run

pipeline_df.write.mode("overwrite").option("path", "<mount_path>").saveAsTable("<databricks_table_name>")
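
# "overwrite" replaces the table contents on every run. For later scheduled
# runs an incremental append is often preferable. A rough sketch, assuming the
# source table has a monotonically increasing key column "id" (hypothetical name):

from pyspark.sql.functions import col

last_id = spark.table("<databricks_table_name>").agg({"id": "max"}).first()[0]
incremental_df = pipeline_df.filter(col("id") > last_id)  # filter is pushed down to SQL Server
incremental_df.write.mode("append").saveAsTable("<databricks_table_name>")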

# COMMAND ----------

# MAGIC %sql
# MAGIC -- View the loaded data
# MAGIC select * from <databricks_table_name>
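
# COMMAND ----------

# The same check from Python, e.g. when this file runs as a job where
# SQL magic cells are not executed:

spark.table("<databricks_table_name>").show(10)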