(1) Create the database on MySQL before running the Spark JDBC job
	mysql -u hadoop -p
	create database foodmart_spark;

(2) Create the database on Hive before running the Spark JDBC job
	create database foodmart_spark;
	
(3) Execute the following steps on the command line before launching the NiFi jobs
	
	# Create the NiFi landing directories on HDFS.
	# -p creates missing parent directories and does not fail if the
	# directory already exists, so these steps are safe to re-run.
	hadoop fs -mkdir -p /root/data/foodmart_nifi/product
	hadoop fs -mkdir -p /root/data/foodmart_nifi/sales
	# NOTE: -R must come BEFORE the mode ("hadoop fs -chmod [-R] <MODE> <PATH>");
	# the original "-chmod 777 -R" treats "-R" as a path and fails.
	# NOTE(review): 777 on all of /root is very permissive — consider
	# restricting this to /root/data/foodmart_nifi.
	hadoop fs -chmod -R 777 /root
	
(4) Log in to MySQL on the command line
	mysql -u hadoop -p

(5) Run the following statements to create the new table and load sample data
	use foodmart_spark;

	-- Sample product catalog used by the Spark JDBC demo.
	-- productID is auto-generated unless a value is supplied explicitly.
	CREATE TABLE IF NOT EXISTS demo_products (
         productID    INT UNSIGNED  NOT NULL AUTO_INCREMENT,
         productCode  CHAR(3)       NOT NULL DEFAULT '',
         name         VARCHAR(30)   NOT NULL DEFAULT '',
         quantity     INT UNSIGNED  NOT NULL DEFAULT 0,
         price        DECIMAL(7,2)  NOT NULL DEFAULT 99999.99,  -- exact decimal: never FLOAT for money
         PRIMARY KEY  (productID)
       );

	-- Always name the target columns: an INSERT without a column list breaks
	-- silently if the table definition ever changes.
	-- The first row pins productID = 1001; AUTO_INCREMENT continues from there.
	INSERT INTO demo_products (productID, productCode, name, quantity, price)
	VALUES (1001, 'PEN', 'Pen Red', 5000, 1.23);

	-- Omit productID (instead of inserting NULL) so AUTO_INCREMENT assigns it.
	INSERT INTO demo_products (productCode, name, quantity, price)
	VALUES ('PEN', 'Pen Blue',  8000, 1.25),
	       ('PEN', 'Pen Black', 2000, 1.25);

	INSERT INTO demo_products (productCode, name, quantity, price)
	VALUES ('PEC', 'Pencil 2B', 10000, 0.48),
	       ('PEC', 'Pencil 2H', 8000, 0.49);
	