BACK

0
Posted by albydarned 17 days ago

	from shared.Util.Format import DecimalFtToFtInValue
	def print_data(data):
		"""Debug helper: dump an Ignition dataset to the console.

		Prints the column-name list first, then one line per row with every
		cell stringified and followed by a tab (trailing tab included).
		"""
		print(data.getColumnNames())
		for r in range(data.getRowCount()):
			cells = [str(data.getValueAt(r, c)) for c in range(data.getColumnCount())]
			print("".join(cell + "\t" for cell in cells))
	
	def get_value(dataset, row, tag_path):
		"""Fetch a cell from a Wide-format history dataset by tag path.

		The column name is the text after the first ']' in the tag path
		(i.e. the path with its "[provider]" prefix stripped). Returns None
		when tag_path is empty or None.
		"""
		if not tag_path:
			return None
		column = tag_path.split("]")[1]
		return dataset.getValueAt(row, column)
	
	
	DEBUG = True
	
	if DEBUG:
		# Debug override: inject a hard-coded set of level IDs instead of the
		# "Levels" report parameter supplied by the caller.
		#data["Levels"] = system.dataset.toDataSet(["LevelID"], [[454],[455],[456],[457],[458],[459],[450],[451],[452],[453]])
		lvls = [4259,4260,7430,4755,4756,5214,5215,4740,4271,4706,4707,4732,4733,4734,4735,5804,5805,4786,4787,4759,4760,4741,4742,4724,4725,4778,4779,4782,4783,5209,5210,4784,4785,4726,4727,4730,4731,4709,4710,4711,4712,4728,4729,5838,5839,4788,4789,4751,4752,4753,4754,4736,4737,4738,4739,4757,4758,4713,4714,4715,4716,4722,4723,4790,4791,4792,4793,4220,4221,4222,6643,6644,6645,7130,7131,7132,7136,7137,7178,7179]
		# One single-column row per level ID.
		rows = [[lvl] for lvl in lvls]
		data["Levels"] = system.dataset.toDataSet(["LevelID"], rows)
		
	
	# Use RunAt parameter to support running the report at a point in time.
	# RunAt == -1 means "use now"; any other value is an epoch-millis instant;
	# a missing/non-numeric RunAt falls through to date-range mode below.
	try:
		runAt = long(data["RunAt"])
	except Exception:
		# Narrowed from a bare except: a bare clause would also swallow
		# SystemExit/KeyboardInterrupt. Missing key or unparsable value -> None.
		runAt = None
	
	# Remaining report parameters; any of these may be None.
	startDate = data["StartDate"]
	endDate = data["EndDate"]
	use_range = False
	relativeValue = data["RelativeValue"]
	relativeUnits = data["RelativeUnits"]
	
	
	# Resolve 'now' (the report's reference instant) from the RunAt / range params.
	# Use now
	if runAt == -1:
		now = system.date.now()
			
	# Specific date
	elif(runAt is not None):
		now = system.date.fromMillis(runAt)
	
	
	# Range was chosen
	else:
		# Use range
		use_range = True
		# Explicit start/end dates (ISO yyyy-MM-dd strings) take priority.
		if startDate is not None and endDate is not None:
			startDate = system.date.parse(data["StartDate"], "yyyy-MM-dd")
			endDate = system.date.parse(data["EndDate"], "yyyy-MM-dd")
			now = endDate
			
		# Otherwise a relative window ending at the current time.
		elif(relativeValue is not None and relativeUnits is not None):
			endDate = system.date.now()
			if(relativeUnits == "Days"):
				startDate = system.date.addDays(endDate, -1 * relativeValue)
			elif(relativeUnits == "Minutes"):
				startDate = system.date.addMinutes(endDate, -1 * relativeValue)
			elif(relativeUnits == "Hours"):
				startDate = system.date.addHours(endDate, -1 * relativeValue)
			else:
				# Unknown units silently fall back to a 1-day window.
				startDate = system.date.addDays(endDate, -1)
			now = endDate

		# NOTE(review): if neither explicit dates nor relative params are supplied
		# in range mode, 'now' is never assigned and format_timestamp below raises
		# a NameError — confirm the caller guarantees one of the two.
		
		
	timestamp, tz = shared.Util.Reports.format_timestamp(now, data["TimeZoneID"])
	data["Now"] = timestamp
	data["Timezone"] = tz
	
	# Get the level objects
	column = 'LevelID' # match the level ID column name in parameter Levels
	if data['Levels'] is None:
		# No levels supplied: substitute an empty dataset so the code below still runs.
		data['Levels'] = system.dataset.toDataSet([column], [])
	
	# Build the "(id1,id2,...)" placeholder string for the SQL IN clause.
	ids = [str(data['Levels'].getValueAt(row, column)) for row in range(data['Levels'].getRowCount())]
	# BUG FIX: with zero levels the original produced the bare string ")" here,
	# which made the query below invalid SQL. "(NULL)" keeps the query valid
	# and matches no rows.
	text = '(' + ','.join(ids) + ')' if ids else '(NULL)'
	
	# Get levels from DB
	# Fetch each selected level's tag paths and contract-hour configuration,
	# joined with its site/company so the effective contract hour can be chosen.
	# NOTE(review): 'text' is interpolated straight into the SQL. The IDs come
	# from the Levels dataset (integers in practice), but a parameterized query
	# would be safer — confirm the Levels parameter can't carry arbitrary strings.
	query = """
	SELECT
		l.SiteID, l.LevelName, l.TopLevel, l.BottomLevel, l.TopVolume, l.BottomVolume, l.Temperature,
		s.SiteName, s.UseCompanyContractHour, s.ContractHour AS 'SiteContractHour', 
		c.ContractHour AS 'CompanyContractHour'
	FROM
		Level AS l JOIN Site AS s ON
		l.SiteID = s.SiteID JOIN Company AS c ON
		s.CompanyID = c.CompanyID
	WHERE
		l.LevelID IN %s
	ORDER BY 
		s.SiteID;
	""" % text
	
	levelsRaw = system.db.runQuery(query)
	# Convert the Ignition dataset into a list of dicts keyed by column name.
	levelsRaw = shared.Util.Dataset.to_dict(levelsRaw)
	
	# Level accumulators. (badLevels is kept for parity with the flow reports;
	# nothing in this view appends to it.)
	levels = list(levelsRaw)
	badLevels = []
	
	# Group levels by contract hour for processing — very often there is only
	# one group, so the history query below runs once.
	# (The original initialized this dict twice; once is enough.)
	levelGroups = {}
	
	# Cache of SiteID -> time zone so each site is resolved at most once.
	siteTimeZones = {}
	
	# Resolve each level's effective contract hour and bucket it by the
	# localized group hour.
	for level in levels:
		siteId = level['SiteID']
		if siteId in siteTimeZones:
			siteTimeZone = siteTimeZones[siteId]
		else:
			try:
				siteTimeZone = shared.Admin.Site.Site.get_site_timezone(siteId)
			except Exception:
				# BUG FIX: the original cached None but left the local
				# 'siteTimeZone' unbound (NameError on the first failure,
				# a stale value from the previous level afterwards).
				siteTimeZone = None
			siteTimeZones[siteId] = siteTimeZone
		
		# A site may defer to its company's contract hour.
		if level['UseCompanyContractHour']:
			contractHour = level['CompanyContractHour']
		else:
			contractHour = level['SiteContractHour']
		level['contractHourLocal'] = contractHour
		
		try:
			utcContractHour = shared.Util.ContractHour.to_utc(contractHour, siteTimeZone)
		except Exception:
			# -1 marks an unresolvable contract hour; to_local decides what that means.
			utcContractHour = -1
		
		groupHour = shared.Util.ContractHour.to_local(utcContractHour, now)
		levelGroups.setdefault(groupHour, []).append(level)
	
	# Output dataset shape shared by LevelKey and RecentLevelKey.
	header = ['Date', 'Site', 'Level', 'Top Level', 'Bottom Level', 'Top Volume', 'Bottom Volume', 'Temperature']
	rows = []
	recent_rows = []

	for hour in levelGroups.keys():
		lvls = levelGroups[hour]
		
		# End of the current contract-hour period. Since we only want the last
		# value, going back one hour at a time keeps the historical load small.
		end = shared.Util.ContractHour.get_end_time(hour, now)
		
		# If 'now' hasn't passed the contract hour yet, use the previous day.
		if not system.date.isAfter(now, end):
			end = system.date.addDays(end, -1)
		
		if use_range:
			# BUG FIX: this used 'groupHour', a stale variable leaked from the
			# grouping loop above, so every group got the LAST group's hour.
			# Use this iteration's 'hour'.
			startDate = system.date.setTime(startDate, hour, 0, 0)
		else:
			# Default the window to the first day of the current month,
			# at this group's contract hour.
			currentDay = system.date.getDayOfMonth(end)
			# BUG FIX: 'groupHour' -> 'hour' (same stale-variable bug as above).
			startDate = system.date.setTime(system.date.addDays(now, -(currentDay - 1)), hour, 0, 0)
		
		# Instead of one history query per level, run one query per contract-hour
		# group covering every value-type tag path. Only truthy paths are queried.
		paths = []
		for l in lvls:
			for key in ("TopLevel", "BottomLevel", "TopVolume", "BottomVolume", "Temperature"):
				if l[key]:
					paths.append(l[key])
		# Eliminate duplicates shared between levels.
		paths = list(set(paths))
		
		# LastValue aggregate over 24-hour intervals in Wide format: one column
		# per tag (aliased to its path), one row per day — i.e. the value at
		# each day's contract hour.
		values = system.tag.queryTagHistory(paths, startDate, end, aggregationMode="LastValue", intervalHours=24, returnFormat="Wide", aliases=paths)
		
		# Find the most recent timestamp in the result set.
		# NOTE(review): addDays(..., 0) appears to be a copy/coercion of the
		# t_stamp value — confirm intent before removing.
		recent_date = None
		for row in range(values.getRowCount()):
			date = system.date.addDays(values.getValueAt(row, "t_stamp"), 0)
			if recent_date is None or system.date.isAfter(date, recent_date):
				recent_date = date
		data["RecentDate"] = recent_date
		
		# Build one output row per (level, day). A month has at most 31 rows,
		# so this nested loop stays small.
		for l in lvls:
			for row in range(values.getRowCount()):
				date = system.date.addDays(values.getValueAt(row, "t_stamp"), 0)
				out = [
					date,
					l["SiteName"],
					l["LevelName"],
					str(DecimalFtToFtInValue(get_value(values, row, l["TopLevel"]))),
					str(DecimalFtToFtInValue(get_value(values, row, l["BottomLevel"]))),
					str(get_value(values, row, l["TopVolume"])),
					str(get_value(values, row, l["BottomVolume"])),
					str(get_value(values, row, l["Temperature"]))
				]
				rows.append(out)
				# Keep a separate copy of the most-recent day's rows.
				if date == recent_date:
					recent_rows.append(list(out))
	
	# Full history is only published for range mode; the recent snapshot always is.
	# NOTE(review): looks intentional (point-in-time runs only need the snapshot) — confirm.
	if use_range:
		data["LevelKey"] = system.dataset.toDataSet(header, rows)
	data["RecentLevelKey"] = system.dataset.toDataSet(header, recent_rows)