Hive HPL/SQL stored procedure cannot insert timestamp values

I am trying to use a Hive HPL/SQL stored procedure to insert data into a Hive table, but it fails to insert timestamp values. My table is defined as follows:
+----------------+---------------+----------+
| col_name       | data_type     | comment  |
+----------------+---------------+----------+
| id             | bigint        |          |
| clean_batchid  | bigint        |          |
| job_name       | varchar(50)   |          |
| table_name     | varchar(30)   |          |
| begin_time     | timestamp     |          |
| end_time       | timestamp     |          |
| clean_type     | varchar(20)   |          |
| total_count    | bigint        |          |
| clean_count    | bigint        |          |
| description    | varchar(500)  |          |
+----------------+---------------+----------+
From the Hive CLI I can insert values without any problem:
insert into nbl_clean_batch
values(12345, 1, 'tryme','B', '2016-12-12 12:52:43', '2016-12-12 12:56:19', 0, 10, 5, 'bbb');
but when I use hplsql to execute the following script:
START_DATE := SYSDATE;
END_DATE := SYSDATE;
INSERT INTO TABLE NBL_CLEAN_BATCH
(
ID, CLEAN_BATCHID, JOB_NAME, TABLE_NAME, BEGIN_TIME, END_TIME, CLEAN_TYPE, TOTAL_COUNT, CLEAN_COUNT, DESCRIPTION
)
VALUES (
23456, 1, 'bbb', 'B', START_DATE, END_DATE, 0, 9, 2, 'line'
);
it throws the following exception:
Unhandled exception in HPL/SQL
org.apache.hive.service.cli.HiveSQLException: Error while compiling statement: FAILED: ParseException line 2:27 mismatched input '-' expecting ) near '2016' in value row constructor
at org.apache.hive.jdbc.Utils.verifySuccess(Utils.java:267)
at org.apache.hive.jdbc.Utils.verifySuccessWithInfo(Utils.java:253)
at org.apache.hive.jdbc.HiveStatement.runAsyncOnServer(HiveStatement.java:310)
at org.apache.hive.jdbc.HiveStatement.execute(HiveStatement.java:250)
at org.apache.hive.hplsql.Conn.executeSql(Conn.java:110)
at org.apache.hive.hplsql.Exec.executeSql(Exec.java:592)
at org.apache.hive.hplsql.Stmt.insertValues(Stmt.java:800)
at org.apache.hive.hplsql.Stmt.insert(Stmt.java:713)
at org.apache.hive.hplsql.Exec.visitInsert_stmt(Exec.java:1116)
at org.apache.hive.hplsql.Exec.visitInsert_stmt(Exec.java:52)
at org.apache.hive.hplsql.HplsqlParser$Insert_stmtContext.accept(HplsqlParser.java:10330)
at org.antlr.v4.runtime.tree.AbstractParseTreeVisitor.visitChildren(AbstractParseTreeVisitor.java:70)
at org.apache.hive.hplsql.Exec.visitStmt(Exec.java:1009)
at org.apache.hive.hplsql.Exec.visitStmt(Exec.java:52)
at org.apache.hive.hplsql.HplsqlParser$StmtContext.accept(HplsqlParser.java:1015)
at org.antlr.v4.runtime.tree.AbstractParseTreeVisitor.visitChildren(AbstractParseTreeVisitor.java:70)
at org.apache.hive.hplsql.HplsqlBaseVisitor.visitBlock(HplsqlBaseVisitor.java:28)
at org.apache.hive.hplsql.HplsqlParser$BlockContext.accept(HplsqlParser.java:449)
at org.antlr.v4.runtime.tree.AbstractParseTreeVisitor.visitChildren(AbstractParseTreeVisitor.java:70)
at org.apache.hive.hplsql.Exec.visitProgram(Exec.java:916)
at org.apache.hive.hplsql.Exec.visitProgram(Exec.java:52)
at org.apache.hive.hplsql.HplsqlParser$ProgramContext.accept(HplsqlParser.java:392)
at org.antlr.v4.runtime.tree.AbstractParseTreeVisitor.visit(AbstractParseTreeVisitor.java:42)
at org.apache.hive.hplsql.Exec.run(Exec.java:771)
at org.apache.hive.hplsql.Exec.run(Exec.java:747)
at org.apache.hive.hplsql.Hplsql.main(Hplsql.java:23)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.hadoop.util.RunJar.run(RunJar.java:221)
at org.apache.hadoop.util.RunJar.main(RunJar.java:136)
Caused by: org.apache.hive.service.cli.HiveSQLException: Error while compiling statement: FAILED: ParseException line 2:27 mismatched input '-' expecting ) near '2016' in value row constructor
at org.apache.hive.service.cli.operation.Operation.toSQLException(Operation.java:394)
at org.apache.hive.service.cli.operation.SQLOperation.prepare(SQLOperation.java:199)
at org.apache.hive.service.cli.operation.SQLOperation.runInternal(SQLOperation.java:282)
at org.apache.hive.service.cli.operation.Operation.run(Operation.java:334)
at org.apache.hive.service.cli.session.HiveSessionImpl.executeStatementInternal(HiveSessionImpl.java:505)
at org.apache.hive.service.cli.session.HiveSessionImpl.executeStatementAsync(HiveSessionImpl.java:492)
at sun.reflect.GeneratedMethodAccessor43.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.hive.service.cli.session.HiveSessionProxy.invoke(HiveSessionProxy.java:78)
at org.apache.hive.service.cli.session.HiveSessionProxy.access$000(HiveSessionProxy.java:36)
at org.apache.hive.service.cli.session.HiveSessionProxy$1.run(HiveSessionProxy.java:63)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
at org.apache.hive.service.cli.session.HiveSessionProxy.invoke(HiveSessionProxy.java:59)
at com.sun.proxy.$Proxy38.executeStatementAsync(Unknown Source)
at org.apache.hive.service.cli.CLIService.executeStatementAsync(CLIService.java:297)
at org.apache.hive.service.cli.thrift.ThriftCLIService.ExecuteStatement(ThriftCLIService.java:506)
at org.apache.hive.service.rpc.thrift.TCLIService$Processor$ExecuteStatement.getResult(TCLIService.java:1437)
at org.apache.hive.service.rpc.thrift.TCLIService$Processor$ExecuteStatement.getResult(TCLIService.java:1422)
at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:39)
at org.apache.thrift.TBaseProcessor.process(TBaseProcessor.java:39)
at org.apache.hive.service.auth.TSetIpAddressProcessor.process(TSetIpAddressProcessor.java:56)
at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:286)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.RuntimeException: org.apache.hadoop.hive.ql.parse.ParseException:line 2:27 mismatched input '-' expecting ) near '2016' in value row constructor
at org.apache.hadoop.hive.ql.parse.ParseDriver.parse(ParseDriver.java:207)
at org.apache.hadoop.hive.ql.parse.ParseDriver.parse(ParseDriver.java:166)
at org.apache.hadoop.hive.ql.Driver.compile(Driver.java:465)
at org.apache.hadoop.hive.ql.Driver.compileInternal(Driver.java:1301)
at org.apache.hadoop.hive.ql.Driver.compileAndRespond(Driver.java:1279)
at org.apache.hive.service.cli.operation.SQLOperation.prepare(SQLOperation.java:197)
Any idea why?
Thanks,

I have used this format before ...
hive> set CURRENT_DATE=FROM_UNIXTIME(UNIX_TIMESTAMP());
hive> select ${hiveconf:CURRENT_DATE}, * from tab1;
OK
2016-12-14 13:43:13 1 100
2016-12-14 13:43:13 2 235325
2016-12-14 13:43:13 3 3242
2016-12-14 13:43:13 4 32543
Time taken: 1.377 seconds, Fetched: 4 row(s)
hive> set CURRENT_TIMESTAMP = CURRENT_TIMESTAMP;
hive> select ${hiveconf:CURRENT_TIMESTAMP}, * from tab1;
OK
2016-12-14 13:43:35.425 1 100
2016-12-14 13:43:35.425 2 235325
2016-12-14 13:43:35.425 3 3242
2016-12-14 13:43:35.425 4 32543
Time taken: 1.32 seconds, Fetched: 4 row(s)
hive>
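The parse error in the question comes from HPL/SQL substituting the DATE variable into the generated statement as a bare, unquoted token (2016-12-12 12:52:43 ...), which Hive's VALUES row constructor cannot parse. A minimal sketch of another way around it, assuming Hive 0.13+ (SELECT without FROM) and current_timestamp (available since Hive 1.2), is to let Hive compute the timestamp itself rather than substituting one in:
-- avoid the VALUES row constructor entirely
INSERT INTO TABLE nbl_clean_batch
SELECT 23456, 1, 'bbb', 'B',
       current_timestamp,  -- begin_time
       current_timestamp,  -- end_time
       0, 9, 2, 'line';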

Related

01722. 00000 - "invalid number" : Oracle

I have a table1 with
t1id NUMBER(10,0)
channel_id NUMBER(10,0)
and another table2 with columns
t1id NUMBER(10,0)
channel2_id NVARCHAR2(100 CHAR)
cl2_id NVARCHAR2(100 CHAR)
When I run the query
SELECT t1.t1id, t2.cl2_id
FROM table1 t1
LEFT JOIN table2 t2
ON t1.channel_id = t2.channel2_id;
I receive the error below while joining. Is it due to the data types of the two columns, and how can I resolve it?
01722. 00000 - "invalid number"
*Cause: The specified number was invalid.
*Action: Specify a valid number.
If you have a datatype mismatch, Oracle will (silently) try to correct that mismatch, e.g.
SQL> create table t ( num_col_stored_as_string varchar2(10));
Table created.
SQL>
SQL> insert into t values ('123');
1 row created.
SQL> insert into t values ('456');
1 row created.
SQL> insert into t values ('789');
1 row created.
SQL>
SQL> explain plan for
2 select * from t
3 where num_col_stored_as_string = 456;
Explained.
SQL>
SQL> select * from dbms_xplan.display();
PLAN_TABLE_OUTPUT
---------------------------------------------------------------------------
Plan hash value: 1601196873
--------------------------------------------------------------------------
| Id | Operation | Name | Rows | Bytes | Cost (%CPU)| Time |
--------------------------------------------------------------------------
| 0 | SELECT STATEMENT | | 1 | 7 | 3 (0)| 00:00:01 |
|* 1 | TABLE ACCESS FULL| T | 1 | 7 | 3 (0)| 00:00:01 |
--------------------------------------------------------------------------
Predicate Information (identified by operation id):
---------------------------------------------------
1 - filter(TO_NUMBER("NUM_COL_STORED_AS_STRING")=456)
Notice that we silently added a TO_NUMBER to the filter to ensure that the column matched with the input value (456).
It then becomes obvious as to why this can cause problems, because:
SQL> insert into t values ('NOT A NUM');
1 row created.
SQL> select * from t
2 where num_col_stored_as_string = 456;
ERROR:
ORA-01722: invalid number
As others have suggested in the comments, look at:
- getting your datatypes aligned
- using TO_CHAR
- using VALIDATE_CONVERSION
but ideally, data type alignment is the way to go.
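A minimal sketch of the TO_CHAR route applied to the join in the question (assuming the channel_id values carry no formatting differences once converted):
SELECT t1.t1id, t2.cl2_id
FROM table1 t1
LEFT JOIN table2 t2
ON TO_CHAR(t1.channel_id) = t2.channel2_id;
This keeps any conversion on the NUMBER side, so a non-numeric value in channel2_id can no longer raise ORA-01722.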

Running multiple inserts into an Oracle table, how can I commit after each insert and restart the stored procedure at the last inserted point?

I'm very new to Oracle and am writing my first stored procedure for a side project. Essentially I have one table for intraday data, and another table to store historical data. I need to insert chunks of the intraday table into the history table, commit those inserts, and restart the stored procedure at the first uninserted point in the case of failure.
Here is what I have so far:
CREATE OR REPLACE PROCEDURE test_proc (p_array_size IN PLS_INTEGER DEFAULT 5000)
IS
  TYPE ARRAY IS TABLE OF z_intraday%ROWTYPE;
  l_data ARRAY;
  CURSOR c IS SELECT *
              FROM "intraday";
BEGIN
  OPEN c;
  LOOP
    FETCH c BULK COLLECT INTO l_data LIMIT p_array_size;
    FORALL i IN 1 .. l_data.COUNT
      INSERT INTO history
      VALUES l_data(i);
    EXIT WHEN c%NOTFOUND;
  END LOOP;
  CLOSE c;
  COMMIT;
EXCEPTION
  WHEN OTHERS THEN
    ROLLBACK;
    RAISE;
END test_proc;
So I only commit after the loop has finished. How can I refactor so that each insert operation in the loop commits, then if there is a failure, roll back to the previous batch of records that failed and run the procedure again? Sorry I know this is a heavy question, but any guidance would be greatly appreciated.
Use set-based operations wherever possible, not row-by-row operations. A single "insert as select" or "merge" statement with a filter will run faster by several orders of magnitude than the row-by-slow construct you have created. Also, committing after every individual row hurts performance for the entire database instance, not just this procedure, because every commit forces a redo log sync. A bonus of the set-based forms below is that they are naturally restartable: rows already copied are skipped by the filter, so after a failure you simply rerun the statement.
insert into history (col1, col2, col3)
select col1, col2, col3 from intraday d
where d.id not in (select id from history);
commit;
or
merge into history h
using intraday d
on (h.id = d.id)
when not matched then
insert (h.id, h.col2, h.col3) values (d.id, d.col2, d.col3);
commit;
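One caveat on the NOT IN form: if history.id can ever be NULL, NOT IN silently matches nothing. A hedged rewrite with NOT EXISTS (same assumed id/col columns as above) avoids that:
insert into history (col1, col2, col3)
select col1, col2, col3
from intraday d
where not exists (select 1 from history h where h.id = d.id);
commit;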
You don't need a complicated procedure: you can use INSERT INTO ... LOG ERRORS INTO ... to capture the errors. All the errors are collected into one table, and all the valid rows are inserted successfully (continuing past each error, if you specify REJECT LIMIT UNLIMITED).
If you have the tables:
CREATE TABLE "intraday" (
a INT PRIMARY KEY,
b DATE,
c TIMESTAMP,
d VARCHAR2(30),
e VARCHAR2(10)
);
CREATE TABLE history (
a INT,
b DATE,
c TIMESTAMP NOT NULL,
d VARCHAR2(30),
e DATE
);
INSERT INTO "intraday"
SELECT 1, DATE '2020-01-01', TIMESTAMP '2020-01-01 00:00:00', 'valid', '2020-01-01' FROM DUAL UNION ALL
SELECT 2, DATE '2020-01-02', NULL, 'timestamp null', '2020-01-01' FROM DUAL UNION ALL
SELECT 3, DATE '2020-01-03', TIMESTAMP '2020-01-03 00:00:00', 'implicit date cast fails', '2020-01-XX' FROM DUAL UNION ALL
SELECT 4, DATE '2020-01-04', TIMESTAMP '2020-01-04 00:00:00', 'valid', '2020-01-04' FROM DUAL;
ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD';
Then you can create a table to put the errors using:
BEGIN
DBMS_ERRLOG.CREATE_ERROR_LOG (
dml_table_name => 'HISTORY',
err_log_table_name => 'HISTORY_ERRORS'
);
END;
/
Then you can run the SQL statement:
INSERT /*+ APPEND */ INTO history
SELECT * FROM "intraday"
LOG ERRORS INTO history_errors ('INSERT APPEND') REJECT LIMIT UNLIMITED;
Then the history table will contain:
SELECT * FROM history;
A | B | C | D | E
-: | :--------- | :------------------------ | :---- | :---------
1 | 2020-01-01 | 01-JAN-20 00.00.00.000000 | valid | 2020-01-01
4 | 2020-01-04 | 04-JAN-20 00.00.00.000000 | valid | 2020-01-04
And the errors will be:
SELECT * FROM history_errors;
ORA_ERR_NUMBER$ | ORA_ERR_MESG$ | ORA_ERR_ROWID$ | ORA_ERR_OPTYP$ | ORA_ERR_TAG$ | A | B | C | D | E
--------------: | :----------------------------------------------------------------------------------- | :------------- | :------------- | :------------ | :- | :--------- | :--------------------------- | :----------------------- | :---------
1400 | ORA-01400: cannot insert NULL into ("FIDDLE_HSUKHKSUNFGTKKAMLHOA"."HISTORY"."C") | null | I | INSERT APPEND | 2 | 2020-01-02 | null | timestamp null | 2020-01-01
1858 | ORA-01858: a non-numeric character was found where a numeric was expected | null | I | INSERT APPEND | 3 | 2020-01-03 | 03-JAN-20 00.00.00.000000000 | implicit date cast fails | 2020-01-XX
db<>fiddle here

Hive: Get the rows from a table by mapping rows from another table having no common columns

I have two tables.
Table 1:
timestamp
Table 2:
timestamp_lower_bound,
timestamp_upper_bound
I want to select rows from Table 1 where the timestamp falls between the lower bound and upper bound in Table 2, in Hive.
I tried the following code:
SELECT *
FROM Table1
where from_unixtime(unix_timestamp(timestamp, 'yyyy-MM-dd HH:mm:ss.SSS'),'yyyy-MM-dd HH:mm')
BETWEEN (select from_unixtime(unix_timestamp(timestamp_lower_bound, 'yyyy-MM-dd HH:mm:ss.SSS'),'yyyy-MM-dd HH:mm') from Table2)
AND (select from_unixtime(unix_timestamp(timestamp_upper_bound, 'yyyy-MM-dd HH:mm:ss.SSS'),'yyyy-MM-dd HH:mm') from Table2)
And it gives the error below.
DAG did not succeed due to VERTEX_FAILURE. failedVertices:1 killedVertices:1
I also followed the instructions here on Stack Overflow, and here is the code. It also failed due to a vertex failure.
SELECT a.*
FROM Table1 AS a
WHERE EXISTS
(SELECT 1
FROM Table2 AS b
WHERE a.timestamp BETWEEN from_unixtime(unix_timestamp(b.timestamp_lb, 'yyyy-MM-dd HH:mm:ss.SSS'),'yyyy-MM-dd HH:mm')
AND from_unixtime(unix_timestamp(b.timestamp_ub, 'yyyy-MM-dd HH:mm:ss.SSS'),'yyyy-MM-dd HH:mm') )
Any idea how to do this? It doesn't have to be in Hive; Spark also works.
EDIT
Here is a toy example.
Suppose in Table 1 timestamp, I have the following.
+------------------+
| Timestamp        |
+------------------+
| 2018-01-15 17:56 |
| 2019-04-29 08:43 |
| 2018-10-23 23:43 |
| 2018-08-21 04:54 |
| 2019-12-06 14:09 |
+------------------+
In Table 2, I have the following upper bounds and lower bounds.
+-----------------------+-----------------------+
| timestamp_lower_bound | timestamp_upper_bound |
+-----------------------+-----------------------+
| 2018-04-15 12:37      | 2018-04-15 12:57      |
| 2018-10-23 23:38      | 2018-10-23 23:58      |
| 2018-08-21 04:50      | 2018-08-21 05:10      |
+-----------------------+-----------------------+
In this case, only the third and fourth record in Table 1 should be selected. How shall I achieve that?
As per the example above, both columns in Table2 (timestamp_lower_bound and timestamp_upper_bound) contain the same date; only the 'HH:MM' portion (or just the 'MM') changes.
So it is not necessary to scan both columns; scanning only one of them suffices.
For example:
create table table1
(TTimestamp string);
insert into table1 values('2018-01-15 17:56');
insert into table1 values('2019-04-29 08:43');
insert into table1 values('2018-10-23 23:43');
insert into table1 values('2018-08-21 04:54');
insert into table1 values('2019-12-06 14:09');
create table table2
(timestamp_lower_bound string,
timestamp_upper_bound string);
insert into table2 values('2018-04-15 12:37','2018-04-15 12:57');
insert into table2 values('2018-10-23 23:43','2018-10-23 23:58');
insert into table2 values('2018-08-21 04:54','2018-08-21 05:10');
select TTimestamp from table1 where TTimestamp in(select timestamp_lower_bound from table2);
TTimestamp
2018-08-21 04:54
2018-10-23 23:43
Here I have taken the datatype as string and inserted the records as-is. In your case you can use the query below:
select TTimestamp
from table1
where from_unixtime(unix_timestamp(timestamp, 'yyyy-MM-dd HH:mm:ss.SSS'), 'yyyy-MM-dd HH:mm')
  in (select from_unixtime(unix_timestamp(timestamp_lower_bound, 'yyyy-MM-dd HH:mm:ss.SSS'), 'yyyy-MM-dd HH:mm')
      from table2);
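If the bounds do not match the timestamps exactly (as in the original toy data, where the lower bounds differ from the timestamps), a hedged alternative is a cross join filtered on the range itself; assuming table2 is small and the strings share the 'yyyy-MM-dd HH:mm' format, lexicographic comparison matches chronological order:
select t1.TTimestamp
from table1 t1
cross join table2 t2
where t1.TTimestamp between t2.timestamp_lower_bound
                        and t2.timestamp_upper_bound;
Add distinct if the ranges can overlap, to avoid duplicate rows.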

The difference between CASE and UNPIVOT to find the max date across the columns; ORA-00904 error message

I have millions of IDs and I need to find the max date from 3 different dates for each ID.
Then, I need the start date of the month of the max date.
Here's a reference:
+---------+-----------+---------------+--------------------+
| ID | SETUP_DT | REINSTATE_DT | LOCAL_REINSTATE_DT |
+---------+-----------+---------------+--------------------+
| C111111 | 2018/1/1 | Null | Null |
| C111112 | 2015/12/9 | 2018/10/25 | 2018/10/25 |
| C111113 | 2018/10/1 | Null | Null |
| C111114 | 2018/10/6 | 2018/12/14 | 2018/12/14 |
+---------+-----------+---------------+--------------------+
And what I want is below:
+---------+-----------+
| ID | APP_MON |
+---------+-----------+
| C111111 | 2018/1/1 |
| C111112 | 2018/10/1 |
| C111113 | 2018/10/1 |
| C111114 | 2018/12/1 |
+---------+-----------+
I tried different approaches to get the result.
When I used CASE and UNPIVOT to find some specific IDs, the results looked fine.
/* case */
SELECT DIST_ID as ID,
trunc(
case
when REINSTATE_DT is not null and LOCAL_REINSTATE_DT is not null then greatest(LOCAL_REINSTATE_DT, REINSTATE_DT)
when REINSTATE_DT is null and LOCAL_REINSTATE_DT is not null then LOCAL_REINSTATE_DT
when REINSTATE_DT is not null and LOCAL_REINSTATE_DT is null then REINSTATE_DT
else SETUP_DT
end, 'MM') AS CN_APP_MON
FROM DISTRIBUTOR
where DIST_ID in ('CN111111','CN111112','CN111113','CN111114');
/* unpivot */
SELECT DIST_ID as ID,
trunc(MAX(Date_value),'MM') AS CN_APP_MON
FROM DISTRIBUTOR
UNPIVOT (Date_value FOR Date_type IN (SETUP_DT, REINSTATE_DT, LOCAL_REINSTATE_DT))
where DIST_ID in ('CN111111','CN111112','CN111113','CN111114')
GROUP BY DIST_ID;
However, when I changed the condition and tried to use a date range to pull out the data, the result was weird.
To be more specific, I tried to replace
where DIST_ID in ('CN111111','CN111112','CN111113','CN111114')
by
where REINSTATE_DT
between TO_DATE('2018/01/01','yyyy/mm/dd') and TO_DATE('2018/01/02','yyyy/mm/dd')
But the UNPIVOT version did not work. It showed:
ORA-00904: "REINSTATE_DT": invalid identifier
00904. 00000 - "%s: invalid identifier"
I want to know:
Which method is more efficient, or is there an even more efficient way to do this?
Why didn't the UNPIVOT method work? What is the difference between the two methods?
Thank you so much!
Assuming your dates are stored as dates, you can do this using greatest(). I'm not a fan of "magic" values in queries, so I like coalesce() for this purpose.
All your rows seem to have a setup_dt, so it can be used as a "default" via coalesce():
select dist_id as id,
       trunc(greatest(setup_dt,
                      coalesce(reinstate_dt, setup_dt),
                      coalesce(local_reinstate_dt, setup_dt)),
             'mm') as app_mon
from distributor;
You don't need such daunting constructs; greatest with the nvl function solves your problem.
with distributor( ID, setup_dt, reinstate_dt, local_reinstate_dt ) as
(
select 'C111111',date'2018-01-01', Null, Null from dual union all
select 'C111112',date'2015-12-09',date'2018-10-25',date'2018-10-25' from dual union all
select 'C111113',date'2018-10-01',Null,Null from dual union all
select 'C111114',date'2018-10-06',date'2018-12-14',date'2018-12-14' from dual
)
select id, trunc(greatest(nvl(setup_dt,date'1900-01-01'),
nvl(reinstate_dt,date'1900-01-01'),
nvl(local_reinstate_dt,date'1900-01-01')),'mm')
as app_mon
from distributor;
ID APP_MON
------- ----------
C111111 01.01.2018
C111112 01.10.2018
C111113 01.10.2018
C111114 01.12.2018
Rextester Demo
P.S.: The SETUP_DT, REINSTATE_DT and LOCAL_REINSTATE_DT columns cannot be referenced in your query's WHERE clause, because the UNPIVOT clause has already folded them into Date_value (with Date_type holding the source column name); hence the ORA-00904.
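A hedged sketch of how a comparable filter can be written after the UNPIVOT, using the generated columns instead (restricting to the REINSTATE_DT rows here purely for illustration; adjust to your actual intent):
SELECT DIST_ID as ID,
       trunc(MAX(Date_value),'MM') AS CN_APP_MON
FROM DISTRIBUTOR
UNPIVOT (Date_value FOR Date_type IN (SETUP_DT, REINSTATE_DT, LOCAL_REINSTATE_DT))
where Date_type = 'REINSTATE_DT'
  and Date_value between TO_DATE('2018/01/01','yyyy/mm/dd')
                     and TO_DATE('2018/01/02','yyyy/mm/dd')
GROUP BY DIST_ID;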

Sorting off of a formatted date

I currently have a table set up which reports history data. I have
SELECT ACTIONTYPE,
BINNUM,
DSID,
LOCATIONNAME,
LOCATIONTYPE,
ORDNO,
ORIGREC,
convert(varchar(10),TIMEOFACTION, 101) +
right(convert(varchar(32),TIMEOFACTION,100),8) as TIMEOFACTION,
TOTALLIFE
FROM DLOCATIONHISTORY
ORDER BY TIMEOFACTION DESC
I have edited the TIMEOFACTION column so that it displays the date-formatted field as mm/dd/yyyy hh:mmAM/PM. However, the program that references my query is placing AM before PM because 'A' comes before 'P'. What is the best way to adjust my query to prevent this? Is there a different ordering technique I could use?
My current reporting query shows:
**TIMEOFACTION**
12/13/2017 7:29AM
12/12/2017 10:07AM
12/12/2017 9:58AM
12/12/2017 1:51PM
12/12/2017 2:02PM
12/11/2017 11:01AM
When it should show:
**TIMEOFACTION**
12/13/2017 7:29AM
12/12/2017 2:02PM
12/12/2017 1:51PM
12/12/2017 10:07AM
12/12/2017 9:58AM
12/11/2017 11:01AM
LOL, just fully qualify the column with the table name or a table alias. An unqualified name in ORDER BY binds to the SELECT-list alias (here, the formatted varchar), whereas the qualified name refers to the underlying datetime column:
SELECT ACTIONTYPE,
BINNUM,
DSID,
LOCATIONNAME,
LOCATIONTYPE,
ORDNO,
ORIGREC,
convert(varchar(10),TIMEOFACTION, 101) +
right(convert(varchar(32),TIMEOFACTION,100),8) as TIMEOFACTION,
TOTALLIFE
FROM DLOCATIONHISTORY
ORDER BY DLOCATIONHISTORY.TIMEOFACTION DESC --<-- here!
Simplified sample
MS SQL Server 2014 Schema Setup:
create table t ( i int, a char(1) );
insert into t values
(1,'a'),
(2,'b'),
(3,'c');
Query 1:
select -1*i as i, a
from t
order by t.i
Results:
| i | a |
|----|---|
| -1 | a |
| -2 | b |
| -3 | c |
Query 2:
select -1*i as i, a
from t
order by i
Results:
| i | a |
|----|---|
| -3 | c |
| -2 | b |
| -1 | a |
You can use a derived table with a column alias, then rename back to the original column name. For example:
DECLARE @table TABLE(TIMEOFACTION datetime)
INSERT INTO @table VALUES
('2017-12-13 07:29:00')
,('2017-12-12 10:07:00')
,('2017-12-12 09:58:00')
,('2017-12-12 13:51:00')
,('2017-12-12 14:02:00')
,('2017-12-11 11:01:00')
SELECT convert(varchar(10),dT.TIMEOFACTION2, 101)
+ right(convert(varchar(32),TIMEOFACTION2,100),8) as TIMEOFACTION
FROM (
SELECT TIMEOFACTION AS TIMEOFACTION2
FROM @table
) AS dT
ORDER BY TIMEOFACTION2 DESC
Produces:
TIMEOFACTION
12/13/2017 7:29AM
12/12/2017 2:02PM
12/12/2017 1:51PM
12/12/2017 10:07AM
12/12/2017 9:58AM
12/11/2017 11:01AM
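A hedged third option, if the reporting program can be pointed at a separate sort column: return both the formatted string for display and the raw datetime for ordering (the _DISPLAY and _SORT aliases below are made up for illustration):
SELECT convert(varchar(10),TIMEOFACTION, 101) +
       right(convert(varchar(32),TIMEOFACTION,100),8) as TIMEOFACTION_DISPLAY,
       TIMEOFACTION as TIMEOFACTION_SORT
FROM DLOCATIONHISTORY
ORDER BY TIMEOFACTION_SORT DESC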