Hive: how to select from a specific sub-partition

For example, create a table in Hive:
CREATE TABLE t_data_daily(
imp_date BIGINT,
sp STRING,
datax STRING
)
PARTITION BY LIST( imp_date )
SUBPARTITION BY LIST( sp )(
SUBPARTITION sp_1 VALUES IN ( 'sp_1' ),
SUBPARTITION sp_2 VALUES IN ( 'sp_2' ),
SUBPARTITION sp_3 VALUES IN ( 'sp_3' )
)
(
PARTITION p_20191030 VALUES IN ( 20191030 ),
PARTITION p_20191101 VALUES IN ( 20191101 ),
PARTITION p_20191122 VALUES IN ( 20191122 )
)
Select data from a specific partition:
select * from t_data_daily partition (p_20191030) x limit 100
How do I select data from a specific partition and sub-partition, other than with the following:
select * from t_data_daily partition (p_20191030) x where sp = 'sp_1' limit 100
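For comparison, the LIST/SUBPARTITION DDL above looks like a vendor extension rather than stock HiveQL. In standard Hive, multi-level partitioning is declared with PARTITIONED BY on both columns, and putting both partition columns in the WHERE clause prunes the scan down to the single sub-partition. A minimal sketch under that assumption (table name is mine):
-- standard Hive: two partition levels instead of LIST sub-partitions
CREATE TABLE t_data_daily_std (
datax STRING
)
PARTITIONED BY (imp_date BIGINT, sp STRING);
-- both predicates are on partition columns, so only the
-- imp_date=20191030/sp='sp_1' sub-partition is scanned
SELECT * FROM t_data_daily_std
WHERE imp_date = 20191030 AND sp = 'sp_1'
LIMIT 100;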

Related

Can I use the lag function in an update statement to update a table variable?

I have this table variable with an initial row. I'd like to update the following rows based on that first row.
DECLARE @varTable1 TABLE
(
[id] [int],
[field1] [decimal](18,4)
)
INSERT INTO @varTable1
VALUES
(1,20),
(1,NULL),
(1,NULL),
(1,NULL)
SELECT * FROM @varTable1
Let's just say I want to multiply field1 by 2. So the expected values following the initial row would be
id field1
1 20
2 40
3 60
4 80
One way is to get the min value and multiply it by a row number, assuming your id column should actually be incrementing rather than all 1s, e.g.
WITH cte AS (
SELECT *
, MIN(field1) OVER () * ROW_NUMBER() OVER (ORDER BY id ASC) newField1
FROM @varTable1
)
UPDATE cte SET field1 = newField1;
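With the sample data, MIN(field1) OVER () is 20 and the row numbers run 1 to 4, so the UPDATE writes 20, 40, 60 and 80 into field1, matching the expected output (the assignment order among the rows is arbitrary here, since ORDER BY id has only ties).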
I guess id is important, so you have to multiply the field for the same id:
DECLARE @varTable1 TABLE
(
[id] [int],
[field1] [decimal](18,4)
)
INSERT INTO @varTable1
VALUES
(1,20),
(1,NULL),
(1,NULL),
(1,NULL),
(2,NULL)
;WITH CTE AS
(
SELECT *, ROW_NUMBER() OVER (ORDER BY id) AS rn FROM @varTable1
),
CTE1 AS
(
SELECT id, MIN([field1]) AS Minfield FROM @varTable1
GROUP BY [id]
)
SELECT vt.id, vt.field1, c.Minfield * rn FROM CTE vt
INNER JOIN CTE1 c ON vt.id = c.id
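Tracing this by hand: rn runs 1 to 4 over the id = 1 rows and is 5 for the id = 2 row; Minfield is 20 for id 1 and NULL for id 2, so the query returns 20, 40, 60, 80 for the id 1 rows and NULL for id 2.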

How can I exchange a partition in Oracle? ORA-14095: ALTER TABLE EXCHANGE requires a non-partitioned, non-clustered table

create table TEST_TABLE_2
(
report_month DATE,
name varchar(128)
)
partition by list (REPORT_MONTH)
(
partition TEST_PART_2022_05_31 values (TO_DATE(' 2022-05-31 00:00:00', 'SYYYY-MM-DD HH24:MI:SS', 'NLS_CALENDAR=GREGORIAN'))
tablespace TEST_TABLESPACE,
partition MONTH_UNKNOWN values (default)
tablespace TEST_TABLESPACE
);
create table TEST_TABLE_1
(
report_month DATE,
name varchar(128)
)
partition by list (REPORT_MONTH)
(
partition TEST_PART_2022_05_31 values (TO_DATE(' 2022-05-31 00:00:00', 'SYYYY-MM-DD HH24:MI:SS', 'NLS_CALENDAR=GREGORIAN'))
tablespace TEST_TABLESPACE,
partition MONTH_UNKNOWN values (default)
tablespace TEST_TABLESPACE
);
Please advise: how can I exchange partition TEST_PART_2022_05_31 of TEST_TABLE_2 with partition TEST_PART_2022_05_31 of TEST_TABLE_1?
When I execute this script
ALTER TABLE ADS.test_table_1
EXCHANGE PARTITION TEST_PART_2022_05_31
WITH TABLE ADS.test_table_2
I get the error ORA-14095: ALTER TABLE EXCHANGE requires a non-partitioned, non-clustered table.
Are you looking for something like this?
create table t (
c1, c2, c3
) partition by range ( c2 )
interval ( interval '1' month ) (
partition p0 values less than ( date'2022-02-01' )
)
as
select level, date'2022-01-01' + level, 'remove'
from dual
connect by level <= 100;
create table temp
for exchange with table t;
select count(*) from temp;
0
alter table t
exchange partition p0
with table temp;
select count(*) from temp;
100
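Applied to the original pair of tables, the same trick gives a three-step swap through one non-partitioned staging table, since ORA-14095 is raised precisely because both sides of the original EXCHANGE were partitioned. A sketch under that assumption (the staging table name is mine; CREATE TABLE ... FOR EXCHANGE needs Oracle 12.2 or later, and on older versions you would create the staging table with matching columns by hand):
create table exchange_stage
for exchange with table ads.test_table_1;
-- 1) stage takes TEST_TABLE_1's rows; that partition becomes empty
alter table ads.test_table_1
exchange partition test_part_2022_05_31 with table exchange_stage;
-- 2) TEST_TABLE_2's partition now holds TEST_TABLE_1's old rows,
--    and the stage holds TEST_TABLE_2's old rows
alter table ads.test_table_2
exchange partition test_part_2022_05_31 with table exchange_stage;
-- 3) TEST_TABLE_1's (now empty) partition takes TEST_TABLE_2's old rows
alter table ads.test_table_1
exchange partition test_part_2022_05_31 with table exchange_stage;
drop table exchange_stage;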

Calculate Running Total Amount with Bonus

I have following table:
create table test_table
(
employee_id integer,
salary_year integer,
raise_in_salary_perentage decimal(18,2),
annual_salary decimal(18,2)
);
Test data is the following:
insert into test_table values ( 1,2016, 0 , 100);
insert into test_table values ( 1,2017, 10, 100);
insert into test_table values ( 1,2018, 10, 100);
insert into test_table values ( 1,2019, 0, 100);
insert into test_table values ( 1,2020, 10, 100);
insert into test_table values ( 2,2016, 10 , 100);
insert into test_table values ( 2,2017, 10, 100);
insert into test_table values ( 2,2018, 0, 100);
insert into test_table values ( 2,2019, 0, 100);
insert into test_table values ( 2,2020, 0, 100);
I am trying to achieve the following output:
The cumulative salary should include the running total of annual salary over the years for each employee.
There is a percentage raise every year, so if the current year has a raise, the cumulative salary will be the sum of the previous salaries plus the amount received in the raise.
I tried to achieve it with the following SQL, but the results do not seem right. I will be thankful for a solution.
SELECT *
,sum(annual_salary) OVER (
PARTITION BY employee_id ORDER BY salary_year ROWS BETWEEN UNBOUNDED PRECEDING
AND CURRENT ROW
) AS cummulative_salary
,(
sum(annual_salary) OVER (
PARTITION BY employee_id ORDER BY salary_year ROWS BETWEEN UNBOUNDED PRECEDING
AND CURRENT ROW
)
) + (
sum(annual_salary) OVER (
PARTITION BY employee_id ORDER BY salary_year ROWS BETWEEN UNBOUNDED PRECEDING
AND CURRENT ROW
)
) * (
sum(raise_in_salary_perentage) OVER (
PARTITION BY employee_id ORDER BY salary_year ROWS BETWEEN UNBOUNDED PRECEDING
AND CURRENT ROW
) / 100
) AS csalary
FROM test_table;
Based on your description, the increase in salary should be cumulative. However, a given year's increase should not affect previous years.
That is not what your desired results show. Based on my interpretation, I think you want:
with recursive cte as (
select employee_id, salary_year, (t.annual_salary * (1 + raise_in_salary_perentage / 100.0))::numeric(18, 2) as annual_salary,
raise_in_salary_perentage,
(t.annual_salary * (1 + raise_in_salary_perentage / 100.0))::numeric(18, 2) as total
from test_table t
where salary_year = 2016
union all
select t.employee_id, t.salary_year, (cte.annual_salary * (1 + t.raise_in_salary_perentage / 100.0))::numeric(18, 2),
t.raise_in_salary_perentage,
(cte.total + cte.annual_salary * (1 + t.raise_in_salary_perentage / 100.0))::numeric(18, 2)
from cte join
test_table t
on t.employee_id = cte.employee_id and t.salary_year = cte.salary_year + 1
)
select *
from cte
order by employee_id, salary_year;
Here is a db<>fiddle.
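To check the logic by hand for employee 1: 2016 gives 100.00 (no raise), running total 100.00; 2017 gives 110.00, total 210.00; 2018 gives 121.00, total 331.00; 2019 stays at 121.00 (no raise), total 452.00; 2020 gives 133.10, total 585.10. Each raise compounds on the previous year's salary without retroactively changing earlier years, which is exactly what the recursive step computes.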

How to secure a table to avoid duplicate data

I can't figure out how to protect my table against inserting duplicate combinations of attributes_positions. The best way to show what I mean is the following image:
The column id_combination represents the number of the combination. A combination consists of attributes_positions, so a combination is a sequence of attributes_positions.
I would now like to protect the table from inserting exactly the same sequence of attributes_positions.
Of course, it is fine if an already inserted combination contains one additional attributes_positions, or one fewer, than the combination being inserted.
The image shows the difference between a duplicate and a non-duplicate combination.
Is there some way I can do that? Maybe something like a 'before update' trigger, but how would I implement it for this example? I'm not very good with advanced SQL.
The database where I am trying to secure the table is PostgreSQL 9.4.
I will be grateful for help.
-- The data
CREATE TABLE theset (
set_id INTEGER NOT NULL PRIMARY KEY
, set_name text UNIQUE
);
INSERT INTO theset(set_id, set_name) VALUES
( 1, 'one'), ( 2, 'two'), ( 3, 'three'), ( 4, 'four');
CREATE TABLE theitem (
item_id integer NOT NULL PRIMARY KEY
, item_name text UNIQUE
);
INSERT INTO theitem(item_id, item_name) VALUES
( 1, 'one'), ( 2, 'two'), ( 3, 'three'), ( 4, 'four'), ( 5, 'five');
CREATE TABLE set_item (
set_id integer NOT NULL REFERENCES theset (set_id)
, item_id integer NOT NULL REFERENCES theitem(item_id)
, PRIMARY KEY (set_id,item_id)
);
-- swapped index is indicated for junction tables
CREATE UNIQUE INDEX ON set_item(item_id, set_id);
INSERT INTO set_item(set_id,item_id) VALUES
(1,1), (1,2), (1,3), (1,4),
(2,1), (2,2), (2,3), -- (2,4),
(3,1), (3,2), (3,3), (3,4), (3,5),
(4,1), (4,2), (4,4);
CREATE FUNCTION set_item_unique_set( ) RETURNS TRIGGER AS
$func$
BEGIN
IF EXISTS ( -- other set
SELECT * FROM theset oth
-- WHERE oth.set_id <> NEW.set_id -- only for insert/update
WHERE TG_OP = 'DELETE' AND oth.set_id <> OLD.set_id
OR TG_OP <> 'DELETE' AND oth.set_id <> NEW.set_id
-- count (common) members in the two sets
-- items not in common will have count=1
AND NOT EXISTS (
SELECT item_id FROM set_item x1
WHERE (x1.set_id = NEW.set_id OR x1.set_id = oth.set_id )
GROUP BY item_id
HAVING COUNT(*) = 1
)
) THEN
RAISE EXCEPTION 'Not unique set';
RETURN NULL;
ELSE
RETURN NEW;
END IF;
END;
$func$ LANGUAGE 'plpgsql'
;
CREATE CONSTRAINT TRIGGER check_item_set_unique
AFTER UPDATE OR INSERT OR DELETE
-- BEFORE UPDATE OR INSERT
ON set_item
FOR EACH ROW
EXECUTE PROCEDURE set_item_unique_set()
;
-- Test it
INSERT INTO set_item(set_id,item_id) VALUES(4,5); -- success
INSERT INTO set_item(set_id,item_id) VALUES(2,4); -- failure
DELETE FROM set_item WHERE set_id=1 AND item_id= 4; -- failure
Note: There should also be a trigger for the DELETE case.
UPDATE: added handling of DELETE
(the handling of deletes is not perfect; imagine the case where removing the last element from a set leaves its contents identical to those of another set)
My answer assumes that the target is without dupes, and that we want to insert a new set, which happens to be a duplicate. I chose the group of 4 with the id_comb of 1.
You would have to put the group of 4 into a staging table. Then, you have to pivot both staging and target horizontally - so that you get 5 columns named attr_pos1 to attr_pos5 (the biggest group in your example is 5). To pivot, you need a sequence number, which we get by using ROW_NUMBER(). That's for both tables, staging and target. Then, you pivot both. Then, you try to join pivoted staging and target on all 5 attr_pos# columns, and count the rows. If you get 0, you have no duplicates. If you get 1, you have duplicates.
Here's the whole scenario:
WITH
-- input section: a) target table, no dupes
target(id_comb,attr_pos) AS (
SELECT 2,1
UNION ALL SELECT 2,2
UNION ALL SELECT 2,3
UNION ALL SELECT 2,4
UNION ALL SELECT 3,1
UNION ALL SELECT 3,2
UNION ALL SELECT 3,3
UNION ALL SELECT 3,4
UNION ALL SELECT 3,5
UNION ALL SELECT 4,1
UNION ALL SELECT 4,2
UNION ALL SELECT 4,3
)
,
-- input section: b) staging, input, would be a dupe
staging(id_comb,attr_pos) AS (
SELECT 1,1
UNION ALL SELECT 1,2
UNION ALL SELECT 1,3
UNION ALL SELECT 1,4
)
,
-- query section:
-- add sequence numbers to stage and target
target_s AS (
SELECT
ROW_NUMBER() OVER(PARTITION BY id_comb ORDER BY attr_pos) AS seq
, *
FROM target
)
,
staging_s AS (
SELECT
ROW_NUMBER() OVER(PARTITION BY id_comb ORDER BY attr_pos) AS seq
, *
FROM staging
)
,
-- horizontally pivot target, NULLS as -1 for later join
target_h AS (
SELECT
id_comb
, COALESCE(MAX(CASE seq WHEN 1 THEN attr_pos END),-1) AS attr_pos1
, COALESCE(MAX(CASE seq WHEN 2 THEN attr_pos END),-1) AS attr_pos2
, COALESCE(MAX(CASE seq WHEN 3 THEN attr_pos END),-1) AS attr_pos3
, COALESCE(MAX(CASE seq WHEN 4 THEN attr_pos END),-1) AS attr_pos4
, COALESCE(MAX(CASE seq WHEN 5 THEN attr_pos END),-1) AS attr_pos5
FROM target_s
GROUP BY id_comb ORDER BY id_comb
)
,
-- horizontally pivot staging, NULLS as -1 for later join
staging_h AS (
SELECT
id_comb
, COALESCE(MAX(CASE seq WHEN 1 THEN attr_pos END),-1) AS attr_pos1
, COALESCE(MAX(CASE seq WHEN 2 THEN attr_pos END),-1) AS attr_pos2
, COALESCE(MAX(CASE seq WHEN 3 THEN attr_pos END),-1) AS attr_pos3
, COALESCE(MAX(CASE seq WHEN 4 THEN attr_pos END),-1) AS attr_pos4
, COALESCE(MAX(CASE seq WHEN 5 THEN attr_pos END),-1) AS attr_pos5
FROM staging_s
GROUP BY id_comb ORDER BY id_comb
)
SELECT
COUNT(*)
FROM target_h
JOIN staging_h USING (
attr_pos1
, attr_pos2
, attr_pos3
, attr_pos4
, attr_pos5
);
Hope this helps.
Marco
Interesting but not very useful solution by @wildplasser. I created a script to insert sample data:
WITH param AS (
SELECT 8 AS max
), maxarray AS (
SELECT array_agg(i) as ma FROM (SELECT generate_series(1, max) as i FROM param) as i
), pre AS (
SELECT
*
FROM (
SELECT
*, CASE WHEN (id >> mbit) & 1 = 1 THEN ma[mbit + 1] END AS item_id
FROM (
SELECT *,
generate_series(0, array_upper(ma, 1) - 1) as mbit
FROM (
SELECT *,
generate_series(1,(2^max - 1)::int8) AS id
FROM param, maxarray
) AS pre1
) AS pre2
) AS pre3
WHERE item_id IS NOT NULL
), ins_item AS (
INSERT INTO theitem (item_id, item_name) SELECT i, i::text FROM generate_series(1, (SELECT max FROM param)) as i RETURNING *
), ins_set AS (
INSERT INTO theset (set_id, set_name)
SELECT id, id::text FROM generate_series(1, (SELECT 2^max - 1 FROM param)::int8) as id
RETURNING *
), ins_set_item AS (
INSERT INTO set_item (set_id, item_id)
SELECT id, item_id FROM pre WHERE (SELECT count(*) FROM ins_item) > 0 AND (SELECT count(*) FROM ins_set) > 0
RETURNING *
)
SELECT
'sets', count(*)
FROM ins_set
UNION ALL
SELECT
'items', count(*)
FROM ins_item
UNION ALL
SELECT
'sets_items', count(*)
FROM ins_set_item
;
When I call it with 8 (2^8 - 1 = 255 sets, 1024 rows in set_item) it runs for 21 seconds. That is very bad. When I disable the trigger it takes less than 1 millisecond.
My proposal
It is very interesting to use arrays in this case. Unfortunately PostgreSQL does not support foreign keys for arrays, but it can be done with TRIGGERs. I removed the set_item table and added an items int[] column to theset:
-- The data
CREATE TABLE theitem (
item_id integer NOT NULL PRIMARY KEY
, item_name text UNIQUE
);
CREATE TABLE theset (
set_id INTEGER NOT NULL PRIMARY KEY
, set_name text UNIQUE
, items integer[] UNIQUE NOT NULL
);
CREATE INDEX i1 ON theset USING gin (items);
CREATE OR REPLACE FUNCTION check_item_CU() RETURNS TRIGGER AS $sql$
BEGIN
IF (SELECT count(*) > 0 FROM unnest(NEW.items) AS u LEFT JOIN theitem ON (item_id = u) WHERE item_id IS NULL) THEN
RETURN NULL;
END IF;
NEW.items = ARRAY(SELECT unnest(NEW.items) ORDER BY 1);
RETURN NEW;
END;
$sql$ LANGUAGE plpgsql;
CREATE TRIGGER check_item_CU BEFORE INSERT OR UPDATE ON theset FOR EACH ROW EXECUTE PROCEDURE check_item_CU();
CREATE OR REPLACE FUNCTION check_item_UD() RETURNS TRIGGER AS $sql$
BEGIN
IF (TG_OP = 'DELETE' OR TG_OP = 'UPDATE' AND NEW.item_id != OLD.item_id) AND (SELECT count(*) > 0 FROM theset WHERE OLD.item_id = ANY(items)) THEN
RAISE EXCEPTION 'item_id % still used', OLD.item_id;
RETURN NULL;
END IF;
RETURN NEW;
END;
$sql$ LANGUAGE plpgsql;
CREATE TRIGGER check_item_UD BEFORE DELETE OR UPDATE ON theitem FOR EACH ROW EXECUTE PROCEDURE check_item_UD();
WITH param AS (
SELECT 10 AS max
), maxarray AS (
SELECT array_agg(i) as ma FROM (SELECT generate_series(1, max) as i FROM param) as i
), pre AS (
SELECT
*
FROM (
SELECT
*, CASE WHEN (id >> mbit) & 1 = 1 THEN ma[mbit + 1] END AS item_id
FROM (
SELECT *,
generate_series(0, array_upper(ma, 1) - 1) as mbit
FROM (
SELECT *,
generate_series(1,(2^max - 1)::int8) AS id
FROM param, maxarray
) AS pre1
) AS pre2
) AS pre3
WHERE item_id IS NOT NULL
), pre_arr AS (
SELECT id, array_agg(item_id) AS items
FROM pre
GROUP BY 1
), ins_item AS (
INSERT INTO theitem (item_id, item_name) SELECT i, i::text FROM generate_series(1, (SELECT max FROM param)) as i RETURNING *
), ins_set AS (
INSERT INTO theset (set_id, set_name, items)
SELECT id, id::text, items FROM pre_arr WHERE (SELECT count(*) FROM ins_item) > 0
RETURNING *
)
SELECT
'sets', count(*)
FROM ins_set
UNION ALL
SELECT
'items', count(*)
FROM ins_item
;
This variant runs in less than 1 ms.
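A quick demonstration of how the array variant blocks duplicates (my own test values, not part of the script above; it assumes the referenced item_ids already exist in theitem, otherwise the BEFORE trigger silently skips the insert):
INSERT INTO theset (set_id, set_name, items) VALUES (1001, 'demo_a', '{1,2,3}'); -- ok
INSERT INTO theset (set_id, set_name, items) VALUES (1002, 'demo_b', '{3,1,2}'); -- trigger sorts it to {1,2,3}, so the UNIQUE constraint raises unique_violation
-- the GIN index also supports containment lookups, e.g. all sets containing item 3:
SELECT set_id, set_name FROM theset WHERE items @> ARRAY[3];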

Reference lookup replace in Oracle

I'm looking for an approach to a hash-and-replace algorithm in Oracle.
I have a customer table with customer ID and customer phone number. I have another table with a list of reference phone number values. I want to use these reference values to replace the phone numbers consistently in the main tables.
There should be consistent masking when applying this replace logic, i.e. wherever the same customer appears in a different table, the phone number replacement should be the same.
Main table name 1 :- CUS_PHONE_NUM
Main table name 2 :- CUS_PHONE_NUM_2
Reference table name :- REF_PHONE_NUMBER
I have tried the approach below, but the performance is very bad: for 20,000 customers it took 18 minutes. Can you please suggest a better approach to meet this requirement?
DROP TABLE CUS_PHONE_NUM ;
DROP TABLE CUS_PHONE_NUM_2 ;
DROP TABLE REF_PHONE_NUMBER ;
-- Main table 1
CREATE TABLE CUS_PHONE_NUM (
CUS_ID VARCHAR2(09) ,
PHONE_NUMBER NUMBER(12) );
-- Main table 2
CREATE TABLE CUS_PHONE_NUM_2 (
CUS_ID VARCHAR2(09) ,
PHONE_NUMBER NUMBER(12) );
-- reference table
CREATE TABLE REF_PHONE_NUMBER ( REF_PHONE_NUMBER_VALUE NUMBER(12));
INSERT INTO REF_PHONE_NUMBER VALUES ( 0470134321) ;
INSERT INTO REF_PHONE_NUMBER VALUES ( 0470134322) ;
INSERT INTO REF_PHONE_NUMBER VALUES ( 0470134323) ;
INSERT INTO REF_PHONE_NUMBER VALUES ( 0470134324) ;
INSERT INTO REF_PHONE_NUMBER VALUES ( 0470134325) ;
INSERT INTO REF_PHONE_NUMBER VALUES ( 0470134326) ;
INSERT INTO REF_PHONE_NUMBER VALUES ( 0470134327) ;
INSERT INTO REF_PHONE_NUMBER VALUES ( 0470134328) ;
INSERT INTO REF_PHONE_NUMBER VALUES ( 0470134329) ;
INSERT INTO REF_PHONE_NUMBER VALUES ( 0470134320) ;
TRUNCATE TABLE CUS_PHONE_NUM ;
COMMIT;
INSERT INTO CUS_PHONE_NUM VALUES ( '401795401' , 0426881030 ) ;
INSERT INTO CUS_PHONE_NUM VALUES ( '401795402' , 0426881031 ) ;
INSERT INTO CUS_PHONE_NUM VALUES ( '401795403' , 0426881032 ) ;
INSERT INTO CUS_PHONE_NUM VALUES ( '401795404' , 0426881033 ) ;
INSERT INTO CUS_PHONE_NUM VALUES ( '401795405' , 0426881034 ) ;
INSERT INTO CUS_PHONE_NUM VALUES ( '401795406' , 0426881035 ) ;
INSERT INTO CUS_PHONE_NUM VALUES ( '401795407' , 0426881036 ) ;
TRUNCATE TABLE CUS_PHONE_NUM_2 ;
COMMIT;
INSERT INTO CUS_PHONE_NUM_2 VALUES ( '401795401' , 0426881030 ) ;
INSERT INTO CUS_PHONE_NUM_2 VALUES ( '401795403' , 0426881032 ) ;
INSERT INTO CUS_PHONE_NUM_2 VALUES ( '401795405' , 0426881034 ) ;
INSERT INTO CUS_PHONE_NUM_2 VALUES ( '401795407' , 0426881036 ) ;
COMMIT ;
-- SQL1 to replace reference values for Main table 1 - CUS_PHONE_NUM
WITH REF_PHONE_NUMBER_HASH AS
(
SELECT REF_PHONE_NUMBER_VALUE ,
row_number() over(order by REF_PHONE_NUMBER_VALUE) - 1 REF_PHONE_NUMBER_VALUE_HASH,
count(*) over() max_buckets
FROM REF_PHONE_NUMBER
),
CUS_PHONE_NUM_HASH AS
(select CUS_ID,
PHONE_NUMBER
from CUS_PHONE_NUM )
SELECT c.*, n.* ,ora_hash(PHONE_NUMBER, n.max_buckets)
FROM CUS_PHONE_NUM_HASH c
JOIN REF_PHONE_NUMBER_HASH n
ON ora_hash(PHONE_NUMBER, n.max_buckets) = n.REF_PHONE_NUMBER_VALUE_HASH;
-- SQL1 to replace reference values for Main table 2 - CUS_PHONE_NUM_2
WITH REF_PHONE_NUMBER_HASH AS
(
SELECT REF_PHONE_NUMBER_VALUE ,
row_number() over(order by REF_PHONE_NUMBER_VALUE) - 1 REF_PHONE_NUMBER_VALUE_HASH,
count(*) over() max_buckets
FROM REF_PHONE_NUMBER
),
CUS_PHONE_NUM_HASH AS
(select CUS_ID,
PHONE_NUMBER
from CUS_PHONE_NUM_2 )
SELECT c.*, n.* ,ora_hash(PHONE_NUMBER, n.max_buckets)
FROM CUS_PHONE_NUM_HASH c
JOIN REF_PHONE_NUMBER_HASH n
ON ora_hash(PHONE_NUMBER, n.max_buckets) = n.REF_PHONE_NUMBER_VALUE_HASH;
Try to move max_buckets outside. It changed the explain plan on your test data, but I don't know how it will work on your real data, and I don't know your indexes either.
WITH REF_PHONE_NUMBER_HASH AS
(
SELECT REF_PHONE_NUMBER_VALUE ,
row_number() over(order by REF_PHONE_NUMBER_VALUE) - 1 REF_PHONE_NUMBER_VALUE_HASH
FROM REF_PHONE_NUMBER
),
CUS_PHONE_NUM_HASH AS
(select CUS_ID,
PHONE_NUMBER
from CUS_PHONE_NUM )
, BUCKET as
(select count(*) max_buckets from REF_PHONE_NUMBER)
SELECT c.*, n.* ,ora_hash(PHONE_NUMBER, b.max_buckets)
FROM CUS_PHONE_NUM_HASH c
cross join BUCKET b
JOIN REF_PHONE_NUMBER_HASH n
ON ora_hash(PHONE_NUMBER, b.max_buckets) = n.REF_PHONE_NUMBER_VALUE_HASH
Maybe you can also think about hard-coding the max_buckets value. After hard-coding it, you can create an index on that expression:
create index idx_CUS_PHONE_NUM_01 on CUS_PHONE_NUM
( ora_hash(phone_number, 255) );
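With a hard-coded bucket count, the join predicate can then match the indexed expression exactly. One detail to watch: ORA_HASH(expr, max_bucket) returns values from 0 to max_bucket inclusive, so with N reference rows the second argument should be N - 1 (the 255 below assumes 256 reference rows, which is my assumption, not the 10-row sample above). A sketch:
-- buckets 0..255 line up with 256 reference rows numbered 0..255
SELECT c.cus_id,
n.ref_phone_number_value AS masked_phone
FROM cus_phone_num c
JOIN (SELECT ref_phone_number_value,
ROW_NUMBER() OVER (ORDER BY ref_phone_number_value) - 1 AS bucket
FROM ref_phone_number) n
ON ora_hash(c.phone_number, 255) = n.bucket;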