Using Multi-dimensional Analysis Functions

Multi-dimensional Analysis

In real business scenarios, we often need to group data. Suppose we have a school's student data:
to see the number of students in each grade, we group by grade;
to see the number of students in each class, we group by class;
to see the number of students in each study group, we group by study group.

As you can see, grade -> class -> study group forms multiple dimensions, or, put another way, different depths of a single dimension.
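Each of these on its own is just an ordinary GROUP BY. A minimal sketch, assuming a school table with stuId, grade, and class columns (groupId is a hypothetical column for the study-group level; the examples below only use grade and class):

-- Students per grade
SELECT grade, COUNT(DISTINCT stuId) AS stu_cnt
FROM school
GROUP BY grade;

-- Students per class
SELECT class, COUNT(DISTINCT stuId) AS stu_cnt
FROM school
GROUP BY class;

-- Students per study group (groupId is hypothetical)
SELECT groupId, COUNT(DISTINCT stuId) AS stu_cnt
FROM school
GROUP BY groupId;

The interesting question is how to compute several such groupings in a single query, which is what the rest of this section compares.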

Pros and Cons of Grouping Sets vs. GROUP BY

GROUP BY

-- Grouping with UNION ALL (the common approach)
-- All students + students per grade + students per class
SELECT stuId, 'all' AS key, 'all' AS value, dt
FROM school
GROUP BY stuId, dt
UNION ALL
SELECT stuId, 'grade' AS key, grade AS value, dt
FROM school
GROUP BY grade, stuId, dt
UNION ALL
SELECT stuId, 'class' AS key, class AS value, dt
FROM school
GROUP BY class, stuId, dt

-- Job count (Spark/Hive): 9/3

-- Pros
-- Grouping columns can be chosen freely, and extra columns can be added directly.

-- Cons
-- When the business needs many different groupings, the SQL becomes very repetitive.
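The tagged rows above still need a final aggregation to become counts; a minimal sketch of that follow-up step, using a CTE (the tagged name is ours):

-- Count students per (key, value) pair on top of the UNION ALL above
WITH tagged AS (
  SELECT stuId, 'all' AS key, 'all' AS value, dt FROM school GROUP BY stuId, dt
  UNION ALL
  SELECT stuId, 'grade' AS key, grade AS value, dt FROM school GROUP BY grade, stuId, dt
  UNION ALL
  SELECT stuId, 'class' AS key, class AS value, dt FROM school GROUP BY class, stuId, dt
)
SELECT key, value, dt, COUNT(DISTINCT stuId) AS stu_cnt
FROM tagged
GROUP BY key, value, dt;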

Grouping Sets

-- Multi-dimensional analysis function (less commonly used)
SELECT stuId,
       CASE WHEN grade IS NULL AND class IS NULL THEN 'all'
            WHEN grade IS NOT NULL THEN 'grade'
            ELSE 'class' END AS key,
       CASE WHEN grade IS NULL AND class IS NULL THEN 'all'
            WHEN grade IS NOT NULL THEN grade
            ELSE class END AS value,
       dt
FROM (
  SELECT stuId, grade, class, dt
  FROM school
  GROUP BY grade, class, stuId, dt
  GROUPING SETS ((stuId, dt), (grade, stuId, dt), (class, stuId, dt))
) a

-- Job count (Spark/Hive): 3/1

-- Pros
-- In Hive/Spark, the execution plan is better than the GROUP BY + UNION ALL approach.

-- Cons
-- The biggest drawback: columns absent from a grouping set are filled with NULL,
-- which is ambiguous when you cannot be sure the original data is free of NULLs itself.

-- Additional note
-- The grouping__id pseudo-column addresses this drawback.
-- grouping__id is a bit vector, computed as follows (this is the legacy Hive behavior;
-- Hive 2.3.0 changed GROUPING__ID to the SQL-standard semantics):
-- with the four grouping columns grade, class, stuId, dt, start from 0000;
-- a bit is 1 if the column appears in the grouping set, 0 otherwise.
-- Gotcha: the rightmost bit stands for the 1st column, and so on;
-- the bits are then reversed and read off as a decimal value.
-- (stuId, dt)        -> 0011 -> 1100 -> 12
-- (grade, stuId, dt) -> 1011 -> 1101 -> 13
-- (class, stuId, dt) -> 0111 -> 1110 -> 14
-- Even when a grouping column's real value is NULL, grouping__id still tells the sets apart.
-- e.g., with genuine NULLs in the other grouping columns:
-- 10086, 2020-05-13, null, null, 12 -> (grouping set (stuId, dt))
-- 10086, 2020-05-13, null, null, 13 -> (grouping set (grade, stuId, dt), but grade itself is NULL)
-- This makes it easy to identify which columns were actually grouped.
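Putting GROUPING__ID to work, a sketch of the same query keyed on the legacy integer values derived above (12/13/14) instead of NULL checks; the gid alias is ours:

SELECT stuId,
       CASE gid WHEN 12 THEN 'all'
                WHEN 13 THEN 'grade'
                WHEN 14 THEN 'class' END AS key,
       CASE gid WHEN 12 THEN 'all'
                WHEN 13 THEN grade
                ELSE class END AS value,
       dt
FROM (
  SELECT stuId, grade, class, dt, GROUPING__ID AS gid
  FROM school
  GROUP BY grade, class, stuId, dt
  GROUPING SETS ((stuId, dt), (grade, stuId, dt), (class, stuId, dt))
) a

This stays correct even when grade or class is genuinely NULL in the source data.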

Cube

SELECT gradeID, classID, MAX(score)
FROM exam
GROUP BY gradeID, classID
WITH CUBE

-- Equivalent to
SELECT gradeID, classID, MAX(score)
FROM exam
GROUP BY gradeID, classID
UNION ALL
SELECT gradeID, NULL, MAX(score)
FROM exam
GROUP BY gradeID
UNION ALL
SELECT NULL, classID, MAX(score)
FROM exam
GROUP BY classID
UNION ALL
SELECT NULL, NULL, MAX(score)
FROM exam
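WITH CUBE over n columns expands to all 2^n grouping sets, so the same query can also be written explicitly:

-- The same cube as explicit grouping sets
SELECT gradeID, classID, MAX(score)
FROM exam
GROUP BY gradeID, classID
GROUPING SETS ((gradeID, classID), (gradeID), (classID), ())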


Rollup

SELECT gradeID, classID, MAX(score)
FROM exam
GROUP BY gradeID, classID
WITH ROLLUP

-- Equivalent to
SELECT gradeID, classID, MAX(score)
FROM exam
GROUP BY gradeID, classID
UNION ALL
SELECT gradeID, NULL, MAX(score)
FROM exam
GROUP BY gradeID
UNION ALL
SELECT NULL, NULL, MAX(score)
FROM exam;
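Unlike CUBE, WITH ROLLUP only aggregates along the declared column order (detail -> per-grade -> grand total), matching the "different depths of one dimension" idea from the introduction; written as explicit grouping sets:

-- The same rollup as explicit grouping sets
SELECT gradeID, classID, MAX(score)
FROM exam
GROUP BY gradeID, classID
GROUPING SETS ((gradeID, classID), (gradeID), ())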

Usage in Flink

DROP TABLE IF EXISTS gen_data;
CREATE TABLE gen_data (
  supplier_id STRING,
  product_id STRING,
  rating STRING
) WITH (
  'connector' = 'datagen',
  'rows-per-second' = '100',
  'fields.supplier_id.length' = '1',
  'fields.product_id.length' = '1',
  'fields.rating.length' = '1'
);

DROP VIEW IF EXISTS temp_s;
CREATE VIEW temp_s AS
SELECT GROUPING_ID(supplier_id, rating) AS grouping_id, supplier_id, rating, COUNT(*) AS total
FROM gen_data
GROUP BY GROUPING SETS ((supplier_id, rating), (supplier_id), ());

DROP TABLE IF EXISTS s_r;
DROP TABLE IF EXISTS s;

CREATE TABLE s_r (
  supplier_id VARCHAR,
  rating VARCHAR,
  total BIGINT,
  PRIMARY KEY (`supplier_id`, `rating`) NOT ENFORCED
) WITH (
  'connector' = 'kafka',
  'topic' = 's_r',
  'properties.bootstrap.servers' = 'localhost:9092',
  'format' = 'canal-json',
  'canal-json.ignore-parse-errors' = 'true',
  'sink.partitioner' = 'fixed'
);
CREATE TABLE s (
  supplier_id VARCHAR,
  rating VARCHAR,
  total BIGINT,
  PRIMARY KEY (`supplier_id`) NOT ENFORCED
) WITH (
  'connector' = 'kafka',
  'topic' = 's',
  'properties.bootstrap.servers' = 'localhost:9092',
  'format' = 'canal-json',
  'canal-json.ignore-parse-errors' = 'true',
  'sink.partitioner' = 'fixed'
);

-- In Zeppelin, set runAsOne=true so these INSERTs run in a single job
-- grouping_id = 0: the (supplier_id, rating) grouping set (both columns present)
INSERT INTO s_r
SELECT CAST(supplier_id AS STRING) AS supplier_id, CAST(rating AS STRING) AS rating, total
FROM temp_s
WHERE grouping_id = 0;

-- grouping_id = 1: the (supplier_id) grouping set (rating aggregated away)
INSERT INTO s
SELECT CAST(supplier_id AS STRING) AS supplier_id, CAST(rating AS STRING) AS rating, total
FROM temp_s
WHERE grouping_id = 1;
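The view also emits the empty grouping set () with grouping_id = 3 (both columns aggregated away), which the two INSERTs above ignore. A sketch of routing it to a third sink in the same style; the s_total table name and topic are hypothetical:

-- Hypothetical sink for the global total (the empty grouping set)
CREATE TABLE s_total (
  total BIGINT
) WITH (
  'connector' = 'kafka',
  'topic' = 's_total',
  'properties.bootstrap.servers' = 'localhost:9092',
  'format' = 'canal-json',
  'canal-json.ignore-parse-errors' = 'true',
  'sink.partitioner' = 'fixed'
);

-- grouping_id = 3: both supplier_id and rating aggregated away
INSERT INTO s_total
SELECT total
FROM temp_s
WHERE grouping_id = 3;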