"htmlcode">

item_id int,
 created_time datetime,
 modified_time datetime,
 item_name varchar(20),
 other varchar(20)

Requirements:

1. The source table contains 1,000,000 rows, of which 500,000 have duplicate created_time and item_name values.
2. The 500,000 deduplicated rows must be written to the target table.
3. Among rows that share the same created_time and item_name, any one row may be kept; no particular rule applies.

"htmlcode">

-- Create the source table
create table t_source 
( item_id int, 
 created_time datetime, 
 modified_time datetime, 
 item_name varchar(20), 
 other varchar(20) 
); 
-- Create the target table
create table t_target like t_source; 
-- Generate 1,000,000 test rows, 500,000 of which duplicate created_time and item_name
delimiter // 
create procedure sp_generate_data() 
begin 
 set @i := 1; 
 while @i<=500000 do 
 set @created_time := date_add('2017-01-01',interval @i second); 
 set @modified_time := @created_time; 
 set @item_name := concat('a',@i); 
 insert into t_source 
 values (@i,@created_time,@modified_time,@item_name,'other'); 
 set @i:=@i+1; 
 end while; 
 commit; 
 set @last_insert_id := 500000; 
 insert into t_source 
 select item_id + @last_insert_id, 
 created_time, 
 date_add(modified_time,interval @last_insert_id second), 
 item_name, 
 'other' 
 from t_source; 
 commit;
end 
// 
delimiter ; 
call sp_generate_data(); 

-- The source table has no primary key or unique constraint, so two completely identical rows could exist; insert one more row to simulate that case.
insert into t_source select * from t_source where item_id=1;

The source table now has 1,000,001 rows, and the deduplicated target table should contain 500,000 rows.
mysql> select count(*),count(distinct created_time,item_name) from t_source;
+----------+----------------------------------------+
| count(*) | count(distinct created_time,item_name) |
+----------+----------------------------------------+
| 1000001 |   500000 |
+----------+----------------------------------------+
1 row in set (1.92 sec)

I. Making clever use of indexes and variables

1. Comparison tests without an index

(1) Using a correlated subquery

truncate t_target; 
insert into t_target 
select distinct t1.* from t_source t1 where item_id in 
(select min(item_id) from t_source t2 where t1.created_time=t2.created_time and t1.item_name=t2.item_name);

This statement runs for a very long time without producing a result, so let's just look at its execution plan.

mysql> explain select distinct t1.* from t_source t1 where item_id in 
 -> (select min(item_id) from t_source t2 where t1.created_time=t2.created_time and t1.item_name=t2.item_name); 
+----+--------------------+-------+------------+------+---------------+------+---------+------+--------+----------+------------------------------+
| id | select_type | table | partitions | type | possible_keys | key | key_len | ref | rows | filtered | Extra  |
+----+--------------------+-------+------------+------+---------------+------+---------+------+--------+----------+------------------------------+
| 1 | PRIMARY | t1 | NULL | ALL | NULL | NULL | NULL | NULL | 997282 | 100.00 | Using where; Using temporary |
| 2 | DEPENDENT SUBQUERY | t2 | NULL | ALL | NULL | NULL | NULL | NULL | 997282 | 1.00 | Using where  |
+----+--------------------+-------+------------+------+---------------+------+---------+------+--------+----------+------------------------------+
2 rows in set, 3 warnings (0.00 sec)

Both the outer query and the correlated subquery do full table scans, which amounts to scanning on the order of 1,000,000 × 1,000,000 row combinations. No wonder no result ever comes back.

(2) Using a table join

truncate t_target; 
insert into t_target 
select distinct t1.* from t_source t1, 
(select min(item_id) item_id,created_time,item_name from t_source group by created_time,item_name) t2 
where t1.item_id = t2.item_id;

This approach takes 14 seconds; the query plan is as follows:

mysql> explain select distinct t1.* from t_source t1, (select min(item_id) item_id,created_time,item_name from t_source group by created_time,item_name) t2 where t1.item_id = t2.item_id;
+----+-------------+------------+------------+------+---------------+-------------+---------+-----------------+--------+----------+------------------------------+
| id | select_type | table | partitions | type | possible_keys | key | key_len | ref | rows | filtered | Extra  |
+----+-------------+------------+------------+------+---------------+-------------+---------+-----------------+--------+----------+------------------------------+
| 1 | PRIMARY | t1 | NULL | ALL | NULL | NULL | NULL | NULL | 997282 | 100.00 | Using where; Using temporary |
| 1 | PRIMARY | <derived2> | NULL | ref | <auto_key0> | <auto_key0> | 5 | test.t1.item_id | 10 | 100.00 | Distinct  |
| 2 | DERIVED | t_source | NULL | ALL | NULL | NULL | NULL | NULL | 997282 | 100.00 | Using temporary |
+----+-------------+------------+------------+------+---------------+-------------+---------+-----------------+--------+----------+------------------------------+
3 rows in set, 1 warning (0.00 sec)

"htmlcode">

set @a:='1000-01-01 00:00:00'; 
set @b:=' '; 
set @f:=0; 
truncate t_target; 
insert into t_target 
select item_id,created_time,modified_time,item_name,other 
 from 
(select t0.*,if(@a=created_time and @b=item_name,@f:=0,@f:=1) f, @a:=created_time,@b:=item_name 
 from 
(select * from t_source order by created_time,item_name) t0) t1 where f=1;

This approach takes 13 seconds; the query plan is as follows:

mysql> explain select item_id,created_time,modified_time,item_name,other 
 -> from 
 -> (select t0.*,if(@a=created_time and @b=item_name,@f:=0,@f:=1) f, @a:=created_time,@b:=item_name 
 -> from 
 -> (select * from t_source order by created_time,item_name) t0) t1 where f=1; 
+----+-------------+------------+------------+------+---------------+-------------+---------+-------+--------+----------+----------------+
| id | select_type | table | partitions | type | possible_keys | key | key_len | ref | rows | filtered | Extra |
+----+-------------+------------+------------+------+---------------+-------------+---------+-------+--------+----------+----------------+
| 1 | PRIMARY | <derived2> | NULL | ref | <auto_key0> | <auto_key0> | 4 | const | 10 | 100.00 | NULL |
| 2 | DERIVED | <derived3> | NULL | ALL | NULL | NULL | NULL | NULL | 997282 | 100.00 | NULL |
| 3 | DERIVED | t_source | NULL | ALL | NULL | NULL | NULL | NULL | 997282 | 100.00 | Using filesort |
+----+-------------+------------+------------+------+---------------+-------------+---------+-------+--------+----------+----------------+
3 rows in set, 5 warnings (0.00 sec)

"htmlcode">

-- Create a composite index on created_time, item_name and item_id
create index idx_sort on t_source(created_time,item_name,item_id); 
analyze table t_source;

(1) Using a correlated subquery

truncate t_target; 
insert into t_target 
select distinct t1.* from t_source t1 where item_id in 
(select min(item_id) from t_source t2 where t1.created_time=t2.created_time and t1.item_name=t2.item_name);

This time it takes 19 seconds; the query plan is as follows:

mysql> explain select distinct t1.* from t_source t1 where item_id in 
 -> (select min(item_id) from t_source t2 where t1.created_time=t2.created_time and t1.item_name=t2.item_name); 
+----+--------------------+-------+------------+------+---------------+----------+---------+----------------------------------------+--------+----------+------------------------------+
| id | select_type | table | partitions | type | possible_keys | key | key_len | ref   | rows | filtered | Extra  |
+----+--------------------+-------+------------+------+---------------+----------+---------+----------------------------------------+--------+----------+------------------------------+
| 1 | PRIMARY | t1 | NULL | ALL | NULL | NULL | NULL | NULL   | 997281 | 100.00 | Using where; Using temporary |
| 2 | DEPENDENT SUBQUERY | t2 | NULL | ref | idx_sort | idx_sort | 89 | test.t1.created_time,test.t1.item_name | 2 | 100.00 | Using index  |
+----+--------------------+-------+------------+------+---------------+----------+---------+----------------------------------------+--------+----------+------------------------------+
2 rows in set, 3 warnings (0.00 sec)

"htmlcode">

truncate t_target; 
insert into t_target 
select distinct t1.* from t_source t1, 
(select min(item_id) item_id,created_time,item_name from t_source group by created_time,item_name) t2 
where t1.item_id = t2.item_id;

This time it takes 13 seconds; the query plan is as follows:

mysql> explain select distinct t1.* from t_source t1, 
 -> (select min(item_id) item_id,created_time,item_name from t_source group by created_time,item_name) t2 
 -> where t1.item_id = t2.item_id; 
+----+-------------+------------+------------+-------+---------------+-------------+---------+-----------------+--------+----------+------------------------------+
| id | select_type | table | partitions | type | possible_keys | key | key_len | ref | rows | filtered | Extra  |
+----+-------------+------------+------------+-------+---------------+-------------+---------+-----------------+--------+----------+------------------------------+
| 1 | PRIMARY | t1 | NULL | ALL | NULL | NULL | NULL | NULL | 997281 | 100.00 | Using where; Using temporary |
| 1 | PRIMARY | <derived2> | NULL | ref | <auto_key0> | <auto_key0> | 5 | test.t1.item_id | 10 | 100.00 | Distinct  |
| 2 | DERIVED | t_source | NULL | index | idx_sort | idx_sort | 94 | NULL | 997281 | 100.00 | Using index  |
+----+-------------+------------+------------+-------+---------------+-------------+---------+-----------------+--------+----------+------------------------------+
3 rows in set, 1 warning (0.00 sec)

Compared with the no-index case, the subquery has changed from a full table scan to a full index scan, but it still has to scan 1,000,000 rows, so the performance gain is not significant.

(3) Using variables

set @a:='1000-01-01 00:00:00'; 
set @b:=' '; 
set @f:=0; 
truncate t_target; 
insert into t_target 
select item_id,created_time,modified_time,item_name,other 
 from 
(select t0.*,if(@a=created_time and @b=item_name,@f:=0,@f:=1) f, @a:=created_time,@b:=item_name 
 from 
(select * from t_source order by created_time,item_name) t0) t1 where f=1; 

This again takes 13 seconds, and the query plan is exactly the same as without the index, so the index does nothing for this approach. Can the nesting be eliminated so that a single level of query produces the result?

(4) Using variables and eliminating the nested query

set @a:='1000-01-01 00:00:00'; 
set @b:=' '; 
truncate t_target; 
insert into t_target 
select * from t_source force index (idx_sort) 
 where (@a!=created_time or @b!=item_name) and (@a:=created_time) is not null and (@b:=item_name) is not null 
 order by created_time,item_name;

This takes 12 seconds; the query plan is as follows:

mysql> explain select * from t_source force index (idx_sort) 
 -> where (@a!=created_time or @b!=item_name) and (@a:=created_time) is not null and (@b:=item_name) is not null 
 -> order by created_time,item_name;
+----+-------------+----------+------------+-------+---------------+----------+---------+------+--------+----------+-------------+
| id | select_type | table | partitions | type | possible_keys | key | key_len | ref | rows | filtered | Extra |
+----+-------------+----------+------------+-------+---------------+----------+---------+------+--------+----------+-------------+
| 1 | SIMPLE | t_source | NULL | index | NULL | idx_sort | 94 | NULL | 997281 | 99.00 | Using where |
+----+-------------+----------+------------+-------+---------------+----------+---------+------+--------+----------+-------------+
1 row in set, 3 warnings (0.00 sec)

This statement has the following characteristics:

- The nested query is gone; the source table is scanned only once, in idx_sort order (forced with force index).
- The user variables @a and @b hold the previous row's created_time and item_name, so each row is compared with the one before it and is kept only when the (created_time, item_name) pair changes.
- The assignments (@a:=created_time) and (@b:=item_name) are written as always-true predicates; their only purpose is to update the variables for the next row.
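As a quick sanity check (not shown in the original), the deduplicated row count can be verified after the insert; the expected value comes from the earlier count(distinct created_time,item_name) query:

select count(*) from t_target;
-- expected: 500000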

"color: #ff0000">二、利用窗口函数

The window functions added in MySQL 8 make this once-cumbersome deduplication task very simple.

truncate t_target; 
insert into t_target 
select item_id, created_time, modified_time, item_name, other
 from (select *, row_number() over(partition by created_time,item_name) as rn
 from t_source) t where rn=1;

This statement takes only 12 seconds to run, and it is clear and easy to read. Its query plan is as follows:

mysql> explain select item_id, created_time, modified_time, item_name, other
 -> from (select *, row_number() over(partition by created_time,item_name) as rn
 -> from t_source) t where rn=1;
+----+-------------+------------+------------+------+---------------+-------------+---------+-------+--------+----------+----------------+
| id | select_type | table | partitions | type | possible_keys | key | key_len | ref | rows | filtered | Extra |
+----+-------------+------------+------------+------+---------------+-------------+---------+-------+--------+----------+----------------+
| 1 | PRIMARY | <derived2> | NULL | ref | <auto_key0> | <auto_key0> | 8 | const | 10 | 100.00 | NULL |
| 2 | DERIVED | t_source | NULL | ALL | NULL | NULL | NULL | NULL | 997281 | 100.00 | Using filesort |
+----+-------------+------------+------------+------+---------------+-------------+---------+-------+--------+----------+----------------+
2 rows in set, 2 warnings (0.00 sec)

This query does a single full table scan of t_source and uses a filesort to sort the table by the partition columns created_time and item_name. The outer query then keeps one row from each partition. Because any one of the rows sharing the same created_time and item_name may be kept, the OVER clause does not need an ORDER BY.
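If a deterministic choice were needed instead, say keeping the row with the smallest item_id in each group, an ORDER BY could be added inside OVER; a minimal sketch of that variant (not part of the original test):

truncate t_target;
insert into t_target
select item_id, created_time, modified_time, item_name, other
 from (select *, row_number() over(partition by created_time,item_name order by item_id) as rn
 from t_source) t where rn=1;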

Judging from the execution plan alone, the window-function statement looks no better than the variable-based statement with the nesting removed, yet in practice it is the fastest of all.

For details on MySQL window functions, see https://dev.mysql.com/doc/refman/8.0/en/window-functions.html.

III. Multi-threaded parallel execution

The single deduplication statement has now been tuned about as far as it will go, but it still runs on one thread. Can we take advantage of multiple processors and run the deduplication on several threads in parallel to speed it up further? For example, my test environment has 4 processors; if 4 threads run the deduplication SQL at the same time, the speedup should in theory approach 4×.

1. Data sharding

When the test data was generated, created_time was incremented by one second per row, so the difference between the maximum and minimum values is 500,000 seconds and the data is evenly distributed. Therefore, first split the data evenly into 4 shards.

(1) Query the created_time boundary values of the 4 shards

mysql> select date_add('2017-01-01',interval 125000 second) dt1,
 -> date_add('2017-01-01',interval 2*125000 second) dt2,
 -> date_add('2017-01-01',interval 3*125000 second) dt3,
 -> max(created_time) dt4
 -> from t_source;
+---------------------+---------------------+---------------------+---------------------+
| dt1   | dt2   | dt3   | dt4   |
+---------------------+---------------------+---------------------+---------------------+
| 2017-01-02 10:43:20 | 2017-01-03 21:26:40 | 2017-01-05 08:10:00 | 2017-01-06 18:53:20 |
+---------------------+---------------------+---------------------+---------------------+
1 row in set (0.00 sec)

(2) Check the row count of each shard to confirm that the data is evenly distributed

mysql> select case when created_time >= '2017-01-01' 
 ->  and created_time < '2017-01-02 10:43:20'
 ->  then '2017-01-01'
 ->  when created_time >= '2017-01-02 10:43:20'
 ->  and created_time < '2017-01-03 21:26:40'
 ->  then '2017-01-02 10:43:20'
 ->  when created_time >= '2017-01-03 21:26:40' 
 ->  and created_time < '2017-01-05 08:10:00'
 ->  then '2017-01-03 21:26:40' 
 ->  else '2017-01-05 08:10:00'
 ->  end min_dt,
 -> case when created_time >= '2017-01-01' 
 ->  and created_time < '2017-01-02 10:43:20'
 ->  then '2017-01-02 10:43:20'
 ->  when created_time >= '2017-01-02 10:43:20'
 ->  and created_time < '2017-01-03 21:26:40'
 ->  then '2017-01-03 21:26:40'
 ->  when created_time >= '2017-01-03 21:26:40' 
 ->  and created_time < '2017-01-05 08:10:00'
 ->  then '2017-01-05 08:10:00'
 ->  else '2017-01-06 18:53:20'
 ->  end max_dt,
 -> count(*)
 -> from t_source
 -> group by case when created_time >= '2017-01-01' 
 ->  and created_time < '2017-01-02 10:43:20'
 ->  then '2017-01-01'
 ->  when created_time >= '2017-01-02 10:43:20'
 ->  and created_time < '2017-01-03 21:26:40'
 ->  then '2017-01-02 10:43:20'
 ->  when created_time >= '2017-01-03 21:26:40' 
 ->  and created_time < '2017-01-05 08:10:00'
 ->  then '2017-01-03 21:26:40' 
 ->  else '2017-01-05 08:10:00'
 ->  end,
 -> case when created_time >= '2017-01-01' 
 ->  and created_time < '2017-01-02 10:43:20'
 ->  then '2017-01-02 10:43:20'
 ->  when created_time >= '2017-01-02 10:43:20'
 ->  and created_time < '2017-01-03 21:26:40'
 ->  then '2017-01-03 21:26:40'
 ->  when created_time >= '2017-01-03 21:26:40' 
 ->  and created_time < '2017-01-05 08:10:00'
 ->  then '2017-01-05 08:10:00'
 ->  else '2017-01-06 18:53:20'
 ->  end;
+---------------------+---------------------+----------+
| min_dt  | max_dt  | count(*) |
+---------------------+---------------------+----------+
| 2017-01-01  | 2017-01-02 10:43:20 | 249999 |
| 2017-01-02 10:43:20 | 2017-01-03 21:26:40 | 250000 |
| 2017-01-03 21:26:40 | 2017-01-05 08:10:00 | 250000 |
| 2017-01-05 08:10:00 | 2017-01-06 18:53:20 | 250002 |
+---------------------+---------------------+----------+
4 rows in set (4.86 sec)

The union of the 4 shards must cover the entire source data set, and the shards must not overlap. In other words, the created_time ranges of the 4 shards must be contiguous and mutually exclusive: contiguity guarantees that all data is processed, and mutual exclusion guarantees that no second round of deduplication is needed. This is essentially the same idea as range partitioning by time; a partitioned table might actually be a better fit, but that would require rebuilding the table, which is skipped here.
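As a sketch of the partitioned-table alternative mentioned above (hypothetical, not used in the tests), the same shard boundaries could be expressed with range partitioning on created_time:

-- Hypothetical range-partitioned layout using the boundary values queried above
create table t_source_part (
 item_id int,
 created_time datetime,
 modified_time datetime,
 item_name varchar(20),
 other varchar(20)
)
partition by range columns (created_time) (
 partition p1 values less than ('2017-01-02 10:43:20'),
 partition p2 values less than ('2017-01-03 21:26:40'),
 partition p3 values less than ('2017-01-05 08:10:00'),
 partition p4 values less than (maxvalue)
);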

2. Create a stored procedure for deduplication

With the information above we can write 4 statements that together process all of the data. To keep the calling interface as simple as possible, create the following stored procedure.

delimiter //
create procedure sp_unique(i smallint) 
begin 
 set @a:='1000-01-01 00:00:00'; 
 set @b:=' '; 
 if (i<4) then
 insert into t_target 
 select * from t_source force index (idx_sort) 
  where created_time >= date_add('2017-01-01',interval (i-1)*125000 second) 
  and created_time < date_add('2017-01-01',interval i*125000 second) 
  and (@a!=created_time or @b!=item_name) 
  and (@a:=created_time) is not null 
  and (@b:=item_name) is not null 
  order by created_time,item_name; 
 else 
 insert into t_target 
 select * from t_source force index (idx_sort) 
  where created_time >= date_add('2017-01-01',interval (i-1)*125000 second) 
  and created_time <= date_add('2017-01-01',interval i*125000 second) 
  and (@a!=created_time or @b!=item_name) 
  and (@a:=created_time) is not null 
  and (@b:=item_name) is not null 
  order by created_time,item_name; 
 end if; 
end 
//
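As a usage sketch (an assumption: run serially just to verify correctness before going parallel), the procedure can be called for the four shards one after another; the delimiter is restored first, since the listing above stops at //:

delimiter ;
truncate t_target;
call sp_unique(1);
call sp_unique(2);
call sp_unique(3);
call sp_unique(4);
-- expected: 500000
select count(*) from t_target;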

The execution plan of the query is as follows:

mysql> explain select * from t_source force index (idx_sort) 
 ->  where created_time >= date_add('2017-01-01',interval (1-1)*125000 second) 
 ->  and created_time < date_add('2017-01-01',interval 1*125000 second) 
 ->  and (@a!=created_time or @b!=item_name) 
 ->  and (@a:=created_time) is not null 
 ->  and (@b:=item_name) is not null 
 ->  order by created_time,item_name; 
+----+-------------+----------+------------+-------+---------------+----------+---------+------+--------+----------+-----------------------+
| id | select_type | table | partitions | type | possible_keys | key | key_len | ref | rows | filtered | Extra   |
+----+-------------+----------+------------+-------+---------------+----------+---------+------+--------+----------+-----------------------+
| 1 | SIMPLE | t_source | NULL | range | idx_sort | idx_sort | 6 | NULL | 498640 | 100.00 | Using index condition |
+----+-------------+----------+------------+-------+---------------+----------+---------+------+--------+----------+-----------------------+
1 row in set, 3 warnings (0.00 sec)

The MySQL optimizer performs an index range scan and uses Index Condition Pushdown (ICP) to optimize the query.
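ICP is enabled by default; if you want to confirm that or compare plans without it, the optimizer_switch flag can be inspected and toggled (an aside, not in the original):

-- returns 1 when index condition pushdown is enabled
select @@optimizer_switch like '%index_condition_pushdown=on%';
-- to compare plans without ICP:
-- set optimizer_switch = 'index_condition_pushdown=off';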

3. Parallel execution

The parallel runs below are implemented in two ways: shell background processes and MySQL scheduled events.

(1) Shell background processes

"htmlcode">

#!/bin/bash
mysql -vvv -u root -p123456 test -e "truncate t_target" &>/dev/null 
date '+%H:%M:%S'
for y in {1..4}
do
 sql="call sp_unique($y)"
 mysql -vvv -u root -p123456 test -e "$sql" &>par_sql1_$y.log &
done
wait
date '+%H:%M:%S'

"htmlcode">

[mysql@hdp2~]$./duplicate_removal.sh
14:27:30
14:27:35

This approach takes 5 seconds overall; the 4 procedure calls executed in parallel take 4.87, 4.88, 4.91 and 4.73 seconds respectively:

[mysql@hdp2~]$cat par_sql1_1.log | sed '/^$/d'
mysql: [Warning] Using a password on the command line interface can be insecure.
--------------
call sp_unique(1)
--------------
Query OK, 124999 rows affected (4.87 sec)
Bye
[mysql@hdp2~]$cat par_sql1_2.log | sed '/^$/d'
mysql: [Warning] Using a password on the command line interface can be insecure.
--------------
call sp_unique(2)
--------------
Query OK, 125000 rows affected (4.88 sec)
Bye
[mysql@hdp2~]$cat par_sql1_3.log | sed '/^$/d'
mysql: [Warning] Using a password on the command line interface can be insecure.
--------------
call sp_unique(3)
--------------
Query OK, 125000 rows affected (4.91 sec)
Bye
[mysql@hdp2~]$cat par_sql1_4.log | sed '/^$/d'
mysql: [Warning] Using a password on the command line interface can be insecure.
--------------
call sp_unique(4)
--------------
Query OK, 125001 rows affected (4.73 sec)
Bye
[mysql@hdp2~]$

As you can see, the procedure calls take about 4.85 seconds each on average. Because they run in parallel, the total execution time equals that of the slowest call, 4.91 seconds, roughly a 2.5× speedup over the single-threaded version.

(2) MySQL scheduled events
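One prerequisite the original does not show: events only run when the event scheduler is enabled, so it may need to be turned on first (a minimal check):

show variables like 'event_scheduler';
-- if it is OFF:
set global event_scheduler = on;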

"htmlcode">

-- Used to record event execution times and related information
create table t_event_history ( 
 dbname varchar(128) not null default '', 
 eventname varchar(128) not null default '', 
 starttime datetime(3) not null default '1000-01-01 00:00:00', 
 endtime datetime(3) default null, 
 issuccess int(11) default null, 
 duration int(11) default null, 
 errormessage varchar(512) default null, 
 randno int(11) default null
);

"htmlcode">

delimiter //
create event ev1 on schedule at current_timestamp + interval 1 hour on completion preserve disable do 
begin
 declare r_code char(5) default '00000'; 
 declare r_msg text; 
 declare v_error integer; 
 declare v_starttime datetime default now(3); 
 declare v_randno integer default floor(rand()*100001); 
 insert into t_event_history (dbname,eventname,starttime,randno) 
 # job name 
 values(database(),'ev1', v_starttime,v_randno); 
 begin 
 # exception handling block 
 declare continue handler for sqlexception 
 begin 
  set v_error = 1; 
  get diagnostics condition 1 r_code = returned_sqlstate , r_msg = message_text; 
 end; 
 # the actual user procedure is called here 
 call sp_unique(1); 
 end; 
 update t_event_history set endtime=now(3),issuccess=isnull(v_error),duration=timestampdiff(microsecond,starttime,now(3)), errormessage=concat('error=',r_code,', message=',r_msg),randno=null where starttime=v_starttime and randno=v_randno; 
end
// 
create event ev2 on schedule at current_timestamp + interval 1 hour on completion preserve disable do 
begin
 declare r_code char(5) default '00000'; 
 declare r_msg text; 
 declare v_error integer; 
 declare v_starttime datetime default now(3); 
 declare v_randno integer default floor(rand()*100001); 
 insert into t_event_history (dbname,eventname,starttime,randno) 
 # job name 
 values(database(),'ev2', v_starttime,v_randno); 
 begin 
 # exception handling block 
 declare continue handler for sqlexception 
 begin 
  set v_error = 1; 
  get diagnostics condition 1 r_code = returned_sqlstate , r_msg = message_text; 
 end; 
 # the actual user procedure is called here 
 call sp_unique(2); 
 end; 
 update t_event_history set endtime=now(3),issuccess=isnull(v_error),duration=timestampdiff(microsecond,starttime,now(3)), errormessage=concat('error=',r_code,', message=',r_msg),randno=null where starttime=v_starttime and randno=v_randno; 
end
// 
create event ev3 on schedule at current_timestamp + interval 1 hour on completion preserve disable do 
begin
 declare r_code char(5) default '00000'; 
 declare r_msg text; 
 declare v_error integer; 
 declare v_starttime datetime default now(3); 
 declare v_randno integer default floor(rand()*100001); 
 insert into t_event_history (dbname,eventname,starttime,randno) 
 # job name 
 values(database(),'ev3', v_starttime,v_randno); 
 begin 
 # exception handling block 
 declare continue handler for sqlexception 
 begin 
  set v_error = 1; 
  get diagnostics condition 1 r_code = returned_sqlstate , r_msg = message_text; 
 end; 
 # the actual user procedure is called here 
 call sp_unique(3); 
 end; 
 update t_event_history set endtime=now(3),issuccess=isnull(v_error),duration=timestampdiff(microsecond,starttime,now(3)), errormessage=concat('error=',r_code,', message=',r_msg),randno=null where starttime=v_starttime and randno=v_randno; 
end
// 
create event ev4 on schedule at current_timestamp + interval 1 hour on completion preserve disable do 
begin
 declare r_code char(5) default '00000'; 
 declare r_msg text; 
 declare v_error integer; 
 declare v_starttime datetime default now(3); 
 declare v_randno integer default floor(rand()*100001); 
 insert into t_event_history (dbname,eventname,starttime,randno) 
 # job name 
 values(database(),'ev4', v_starttime,v_randno); 
 begin 
 # exception handling block 
 declare continue handler for sqlexception 
 begin 
  set v_error = 1; 
  get diagnostics condition 1 r_code = returned_sqlstate , r_msg = message_text; 
 end; 
 # the actual user procedure is called here 
 call sp_unique(4); 
 end; 
 update t_event_history set endtime=now(3),issuccess=isnull(v_error),duration=timestampdiff(microsecond,starttime,now(3)), errormessage=concat('error=',r_code,', message=',r_msg),randno=null where starttime=v_starttime and randno=v_randno; 
end
//

To record each event's execution time, logging against this table is built into every event definition. Each event only executes one extra INSERT and one extra UPDATE, so the 4 events add just 8 very simple statements in total, and the impact on the test is negligible. Timestamps are recorded with millisecond precision.

"htmlcode">

mysql -vvv -u root -p123456 test -e "truncate t_target;alter event ev1 on schedule at current_timestamp enable;alter event ev2 on schedule at current_timestamp enable;alter event ev3 on schedule at current_timestamp enable;alter event ev4 on schedule at current_timestamp enable;"

This command line triggers the 4 events one after another, but it does not wait for one to finish before enabling the next; it moves on immediately. This can be seen clearly from the command output:

[mysql@hdp2~]$mysql -vvv -u root -p123456 test -e "truncate t_target;alter event ev1 on schedule at current_timestamp enable;alter event ev2 on schedule at current_timestamp enable;alter event ev3 on schedule at current_timestamp enable;alter event ev4 on schedule at current_timestamp enable;"
mysql: [Warning] Using a password on the command line interface can be insecure.
--------------
truncate t_target
--------------
Query OK, 0 rows affected (0.06 sec)
--------------
alter event ev1 on schedule at current_timestamp enable
--------------
Query OK, 0 rows affected (0.02 sec)
--------------
alter event ev2 on schedule at current_timestamp enable
--------------
Query OK, 0 rows affected (0.00 sec)
--------------
alter event ev3 on schedule at current_timestamp enable
--------------
Query OK, 0 rows affected (0.02 sec)
--------------
alter event ev4 on schedule at current_timestamp enable
--------------
Query OK, 0 rows affected (0.00 sec)
Bye
[mysql@hdp2~]$

"htmlcode">

mysql> select * from test.t_event_history;
+--------+-----------+-------------------------+-------------------------+-----------+----------+--------------+--------+
| dbname | eventname | starttime  | endtime   | issuccess | duration | errormessage | randno |
+--------+-----------+-------------------------+-------------------------+-----------+----------+--------------+--------+
| test | ev1 | 2019-07-31 14:38:04.000 | 2019-07-31 14:38:09.389 |  1 | 5389000 | NULL  | NULL |
| test | ev2 | 2019-07-31 14:38:04.000 | 2019-07-31 14:38:09.344 |  1 | 5344000 | NULL  | NULL |
| test | ev3 | 2019-07-31 14:38:05.000 | 2019-07-31 14:38:09.230 |  1 | 4230000 | NULL  | NULL |
| test | ev4 | 2019-07-31 14:38:05.000 | 2019-07-31 14:38:09.344 |  1 | 4344000 | NULL  | NULL |
+--------+-----------+-------------------------+-------------------------+-----------+----------+--------------+--------+
4 rows in set (0.00 sec)

As you can see, the events take about 4.83 seconds each on average. Because they run in parallel, the total execution time is determined by the slowest, about 5.4 seconds, so the optimization effect is almost the same as with the shell background processes.
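The average and slowest figures above can be read straight from the log table (duration is stored in microseconds); a small sketch:

select avg(duration)/1000000 as avg_seconds,
 max(duration)/1000000 as slowest_seconds
 from t_event_history;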

Summary

The above describes how to push a MySQL deduplication operation to its limits. I hope it is helpful; if you have any questions, feel free to leave a comment.
