
Mycat Batch Insert Performance Test

This article benchmarks batch inserts through Mycat using multi-row VALUES statements. With roughly 8,000 rows written per transaction commit, throughput reaches about 75,000 rows per second, which also confirms that Mycat's efficiency is at most equal to MySQL's own. In real production the numbers drop because of traffic volume and concurrency, a limitation rooted in the database's underlying I/O, so production deployments usually shard with master/standby read-write splitting and can add several Mycat master/standby nodes. The test here uses one master and one standby: read-write splitting on a single Mycat node over MySQL InnoDB.

An Idealized Test

What is an idealized test? The numbers here were gathered under ideal conditions, so they may not be entirely accurate.

Mycat Data Sharding

schema.xml

<table name="userinfo" primaryKey="id" type="global" dataNode="dn1,dn2" />

<table name="processtask" primaryKey="id" type="global" dataNode="dn1,dn2" />

dbBatch.sql

DROP TABLE IF EXISTS `userinfo`;
CREATE TABLE `userinfo` (
  `id` int(20) NOT NULL,
  `name` varchar(50) DEFAULT NULL,
  `phone` varchar(30) DEFAULT NULL,
  `address` varchar(100) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;


DROP TABLE IF EXISTS `processtask`;
CREATE TABLE `processtask` (
  `id` int(12) NOT NULL AUTO_INCREMENT,
  `pmethod` varchar(50) DEFAULT NULL,
  `plimit` int(20) DEFAULT NULL,
  `ptime` int(20) DEFAULT NULL,
  `systime` timestamp NULL DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
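userinfo is the insert target; processtask records one row per test run (method name, batch size limit, and elapsed time ptime in ms), written by the insertResult method below and queried later to build the results table.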

Java Test Classes

BatchInsert

package demo.test;

import java.sql.DriverManager;
import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Random;

import org.junit.Before;
import org.junit.Test;

import java.sql.Connection;
import java.sql.PreparedStatement;
/**
 * JDBC test class for batch inserts
 * 
 * @author pengjunlin
 *
 */
public class BatchInsert {
	
	private String driver = "com.mysql.jdbc.Driver";

	private String url = "jdbc:mysql://192.168.178.128:8066/TESTDB";
	
	private String batch_url = "jdbc:mysql://192.168.178.128:8066/TESTDB?useUnicode=true&characterEncoding=utf-8&rewriteBatchedStatements=true"; // requires Connector/J 5.1.13 or later

	private String user = "root";

	private String password = "123456";
	
	private int limit=10;
	
	private String method="batchInsertWithTransaction";
	
	public String getMethod() {
		return method;
	}

	public void setMethod(String method) {
		this.method = method;
	}
	
	public int getLimit() {
		return limit;
	}

	public void setLimit(int limit) {
		this.limit = limit;
	}
	

	@Before
	public void deleteAll(){
		Connection conn = null;
		try {
			Class.forName(driver);
			conn = DriverManager.getConnection(url, user, password);
			String sql = "DELETE FROM userinfo ;";
			conn.prepareStatement(sql).execute();
		
		} catch (Exception e) {
			e.printStackTrace();
			throw new RuntimeException(e);
		} finally {
			if (conn != null) {
				try {
					conn.close();
				} catch (SQLException e) {
					e.printStackTrace();
				}
			}
		}
	}
	
	/**
	 * Record the execution time of a test run.
	 * 
	 * @param methodName
	 * @param limit
	 * @param timeStr
	 */
	public void insertResult(String methodName,String limit,String timeStr) {
		Connection conn = null;
		PreparedStatement pstm = null;
		try {
			Class.forName(driver);
			conn = DriverManager.getConnection(url, user, password);
			SimpleDateFormat sdf=new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
			String sql = "/*#mycat:db_type=master*/INSERT INTO processtask (pmethod,plimit,ptime,systime) VALUES('"+methodName+"','"+limit+"','"+timeStr+"','"+sdf.format(new Date())+"')";
			System.out.println(sql);
			pstm = conn.prepareStatement(sql);
			pstm.executeUpdate();
		} catch (Exception e) {
			e.printStackTrace();
		} finally {
			if (pstm != null) {
				try {
					pstm.close();
				} catch (SQLException e) {
					e.printStackTrace();
				}
			}
			if (conn != null) {
				try {
					conn.close();
				} catch (SQLException e) {
					e.printStackTrace();
				}
			}
		}
	}


	
	@Test
	public void batchInsertWithTransaction() {
		Connection conn = null;
		PreparedStatement pstm = null;
		try {
			Class.forName(driver);
			conn = DriverManager.getConnection(batch_url, user, password);
			StringBuilder sql = new StringBuilder("/*#mycat:db_type=master*/INSERT INTO userinfo(id,name,phone,address) VALUES");
			conn.setAutoCommit(false); // disable auto-commit; commit manually after the batch
			Random rand = new Random();
			int a, b, c, d;
			for (int i = 1; i <= limit; i++) {
				a = rand.nextInt(10);
				b = rand.nextInt(10);
				c = rand.nextInt(10);
				d = rand.nextInt(10);
				// append one row; the final row closes the statement
				sql.append("(" + i + ",'boonya','188" + a + "88" + b + c + "66" + d + "','"
						+ "xxxxxxxxxx_188" + a + "88" + b + c + "66" + d
						+ (i == limit ? "');" : "'),"));
			}
			System.out.println(sql.toString()); 
			pstm = conn.prepareStatement(sql.toString());
			long startTime = System.currentTimeMillis();
			pstm.execute();
			conn.commit(); // manual commit of the whole batch
			long endTime = System.currentTimeMillis();
			String timeStr = (endTime - startTime) + "";
			System.out.println("OK, elapsed: " + timeStr + "ms");
			insertResult("batchInsertWithTransaction", limit + "", timeStr);
		} catch (Exception e) {
			e.printStackTrace();
		} finally {
			if (pstm != null) {
				try {
					pstm.close();
				} catch (SQLException e) {
					e.printStackTrace();
					throw new RuntimeException(e);
				}
			}
			if (conn != null) {
				try {
					conn.close();
				} catch (SQLException e) {
					e.printStackTrace();
					throw new RuntimeException(e);
				}
			}
		}
	}

}
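One note on the JDBC settings: batch_url enables rewriteBatchedStatements=true, but that flag only changes how Connector/J handles PreparedStatement.addBatch()/executeBatch(); the test above already builds a single multi-row VALUES statement by hand, so the flag is not what produces the batching here. A minimal sketch of the addBatch() alternative, assuming the same table, URL, and credentials as above (not part of the original test):

package demo.test;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

public class BatchInsertAddBatch {

	public static void main(String[] args) throws Exception {
		String url = "jdbc:mysql://192.168.178.128:8066/TESTDB"
				+ "?useUnicode=true&characterEncoding=utf-8&rewriteBatchedStatements=true";
		try (Connection conn = DriverManager.getConnection(url, "root", "123456")) {
			conn.setAutoCommit(false);
			String sql = "INSERT INTO userinfo(id,name,phone,address) VALUES(?,?,?,?)";
			try (PreparedStatement pstm = conn.prepareStatement(sql)) {
				for (int i = 1; i <= 8000; i++) {
					pstm.setInt(1, i);
					pstm.setString(2, "boonya");
					pstm.setString(3, "18888886666"); // fixed dummy values; the original randomizes digits
					pstm.setString(4, "xxxxxxxxxx_18888886666");
					pstm.addBatch(); // queue the row client-side
				}
				pstm.executeBatch(); // the driver rewrites the batch into multi-row INSERTs
			}
			conn.commit(); // one transaction, as in the test above
		}
	}
}

Both forms end up sending multi-row INSERTs through Mycat to MySQL; the hand-built string simply makes the rewrite explicit.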

BatchInsertThread

package demo.test;

import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
/**
 * Worker that drives the batch insert test
 * 
 * @author pengjunlin
 *
 */
public class BatchInsertThread implements Runnable{
	
	BatchInsert batchInsert;
	
	static int loop=10;
	
	
	
	public BatchInsertThread(BatchInsert batchInsert){
		this.batchInsert=batchInsert;

	}

	
	public static void main(String[] args) {
		Executor executor=Executors.newSingleThreadExecutor();
		
		int limit=15630;
		
		for (int i = 0; i <= 50000; i++) {
			limit+=10;
			BatchInsert bi=new BatchInsert();
			bi.setLimit(limit);
			executor.execute(new BatchInsertThread(bi));
		}
		
	}

	public void run() {
		// Each task owns its own BatchInsert instance, and the single-threaded
		// executor already serializes tasks, so this lock is belt-and-braces.
		synchronized (batchInsert) {
			try {
				for (int i = 0; i < loop; i++) {
					System.out.println("Round " + i + " --------------------- start");
					batchInsert.deleteAll();
					batchInsert.batchInsertWithTransaction();
					System.out.println("Round " + i + " --------------------- end");
				}
			} catch (Exception e) {
				e.printStackTrace();
			}
		}
	}
	

}
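Because the executor is single-threaded, the harness above measures best-case, uncontended throughput. To observe the concurrency-induced slowdown mentioned in the introduction, several writers would have to commit at once; a hypothetical sketch (not part of the original test; the thread count, batch size, fixed dummy values, and per-thread id offset are all assumptions) might look like:

package demo.test;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ConcurrentBatchInsert {

	static final String URL = "jdbc:mysql://192.168.178.128:8066/TESTDB";

	public static void main(String[] args) throws Exception {
		int threads = 4, batch = 8000;
		ExecutorService pool = Executors.newFixedThreadPool(threads); // concurrent writers
		for (int t = 0; t < threads; t++) {
			final int offset = t * batch; // disjoint id range per writer
			pool.execute(() -> insertBatch(offset, batch));
		}
		pool.shutdown();
		pool.awaitTermination(1, TimeUnit.HOURS);
	}

	static void insertBatch(int offset, int batch) {
		StringBuilder sql = new StringBuilder(
				"/*#mycat:db_type=master*/INSERT INTO userinfo(id,name,phone,address) VALUES");
		for (int i = 1; i <= batch; i++) {
			sql.append('(').append(offset + i)
			   .append(",'boonya','18888886666','xxxxxxxxxx_18888886666')")
			   .append(i == batch ? ';' : ',');
		}
		try (Connection conn = DriverManager.getConnection(URL, "root", "123456")) {
			conn.setAutoCommit(false);
			long start = System.currentTimeMillis();
			try (Statement st = conn.createStatement()) {
				st.execute(sql.toString());
			}
			conn.commit();
			System.out.println(Thread.currentThread().getName() + " inserted " + batch
					+ " rows in " + (System.currentTimeMillis() - start) + "ms");
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
}

Giving each writer a disjoint id range avoids duplicate-key failures on the userinfo primary key, which concurrent reuse of BatchInsert (ids always 1..limit) would otherwise cause.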

BatchInsertDataParsor

package demo.test;

import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import org.junit.Test;
import java.sql.Connection;
import java.sql.ResultSetMetaData;
/**
 * Analysis of the recorded test data
 * 
 * @author pengjunlin
 *
 */
public class BatchInsertDataParsor {
	
	private String driver = "com.mysql.jdbc.Driver";

	private String url = "jdbc:mysql://192.168.178.128:8066/TESTDB?useUnicode=true&characterEncoding=utf-8&rewriteBatchedStatements=true"; // requires Connector/J 5.1.13 or later

	private String user = "root";

	private String password = "123456";
	
	
	@Test
	public void queryData(){
		Connection conn = null;
		ResultSet rs=null;
		try {
			Class.forName(driver);
			conn = DriverManager.getConnection(url, user, password);
			String sql = "/*#mycat:db_type=slave*/SELECT id,pmethod,plimit,ptime,systime FROM processtask ;";
			long startTime = System.currentTimeMillis();
			rs = conn.prepareStatement(sql).executeQuery(); // executeQuery(String) must not be called on a PreparedStatement
			int rows = 0;
			while (rs.next()) { // a forward-only ResultSet has to be iterated to count its rows
				rows++;
			}
			long endTime = System.currentTimeMillis();
			long cost = endTime - startTime;
			System.out.println("Total rows: " + rows + " cost: " + cost + "ms");
		} catch (Exception e) {
			e.printStackTrace();
			throw new RuntimeException(e);
		} finally {
			try {
				if (rs != null && !rs.isClosed()) {
					rs.close(); // close the ResultSet before its Connection
				}
			} catch (SQLException e) {
				e.printStackTrace();
			}
			if (conn != null) {
				try {
					conn.close();
				} catch (SQLException e) {
					e.printStackTrace();
				}
			}
		}
	}
	

	
	@Test
	public void parseTimeTest(){
		Connection conn = null;
		ResultSet rs=null;
		try {
			Class.forName(driver);
			conn = DriverManager.getConnection(url, user, password);
			String sql = "/*#mycat:db_type=slave*/SELECT avg(ptime) avg,max(ptime) max,min(ptime) min FROM processtask;";
			rs = conn.prepareStatement(sql).executeQuery(); // use the no-arg executeQuery()
			ResultSetMetaData md = rs.getMetaData(); // column metadata
			int columnCount = md.getColumnCount(); // number of columns, not rows
			while (rs.next()) {
				for (int i = 1; i <= columnCount; i++) {
					System.out.println(md.getColumnName(i) + ": " + rs.getString(i)); // column name and value
				}
			}
		} catch (Exception e) {
			e.printStackTrace();
			throw new RuntimeException(e);
		} finally {
			try {
				if (rs != null && !rs.isClosed()) {
					rs.close(); // close the ResultSet before its Connection
				}
			} catch (SQLException e) {
				e.printStackTrace();
			}
			if (conn != null) {
				try {
					conn.close();
				} catch (SQLException e) {
					e.printStackTrace();
				}
			}
		}
	}
	

	@Test
	public void parseLimitAndTimeTest(){
		Connection conn = null;
		ResultSet rs=null;
		try {
			Class.forName(driver);
			conn = DriverManager.getConnection(url, user, password);
			String sql = "/*#mycat:db_type=slave*/SELECT plimit,avg(ptime) avg FROM processtask group by plimit;";
			rs = conn.prepareStatement(sql).executeQuery(); // use the no-arg executeQuery()
			ResultSetMetaData md = rs.getMetaData(); // column metadata
			int columnCount = md.getColumnCount(); // number of columns
			while (rs.next()) {
				float limit = 0, avg = 0;
				for (int i = 1; i <= columnCount; i++) {
					float result = rs.getFloat(i);
					if (i == 1) {
						limit = result;
					} else {
						avg = result;
					}
				}
				System.out.println("limit=" + limit + "\t\testimated inserts per second: " + (1000 * limit) / avg);
			}
		} catch (Exception e) {
			e.printStackTrace();
			throw new RuntimeException(e);
		} finally {
			try {
				if (rs != null && !rs.isClosed()) {
					rs.close(); // close the ResultSet before its Connection
				}
			} catch (SQLException e) {
				e.printStackTrace();
			}
			if (conn != null) {
				try {
					conn.close();
				} catch (SQLException e) {
					e.printStackTrace();
				}
			}
		}
	}

}

Test Data

Each sample size (limit) was measured over 10 runs; the table below lists the average batch-insert time (avg, in ms) and the estimated number of rows insertable in one second, produced with:
SELECT plimit as '樣本(limit)-每組10個例項樣本',avg(ptime) as  '批量插入平均耗時(avg)ms' ,(1000*plimit/avg(ptime)) as '估算一秒內批量插入可插入的數量' FROM processtask group by plimit;
limit (sample; 10 runs each) | avg (ms) | est. inserts per second
10 | 11.6 | 862.069
7680 | 97.8 | 78527.6074
7790 | 100.6 | 77435.3877
9730 | 126 | 77222.2222
8050 | 105.6 | 76231.0606
10420 | 137 | 76058.3942
7280 | 95.8 | 75991.6493
9740 | 128.7 | 75679.8757
7700 | 101.8 | 75638.5069
9520 | 125.9 | 75615.5679
10430 | 139.1 | 74982.0273
9410 | 126.1 | 74623.3148
8180 | 109.7 | 74567.0009
5750 | 77.5 | 74193.5484
6590 | 89.4 | 73713.6465
10600 | 143.8 | 73713.491
9130 | 123.9 | 73688.4584
5790 | 78.8 | 73477.1574
8620 | 117.5 | 73361.7021
6640 | 90.7 | 73208.3793
6720 | 91.8 | 73202.6144
9810 | 134.1 | 73154.3624
7820 | 106.9 | 73152.479
6160 | 84.5 | 72899.4083
8850 | 121.9 | 72600.4922
9140 | 125.9 | 72597.2994
7110 | 98.2 | 72403.2587
9470 | 130.8 | 72400.6116
9960 | 138 | 72173.913
9440 | 130.9 | 72116.1192
12620 | 175 | 72114.2857
15630 | 217 | 72027.6498
6230 | 86.5 | 72023.1214
10630 | 147.6 | 72018.9702
8980 | 124.8 | 71955.1282
10360 | 144.1 | 71894.5177
10050 | 139.9 | 71837.0264
10560 | 147 | 71836.7347
6960 | 96.9 | 71826.6254
6490 | 90.4 | 71792.0354
9540 | 133 | 71729.3233
6660 | 93.1 | 71535.9828
8110 | 113.4 | 71516.7549
8400 | 117.5 | 71489.3617
9710 | 135.9 | 71449.5953
12500 | 175 | 71428.5714
4730 | 66.3 | 71342.3831
6830 | 95.8 | 71294.3633
8060 | 113.4 | 71075.8377
7450 | 104.9 | 71020.0191
7440 | 104.8 | 70992.3664
7520 | 106.1 | 70876.5316
4670 | 65.9 | 70864.9469
7570 | 106.9 | 70813.8447
7650 | 108.1 | 70767.8076
10260 | 145.2 | 70661.157
12200 | 172.7 | 70642.7331
7490 | 106.1 | 70593.7795
10580 | 150 | 70533.3333
6920 | 98.2 | 70468.4318
8680 | 123.3 | 70397.4047
7360 | 104.6 | 70363.2887
5940 | 84.6 | 70212.766
9290 | 132.5 | 70113.2075
9970 | 142.2 | 70112.5176
7760 | 110.7 | 70099.3677
11180 | 159.5 | 70094.0439
9530 | 136 | 70073.5294
9770 | 139.6 | 69985.6734
4360 | 62.3 | 69983.9486
7380 | 105.6 | 69886.3636
6750 | 96.6 | 69875.7764
9160 | 131.3 | 69763.8995
4980 | 71.4 | 69747.8992
10120 | 145.1 | 69745.0034
14870 | 213.3 | 69714.0178
8370 | 120.1 | 69691.9234
6250 | 89.8 | 69599.1091
6680 | 96 | 69583.3333
9500 | 136.6 | 69546.1201
4520 | 65 | 69538.4615
10080 | 145.2 | 69421.4876
9050 | 130.4 | 69401.8405
5550 | 80 | 69375
10270 | 148.2 | 69298.2456
13320 | 192.5 | 69194.8052
10220 | 147.9 | 69100.7437
10500 | 152 | 69078.9474
7420 | 107.5 | 69023.2558
7530 | 109.1 | 69019.2484
15000 | 217.4 | 68997.2401
9080 | 131.6 | 68996.9605
15420 | 223.5 | 68993.2886
10140 | 147.2 | 68885.8696
10700 | 155.4 | 68854.5689
9240 | 134.2 | 68852.459
11900 | 173.2 | 68706.6975
8140 | 118.5 | 68691.9831
7090 | 103.3 | 68635.0436
14620 | 213.1 | 68606.2881
7260 | 105.9 | 68555.2408
15610 | 227.7 | 68555.1164
12520 | 182.7 | 68527.6409
10110 | 147.6 | 68495.935
10010 | 146.2 | 68467.8523
6010 | 87.8 | 68451.0251
10540 | 154 | 68441.5584
12300 | 179.8 | 68409.3437
4930 | 72.1 | 68377.2538
10410 | 152.3 | 68351.937
10900 | 159.7 | 68252.9743
4550 | 66.7 | 68215.8921
10720 | 157.5 | 68063.4921
5580 | 82 | 68048.7805
7750 | 113.9 | 68042.1422
6840 | 100.6 | 67992.0477
12330 | 181.4 | 67971.3341
6930 | 102 | 67941.1765
7500 | 110.5 | 67873.3032
6570 | 96.8 | 67871.9008
8760 | 129.2 | 67801.8576
8630 | 127.3 | 67792.6159
6900 | 101.8 | 67779.9607
7400 | 109.2 | 67765.5678
7240 | 107 | 67663.5514
13370 | 197.6 | 67661.9433
11690 | 172.8 | 67650.463
15460 | 228.8 | 67569.9301
13120 | 194.2 | 67559.2173
5970 | 88.4 | 67533.9367
12680 | 187.9 | 67482.7036
7980 | 118.3 | 67455.6213
9720 | 144.1 | 67453.1575
4390 | 65.1 | 67434.7158
9620 | 142.8 | 67366.9468
5780 | 85.8 | 67365.9674
9760 | 144.9 | 67356.7978
5770 | 85.7 | 67327.888
7780 | 115.6 | 67301.0381
8820 | 131.1 | 67276.8879
9750 | 145 | 67241.3793
14920 | 222.1 | 67176.9473
7340 | 109.4 | 67093.2358
9490 | 141.5 | 67067.1378
8950 | 133.5 | 67041.1985
4940 | 73.7 | 67028.4939
14810 | 221.3 | 66922.7293
7540 | 112.7 | 66903.2831
8900 | 133.1 | 66867.0173
9640 | 144.2 | 66851.595
3620 | 54.2 | 66789.6679
6130 | 91.8 | 66775.5991
6550 | 98.1 | 66768.6035
4370 | 65.5 | 66717.5573
11280 | 169.2 | 66666.6667
7690 | 115.4 | 66637.7816
4070 | 61.1 | 66612.1113
7740 | 116.2 | 66609.2943
13260 | 199.1 | 66599.6986
7020 | 105.5 | 66540.2844
4970 | 74.8 | 66443.8503
10590 | 159.4 | 66436.6374
10170 | 153.2 | 66383.812
9910 | 149.3 | 66376.4233
4300 | 64.8 | 66358.0247
13270 | 200 | 66350
11680 | 176.1 | 66325.9512
11410 | 172.2 | 66260.1626
10520 | 158.8 | 66246.8514
12660 | 191.3 | 66178.7768
11600 | 175.4 | 66134.5496
9670 | 146.3 | 66097.0608
9860 | 149.2 | 66085.7909
6910 | 104.6 | 66061.1855
9800 | 148.4 | 66037.7358
9170 | 139 | 65971.223
4510 | 68.4 | 65935.6725
14430 | 218.9 | 65920.5116
9660 | 146.6 | 65893.588
4580 | 69.6 | 65804.5977
10200 | 155.1 | 65764.0232
10100 | 153.6 | 65755.2083
8410 | 127.9 | 65754.4957
13300 | 202.3 | 65743.9446
6190 | 94.2 | 65711.2527
12280 | 187 | 65668.4492
8430 | 128.7 | 65501.1655
12320 | 188.3 | 65427.5093
3530 | 54 | 65370.3704
8030 | 122.9 | 65337.6729
8480 | 129.8 | 65331.2789
9210 | 141 | 65319.1489
3950 | 60.5 | 65289.2562
15100 | 231.3 | 65283.182
5760 | 88.3 | 65232.1631
9510 | 145.8 | 65226.3374
6940 | 106.4 | 65225.5639
8290 | 127.2 | 65172.956
7310 | 112.2 | 65151.5152
6280 | 96.4 | 65145.2282
13680 | 210.1 | 65111.8515
12180 | 187.4 | 64994.6638
12490 | 192.2 | 64984.3913
12060 | 185.7 | 64943.4572
12100 | 186.5 | 64879.3566
5670 | 87.4 | 64874.1419
3870 | 59.7 | 64824.1206
15290 | 235.9 | 64815.5998
15560 | 240.3 | 64752.3928
14860 | 229.5 | 64749.4553
8200 | 126.7 | 64719.8106
14880 | 230 | 64695.6522
11370 | 175.8 | 64675.7679
14280 | 220.9 | 64644.6356
3820 | 59.1 | 64636.2098
6740 | 104.3 | 64621.2848
11460 | 177.4 | 64599.7745
15210 | 235.5 | 64585.9873
12370 | 191.7 | 64527.9082
5530 | 85.7 | 64527.4212
8230 | 127.6 | 64498.4326
11330 | 175.7 | 64484.9175
14010 | 217.5 | 64413.7931
3920 | 60.9 | 64367.8161
10960 | 170.3 | 64357.017
12050 | 187.3 | 64335.291
5660 | 88 | 64318.1818
10470 | 162.8 | 64312.0393
14070 | 218.8 | 64305.3016
4480 | 69.7 | 64275.4663
6170 | 96 | 64270.8333
14780 | 230 | 64260.8696
9230 | 143.7 | 64231.0369
6730 | 104.8 | 64217.5573
14680 | 228.6 | 64216.9729
12240 | 190.7 | 64184.5831
9920 | 154.6 | 64165.5886
7230 | 112.7 | 64152.6176
15050 | 234.8 | 64097.1039
6140 | 95.8 | 64091.858
10180 | 158.9 | 64065.45
8220 | 128.5 | 63968.8716
15440 | 241.4 | 63960.232
14690 | 229.8 | 63925.1523
10480 | 164 | 63902.439
12550 | 196.4 | 63900.2037
12410 | 194.3 | 63870.3037
4750 | 74.4 | 63844.086
10980 | 172 | 63837.2093
7580 | 118.8 | 63804.7138
13430 | 210.5 | 63800.4751
13480 | 211.3 | 63795.5513
5610 | 88 | 63750
13360 | 209.6 | 63740.458
9790 | 153.6 | 63736.9792
8700 | 136.5 | 63736.2637
11960 | 187.7 | 63718.7001
12160 | 191.1 | 63631.6065
14520 | 228.2 | 63628.3961
6770 | 106.4 | 63627.8195
11260 | 177.1 | 63579.8984
11010 | 173.3 | 63531.4484
13570 | 213.6 | 63529.9625
13110 | 206.5 | 63486.6828
14550 | 229.4 | 63426.3296
6150 | 97 | 63402.0619
10390 | 163.9 | 63392.3124
7720 | 121.9 | 63330.5989
5590 | 88.3 | 63306.9083
6880 | 108.7 | 63293.4683
10760 | 170.1 | 63256.9077
11800 | 186.6 | 63236.8703
9420 | 149 | 63221.4765
13940 | 220.5 | 63219.9546
10650 | 168.5 | 63204.7478
14830 | 234.9 | 63133.2482
7860 | 124.5 | 63132.5301
13950 | 221.3 | 63036.6019
9310 | 147.8 | 62990.5277
4340 | 68.9 | 62989.8403
8010 | 127.2 | 62971.6981
11380 | 180.8 | 62942.4779
13730 | 218.3 | 62895.0985
12360 | 196.8 | 62804.878
10160 | 162 | 62716.0494
11430 | 182.3 | 62698.8481
10090 | 161.1 | 62631.9056
14730 | 235.2 | 62627.551
3780 | 60.4 | 62582.7815
11070 | 176.9 | 62577.7275
8670 | 138.6 | 62554.1126
14960 | 239.3 | 62515.6707
10310 | 165 | 62484.8485
14890 | 238.4 | 62458.0537
14560 | 233.6 | 62328.7671
13530 | 217.1 | 62321.5108
15550 | 249.6 | 62299.6795
6020 | 96.8 | 62190.0826
13880 | 223.3 | 62158.5311
13030 | 209.8 | 62106.7684
15250 | 245.6 | 62092.8339
12600 | 203 | 62068.9655
4780 | 77.1 | 61997.406
9030 | 145.7 | 61976.6644
12950 | 209 | 61961.7225
11980 | 193.4 | 61944.1572
4850 | 78.3 | 61941.2516
12770 | 206.3 | 61900.1454
6620 | 107.1 | 61811.3912
9840 | 159.2 | 61809.0452
12790 | 207.1 | 61757.605
11390 | 184.6 | 61700.9751
3590 | 58.2 | 61683.8488
7270 | 117.9 | 61662.4258
9900 | 160.6 | 61643.8356
10070 | 163.6 | 61552.5672
10450 | 170 | 61470.5882
5130 | 83.5 | 61437.1257
4280 | 69.7 | 61406.0258
12630 | 205.8 | 61370.2624
5460 | 89 | 61348.3146
14450 | 235.6 | 61332.7674
13140 | 214.3 | 61315.9123
11140 | 181.7 | 61309.8514
9000 | 146.9 | 61266.1675
13550 | 221.3 | 61229.1008
15150 | 247.5 | 61212.1212
12890 | 210.6 | 61206.0779
11570 | 189.1 | 61184.5584
11630 | 190.1 | 61178.3272
3790 | 62 | 61129.0323
13580 | 222.2 | 61116.1116
15010 | 245.8 | 61065.9072
6890 | 112.9 | 61027.4579
15260 | 250.1 | 61015.5938
8450 | 138.5 | 61010.8303
8150 | 133.6 | 61002.994
14120 | 231.6 | 60967.1848
5600 | 92 | 60869.5652
4320 | 71 | 60845.0704
12020 | 197.7 | 60799.1907
11530 | 189.7 | 60780.1792
11610 | 191.1 | 60753.5322
14050 | 231.4 | 60717.3725
7880 | 129.8 | 60708.7827
10750 | 177.1 | 60700.1694
14610 | 240.7 | 60697.9643
7150 | 117.8 | 60696.0951
5930 | 97.7 | 60696.0082
12040 | 198.6 | 60624.3706
10400 | 171.7 | 60570.763
14260 | 235.5 | 60552.017
11920 | 196.9 | 60538.3443
12260 | 202.6 | 60513.3268
5190 | 85.8 | 60489.5105
12170 | 201.6 | 60367.0635
8960 | 148.5 | 60336.7003
12850 | 213 | 60328.6385
12700 | 210.6 | 60303.8936
12080 | 200.5 | 60249.3766
10570 | 175.5 | 60227.9202
15130 | 251.3 | 60206.924
15490 | 257.4 | 60178.7102
11030 | 183.3 | 60174.5772
6240 | 103.7 |
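As a spot check on the third column: limit 7680 with avg 97.8 ms gives 1000 × 7680 / 97.8 ≈ 78527.6 inserts per second. Batch sizes around 8,000 rows sit near the top of the range, which is where the figure of roughly 75,000 rows per second quoted in the introduction comes from.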
