What if an urgent job comes in?! Switch the execution engine to Spark: it is much faster than MR, but the price is resources (mainly memory). The settings below consume a lot of resources, so use them with care:
-- run the query on Spark instead of MR; cap input splits at ~750 MB
set hive.execution.engine=spark;
set mapreduce.input.fileinputformat.split.maxsize=750000000;
-- vectorized execution and the cost-based optimizer
set hive.vectorized.execution.enabled=true;
set hive.cbo.enable=true;
-- collapse adjacent reduce stages that share the same key
set hive.optimize.reducededuplication.min.reducer=4;
set hive.optimize.reducededuplication=true;
set hive.orc.splits.include.file.footer=false;
-- merge small output files (ORC merged at stripe level)
set hive.merge.mapfiles=true;
set hive.merge.sparkfiles=false;
set hive.merge.smallfiles.avgsize=16000000;
set hive.merge.size.per.task=256000000;
set hive.merge.orcfile.stripe.level=true;
-- auto-convert joins to map joins when the small side fits in ~853 MB
set hive.auto.convert.join=true;
set hive.auto.convert.join.noconditionaltask=true;
set hive.auto.convert.join.noconditionaltask.size=894435328;
set hive.optimize.bucketmapjoin.sortedmerge=false;
-- map-side aggregation, hash table capped at 50% of map memory
set hive.map.aggr.hash.percentmemory=0.5;
set hive.map.aggr=true;
set hive.optimize.sort.dynamic.partition=false;
-- gather and fetch table/column statistics
set hive.stats.autogather=true;
set hive.stats.fetch.column.stats=true;
set hive.vectorized.execution.reduce.enabled=false;
set hive.vectorized.groupby.checkinterval=4096;
set hive.vectorized.groupby.flush.percent=0.1;
-- answer count/min/max-style queries straight from statistics
set hive.compute.query.using.stats=true;
set hive.limit.pushdown.memory.usage=0.4;
set hive.optimize.index.filter=true;
-- ~64 MB per reducer and per ORC stripe
set hive.exec.reducers.bytes.per.reducer=67108864;
set hive.smbjoin.cache.rows=10000;
set hive.exec.orc.default.stripe.size=67108864;
-- run simple queries as a fetch task (no job) for up to 1 GB of input
set hive.fetch.task.conversion=more;
set hive.fetch.task.conversion.threshold=1073741824;
set hive.fetch.task.aggr=false;
set mapreduce.input.fileinputformat.list-status.num-threads=5;
-- Kryo serialization tuning for Hive on Spark
set spark.kryo.referenceTracking=false;
set spark.kryo.classesToRegister=org.apache.hadoop.hive.ql.io.HiveKey,org.apache.hadoop.io.BytesWritable,org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
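
As a quick usage sketch (the file, database, and table names below are made up for illustration), the whole block can be pasted at the top of the urgent job's script and run in one shot:

-- urgent_job.hql (hypothetical names)
set hive.execution.engine=spark;
-- ...all the remaining settings from the list above...
INSERT OVERWRITE TABLE dw.daily_report PARTITION (dt='2024-01-01')
SELECT uid, count(*) AS pv
FROM ods.click_log
WHERE dt='2024-01-01'
GROUP BY uid;

Run it with hive -f urgent_job.hql (or the beeline equivalent); the set statements only affect that session, so nothing changes cluster-wide.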
This configuration requests resources dynamically; strip out the Spark-specific settings and it works for MR as well.
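
For that MR fallback, a minimal sketch of the delta (the hive.merge.mapredfiles line is my own assumption so that small-file merging still happens under MR; everything else in the list stays as-is):

-- MR variant: change the engine and drop the Spark-specific lines
set hive.execution.engine=mr;
-- remove: hive.merge.sparkfiles, spark.kryo.referenceTracking, spark.kryo.classesToRegister
-- assumption: merge small files from the final MR stage instead
set hive.merge.mapredfiles=true;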
