Reduce log in unit test #333

Open
kination opened this issue Feb 12, 2021 · 0 comments
kination commented Feb 12, 2021

Hello,
While using the module for Spark unit testing, the test run prints huge, verbose logs that are not useful for checking the tests.

...
...
07:55:25.069 [pool-1-thread-1-ScalaTest-running-FileTest] DEBUG org.apache.spark.sql.execution.WholeStageCodegenExec - 
/* 001 */ public Object generate(Object[] references) {
/* 002 */   return new GeneratedIteratorForCodegenStage2(references);
/* 003 */ }
/* 004 */
/* 005 */ // codegenStageId=2
/* 006 */ final class GeneratedIteratorForCodegenStage2 extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 007 */   private Object[] references;
/* 008 */   private scala.collection.Iterator[] inputs;
/* 009 */   private boolean agg_initAgg_0;
/* 010 */   private boolean agg_bufIsNull_0;
/* 011 */   private long agg_bufValue_0;
/* 012 */   private scala.collection.Iterator inputadapter_input_0;
/* 013 */   private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter[] agg_mutableStateArray_0 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter[1];
/* 014 */
/* 015 */   public GeneratedIteratorForCodegenStage2(Object[] references) {
/* 016 */     this.references = references;
/* 017 */   }
/* 018 */
/* 019 */   public void init(int index, scala.collection.Iterator[] inputs) {
/* 020 */     partitionIndex = index;
/* 021 */     this.inputs = inputs;
/* 022 */
/* 023 */     inputadapter_input_0 = inputs[0];
/* 024 */     agg_mutableStateArray_0[0] = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(1, 0);
/* 025 */
/* 026 */   }
/* 027 */
/* 028 */   private void agg_doAggregateWithoutKey_0() throws java.io.IOException {
/* 029 */     // initialize aggregation buffer
/* 030 */     agg_bufIsNull_0 = false;
/* 031 */     agg_bufValue_0 = 0L;
/* 032 */
/* 033 */     while (inputadapter_input_0.hasNext() && !stopEarly()) {
/* 034 */       InternalRow inputadapter_row_0 = (InternalRow) inputadapter_input_0.next();
/* 035 */       long inputadapter_value_0 = inputadapter_row_0.getLong(0);
/* 036 */
/* 037 */       agg_doConsume_0(inputadapter_row_0, inputadapter_value_0);
/* 038 */       if (shouldStop()) return;
/* 039 */     }
/* 040 */
/* 041 */   }
/* 042 */
/* 043 */   private void agg_doConsume_0(InternalRow inputadapter_row_0, long agg_expr_0_0) throws java.io.IOException {
/* 044 */     // do aggregate
/* 045 */     // common sub-expressions
/* 046 */
/* 047 */     // evaluate aggregate function
/* 048 */     long agg_value_3 = -1L;
/* 049 */     agg_value_3 = agg_bufValue_0 + agg_expr_0_0;
/* 050 */     // update aggregation buffer
/* 051 */     agg_bufIsNull_0 = false;
/* 052 */     agg_bufValue_0 = agg_value_3;
/* 053 */
/* 054 */   }
/* 055 */
/* 056 */   protected void processNext() throws java.io.IOException {
/* 057 */     while (!agg_initAgg_0) {
/* 058 */       agg_initAgg_0 = true;
/* 059 */       long agg_beforeAgg_0 = System.nanoTime();
/* 060 */       agg_doAggregateWithoutKey_0();
/* 061 */       ((org.apache.spark.sql.execution.metric.SQLMetric) references[1] /* aggTime */).add((System.nanoTime() - agg_beforeAgg_0) / 1000000);
/* 062 */
/* 063 */       // output the result
/* 064 */
/* 065 */       ((org.apache.spark.sql.execution.metric.SQLMetric) references[0] /* numOutputRows */).add(1);
/* 066 */       agg_mutableStateArray_0[0].reset();
/* 067 */
/* 068 */       agg_mutableStateArray_0[0].zeroOutNullBytes();
/* 069 */
/* 070 */       agg_mutableStateArray_0[0].write(0, agg_bufValue_0);
/* 071 */       append((agg_mutableStateArray_0[0].getRow()));
/* 072 */     }
/* 073 */   }
/* 074 */
/* 075 */ }

07:55:25.073 [pool-1-thread-1-ScalaTest-running-FileTest] DEBUG org.apache.spark.sql.execution.WholeStageCodegenExec - 
/* 001 */ public Object generate(Object[] references) {
/* 002 */   return new GeneratedIteratorForCodegenStage1(references);
/* 003 */ }
/* 004 */
/* 005 */ // codegenStageId=1
/* 006 */ final class GeneratedIteratorForCodegenStage1 extends org.apache.spark.sql.execution.BufferedRowIterator {
/* 007 */   private Object[] references;
/* 008 */   private scala.collection.Iterator[] inputs;
/* 009 */   private boolean agg_initAgg_0;
/* 010 */   private boolean agg_bufIsNull_0;
/* 011 */   private long agg_bufValue_0;
/* 012 */   private scala.collection.Iterator inputadapter_input_0;
/* 013 */   private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter[] agg_mutableStateArray_0 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter[1];
/* 014 */
/* 015 */   public GeneratedIteratorForCodegenStage1(Object[] references) {
/* 016 */     this.references = references;
/* 017 */   }
/* 018 */
/* 019 */   public void init(int index, scala.collection.Iterator[] inputs) {
/* 020 */     partitionIndex = index;
/* 021 */     this.inputs = inputs;
/* 022 */
/* 023 */     inputadapter_input_0 = inputs[0];
/* 024 */     agg_mutableStateArray_0[0] = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(1, 0);
/* 025 */
/* 026 */   }
/* 027 */
/* 028 */   private void agg_doAggregateWithoutKey_0() throws java.io.IOException {
/* 029 */     // initialize aggregation buffer
/* 030 */     agg_bufIsNull_0 = false;
/* 031 */     agg_bufValue_0 = 0L;
/* 032 */
/* 033 */     while (inputadapter_input_0.hasNext() && !stopEarly()) {
/* 034 */       InternalRow inputadapter_row_0 = (InternalRow) inputadapter_input_0.next();
/* 035 */       agg_doConsume_0();
/* 036 */       if (shouldStop()) return;
/* 037 */     }
/* 038 */
/* 039 */   }
/* 040 */
/* 041 */   private void agg_doConsume_0() throws java.io.IOException {
/* 042 */     // do aggregate
/* 043 */     // common sub-expressions
/* 044 */
/* 045 */     // evaluate aggregate function
/* 046 */     long agg_value_1 = -1L;
/* 047 */     agg_value_1 = agg_bufValue_0 + 1L;
/* 048 */     // update aggregation buffer
/* 049 */     agg_bufIsNull_0 = false;
/* 050 */     agg_bufValue_0 = agg_value_1;
/* 051 */
/* 052 */   }
/* 053 */
/* 054 */   protected void processNext() throws java.io.IOException {
/* 055 */     while (!agg_initAgg_0) {
/* 056 */       agg_initAgg_0 = true;
/* 057 */       long agg_beforeAgg_0 = System.nanoTime();
/* 058 */       agg_doAggregateWithoutKey_0();
/* 059 */       ((org.apache.spark.sql.execution.metric.SQLMetric) references[1] /* aggTime */).add((System.nanoTime() - agg_beforeAgg_0) / 1000000);
/* 060 */
/* 061 */       // output the result
/* 062 */
/* 063 */       ((org.apache.spark.sql.execution.metric.SQLMetric) references[0] /* numOutputRows */).add(1);
/* 064 */       agg_mutableStateArray_0[0].reset();
/* 065 */
/* 066 */       agg_mutableStateArray_0[0].zeroOutNullBytes();
/* 067 */
/* 068 */       agg_mutableStateArray_0[0].write(0, agg_bufValue_0);
/* 069 */       append((agg_mutableStateArray_0[0].getRow()));
/* 070 */     }
/* 071 */   }
/* 072 */
/* 073 */ }

07:55:25.074 [pool-1-thread-1-ScalaTest-running-FileTest] DEBUG org.apache.spark.util.ClosureCleaner - Cleaning lambda: $anonfun$doExecute$4$adapted
07:55:25.075 [pool-1-thread-1-ScalaTest-running-FileTest] DEBUG org.apache.spark.util.ClosureCleaner -  +++ Lambda closure ($anonfun$doExecute$4$adapted) is now cleaned +++
07:55:25.076 [pool-1-thread-1-ScalaTest-running-FileTest] DEBUG org.apache.spark.util.ClosureCleaner - Cleaning lambda: $anonfun$doExecute$4$adapted
07:55:25.077 [pool-1-thread-1-ScalaTest-running-FileTest] DEBUG org.apache.spark.util.ClosureCleaner -  +++ Lambda closure ($anonfun$doExecute$4$adapted) is now cleaned +++
07:55:25.078 [pool-1-thread-1-ScalaTest-running-FileTest] DEBUG org.apache.spark.util.ClosureCleaner - Cleaning lambda: $anonfun$collect$2
07:55:25.080 [pool-1-thread-1-ScalaTest-running-FileTest] DEBUG org.apache.spark.util.ClosureCleaner -  +++ Lambda closure ($anonfun$collect$2) is now cleaned +++
07:55:25.081 [pool-1-thread-1-ScalaTest-running-FileTest] DEBUG org.apache.spark.util.ClosureCleaner - Cleaning lambda: $anonfun$runJob$5
07:55:25.083 [pool-1-thread-1-ScalaTest-running-FileTest] DEBUG org.apache.spark.util.ClosureCleaner -  +++ Lambda closure ($anonfun$runJob$5) is now cleaned +++
...
...

Is there some option to reduce this?
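
For reference, the kind of workaround I have in mind is sketched below: lowering Spark's own logger level from the test suite. This is only a sketch and assumes Log4j 1.x is the logging binding Spark picks up (the default in the standard Spark 2.x/3.x distributions); the suite name FileTest is just the one from the log above.

```scala
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.scalatest.funsuite.AnyFunSuite

class FileTest extends AnyFunSuite {

  // Quiet Spark's internal loggers before the session is created, so
  // WholeStageCodegenExec / ClosureCleaner DEBUG output does not flood the run.
  Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
  Logger.getLogger("org.apache.hadoop").setLevel(Level.WARN)

  private val spark = SparkSession.builder()
    .master("local[*]")
    .appName("FileTest")
    .getOrCreate()

  // setLogLevel overrides the root log level for the running SparkContext.
  spark.sparkContext.setLogLevel("WARN")

  test("row count") {
    import spark.implicits._
    assert(Seq(1L, 2L, 3L).toDF("value").count() === 3)
  }
}
```

If the test classpath is actually bound to Logback (the timestamp/thread pattern in the log above looks like Logback's default layout), the equivalent knob would be a logback-test.xml in src/test/resources that sets the org.apache.spark logger to WARN.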

kination changed the title from "Reduce log" to "Reduce log in unit test" on Feb 12, 2021