Hive / HIVE-26759

ERROR: column "CC_START" does not exist, when Postgres is used as Hive metastore


    Description

      This error occurs when Postgres is used as the Hive Metastore database.

      hive-site.xml:

      <?xml version="1.0"?>
      <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
      <configuration>
      
          <!-- UPSTREAM -->
      
          <property>
              <name>hive.server2.logging.operation.level</name>
              <value>NONE</value>
          </property>
          <property>
              <name>hive.log4j.file</name>
              <value>hive-log4j.properties</value>
          </property>
          <property>
              <name>metastore.log4j.file</name>
              <value>metastore-log4j.properties</value>
          </property>
          <!-- Intellij -->
          <property>
              <name>hive.jar.path</name>
              <value>/Users/am/Desktop/work/upstream/hive/ql/target/hive-exec-4.0.0-SNAPSHOT.jar</value>
              <description>The location of hive_cli.jar that is used when submitting jobs in a separate jvm.</description>
          </property>
          <property>
              <name>hive.hadoop.classpath</name>
              <value>/Users/am/Desktop/work/upstream/hive/ql/target/hive-exec-4.0.0-SNAPSHOT.jar</value>
          </property>
          <property>
              <name>hive.metastore.local</name>
              <value>false</value>
          </property>
          <property>
              <name>hive.metastore.uris</name>
              <value>thrift://localhost:9083</value>
          </property>
          <property>
              <name>hive.metastore.warehouse.dir</name>
              <value>/Users/am/Desktop/work/hivestuff/warehouse</value>
          </property>
          <property>
              <name>hive.server2.metrics.enabled</name>
              <value>true</value>
          </property>
      
       
          <property>
              <name>spark.eventLog.enabled</name>
              <value>true</value>
          </property>
          <property>
              <name>spark.eventLog.dir</name>
              <value>/tmp/hive</value>
          </property>
          <!-- Intellij -->
      
      
          
          <property>
              <name>metastore.metastore.event.db.notification.api.auth</name>
              <value>false</value>
          </property>
      
          <property>
              <name>hive.metastore.schema.verification</name>
              <value>false</value>
          </property>
      
          <property>
              <name>datanucleus.autoCreateTables</name>
              <value>true</value>
          </property>
        
      
          <property>
              <name>hive.exec.scratchdir</name>
              <value>/tmp/hive-${user.name}</value>
          </property>
      
              <property>
                  <name>javax.jdo.option.ConnectionURL</name>
                  <value>jdbc:postgresql://localhost:5432/hive_metastore</value>
                  <description>JDBC connect string for a JDBC metastore</description>
              </property>
      
              <property>
                  <name>javax.jdo.option.ConnectionDriverName</name>
                  <value>org.postgresql.Driver</value>
              </property>
      
      
              <property>
                  <name>javax.jdo.option.ConnectionUserName</name>
                  <value>hive</value>
              </property>
      
              <property>
                  <name>javax.jdo.option.ConnectionPassword</name>
                  <value>hive</value>
              </property>
      
          <property>
              <name>datanucleus.schema.autoCreateAll</name>
              <value>true</value>
          </property>
      
          <property>
              <name>hive.server2.enable.doAs</name>
              <value>false</value>
              <description></description>
          </property>
      
          <property>
              <name>hive.server2.enable.impersonation</name>
              <value>false</value>
              <description></description>
          </property>
      
          <property>
              <name>dfs.namenode.acls.enabled</name>
              <value>false</value>
          </property>
      
          <!-- FAIR SCHEDULER -->
      
      
          <!-- These following lines are needed to use ACID features -->
          <!-- BEGIN -->
      
          <!--
          <property>
            <name>hive.enforce.bucketing</name>
            <value>true</value>
          </property>
          <property>
            <name>hive.support.concurrency</name>
            <value>true</value>
          </property>
          <property>
            <name>hive.exec.dynamic.partition.mode</name>
            <value>nonstrict</value>
          </property>
          <property>
            <name>hive.txn.manager</name>
            <value>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager</value>
          </property>
          <property>
            <name>hive.lock.manager</name>
            <value>org.apache.hadoop.hive.ql.lockmgr.DbLockManager</value>
          </property>
          <property>
            <name>hive.compactor.initiator.on</name>
            <value>true</value>
          </property>
          <property>
            <name>hive.compactor.worker.threads</name>
            <value>2</value>
          </property>
          -->
          <!-- END -->
      
      
      
          <property>
              <name>hive.server2.webui.explain.output</name>
              <value>true</value>
          </property>
      
          <property>
              <name>hive.server2.webui.show.graph</name>
              <value>true</value>
          </property>
      
          <property>
              <name>hive.server2.webui.show.stats</name>
              <value>true</value>
          </property>
      
          <property>
              <name>hive.server2.webui.max.graph.size</name>
              <value>40</value>
          </property>
      
          <!-- ACID -->
          <property>
              <name>hive.txn.manager</name>
              <value>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager</value>
          </property>
      
          <property>
              <name>hive.compactor.initiator.on</name>
              <value>true</value>
          </property>
      
          <property>
              <name>hive.compactor.worker.threads</name>
              <value>3</value>
          </property>
      
          <property>
              <name>metastore.compactor.worker.threads</name>
              <value>4</value>
          </property>
      
          <property>
              <name>hive.support.concurrency</name>
              <value>true</value>
          </property>
      
          <property>
              <name>hive.exec.dynamic.partition.mode</name>
              <value>nonstrict</value>
          </property>
      
          <property>
              <name>hive.lock.manager</name>
              <value>org.apache.hadoop.hive.ql.lockmgr.DbLockManager</value>
          </property>
      
          <property>
              <name>hive.compactor.crud.query.based</name>
              <value>true</value>
          </property>
      
          <property>
              <name>hive.metastore.runworker.in</name>
              <value>hs2</value>
          </property>
      
      
      
      
      
          <!-- Random -->
      
          <!--     <property>
                  <name>hive.users.in.admin.role</name>
                  <value>karencoppage</value>
              </property> -->
      
      
      
          <!--Timestamp-->
      
          <!--     <property>
                  <name>hive.parquet.write.int64.timestamp</name>
                  <value>true</value>
              </property>
           -->
          <!--for WebUI explain plan-->
      
          <!--     <property>
                  <name>hive.server2.webui.max.historic.queries</name>
                  <value>40</value>
              </property> -->
      
      
      
      
          <!--     <property>
                  <name></name>
                  <value></value>
              </property>
           -->
      </configuration>
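
      Note that this configuration relies on datanucleus.autoCreateTables / datanucleus.schema.autoCreateAll rather than a schema initialized with the schematool scripts (e.g. schematool -dbType postgres -initSchema). Whether the auto-created schema is what triggers the missing column is an assumption, but re-initializing the Postgres schema with schematool is a quick way to rule out a mis-created or differently-cased compaction table.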

      The following stack trace appears when the HMS service is started:

      [Thread-5] ERROR org.apache.hadoop.hive.ql.txn.compactor.Initiator - Initiator loop caught unexpected exception this time through the loop
      org.apache.hadoop.hive.metastore.api.MetaException: Unable to select from transaction database org.postgresql.util.PSQLException: ERROR: column "CC_START" does not exist
        Position: 1215
          at org.postgresql.core.v3.QueryExecutorImpl.receiveErrorResponse(QueryExecutorImpl.java:2676)
          at org.postgresql.core.v3.QueryExecutorImpl.processResults(QueryExecutorImpl.java:2366)
          at org.postgresql.core.v3.QueryExecutorImpl.execute(QueryExecutorImpl.java:356)
          at org.postgresql.jdbc.PgStatement.executeInternal(PgStatement.java:490)
          at org.postgresql.jdbc.PgStatement.execute(PgStatement.java:408)
          at org.postgresql.jdbc.PgPreparedStatement.executeWithFlags(PgPreparedStatement.java:181)
          at org.postgresql.jdbc.PgPreparedStatement.executeQuery(PgPreparedStatement.java:133)
          at com.zaxxer.hikari.pool.ProxyPreparedStatement.executeQuery(ProxyPreparedStatement.java:52)
          at com.zaxxer.hikari.pool.HikariProxyPreparedStatement.executeQuery(HikariProxyPreparedStatement.java)
          at org.apache.hadoop.hive.metastore.txn.TxnHandler.showCompact(TxnHandler.java:3894)
          at org.apache.hadoop.hive.ql.txn.compactor.Initiator.run(Initiator.java:154)

      This error disappears when Derby is configured as the HMS backing database.
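
      The error text itself matches Postgres's identifier case folding rules: unquoted identifiers are folded to lower case, while quoted identifiers are matched verbatim (Derby folds unquoted identifiers to upper case instead, which would explain why the same setup works there). Whether this is the root cause of HIVE-26759 is an assumption; the sketch below is only a standalone JDBC reproduction of the same error text. The table name case_fold_demo is hypothetical, the connection settings mirror the hive-site.xml above, and the Postgres JDBC driver is assumed to be on the classpath.

      import java.sql.Connection;
      import java.sql.DriverManager;
      import java.sql.Statement;

      public class CaseFoldingRepro {
          public static void main(String[] args) throws Exception {
              try (Connection conn = DriverManager.getConnection(
                      "jdbc:postgresql://localhost:5432/hive_metastore", "hive", "hive");
                   Statement st = conn.createStatement()) {
                  // Unquoted identifiers are folded to lower case,
                  // so the column is actually stored as cc_start.
                  st.execute("CREATE TABLE case_fold_demo (CC_START bigint)");
                  try {
                      // A quoted identifier is matched verbatim, so "CC_START"
                      // does not match cc_start and Postgres raises:
                      //   ERROR: column "CC_START" does not exist
                      st.executeQuery("SELECT \"CC_START\" FROM case_fold_demo");
                  } finally {
                      st.execute("DROP TABLE case_fold_demo");
                  }
              }
          }
      }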

            People

              Assignee: Akshat Mathur
              Reporter: Akshat Mathur

              Time Tracking

                Remaining Estimate: 0h
                Time Spent: 1h 40m