Hive alter table

Continued from http://bupt04406.iteye.com/blog/1151545

create table tablePartition(s string) partitioned by(pt string);
alter table tablePartition add if not exists partition(pt='1');
alter table tablePartition set TBLPROPERTIES ('EXTERNAL'='TRUE');   -- convert a managed (internal) table into an external table
alter table tablePartition set TBLPROPERTIES ('EXTERNAL'='FALSE');  -- convert an external table back into a managed (internal) table

hive> create table tablePartition(s string) partitioned by(pt string);
OK
Time taken: 0.209 seconds
hive> desc formatted tablepartition;                                  
OK
# col_name            data_type           comment            

s                   string              None               

# Partition Information
# col_name            data_type           comment            

pt                  string              None               

# Detailed Table Information
Database:           default            
Owner:              root               
CreateTime:         Mon Aug 29 19:05:22 PDT 2011
LastAccessTime:     UNKNOWN            
Retention:          0                  
Location:           hdfs://localhost:54310/user/hive/warehouse/tablepartition
Table Type:         MANAGED_TABLE      
Table Parameters:
transient_lastDdlTime 1314669922         

# Storage Information
SerDe Library:      org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
InputFormat:        org.apache.hadoop.mapred.TextInputFormat
OutputFormat:       org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed:         No                 
Num Buckets:        -1                 
Bucket Columns:     []                 
Sort Columns:       []                 
Storage Desc Params:
serialization.format 1  



hive> alter table tablePartition set TBLPROPERTIES ('EXTERNAL'='TRUE');
OK
Time taken: 0.116 seconds
hive> desc formatted tablepartition;                                  
OK
# col_name            data_type           comment            

s                   string              None               

# Partition Information
# col_name            data_type           comment            

pt                  string              None               

# Detailed Table Information
Database:           default            
Owner:              root               
CreateTime:         Mon Aug 29 19:05:22 PDT 2011
LastAccessTime:     UNKNOWN            
Retention:          0                  
Location:           hdfs://localhost:54310/user/hive/warehouse/tablepartition
Table Type:         EXTERNAL_TABLE     
Table Parameters:
EXTERNAL            TRUE               
last_modified_by    tianzhao           
last_modified_time  1314670000         
transient_lastDdlTime 1314670000         

# Storage Information
SerDe Library:      org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
InputFormat:        org.apache.hadoop.mapred.TextInputFormat
OutputFormat:       org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed:         No                 
Num Buckets:        -1                 
Bucket Columns:     []                 
Sort Columns:       []                 
Storage Desc Params:
serialization.format 1                  
Time taken: 0.089 seconds
    




CliDriver:
CliDriver.main() {
   ret = cli.processLine(line); //alter table tablePartition set TBLPROPERTIES ('EXTERNAL'='TRUE');
}
CliDriver:
public int processLine(String line) { // line = alter table tablePartition set TBLPROPERTIES ('EXTERNAL'='TRUE');
   ret = processCmd(command);
}

CliDriver:
  public int processCmd(String cmd) { // cmd = alter table tablePartition set TBLPROPERTIES ('EXTERNAL'='TRUE')
     CommandProcessor proc = CommandProcessorFactory.get(tokens[0]);
     Driver qp = (Driver) proc;
     ret = qp.run(cmd).getResponseCode();
  }

CommandProcessor proc = CommandProcessorFactory.get(tokens[0]); // tokens[0] = alter
For "alter" the factory creates and returns new Driver() to process the statement.
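
CommandProcessorFactory only looks at the first token of the line. Below is a minimal, self-contained sketch of that dispatch style; it is not the actual Hive factory (which also builds dedicated processors for set/dfs/add/delete), and DriverStub only stands in for org.apache.hadoop.hive.ql.Driver:

// Simplified mimic of the dispatch performed by CommandProcessorFactory:
// built-in commands get dedicated processors, everything else -- including
// "alter" -- falls through to the SQL Driver.
public class DispatchSketch {
    interface CommandProcessor { int run(String command); }

    static class DriverStub implements CommandProcessor {           // stand-in for org.apache.hadoop.hive.ql.Driver
        public int run(String command) {
            System.out.println("compile + execute: " + command);
            return 0;
        }
    }

    static CommandProcessor get(String firstToken) {
        switch (firstToken.toLowerCase()) {
            case "set":
            case "dfs":
            case "add":
            case "delete":
                throw new UnsupportedOperationException("dedicated processors omitted in this sketch");
            default:
                return new DriverStub();                             // "alter", "select", "create", ... all go here
        }
    }

    public static void main(String[] args) {
        String cmd = "alter table tablePartition set TBLPROPERTIES ('EXTERNAL'='TRUE')";
        String[] tokens = cmd.split("\\s+");
        get(tokens[0]).run(cmd);                                     // tokens[0] = "alter" -> DriverStub
    }
}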

Driver:
CommandProcessorResponse run(String command)  {
// command = alter table tablePartition set TBLPROPERTIES ('EXTERNAL'='TRUE') 
   int ret = compile(command);
   ret = execute();
}

Driver:
public int compile(String command) {
       SemanticAnalyzerFactory.get(ASTNode tree){
           return new DDLSemanticAnalyzer(conf);
       }

}
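
SemanticAnalyzerFactory.get() chooses the analyzer from the root token type of the parsed ANTLR tree. A toy restatement of that selection, using stub classes instead of the real Hive analyzers:

// DDL statements such as ALTER TABLE ... SET TBLPROPERTIES parse to a DDL root
// token and are routed to DDLSemanticAnalyzer; ordinary queries go to the
// general SemanticAnalyzer.
public class AnalyzerFactorySketch {
    enum Token { TOK_QUERY, TOK_ALTERTABLE_PROPERTIES, TOK_DROPTABLE }

    static class BaseAnalyzerStub {}
    static class SemanticAnalyzerStub extends BaseAnalyzerStub {}
    static class DDLSemanticAnalyzerStub extends BaseAnalyzerStub {}

    static BaseAnalyzerStub get(Token rootToken) {
        switch (rootToken) {
            case TOK_ALTERTABLE_PROPERTIES:
            case TOK_DROPTABLE:
                return new DDLSemanticAnalyzerStub();   // DDL goes to the DDL analyzer
            default:
                return new SemanticAnalyzerStub();      // SELECT / INSERT etc.
        }
    }

    public static void main(String[] args) {
        System.out.println(get(Token.TOK_ALTERTABLE_PROPERTIES).getClass().getSimpleName());
        // prints: DDLSemanticAnalyzerStub
    }
}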

DDLSemanticAnalyzer:
public void analyzeInternal(ASTNode ast) throws SemanticException {
    } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_PROPERTIES) {
      analyzeAlterTableProps(ast, false);
    }
}

DDLSemanticAnalyzer:
  private void analyzeAlterTableProps(ASTNode ast, boolean expectView)
    throws SemanticException {
    String tableName = unescapeIdentifier(ast.getChild(0).getText()); // tablePartition
    HashMap<String, String> mapProp = getProps((ASTNode) (ast.getChild(1))
        .getChild(0)); // {EXTERNAL=TRUE}
    AlterTableDesc alterTblDesc =
      new AlterTableDesc(AlterTableTypes.ADDPROPS, expectView);
    alterTblDesc.setProps(mapProp); // {EXTERNAL=TRUE}
    alterTblDesc.setOldName(tableName); // tablePartition

    accessTbName = tableName; //tablePartition
    privilege = Privilege.ALTER_PRIV;

    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
        alterTblDesc), conf)); // wrap alterTblDesc in a DDLWork; since alterTblDesc is non-null, TaskFactory produces a DDLTask
  }
DDLSemanticAnalyzer:
  static HashMap<String, String> getProps(ASTNode prop) {
    HashMap<String, String> mapProp = new HashMap<String, String>();
    readProps(prop, mapProp);
    return mapProp; // {EXTERNAL=TRUE}
  }
BaseSemanticAnalyzer:
  public static void readProps(
    ASTNode prop, Map<String, String> mapProp) {

    for (int propChild = 0; propChild < prop.getChildCount(); propChild++) {
      String key = unescapeSQLString(prop.getChild(propChild).getChild(0)
          .getText()); // EXTERNAL
      String value = unescapeSQLString(prop.getChild(propChild).getChild(1)
          .getText()); // TRUE
      mapProp.put(key, value); // {EXTERNAL=TRUE}
    }
  }
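
Judging from the child accesses above, the property-list node has one child per key/value pair, with the quoted key at index 0 and the quoted value at index 1. A toy, runnable mimic of that traversal (TreeNode, stripQuotes and the node labels are illustrative stand-ins, not Hive classes):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Toy model of the property-list subtree that readProps() iterates over.
public class ReadPropsSketch {
    static class TreeNode {                                     // invented stand-in for ASTNode
        final String text;
        final List<TreeNode> children = new ArrayList<TreeNode>();
        TreeNode(String text) { this.text = text; }
        TreeNode add(TreeNode c) { children.add(c); return this; }
    }

    // invented stand-in for unescapeSQLString: just strips the surrounding quotes
    static String stripQuotes(String s) { return s.substring(1, s.length() - 1); }

    static Map<String, String> readProps(TreeNode propList) {
        Map<String, String> props = new HashMap<String, String>();
        for (TreeNode pair : propList.children) {
            props.put(stripQuotes(pair.children.get(0).text),   // child 0 = key,   e.g. 'EXTERNAL'
                      stripQuotes(pair.children.get(1).text));  // child 1 = value, e.g. 'TRUE'
        }
        return props;
    }

    public static void main(String[] args) {
        TreeNode pair = new TreeNode("property")
            .add(new TreeNode("'EXTERNAL'"))
            .add(new TreeNode("'TRUE'"));
        TreeNode propList = new TreeNode("propertyList").add(pair);
        System.out.println(readProps(propList));                // {EXTERNAL=TRUE}
    }
}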

  public AlterTableDesc(AlterTableTypes alterType, boolean expectView) {
    op = alterType; // ADDPROPS
    this.expectView = expectView; // false
  }


The execution path from here down to the metastore:
Driver.launchTask
TaskRunner.runSequential
Task.executeTask
DDLTask.execute
      AlterTableDesc alterTbl = work.getAlterTblDesc();
      if (alterTbl != null) {
        return alterTable(db, alterTbl);
      }
DDLTask.alterTable(Hive db, AlterTableDesc alterTbl)
Hive.alterTable
HiveMetaStoreClient.alter_table
HiveMetaStore.HMSHandler.alter_table
HiveAlterHandler.alterTable
ObjectStore.alterTable



  private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException {
    // alter the table
    Table tbl = db.getTable(alterTbl.getOldName()); // fetch the current metadata of tablePartition from the metastore

    Partition part = null;
    if(alterTbl.getPartSpec() != null) {
      part = db.getPartition(tbl, alterTbl.getPartSpec(), false);
      if(part == null) {
        console.printError("Partition : " + alterTbl.getPartSpec().toString()
            + " does not exist.");
        return 1;
      }
    }

    validateAlterTableType(tbl, alterTbl.getOp()); // op = ADDPROPS

    if (tbl.isView()) {
      if (!alterTbl.getExpectView()) {
        throw new HiveException("Cannot alter a view with ALTER TABLE");
      }
    } else {
      if (alterTbl.getExpectView()) {
        throw new HiveException("Cannot alter a base table with ALTER VIEW");
      }
    }

    Table oldTbl = tbl.copy(); // keep a copy of the old table

    if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) {
      tbl.setTableName(alterTbl.getNewName());
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCOLS) {
      List<FieldSchema> newCols = alterTbl.getNewCols();
      List<FieldSchema> oldCols = tbl.getCols();
      if (tbl.getSerializationLib().equals(
          "org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
        console
            .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
        tbl.setSerializationLib(LazySimpleSerDe.class.getName());
        tbl.getTTable().getSd().setCols(newCols);
      } else {
        // make sure the columns does not already exist
        Iterator<FieldSchema> iterNewCols = newCols.iterator();
        while (iterNewCols.hasNext()) {
          FieldSchema newCol = iterNewCols.next();
          String newColName = newCol.getName();
          Iterator<FieldSchema> iterOldCols = oldCols.iterator();
          while (iterOldCols.hasNext()) {
            String oldColName = iterOldCols.next().getName();
            if (oldColName.equalsIgnoreCase(newColName)) {
              console.printError("Column '" + newColName + "' exists");
              return 1;
            }
          }
          oldCols.add(newCol);
        }
        tbl.getTTable().getSd().setCols(oldCols);
      }
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAMECOLUMN) {
      List<FieldSchema> oldCols = tbl.getCols();
      List<FieldSchema> newCols = new ArrayList<FieldSchema>();
      Iterator<FieldSchema> iterOldCols = oldCols.iterator();
      String oldName = alterTbl.getOldColName();
      String newName = alterTbl.getNewColName();
      String type = alterTbl.getNewColType();
      String comment = alterTbl.getNewColComment();
      boolean first = alterTbl.getFirst();
      String afterCol = alterTbl.getAfterCol();
      FieldSchema column = null;

      boolean found = false;
      int position = -1;
      if (first) {
        position = 0;
      }

      int i = 1;
      while (iterOldCols.hasNext()) {
        FieldSchema col = iterOldCols.next();
        String oldColName = col.getName();
        if (oldColName.equalsIgnoreCase(newName)
            && !oldColName.equalsIgnoreCase(oldName)) {
          console.printError("Column '" + newName + "' exists");
          return 1;
        } else if (oldColName.equalsIgnoreCase(oldName)) {
          col.setName(newName);
          if (type != null && !type.trim().equals("")) {
            col.setType(type);
          }
          if (comment != null) {
            col.setComment(comment);
          }
          found = true;
          if (first || (afterCol != null && !afterCol.trim().equals(""))) {
            column = col;
            continue;
          }
        }

        if (afterCol != null && !afterCol.trim().equals("")
            && oldColName.equalsIgnoreCase(afterCol)) {
          position = i;
        }

        i++;
        newCols.add(col);
      }

      // did not find the column
      if (!found) {
        console.printError("Column '" + oldName + "' does not exist");
        return 1;
      }
      // after column is not null, but we did not find it.
      if ((afterCol != null && !afterCol.trim().equals("")) && position < 0) {
        console.printError("Column '" + afterCol + "' does not exist");
        return 1;
      }

      if (position >= 0) {
        newCols.add(position, column);
      }

      tbl.getTTable().getSd().setCols(newCols);
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.REPLACECOLS) {
      // change SerDe to LazySimpleSerDe if it is columnsetSerDe
      if (tbl.getSerializationLib().equals(
          "org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
        console
            .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
        tbl.setSerializationLib(LazySimpleSerDe.class.getName());
      } else if (!tbl.getSerializationLib().equals(
          MetadataTypedColumnsetSerDe.class.getName())
          && !tbl.getSerializationLib().equals(LazySimpleSerDe.class.getName())
          && !tbl.getSerializationLib().equals(ColumnarSerDe.class.getName())
          && !tbl.getSerializationLib().equals(DynamicSerDe.class.getName())) {
        console.printError("Replace columns is not supported for this table. "
            + "SerDe may be incompatible.");
        return 1;
      }
      tbl.getTTable().getSd().setCols(alterTbl.getNewCols());
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDPROPS) {  // branch taken for SET TBLPROPERTIES
      tbl.getTTable().getParameters().putAll(alterTbl.getProps());
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDEPROPS) {
      tbl.getTTable().getSd().getSerdeInfo().getParameters().putAll(
          alterTbl.getProps());
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDE) {
      tbl.setSerializationLib(alterTbl.getSerdeName());
      if ((alterTbl.getProps() != null) && (alterTbl.getProps().size() > 0)) {
        tbl.getTTable().getSd().getSerdeInfo().getParameters().putAll(
            alterTbl.getProps());
      }
      tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getTableName(), tbl
          .getDeserializer()));
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDFILEFORMAT) {
      if(part != null) {
        part.getTPartition().getSd().setInputFormat(alterTbl.getInputFormat());
        part.getTPartition().getSd().setOutputFormat(alterTbl.getOutputFormat());
        if (alterTbl.getSerdeName() != null) {
          part.getTPartition().getSd().getSerdeInfo().setSerializationLib(
              alterTbl.getSerdeName());
        }
      } else {
        tbl.getTTable().getSd().setInputFormat(alterTbl.getInputFormat());
        tbl.getTTable().getSd().setOutputFormat(alterTbl.getOutputFormat());
        if (alterTbl.getSerdeName() != null) {
          tbl.setSerializationLib(alterTbl.getSerdeName());
        }
      }
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCLUSTERSORTCOLUMN) {
      // validate sort columns and bucket columns
      List<String> columns = Utilities.getColumnNamesFromFieldSchema(tbl
          .getCols());
      Utilities.validateColumnNames(columns, alterTbl.getBucketColumns());
      if (alterTbl.getSortColumns() != null) {
        Utilities.validateColumnNames(columns, Utilities
            .getColumnNamesFromSortCols(alterTbl.getSortColumns()));
      }
      tbl.getTTable().getSd().setBucketCols(alterTbl.getBucketColumns());
      tbl.getTTable().getSd().setNumBuckets(alterTbl.getNumberBuckets());
      tbl.getTTable().getSd().setSortCols(alterTbl.getSortColumns());
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERLOCATION) {
      String newLocation = alterTbl.getNewLocation();
      try {
        URI locURI = new URI(newLocation);
        if (!locURI.isAbsolute() || locURI.getScheme() == null
            || locURI.getScheme().trim().equals("")) {
          throw new HiveException(
              newLocation
                  + " is not absolute or has no scheme information. "
                  + "Please specify a complete absolute uri with scheme information.");
        }
        if (part != null) {
          part.setLocation(newLocation);
        } else {
          tbl.setDataLocation(locURI);
        }
      } catch (URISyntaxException e) {
        throw new HiveException(e);
      }
    } else {
      console.printError("Unsupported Alter commnad");
      return 1;
    }

    // set last modified by properties
    String user = null;
    try {
      user = conf.getUser();
    } catch (IOException e) {
      console.printError("Unable to get current user: " + e.getMessage(),
          stringifyException(e));
      return 1;
    }

    if(part == null) {
      tbl.setProperty("last_modified_by", user);
      tbl.setProperty("last_modified_time", Long.toString(System
          .currentTimeMillis() / 1000));
      try {
        tbl.checkValidity();
      } catch (HiveException e) {
        console.printError("Invalid table columns : " + e.getMessage(),
            stringifyException(e));
        return 1;
      }
    } else {
      part.getParameters().put("last_modified_by", user);
      part.getParameters().put("last_modified_time", Long.toString(System
          .currentTimeMillis() / 1000));
    }

    try {
      if (part == null) {
        db.alterTable(alterTbl.getOldName(), tbl); // push the modified table definition to the metastore
      } else {
        db.alterPartition(tbl.getTableName(), part);
      }
    } catch (InvalidOperationException e) {
      console.printError("Invalid alter operation: " + e.getMessage());
      LOG.info("alter table: " + stringifyException(e));
      return 1;
    } catch (HiveException e) {
      return 1;
    }

    // This is kind of hacky - the read entity contains the old table, whereas
    // the write entity
    // contains the new table. This is needed for rename - both the old and the
    // new table names are
    // passed
    if(part != null) {
      work.getInputs().add(new ReadEntity(part));
      work.getOutputs().add(new WriteEntity(part));
    } else {
      work.getInputs().add(new ReadEntity(oldTbl));
      work.getOutputs().add(new WriteEntity(tbl));
    }
    return 0;
  }
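
Note that the ADDPROPS branch does nothing table-type specific: it only putAll()s the new properties into the table's parameters map, and last_modified_by / last_modified_time are added the same way. The managed/external decision is not made in DDLTask. A plain-Java illustration of that merge, using the parameter values from the transcript above:

import java.util.HashMap;
import java.util.Map;

// What the ADDPROPS branch boils down to: a putAll() into the table parameters.
// The tableType itself is still MANAGED_TABLE at this point; it only changes
// later, when ObjectStore.convertToMTable() inspects the EXTERNAL parameter.
public class AddPropsSketch {
    public static void main(String[] args) {
        Map<String, String> tableParams = new HashMap<String, String>();
        tableParams.put("transient_lastDdlTime", "1314669922");   // value from the first transcript

        Map<String, String> newProps = new HashMap<String, String>();
        newProps.put("EXTERNAL", "TRUE");                          // from SET TBLPROPERTIES ('EXTERNAL'='TRUE')

        tableParams.putAll(newProps);                              // the whole ADDPROPS branch in one call
        System.out.println(tableParams);                           // now contains EXTERNAL=TRUE alongside transient_lastDdlTime
    }
}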

Hive:
  public void alterTable(String tblName, Table newTbl)
      throws InvalidOperationException, HiveException {
    try {
      getMSC().alter_table(getCurrentDatabase(), tblName,
          newTbl.getTTable());
    } catch (MetaException e) {
      throw new HiveException("Unable to alter table.", e);
    } catch (TException e) {
      throw new HiveException("Unable to alter table.", e);
    }
  }
HiveMetaStoreClient:
  public void alter_table(String dbname, String tbl_name, Table new_tbl)
      throws InvalidOperationException, MetaException, TException {
    client.alter_table(dbname, tbl_name, new_tbl);
  }
HiveMetaStore.HMSHandler:
    public void alter_table(final String dbname, final String name, final Table newTable)
        throws InvalidOperationException, MetaException {
      incrementCounter("alter_table");
      logStartFunction("alter_table: db=" + dbname + " tbl=" + name
          + " newtbl=" + newTable.getTableName()); // 11/08/28 07:02:02 INFO metastore.HiveMetaStore: 0: alter_table: db=default tbl=tablePartition newtbl=tablepartition
      newTable.putToParameters(Constants.DDL_TIME, Long.toString(System
          .currentTimeMillis() / 1000));

      try {
        executeWithRetry(new Command<Boolean>() {
          @Override
          Boolean run(RawStore ms) throws Exception {
            alterHandler.alterTable(ms, wh, dbname, name, newTable); //AlterHandler
            return Boolean.TRUE;
          }
        });
      } catch (MetaException e) {
        throw e;
      } catch (InvalidOperationException e) {
        throw e;
      } catch (Exception e) {
        assert(e instanceof RuntimeException);
        throw (RuntimeException)e;
      }

    }
HiveMetaStore.HMSHandler:
    private <T> T executeWithRetry(Command<T> cmd) throws Exception {
      T ret = null;

      boolean gotNewConnectUrl = false;
      boolean reloadConf = HiveConf.getBoolVar(hiveConf,
          HiveConf.ConfVars.METASTOREFORCERELOADCONF);

      if (reloadConf) {
        updateConnectionURL(getConf(), null);
      }

      int retryCount = 0;
      Exception caughtException = null;
      while(true) {
        try {
          RawStore ms = getMS(reloadConf || gotNewConnectUrl); // org.apache.hadoop.hive.metastore.ObjectStore
          ret = cmd.run(ms);
          break;
        } catch (javax.jdo.JDOFatalDataStoreException e) {
          caughtException = e;
        } catch (javax.jdo.JDODataStoreException e) {
          caughtException = e;
        }

        if (retryCount >= retryLimit) {
          throw caughtException;
        }

        assert(retryInterval >= 0);
        retryCount++;
        LOG.error(
            String.format(
                "JDO datastore error. Retrying metastore command " +
                "after %d ms (attempt %d of %d)", retryInterval, retryCount, retryLimit));
        Thread.sleep(retryInterval);
        // If we have a connection error, the JDO connection URL hook might
        // provide us with a new URL to access the datastore.
        String lastUrl = getConnectionURL(getConf());
        gotNewConnectUrl = updateConnectionURL(getConf(), lastUrl);
      }
      return ret;
    }
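
executeWithRetry() wraps every metastore command in the same pattern: run it against the RawStore, retry on JDO datastore exceptions up to retryLimit, sleep retryInterval ms between attempts, and optionally pick up a new connection URL. A generic, self-contained sketch of that retry loop (the exception type and the URL-refresh hook are simplified away):

import java.util.concurrent.Callable;

// Generic version of the retry loop in HMSHandler.executeWithRetry():
// run the command, retry only on the "retryable" exception type, rethrow
// once the retry budget is exhausted.
public class RetrySketch {
    static <T> T executeWithRetry(Callable<T> cmd, int retryLimit, long retryIntervalMs)
            throws Exception {
        int retryCount = 0;
        Exception caught = null;
        while (true) {
            try {
                return cmd.call();                       // corresponds to cmd.run(ms)
            } catch (IllegalStateException e) {          // stand-in for the JDO datastore exceptions
                caught = e;
            }
            if (retryCount >= retryLimit) {
                throw caught;
            }
            retryCount++;
            System.out.printf("datastore error, retrying after %d ms (attempt %d of %d)%n",
                    retryIntervalMs, retryCount, retryLimit);
            Thread.sleep(retryIntervalMs);
        }
    }

    public static void main(String[] args) throws Exception {
        final int[] attempts = {0};
        String result = executeWithRetry(new Callable<String>() {
            public String call() {
                if (++attempts[0] < 3) throw new IllegalStateException("transient JDO error");
                return "alter_table committed";
            }
        }, 5, 100);
        System.out.println(result);                      // succeeds on the third attempt
    }
}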

HiveAlterHandler:
  public void alterTable(RawStore msdb, Warehouse wh, String dbname,
      String name, Table newt) throws InvalidOperationException, MetaException { // msdb=org.apache.hadoop.hive.metastore.ObjectStore
    if (newt == null) {
      throw new InvalidOperationException("New table is invalid: " + newt);
    }

    if (!MetaStoreUtils.validateName(newt.getTableName())
        || !MetaStoreUtils.validateColNames(newt.getSd().getCols())) {
      throw new InvalidOperationException(newt.getTableName()
          + " is not a valid object name");
    }

    Path srcPath = null;
    FileSystem srcFs = null;
    Path destPath = null;
    FileSystem destFs = null;

    boolean success = false;
    String oldTblLoc = null;
    String newTblLoc = null;
    boolean moveData = false;
    boolean rename = false;
    try {
      msdb.openTransaction();
      name = name.toLowerCase(); // tablepartition
      dbname = dbname.toLowerCase(); // default

      // check if table with the new name already exists
      if (!newt.getTableName().equalsIgnoreCase(name)
          || !newt.getDbName().equalsIgnoreCase(dbname)) {
        if (msdb.getTable(newt.getDbName(), newt.getTableName()) != null) {
          throw new InvalidOperationException("new table " + newt.getDbName()
              + "." + newt.getTableName() + " already exists");
        }
        rename = true;
      }

      // get old table
      Table oldt = msdb.getTable(dbname, name); // Table(tableName:tablepartition, dbName:default, owner:root, createTime:1314540180, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:s, type:string, comment:null)], location:hdfs://localhost:54310/user/hive/warehouse/tablepartition, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:pt, type:string, comment:null)], parameters:{transient_lastDdlTime=1314540180}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)

      if (oldt == null) {
        throw new InvalidOperationException("table " + newt.getDbName() + "."
            + newt.getTableName() + " doesn't exist");
      }

      // check that partition keys have not changed
      if (oldt.getPartitionKeys().size() != newt.getPartitionKeys().size()
          || !oldt.getPartitionKeys().containsAll(newt.getPartitionKeys())) {
        throw new InvalidOperationException(
            "partition keys can not be changed.");
      }

      // if this alter is a rename, and user didn't change the
      // default location (or new location is empty), and table is
      // not an external table, that means user is asking metastore
      // to move data to new location corresponding to the new name
      if (rename
          && (oldt.getSd().getLocation().compareTo(newt.getSd().getLocation()) == 0
            || StringUtils.isEmpty(newt.getSd().getLocation()))
          && !MetaStoreUtils.isExternalTable(oldt)) {  // rename=false
        // that means user is asking metastore to move data to new location
        // corresponding to the new name
        // get new location
        newTblLoc = wh.getDefaultTablePath(newt.getDbName(),
            newt.getTableName()).toString();
        newt.getSd().setLocation(newTblLoc);
        oldTblLoc = oldt.getSd().getLocation();
        moveData = true;
        // check that destination does not exist otherwise we will be
        // overwriting data
        srcPath = new Path(oldTblLoc);
        srcFs = wh.getFs(srcPath);
        destPath = new Path(newTblLoc);
        destFs = wh.getFs(destPath);
        // check that src and dest are on the same file system
        if (srcFs != destFs) {
          throw new InvalidOperationException("table new location " + destPath
              + " is on a different file system than the old location "
              + srcPath + ". This operation is not supported");
        }
        try {
          srcFs.exists(srcPath); // check that src exists and also checks
                                 // permissions necessary
          if (destFs.exists(destPath)) {
            throw new InvalidOperationException("New location for this table "
                + newt.getDbName() + "." + newt.getTableName()
                + " already exists : " + destPath);
          }
        } catch (IOException e) {
          Warehouse.closeFs(srcFs);
          Warehouse.closeFs(destFs);
          throw new InvalidOperationException("Unable to access new location "
              + destPath + " for table " + newt.getDbName() + "."
              + newt.getTableName());
        }
        // also the location field in partition
        List<Partition> parts = msdb.getPartitions(dbname, name, 0);
        for (Partition part : parts) {
          String oldPartLoc = part.getSd().getLocation();
          String oldTblLocPath = new Path(oldTblLoc).toUri().getPath();
          String newTblLocPath = new Path(newTblLoc).toUri().getPath();
          if (oldPartLoc.contains(oldTblLocPath)) {
            URI newPartLocUri = null;
            try {
              URI oldPartLocUri = new URI(oldPartLoc);
              newPartLocUri = new URI(
                  oldPartLocUri.getScheme(),
                  oldPartLocUri.getUserInfo(),
                  oldPartLocUri.getHost(),
                  oldPartLocUri.getPort(),
                  oldPartLocUri.getPath().replace(oldTblLocPath, newTblLocPath),
                  oldPartLocUri.getQuery(),
                  oldPartLocUri.getFragment());
            } catch (URISyntaxException e) {
              throw new InvalidOperationException("Old partition location " +
              " is invalid. (" + oldPartLoc + ")");
            }
            part.getSd().setLocation(newPartLocUri.toString());
            msdb.alterPartition(dbname, name, part);
          }
        }
      }
      // now finally call alter table
      msdb.alterTable(dbname, name, newt);
//newt=Table(tableName:tablepartition, dbName:default, owner:root, createTime:1314540180, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:s, type:string, comment:null)], location:hdfs://localhost:54310/user/hive/warehouse/tablepartition, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:pt, type:string, comment:null)], parameters:{EXTERNAL=TRUE, last_modified_by=tianzhao, last_modified_time=1314540223, transient_lastDdlTime=1314540299}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)

      // commit the changes
      success = msdb.commitTransaction();
    } catch (InvalidObjectException e) {
      LOG.debug(e);
      throw new InvalidOperationException(
          "Unable to change partition or table."
              + " Check metastore logs for detailed stack." + e.getMessage());
    } finally {
      if (!success) {
        msdb.rollbackTransaction();
      }
      if (success && moveData) {
        // change the file name in hdfs
        // check that src exists otherwise there is no need to copy the data
        try {
          if (srcFs.exists(srcPath)) {
            // rename the src to destination
            srcFs.rename(srcPath, destPath);
          }
        } catch (IOException e) {
          throw new InvalidOperationException("Unable to access old location "
              + srcPath + " for table " + dbname + "." + name);
        }
      }
    }

  }
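
For the rename-with-move case above (not taken for our SET TBLPROPERTIES statement, where rename=false), each partition location is rebuilt by swapping the old table-path prefix for the new one while keeping scheme, host and port. A standalone demonstration of that URI surgery with java.net.URI; the paths are made up for illustration:

import java.net.URI;
import java.net.URISyntaxException;

// Rebuild a partition location after a table rename, the same way
// HiveAlterHandler does it: keep scheme/host/port, replace only the
// old table path prefix inside the URI path.
public class PartitionLocationRewrite {
    public static void main(String[] args) throws URISyntaxException {
        String oldTblLoc  = "hdfs://localhost:54310/user/hive/warehouse/tablepartition";
        String newTblLoc  = "hdfs://localhost:54310/user/hive/warehouse/newname";
        String oldPartLoc = "hdfs://localhost:54310/user/hive/warehouse/tablepartition/pt=1";

        String oldTblLocPath = new URI(oldTblLoc).getPath();    // /user/hive/warehouse/tablepartition
        String newTblLocPath = new URI(newTblLoc).getPath();    // /user/hive/warehouse/newname

        URI oldPartLocUri = new URI(oldPartLoc);
        URI newPartLocUri = new URI(
                oldPartLocUri.getScheme(),                      // hdfs
                oldPartLocUri.getUserInfo(),
                oldPartLocUri.getHost(),                        // localhost
                oldPartLocUri.getPort(),                        // 54310
                oldPartLocUri.getPath().replace(oldTblLocPath, newTblLocPath),
                oldPartLocUri.getQuery(),
                oldPartLocUri.getFragment());

        System.out.println(newPartLocUri);
        // hdfs://localhost:54310/user/hive/warehouse/newname/pt=1
    }
}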

ObjectStore:
  public void alterTable(String dbname, String name, Table newTable)
      throws InvalidObjectException, MetaException {
    boolean success = false;
    try {
      openTransaction();
      name = name.toLowerCase();
      dbname = dbname.toLowerCase();
      MTable newt = convertToMTable(newTable); // inside convertToMTable the tableType is switched to EXTERNAL_TABLE
      if (newt == null) {
        throw new InvalidObjectException("new table is invalid");
      }

      MTable oldt = getMTable(dbname, name);
      if (oldt == null) {
        throw new MetaException("table " + name + " doesn't exist");
      }

      // For now only alter name, owner, paramters, cols, bucketcols are allowed
      oldt.setTableName(newt.getTableName().toLowerCase());
      oldt.setParameters(newt.getParameters());
      oldt.setOwner(newt.getOwner());
      oldt.setSd(newt.getSd());
      oldt.setDatabase(newt.getDatabase());
      oldt.setRetention(newt.getRetention());
      oldt.setPartitionKeys(newt.getPartitionKeys());
      oldt.setTableType(newt.getTableType());

      // commit the changes
      success = commitTransaction();
    } finally {
      if (!success) {
        rollbackTransaction();
      }
    }
  }

ObjectStore:
  private MTable convertToMTable(Table tbl) throws InvalidObjectException,
      MetaException {
    if (tbl == null) {
      return null;
    }
    MDatabase mdb = null;
    try {
      mdb = getMDatabase(tbl.getDbName());
    } catch (NoSuchObjectException e) {
      LOG.error(StringUtils.stringifyException(e));
      throw new InvalidObjectException("Database " + tbl.getDbName()
          + " doesn't exist.");
    }

    // If the table has property EXTERNAL set, update table type
    // accordingly
    String tableType = tbl.getTableType();
    boolean isExternal = "TRUE".equals(tbl.getParameters().get("EXTERNAL"));
    if (TableType.MANAGED_TABLE.toString().equals(tableType)) {
      if (isExternal) {
        tableType = TableType.EXTERNAL_TABLE.toString();
      }
    }
    if (TableType.EXTERNAL_TABLE.toString().equals(tableType)) {
      if (!isExternal) {
        tableType = TableType.MANAGED_TABLE.toString();
      }
    }

    return new MTable(tbl.getTableName().toLowerCase(), mdb,
        convertToMStorageDescriptor(tbl.getSd()), tbl.getOwner(), tbl
            .getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(),
        convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(),
        tbl.getViewOriginalText(), tbl.getViewExpandedText(),
        tableType);
  }
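
This is where the managed/external switch actually happens: convertToMTable() reads the EXTERNAL parameter and overrides the incoming tableType before the MTable is written back. The same decision, isolated so it runs on its own:

// The core of the managed <-> external conversion, stripped of the metastore
// plumbing: the EXTERNAL table property wins over the incoming tableType,
// in both directions.
public class TableTypeFlipSketch {
    enum TableType { MANAGED_TABLE, EXTERNAL_TABLE }

    static TableType resolve(TableType incoming, String externalParam) {
        boolean isExternal = "TRUE".equals(externalParam);    // case-sensitive, as in the code above
        if (incoming == TableType.MANAGED_TABLE && isExternal) {
            return TableType.EXTERNAL_TABLE;                  // managed -> external
        }
        if (incoming == TableType.EXTERNAL_TABLE && !isExternal) {
            return TableType.MANAGED_TABLE;                   // external -> managed
        }
        return incoming;
    }

    public static void main(String[] args) {
        // ALTER TABLE ... SET TBLPROPERTIES ('EXTERNAL'='TRUE') on a managed table:
        System.out.println(resolve(TableType.MANAGED_TABLE, "TRUE"));    // EXTERNAL_TABLE
        // ... and 'EXTERNAL'='FALSE' flips it back:
        System.out.println(resolve(TableType.EXTERNAL_TABLE, "FALSE"));  // MANAGED_TABLE
        // a lowercase 'true' would not flip the type in the version shown above:
        System.out.println(resolve(TableType.MANAGED_TABLE, "true"));    // MANAGED_TABLE
    }
}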