-
-
Notifications
You must be signed in to change notification settings - Fork 212
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
support multi-inherit #2738
support multi-inherit #2738
Changes from 6 commits
d6cc88d
2375a54
4ea2b94
6033561
b2de759
14a7c3c
facbd96
b76a774
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -332,84 +332,139 @@ func assignColumnIndexesInSchema(schema sql.Schema) sql.Schema { | |
} | ||
|
||
func (b *Builder) buildCreateTableLike(inScope *scope, ct *ast.DDL) *scope { | ||
outScope, ok := b.buildTablescan(inScope, ct.OptLike.LikeTable, nil) | ||
if !ok { | ||
b.handleErr(sql.ErrTableNotFound.New(ct.OptLike.LikeTable.Name.String())) | ||
} | ||
database := b.resolveDbForTable(ct.Table) | ||
newTableName := strings.ToLower(ct.Table.Name.String()) | ||
|
||
likeTable, ok := outScope.node.(*plan.ResolvedTable) | ||
if !ok { | ||
err := fmt.Errorf("expected resolved table: %s", ct.OptLike.LikeTable.Name.String()) | ||
b.handleErr(err) | ||
var pkSch sql.PrimaryKeySchema | ||
var coll sql.CollationID | ||
var comment string | ||
outScope := inScope.push() | ||
if ct.TableSpec != nil { | ||
pkSch, coll, _ = b.tableSpecToSchema(inScope, outScope, database, strings.ToLower(ct.Table.Name.String()), ct.TableSpec, false) | ||
} | ||
|
||
newTableName := strings.ToLower(ct.Table.Name.String()) | ||
outScope.setTableAlias(newTableName) | ||
|
||
var ok bool | ||
var pkOrdinals []int | ||
var newSch sql.Schema | ||
newSchMap := make(map[string]struct{}) | ||
var idxDefs sql.IndexDefs | ||
if indexableTable, ok := likeTable.Table.(sql.IndexAddressableTable); ok { | ||
indexes, err := indexableTable.GetIndexes(b.ctx) | ||
if err != nil { | ||
var checkDefs []*sql.CheckConstraint | ||
for _, likeTable := range ct.OptLike.LikeTables { | ||
outScope, ok = b.buildTablescan(outScope, likeTable, nil) | ||
if !ok { | ||
b.handleErr(sql.ErrTableNotFound.New(likeTable.Name.String())) | ||
} | ||
lTable, isResTbl := outScope.node.(*plan.ResolvedTable) | ||
if !isResTbl { | ||
err := fmt.Errorf("expected resolved table: %s", likeTable.Name.String()) | ||
b.handleErr(err) | ||
} | ||
for _, index := range indexes { | ||
if index.IsGenerated() { | ||
|
||
if coll == sql.Collation_Unspecified { | ||
coll = lTable.Collation() | ||
} | ||
|
||
if comment == "" { | ||
comment = lTable.Comment() | ||
} | ||
|
||
schOff := len(newSch) | ||
hasSkippedCols := false | ||
for _, col := range lTable.Schema() { | ||
newCol := *col | ||
name := strings.ToLower(newCol.Name) | ||
if _, ok := newSchMap[name]; ok { | ||
// TODO: throw warning | ||
hasSkippedCols = true | ||
continue | ||
} | ||
constraint := sql.IndexConstraint_None | ||
if index.IsUnique() { | ||
if index.ID() == "PRIMARY" { | ||
constraint = sql.IndexConstraint_Primary | ||
} else { | ||
constraint = sql.IndexConstraint_Unique | ||
} | ||
} | ||
newSchMap[name] = struct{}{} | ||
newCol.Source = newTableName | ||
newSch = append(newSch, &newCol) | ||
} | ||
|
||
columns := make([]sql.IndexColumn, len(index.Expressions())) | ||
for i, col := range index.Expressions() { | ||
//TODO: find a better way to get only the column name if the table is present | ||
col = strings.TrimPrefix(col, indexableTable.Name()+".") | ||
columns[i] = sql.IndexColumn{Name: col} | ||
// if a column was skipped due to duplicates, don't copy over PK ords, idxDefs, or checkDefs | ||
// since they might be incorrect | ||
if hasSkippedCols { | ||
continue | ||
} | ||
|
||
// Copy over primary key schema ordinals | ||
if pkTable, isPkTable := lTable.Table.(sql.PrimaryKeyTable); isPkTable { | ||
for _, pkOrd := range pkTable.PrimaryKeySchema().PkOrdinals { | ||
pkOrdinals = append(pkOrdinals, schOff+pkOrd) | ||
} | ||
idxDefs = append(idxDefs, &sql.IndexDef{ | ||
Name: index.ID(), | ||
Storage: sql.IndexUsing_Default, | ||
Constraint: constraint, | ||
Columns: columns, | ||
Comment: index.Comment(), | ||
}) | ||
} | ||
} | ||
origSch := likeTable.Schema() | ||
newSch := make(sql.Schema, len(origSch)) | ||
for i, col := range origSch { | ||
tempCol := *col | ||
tempCol.Source = newTableName | ||
newSch[i] = &tempCol | ||
} | ||
|
||
var pkOrdinals []int | ||
if pkTable, ok := likeTable.Table.(sql.PrimaryKeyTable); ok { | ||
pkOrdinals = pkTable.PrimaryKeySchema().PkOrdinals | ||
} | ||
// Load index definitions | ||
if idxTbl, isIdxTbl := lTable.Table.(sql.IndexAddressableTable); isIdxTbl { | ||
idxs, err := idxTbl.GetIndexes(b.ctx) | ||
if err != nil { | ||
b.handleErr(err) | ||
} | ||
for _, idx := range idxs { | ||
if idx.IsGenerated() { | ||
continue | ||
} | ||
constraint := sql.IndexConstraint_None | ||
if idx.IsUnique() { | ||
if idx.ID() == "PRIMARY" { | ||
// TODO: deal with multiple primary key constraints? | ||
constraint = sql.IndexConstraint_Primary | ||
} else { | ||
constraint = sql.IndexConstraint_Unique | ||
} | ||
} | ||
|
||
var checkDefs []*sql.CheckConstraint | ||
if checksTable, ok := likeTable.Table.(sql.CheckTable); ok { | ||
checks, err := checksTable.GetChecks(b.ctx) | ||
if err != nil { | ||
b.handleErr(err) | ||
columns := make([]sql.IndexColumn, len(idx.Expressions())) | ||
for i, col := range idx.Expressions() { | ||
// TODO: find a better way to get only the column name if the table is present | ||
col = strings.TrimPrefix(col, idxTbl.Name()+".") | ||
columns[i] = sql.IndexColumn{Name: col} | ||
} | ||
idxDefs = append(idxDefs, &sql.IndexDef{ | ||
Name: idx.ID(), | ||
Storage: sql.IndexUsing_Default, | ||
Constraint: constraint, | ||
Columns: columns, | ||
Comment: idx.Comment(), | ||
}) | ||
} | ||
} | ||
|
||
for _, check := range checks { | ||
checkConstraint := b.buildCheckConstraint(outScope, &check) | ||
// Load check constraints | ||
if chkTable, isChkTable := lTable.Table.(sql.CheckTable); isChkTable { | ||
checks, err := chkTable.GetChecks(b.ctx) | ||
if err != nil { | ||
b.handleErr(err) | ||
} | ||
for _, check := range checks { | ||
checkConstraint := b.buildCheckConstraint(outScope, &check) | ||
if err != nil { | ||
b.handleErr(err) | ||
} | ||
|
||
// Prevent a name collision between old and new checks. | ||
// New check will be assigned a name during building. | ||
checkConstraint.Name = "" | ||
checkDefs = append(checkDefs, checkConstraint) | ||
// Prevent a name collision between old and new checks. | ||
// New check will be assigned a name during building. | ||
checkConstraint.Name = "" | ||
checkDefs = append(checkDefs, checkConstraint) | ||
} | ||
} | ||
} | ||
|
||
var hasSkippedCols bool | ||
for _, col := range pkSch.Schema { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. is explicit schema not incompatible with the LIKE list? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. do we have tests for this? |
||
name := strings.ToLower(col.Name) | ||
if _, ok := newSchMap[name]; ok { | ||
// TODO: throw warning | ||
hasSkippedCols = true | ||
continue | ||
} | ||
newSch = append(newSch, col) | ||
} | ||
if !hasSkippedCols { | ||
for _, pkOrd := range pkSch.PkOrdinals { | ||
pkOrdinals = append(pkOrdinals, len(newSch)+pkOrd) | ||
} | ||
} | ||
|
||
|
@@ -420,13 +475,13 @@ func (b *Builder) buildCreateTableLike(inScope *scope, ct *ast.DDL) *scope { | |
Schema: pkSchema, | ||
IdxDefs: idxDefs, | ||
ChDefs: checkDefs, | ||
Collation: likeTable.Collation(), | ||
Comment: likeTable.Comment(), | ||
Collation: coll, | ||
Comment: comment, | ||
} | ||
|
||
database := b.resolveDbForTable(ct.Table) | ||
|
||
b.qFlags.Set(sql.QFlagSetDatabase) | ||
|
||
outScope.setTableAlias(newTableName) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I think this is probably incorrect strictly speaking, as you'll only have the columns in the last table in the list in this scope. Not sure when it matters though. @max-hoffman ? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. it's definitely an abuse of the model...a more reasonable pass might accumulate the table cols into a top-level output scope or return an empty scope |
||
outScope.node = plan.NewCreateTable(database, newTableName, ct.IfNotExists, ct.Temporary, tableSpec) | ||
return outScope | ||
} | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
are the semantics really this simple? seems like you'd need way more testing to figure out correct behavior