-
Notifications
You must be signed in to change notification settings - Fork 24
/
Copy pathtable.go
649 lines (586 loc) · 18.1 KB
/
table.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
package schema
import (
"context"
"encoding/json"
"fmt"
"regexp"
"slices"
"github.com/apache/arrow-go/v18/arrow"
"github.com/cloudquery/plugin-sdk/v4/glob"
"github.com/thoas/go-funk"
)
// TableResolver is the main entry point when a table sync is called.
//
// Table resolver has 3 main arguments:
// - meta(ClientMeta): is the client returned by the plugin.Provider Configure call
// - parent(Resource): resource is the parent resource in case this table is called via parent table (i.e. relation)
// - res(chan any): is a channel to pass results fetched by the TableResolver
type TableResolver func(ctx context.Context, meta ClientMeta, parent *Resource, res chan<- any) error

// RowResolver resolves a single resource. Used for Table.PreResourceResolver
// and Table.PostResourceResolver hooks.
type RowResolver func(ctx context.Context, meta ClientMeta, resource *Resource) error

// Multiplexer returns the set of meta clients a table should be synced with
// (see Table.Multiplex).
type Multiplexer func(meta ClientMeta) []ClientMeta

// Transform receives a *Table and may mutate it, returning an error on failure
// (see Table.Transform).
type Transform func(table *Table) error

// Tables is a list of *Table, with helpers for filtering, flattening and validation.
type Tables []*Table
// Deprecated: SyncSummary is deprecated.
type SyncSummary struct {
	// Resources is a counter (presumably of synced resources — not used in this file).
	Resources uint64
	// Errors is a counter of errors.
	Errors uint64
	// Panics is a counter of panics.
	Panics uint64
}
// TableColumnChangeType enumerates the kinds of schema changes that
// Table.GetChanges can report between two table definitions.
type TableColumnChangeType int

const (
	// TableColumnChangeTypeUnknown is the zero value; no known change kind.
	TableColumnChangeTypeUnknown TableColumnChangeType = iota
	// TableColumnChangeTypeAdd indicates a column present in the new definition but not the old.
	TableColumnChangeTypeAdd
	// TableColumnChangeTypeUpdate indicates a column whose type or options (PK, NotNull) changed.
	TableColumnChangeTypeUpdate
	// TableColumnChangeTypeRemove indicates a column present in the old definition but not the new.
	TableColumnChangeTypeRemove
	// These are special cases to help users migrate
	// As we remove unique constraints on _cq_id columns this will give destination plugins the ability to auto migrate
	TableColumnChangeTypeRemoveUniqueConstraint
	// Moving from composite pks to singular PK on _cq_id this will give destination plugins the ability to auto migrate
	TableColumnChangeTypeMoveToCQOnly
)
// TableColumnChange describes a single column-level difference between two
// table definitions, as produced by Table.GetChanges.
type TableColumnChange struct {
	// Type is the kind of change (add / update / remove / ...).
	Type TableColumnChangeType
	// ColumnName is the affected column (empty for table-wide changes such as MoveToCQOnly).
	ColumnName string
	// Current is the column in the new definition (unset for removals).
	Current Column
	// Previous is the column in the old definition (unset for additions).
	Previous Column
}
// Table defines the schema and sync behavior of a single CloudQuery table.
type Table struct {
	// Name of table
	Name string `json:"name"`
	// Title to be used in documentation (optional: will be generated from name if not set)
	Title string `json:"title"`
	// Description of the table, used in documentation.
	Description string `json:"description"`
	// List of permissions needed to access this table, if any. For example ["Microsoft.Network/dnsZones/read"] or ["storage.buckets.list"]
	PermissionsNeeded []string `json:"permissions_needed"`
	// Columns are the set of fields that are part of this table
	Columns ColumnList `json:"columns"`
	// Relations are the child tables defined as relations of this table.
	Relations Tables `json:"relations"`
	// Transform is an optional hook that may mutate the table definition.
	Transform Transform `json:"-"`
	// Resolver is the main entry point for fetching this table's data.
	Resolver TableResolver `json:"-"`
	// Multiplex returns re-purposed meta clients. The sdk will execute the table with each of them
	Multiplex Multiplexer `json:"-"`
	// PostResourceResolver is called after all columns have been resolved, but before the Resource is sent to be inserted. The ordering of resolvers is:
	// (Table) Resolver → PreResourceResolver → ColumnResolvers → PostResourceResolver
	PostResourceResolver RowResolver `json:"-"`
	// PreResourceResolver is called before all columns are resolved but after Resource is created. The ordering of resolvers is:
	// (Table) Resolver → PreResourceResolver → ColumnResolvers → PostResourceResolver
	PreResourceResolver RowResolver `json:"-"`
	// IsIncremental is a flag that indicates if the table is incremental or not. This flag mainly affects how the table is
	// documented.
	IsIncremental bool `json:"is_incremental"`
	// IgnoreInTests is used to exclude a table from integration tests.
	// By default, integration tests fetch all resources from cloudquery's test account, and verify all tables
	// have at least one row.
	// When IgnoreInTests is true, integration tests won't fetch from this table.
	// Used when it is hard to create a reproducible environment with a row in this table.
	IgnoreInTests bool `json:"-"`
	// Parent is the parent table in case this table is called via parent table (i.e. relation)
	Parent *Table `json:"-"`
	// PkConstraintName is an optional name for the primary-key constraint;
	// it is round-tripped through Arrow schema metadata (MetadataConstraintName).
	PkConstraintName string `json:"pk_constraint_name"`
	// IsPaid indicates whether this table is a paid (premium) table.
	// This relates to the CloudQuery plugin itself, and should not be confused
	// with whether the table makes use of a paid API or not.
	IsPaid bool `json:"is_paid"`
}
var (
	// reValidColumnName matches valid snake_case column names: a lowercase
	// letter or underscore, followed by lowercase letters, digits or underscores.
	reValidColumnName = regexp.MustCompile(`^[a-z_][a-z\d_]*$`)
)
// AddCqIDs prepends the cq_id and cq_parent_id columns to the table and to all
// of its descendant relations. If the table has no primary keys of its own,
// cq_id is marked as the primary key.
func AddCqIDs(table *Table) {
	idColumn := CqIDColumn
	if len(table.PrimaryKeys()) == 0 {
		idColumn.PrimaryKey = true
	}
	table.Columns = append(ColumnList{idColumn, CqParentIDColumn}, table.Columns...)
	for _, relation := range table.Relations {
		AddCqIDs(relation)
	}
}
// AddCqClientID prepends the cq_client_id column (identifying the multiplexed
// client that fetched the resource) to the table and all of its relations,
// unless the column is already present.
func AddCqClientID(t *Table) {
	if existing := t.Columns.Get(CqClientIDColumn.Name); existing == nil {
		t.Columns = append(ColumnList{CqClientIDColumn}, t.Columns...)
	}
	for _, relation := range t.Relations {
		AddCqClientID(relation)
	}
}
// CqIDAsPK makes cq_id the sole primary key of the table: if a cq_id column
// exists, it becomes the primary key and every other column loses its
// primary-key flag. Tables without a cq_id column are left untouched.
func CqIDAsPK(t *Table) {
	if t.Columns.Get(CqIDColumn.Name) == nil {
		return
	}
	for i := range t.Columns {
		// True exactly for cq_id, false for everything else.
		t.Columns[i].PrimaryKey = t.Columns[i].Name == CqIDColumn.Name
	}
}
// NewTablesFromArrowSchemas converts each Arrow schema into a Table,
// returning the first conversion error encountered.
func NewTablesFromArrowSchemas(schemas []*arrow.Schema) (Tables, error) {
	result := make(Tables, 0, len(schemas))
	for _, sc := range schemas {
		t, err := NewTableFromArrowSchema(sc)
		if err != nil {
			return nil, err
		}
		result = append(result, t)
	}
	return result, nil
}
// NewTableFromArrowSchema creates a CloudQuery Table abstraction from an Arrow schema.
// The Arrow schema is a low level representation of a table that can be sent
// over the wire in a cross-language way.
func NewTableFromArrowSchema(sc *arrow.Schema) (*Table, error) {
	md := sc.Metadata()
	name, ok := md.GetValue(MetadataTableName)
	if !ok {
		return nil, fmt.Errorf("missing table name")
	}
	description, _ := md.GetValue(MetadataTableDescription)
	constraintName, _ := md.GetValue(MetadataConstraintName)
	title, _ := md.GetValue(MetadataTableTitle)
	dependsOn, _ := md.GetValue(MetadataTableDependsOn)
	permissionsJSON, _ := md.GetValue(MetadataTablePermissionsNeeded)

	// A non-empty depends-on entry records the parent table by name only.
	var parent *Table
	if dependsOn != "" {
		parent = &Table{Name: dependsOn}
	}

	fields := sc.Fields()
	columns := make(ColumnList, len(fields))
	for i := range fields {
		columns[i] = NewColumnFromArrowField(fields[i])
	}

	// Best-effort decode of the permissions JSON array; on failure or absence
	// the slice stays nil.
	var permissions []string
	_ = json.Unmarshal([]byte(permissionsJSON), &permissions)

	table := &Table{
		Name:              name,
		Description:       description,
		PkConstraintName:  constraintName,
		Columns:           columns,
		Title:             title,
		Parent:            parent,
		PermissionsNeeded: permissions,
	}
	if incremental, found := md.GetValue(MetadataIncremental); found {
		table.IsIncremental = incremental == MetadataTrue
	}
	if paid, found := md.GetValue(MetadataTableIsPaid); found {
		table.IsPaid = paid == MetadataTrue
	}
	return table, nil
}
// String returns the snake_case name of the change type, or "unknown" for
// TableColumnChangeTypeUnknown and any out-of-range value.
func (t TableColumnChangeType) String() string {
	// Names indexed by the constant's iota value; index 0 (Unknown) is
	// intentionally left empty and handled by the range check below.
	names := [...]string{
		TableColumnChangeTypeAdd:                    "add",
		TableColumnChangeTypeUpdate:                 "update",
		TableColumnChangeTypeRemove:                 "remove",
		TableColumnChangeTypeRemoveUniqueConstraint: "remove_unique_constraint",
		TableColumnChangeTypeMoveToCQOnly:           "move_to_cq_only",
	}
	if int(t) > 0 && int(t) < len(names) {
		return names[t]
	}
	return "unknown"
}
// String renders a human-readable description of the change. The fields shown
// depend on the change type (additions have no previous column, removals no
// current one, and so on).
func (t TableColumnChange) String() string {
	switch t.Type {
	case TableColumnChangeTypeAdd:
		return fmt.Sprintf("column: %s, type: %s, current: %s", t.ColumnName, t.Type, t.Current)
	case TableColumnChangeTypeRemove:
		return fmt.Sprintf("column: %s, type: %s, previous: %s", t.ColumnName, t.Type, t.Previous)
	case TableColumnChangeTypeRemoveUniqueConstraint:
		return fmt.Sprintf("column: %s, previous: %s", t.ColumnName, t.Previous)
	case TableColumnChangeTypeMoveToCQOnly:
		return fmt.Sprintf("multi-column: %s, type: %s", t.ColumnName, t.Type)
	default:
		// TableColumnChangeTypeUpdate and any unrecognized type use the full form.
		return fmt.Sprintf("column: %s, type: %s, current: %s, previous: %s", t.ColumnName, t.Type, t.Current, t.Previous)
	}
}
// FilterDfsFunc returns a new Tables slice containing deep copies of the
// tables (and their relations) that survive a depth-first filter with the
// given include/exclude predicates. The receiver is not modified.
func (tt Tables) FilterDfsFunc(include, exclude func(*Table) bool, skipDependentTables bool) Tables {
	result := make(Tables, 0, len(tt))
	for _, original := range tt {
		// Copy first: filterDfs prunes its argument in place.
		if kept := original.Copy(nil).filterDfs(false, include, exclude, skipDependentTables); kept != nil {
			result = append(result, kept)
		}
	}
	return result
}
// ToArrowSchemas flattens the table tree and converts every table to its
// Arrow schema representation.
func (tt Tables) ToArrowSchemas() Schemas {
	flat := tt.FlattenTables()
	result := make(Schemas, 0, len(flat))
	for _, table := range flat {
		result = append(result, table.ToArrowSchema())
	}
	return result
}
// validatePatternsHaveMatches returns an error if any glob pattern in patterns
// matches none of the given (flattened) tables. optionName is the user-facing
// option name used in the error message ("tables" or "skip_tables").
func validatePatternsHaveMatches(patterns []string, flattened Tables, optionName string) error {
	for _, pattern := range patterns {
		matched := false
		for _, table := range flattened {
			if glob.Glob(pattern, table.Name) {
				matched = true
				break
			}
		}
		if !matched {
			return fmt.Errorf("%s include a pattern %s with no matches", optionName, pattern)
		}
	}
	return nil
}

// matchesAnyPattern builds a predicate reporting whether a table's name
// matches at least one of the given glob patterns.
func matchesAnyPattern(patterns []string) func(*Table) bool {
	return func(t *Table) bool {
		for _, pattern := range patterns {
			if glob.Glob(pattern, t.Name) {
				return true
			}
		}
		return false
	}
}

// FilterDfs filters the table tree by the given include (tables) and exclude
// (skipTables) glob patterns, returning deep copies of the surviving tables.
// It errors if any pattern matches no table at all, so typos in configuration
// surface instead of silently selecting nothing.
func (tt Tables) FilterDfs(tables, skipTables []string, skipDependentTables bool) (Tables, error) {
	flattenedTables := tt.FlattenTables()
	if err := validatePatternsHaveMatches(tables, flattenedTables, "tables"); err != nil {
		return nil, err
	}
	if err := validatePatternsHaveMatches(skipTables, flattenedTables, "skip_tables"); err != nil {
		return nil, err
	}
	return tt.FilterDfsFunc(matchesAnyPattern(tables), matchesAnyPattern(skipTables), skipDependentTables), nil
}
// flattenTablesRecursive returns a pre-order listing of every table in the
// tree. Each entry is a shallow copy with Relations cleared; duplicates by
// name are possible (FlattenTables dedupes).
func (tt Tables) flattenTablesRecursive() Tables {
	flat := make(Tables, 0, len(tt))
	for _, original := range tt {
		shallow := *original
		shallow.Relations = nil
		flat = append(flat, &shallow)
		flat = append(flat, original.Relations.flattenTablesRecursive()...)
	}
	return flat
}
// FlattenTables returns every table in the tree as a flat list of shallow
// copies (Relations cleared), keeping only the first occurrence of each
// table name.
func (tt Tables) FlattenTables() Tables {
	all := tt.flattenTablesRecursive()
	seen := make(map[string]struct{}, len(all))
	unique := make(Tables, 0, len(all))
	for _, table := range all {
		if _, dup := seen[table.Name]; dup {
			continue
		}
		seen[table.Name] = struct{}{}
		unique = append(unique, table)
	}
	return slices.Clip(unique)
}
// UnflattenTables returns a new Tables copy with the relations unflattened. This is the
// opposite operation of FlattenTables.
func (tt Tables) UnflattenTables() (Tables, error) {
	// Shallow-copy every table so the receiver's entries are not mutated when
	// relations are attached below. NOTE(review): the copies still share the
	// Columns/Relations backing slices with the originals; this assumes inputs
	// come from FlattenTables (where Relations is nil) — confirm for other callers.
	tables := make(Tables, 0, len(tt))
	for _, t := range tt {
		table := *t
		tables = append(tables, &table)
	}
	topLevel := make([]*Table, 0, len(tt))
	// build relations
	for _, table := range tables {
		if table.Parent == nil {
			topLevel = append(topLevel, table)
			continue
		}
		// Parent is matched by name against the copied set, so the relation
		// tree is wired between copies, never back into the receiver.
		parent := tables.Get(table.Parent.Name)
		if parent == nil {
			return nil, fmt.Errorf("parent table %s not found", table.Parent.Name)
		}
		table.Parent = parent
		parent.Relations = append(parent.Relations, table)
	}
	return slices.Clip(topLevel), nil
}
// TableNames returns the names of all tables in the tree, including every
// nested relation, in depth-first order.
func (tt Tables) TableNames() []string {
	names := []string{}
	for _, table := range tt {
		names = append(names, table.TableNames()...)
	}
	return names
}
// GetTopLevel returns a table by name. Only returns the table if it is in the
// top-level list; relations are not searched (use Get for that).
func (tt Tables) GetTopLevel(name string) *Table {
	idx := slices.IndexFunc(tt, func(t *Table) bool { return t.Name == name })
	if idx < 0 {
		return nil
	}
	return tt[idx]
}
// Get returns a table by name, searching both the top-level tables and,
// recursively, all of their relations. Returns nil when no table matches.
func (tt Tables) Get(name string) *Table {
	for _, table := range tt {
		if table.Name == name {
			return table
		}
		if found := table.Relations.Get(name); found != nil {
			return found
		}
	}
	return nil
}
// ValidateDuplicateColumns checks every table (and, via Table's own method,
// its relations) for duplicate column names, returning the first error found.
func (tt Tables) ValidateDuplicateColumns() error {
	for _, table := range tt {
		if err := table.ValidateDuplicateColumns(); err != nil {
			return err
		}
	}
	return nil
}
// ValidateDuplicateTables returns an error if any table name (including
// relation names) appears more than once in the tree.
func (tt Tables) ValidateDuplicateTables() error {
	seen := make(map[string]struct{}, len(tt))
	for _, name := range tt.TableNames() {
		if _, dup := seen[name]; dup {
			return fmt.Errorf("duplicate table %s", name)
		}
		seen[name] = struct{}{}
	}
	return nil
}
// GetPaidTables returns the flattened subset of tables marked as paid
// (premium) tables.
func (tt Tables) GetPaidTables() Tables {
	flat := tt.FlattenTables()
	paid := make(Tables, 0, len(flat))
	for _, table := range flat {
		if table.IsPaid {
			paid = append(paid, table)
		}
	}
	return paid
}
// HasPaidTables reports whether any table in the tree (including relations)
// is marked as a paid (premium) table.
func (tt Tables) HasPaidTables() bool {
	return len(tt.GetPaidTables()) > 0
}
// filterDfs prunes this table's relation tree in place. A table is kept when
// it matches include, when an ancestor matched (unless skipDependentTables),
// or when any descendant is kept; exclude always wins and drops the whole
// subtree. Returns t when kept, nil when pruned.
func (t *Table) filterDfs(parentMatched bool, include, exclude func(*Table) bool, skipDependentTables bool) *Table {
	if exclude(t) {
		return nil
	}
	selfMatched := include(t) || (parentMatched && !skipDependentTables)
	kept := make(Tables, 0, len(t.Relations))
	for _, child := range t.Relations {
		if child.filterDfs(selfMatched, include, exclude, skipDependentTables) != nil {
			kept = append(kept, child)
		}
	}
	t.Relations = kept
	if selfMatched || len(kept) > 0 {
		return t
	}
	return nil
}
// PrimaryKeysIndexes returns the positions (indexes into t.Columns) of all
// primary-key columns, in column order.
func (t *Table) PrimaryKeysIndexes() []int {
	var indexes []int
	for i := range t.Columns {
		if t.Columns[i].PrimaryKey {
			indexes = append(indexes, i)
		}
	}
	return indexes
}
// ToArrowSchema converts the table definition into an Arrow schema: each
// column becomes a field, and table-level attributes (name, description,
// incremental/paid flags, parent, permissions) are carried in the schema
// metadata so NewTableFromArrowSchema can round-trip them.
func (t *Table) ToArrowSchema() *arrow.Schema {
	md := map[string]string{
		MetadataTableName:        t.Name,
		MetadataTableDescription: t.Description,
		MetadataTableTitle:       t.Title,
		MetadataConstraintName:   t.PkConstraintName,
	}
	if t.IsIncremental {
		md[MetadataIncremental] = MetadataTrue
	}
	if t.Parent != nil {
		md[MetadataTableDependsOn] = t.Parent.Name
	}
	if t.IsPaid {
		md[MetadataTableIsPaid] = MetadataTrue
	}
	// Marshal of a []string cannot fail, so the error is deliberately ignored.
	permissionsJSON, _ := json.Marshal(t.PermissionsNeeded)
	md[MetadataTablePermissionsNeeded] = string(permissionsJSON)

	fields := make([]arrow.Field, 0, len(t.Columns))
	for _, col := range t.Columns {
		fields = append(fields, col.ToArrowField())
	}
	meta := arrow.MetadataFrom(md)
	return arrow.NewSchema(fields, &meta)
}
// GetChanges returns changes between two tables when t is the new one and old is the old one.
//
// It reports: added columns, removed columns, columns whose type or options
// (primary key, not-null) changed, removed unique constraints, and the
// special whole-table move to a single _cq_id primary key.
func (t *Table) GetChanges(old *Table) []TableColumnChange {
	var changes []TableColumnChange
	// Compute both PK lists once instead of re-deriving them per check.
	newPKs := t.PrimaryKeys()
	oldPKs := old.PrimaryKeys()
	// Special case: Moving from individual pks to singular PK on _cq_id
	if len(newPKs) == 1 && newPKs[0] == CqIDColumn.Name && len(oldPKs) > 0 && !funk.Contains(oldPKs, CqIDColumn.Name) {
		changes = append(changes, TableColumnChange{
			Type: TableColumnChangeTypeMoveToCQOnly,
		})
	}
	for _, c := range t.Columns {
		otherColumn := old.Columns.Get(c.Name)
		// A column was added to the table definition
		if otherColumn == nil {
			changes = append(changes, TableColumnChange{
				Type:       TableColumnChangeTypeAdd,
				ColumnName: c.Name,
				Current:    c,
			})
			continue
		}
		// Column type or options (e.g. PK, Not Null) changed in the new table definition
		if !arrow.TypeEqual(c.Type, otherColumn.Type) || c.NotNull != otherColumn.NotNull || c.PrimaryKey != otherColumn.PrimaryKey {
			changes = append(changes, TableColumnChange{
				Type:       TableColumnChangeTypeUpdate,
				ColumnName: c.Name,
				Current:    c,
				Previous:   *otherColumn,
			})
		}
		// Unique constraint was removed
		if !c.Unique && otherColumn.Unique {
			changes = append(changes, TableColumnChange{
				Type:       TableColumnChangeTypeRemoveUniqueConstraint,
				ColumnName: c.Name,
				Previous:   *otherColumn,
			})
		}
	}
	// A column was removed from the table definition
	for _, c := range old.Columns {
		if t.Columns.Get(c.Name) == nil {
			changes = append(changes, TableColumnChange{
				Type:       TableColumnChangeTypeRemove,
				ColumnName: c.Name,
				Previous:   c,
			})
		}
	}
	return changes
}
// ValidateDuplicateColumns returns an error if this table, or any of its
// relations (checked recursively), declares the same column name twice.
func (t *Table) ValidateDuplicateColumns() error {
	seen := make(map[string]struct{}, len(t.Columns))
	for _, col := range t.Columns {
		if _, dup := seen[col.Name]; dup {
			return fmt.Errorf("duplicate column %s in table %s", col.Name, t.Name)
		}
		seen[col.Name] = struct{}{}
	}
	for _, relation := range t.Relations {
		if err := relation.ValidateDuplicateColumns(); err != nil {
			return err
		}
	}
	return nil
}
// Column returns the named column, or nil if the table has no such column.
// The returned pointer refers to a copy, so mutating it does not affect
// t.Columns (use OverwriteOrAddColumn to modify the table).
func (t *Table) Column(name string) *Column {
	for i := range t.Columns {
		if t.Columns[i].Name == name {
			found := t.Columns[i]
			return &found
		}
	}
	return nil
}
// OverwriteOrAddColumn overwrites or adds columns.
// If a column with the same name exists, it is overwritten in place.
// Otherwise, the column is prepended to the table's column list.
func (t *Table) OverwriteOrAddColumn(column *Column) {
	for i := range t.Columns {
		if t.Columns[i].Name == column.Name {
			t.Columns[i] = *column
			return
		}
	}
	t.Columns = append(ColumnList{*column}, t.Columns...)
}
// PrimaryKeys returns the names of all primary-key columns, in column order.
func (t *Table) PrimaryKeys() []string {
	var names []string
	for i := range t.Columns {
		if t.Columns[i].PrimaryKey {
			names = append(names, t.Columns[i].Name)
		}
	}
	return names
}
// IncrementalKeys returns the names of all columns flagged as incremental
// keys, in column order.
func (t *Table) IncrementalKeys() []string {
	var names []string
	for i := range t.Columns {
		if t.Columns[i].IncrementalKey {
			names = append(names, t.Columns[i].Name)
		}
	}
	return names
}
// PrimaryKeyComponents returns the names of all columns flagged as primary-key
// components, in column order.
func (t *Table) PrimaryKeyComponents() []string {
	var names []string
	for i := range t.Columns {
		if t.Columns[i].PrimaryKeyComponent {
			names = append(names, t.Columns[i].Name)
		}
	}
	return names
}
// TableNames returns this table's name followed by the names of all of its
// relations, depth-first.
func (t *Table) TableNames() []string {
	names := []string{t.Name}
	for _, relation := range t.Relations {
		names = append(names, relation.TableNames()...)
	}
	return names
}
// Copy returns a deep copy of the table (columns and relations are copied
// recursively) with its Parent pointer set to the given parent.
func (t *Table) Copy(parent *Table) *Table {
	dup := *t
	dup.Parent = parent
	dup.Columns = make(ColumnList, len(t.Columns))
	copy(dup.Columns, t.Columns)
	dup.Relations = make(Tables, len(t.Relations))
	for i, relation := range t.Relations {
		dup.Relations[i] = relation.Copy(&dup)
	}
	return &dup
}
// ValidColumnName reports whether name is a valid column name: snake_case,
// starting with a lowercase letter or underscore, followed by lowercase
// letters, digits or underscores.
func ValidColumnName(name string) bool {
	return reValidColumnName.MatchString(name)
}