openavmkit.pipeline

Pipeline

This module contains every public function that is called from the notebooks in the openavmkit project.

Rules:

  • Every public function should be called from at least one notebook.
  • The primary openavmkit notebooks should only call functions from this module.
  • This module imports from other modules, but no other modules import from it.

NotebookState

NotebookState(locality, base_path=None)

Represents the state of a notebook session, including the base path and locality.

Attributes:

locality : str
    The locality slug (e.g., "us-nc-guilford").
base_path : str
    The base directory path for the notebook. Defaults to the current working directory if not provided.

Source code in openavmkit/pipeline.py
def __init__(self, locality: str, base_path: str = None):
    """Initialize a NotebookState instance.

    Parameters
    ----------
    locality : str
        The locality slug (e.g., "us-nc-guilford").
    base_path : str
        The base directory path. Defaults to the current working directory if not provided.
    """
    self.locality = locality
    if base_path is None:
        base_path = os.getcwd()
    self.base_path = base_path

cloud_sync

cloud_sync(locality, verbose=False, dry_run=False, ignore_paths=None)

Synchronize local files to cloud storage.

This function initializes the cloud service and syncs files for the given locality.

Parameters:

locality : str
    The locality identifier used to form remote paths. Required.
verbose : bool, optional
    If True, prints detailed log messages. Defaults to False.
dry_run : bool, optional
    If True, simulates the sync without performing any changes. Defaults to False.
ignore_paths : list, optional
    List of file paths or patterns to ignore during sync. Defaults to None.
Source code in openavmkit/pipeline.py
def cloud_sync(
    locality: str,
    verbose: bool = False,
    dry_run: bool = False,
    ignore_paths: list = None,
) -> None:
    """
    Synchronize local files to cloud storage.

    This function initializes the cloud service and syncs files for the given locality.

    Parameters
    ----------
    locality : str
        The locality identifier used to form remote paths.
    verbose : bool, optional
        If True, prints detailed log messages. Defaults to False.
    dry_run : bool, optional
        If True, simulates the sync without performing any changes. Defaults to False.
    ignore_paths : list, optional
        List of file paths or patterns to ignore during sync. Defaults to None.
    """

    cloud_settings = cloud.load_cloud_settings()

    if cloud_settings is None:
        warnings.warn("No cloud.json file found, cannot initialize cloud service.")
        return

    cloud_service = cloud.init(verbose, cloud_settings=cloud_settings)
    if cloud_service is None:
        print("Cloud service not initialized, skipping...")
        return

    if ignore_paths is None:
        ignore_paths = []
    extra_ignore = cloud_settings.get("ignore_paths", [])
    ignore_paths = ignore_paths + extra_ignore + ["cloud.json"]

    print(f"ignore_paths = {ignore_paths}")

    remote_path = locality.replace("-", "/") + "/"
    cloud_service.sync_files(
        locality,
        "in",
        remote_path,
        dry_run=dry_run,
        verbose=verbose,
        ignore_paths=ignore_paths,
    )
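
A minimal usage sketch, as it might appear in a notebook cell (the locality slug and ignore pattern are placeholders):

from openavmkit.pipeline import cloud_sync

# Preview what would be synced without changing anything
cloud_sync("us-nc-guilford", verbose=True, dry_run=True)

# Perform the actual sync, skipping a hypothetical scratch directory
cloud_sync("us-nc-guilford", ignore_paths=["out/temp"])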

delete_checkpoints

delete_checkpoints(prefix)

Delete all checkpoints that match the given prefix.

Parameters:

prefix : str
    The prefix used to identify checkpoints to delete. Required.

Returns:

None
    This function does not return a value; matching checkpoints are simply deleted.

Source code in openavmkit/pipeline.py
def delete_checkpoints(prefix: str) -> None:
    """
    Delete all checkpoints that match the given prefix.

    Parameters
    ----------
    prefix : str
        The prefix used to identify checkpoints to delete.

    Returns
    -------
    None
        This function does not return a value.
    """
    return openavmkit.checkpoint.delete_checkpoints(prefix)

enrich_sup_spatial_lag

enrich_sup_spatial_lag(sup, settings, verbose=False)

Enrich the sales and universe DataFrames with spatial lag features.

This function calculates "spatial lag", that is, the spatially-weighted average, of the sale price and other fields, based on nearest neighbors.

For sales, the spatial lag is calculated based on the training set of sales. For non-sale characteristics, the spatial lag is calculated based on the universe parcels.

Parameters:

sup : SalesUniversePair
    SalesUniversePair containing sales and universe DataFrames. Required.
settings : dict
    Settings dictionary. Required.
verbose : bool, optional
    If True, prints progress information. Defaults to False.

Returns:

SalesUniversePair
    Enriched SalesUniversePair with spatial lag features.

Source code in openavmkit/pipeline.py
def enrich_sup_spatial_lag(
    sup: SalesUniversePair, settings: dict, verbose: bool = False
):
    """Enrich the sales and universe DataFrames with spatial lag features.

    This function calculates "spatial lag", that is, the spatially-weighted
    average, of the sale price and other fields, based on nearest neighbors.

    For sales, the spatial lag is calculated based on the training set of sales.
    For non-sale characteristics, the spatial lag is calculated based on the
    universe parcels.

    Parameters
    ----------
    sup : SalesUniversePair
        SalesUniversePair containing sales and universe DataFrames.
    settings : dict
        Settings dictionary.
    verbose : bool, optional
        If True, prints progress information.

    Returns
    -------
    SalesUniversePair
        Enriched SalesUniversePair with spatial lag features.
    """
    return openavmkit.data.enrich_sup_spatial_lag(sup, settings, verbose)

enrich_sup_streets

enrich_sup_streets(sup, settings, verbose=False)

Enrich the universe of a SalesUniversePair with street network data.

This function enriches the universe GeoDataFrame with street network data by calculating frontage, depth, distance to street, and many other related metrics for every parcel against every nearby road, using OpenStreetMap data.

WARNING: This function can be VERY computationally and memory intensive for large datasets and may take a long time to run.

We definitely need to work on its performance or make it easier to split into smaller chunks.

Parameters:

sup : SalesUniversePair
    The data you want to enrich. Required.
settings : dict
    Settings dictionary. Required.
verbose : bool, optional
    If True, prints verbose output during processing. Defaults to False.

Returns:

SalesUniversePair
    The SalesUniversePair with its universe enriched with additional street-related metric columns.

Source code in openavmkit/pipeline.py
def enrich_sup_streets(sup: SalesUniversePair, settings: dict, verbose: bool = False):
    """Enrich a GeoDataFrame with street network data.

    This function enriches the universe GeoDataFrame with street network data by
    calculating frontage, depth, distance to street, and many other related metrics
    for every parcel against every nearby road, using OpenStreetMap data.

    WARNING: This function can be VERY computationally and memory intensive for large datasets
    and may take a long time to run.

    We definitely need to work on its performance or make it easier to split into smaller chunks.

    Parameters
    ----------
    sup : SalesUniversePair
        The data you want to enrich
    settings : dict
        Settings dictionary
    verbose : bool, optional
        If True, prints verbose output during processing. Defaults to False.

    Returns
    -------
    SalesUniversePair
        The SalesUniversePair with its universe enriched with street-related metric columns.
    """
    df_univ = sup.universe
    df_univ = openavmkit.data.enrich_df_streets(df_univ, settings, verbose=verbose)
    sup.universe = df_univ
    return sup
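
A sketch of chaining the two enrichment steps in a notebook, assuming sup is an existing SalesUniversePair and settings is a loaded settings dictionary:

from openavmkit.pipeline import enrich_sup_spatial_lag, enrich_sup_streets

# Spatially-weighted neighbor averages of sale price and other fields
sup = enrich_sup_spatial_lag(sup, settings, verbose=True)

# Street frontage/depth metrics from OpenStreetMap; run this last, since it
# can take a long time and a lot of memory on large datasets
sup = enrich_sup_streets(sup, settings, verbose=True)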

examine_df

examine_df(df, s)

Print examination details of the dataframe. This function displays summary statistics and unique values.

Parameters:

df : pd.DataFrame
    The data you wish to examine. Required.
s : dict
    Settings dictionary. Required.
Source code in openavmkit/pipeline.py
def examine_df(df: pd.DataFrame, s: dict):
    """
    Print examination details of the dataframe.
    This function displays summary statistics and unique values.

    Parameters
    ----------
    df : pd.DataFrame
        The data you wish to examine
    s : dict
        Settings dictionary.
    """

    def fill_str(char: str, size: int):
        text = ""
        for _i in range(0, size):
            text += char
        return text

    def fit_str(txt: str, size: int):
        if len(txt) >= size:
            len_first = int((size - 3) / 2)
            len_last = (size - 3) - len_first
            first_bit = txt[0:len_first]
            last_bit = txt[len(txt) - len_last :]
            txt = first_bit + "..." + last_bit
        return f"{txt:{size}}"

    def get_line(
        col, dtype, count_non_zero, p, count_non_null, pnn, uniques: list | str
    ):
        dtype = f"{dtype}"
        if type(count_non_zero) != str:
            count_non_zero = f"{count_non_zero:,}"

        if type(count_non_null) != str:
            count_non_null = f"{count_non_null:,}"

        if isinstance(uniques, list):
            unique_str = str(uniques)
            if len(unique_str) > 40:
                uniques = f"{len(uniques):,}"
            else:
                uniques = unique_str

        return f"{fit_str(col, 75)} {dtype:^10} {count_non_zero:>10} {p:>5.0%} {count_non_null:>10} {pnn:>5.0%} {uniques:>40}"

    buffer = ""
    lines = 0
    chunk_size = 3

    def print_horz_line(char: str):
        nonlocal buffer
        nonlocal lines
        if buffer != "":
            buffer += "\n"
        buffer += (
            fill_str(char, 30)
            + " "
            + fill_str(char, 10)
            + " "
            + fill_str(char, 10)
            + " "
            + fill_str(char, 5)
            + " "
            + fill_str(char, 10)
            + " "
            + fill_str(char, 5)
            + " "
            + fill_str(char, 40)
        )
        lines += 1
        if lines >= chunk_size:
            print(buffer)
            lines = 0
            buffer = ""

    def print_buffer(text: str):
        nonlocal buffer
        nonlocal lines
        if buffer != "":
            buffer += "\n"
        buffer += text
        lines += 1
        if lines >= chunk_size:
            print(buffer)
            buffer = ""
            lines = 0

    print(
        f"{'FIELD':^30} {'TYPE':^10} {'NON-ZERO':^10} {'%':^5} {'NON-NULL':^10} {'%':^5} {'UNIQUE':^40}"
    )

    fields_land = get_fields_land(s, df)
    fields_impr = get_fields_impr(s, df)
    fields_other = get_fields_other(s, df)

    fields_noted = []

    stuff = {
        "land": {"name": "LAND", "fields": fields_land},
        "impr": {"name": "IMPROVEMENT", "fields": fields_impr},
        "other": {"name": "OTHER", "fields": fields_other},
    }

    i = 0

    for landimpr in stuff:
        entry = stuff[landimpr]
        name = entry["name"]

        fields = entry["fields"]
        nums = fields["numeric"]
        bools = fields["boolean"]
        cats = fields["categorical"]

        if (len(nums) + len(bools) + len(cats)) == 0:
            continue

        if i != 0:
            print_buffer("")

        print_horz_line("=")
        print_buffer(f"{name:^30}")
        print_horz_line("=")

        nums.sort()
        bools.sort()
        cats.sort()

        if len(nums) > 0:
            print_horz_line("-")
            print_buffer(f"{'NUMERIC':^30}")
            print_horz_line("-")
            for n in nums:
                fields_noted.append(n)
                df_non_null = df[~pd.isna(df[n])]
                non_zero = len(df_non_null[np.abs(df_non_null[n]).gt(0)])
                if len(df) != 0:
                    perc = non_zero / len(df)
                else:
                    perc = float("nan")
                non_null = len(df_non_null)
                if len(df) != 0:
                    perc_non_null = non_null / len(df)
                else:
                    perc_non_null = float("nan")
                print_buffer(
                    get_line(
                        n, df[n].dtype, non_zero, perc, non_null, perc_non_null, ""
                    )
                )

        if len(bools) > 0:
            print_horz_line("-")
            print_buffer(f"{'BOOLEAN':^30}")
            print_horz_line("-")
            for b in bools:
                fields_noted.append(b)
                df_non_null = df[~pd.isna(df[b])]
                non_zero = len(df_non_null[np.abs(df_non_null[b]).gt(0)])
                if len(df) != 0:
                    perc = non_zero / len(df)
                else:
                    perc = float("nan")

                non_null = len(df_non_null)
                if non_null != 0:
                    perc_non_null = non_null / len(df)
                else:
                    perc_non_null = float("nan")
                print_buffer(
                    get_line(
                        b,
                        df[b].dtype,
                        non_zero,
                        perc,
                        non_null,
                        perc_non_null,
                        df[b].unique().tolist(),
                    )
                )

        if len(cats) > 0:
            print_horz_line("-")
            print_buffer(f"{'CATEGORICAL':^30}")
            print_horz_line("-")
            for c in cats:
                fields_noted.append(c)
                non_zero = (~pd.isna(df[c])).sum()
                if len(df) != 0:
                    perc = non_zero / len(df)
                else:
                    perc = float("nan")
                print_buffer(
                    get_line(
                        c,
                        df[c].dtype,
                        non_zero,
                        perc,
                        non_zero,
                        perc,
                        df[c].unique().tolist(),
                    )
                )
        i += 1

    fields_unclassified = []

    for column in df.columns:
        if column not in fields_noted:
            fields_unclassified.append(column)

    if len(fields_unclassified) > 0:
        fields_unclassified.sort()
        print_buffer("")
        print_horz_line("=")
        print_buffer(f"{'UNCLASSIFIED:':<30}")
        print_horz_line("=")
        for u in fields_unclassified:
            non_zero = (~pd.isna(df[u])).sum()
            if len(df) != 0:
                perc = non_zero / len(df)
                perc_non_null = non_zero / len(df)
            else:
                perc = float("nan")
                perc_non_null = float("nan")
            print_buffer(
                get_line(
                    u, df[u].dtype, non_zero, perc, non_zero, perc, list(df[u].unique())
                )
            )

    if len(buffer) > 0:
        print(buffer)
        buffer = ""
        lines = 0
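
For example, to inspect the universe side of a SalesUniversePair (a sketch; sup and settings are assumed to already exist):

from openavmkit.pipeline import examine_df

examine_df(sup.universe, settings)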

examine_df_in_ridiculous_detail

examine_df_in_ridiculous_detail(df, s)

Print details of the dataframe, but in RIDICULOUS DETAIL

Parameters:

df : pd.DataFrame
    The data you wish to examine. Required.
s : dict
    Settings dictionary. Required.
Source code in openavmkit/pipeline.py
def examine_df_in_ridiculous_detail(df: pd.DataFrame, s: dict):
    """
    Print details of the dataframe, but in RIDICULOUS DETAIL

    Parameters
    ----------
    df : pd.DataFrame
        The data you wish to examine
    s : dict
        Settings dictionary.
    """

    def fill_str(char: str, size: int):
        text = ""
        for _i in range(0, size):
            text += char
        return text

    def fit_str(txt: str, size: int):
        if len(txt) >= size:
            len_first = int((size - 3) / 2)
            len_last = (size - 3) - len_first
            first_bit = txt[0:len_first]
            last_bit = txt[len(txt) - len_last :]
            txt = first_bit + "..." + last_bit
        return f"{txt:{size}}"

    def get_num_line(col):
        describe = df[col].describe()
        return f"DESCRIBE --> {describe}\n\n"

    def get_cat_line(col):
        value_counts = df[col].value_counts()
        return f"VALUE COUNTS --> {value_counts}\n\n"

    def get_line(
        col, dtype, count_non_zero, p, count_non_null, pnn, uniques: list | str
    ):
        dtype = f"{dtype}"
        if type(count_non_zero) != str:
            count_non_zero = f"{count_non_zero:,}"

        if type(count_non_null) != str:
            count_non_null = f"{count_non_null:,}"

        if isinstance(uniques, list):
            unique_str = str(uniques)
            if len(unique_str) > 40:
                uniques = f"{len(uniques):,}"
            else:
                uniques = unique_str

        return f"{fit_str(col, 75)} {dtype:^10} {count_non_zero:>10} {p:>5.0%} {count_non_null:>10} {pnn:>5.0%} {uniques:>40}"

    def print_horz_line(char: str):
        print(
            fill_str(char, 30)
            + " "
            + fill_str(char, 10)
            + " "
            + fill_str(char, 10)
            + " "
            + fill_str(char, 5)
            + " "
            + fill_str(char, 10)
            + " "
            + fill_str(char, 5)
            + " "
            + fill_str(char, 40)
        )

    print(
        f"{'FIELD':^30} {'TYPE':^10} {'NON-ZERO':^10} {'%':^5} {'NON-NULL':^10} {'%':^5} {'UNIQUE':^40}"
    )

    fields_land = get_fields_land(s, df)
    fields_impr = get_fields_impr(s, df)
    fields_other = get_fields_other(s, df)

    fields_noted = []

    stuff = {
        "land": {"name": "LAND", "fields": fields_land},
        "impr": {"name": "IMPROVEMENT", "fields": fields_impr},
        "other": {"name": "OTHER", "fields": fields_other},
    }

    i = 0

    for landimpr in stuff:
        entry = stuff[landimpr]
        name = entry["name"]

        fields = entry["fields"]
        nums = fields["numeric"]
        bools = fields["boolean"]
        cats = fields["categorical"]

        if (len(nums) + len(bools) + len(cats)) == 0:
            continue

        if i != 0:
            print("")

        print_horz_line("=")
        print(f"{name:^30}")
        print_horz_line("=")

        nums.sort()
        bools.sort()
        cats.sort()

        if len(nums) > 0:
            print_horz_line("-")
            print(f"{'NUMERIC':^30}")
            print_horz_line("-")
            for n in nums:
                fields_noted.append(n)
                df_non_null = df[~pd.isna(df[n])]
                non_zero = len(df_non_null[np.abs(df_non_null[n]).gt(0)])
                non_null = len(df_non_null)
                if len(df) != 0:
                    perc = non_zero / len(df)
                    perc_non_null = non_null / len(df)
                else:
                    perc = float("nan")
                    perc_non_null = float("nan")
                print(
                    get_line(
                        n, df[n].dtype, non_zero, perc, non_null, perc_non_null, ""
                    )
                )
                print(get_num_line(n))

        if len(bools) > 0:
            print_horz_line("-")
            print(f"{'BOOLEAN':^30}")
            print_horz_line("-")
            for b in bools:
                fields_noted.append(b)
                df_non_null = df[~pd.isna(df[b])]
                non_zero = len(df_non_null[np.abs(df_non_null[b]).gt(0)])
                if len(df) != 0:
                    perc = non_zero / len(df)
                else:
                    perc = float("nan")
                non_null = len(df_non_null)
                if len(df) != 0:
                    perc_non_null = non_null / len(df)
                else:
                    perc_non_null = float("nan")
                print(
                    get_line(
                        b,
                        df[b].dtype,
                        non_zero,
                        perc,
                        non_null,
                        perc_non_null,
                        df[b].unique().tolist(),
                    )
                )

        if len(cats) > 0:
            print_horz_line("-")
            print(f"{'CATEGORICAL':^30}")
            print_horz_line("-")
            for c in cats:
                fields_noted.append(c)
                non_zero = (~pd.isna(df[c])).sum()
                if len(df) != 0:
                    perc = non_zero / len(df)
                else:
                    perc = float("nan")
                print(
                    get_line(
                        c,
                        df[c].dtype,
                        non_zero,
                        perc,
                        non_zero,
                        perc,
                        df[c].unique().tolist(),
                    )
                )
                print(get_cat_line(c))

        i += 1

    fields_unclassified = []

    for column in df.columns:
        if column not in fields_noted:
            fields_unclassified.append(column)

    if len(fields_unclassified) > 0:
        fields_unclassified.sort()
        print("")
        print_horz_line("=")
        print(f"{'UNCLASSIFIED:':<30}")
        print_horz_line("=")
        for u in fields_unclassified:
            non_zero = (~pd.isna(df[u])).sum()
            if len(df) != 0:
                perc = non_zero / len(df)
            else:
                perc = float("nan")
            print(
                get_line(
                    u, df[u].dtype, non_zero, perc, non_zero, perc, list(df[u].unique())
                )
            )

examine_sup

examine_sup(sup, s)

Print examination details of the sales and universe data from a SalesUniversePair.

This function displays summary statistics and unique values for both the sales and universe DataFrames contained in the provided SalesUniversePair.

Parameters:

sup : SalesUniversePair
    Object containing 'sales' and 'universe' DataFrames. Required.
s : dict
    Settings dictionary. Required.
Source code in openavmkit/pipeline.py
def examine_sup(sup: SalesUniversePair, s: dict) -> None:
    """
    Print examination details of the sales and universe data from a SalesUniversePair.

    This function displays summary statistics and unique values for both the sales and
    universe DataFrames contained in the provided SalesUniversePair.

    Parameters
    ----------
    sup : SalesUniversePair
        Object containing 'sales' and 'universe' DataFrames.
    s : dict
        Settings dictionary.
    """

    print("")
    print("EXAMINING UNIVERSE...")
    print("")
    examine_df(sup["universe"], s)

    print("")
    print("EXAMINING SALES...")
    print("")
    examine_df(sup["sales"], s)

examine_sup_in_ridiculous_detail

examine_sup_in_ridiculous_detail(sup, s)

Print details of the sales and universe data from a SalesUniversePair, but in RIDICULOUS DETAIL.

Parameters:

sup : SalesUniversePair
    Object containing 'sales' and 'universe' DataFrames. Required.
s : dict
    Settings dictionary. Required.
Source code in openavmkit/pipeline.py
def examine_sup_in_ridiculous_detail(sup: SalesUniversePair, s: dict):
    """
    Print details of the sales and universe data from a SalesUniversePair,
    but in RIDICULOUS DETAIL.

    Parameters
    ----------
    sup : SalesUniversePair
        Object containing 'sales' and 'universe' DataFrames.
    s : dict
        Settings dictionary.
    """
    print("")
    print("EXAMINING UNIVERSE...")
    print("")
    examine_df_in_ridiculous_detail(sup["universe"], s)

    print("")
    print("EXAMINING SALES...")
    print("")
    examine_df_in_ridiculous_detail(sup["sales"], s)

fill_unknown_values_sup

fill_unknown_values_sup(sup, settings)

Fill unknown values with default values as specified in settings.

Parameters:

sup : SalesUniversePair
    The SalesUniversePair containing sales and universe data. Required.
settings : dict
    The settings dictionary containing configuration for filling unknown values. Required.

Returns:

SalesUniversePair
    The updated SalesUniversePair with filled unknown values.

Source code in openavmkit/pipeline.py
def fill_unknown_values_sup(
    sup: SalesUniversePair, settings: dict
) -> SalesUniversePair:
    """Fill unknown values with default values as specified in settings.

    Parameters
    ----------
    sup : SalesUniversePair
        The SalesUniversePair containing sales and universe data.
    settings : dict
        The settings dictionary containing configuration for filling unknown values.

    Returns
    -------
    SalesUniversePair
        The updated SalesUniversePair with filled unknown values.
    """
    return openavmkit.cleaning.fill_unknown_values_sup(sup, settings)

finalize_models

finalize_models(sup, settings, save_params=True, use_saved_params=True, verbose=False)

Tries out predictive models on the given SalesUniversePair, finalizes the results, and writes them to disk.

This function takes detailed instructions from the provided settings dictionary and handles all the internal details like splitting the data, training the models, and saving the results. It performs basic statistical analysis on each model, and optionally combines results into an ensemble model.

This function iterates over model groups and runs models for main, hedonic and vacant cases.

It delegates the model execution to openavmkit.benchmark.run_models with the given settings.

Parameters:

sup : SalesUniversePair
    Sales and universe data. Required.
settings : dict
    The settings dictionary. Required.
save_params : bool, optional
    Whether to save model parameters. Defaults to True.
use_saved_params : bool, optional
    Whether to use saved model parameters. Defaults to True.
verbose : bool, optional
    If True, prints additional information. Defaults to False.

Returns:

None
    Results are written to disk; nothing is returned.

Source code in openavmkit/pipeline.py
def finalize_models(
    sup: SalesUniversePair,
    settings: dict,
    save_params: bool = True,
    use_saved_params: bool = True,
    verbose: bool = False,
) -> None:
    """
    Tries out predictive models on the given SalesUniversePair, finalizes the results, and writes them to disk.

    This function takes detailed instructions from the provided settings dictionary and handles all the internal
    details like splitting the data, training the models, and saving the results. It performs basic statistical
    analysis on each model, and optionally combines results into an ensemble model.

    This function iterates over model groups and runs models for main, hedonic and vacant cases.

    It delegates the model execution to `openavmkit.benchmark.run_models` with the given settings.

    Parameters
    ----------
    sup : SalesUniversePair
        Sales and universe data.
    settings : dict
        The settings dictionary.
    save_params : bool, optional
        Whether to save model parameters.
    use_saved_params : bool, optional
        Whether to use saved model parameters.
    verbose : bool, optional
        If True, prints additional information.

    Returns
    -------
    None
        Results are written to disk; nothing is returned.
    """

    openavmkit.benchmark.run_models(
        sup,
        settings,
        save_params,
        use_saved_params,
        save_results=True,
        verbose=verbose,
        run_main=True,
        run_vacant=True,
        run_hedonic=True,
        run_ensemble=True,
        do_shaps=False,
        do_plots=False
    )
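
A typical notebook call, a sketch assuming sup and settings were produced by the earlier notebooks; results are written to disk rather than returned:

from openavmkit.pipeline import finalize_models

finalize_models(sup, settings, verbose=True)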

from_checkpoint

from_checkpoint(path, func, params)

Read cached data from a checkpoint file or generate it via a function.

Wrapper that attempts to load a DataFrame from the given checkpoint path. If the file does not exist, it calls the provided function with the given parameters to generate the data, saves the result to the checkpoint, and returns it.

Parameters:

path : str
    Path to the checkpoint file. Required.
func : callable
    Function to run if the checkpoint is not available. Should return a DataFrame. Required.
params : dict
    Parameters to pass to func when generating the data. Required.

Returns:

pd.DataFrame
    The resulting DataFrame, loaded from the checkpoint or generated.

Source code in openavmkit/pipeline.py
def from_checkpoint(path: str, func: callable, params: dict) -> pd.DataFrame:
    """
    Read cached data from a checkpoint file or generate it via a function.

    Wrapper that attempts to load a DataFrame from the given checkpoint path. If the file
    does not exist, it calls the provided function with the given parameters to generate
    the data, saves the result to the checkpoint, and returns it.

    Parameters
    ----------
    path : str
        Path to the checkpoint file.
    func : callable
        Function to run if the checkpoint is not available. Should return a DataFrame.
    params : dict
        Parameters to pass to `func` when generating the data.

    Returns
    -------
    pd.DataFrame
        The resulting DataFrame, loaded from the checkpoint or generated.
    """
    return openavmkit.checkpoint.from_checkpoint(path, func, params)
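
A sketch of the caching pattern this enables; build_universe and the checkpoint path are hypothetical stand-ins:

from openavmkit.pipeline import from_checkpoint, load_settings

settings = load_settings()

def build_universe(settings: dict):
    # placeholder for expensive work that returns a DataFrame
    ...

# Loads the checkpoint if it exists; otherwise calls build_universe with the
# given params, saves the result to the checkpoint, and returns it.
df = from_checkpoint("1-universe", build_universe, {"settings": settings})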

init_notebook

init_notebook(locality)

Initialize the notebook environment for a specific locality.

This function sets up the notebook state by configuring the working directory and ensuring that the appropriate data directories exist.

Parameters:

locality : str
    The locality slug (e.g., "us-nc-guilford"). Required.

Source code in openavmkit/pipeline.py
def init_notebook(locality: str):
    """Initialize the notebook environment for a specific locality.

    This function sets up the notebook state by configuring the working directory and
    ensuring that the appropriate data directories exist.

    Parameters
    ----------
    locality : str
        The locality slug (e.g., "us-nc-guilford").

    """
    first_run = False
    if hasattr(init_notebook, "nbs"):
        nbs = init_notebook.nbs
    else:
        nbs = None
        first_run = True
    nbs = _set_locality(nbs, locality)

    if first_run:
        init_notebook.nbs = nbs

        # Fix warnings too
        oldformatwarning = warnings.formatwarning

        # Customize warning format
        def custom_formatwarning(msg, category, filename, lineno, line):
            # if it's a user warning:
            if issubclass(category, UserWarning):
                return f"UserWarning: {msg}\n"
            else:
                return oldformatwarning(msg, category, filename, lineno, line)

        warnings.formatwarning = custom_formatwarning

    load_dotenv(dotenv_path=find_dotenv())
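
The first cell of a notebook typically pairs this with load_settings (a sketch; the slug is a placeholder):

from openavmkit.pipeline import init_notebook, load_settings

init_notebook("us-nc-guilford")  # set up the locality's working directory
settings = load_settings()       # resolve in/settings.json against the defaults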

load_cleaned_data_for_modeling

load_cleaned_data_for_modeling(settings)

Read and return the cleaned data from notebook 2 so notebook 3 can use it. Additionally, check the sales scrutiny settings for an invalid-key file; if one is defined, use it to exclude any newly marked invalid sales.

(This saves having to do a full round trip through notebooks 1 and 2 just to exclude a newly identified invalid sale.)

Parameters:

settings : dict
    Configuration settings. Required.

Returns:

SalesUniversePair
    The cleaned and ready SalesUniversePair.

Source code in openavmkit/pipeline.py
def load_cleaned_data_for_modeling(settings: dict):
    """
    Read and return the cleaned data from notebook 2 so notebook 3 can use it.
    Additionally, check the sales scrutiny settings for an invalid-key file; if
    one is defined, use it to exclude any newly marked invalid sales.

    (This saves having to do a full round trip through notebooks 1 and 2 just to
    exclude a newly identified invalid sale.)

    Parameters
    ----------
    settings : dict
        Configuration settings

    Returns
    -------
    SalesUniversePair
        The cleaned and ready SalesUniversePair

    """
    sales_univ_pair = read_pickle("out/2-clean-sup")
    s_sales_scrutiny = settings.get("analysis", {}).get("sales_scrutiny", {})
    invalid_key_file = s_sales_scrutiny.get("invalid_key_file")
    if invalid_key_file is not None:
        if os.path.exists(invalid_key_file):
            df_invalid_keys = pd.read_csv(invalid_key_file, dtype={"key_sale": str})
            bad_keys = df_invalid_keys["key_sale"].values
            df_sales = sales_univ_pair.sales
            df_sales = df_sales[~df_sales["key_sale"].isin(bad_keys)].copy()
            sales_univ_pair.sales = df_sales
    return sales_univ_pair
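
A sketch of how notebook 3 might pick up where notebook 2 left off; per the source above, the optional invalid-key file is a CSV with a "key_sale" column:

from openavmkit.pipeline import load_settings, load_cleaned_data_for_modeling

settings = load_settings()

# If settings["analysis"]["sales_scrutiny"]["invalid_key_file"] names an
# existing CSV, the sales listed in its "key_sale" column are excluded here.
sup = load_cleaned_data_for_modeling(settings)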

load_dataframes

load_dataframes(settings, verbose=False)

Load dataframes based on the provided settings and return them in a dictionary.

This function reads various data sources defined in the settings and loads them into pandas DataFrames. It performs validations to ensure required data, such as 'geo_parcels', is present and correctly formatted.

Parameters:

settings : dict
    Settings dictionary. Required.
verbose : bool, optional
    If True, prints detailed logs during data loading. Defaults to False.

Returns:

dict
    Dictionary mapping keys to loaded DataFrames.

Raises:

ValueError
    If required dataframes or columns (e.g., 'geo_parcels' or its 'geometry' column) are missing.

Source code in openavmkit/pipeline.py
def load_dataframes(settings: dict, verbose: bool = False) -> dict:
    """
    Load dataframes based on the provided settings and return them in a dictionary.

    This function reads various data sources defined in the settings and loads them into
    pandas DataFrames. It performs validations to ensure required data, such as
    'geo_parcels', is present and correctly formatted.

    Parameters
    ----------
    settings : dict
        Settings dictionary.
    verbose : bool, optional
        If True, prints detailed logs during data loading. Defaults to False.

    Returns
    -------
    dict
        Dictionary mapping keys to loaded DataFrames.

    Raises
    ------
    ValueError
        If required dataframes or columns (e.g., 'geo_parcels' or its 'geometry' column) are missing.
    """

    s_data = settings.get("data", {})
    s_load = s_data.get("load", {})
    dataframes = {}

    fields_cat = get_fields_categorical(settings, include_boolean=False)
    fields_bool = get_fields_boolean(settings)
    fields_num = get_fields_numeric(settings, include_boolean=False)

    for key in s_load:
        entry = s_load[key]
        df = load_dataframe(
            entry,
            settings,
            verbose=verbose,
            fields_cat=fields_cat,
            fields_bool=fields_bool,
            fields_num=fields_num,
        )
        if df is not None:
            dataframes[key] = df

    if "geo_parcels" not in dataframes:
        raise ValueError(
            "No 'geo_parcels' dataframe found in the dataframes. This layer is required, and it must contain parcel geometry."
        )

    if "geometry" not in dataframes["geo_parcels"].columns:
        raise ValueError(
            "The 'geo_parcels' dataframe does not contain a 'geometry' column. This layer must contain parcel geometry."
        )

    return dataframes
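
A sketch of the expected shape of the settings; the entry contents under "data.load" are hypothetical, but a geo_parcels layer carrying parcel geometry is always required:

from openavmkit.pipeline import load_dataframes

settings = {
    "data": {
        "load": {
            # each key maps to a source definition understood by load_dataframe;
            # these particular entries are made up for illustration
            "geo_parcels": {"filename": "in/parcels.gpkg"},
            "sales": {"filename": "in/sales.csv"},
        }
    }
}

dataframes = load_dataframes(settings, verbose=True)
df_parcels = dataframes["geo_parcels"]  # must include a "geometry" column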

load_settings

load_settings(settings_file='in/settings.json', settings_object=None, error=True, warning=True)

Load and return the settings dictionary for the locality.

This merges the user's settings for their specific locality with the default settings template and the default data dictionary. It also performs variable substitution. The result is a fully resolved settings dictionary.

Parameters:

settings_file : str, optional
    Path to the settings file. Defaults to "in/settings.json".
settings_object : dict, optional
    Optional settings object to use instead of loading from a file. Defaults to None.
error : bool, optional
    If True, raises an error if the settings file cannot be loaded. Defaults to True.
warning : bool, optional
    If True, emits a warning if the settings file cannot be loaded. Defaults to True.

Returns:

dict
    The fully resolved settings dictionary.

Source code in openavmkit/pipeline.py
def load_settings(
    settings_file: str = "in/settings.json", settings_object: dict = None, error : bool = True, warning : bool = True
) -> dict:
    """
    Load and return the settings dictionary for the locality.

    This merges the user's settings for their specific locality with the default settings
    template and the default data dictionary. It also performs variable substitution.
    The result is a fully resolved settings dictionary.

    Parameters
    ----------
    settings_file : str, optional
        Path to the settings file. Defaults to "in/settings.json".
    settings_object : dict, optional
        Optional settings object to use instead of loading from a file.
    error : bool, optional
        If True, raises an error if the settings file cannot be loaded. Defaults to True.
    warning : bool, optional
        If True, emits a warning if the settings file cannot be loaded. Defaults to True.

    Returns
    -------
    dict
        The fully resolved settings dictionary.
    """

    return openavmkit.utilities.settings.load_settings(settings_file, settings_object, error, warning)

mark_horizontal_equity_clusters_per_model_group_sup

mark_horizontal_equity_clusters_per_model_group_sup(sup, settings, verbose=False, do_land_clusters=True, do_impr_clusters=True)

Cluster parcels for a horizontal equity study by assigning horizontal equity cluster IDs.

This is done for each model group within a SalesUniversePair. Marking IDs ahead of time allows for more efficient processing later. Delegates to openavmkit.horizontal_equity_study.mark_horizontal_equity_clusters_per_model_group_sup.

Parameters:

sup : SalesUniversePair
    Sales and universe data. Required.
settings : dict
    Configuration settings. Required.
verbose : bool, optional
    If True, prints verbose output. Defaults to False.
do_land_clusters : bool, optional
    If True, enables land clustering. Defaults to True.
do_impr_clusters : bool, optional
    If True, enables improvement clustering. Defaults to True.

Returns:

SalesUniversePair
    Updated SalesUniversePair with horizontal equity clusters marked.

Source code in openavmkit/pipeline.py
def mark_horizontal_equity_clusters_per_model_group_sup(
    sup: SalesUniversePair,
    settings: dict,
    verbose: bool = False,
    do_land_clusters: bool = True,
    do_impr_clusters: bool = True,
) -> SalesUniversePair:
    """
    Cluster parcels for a horizontal equity study by assigning horizontal equity cluster IDs.

    This is done for each model group within a SalesUniversePair. Marking IDs ahead of time
    allows for more efficient processing later. Delegates to
    `openavmkit.horizontal_equity_study.mark_horizontal_equity_clusters_per_model_group_sup`.

    Parameters
    ----------
    sup : SalesUniversePair
        Sales and universe data.
    settings : dict
        Configuration settings.
    verbose : bool, optional
        If True, prints verbose output. Defaults to False.
    do_land_clusters : bool, optional
        If True, enables land clustering. Defaults to True.
    do_impr_clusters : bool, optional
        If True, enables improvement clustering. Defaults to True.

    Returns
    -------
    SalesUniversePair
        Updated SalesUniversePair with horizontal equity clusters marked.
    """
    return openavmkit.horizontal_equity_study.mark_horizontal_equity_clusters_per_model_group_sup(
        sup,
        settings,
        verbose,
        do_land_clusters=do_land_clusters,
        do_impr_clusters=do_impr_clusters,
    )

mark_ss_ids_per_model_group_sup

mark_ss_ids_per_model_group_sup(sup, settings, verbose=False)

Cluster parcels for a sales scrutiny study by assigning sales scrutiny IDs.

This function processes each model group within the provided SalesUniversePair, identifies clusters of parcels for scrutiny, and writes the cluster identifiers into a new field on the universe DataFrame.

Parameters:

sup : SalesUniversePair
    Sales and universe data. Required.
settings : dict
    Configuration settings. Required.
verbose : bool, optional
    If True, prints verbose output during processing. Defaults to False.

Returns:

SalesUniversePair
    Updated SalesUniversePair with marked sales scrutiny IDs.

Source code in openavmkit/pipeline.py
def mark_ss_ids_per_model_group_sup(
    sup: SalesUniversePair, settings: dict, verbose: bool = False
) -> SalesUniversePair:
    """
    Cluster parcels for a sales scrutiny study by assigning sales scrutiny IDs.

    This function processes each model group within the provided SalesUniversePair,
    identifies clusters of parcels for scrutiny, and writes the cluster identifiers
    into a new field on the universe DataFrame.

    Parameters
    ----------
    sup : SalesUniversePair
        Sales and universe data.
    settings : dict
        Configuration settings.
    verbose : bool, optional
        If True, prints verbose output during processing. Defaults to False.

    Returns
    -------
    SalesUniversePair
        Updated SalesUniversePair with marked sales scrutiny IDs.
    """
    df_sales_hydrated = get_hydrated_sales_from_sup(sup)
    df_marked = mark_ss_ids_per_model_group(df_sales_hydrated, settings, verbose)
    sup.update_sales(df_marked, allow_remove_rows=False)
    return sup
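
A sketch of running both marking passes up front so the later studies can reuse the IDs; sup and settings are assumed to exist:

from openavmkit.pipeline import (
    mark_horizontal_equity_clusters_per_model_group_sup,
    mark_ss_ids_per_model_group_sup,
)

sup = mark_horizontal_equity_clusters_per_model_group_sup(sup, settings, verbose=True)
sup = mark_ss_ids_per_model_group_sup(sup, settings, verbose=True)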

process_dataframes

process_dataframes(dataframes, settings, verbose=False)

Process previously loaded dataframes according to the provided settings.

This function merges and enriches the supplied dataframes into a SalesUniversePair and writes the result to the notebook output.

Parameters:

dataframes : dict[str, pd.DataFrame]
    Dictionary of DataFrames. Required.
settings : dict
    A dictionary of settings for data loading and processing. Required.
verbose : bool, optional
    If True, prints detailed logs during processing. Defaults to False.

Returns:

SalesUniversePair
    A SalesUniversePair object containing the processed sales and universe data.

Source code in openavmkit/pipeline.py
def process_dataframes(dataframes: dict[str, pd.DataFrame], settings: dict, verbose: bool = False) -> SalesUniversePair:
    """
    Process previously loaded dataframes according to the provided settings.

    This function merges and enriches the supplied dataframes into a
    SalesUniversePair and writes the result to the notebook output.

    Parameters
    ----------
    dataframes : dict[str, pd.DataFrame]
        Dictionary of DataFrames.
    settings : dict
        A dictionary of settings for data loading and processing.
    verbose : bool, optional
        If True, prints detailed logs during data loading. Defaults to False.

    Returns
    -------
    SalesUniversePair
        A SalesUniversePair object containing the processed sales and universe data.
    """

    results = process_data(dataframes, settings, verbose)

    write_notebook_output_sup(results)

    return results
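
A sketch of the data-assembly flow, assuming settings has already been loaded:

from openavmkit.pipeline import load_dataframes, process_dataframes, process_sales

dataframes = load_dataframes(settings, verbose=True)
sup = process_dataframes(dataframes, settings, verbose=True)  # also writes notebook output
sup = process_sales(sup, settings, verbose=True)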

process_sales

process_sales(sup, settings, verbose=False)

Process sales data within a SalesUniversePair.

This function cleans invalid sales, applies time adjustments, and updates the SalesUniversePair with the enriched sales DataFrame.

Parameters:

sup : SalesUniversePair
    Sales and universe data. Required.
settings : dict
    Configuration settings. Required.
verbose : bool, optional
    If True, prints verbose output during processing. Defaults to False.

Returns:

SalesUniversePair
    Updated SalesUniversePair with processed sales data.

Source code in openavmkit/pipeline.py
def process_sales(
    sup: SalesUniversePair, settings: dict, verbose: bool = False
) -> SalesUniversePair:
    """
    Process sales data within a SalesUniversePair.

    This function cleans invalid sales, applies time adjustments, and updates the
    SalesUniversePair with the enriched sales DataFrame.

    Parameters
    ----------
    sup : SalesUniversePair
        Sales and universe data.
    settings : dict
        Configuration settings.
    verbose : bool, optional
        If True, prints verbose output during processing. Defaults to False.

    Returns
    -------
    SalesUniversePair
        Updated SalesUniversePair with processed sales data.
    """

    # select only valid sales
    sup = clean_valid_sales(sup, settings)

    print(f"len before validate = {len(sup['sales'])}")

    # validate sales using filters
    sup = filter_invalid_sales(sup, settings, verbose)

    print(f"len after validate = {len(sup['sales'])}")

    # make sure sales field has necessary fields for the next step
    df_sales_hydrated = get_hydrated_sales_from_sup(sup)

    print(f"len after hydrate = {len(sup['sales'])}")

    # enrich with time adjustment, and mark what fields were added
    df_sales_enriched = enrich_time_adjustment(df_sales_hydrated, settings, verbose)

    print(f"len after enrich = {len(df_sales_enriched)}")

    df_sales_clipped = _clip_sales_to_use(df_sales_enriched, settings, verbose)

    print(f"len after clip = {len(df_sales_clipped)}")

    # update the SUP sales
    sup.update_sales(df_sales_clipped, allow_remove_rows=True)

    return sup

read_pickle

read_pickle(path)

Read and return data from a pickle file.

Parameters:

path : str
    Path to the pickle file. Required.

Returns:

Any
    The object loaded from the pickle file.

Source code in openavmkit/pipeline.py
def read_pickle(path: str) -> Any:
    """
    Read and return data from a pickle file.

    Parameters
    ----------
    path : str
        Path to the pickle file.

    Returns
    -------
    Any
        The object loaded from the pickle file.
    """
    return openavmkit.checkpoint.read_pickle(path)

run_and_write_ratio_study_breakdowns

run_and_write_ratio_study_breakdowns(settings)

Run ratio study breakdowns and write the results to disk.

Parameters:

settings : dict
    Configuration settings for the ratio study. Required.
Source code in openavmkit/pipeline.py
def run_and_write_ratio_study_breakdowns(settings: dict) -> None:
    """
    Run ratio study breakdowns and write the results to disk.

    Parameters
    ----------
    settings : dict
        Configuration settings for the ratio study.
    """
    openavmkit.ratio_study.run_and_write_ratio_study_breakdowns(settings)

run_models

run_models(sup, settings, save_params=True, use_saved_params=True, save_results=True, verbose=False, run_main=True, run_vacant=True, run_hedonic=True, run_ensemble=True, do_shaps=False, do_plots=False)

Runs predictive models on the given SalesUniversePair.

This function takes detailed instructions from the provided settings dictionary and handles all the internal details like splitting the data, training the models, and saving the results. It performs basic statistical analysis on each model, and optionally combines results into an ensemble model.

If "run_main" is true, it will run normal models as well as hedonic models (if the user so specifies), "hedonic" in this context meaning models that attempt to generate a land value and an improvement value separately. If "run_vacant" is true, it will run vacant models as well -- models that only use vacant models as evidence to generate land values.

This function iterates over model groups and runs models for both main and vacant cases.

Parameters:

sup : SalesUniversePair
    Sales and universe data. Required.
settings : dict
    The settings dictionary. Required.
save_params : bool, optional
    Whether to save model parameters. Defaults to True.
use_saved_params : bool, optional
    Whether to use saved model parameters. Defaults to True.
save_results : bool, optional
    Whether to save model results. Defaults to True.
verbose : bool, optional
    If True, prints additional information. Defaults to False.
run_main : bool, optional
    Whether to run main (non-vacant) models. Defaults to True.
run_vacant : bool, optional
    Whether to run vacant models. Defaults to True.
run_hedonic : bool, optional
    Whether to run hedonic models. Defaults to True.
run_ensemble : bool, optional
    Whether to run ensemble models. Defaults to True.
do_shaps : bool, optional
    Whether to compute SHAP values. Defaults to False.
do_plots : bool, optional
    Whether to plot scatterplots. Defaults to False.

Returns:

MultiModelResults
    The MultiModelResults containing all model results and benchmarks.

Source code in openavmkit/pipeline.py
def run_models(
    sup: SalesUniversePair,
    settings: dict,
    save_params: bool = True,
    use_saved_params: bool = True,
    save_results: bool = True,
    verbose: bool = False,
    run_main: bool = True,
    run_vacant: bool = True,
    run_hedonic: bool = True,
    run_ensemble: bool = True,
    do_shaps: bool = False,
    do_plots: bool = False
):
    """
    Runs predictive models on the given SalesUniversePair.

    This function takes detailed instructions from the provided settings dictionary and handles all the internal
    details like splitting the data, training the models, and saving the results. It performs basic statistical analysis
    on each model, and optionally combines results into an ensemble model.

    If "run_main" is true, it will run normal models as well as hedonic models (if the user so specifies),
    "hedonic" in this context meaning models that attempt to generate a land value and an improvement value separately.
    If "run_vacant" is true, it will run vacant models as well -- models that only use vacant models as evidence
    to generate land values.

    This function iterates over model groups and runs models for both main and vacant cases.

    Parameters
    ----------
    sup : SalesUniversePair
        Sales and universe data.
    settings : dict
        The settings dictionary.
    save_params : bool, optional
        Whether to save model parameters.
    use_saved_params : bool, optional
        Whether to use saved model parameters.
    save_results : bool, optional
        Whether to save model results.
    verbose : bool, optional
        If True, prints additional information.
    run_main : bool, optional
        Whether to run main (non-vacant) models.
    run_vacant : bool, optional
        Whether to run vacant models.
    run_hedonic : bool, optional
        Whether to run hedonic models.
    run_ensemble : bool, optional
        Whether to run ensemble models.
    do_shaps : bool, optional
        Whether to compute SHAP values.
    do_plots : bool, optional
        Whether to plot scatterplots.

    Returns
    -------
    MultiModelResults
        The MultiModelResults containing all model results and benchmarks.
    """
    return openavmkit.benchmark.run_models(
        sup,
        settings,
        save_params,
        use_saved_params,
        save_results,
        verbose,
        run_main,
        run_vacant,
        run_hedonic,
        run_ensemble,
        do_shaps,
        do_plots
    )
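
A sketch of a lighter-weight run than finalize_models, skipping the vacant and hedonic passes:

from openavmkit.pipeline import run_models

results = run_models(
    sup,
    settings,
    verbose=True,
    run_vacant=False,
    run_hedonic=False,
)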

run_sales_scrutiny

run_sales_scrutiny(sup, settings, drop_cluster_outliers=False, drop_heuristic_outliers=True, verbose=False)

Run sales scrutiny analysis for each model group within a SalesUniversePair.

  1. Performs basic sales validation heuristics
  2. Optionally drops manually excluded sales flagged by user
  3. Runs a cluster-based sales scrutiny analysis report

Parameters:

sup : SalesUniversePair
    Sales and universe data. Required.
settings : dict
    Configuration settings. Required.
drop_cluster_outliers : bool, optional
    If True, drops invalid sales identified through cluster analysis. Defaults to False.
drop_heuristic_outliers : bool, optional
    If True, drops invalid sales identified through heuristics. Defaults to True.
verbose : bool, optional
    If True, enables verbose logging. Defaults to False.

Returns:

Type Description
SalesUniversePair

Updated SalesUniversePair after sales scrutiny analysis.

Source code in openavmkit/pipeline.py
def run_sales_scrutiny(
    sup: SalesUniversePair,
    settings: dict,
    drop_cluster_outliers: bool = False,
    drop_heuristic_outliers: bool = True,
    verbose: bool = False,
) -> SalesUniversePair:
    """
    Run sales scrutiny analysis for each model group within a SalesUniversePair.

    1. Performs basic sales validation heuristics
    2. Drops any sales the user has manually excluded
    3. Runs a cluster-based sales scrutiny analysis report

    Parameters
    ----------
    sup : SalesUniversePair
        Sales and universe data.
    settings : dict
        Configuration settings.
    drop_cluster_outliers : bool, optional
        If True, drops invalid sales identified through cluster analysis. Defaults to False.
    drop_heuristic_outliers : bool, optional
        If True, drops invalid sales identified through heuristics. Defaults to True.
    verbose : bool, optional
        If True, enables verbose logging. Defaults to False.

    Returns
    -------
    SalesUniversePair
        Updated SalesUniversePair after sales scrutiny analysis.
    """

    ss = settings.get("analysis", {}).get("sales_scrutiny", {})
    clusters_enabled = ss.get("clusters_enabled", True)
    heuristics_enabled = ss.get("heuristics_enabled", True)

    os.makedirs("out/sales_scrutiny/", exist_ok=True)

    if heuristics_enabled:
        sup = run_heuristics(sup, settings, drop_heuristic_outliers, verbose)
    elif verbose:
        print(f"Skipping sales scrutiny heuristics...")

    sup = drop_manual_exclusions(sup, settings, verbose)

    if clusters_enabled:
        sup = run_sales_scrutiny_per_model_group_sup(
            sup, settings, drop_cluster_outliers, verbose
        )
    elif verbose:
        print(f"Skipping clustered sales scrutiny analysis...")
    return sup
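
Example usage (a sketch): the clusters_enabled and heuristics_enabled keys below are exactly the ones read by the function body above; everything else about the settings dictionary is elided, and sup is assumed to be an already-loaded SalesUniversePair.

from openavmkit.pipeline import run_sales_scrutiny

# Only the keys read by run_sales_scrutiny are shown here; a real
# settings dictionary carries much more.
settings = {
    "analysis": {
        "sales_scrutiny": {
            "clusters_enabled": True,     # run the cluster-based report
            "heuristics_enabled": False,  # skip the validation heuristics
        }
    }
}

# `sup` is assumed to be an already-loaded SalesUniversePair.
sup = run_sales_scrutiny(sup, settings, drop_cluster_outliers=True, verbose=True)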

run_sales_scrutiny_per_model_group_sup

run_sales_scrutiny_per_model_group_sup(sup, settings, drop=True, verbose=False)

Run sales scrutiny analysis for each model group within a SalesUniversePair.

Parameters:

    sup : SalesUniversePair
        Sales and universe data.
    settings : dict
        Configuration settings.
    drop : bool, optional
        If True, drops invalid sales after scrutiny. Defaults to True.
    verbose : bool, optional
        If True, enables verbose logging. Defaults to False.

Returns:

    SalesUniversePair
        Updated SalesUniversePair after sales scrutiny analysis.

Source code in openavmkit/pipeline.py
def run_sales_scrutiny_per_model_group_sup(
    sup: SalesUniversePair, settings: dict, drop: bool = True, verbose: bool = False
) -> SalesUniversePair:
    """
    Run sales scrutiny analysis for each model group within a SalesUniversePair.

    Parameters
    ----------
    sup : SalesUniversePair
        Sales and universe data.
    settings : dict
        Configuration settings.
    drop : bool, optional
        If True, drops invalid sales after scrutiny. Defaults to True.
    verbose : bool, optional
        If True, enables verbose logging. Defaults to False.

    Returns
    -------
    SalesUniversePair
        Updated SalesUniversePair after sales scrutiny analysis.
    """

    df_sales_hydrated = get_hydrated_sales_from_sup(sup)
    df_scrutinized = run_sales_scrutiny_per_model_group(
        df_sales_hydrated, settings, verbose
    )

    if drop:
        # Drop all invalid sales
        df_scrutinized = df_scrutinized[df_scrutinized["valid_sale"].eq(True)]
        sup_num_valid_before = len(sup.sales[sup.sales["valid_sale"].eq(True)])

        sup.update_sales(df_scrutinized, allow_remove_rows=True)

        sup_num_valid_after = len(sup.sales[sup.sales["valid_sale"].eq(True)])

        if verbose:
            diff = sup_num_valid_before - sup_num_valid_after
            print("")
            print(
                f"Number of valid sales in SUP before scrutiny: {sup_num_valid_before}"
            )
            print(f"Number of valid sales in SUP after scrutiny: {sup_num_valid_after}")
            print(f"Difference in valid sales in SUP: {diff}")
    else:
        sup.update_sales(df_scrutinized, allow_remove_rows=False)

    return sup
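
A sketch contrasting the two drop modes, assuming sup and settings are already in scope:

from openavmkit.pipeline import run_sales_scrutiny_per_model_group_sup

# Flag suspect sales but keep every row, so the updated valid_sale
# flags can be reviewed before anything is removed.
sup = run_sales_scrutiny_per_model_group_sup(sup, settings, drop=False)

# Once the flags look right, re-run with drop=True (the default) to
# actually remove the invalid sales.
sup = run_sales_scrutiny_per_model_group_sup(sup, settings, verbose=True)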

tag_model_groups_sup

tag_model_groups_sup(sup, settings, verbose=False)

Tag model groups for a SalesUniversePair.

This function applies user-specified filters that identify rows belonging to particular model groups, then writes the results to the model_group field.

Parameters:

    sup : SalesUniversePair
        Sales and universe data.
    settings : dict
        Configuration settings.
    verbose : bool, optional
        If True, enables verbose output. Defaults to False.

Returns:

    SalesUniversePair
        Updated SalesUniversePair with tagged model groups.

Source code in openavmkit/pipeline.py
def tag_model_groups_sup(
    sup: SalesUniversePair, settings: dict, verbose: bool = False
) -> SalesUniversePair:
    """
    Tag model groups for a SalesUniversePair.

    This function applies user-specified filters that identify rows belonging to
    particular model groups, then writes the results to the `model_group` field.

    Parameters
    ----------
    sup : SalesUniversePair
        Sales and universe data.
    settings : dict
        Configuration settings.
    verbose : bool, optional
        If True, enables verbose output.

    Returns
    -------
    SalesUniversePair
        Updated SalesUniversePair with tagged model groups.
    """
    return openavmkit.data._tag_model_groups_sup(sup, settings, verbose)
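
A minimal call pattern; the shape of the model-group filters themselves lives in the settings file and is not reproduced here.

from openavmkit.pipeline import tag_model_groups_sup

# Applies the user's model-group filters and writes the results to the
# model_group field on the sales and universe data.
# `sup` and `settings` are assumed to be already in scope.
sup = tag_model_groups_sup(sup, settings, verbose=True)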

try_models

try_models(sup, settings, save_params=True, use_saved_params=True, verbose=False, run_main=True, run_vacant=True, run_hedonic=True, run_ensemble=True, do_shaps=False, do_plots=False)

Tries out predictive models on the given SalesUniversePair. Optimized for speed and iteration, it does not finalize results or write anything to disk.

This function takes detailed instructions from the provided settings dictionary and handles all the internal details like splitting the data and training the models. It performs basic statistical analysis on each model, and optionally combines results into an ensemble model.

If "run_main" is true, it will run normal models as well as hedonic models (if the user so specifies), "hedonic" in this context meaning models that attempt to generate a land value and an improvement value separately. If "run_vacant" is true, it will run vacant models as well -- models that only use vacant sales as evidence to generate land values.

This function delegates the model execution to openavmkit.benchmark.run_models with the given settings.

Parameters:

    sup : SalesUniversePair
        Sales and universe data.
    settings : dict
        Configuration settings.
    save_params : bool, optional
        Whether to save model parameters. Defaults to True.
    use_saved_params : bool, optional
        Whether to use saved model parameters. Defaults to True.
    verbose : bool, optional
        If True, enables verbose output. Defaults to False.
    run_main : bool, optional
        Flag to run main models. Defaults to True.
    run_vacant : bool, optional
        Flag to run vacant models. Defaults to True.
    run_hedonic : bool, optional
        Flag to run hedonic models. Defaults to True.
    run_ensemble : bool, optional
        Flag to run ensemble models. Defaults to True.
    do_shaps : bool, optional
        Flag to run SHAP analysis. Defaults to False.
    do_plots : bool, optional
        Flag to plot scatterplots. Defaults to False.
Source code in openavmkit/pipeline.py
def try_models(
    sup: SalesUniversePair,
    settings: dict,
    save_params: bool = True,
    use_saved_params: bool = True,
    verbose: bool = False,
    run_main: bool = True,
    run_vacant: bool = True,
    run_hedonic: bool = True,
    run_ensemble: bool = True,
    do_shaps: bool = False,
    do_plots: bool = False
) -> None:
    """
    Tries out predictive models on the given SalesUniversePair. Optimized for speed
    and iteration, it does not finalize results or write anything to disk.

    This function takes detailed instructions from the provided settings dictionary and
    handles all the internal details like splitting the data and training the models.
    It performs basic statistical analysis on each model, and optionally combines
    results into an ensemble model.

    If "run_main" is true, it will run normal models as well as hedonic models (if the
    user so specifies), "hedonic" in this context meaning models that attempt to generate
    a land value and an improvement value separately. If "run_vacant" is true, it will run
    vacant models as well -- models that only use vacant sales as evidence to generate
    land values.

    This function delegates the model execution to `openavmkit.benchmark.run_models`
    with the given settings.

    Parameters
    ----------
    sup : SalesUniversePair
        Sales and universe data.
    settings : dict
        Configuration settings.
    save_params : bool, optional
        Whether to save model parameters. Defaults to True.
    use_saved_params : bool, optional
        Whether to use saved model parameters. Defaults to True.
    verbose : bool, optional
        If True, enables verbose output. Defaults to False.
    run_main : bool, optional
        Flag to run main models. Defaults to True.
    run_vacant : bool, optional
        Flag to run vacant models. Defaults to True.
    run_hedonic : bool, optional
        Flag to run hedonic models. Defaults to True.
    run_ensemble : bool, optional
        Flag to run ensemble models. Defaults to True.
    do_shaps : bool, optional
        Flag to run SHAP analysis. Defaults to False.
    do_plots : bool, optional
        Flag to plot scatterplots. Defaults to False.
    """

    openavmkit.benchmark.run_models(
        sup,
        settings,
        save_params,
        use_saved_params,
        save_results=False,
        verbose=verbose,
        run_main=run_main,
        run_vacant=run_vacant,
        run_hedonic=run_hedonic,
        run_ensemble=run_ensemble,
        do_shaps=do_shaps,
        do_plots=do_plots
    )
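
Because nothing is finalized to disk, try_models suits a tight experiment loop in a notebook; a hedged sketch, assuming sup and settings are already in scope:

from openavmkit.pipeline import try_models

# Quick iteration: reuse previously saved hyperparameters and skip the
# slower vacant and hedonic passes while experimenting.
try_models(
    sup,
    settings,
    use_saved_params=True,
    run_vacant=False,
    run_hedonic=False,
    verbose=True,
)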

try_variables

try_variables(sup, settings, verbose=False, plot=False, do_report=False)

Run tests on variables to figure out which might be the most predictive.

Parameters:

    sup : SalesUniversePair
        Your data.
    settings : dict
        Settings dictionary.
    verbose : bool, optional
        If True, prints detailed logs. Defaults to False.
    plot : bool, optional
        If True, prints visual plots. Defaults to False.
    do_report : bool, optional
        If True, generates PDF reports. Defaults to False.
Source code in openavmkit/pipeline.py
def try_variables(
    sup: SalesUniversePair,
    settings: dict,
    verbose: bool = False,
    plot: bool = False,
    do_report: bool = False,
):
    """
    Run tests on variables to figure out which might be the most predictive.

    Parameters
    ----------
    sup : SalesUniversePair
        Your data.
    settings : dict
        Settings dictionary.
    verbose : bool, optional
        If True, prints detailed logs. Defaults to False.
    plot : bool, optional
        If True, prints visual plots. Defaults to False.
    do_report : bool, optional
        If True, generates PDF reports. Defaults to False.
    """
    sup = fill_unknown_values_sup(sup, settings)
    openavmkit.benchmark.try_variables(sup, settings, verbose, plot, do_report)
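
A typical exploratory call, assuming sup and settings are already in scope:

from openavmkit.pipeline import try_variables

# Screen candidate variables for predictive power; enable plot for a
# visual read, and do_report to also generate the PDF reports.
try_variables(sup, settings, verbose=True, plot=True, do_report=False)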

write_canonical_splits

write_canonical_splits(sup, settings, verbose=False)

Write canonical splits for the sales DataFrame.

This separates the sales data into training and test sets and stores the keys to disk, ensuring consistent splits across multiple models for proper ensembling. Delegates to openavmkit.data._write_canonical_splits.

Parameters:

    sup : SalesUniversePair
        Sales and universe data.
    settings : dict
        Configuration settings.
    verbose : bool, optional
        Whether to print verbose output. Defaults to False.
Source code in openavmkit/pipeline.py
def write_canonical_splits(sup: SalesUniversePair, settings: dict, verbose: bool = False) -> None:
    """
    Write canonical splits for the sales DataFrame.

    This separates the sales data into training and test sets and stores the keys to disk,
    ensuring consistent splits across multiple models for proper ensembling. Delegates to
    `openavmkit.data._write_canonical_splits`.

    Parameters
    ----------
    sup : SalesUniversePair
        Sales and universe data.
    settings : dict
        Configuration settings.
    verbose : bool
        Whether to print verbose output.
    """

    openavmkit.data._write_canonical_splits(sup, settings, verbose)
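
Since the split keys are persisted, calling this once before modeling keeps every subsequent model on identical train/test rows; a sketch, assuming sup and settings are already in scope:

from openavmkit.pipeline import run_models, write_canonical_splits

# Fix the train/test split once so every model (and the ensemble built
# from them) sees the same rows.
write_canonical_splits(sup, settings, verbose=True)
results = run_models(sup, settings, verbose=True)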

write_checkpoint

write_checkpoint(data, path)

Write data to a checkpoint file.

Saves a pandas DataFrame as Parquet if data is a DataFrame; otherwise, pickle-serializes data.

Parameters:

    data : Any
        Data to be checkpointed.
    path : str
        File path for saving the checkpoint.
Source code in openavmkit/pipeline.py
def write_checkpoint(data: Any, path: str):
    """
    Write data to a checkpoint file.

    Saves a pandas DataFrame as Parquet if `data` is a DataFrame; otherwise, pickle-serializes `data`.

    Parameters
    ----------
    data : Any
        Data to be checkpointed.
    path : str
        File path for saving the checkpoint.
    """
    return openavmkit.checkpoint.write_checkpoint(data, path)
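
A sketch of the two dispatch paths described above; the paths are hypothetical, and any extension handling is up to openavmkit.checkpoint.

import pandas as pd

from openavmkit.pipeline import write_checkpoint

df = pd.DataFrame({"key": ["a", "b"], "value": [1.0, 2.0]})
write_checkpoint(df, "out/checkpoints/example-df")  # DataFrame -> Parquet

state = {"stage": "example", "rows": len(df)}
write_checkpoint(state, "out/checkpoints/example-state")  # other -> pickle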

write_notebook_output_sup

write_notebook_output_sup(sup, prefix='1-assemble', parquet=True, gpkg=False, shp=False)

Write notebook output to disk.

This function saves the SalesUniversePair as a pickle file and writes the corresponding 'universe' and 'sales' DataFrames to Parquet files, with optional GeoPackage and zipped-shapefile outputs as well.

Parameters:

    sup : SalesUniversePair
        Sales and universe data.
    prefix : str, optional
        File prefix for naming output files. Defaults to "1-assemble".
    parquet : bool, optional
        Whether to write to Parquet format. Defaults to True.
    gpkg : bool, optional
        Whether to write to GeoPackage (gpkg) format. Defaults to False.
    shp : bool, optional
        Whether to write to ESRI shapefile format. Defaults to False.
Source code in openavmkit/pipeline.py
def write_notebook_output_sup(
    sup: SalesUniversePair, 
    prefix: str = "1-assemble",
    parquet: bool = True,
    gpkg: bool = False,
    shp: bool = False
) -> None:
    """
    Write notebook output to disk.

    This function saves the SalesUniversePair as a pickle file and writes the
    corresponding 'universe' and 'sales' DataFrames to Parquet files, with
    optional GeoPackage and zipped-shapefile outputs as well.

    Parameters
    ----------
    sup : SalesUniversePair
        Sales and universe data.
    prefix : str, optional
        File prefix for naming output files. Defaults to "1-assemble".
    parquet : bool, optional
        Whether to write to Parquet format. Defaults to True.
    gpkg : bool, optional
        Whether to write to GeoPackage (gpkg) format. Defaults to False.
    shp : bool, optional
        Whether to write to ESRI shapefile format. Defaults to False.
    """

    try:
        os.makedirs("out/look", exist_ok=True)
        with open(f"out/{prefix}-sup.pickle", "wb") as file:
            pickle.dump(sup, file)

        # universe
        if parquet:
            write_parquet(sup["universe"], f"out/look/{prefix}-universe.parquet")
        if gpkg:
            write_gpkg(sup["universe"], f"out/look/{prefix}-universe.gpkg")
        if shp:
            write_zipped_shapefile(sup["universe"], f"out/look/{prefix}-universe.shp.zip")

        # sales
        if parquet:
            write_parquet(sup["sales"], f"out/look/{prefix}-sales.parquet")

        # sales (hydrated)
        df_hydrated = get_hydrated_sales_from_sup(sup)
        if parquet:
            write_parquet(df_hydrated, f"out/look/{prefix}-sales-hydrated.parquet")
        if gpkg:
            write_gpkg(df_hydrated, f"out/look/{prefix}-sales-hydrated.gpkg")
        if shp:
            write_zipped_shapefile(df_hydrated, f"out/look/{prefix}-sales-hydrated.shp.zip")

        print(f"...out/{prefix}-sup.pickle")
        if parquet:
            print(f"...out/look/{prefix}-universe.parquet")
            print(f"...out/look/{prefix}-sales.parquet")
            print(f"...out/look/{prefix}-sales-hydrated.parquet")
        if gpkg:
            print(f"...out/look/{prefix}-universe.gpkg")
            print(f"...out/look/{prefix}-sales-hydrated.gpkg")
        if shp:
            print(f"...out/look/{prefix}-universe.shp.zip")
            print(f"...out/look/{prefix}-sales-hydrated.shp.zip")
    except Exception as e:
        warnings.warn(f"Failed to output sup: {str(e)}")
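
A closing sketch: persisting a notebook's outputs in more than one format (the "2-clean" prefix is hypothetical; any prefix works).

from openavmkit.pipeline import write_notebook_output_sup

# Write parquet plus a GeoPackage for inspection in desktop GIS tools;
# shapefile output stays off. `sup` is assumed to be already in scope.
write_notebook_output_sup(sup, prefix="2-clean", parquet=True, gpkg=True, shp=False)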